/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOBufferMemoryDescriptor.h>
__BEGIN_DECLS
void ipc_port_release_send(ipc_port_t port);

vm_map_t IOPageableMapForAddress( vm_address_t address );
__END_DECLS
#define super IOGeneralMemoryDescriptor
OSDefineMetaClassAndStructors(IOBufferMemoryDescriptor,
                                IOGeneralMemoryDescriptor);
/*
 * The inherited initWithAddress / initWithRanges variants do not apply to
 * a buffer descriptor, which always owns its backing allocation; they are
 * deliberately disabled and always fail.
 */
bool IOBufferMemoryDescriptor::initWithAddress(
                                  void *      /* address       */ ,
                                  IOByteCount /* withLength    */ ,
                                  IODirection /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithAddress(
                                  vm_address_t /* address       */ ,
                                  IOByteCount  /* withLength    */ ,
                                  IODirection  /* withDirection */ ,
                                  task_t       /* withTask      */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalAddress(
                                  IOPhysicalAddress /* address       */ ,
                                  IOByteCount       /* withLength    */ ,
                                  IODirection       /* withDirection */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithPhysicalRanges(
                                  IOPhysicalRange * /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  bool              /* asReference   */ )
{
    return false;
}

bool IOBufferMemoryDescriptor::initWithRanges(
                                  IOVirtualRange *  /* ranges        */ ,
                                  UInt32            /* withCount     */ ,
                                  IODirection       /* withDirection */ ,
                                  task_t            /* withTask      */ ,
                                  bool              /* asReference   */ )
{
    return false;
}
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment,
                               task_t       inTask)
{
    kern_return_t kr;
    vm_map_t      map = 0;
    IOOptionBits  iomdOptions = kIOMemoryAsReference | kIOMemoryTypeVirtual;

    if (!capacity)
        return false;

    _options   = options;
    _capacity  = capacity;
    _buffer    = 0;

    // Grab the direction and the Auto Prepare bits from the Buffer MD options
    iomdOptions |= options & (kIOMemoryDirectionMask | kIOMemoryAutoPrepare);

    // Shared buffers must be page aligned so a whole number of pages can be mapped.
    if ((options & kIOMemorySharingTypeMask) && (alignment < page_size))
        alignment = page_size;

    // Only pageable buffers may live in a non-kernel task's map.
    if ((inTask != kernel_task) && !(options & kIOMemoryPageable))
        return false;

    _alignment = alignment;
    if (options & kIOMemoryPageable)
    {
        iomdOptions |= kIOMemoryBufferPageable;
        if (inTask == kernel_task)
        {
            /* Allocate some kernel address space. */
            _buffer = IOMallocPageable(capacity, alignment);
            if (_buffer)
                map = IOPageableMapForAddress((vm_address_t) _buffer);
        }
        else
        {
            if (!reserved)
            {
                reserved = IONew( ExpansionData, 1 );
                if (!reserved)
                    return false;
            }
            map = get_task_map(inTask);
            vm_map_reference(map);
            reserved->map = map;
            kr = vm_allocate( map, (vm_address_t *) &_buffer, round_page_32(capacity),
                                VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != kr)
                return false;

            // we have to make sure that these pages don't get copied on fork.
            kr = vm_inherit( map, (vm_address_t) _buffer, round_page_32(capacity), VM_INHERIT_NONE);
            if( KERN_SUCCESS != kr)
                return false;
        }
    }
    else
    {
        // @@@ gvdl: Need to remove this.
        // Buffers should never auto prepare; they should be prepared
        // explicitly.  But that was never enforced, so keep the old behavior.
        iomdOptions |= kIOMemoryAutoPrepare;

        /* Allocate a wired-down buffer inside kernel space. */
        if (options & kIOMemoryPhysicallyContiguous)
            _buffer = IOMallocContiguous(capacity, alignment, 0);
        else if (alignment > 1)
            _buffer = IOMallocAligned(capacity, alignment);
        else
            _buffer = IOMalloc(capacity);
    }

    if (!_buffer)
        return false;

    _singleRange.v.address = (vm_address_t) _buffer;
    _singleRange.v.length  = capacity;

    if (!super::initWithOptions(&_singleRange.v, 1, 0,
                                inTask, iomdOptions, /* System mapper */ 0))
        return false;

    if (options & kIOMemoryPageable)
    {
        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_size_t  size      = round_page_32(_ranges.v[0].length);

        // must create the entry before any pages are allocated
        if( 0 == sharedMem) {

            // set memory entry cache mode
            vm_prot_t memEntryCacheMode = VM_PROT_READ | VM_PROT_WRITE;
            switch (options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            kr = mach_make_memory_entry( map,
                        &size, _ranges.v[0].address,
                        memEntryCacheMode, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (size != round_page_32(_ranges.v[0].length))) {
                ipc_port_release_send( sharedMem );
                kr = kIOReturnVMError;
            }
            if( KERN_SUCCESS != kr)
                return false;

            _memEntry = (void *) sharedMem;
        }
    }

    setLength(capacity);

    return true;
}
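
/*
 * Illustrative sketch (not part of the original file): a caller can request
 * a specific cache mode for a shared, pageable buffer by OR-ing one of the
 * kIOMapCacheMask values into the creation options.  The size, direction,
 * and sharing flags shown here are assumptions for the example.
 *
 *   IOBufferMemoryDescriptor * bmd = IOBufferMemoryDescriptor::withOptions(
 *       kIODirectionOutIn | kIOMemoryPageable | kIOMemoryKernelUserShared
 *        | kIOMapWriteCombineCache,
 *       page_size, page_size);
 */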
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::inTaskWithOptions(
                                            task_t       inTask,
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, inTask)) {
        me->release();
        me = 0;
    }
    return me;
}
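
/*
 * Illustrative sketch (not part of the original file): allocating a pageable
 * buffer directly in a user task's address map.  'userTask' and 'bytes' are
 * hypothetical caller-side values (e.g. from an IOUserClient's owning task).
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::inTaskWithOptions(
 *           userTask, kIODirectionOutIn | kIOMemoryPageable,
 *           round_page_32(bytes), page_size);
 *   // on success the pages are marked VM_INHERIT_NONE, so they are not
 *   // copied into a child task on fork()
 */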
bool IOBufferMemoryDescriptor::initWithOptions(
                               IOOptionBits options,
                               vm_size_t    capacity,
                               vm_offset_t  alignment)
{
    return( initWithOptions(options, capacity, alignment, kernel_task) );
}
IOBufferMemoryDescriptor * IOBufferMemoryDescriptor::withOptions(
                                            IOOptionBits options,
                                            vm_size_t    capacity,
                                            vm_offset_t  alignment)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithOptions(options, capacity, alignment, kernel_task)) {
        me->release();
        me = 0;
    }
    return me;
}
/*
 * withCapacity:
 *
 * Returns a new IOBufferMemoryDescriptor with a buffer large enough to
 * hold capacity bytes.  The descriptor's length is initially set to the
 * capacity.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withCapacity(vm_size_t   inCapacity,
                                       IODirection inDirection,
                                       bool        inContiguous)
{
    return( IOBufferMemoryDescriptor::withOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inCapacity, inContiguous ? inCapacity : 1 ));
}
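
/*
 * Illustrative sketch (not part of the original file): a typical wired,
 * physically contiguous buffer for DMA.  Note that withCapacity passes the
 * capacity itself as the alignment when inContiguous is true.
 *
 *   IOBufferMemoryDescriptor * dma =
 *       IOBufferMemoryDescriptor::withCapacity(4096, kIODirectionOutIn, true);
 *   if (dma) {
 *       dma->setLength(512);   // transfer only the first 512 bytes
 *       // ... issue the transfer ...
 *       dma->release();
 *   }
 */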
/*
 * initWithBytes:
 *
 * Initialize a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
bool IOBufferMemoryDescriptor::initWithBytes(const void * inBytes,
                                             vm_size_t    inLength,
                                             IODirection  inDirection,
                                             bool         inContiguous)
{
    if (!initWithOptions(
               inDirection | kIOMemoryUnshared
                | (inContiguous ? kIOMemoryPhysicallyContiguous : 0),
               inLength, inLength ))
        return false;

    // start out with no data
    setLength(0);

    if (!appendBytes(inBytes, inLength))
        return false;

    return true;
}
/*
 * withBytes:
 *
 * Returns a new IOBufferMemoryDescriptor preloaded with bytes (copied).
 * The descriptor's length and capacity are set to the input buffer's size.
 */
IOBufferMemoryDescriptor *
IOBufferMemoryDescriptor::withBytes(const void * inBytes,
                                    vm_size_t    inLength,
                                    IODirection  inDirection,
                                    bool         inContiguous)
{
    IOBufferMemoryDescriptor *me = new IOBufferMemoryDescriptor;

    if (me && !me->initWithBytes(inBytes, inLength, inDirection, inContiguous)){
        me->release();
        me = 0;
    }
    return me;
}
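
/*
 * Illustrative sketch (not part of the original file): preloading a
 * descriptor with a small command block that is then handed to hardware.
 * 'cmd' is a hypothetical caller-side structure.
 *
 *   IOBufferMemoryDescriptor * bmd =
 *       IOBufferMemoryDescriptor::withBytes(&cmd, sizeof(cmd),
 *                                           kIODirectionOut, false);
 */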
/*
 * free:
 *
 * Free resources
 */
void IOBufferMemoryDescriptor::free()
{
    // Cache all of the relevant information on the stack for use
    // after we call super::free()!
    IOOptionBits options   = _options;
    vm_size_t    size      = _capacity;
    void *       buffer    = _buffer;
    vm_map_t     map       = 0;
    vm_offset_t  alignment = _alignment;

    if (reserved)
    {
        map = reserved->map;
        IODelete( reserved, ExpansionData, 1 );
    }

    /* super::free may unwire - deallocate buffer afterwards */
    super::free();

    if (buffer)
    {
        if (options & kIOMemoryPageable)
        {
            if (map)
                vm_deallocate(map, (vm_address_t) buffer, round_page_32(size));
            else
                IOFreePageable(buffer, size);
        }
        else
        {
            if (options & kIOMemoryPhysicallyContiguous)
                IOFreeContiguous(buffer, size);
            else if (alignment > 1)
                IOFreeAligned(buffer, size);
            else
                IOFree(buffer, size);
        }
    }
    if (map)
        vm_map_deallocate(map);
}
/*
 * getCapacity:
 *
 * Get the buffer capacity
 */
vm_size_t IOBufferMemoryDescriptor::getCapacity() const
{
    return _capacity;
}
/*
 * setLength:
 *
 * Change the buffer length of the memory descriptor.  When a new buffer
 * is created, the initial length of the buffer is set to be the same as
 * the capacity.  The length can be adjusted via setLength for a shorter
 * transfer; there is no need to create more buffer descriptors when you
 * can reuse an existing one, even for different transfer sizes.  Note
 * that the specified length must not exceed the capacity of the buffer.
 */
void IOBufferMemoryDescriptor::setLength(vm_size_t length)
{
    assert(length <= _capacity);

    _length = length;
    _singleRange.v.length = length;
}
/*
 * setDirection:
 *
 * Change the direction of the transfer.  This method allows one to redirect
 * the descriptor's transfer direction.  This eliminates the need to destroy
 * and create new buffers when different transfer directions are needed.
 */
void IOBufferMemoryDescriptor::setDirection(IODirection direction)
{
    _direction = direction;
}
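
/*
 * Illustrative sketch (not part of the original file): reusing one
 * descriptor for transfers of different sizes and directions instead of
 * allocating a new one per I/O.
 *
 *   bmd->setLength(firstXferBytes);     // must be <= getCapacity()
 *   bmd->setDirection(kIODirectionOut);
 *   // ... issue the first transfer ...
 *   bmd->setLength(secondXferBytes);
 *   bmd->setDirection(kIODirectionIn);
 */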
/*
 * appendBytes:
 *
 * Add some data to the end of the buffer.  This method automatically
 * maintains the memory descriptor buffer length.  Note that appendBytes
 * will not copy past the end of the memory descriptor's current capacity.
 */
bool
IOBufferMemoryDescriptor::appendBytes(const void * bytes, vm_size_t withLength)
{
    vm_size_t actualBytesToCopy = min(withLength, _capacity - _length);

    assert(_length <= _capacity);
    bcopy(/* from */ bytes, (void *)(_singleRange.v.address + _length),
          actualBytesToCopy);
    _length += actualBytesToCopy;
    _singleRange.v.length += actualBytesToCopy;

    return true;
}
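
/*
 * Illustrative sketch (not part of the original file): building a buffer up
 * incrementally.  Bytes beyond the remaining capacity are silently dropped,
 * so callers should check getLength() when that matters.
 *
 *   bmd->setLength(0);
 *   bmd->appendBytes(&hdr, sizeof(hdr));
 *   bmd->appendBytes(payload, payloadLen);
 */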
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of the beginning of the buffer
 */
void * IOBufferMemoryDescriptor::getBytesNoCopy()
{
    return (void *)_singleRange.v.address;
}
/*
 * getBytesNoCopy:
 *
 * Return the virtual address of an offset from the beginning of the buffer
 */
void *
IOBufferMemoryDescriptor::getBytesNoCopy(vm_size_t start, vm_size_t withLength)
{
    if (start < _length && (start + withLength) <= _length)
        return (void *)(_singleRange.v.address + start);
    return 0;
}
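
/*
 * Illustrative sketch (not part of the original file): the offset variant
 * range-checks against the current length, not the capacity, and returns 0
 * for any window extending past getLength().
 *
 *   UInt8 * p = (UInt8 *) bmd->getBytesNoCopy(16, 64);
 *   if (p) {
 *       // bytes [16, 80) lie within the descriptor's current length
 *   }
 */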
OSMetaClassDefineReservedUsed(IOBufferMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOBufferMemoryDescriptor, 15);