/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <sys/cdefs.h>
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, boolean_t wired);
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryDescriptor, OSObject )
OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is assumed.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       withLength,
                                IODirection       withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                             withDirection, (task_t) 0 ));
}
/*
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}
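/*
 * Usage sketch (illustrative only, not part of this file): a driver normally
 * obtains a descriptor from one of the factory methods above rather than
 * constructing IOGeneralMemoryDescriptor directly.  The names `clientTask`,
 * `clientBuffer` and `bufferSize` below are hypothetical.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
 *                                 (vm_address_t) clientBuffer, bufferSize,
 *                                 kIODirectionOut, clientTask );
 *   if( md) {
 *       // ... use the descriptor (prepare/readBytes/map/etc.) ...
 *       md->release();
 *   }
 *
 * withSubRange() can then carve a window out of an existing descriptor, e.g.
 * IOMemoryDescriptor::withSubRange( md, windowOffset, windowLength,
 * kIODirectionOut ).
 */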
/*
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is assumed.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
/*
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    assert(ranges);
    assert(withCount);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        assert(_wireCount == 0);

        while (_wireCount)
            complete();
        if (_kernPtrAligned)
            unmapFromKernel();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */

    _ranges.v              = 0;
    _rangesCount           = withCount;
    _rangesIsAllocated     = asReference ? false : true;
    _direction             = withDirection;
    _length                = 0;
    _task                  = withTask;
    _position              = 0;
    _positionAtIndex       = 0;
    _positionAtOffset      = 0;
    _kernPtrAligned        = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;
    _flags                 = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
        _length += _ranges.v[index].length;

    return true;
}
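/*
 * Re-use sketch (illustrative only): because initWithRanges() cleans up any
 * previous state, a single IOGeneralMemoryDescriptor instance may be
 * retargeted at a new set of ranges rather than reallocated.  The variables
 * `md`, `newRanges`, `newCount` and `clientTask` are hypothetical.
 *
 *   if( !md->initWithRanges( newRanges, newCount, kIODirectionIn,
 *                            clientTask, false ))
 *       ; // handle the failure; the descriptor is no longer usable
 */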
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                                   UInt32            withCount,
                                                   IODirection       withDirection,
                                                   bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                            withCount, withDirection, (task_t) 0, asReference ));
}
void IOGeneralMemoryDescriptor::free()
{
    while (_wireCount)
        complete();
    if (_kernPtrAligned)
        unmapFromKernel();
    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    kern_return_t krtn;
    vm_offset_t   off;

    // Pull the shared pages out of the task map
    // Do we need to unwire it first?
    for ( off = 0; off < _kernSize; off += page_size )
    {
        pmap_change_wiring(
                        kernel_pmap,
                        _kernPtrAligned + off,
                        FALSE);

        pmap_remove(
                        kernel_pmap,
                        _kernPtrAligned + off,
                        _kernPtrAligned + off + page_size);
    }
    // Free the former shmem area in the task
    krtn = vm_deallocate(kernel_map,
                         _kernPtrAligned,
                         _kernSize );
    assert(krtn == KERN_SUCCESS);
    _kernPtrAligned = 0;
}
void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    kern_return_t krtn;
    vm_offset_t   off;

    if (_kernPtrAtIndex == rangeIndex)  return;

    assert(_kernPtrAligned == 0);

    vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);

    _kernSize = trunc_page(_ranges.v[rangeIndex].address +
                           _ranges.v[rangeIndex].length  +
                           page_size - 1) - srcAlign;

    /* Find some memory of the same size in kernel task.  We use vm_allocate()
       to do this. vm_allocate inserts the found memory object in the
       target task's map as a side effect. */
    krtn = vm_allocate( kernel_map,
                        &_kernPtrAligned,
                        _kernSize,
                        VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
    assert(krtn == KERN_SUCCESS);
    if (krtn)  return;

    /* For each page in the area allocated from the kernel map,
       find the physical address of the page.
       Enter the page in the target task's pmap, at the
       appropriate target task virtual address. */
    for ( off = 0; off < _kernSize; off += page_size )
    {
        vm_offset_t kern_phys_addr, phys_addr;

        if (_task)
            phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
        else
            phys_addr = srcAlign + off;
        assert(phys_addr);
        if (phys_addr == 0)  return;

        // Check original state.
        kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
        // Set virtual page to point to the right physical one
        pmap_enter(
            kernel_pmap,
            _kernPtrAligned + off,
            phys_addr,
            VM_PROT_READ|VM_PROT_WRITE,
            TRUE);
    }
    _kernPtrAtIndex = rangeIndex;
}
/*
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag(
        IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag );
}
/*
 * Set the logical start position inside the client buffer.
 *
 * It is convention that the position reflect the actual byte count that
 * is successfully transferred into or out of the buffer, before the I/O
 * request is "completed" (ie. sent back to its originator).
 */
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    assert(position <= _length);

    if (position >= _length)
    {
        _position         = _length;
        _positionAtIndex  = _rangesCount;   /* careful: out-of-bounds */
        _positionAtOffset = 0;
        return;
    }

    if (position < _position)
    {
        _positionAtOffset = position;
        _positionAtIndex  = 0;
    }
    else
    {
        _positionAtOffset += (position - _position);
    }
    _position = position;

    while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
    {
        _positionAtOffset -= _ranges.v[_positionAtIndex].length;
        _positionAtIndex++;
    }
}
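/*
 * Position sketch (illustrative only): the position is a byte offset from
 * the start of the client buffer, spanning range boundaries.  readBytes()
 * and writeBytes() below call setPosition() themselves whenever their
 * `offset` argument differs from the current position, so an explicit call
 * is only needed when walking the buffer manually, e.g.:
 *
 *   md->setPosition( bytesAlreadyTransferred );   // hypothetical count
 */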
/*
 * Copy data from the memory descriptor's buffer into the specified buffer,
 * relative to the current position.  The memory descriptor's position is
 * advanced based on the number of bytes copied.
 */
IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset,
                                                 void * bytes, IOByteCount withLength)
{
    IOByteCount bytesLeft;
    void *      segment;
    IOByteCount segmentLength;

    if( offset != _position)
        setPosition( offset );

    withLength = min(withLength, _length - _position);
    bytesLeft  = withLength;

#if 0
    while (bytesLeft && (_position < _length))
    {
        /* Compute the relative length to the end of this virtual segment. */
        segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);

        /* Compute the relative address of this virtual segment. */
        segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);

        if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment),
                                /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes,
                                /* size */ segmentLength))
        {
            bytesLeft = withLength;
            break;
        }
        bytesLeft -= segmentLength;
        offset += segmentLength;
        setPosition(offset);
    }
#else
    while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
    {
        segmentLength = min(segmentLength, bytesLeft);
        bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength);
        bytesLeft -= segmentLength;
        offset += segmentLength;
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#endif

    return withLength - bytesLeft;
}
/*
 * Copy data to the memory descriptor's buffer from the specified buffer,
 * relative to the current position.  The memory descriptor's position is
 * advanced based on the number of bytes copied.
 */
IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset,
                                                  const void * bytes, IOByteCount withLength)
{
    IOByteCount bytesLeft;
    void *      segment;
    IOByteCount segmentLength;

    if( offset != _position)
        setPosition( offset );

    withLength = min(withLength, _length - _position);
    bytesLeft  = withLength;

#if 0
    while (bytesLeft && (_position < _length))
    {
        assert(_position <= _length);

        /* Compute the relative length to the end of this virtual segment. */
        segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);

        /* Compute the relative address of this virtual segment. */
        segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);

        if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment),
                                /* from */ (vm_offset_t) bytes,
                                /* to */   (vm_offset_t) segment,
                                /* size */ segmentLength))
        {
            bytesLeft = withLength;
            break;
        }
        bytesLeft -= segmentLength;
        offset += segmentLength;
        setPosition(offset);
    }
#else
    while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
    {
        segmentLength = min(segmentLength, bytesLeft);
        bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength);
        // Flush cache in case we're copying code around, eg. handling a code page fault
        IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength);

        bytesLeft -= segmentLength;
        offset += segmentLength;
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#endif

    return withLength - bytesLeft;
}
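/*
 * Copy sketch (illustrative only): readBytes()/writeBytes() move data
 * between a kernel buffer and the described memory.  For pageable client
 * memory they should be bracketed by prepare()/complete().  The names
 * `kernelBuffer` and `count` are hypothetical.
 *
 *   if( kIOReturnSuccess == md->prepare()) {
 *       IOByteCount copied = md->readBytes( 0, kernelBuffer, count );
 *       md->writeBytes( 0, kernelBuffer, copied );
 *       md->complete();
 *   }
 */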
/*
 * getPhysicalSegment:
 *
 * Get the physical address of the buffer, relative to the current position.
 * If the current position is at the end of the buffer, a zero is returned.
 */
IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
                                              IOByteCount * lengthOfSegment)
{
    vm_address_t      virtualAddress;
    IOByteCount       virtualLength;
    pmap_t            virtualPMap = 0;
    IOPhysicalAddress physicalAddress;
    IOPhysicalLength  physicalLength;

    if( kIOMemoryRequiresWire & _flags)
        assert( _wireCount );

    if ((0 == _task) && (1 == _rangesCount))
    {
        assert(offset <= _length);
        if (offset >= _length)
        {
            *lengthOfSegment = 0;
            return 0;
        }

        physicalLength = _length - offset;
        physicalAddress = offset + _ranges.v[0].address;

        *lengthOfSegment = physicalLength;
        return physicalAddress;
    }

    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length)
    {
        *lengthOfSegment = 0;
        return 0;
    }

    /* Prepare to compute the largest contiguous physical length possible. */

    virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset;
    virtualLength  = _ranges.v[_positionAtIndex].length  - _positionAtOffset;
    vm_address_t virtualPage = trunc_page(virtualAddress);
    if (_task)
        virtualPMap = get_task_pmap(_task);

    physicalAddress = (virtualAddress == _cachedVirtualAddress) ?
                        _cachedPhysicalAddress :            /* optimization */
                        virtualPMap ?
                            pmap_extract(virtualPMap, virtualAddress) :
                            0;
    physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress;

    if (!physicalAddress && _task)
    {
        physicalAddress =
            vm_map_get_phys_page(get_task_map(_task), virtualPage);
        physicalAddress += virtualAddress - virtualPage;
    }

    if (physicalAddress == 0)   /* memory must be wired in order to proceed */
    {
        assert(physicalAddress);
        *lengthOfSegment = 0;
        return 0;
    }

    /* Compute the largest contiguous physical length possible, within range. */
    IOPhysicalAddress physicalPage = trunc_page(physicalAddress);

    while (physicalLength < virtualLength)
    {
        physicalPage          += page_size;
        virtualPage           += page_size;
        _cachedVirtualAddress  = virtualPage;
        _cachedPhysicalAddress = virtualPMap ?
                                    pmap_extract(virtualPMap, virtualPage) :
                                    0;
        if (!_cachedPhysicalAddress && _task)
        {
            _cachedPhysicalAddress =
                vm_map_get_phys_page(get_task_map(_task), virtualPage);
        }

        if (_cachedPhysicalAddress != physicalPage)  break;

        physicalLength += page_size;
    }

    /* Clip contiguous physical length at the end of this range. */
    if (physicalLength > virtualLength)
        physicalLength = virtualLength;

    *lengthOfSegment = physicalLength;

    return physicalAddress;
}
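/*
 * Scatter/gather sketch (illustrative only): a driver building a DMA program
 * walks the descriptor with getPhysicalSegment(), which returns the largest
 * physically contiguous run at each offset.  `programEntry` is a hypothetical
 * helper.
 *
 *   IOByteCount       offset = 0;
 *   IOPhysicalLength  segLen;
 *   IOPhysicalAddress segAddr;
 *
 *   while( (segAddr = md->getPhysicalSegment( offset, &segLen ))) {
 *       programEntry( segAddr, segLen );
 *       offset += segLen;
 *   }
 */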
/*
 * Get the virtual address of the buffer, relative to the current position.
 * If the memory wasn't mapped into the caller's address space, it will be
 * mapped in now.  If the current position is at the end of the buffer, a
 * null is returned.
 */
void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length)
    {
        *lengthOfSegment = 0;
        return 0;
    }

    /* Compute the relative length to the end of this virtual segment. */
    *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;

    /* Compute the relative address of this virtual segment. */
    if (_task == kernel_task)
        return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
    else
    {
        vm_offset_t off;

        mapIntoKernel(_positionAtIndex);

        off  = _ranges.v[_kernPtrAtIndex].address;
        off -= trunc_page(off);

        return (void *) (_kernPtrAligned + off + _positionAtOffset);
    }
}
/*
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    UInt rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        vm_prot_t access = VM_PROT_DEFAULT;    // Could be cleverer using direction

        //
        // Check user read/write access to the data buffer.
        //

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length  +
                                                 page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm_shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                                (taskVMMap, &actualSize, entryStart,
                                 forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->
                            appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }
        }
    }
    _wireCount++;

    return kIOReturnSuccess;

abortExit:
    {
        UInt doneIndex;

        for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
            vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                                                 _ranges.v[doneIndex].length  +
                                                 page_size - 1);

            vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                          srcAlignEnd, FALSE);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);
            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }

    return kIOReturnVMError;
}
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if (0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        UInt rangeIndex;
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length  +
                                                 page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }

        _cachedVirtualAddress = 0;
    }
    return kIOReturnSuccess;
}
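/*
 * Wiring sketch (illustrative only): prepare() and complete() must be
 * balanced around any I/O that touches pageable (user task) memory; the
 * wire count lets nested prepare() calls stack.
 *
 *   IOReturn rc = md->prepare( kIODirectionOut );
 *   if( kIOReturnSuccess == rc) {
 *       // ... start and finish the transfer ...
 *       md->complete( kIODirectionOut );
 *   }
 */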
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    kern_return_t kr;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
            *atAddress = _ranges.v[0].address;
            return( kIOReturnSuccess );
    }

    if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) {

      do {

        if( (1 != _rangesCount)
         || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) {
            kr = kIOReturnUnsupported;
            continue;
        }

        if( 0 == length)
            length = getLength();
        if( (sourceOffset + length) > _ranges.v[0].length) {
            kr = kIOReturnBadArgument;
            continue;
        }

        ipc_port_t sharedMem = (ipc_port_t) _memEntry;
        vm_prot_t prot = VM_PROT_READ
                        | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        if( options & kIOMapAnywhere)
            *atAddress = 0;

        if( 0 == sharedMem)
            kr = kIOReturnVMError;
        else
            kr = KERN_SUCCESS;

        if( KERN_SUCCESS == kr)
            kr = vm_map( addressMap,
                         atAddress,
                         length, 0 /* mask */,
                         (( options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                         | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                         sharedMem, sourceOffset,
                         false /* copy */,
                         prot, prot,
                         VM_INHERIT_NONE );

      } while( false );

    } else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
        && (logical == _ranges.v[0].address)
        && (length <= _ranges.v[0].length) )
            return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryMap, OSObject )
OSDefineAbstractStructors( IOMemoryMap, OSObject )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

public:
    virtual void free();

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * isCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool init(
                IOMemoryDescriptor *    memory,
                IOMemoryMap *           superMap,
                IOByteCount             offset,
                IOByteCount             length );

    bool init(
                IOMemoryDescriptor *    memory,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    IOReturn redirect(
                task_t                  intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::init(
        IOMemoryDescriptor *    _memory,
        IOMemoryMap *           _superMap,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    if( !_memory || !_superMap || !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory   = _memory;

    _superMap->retain();
    superMap = _superMap;

    offset   = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options  = superMap->getMapOptions();
    logical  = superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::init(
        IOMemoryDescriptor *    _memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    kernel_vm_map_reference(addressMap);

    _memory->retain();
    memory      = _memory;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        logical = 0;
        memory->release();
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    IOReturn            err = kIOReturnSuccess;
    vm_size_t           ourSize;
    vm_size_t           bytes;
    vm_offset_t         mapped;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalLength    segLen;
    IOPhysicalAddress   physAddr;

    if( 0 == length)
        length = getLength();

    physAddr = getPhysicalSegment( sourceOffset, &segLen );
    assert( physAddr );

    pageOffset = physAddr - trunc_page( physAddr );
    ourSize = length + pageOffset;
    physAddr -= pageOffset;

    logical = *atAddress;
    if( 0 == (options & kIOMapAnywhere)) {
        mapped = trunc_page( logical );
        if( (logical - mapped) != pageOffset)
            err = kIOReturnVMError;
    }
    if( kIOReturnSuccess == err)
        err = vm_allocate( addressMap, &mapped, ourSize,
                           ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                           | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

    if( KERN_SUCCESS != err) {
        kprintf("IOMemoryDescriptor::doMap: vm_allocate() "
                "returned %08x\n", err);
        return( err );
    }

    // we have to make sure that these guys don't get copied if we fork.
    err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE);
    if( err != KERN_SUCCESS) {
        doUnmap( addressMap, mapped, ourSize );     // back out
        return( err );
    }

    logical = mapped;
    *atAddress = mapped + pageOffset;

    segLen += pageOffset;
    bytes = ourSize;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n",
                    addressMap, mapped + pageOffset, physAddr + pageOffset,
                    segLen - pageOffset);

        if( kIOReturnSuccess == err)
            err = IOMapPages( addressMap, mapped, physAddr, segLen, options );
        if( err)
            break;

        sourceOffset += segLen - pageOffset;
        mapped += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;
    if( err)
        doUnmap( addressMap, logical, ourSize );

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task())))
        err = vm_deallocate( addressMap, logical, length );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn            err = kIOReturnSuccess;
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, redirect );
        iter->release();
    }

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    // temporary binary compatibility IOMemoryDescriptor::redirect( safeTask, redirect );
    return( _parent->redirect( safeTask, redirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        if( logical && addressMap
            && (get_task_map( safeTask) != addressMap)
            && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/ );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %x) %x from %lx\n", redirect, err, logical, addressMap);
        }
    }

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    return( err );
}
void _IOMemoryMap::taskDied( void )
{
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
}

void _IOMemoryMap::free()
{
    unmap();
    if( memory)
        memory->removeMapping( this);
    if( superMap)
        superMap->release();
    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask );
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options );
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::isCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  task,
                IOVirtualAddress        toAddress,
                IOOptionBits            _options,
                IOByteCount             _offset,
                IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->init( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    address = memory->getPhysicalSegment( offset + _offset, length );

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    if( map
     && !map->init( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    return( map);
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset = 0,
        IOByteCount             length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
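/*
 * Mapping sketch (illustrative only): map() with no arguments creates a
 * kernel mapping of the described memory and returns an IOMemoryMap that
 * owns it; releasing the map removes the mapping.
 *
 *   IOMemoryMap * mmap = md->map();
 *   if( mmap) {
 *       void * ptr = (void *) mmap->getVirtualAddress();
 *       // ... access the memory through ptr ...
 *       mmap->release();
 *   }
 */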
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->isCompatible(
                                owner, intoTask, toAddress,
                                options | kIOMapReference,
                                offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->init( owner, intoTask, toAddress, options,
                            offset, length )) {

            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    if( mapping)
        owner->addMapping( mapping);

    return( mapping);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings && _mappings->setObject( mapping ))
            mapping->release();         /* really */
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection withDirection )
{
    if( !parent || !super::init())
        return( false );

    if( (offset + length) > parent->getLength())
        return( false );

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = withDirection;
    _tag       = parent->getTag();

    return( true );
}
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(withLength, _length - offset) );

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(withLength, _length - offset) );

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    err = _parent->prepare( forDirection );

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    err = _parent->complete( forDirection );

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  withDirection,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       withDirection,
                                               bool              asReference = false)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);