/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>

#include <mach/memory_object_types.h>
#include <device/device_port.h>
struct phys_entry *pmap_find_physentry(ppnum_t pa);
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
    memory_object_t         pager,
    int                     device_handle,
    vm_size_t               size,
    int                     flags);
void
device_pager_deallocate(
    memory_object_t);
kern_return_t
device_pager_populate_object(
    memory_object_t         pager,
    vm_object_offset_t      offset,
    ppnum_t                 phys_addr,
    vm_size_t               size);
kern_return_t
memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    vm_size_t               *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);
/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
    vm_map_t        map,
    vm_offset_t     vaddr,
    vm_prot_t       fault_type,
    boolean_t       change_wiring,
    int             interruptible,
    pmap_t          caller_pmap,
    vm_offset_t     caller_pmap_addr);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper;
static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
extern "C" {

kern_return_t device_data_action(
    int                     device_handle,
    ipc_port_t              device_pager,
    vm_prot_t               protection,
    vm_object_offset_t      offset,
    vm_size_t               size)
{
    struct ExpansionData {
        void *                  devicePager;
        unsigned int            pagerContig:1;
        unsigned int            unused:31;
        IOMemoryDescriptor *    memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
    int                     device_handle)
{
    struct ExpansionData {
        void *                  devicePager;
        unsigned int            pagerContig:1;
        unsigned int            unused:31;
        IOMemoryDescriptor *    memory;
    };
    ExpansionData * ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}

};  // end extern "C"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                        IOPhysicalAddress   address,
                        IOByteCount         length,
                        IODirection         direction )
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}

/*
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return false;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
                                 IOByteCount            offset,
                                 IOByteCount            length,
                                 IODirection            direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
        self->release();
        self = 0;
    }
    return self;
}
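/*
 * Illustrative usage sketch (not part of the original file): how a driver
 * might create descriptors with the factory methods above.  The buffer,
 * length, physical address and parent descriptor are all hypothetical
 * example values; error handling is intentionally minimal.
 */
#if 0
static void exampleFactories(void * kernelBuffer, IOByteCount bufferLen,
                             IOMemoryDescriptor * parentMD)
{
    // Describe a kernel-virtual buffer (kernel_task is implied).
    IOMemoryDescriptor * md =
        IOMemoryDescriptor::withAddress(kernelBuffer, bufferLen, kIODirectionOutIn);

    // Describe a single physical range, e.g. a device aperture (made-up address).
    IOMemoryDescriptor * physMD =
        IOMemoryDescriptor::withPhysicalAddress(0x80000000, page_size, kIODirectionIn);

    // Describe a window onto an existing descriptor.
    IOMemoryDescriptor * subMD =
        IOMemoryDescriptor::withSubRange(parentMD, 0, page_size, kIODirectionOutIn);

    if (md)     md->release();
    if (physMD) physMD->release();
    if (subMD)  subMD->release();
}
#endif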
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    // @@@ gvdl: Need to remove this
    // Auto-prepare if this is a kernel memory descriptor as very few
    // clients bother to prepare() kernel memory.
    // But it has been enforced so what are you going to do?

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, or
 * finally an UPL from the ubc system.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t  fPageInfo;     // Pointer to page list or index into it
    ppnum_t      fMappedBase;   // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *      fMapper;
    unsigned int    fPageCnt;
    upl_page_info_t fPageList[0];   // @@@ gvdl need to get rid of this
                                    //  should be able to use upl directly
    ioPLBlock       fBlocks[0];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(d,len)   \
    ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
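/*
 * Illustrative sketch (not part of the original file) of the layout that
 * computeDataSize() above describes: an ioGMDData header, followed by
 * fPageCnt upl_page_info_t entries, followed by the ioPLBlock array.
 * The page and UPL counts below are made-up example values.
 */
#if 0
static void exampleDataSize(void)
{
    unsigned int pages = 8;     // hypothetical page count
    unsigned int upls  = 2;     // hypothetical iopl count
    unsigned int size  = computeDataSize(pages, upls);
    // size == sizeof(ioGMDData)
    //       + 8 * sizeof(upl_page_info_t)
    //       + 2 * sizeof(ioPLBlock)
    (void) size;
}
#endif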
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    switch (options & kIOMemoryTypeMask) {
    case kIOMemoryTypeVirtual:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
        mapper = kIOMapperNone;
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        panic("IOGMD::iWO(): bad type");    // @@@ gvdl: for testing
        return false;   /* bad argument */
    }

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0; // No Mapper
    else if (!mapper) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }
    // DEPRECATED variable initialisation
    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;

    if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;

        _wireCount++;   // UPLs start out life wired

        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags      = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;
        if (!pageList->device) {
            // @@@ gvdl: Ask JoeS: are the pages contiguous with the list,
            // or is there a chance that we may be inserting 0 phys_addrs?
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo   = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {  /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
        IOVirtualRange *ranges = (IOVirtualRange *) buffers;

        /*
         * Initialize the memory descriptor.
         */

        _length = 0;
        _pages  = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            IOVirtualRange cur = ranges[ind];

            _length += cur.length;
            _pages  += atop_32(cur.address + cur.length + PAGE_MASK)
                    -  atop_32(cur.address);
        }

        _rangesIsAllocated = !(options & kIOMemoryAsReference);
        _rangesCount       = count;

        if (options & kIOMemoryAsReference)
            _ranges.v = ranges;
        else {
            _ranges.v = IONew(IOVirtualRange, count);
            if (!_ranges.v)
                return false;
            bcopy(/* from */ ranges, _ranges.v,
                  count * sizeof(IOVirtualRange));
        }

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            _wireCount++;   // Physical MDs start out wired
        else { /* kIOMemoryTypeVirtual */
            ioGMDData *dataP;
            unsigned int dataSize =
                computeDataSize(_pages, /* upls */ _rangesCount * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper  = mapper;
            dataP->fPageCnt = _pages;
            if (kIOMemoryPersistent & _flags) {
                kern_return_t error;
                ipc_port_t    sharedMem;

                vm_size_t    size = _pages << PAGE_SHIFT;
                vm_address_t startPage;

                startPage = trunc_page_32(_ranges.v[0].address);

                vm_map_t theMap = ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
                                ? IOPageableMapForAddress(startPage)
                                : get_task_map(_task);

                vm_size_t actualSize = size;
                error = mach_make_memory_entry( theMap,
                            &actualSize, startPage,
                            VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                            NULL );

                if (KERN_SUCCESS == error) {
                    if (actualSize == round_page_32(size)) {
                        _memEntry = (void *) sharedMem;
                    } else {
                        IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
                              startPage, (UInt32)actualSize, size);
                        ipc_port_release_send( sharedMem );
                    }
                }
            }
        }
    }

    if ((_flags & kIOMemoryAutoPrepare)
     && prepare() != kIOReturnSuccess)
        return false;

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
        reserved->memory = 0;
    UNLOCK;

    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
        complete();
    }

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    return length - remaining;
}
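/*
 * Illustrative usage sketch (not part of the original file): copying data
 * in and out of a descriptor with readBytes()/writeBytes() above.  The
 * descriptor and scratch buffer are hypothetical.
 */
#if 0
static IOReturn exampleCopy(IOMemoryDescriptor * md)
{
    char scratch[64];

    if (md->prepare() != kIOReturnSuccess)
        return kIOReturnVMError;

    // Pull the first 64 bytes of the descriptor into the scratch buffer...
    IOByteCount got = md->readBytes(0, scratch, sizeof(scratch));
    // ...and write them back 64 bytes further into the descriptor.
    IOByteCount put = md->writeBytes(sizeof(scratch), scratch, got);

    md->complete();
    return (put == got) ? kIOReturnSuccess : kIOReturnUnderrun;
}
#endif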
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);
    if (offset < _length) // (within bounds?)
    {
        if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
            unsigned int ind;

            // Physical address based memory descriptor

            // Find offset within descriptor and make it relative
            // to the current _range.
            for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
                offset -= _ranges.p[ind].length;

            IOPhysicalRange cur = _ranges.p[ind];
            address = cur.address + offset;
            length  = cur.length  - offset;

            // see how far we can coalesce ranges
            for (++ind; ind < _rangesCount; ind++) {
                cur = _ranges.p[ind];

                if (address + length != cur.address)
                    break;

                length += cur.length;
            }

            // @@@ gvdl: should assert(address);
            // but can't as NVidia GeForce creates a bogus physical mem
            assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount));
        }
        else do {
            // We need wiring & we are wired.
            assert(_wireCount);

            if (!_wireCount)
            {
                panic("IOGMD: not wired for getPhysicalSegment()");
                continue;
            }

            assert(_memoryEntries);

            ioGMDData * dataP = getDataP(_memoryEntries);
            const ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength());
            upl_page_info_t *pageList = getPageList(dataP);

            assert(numIOPLs > 0);

            // Scan through iopl info blocks looking for block containing offset
            for (ind = 1; ind < numIOPLs; ind++) {
                if (offset < ioplList[ind].fIOMDOffset)
                    break;
            }

            // Go back to actual range as search goes past it
            ioPLBlock ioplInfo = ioplList[ind - 1];

            if (ind < numIOPLs)
                length = ioplList[ind].fIOMDOffset;
            else
                length = _length;
            length -= offset;   // Remainder within iopl

            // Subtract offset till this iopl in total list
            offset -= ioplInfo.fIOMDOffset;

            // This is a mapped IOPL so we just need to compute an offset
            // relative to the mapped base.
            if (ioplInfo.fMappedBase) {
                offset += (ioplInfo.fPageOffset & PAGE_MASK);
                address = ptoa_32(ioplInfo.fMappedBase) + offset;
                continue;
            }

            // Currently the offset is rebased into the current iopl.
            // Now add the iopl 1st page offset.
            offset += ioplInfo.fPageOffset;

            // For external UPLs the fPageInfo field points directly to
            // the upl's upl_page_info_t array.
            if (ioplInfo.fFlags & kIOPLExternUPL)
                pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
            else
                pageList = &pageList[ioplInfo.fPageInfo];

            // Check for direct device non-paged memory
            if ( ioplInfo.fFlags & kIOPLOnDevice ) {
                address = ptoa_32(pageList->phys_addr) + offset;
                continue;
            }

            // Now we need to compute the index into the pageList
            ind = atop_32(offset);
            offset &= PAGE_MASK;

            IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
            address = ptoa_32(pageAddr) + offset;

            // Check for the remaining data in this upl being longer than the
            // remainder on the current page.  This should be checked for
            // contiguous pages.
            if (length > PAGE_SIZE - offset) {
                // See if the next page is contiguous.  Stop looking when we hit
                // the end of this upl, which is indicated by the
                // contigLength >= length.
                IOByteCount contigLength = PAGE_SIZE - offset;

                // Look for contiguous segment
                while (contigLength < length
                &&     ++pageAddr == pageList[++ind].phys_addr) {
                    contigLength += PAGE_SIZE;
                }
                if (length > contigLength)
                    length = contigLength;
            }

        } while (0);

        if (!address)
            length = 0;
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return address;
}
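/*
 * Illustrative sketch (not part of the original file): walking a prepared
 * descriptor with getPhysicalSegment() above to build a scatter/gather
 * list, the typical client of the routine.  The element type and list
 * parameters are hypothetical.
 */
#if 0
struct ExampleSGElement { IOPhysicalAddress addr; IOByteCount len; };

static unsigned int exampleBuildSGList(IOMemoryDescriptor * md,
                                       ExampleSGElement * list, unsigned int max)
{
    IOByteCount  offset = 0;
    unsigned int count  = 0;

    while (count < max) {
        IOByteCount       segLen;
        IOPhysicalAddress segAddr = md->getPhysicalSegment(offset, &segLen);
        if (!segAddr)
            break;                      // past the end of the descriptor
        list[count].addr = segAddr;
        list[count].len  = segLen;
        count++;
        offset += segLen;
    }
    return count;
}
#endif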
addr64_t IOMemoryDescriptor::getPhysicalSegment64
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
    {
        IOByteCount origLen;

        phys64  = gIOSystemMapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length  = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}
// Note this function is NOT a virtual function
void * IOGeneralMemoryDescriptor::getBackingID() const
{
    if (!_memEntry) // Not created as a persistent memory descriptor
        return 0;

    vm_size_t    size = _pages << PAGE_SHIFT;
    vm_size_t    seenSize = 0;
    vm_address_t basePage = trunc_page_32(_ranges.v[0].address);
    void *       retObjID = 0;

    vm_map_t theMap =
        ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
            ? IOPageableMapForAddress(basePage)
            : get_task_map(_task);

    for (;;) {
        vm_region_object_info_data_64_t objInfo;
        vm_address_t actualPage = basePage;
        vm_size_t    actualSize;
        mach_msg_type_number_t objInfoSize;
        kern_return_t error;

        objInfoSize = VM_REGION_OBJECT_INFO_COUNT_64;
        error = vm_region_64(theMap,
                             &actualPage,
                             &actualSize,
                             VM_REGION_OBJECT_INFO_64,
                             (vm_region_info_t) &objInfo,
                             &objInfoSize,
                             0);

        if (KERN_SUCCESS != error || actualSize == 0 || actualPage > basePage
         || (retObjID && retObjID != (void *) objInfo.object_id))
            return 0;

        actualPage += actualSize;               // Calculate the end address
        seenSize   += actualPage - basePage;    // Size of overlap
        basePage    = actualPage;               // Start here for next loop
        if (seenSize >= size)
            return (void *) objInfo.object_id;

        if (!retObjID)
            retObjID = (void *) objInfo.object_id;
    }
}
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL)
        return super::getSourceSegment( offset, lengthOfSegment );

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
        {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return( address );
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                        IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */

#ifdef __ppc__
extern vm_offset_t static_memory_end;
#define io_kernel_static_end    static_memory_end
#else
extern vm_offset_t first_avail;
#define io_kernel_static_end    first_avail
#endif
static kern_return_t
io_get_kernel_static_upl(
    vm_map_t                map,
    vm_address_t            offset,
    vm_size_t               *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    int                     *flags,
    int                     force_data_sync)
{
    unsigned int pageCount, page;
    ppnum_t phys;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
    }

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;  // May no longer be valid so lets not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;   // This Mem Desc's default flags for upl creation
    switch (forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
    //
    // Check user read/write access to the data buffer.
    //

    unsigned int pageIndex = 0;
    IOByteCount mdOffset   = 0;
    vm_map_t curMap;

    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        IOVirtualRange curRange = _ranges.v[range];
        vm_address_t startPage;
        IOByteCount numBytes;

        startPage = trunc_page_32(curRange.address);
        iopl.fPageOffset = (short) curRange.address & PAGE_MASK;
        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;
        numBytes = iopl.fPageOffset + curRange.length;

        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_map_t theMap = (curMap) ? curMap
                                       : IOPageableMapForAddress(startPage);
            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if ((theMap == kernel_map) && (startPage < io_kernel_static_end)) {
                error = io_get_kernel_static_upl(theMap,
                                                 startPage,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &ioplFlags,
                                                 false);
            } else if (sharedMem && (kIOMemoryPersistent & _flags)) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            } else {
                error = vm_map_get_upl(theMap,
                                       startPage,
                                       &ioplSize,
                                       &iopl.fIOPL,
                                       baseInfo,
                                       &numPageInfo,
                                       &ioplFlags,
                                       false);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            error = kIOReturnNoMemory;
            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else {
                iopl.fFlags = 0;
                if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo   = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                kernel_upl_commit(iopl.fIOPL, 0, 0);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partially created and unsaved iopl
                if (iopl.fIOPL)
                    kernel_upl_abort(iopl.fIOPL, 0);
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset  -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes  -= ioplSize;
                startPage += ioplSize;
                mdOffset  += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(dataP, _memoryEntries->getLength());
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL)
                kernel_upl_abort(ioplList[range].fIOPL, 0);
        }

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    return error;
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method need not
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;

    if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
            /* kIOMemoryTypePhysical */
            // nothing to undo
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(dataP, _memoryEntries->getLength());

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL)
                        kernel_upl_commit(ioplList[ind].fIOPL, 0, 0);
            }

            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
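/*
 * Illustrative sketch (not part of the original file): the prepare()/complete()
 * pairing documented above, wrapped around a hypothetical hardware transfer.
 * The DMA programming step is only indicated by a comment.
 */
#if 0
static IOReturn exampleTransfer(IOMemoryDescriptor * md)
{
    IOReturn err = md->prepare(kIODirectionOut);    // page in and wire the buffer
    if (err != kIOReturnSuccess)
        return err;

    // ... program the hardware from md->getPhysicalSegment() here ...

    md->complete(kIODirectionOut);                  // unwire when the transfer is done
    return kIOReturnSuccess;
}
#endif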
IOReturn IOGeneralMemoryDescriptor::doMap(
    vm_map_t           addressMap,
    IOVirtualAddress * atAddress,
    IOOptionBits       options,
    IOByteCount        sourceOffset,
    IOByteCount        length )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
     && (1 == _rangesCount) && (0 == sourceOffset)
     && (length <= _ranges.v[0].length) ) {
        *atAddress = _ranges.v[0].address;
        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = _pages << PAGE_SHIFT;

        if( _task) {
            vm_size_t actualSize = size;
            kr = mach_make_memory_entry( get_task_map(_task),
                        &actualSize, _ranges.v[0].address,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
                IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
                            _ranges.v[0].address, (UInt32)actualSize, size);
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {
            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( sourceOffset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }

            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    if( 0 == sharedMem)
        kr = kIOReturnVMError;
    else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress logical,
    IOByteCount      length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)
     && (logical == _ranges.v[0].address)
     && (length <= _ranges.v[0].length) )
        return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:
    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
        IOMemoryDescriptor * owner,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length );

    bool initCompatible(
        IOMemoryDescriptor * memory,
        IOMemoryMap *        superMap,
        IOByteCount          offset,
        IOByteCount          length );

    bool initWithDescriptor(
        IOMemoryDescriptor * memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length );

    IOReturn redirect(
        task_t               intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::initCompatible(
    IOMemoryDescriptor * _memory,
    IOMemoryMap *        _superMap,
    IOByteCount          _offset,
    IOByteCount          _length )
{
    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory   = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::initWithDescriptor(
    IOMemoryDescriptor * _memory,
    task_t               intoTask,
    IOVirtualAddress     toAddress,
    IOOptionBits         _options,
    IOByteCount          _offset,
    IOByteCount          _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    vm_map_reference(addressMap);

    _memory->retain();
    memory = _memory;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}
struct IOMemoryDescriptorMapAllocRef {
    ipc_port_t   sharedMem;
    vm_size_t    size;
    vm_offset_t  mapped;
    IOByteCount  sourceOffset;
    IOOptionBits options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                           | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot,  // cur
                          prot,  // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}
IOReturn IOMemoryDescriptor::doMap(
    vm_map_t           addressMap,
    IOVirtualAddress * atAddress,
    IOOptionBits       options,
    IOByteCount        sourceOffset,
    IOByteCount        length )
{
    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    vm_address_t      logical;
    IOByteCount       pageOffset;
    IOPhysicalAddress sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page_32( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}
enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
    void *           _pager,
    vm_map_t         addressMap,
    IOVirtualAddress address,
    IOByteCount      sourceOffset,
    IOByteCount      length,
    IOOptionBits     options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    vm_size_t        size;
    vm_size_t        bytes;
    vm_size_t        page;
    IOByteCount      pageOffset;
    IOByteCount      pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                addressMap, address + pageOffset, physAddr + pageOffset,
                segLen - pageOffset);
        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
        if( err)
            break;

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

            } else {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution   */
        /* of the mappings created above.  Need for this is in absolute  */
        /* violation of the basic tenet that the pmap layer is a cache.  */
        /* Further, it implies a serious I/O architectural violation on  */
        /* the part of some user of the mapping.  As of this writing,    */
        /* the call to vm_fault is needed because the NVIDIA driver      */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be  */
        /* fixed as soon as possible.  The NVIDIA driver should not      */
        /* need to query for this info as it should know from the doMap  */
        /* call where the physical memory is mapped.  When a query is    */
        /* necessary to find a physical mapping, it should be done       */
        /* through an iokit call which includes the mapped memory        */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
        }

        /* *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress logical,
    IOByteCount      length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn       err = kIOReturnSuccess;
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, redirect );

        iter->release();
    }

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        LOCK;
        if( logical && addressMap
         && (get_task_map( safeTask) != addressMap)
         && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
        }
        UNLOCK;
    }

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
     && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}
void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
    UNLOCK;
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these references
// is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        memory->release();
        UNLOCK;
    }

    if( superMap)
        superMap->release();

    super::free();
}
IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::copyCompatible(
    IOMemoryDescriptor * owner,
    task_t               task,
    IOVirtualAddress     toAddress,
    IOOptionBits         _options,
    IOByteCount          _offset,
    IOByteCount          _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
    task_t           intoTask,
    IOVirtualAddress mapAddress,
    IOOptionBits     options )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
    && !map->initWithDescriptor( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    UNLOCK;

    return( map);
}
IOMemoryMap * IOMemoryDescriptor::map(
    IOOptionBits options )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    task_t           intoTask,
    IOVirtualAddress toAddress,
    IOOptionBits     options,
    IOByteCount      offset,
    IOByteCount      length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
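/*
 * Illustrative usage sketch (not part of the original file): obtaining a
 * kernel-virtual mapping of a descriptor with map() above, the cover for
 * makeMapping() below.  Error handling is minimal.
 */
#if 0
static IOReturn exampleMap(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->map();
    if (!map)
        return kIOReturnVMError;

    void * va = (void *) map->getVirtualAddress();
    // ... use va for map->getLength() bytes ...
    (void) va;

    map->release();     // releasing the map also unmaps it
    return kIOReturnSuccess;
}
#endif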
IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               intoTask,
    IOVirtualAddress     toAddress,
    IOOptionBits         options,
    IOByteCount          offset,
    IOByteCount          length )
{
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->copyCompatible(
                                    owner, intoTask, toAddress,
                                    options | kIOMapReference,
                                    offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
                                         offset, length )) {
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    if( mapping)
        owner->addMapping( mapping);

    UNLOCK;

    return( mapping);
}
void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::prepare(
    IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
    IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               intoTask,
    IOVirtualAddress     toAddress,
    IOOptionBits         options,
    IOByteCount          offset,
    IOByteCount          length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                    _parent, intoTask,
                                    toAddress - (_start + offset),
                                    options | kIOMapReference,
                                    _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                    _parent, intoTask,
                                    toAddress,
                                    options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  direction,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                 IOVirtualRange * ranges,
                                 UInt32           withCount,
                                 IODirection      direction,
                                 task_t           withTask,
                                 bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    IOVirtualRange *vcopy;
    unsigned int index, nRanges;
    bool result;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        for (index = 0; index < nRanges; index++) {
            vcopy[index] = _ranges.v[index];
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        values[0] = OSNumber::withNumber(_ranges.v[index].address, sizeof(_ranges.v[index].address) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(_ranges.v[index].length, sizeof(_ranges.v[index].length) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (result == false) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
    return result;
}
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s)
        return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }