/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

extern kern_return_t memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    vm_size_t               *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions() function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;     // Pointer to page list or index into it
    uint32_t fIOMDOffset;       // The offset of this iopl in descriptor
    ppnum_t fMappedPage;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *    fMapper;
    uint8_t       fDMAMapNumAddressBits;
    uint64_t      fDMAMapAlignment;
    addr64_t      fMappedBase;
    uint64_t      fPreparationID;
    unsigned int  fPageCnt;
    unsigned char fDiscontig;
    // align arrays to 8 bytes so following macros work
    unsigned char fPad[3];
    upl_page_info_t fPageList[1]; /* variable length */
    ioPLBlock fBlocks[1];         /* variable length */
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
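// Worked example of the layout the macros above describe (illustrative, not
// part of the original source): the backing OSData for an ioGMDData holding
// P wired pages and U iopl blocks is computeDataSize(P, U) bytes long --
// the fixed header, then P upl_page_info_t entries, then U ioPLBlock
// entries.  That is why getIOPLList() starts at &fPageList[fPageCnt] and why
// getNumIOPL() can recover the block count from the OSData length alone.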
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )

extern "C" {

kern_return_t device_data_action(
               uintptr_t            device_handle,
               ipc_port_t           device_pager,
               vm_prot_t            protection,
               vm_object_offset_t   offset,
               vm_size_t            size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if (memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t    device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress   address,
                                IOByteCount         length,
                                IODirection         direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
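// Illustrative sketch (hypothetical driver code, not part of this file): the
// usual way a driver describes a caller-supplied buffer is the 64-bit safe
// withAddressRange() factory above, pairing prepare() with complete().
// `buf`, `len` and `clientTask` are assumed to come from the caller.
static __unused IOReturn
exampleDescribeClientBuffer(mach_vm_address_t buf, mach_vm_size_t len, task_t clientTask)
{
    IOReturn ret = kIOReturnNoMemory;
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                    buf, len, kIODirectionOutIn, clientTask);
    if (md)
    {
        ret = md->prepare();            // wire the pages for I/O
        if (kIOReturnSuccess == ret)
        {
            // ... program DMA or copy data here ...
            md->complete();             // must balance the prepare()
        }
        md->release();
    }
    return (ret);
}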
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
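// Illustrative sketch (hypothetical, not from this file): a persistent
// descriptor lets two descriptors name the same physical pages even after
// the client's mapping changes.  If the original already owns the named
// entry, the call above simply returns the original with an extra retain.
static __unused IOMemoryDescriptor *
exampleAliasDescriptor(IOMemoryDescriptor * original)
{
    // Returns either `original` (retained) or a new descriptor sharing its
    // named memory entry; either way the caller releases the result.
    return (IOMemoryDescriptor::withPersistentMemoryDescriptor(original));
}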
void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ;
    if (kIODirectionOut != (kIODirectionOutIn & _flags))
        prot |= VM_PROT_WRITE;

    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
                &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
                  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              asReference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (asReference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           count,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOOptionBits mdOpts = direction;

    if (asReference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 * from a given task, several physical ranges, an UPL from the ubc
 * system or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        break;

    case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        break;

    default:
        return false;   /* bad argument */
    }
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memEntry)
            {
                ipc_port_release_send((ipc_port_t) _memEntry);
                _memEntry = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        _initialized = true;
    }
    // Grab the appropriate mapper
    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Temp binary compatibility for kIOMemoryThreadSafe
    if (kIOMemoryReserved6156215 & options)
    {
        options &= ~kIOMemoryReserved6156215;
        options |= kIOMemoryThreadSafe;
    }
    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }
    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);

 //     _wireCount++;   // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            IOPhysicalLength len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);      // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
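// Worked example of the page arithmetic above (illustrative numbers only,
// not from the original source): a single range at address 0x1ff0 with
// length 0x30 on 4K pages gives
//   atop_64(0x1ff0 + 0x30 + PAGE_MASK) - atop_64(0x1ff0) = 3 - 1 = 2 pages,
// i.e. the count is rounded out to cover both partially-touched pages.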
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
            dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    if (_prepareLock)
        IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

#ifndef __LP64__
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {         // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {         // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return length - remaining;
}
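// Illustrative sketch (hypothetical, not from this file): readBytes() and
// writeBytes() copy through the physical pages, so they work regardless of
// which task the descriptor targets.  `md` is assumed to already be prepared.
static __unused IOReturn
exampleCopyHeader(IOMemoryDescriptor * md)
{
    uint32_t header = 0;
    if (sizeof(header) != md->readBytes(0, &header, sizeof(header)))
        return (kIOReturnUnderrun);     // short copy
    header |= 1;                        // e.g. set a "seen" bit
    if (sizeof(header) != md->writeBytes(0, &header, sizeof(header)))
        return (kIOReturnUnderrun);
    return (kIOReturnSuccess);
}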
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}

IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}

void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
#if defined(__ppc__ )
        reserved->preparationID = gIOMDPreparationID++;
#else
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
#endif
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
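// Illustrative sketch (hypothetical, not from this file): the preparation ID
// is a cheap way for a DMA layer to notice that a descriptor has been
// re-prepared (and so may describe different physical pages) since it was
// last seen.  `cachedID` is assumed to be state kept by the caller.
static __unused bool
examplePreparationChanged(IOMemoryDescriptor * md, uint64_t * cachedID)
{
    uint64_t current = md->getPreparationID();
    bool changed = (*cachedID != current);
    *cachedID = current;
    return (changed);
}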
IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap;
            bool whole = ((data->fOffset == 0) && (data->fLength == _length));
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)           dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            remap = (dataP->fDMAMapNumAddressBits < 64)
                 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBase)
            {
//              if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
                if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
                {
                    dataP->fMappedBase = data->fAlloc;
                    data->fAllocCount = 0;              // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocCount = 0;                  // IOMD owns the alloc
            }
            data->fMapContig = !dataP->fDiscontig;
        }

        return (err);
    }
    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;

#if IOMD_DEBUG_DMAACTIVE
    } else if (kIOMDDMAActive == op) {
        if (params) OSIncrementAtomic(&md->__iomd_reservedA);
        else {
            if (md->__iomd_reservedA)
                OSDecrementAtomic(&md->__iomd_reservedA);
            else
                panic("kIOMDSetDMAInactive");
        }
#endif /* IOMD_DEBUG_DMAACTIVE */

    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;
    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
            if (kIOReturnSuccess != err) return (err);
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn     ret;
    addr64_t     address = 0;
    IOByteCount  length  = 0;
    IOMapper *   mapper  = gIOSystemMapper;
    IOOptionBits type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        user_addr_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        address = (IOPhysicalAddress) addr;     // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapAddr(origAddr);
                length = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapAddr(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}

#ifndef __LP64__
addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);
    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
              address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#endif /* !__LP64__ */
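// Illustrative sketch (hypothetical, not from this file): walking a prepared
// descriptor segment by segment with getPhysicalSegment().  Passing
// kIOMemoryMapperNone asks for CPU-physical (unmapped) addresses.
static __unused void
exampleDumpSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength())
    {
        IOByteCount segLen  = 0;
        addr64_t    segPhys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!segPhys || !segLen)
            break;
        IOLog("seg @0x%llx phys 0x%llx len 0x%llx\n",
              (unsigned long long) offset,
              (unsigned long long) segPhys,
              (unsigned long long) segLen);
        offset += segLen;
    }
}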
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = getDirection();
        data->fIsPrepared = true;       // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
        return (err);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY;
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                         IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (_memEntry)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            user_addr_t addr;
            IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }
    return (err);
}

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }
        err = purgeableControlBits(newState, &control, &state);
        if (kIOReturnSuccess != err)
            break;
        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                err = purgeableStateBits(&state);
                *oldState = state;
            }
        }
    }
    while (false);

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (err);
}
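// Illustrative sketch (hypothetical, not from this file): marking a cache
// buffer volatile while idle and reclaiming it before reuse.  If the kernel
// emptied it while volatile, the old contents must be regenerated.
static __unused bool
exampleMakeNonVolatile(IOMemoryDescriptor * cacheMD)
{
    IOOptionBits oldState = 0;
    if (kIOReturnSuccess != cacheMD->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState))
        return (false);
    // Contents survived only if the previous state was not "empty".
    return (kIOMemoryPurgeableEmpty != oldState);
}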
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                   IOByteCount * dirtyPageCount )
{
    IOReturn     err = kIOReturnSuccess;
    unsigned int _residentPageCount, _dirtyPageCount;

    if (kIOMemoryThreadSafe & _flags) LOCK;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }
        if ((residentPageCount == NULL) && (dirtyPageCount == NULL))
        {
            err = kIOReturnBadArgument;
            break;
        }

        err = mach_memory_entry_get_page_counts((ipc_port_t) _memEntry,
                        residentPageCount ? &_residentPageCount : NULL,
                        dirtyPageCount    ? &_dirtyPageCount    : NULL);
        if (kIOReturnSuccess != err) break;
        if (residentPageCount) *residentPageCount = _residentPageCount;
        if (dirtyPageCount)    *dirtyPageCount    = _dirtyPageCount;
    }
    while (false);

    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
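// Illustrative sketch (hypothetical, not from this file): sampling how much
// of a descriptor's backing memory is resident and dirty, e.g. for a debug
// diagnostic.
static __unused void
exampleLogPageCounts(IOMemoryDescriptor * md)
{
    IOByteCount resident = 0, dirty = 0;
    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty))
        IOLog("md %p: %u resident, %u dirty pages\n",
              md, (unsigned int) resident, (unsigned int) dirty);
}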
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
                                      IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
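// Illustrative sketch (hypothetical, not from this file): forcing dirty cache
// lines out to memory before handing a buffer to a non-coherent DMA engine.
static __unused IOReturn
exampleFlushForDMA(IOMemoryDescriptor * md)
{
    // Flush the whole descriptor; a real driver would usually restrict this
    // to the byte range it is about to DMA.
    return (md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength()));
}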
#if defined(__i386__) || defined(__x86_64__)
extern vm_offset_t              first_avail;
#define io_kernel_static_end    first_avail
#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        uintptr_t               offset,
        vm_size_t               *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnCannotWire;
    ioGMDData *dataP;
    upl_page_info_array_t pageInfo;
    ppnum_t mapBase = 0;
    ipc_port_t sharedMem;
    IOMapper *mapper;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
        break;
    }

    if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
    {
        OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
        error = kIOReturnNotWritable;
    }
    else error = kIOReturnSuccess;
    if (kIOReturnSuccess != error) return (error);

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    dataP->fMappedBase = 0;

    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
    if (kIODirectionPrepareToPhys32 & forDirection)
    {
        if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
        if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
    }
    if (kIODirectionPrepareNoFault     & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
    if (kIODirectionPrepareNoZeroFill  & forDirection) uplFlags |= UPL_NOZEROFILLIO;
    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

    sharedMem = (ipc_port_t) _memEntry;

    // Note that appendBytes(NULL) zeros the data up to the desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset   = 0;
    ppnum_t highestPage    = 0;

    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        user_addr_t startPage;
        IOByteCount numBytes;
        ppnum_t highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedPage = mapBase + pageIndex;
        else
            iopl.fMappedPage = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
            if (curMap)
                theMap = curMap;
            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }
            else
                theMap = NULL;

            int ioplFlags = uplFlags;
            dataP = getDataP(_memoryEntries);
            pageInfo = getPageList(dataP);
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &highPage);
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          (upl_size_t*)&ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            if (iopl.fIOPL)
                highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnCannotWire;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
            }
            else {
                iopl.fFlags = 0;
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;
            if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;

#if 0
            // used to remove the upl for auto prepares here, for some errant code
            // that freed memory before the descriptor pointing at it
            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }
#endif

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partial created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for a multiple iopl's in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper) iopl.fMappedPage = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return error;
}
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;

    return (true);
}
IOReturn IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * address,
    ppnum_t                     * mapPages)
{
    IOMDDMAWalkSegmentState  walkState;
    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
    IOOptionBits             mdOp;
    IOReturn                 ret;
    IOPhysicalLength         segLen;
    addr64_t                 phys, align, pageOffset;
    ppnum_t                  base, pageIndex, pageCount;
    uint64_t                 index;
    uint32_t                 mapOptions = 0;

    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    walkArgs->fMapped = false;
    mdOp = kIOMDFirstSegment;
    pageCount = 0;
    for (index = 0; index < length; )
    {
        if (index && (page_mask & (index + pageOffset))) break;

        walkArgs->fOffset = offset + index;
        ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
        mdOp = kIOMDWalkSegments;
        if (ret != kIOReturnSuccess) break;
        phys = walkArgs->fIOVMAddr;
        segLen = walkArgs->fLength;

        align = (phys & page_mask);
        if (!index) pageOffset = align;
        else if (align) break;
        pageCount += atop_64(round_page_64(align + segLen));
        index += segLen;
    }

    if (index < length) return (kIOReturnVMError);

    base = mapper->iovmMapMemory(this, offset, pageCount,
                                 mapOptions, NULL, mapSpec);

    if (!base) return (kIOReturnNoResources);

    mdOp = kIOMDFirstSegment;
    for (pageIndex = 0, index = 0; index < length; )
    {
        walkArgs->fOffset = offset + index;
        ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
        mdOp = kIOMDWalkSegments;
        if (ret != kIOReturnSuccess) break;
        phys = walkArgs->fIOVMAddr;
        segLen = walkArgs->fLength;

        ppnum_t page = atop_64(phys);
        ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
        while (count--)
        {
            mapper->iovmInsert(base, pageIndex, page);
            page++;
            pageIndex++;
        }
        index += segLen;
    }
    if (pageIndex != pageCount) panic("pageIndex");

    *address = ptoa_64(base) + pageOffset;
    if (mapPages) *mapPages = pageCount;

    return (kIOReturnSuccess);
}
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * address,
    ppnum_t                     * mapPages)
{
    IOReturn     err  = kIOReturnSuccess;
    ioGMDData *  dataP;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
     || offset || (length != _length))
    {
        err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;
        ppnum_t           base;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment      = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else
            pageList = getPageList(dataP);

        if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        base = mapper->iovmMapMemory(
                        this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
        *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
        if (mapPages) *mapPages = _pages;
    }

    return (err);
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error = kIOReturnSuccess;
    IOOptionBits type  = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}
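/*
 * Illustrative sketch only (not part of this file and not built): a
 * hypothetical driver pairs prepare() and complete() around a transfer,
 * exactly as the prepare()/complete() documentation describes.  The
 * startHardwareIO() routine below is an assumption used purely for
 * illustration.
 */
#if 0
static IOReturn
exampleTransfer(IOMemoryDescriptor * md)
{
    // Page in and wire the buffer before the hardware touches it.
    IOReturn ret = md->prepare();
    if (kIOReturnSuccess != ret)
        return ret;

    ret = startHardwareIO(md);          // hypothetical device programming

    // Every successful prepare() must be balanced by a complete().
    md->complete();
    return ret;
}
#endif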
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare() was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    assert(_wireCount);

    if (_wireCount)
    {
        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount)
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

#if IOMD_DEBUG_DMAACTIVE
            if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

            if (dataP->fMappedBase) {
                dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
                dataP->fMappedBase = 0;
            }
            // Only complete the iopls that we created, which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            } else if (kIOMemoryTypeUPL == type) {
                upl_set_referenced(ioplList[0].fIOPL, false);
            }

            (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

            dataP->fPreparationID = kIOPreparationIDUnprepared;
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
IOReturn IOGeneralMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    kern_return_t kr        = kIOReturnVMError;
    ipc_port_t    sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges       vec  = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len  = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
     && (1 == _rangesCount) && (0 == offset)
     && range0Addr && (length <= range0Len) )
    {
        mapping->fAddress  = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            vm_prot_t            prot       = VM_PROT_READ;
            if (!(kIOMapReadOnly & options))
                prot |= VM_PROT_WRITE;
            else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            if (_rangesCount == 1)
            {
                kr = mach_make_memory_entry_64(get_task_map(_task),
                                               &actualSize, range0Addr,
                                               prot, &sharedMem,
                                               NULL);
            }
            if( (_rangesCount != 1)
                || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
            {
                IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
                      _rangesCount, (UInt64)actualSize, (UInt64)size);

                kr = kIOReturnVMError;
                if (sharedMem)
                {
                    ipc_port_release_send(sharedMem);
                    sharedMem = MACH_PORT_NULL;
                }

                mach_vm_address_t address, segDestAddr;
                mach_vm_size_t    mapLength;
                unsigned          rangesIndex;
                IOOptionBits      type = _flags & kIOMemoryTypeMask;
                user_addr_t       srcAddr;
                IOPhysicalLength  segLen = 0;

                // Find the starting address within the vector of ranges
                for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
                    getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
                    if (offset < segLen) break;
                    offset -= segLen; // (make offset relative)
                }

                mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
                address = trunc_page_64(mapping->fAddress);

                if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
                {
                    vm_map_t map = mapping->fAddressMap;
                    kr = IOMemoryDescriptorMapCopy(&map,
                                                   options,
                                                   offset, &address, round_page_64(length + pageOffset));
                    if (kr == KERN_SUCCESS)
                    {
                        segDestAddr  = address;
                        segLen      -= pageOffset;
                        srcAddr     += pageOffset;
                        mapLength    = length;

                        while (segLen)
                        {
                            vm_prot_t cur_prot, max_prot;

                            if (segLen > length) segLen = length;
                            kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
                                               VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                                               get_task_map(_task), trunc_page_64(srcAddr),
                                               false /* copy */,
                                               &cur_prot,
                                               &max_prot,
                                               VM_INHERIT_NONE);
                            if (KERN_SUCCESS == kr)
                            {
                                if ((!(VM_PROT_READ & cur_prot))
                                    || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
                                {
                                    kr = KERN_PROTECTION_FAILURE;
                                }
                            }
                            if (KERN_SUCCESS != kr)
                                break;
                            segDestAddr += segLen;
                            mapLength   -= segLen;
                            if (!mapLength)
                                break;
                            rangesIndex++;
                            if (rangesIndex >= _rangesCount)
                            {
                                kr = kIOReturnBadArgument;
                                break;
                            }
                            getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
                            if (srcAddr & PAGE_MASK)
                            {
                                kr = kIOReturnBadArgument;
                                break;
                            }
                            if (segLen > mapLength)
                                segLen = mapLength;
                        }
                        if (KERN_SUCCESS != kr)
                        {
                            mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
                        }
                    }

                    if (KERN_SUCCESS == kr)
                        mapping->fAddress = address + pageOffset;
                    else
                        mapping->fAddress = NULL;
                }
            }
        }
        else do
        {   // _task == 0, must be physical

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );

            if( !getKernelReserved())
                continue;
            reserved->dp.pagerContig = (1 == _rangesCount);
            reserved->dp.memory      = this;

            /* What cache mode do we need? */
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    if (DEVICE_PAGER_CACHE_INHIB & flags)
                    {
                        if (DEVICE_PAGER_GUARDED & flags)
                            mapping->fOptions |= kIOMapInhibitCache;
                        else
                            mapping->fOptions |= kIOMapWriteCombineCache;
                    }
                    else if (DEVICE_PAGER_WRITE_THROUGH & flags)
                        mapping->fOptions |= kIOMapWriteThruCache;
                    else
                        mapping->fOptions |= kIOMapCopybackCache;
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                                size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr)
                {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->dp.devicePager = pager;

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    IOReturn result;
    if (0 == sharedMem)
        result = kIOReturnVMError;
    else
        result = super::doMap( __addressMap, __address,
                               options, __offset, __length );

    return( result );
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOMemoryMap::init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      _options,
        mach_vm_size_t    _offset,
        mach_vm_size_t    _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t        sharedMem;
    vm_map_t          map;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    mach_vm_size_t    sourceOffset;
    IOOptionBits      options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem)
        {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // VM system requires write access to change cache mode
            if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapCopybackInnerCache:
                    SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = mach_vm_map( map,
                            &ref->mapped,
                            ref->size, 0 /* mask */,
                            (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                            | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                            ref->sharedMem, ref->sourceOffset,
                            false, // copy
                            prot,  // cur
                            prot,  // max
                            VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
        }
        else
        {
            err = mach_vm_allocate(map, &ref->mapped, ref->size,
                            ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                            | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    }
    while( false );

    return( err );
}

kern_return_t
IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
                              mach_vm_size_t offset,
                              mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = entry;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if( ref.sharedMem && (ref.map == kernel_map) && pageable)
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( ref.map, &ref );

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}

kern_return_t
IOMemoryDescriptorMapCopy(vm_map_t * map,
                          IOOptionBits options,
                          mach_vm_size_t offset,
                          mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = NULL;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if (ref.map == kernel_map)
        err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
    else
        err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}
IOReturn IOMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;
    unsigned int      lock_count;

    do
    {
        sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->dp.devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = round_page(mapping->fLength + pageOffset);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->fMemory->_memEntry;
                mapping->fMemory->_memEntry = me;
            }
            if (pager)
                err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            vm_map_t map = mapping->fAddressMap;
            err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
                      err, this, (uint64_t)sourceAddr, mapping, address, offset, length);

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}
IOReturn IOMemoryDescriptor::handleFault(
    void *            _pager,
    vm_map_t          addressMap,
    mach_vm_address_t address,
    mach_vm_size_t    sourceOffset,
    mach_vm_size_t    length,
    IOOptionBits      options )
{
    IOReturn         err   = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    mach_vm_size_t   size;
    mach_vm_size_t   bytes;
    mach_vm_size_t   page;
    mach_vm_size_t   pageOffset;
    mach_vm_size_t   pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->dp.pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
            }
            else
            {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        /*  *** ALERT *** */
        /*  *** Temporary Workaround *** */

        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /*  *** Temporary Workaround *** */
        /*  *** ALERT *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((IOMemoryMap *) __address)->fAddressMap;
        address    = ((IOMemoryMap *) __address)->fAddress;
        length     = ((IOMemoryMap *) __address)->fLength;
    }

    if ((addressMap == kernel_map)
     && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
        addressMap = IOPageableMapForAddress( address );

    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length );

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn      err     = kIOReturnSuccess;
    IOMemoryMap * mapping = 0;
    OSIterator *  iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            memory_object_t pager;

            if( reserved)
                pager = (memory_object_t) reserved->dp.devicePager;
            else
                pager = MACH_PORT_NULL;

            while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            {
                mapping->redirect( safeTask, doRedirect );
                if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
                {
                    err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
                }
            }

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//        err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)
                break;
            if (!fAddressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
              && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                err = kIOReturnSuccess;
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);
        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        vm_map_iokit_unmapped_region(fAddressMap, fLength);

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap)
        unmap();
    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}

IOReturn IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}
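/*
 * Illustrative sketch only (not part of this file and not built): with the
 * two-reference scheme described above, a caller that maps a descriptor owns
 * one reference and the descriptor's _mappings set holds the other.  A single
 * release() by the creator drops below the freeWhen count of 2 used above, so
 * the mapping is then torn down and removes itself from _mappings.
 */
#if 0
{
    IOMemoryMap * map = md->map();      // kernel mapping, kIOMapAnywhere
    if (map)
    {
        // ... use the mapping ...
        map->release();                 // creator's reference; triggers free()
    }
}
#endif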
void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
                && vm_map_is_64bit(fAddressMap)
                && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
    return( fAddress);
}

mach_vm_size_t IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */

task_t IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits IOMemoryMap::getMapOptions()
{
    return( fOptions);
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
IOMemoryMap * IOMemoryMap::copyCompatible(
                IOMemoryMap * newMapping )
{
    task_t            task      = newMapping->getAddressTask();
    mach_vm_address_t toAddress = newMapping->fAddress;
    IOOptionBits      _options  = newMapping->fOptions;
    mach_vm_size_t    _offset   = newMapping->fOffset;
    mach_vm_size_t    _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = fOffset + _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
IOReturn IOMemoryMap::wireRange(
        uint32_t       options,
        mach_vm_size_t offset,
        mach_vm_size_t length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);

    if (kIODirectionOutIn & options)
    {
        kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
    }
    else
    {
        kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return (kr);
}

IOPhysicalAddress
#ifndef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifndef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options);
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length);
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
    task_t           intoTask,
    IOVirtualAddress mapAddress,
    IOOptionBits     options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    IOOptionBits options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
    task_t           intoTask,
    IOVirtualAddress atAddress,
    IOOptionBits     options,
    IOByteCount      offset,
    IOByteCount      length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */
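/*
 * Illustrative sketch only (not part of this file and not built): for 64-bit
 * tasks, the backtrace warnings above and in getVirtualAddress() point
 * callers at createMappingInTask() and IOMemoryMap::getAddress() rather than
 * the legacy map()/getVirtualAddress() entry points.
 */
#if 0
{
    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
                                                kIOMapAnywhere, 0, 0);
    if (map)
    {
        mach_vm_address_t addr = map->getAddress();   // 64-bit safe accessor
        // ... use addr ...
        map->release();
    }
}
#endif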
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
    task_t            intoTask,
    mach_vm_address_t atAddress,
    IOOptionBits      options,
    mach_vm_size_t    offset,
    mach_vm_size_t    length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
              this, atAddress, (uint32_t) options, offset, length);

    return (result);
}
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = round_page(fLength);
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                                           NULL, NULL,
                                                           &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if (false)
                    physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (false && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               __intoTask,
    IOVirtualAddress     __address,
    IOOptionBits         options,
    IOByteCount          __offset,
    IOByteCount          __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this) continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            if (0 == (mapping->fOptions & kIOMapStatic)) {
                vm_map_iokit_mapped_region(mapping->fAddressMap, length);
            }

            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
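/*
 * Illustrative sketch only (not part of this file and not built): new code
 * should use the factory methods that feed initWithOptions(), for example
 * IOMemoryDescriptor::withAddressRange(), instead of the obsolete
 * initializers stubbed out below.  bufferAddress and bufferLength are
 * assumed, caller-supplied values used only for illustration.
 */
#if 0
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
        bufferAddress, bufferLength, kIODirectionOut, current_task());
    if (md)
    {
        // ... prepare(), map or DMA, complete() ...
        md->release();
    }
}
#endif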
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                    IOPhysicalAddress address,
                                    IOByteCount       length,
                                    IODirection       direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                    IOVirtualRange * ranges,
                                    UInt32           count,
                                    IODirection      direction,
                                    task_t           task,
                                    bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                            UInt32            count,
                                            IODirection       direction,
                                            bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }