/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData {
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;         // Pointer to page list or index into it
    uint32_t     fIOMDOffset;       // The offset of this iopl in descriptor
    ppnum_t      fMappedPage;       // Page number of first page in this iopl
    unsigned int fPageOffset;       // Offset within first page of iopl
    unsigned int fFlags;            // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData {
    IOMapper *    fMapper;
    uint64_t      fDMAMapAlignment;
    uint64_t      fMappedBase;
    uint64_t      fMappedLength;
    uint64_t      fPreparationID;
#if IOTRACKING
    IOTracking    fWireTracking;
#endif /* IOTRACKING */
    unsigned int  fPageCnt;
    uint8_t       fDMAMapNumAddressBits;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char fMappedBaseValid:1;
    unsigned char _resv:3;
    unsigned char fDMAAccess:2;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
    // align fPageList as for ioPLBlock
    __attribute__((aligned(sizeof(upl_t))))
    ;
    ioPLBlock fBlocks[1];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
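/*
 * Illustrative sketch (not part of the original source): how the accessor
 * macros above are typically combined once initMemoryEntries() has created
 * the _memoryEntries OSData with a computeDataSize()-sized buffer.
 *
 *   unsigned int      dataSize = computeDataSize(pageCount, uplCount);
 *   ioGMDData *       dataP    = getDataP(_memoryEntries);     // blob header
 *   ioPLBlock *       iopls    = getIOPLList(dataP);           // after fPageList[fPageCnt]
 *   UInt              nIOPL    = getNumIOPL(_memoryEntries, dataP);
 *   upl_page_info_t * pages    = getPageList(dataP);
 */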
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
extern "C" {
kern_return_t
device_data_action(
	uintptr_t               device_handle,
	ipc_port_t              device_pager,
	vm_prot_t               protection,
	vm_object_offset_t      offset,
	vm_size_t               size)
{
	kern_return_t                kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	IOMemoryDescriptor *         memDesc;

	LOCK;
	memDesc = ref->dp.memory;
	if (memDesc) {
		memDesc->retain();
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc->release();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IODelete( ref, IOMemoryDescriptorReserved, 1 );

	return kIOReturnSuccess;
}
}       /* extern "C" */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
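/*
 * Illustrative call (not part of the original source): because addr and len
 * are C++ references, a caller just declares locals and passes them directly,
 * for example while iterating a descriptor's ranges:
 *
 *   mach_vm_address_t addr;
 *   mach_vm_size_t    len;
 *   getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, rangeIdx);
 */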
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
	IOReturn err = kIOReturnSuccess;

	*control = VM_PURGABLE_SET_STATE;

	enum { kIOMemoryPurgeableControlMask = 15 };

	switch (kIOMemoryPurgeableControlMask & newState) {
	case kIOMemoryPurgeableKeepCurrent:
		*control = VM_PURGABLE_GET_STATE;
		break;

	case kIOMemoryPurgeableNonVolatile:
		*state = VM_PURGABLE_NONVOLATILE;
		break;
	case kIOMemoryPurgeableVolatile:
		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	case kIOMemoryPurgeableEmpty:
		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	default:
		err = kIOReturnBadArgument;
		break;
	}

	if (*control == VM_PURGABLE_SET_STATE) {
		// let VM know this call is from the kernel and is allowed to alter
		// the volatility of the memory entry even if it was created with
		// MAP_MEM_PURGABLE_KERNEL_ONLY
		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
	}

	return err;
}
static IOReturn
purgeableStateBits(int * state)
{
	IOReturn err = kIOReturnSuccess;

	switch (VM_PURGABLE_STATE_MASK & *state) {
	case VM_PURGABLE_NONVOLATILE:
		*state = kIOMemoryPurgeableNonVolatile;
		break;
	case VM_PURGABLE_VOLATILE:
		*state = kIOMemoryPurgeableVolatile;
		break;
	case VM_PURGABLE_EMPTY:
		*state = kIOMemoryPurgeableEmpty;
		break;
	default:
		*state = kIOMemoryPurgeableNonVolatile;
		err = kIOReturnNotReady;
		break;
	}
	return err;
}
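/*
 * Illustrative sketch (not part of the original source): converting an IOKit
 * purgeable request into VM terms with the two helpers above.
 *
 *   vm_purgable_t control;
 *   int           state;
 *   if (kIOReturnSuccess == purgeableControlBits(kIOMemoryPurgeableVolatile,
 *           &control, &state)) {
 *       // control == VM_PURGABLE_SET_STATE_FROM_KERNEL and state carries
 *       // VM_PURGABLE_VOLATILE plus any behavior-hint bits.
 *   }
 *   // After the VM call fills `state` with the previous state:
 *   purgeableStateBits(&state);   // maps it back to a kIOMemoryPurgeable* value
 */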
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	vm_prot_t prot = 0;
	switch (cacheMode) {
	case kIOInhibitCache:
		SET_MAP_MEM(MAP_MEM_IO, prot);
		break;

	case kIOWriteThruCache:
		SET_MAP_MEM(MAP_MEM_WTHRU, prot);
		break;

	case kIOWriteCombineCache:
		SET_MAP_MEM(MAP_MEM_WCOMB, prot);
		break;

	case kIOCopybackCache:
		SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
		break;

	case kIOCopybackInnerCache:
		SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
		break;

	case kIOPostedWrite:
		SET_MAP_MEM(MAP_MEM_POSTED, prot);
		break;

	case kIODefaultCache:
	default:
		SET_MAP_MEM(MAP_MEM_NOOP, prot);
		break;
	}

	return prot;
}
static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
	unsigned int pagerFlags = 0;
	switch (cacheMode) {
	case kIOInhibitCache:
		pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
		break;

	case kIOWriteThruCache:
		pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
		break;

	case kIOWriteCombineCache:
		pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
		break;

	case kIOCopybackCache:
		pagerFlags = DEVICE_PAGER_COHERENT;
		break;

	case kIOCopybackInnerCache:
		pagerFlags = DEVICE_PAGER_COHERENT;
		break;

	case kIOPostedWrite:
		pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
		break;

	case kIODefaultCache:
	default:
		pagerFlags = -1U;
		break;
	}
	return pagerFlags;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOMemoryEntry {
	ipc_port_t entry;
	int64_t    offset;
	uint64_t   size;
};

struct IOMemoryReference {
	volatile SInt32             refCount;
	vm_prot_t                   prot;
	uint32_t                    capacity;
	uint32_t                    count;
	struct IOMemoryReference  * mapRef;
	IOMemoryEntry               entries[0];
};

enum {
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t              newSize, oldSize, copySize;

	newSize = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + capacity * sizeof(ref->entries[0]));
	ref = (typeof(ref))IOMalloc(newSize);
	if (realloc) {
		oldSize = (sizeof(IOMemoryReference)
		    - sizeof(realloc->entries)
		    + realloc->capacity * sizeof(realloc->entries[0]));
		copySize = oldSize;
		if (copySize > newSize) {
			copySize = newSize;
		}
		if (ref) {
			bcopy(realloc, ref, copySize);
		}
		IOFree(realloc, oldSize);
	} else if (ref) {
		bzero(ref, sizeof(*ref));
		ref->refCount = 1;
		OSIncrementAtomic(&gIOMemoryReferenceCount);
	}
	if (!ref) {
		return NULL;
	}
	ref->capacity = capacity;
	return ref;
}
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
	IOMemoryEntry * entries;
	size_t          size;

	if (ref->mapRef) {
		memoryReferenceFree(ref->mapRef);
		ref->mapRef = NULL;
	}

	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		ipc_port_release_send(entries->entry);
	}
	size = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + ref->capacity * sizeof(ref->entries[0]));
	IOFree(ref, size);

	OSDecrementAtomic(&gIOMemoryReferenceCount);
}
void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
	if (1 == OSDecrementAtomic(&ref->refCount)) {
		memoryReferenceFree(ref);
	}
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	kern_return_t        err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = getVMTag(kernel_map);
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode;
			pagerFlags = IODefaultCacheBits(nextAddr);
			if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) {
				if (DEVICE_PAGER_EARLY_ACK & pagerFlags) {
					mode = kIOPostedWrite;
				} else if (DEVICE_PAGER_GUARDED & pagerFlags) {
					mode = kIOInhibitCache;
				} else {
					mode = kIOWriteCombineCache;
				}
			} else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) {
				mode = kIOWriteThruCache;
			} else {
				mode = kIOCopybackCache;
			}
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges
		if (kIOMemoryBufferPageable & _flags) {
			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;
			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					prot |= MAP_MEM_LEDGER_TAG_NETWORK;
				}
			}
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			map = NULL;
		} else {
			map = get_task_map(_task);
		}

		remain = _length;
		while (remain) {
			srcAddr = nextAddr;
			srcLen  = nextLen;

			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			entryAddr = trunc_page_64(srcAddr);
			endAddr   = round_page_64(srcAddr + srcLen);
			do {
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					break;
				}
				if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			} while (true);

			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_32(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}
		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			err = kIOReturnVMError;
		} else {
			srcAddr   = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn                        err;
	vm_map_offset_t                 addr;

	addr = ref->mapped;

	err = vm_map_enter_mem_object(map, &addr, ref->size,
	    (vm_map_offset_t) 0,
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    VM_MAP_KERNEL_FLAGS_NONE,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
	}

	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn          err;
	int64_t           offset = inoffset;
	uint32_t          rangeIdx, entryIdx;
	vm_map_offset_t   addr, mapAddr;
	vm_map_offset_t   pageOffset, entryOffset, remain, chunk;

	mach_vm_address_t nextAddr;
	mach_vm_size_t    nextLen;
	IOByteCount       physLen;
	IOMemoryEntry *   entry;
	vm_prot_t         prot, memEntryCacheMode;
	IOOptionBits      type;
	IOOptionBits      cacheMode;
	vm_tag_t          tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt              currentPageIndex = 0;
	bool              didAlloc;

	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

	tag = getVMTag(map);

	type = _flags & kIOMemoryTypeMask;

	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	if (_task) {
		// Find first range for offset
		if (!_rangesCount) {
			return kIOReturnBadArgument;
		}
		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
			if (remain < nextLen) {
				break;
			}
			remain -= nextLen;
		}
	} else {
		rangeIdx = 0;
		remain   = 0;
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen  = size;
	}

	assert(remain < nextLen);
	if (remain >= nextLen) {
		return kIOReturnBadArgument;
	}

	nextAddr  += remain;
	nextLen   -= remain;
	pageOffset = (page_mask & nextAddr);
	addr       = 0;
	didAlloc   = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
		if (pageOffset != (page_mask & addr)) {
			return kIOReturnNotAligned;
		}
		addr -= pageOffset;
	}

	// find first entry for offset
	for (entryIdx = 0;
	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
	    entryIdx++) {
	}
	entryIdx--;
	entry = &ref->entries[entryIdx];

	// allocate VM
	size = round_page_64(size + pageOffset);
	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = size;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}
		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * operations.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted in.
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData * dataP = getDataP(_memoryEntries);
		ioPLBlock const * ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain  = size;
	mapAddr = addr;
	addr   += pageOffset;

	while (remain && (KERN_SUCCESS == err)) {
		entryOffset = offset - entry->offset;
		if ((page_mask & entryOffset) != pageOffset) {
			err = kIOReturnNotAligned;
			break;
		}

		if (kIODefaultCache != cacheMode) {
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}

		entryOffset -= pageOffset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags;

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

			if (chunk > remain) {
				chunk = remain;
			}
			if (options & kIOMapPrefault) {
				UInt nb_pages = round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    prot, // cur
				    prot, // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
				err = vm_map_enter_mem_object(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    false, // copy
				    prot, // cur
				    prot, // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				break;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			mapAddr += chunk;
			offset  += chunk - pageOffset;
		}
		pageOffset = 0;
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) mach_vm_deallocate(map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
	IOMemoryReference * ref,
	IOByteCount       * residentPageCount,
	IOByteCount       * dirtyPageCount)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	unsigned int    resident, dirty;
	unsigned int    totalResident, totalDirty;

	totalResident = totalDirty = 0;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
		if (KERN_SUCCESS != err) {
			break;
		}
		totalResident += resident;
		totalDirty    += dirty;
	}

	if (residentPageCount) {
		*residentPageCount = totalResident;
	}
	if (dirtyPageCount) {
		*dirtyPageCount = totalDirty;
	}
	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return IOMemoryDescriptor::
	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
	IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
	if (that) {
		if (that->initWithAddress(address, length, direction, task)) {
			return that;
		}
		that->release();
	}
	return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
	IOPhysicalAddress       address,
	IOByteCount             length,
	IODirection             direction )
{
	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
	IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
	if (that) {
		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
			return that;
		}
		that->release();
	}
	return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
    mach_vm_size_t    length,
    IOOptionBits      options,
    task_t            task)
{
	IOAddressRange range = { address, length };
	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
    UInt32           rangeCount,
    IOOptionBits     options,
    task_t           task)
{
	IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
	if (that) {
		if (task) {
			options |= kIOMemoryTypeVirtual64;
		} else {
			options |= kIOMemoryTypePhysical64;
		}

		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) {
			return that;
		}

		that->release();
	}

	return 0;
}
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *          buffers,
    UInt32          count,
    UInt32          offset,
    task_t          task,
    IOOptionBits    opts,
    IOMapper *      mapper)
{
	IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

	if (self
	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
		self->release();
		return 0;
	}

	return self;
}
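/*
 * Illustrative sketch (not part of the original source): describing a single
 * user-task virtual range without an extra range allocation.  `userTask`,
 * `uAddr`, and `uLen` are hypothetical caller values.
 *
 *   IOAddressRange range = { uAddr, uLen };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(&range, 1, 0,
 *       userTask,
 *       kIOMemoryTypeVirtual64 | kIODirectionOutIn | kIOMemoryAsReference,
 *       NULL);
 */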
bool
IOMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	return false;
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
    UInt32          withCount,
    IODirection     direction,
    bool            asReference)
{
	IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
	if (that) {
		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
			return that;
		}
		that->release();
	}
	return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *    of,
    IOByteCount             offset,
    IOByteCount             length,
    IODirection             direction)
{
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
	IOGeneralMemoryDescriptor *origGenMD =
	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

	if (origGenMD) {
		return IOGeneralMemoryDescriptor::
		       withPersistentMemoryDescriptor(origGenMD);
	} else {
		return 0;
	}
}
IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return 0;
	}

	if (memRef == originalMD->_memRef) {
		originalMD->retain();               // Add a new reference to ourselves
		originalMD->memoryReferenceRelease(memRef);
		return originalMD;
	}

	IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
		self->release();
		self = 0;
	}
	return self;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   withLength,
    IODirection withDirection)
{
	_singleRange.v.address = (vm_offset_t) address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    withLength,
    IODirection    withDirection,
    task_t         withTask)
{
	_singleRange.v.address = address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress      address,
	IOByteCount            withLength,
	IODirection            withDirection )
{
	_singleRange.p.address = address;
	_singleRange.p.length  = withLength;

	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
	IOPhysicalRange * ranges,
	UInt32            count,
	IODirection       direction,
	bool              reference)
{
	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

	if (reference) {
		mdOpts |= kIOMemoryAsReference;
	}

	return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           count,
	IODirection      direction,
	task_t           task,
	bool             reference)
{
	IOOptionBits mdOpts = direction;

	if (reference) {
		mdOpts |= kIOMemoryAsReference;
	}

	if (task) {
		mdOpts |= kIOMemoryTypeVirtual;

		// Auto-prepare if this is a kernel memory descriptor as very few
		// clients bother to prepare() kernel memory.
		// But it was not enforced so what are you going to do?
		if (task == kernel_task) {
			mdOpts |= kIOMemoryAutoPrepare;
		}
	} else {
		mdOpts |= kIOMemoryTypePhysical;
	}

	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 *  from a given task, several physical ranges, a UPL from the ubc
 *  system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */
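/*
 * Illustrative sketch (not part of the original source): re-targeting an
 * already-initialized descriptor, as described above.  `md`, `task`, and the
 * address/length values are hypothetical caller state.
 *
 *   IOAddressRange ranges[2] = { { addrA, lenA }, { addrB, lenB } };
 *   md->initWithOptions(ranges, 2, 0, task,
 *       kIOMemoryTypeVirtual64 | kIODirectionInOut, NULL);
 */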
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */
	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef;    // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type    = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count   = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task   = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}
:
1463 case kIOMemoryTypeVirtual
:
1465 case kIOMemoryTypeVirtual64
:
1466 #endif /* !__LP64__ */
1473 case kIOMemoryTypePhysical
: // Neither Physical nor UPL should have a task
1475 case kIOMemoryTypePhysical64
:
1476 #endif /* !__LP64__ */
1477 case kIOMemoryTypeUPL
:
1481 return false; /* bad argument */
	/*
	 * We can check the _initialized instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			while (_wireCount) {
				complete();
			}
		}
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = 0;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}
	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = 0;     // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options &= ~(kIOMemoryPreparedReadOnly);
	_flags   = options;
	_task    = task;

#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	_dmaReferences = 0;
	__iomd_reservedA = 0;
	__iomd_reservedB = 0;
	_highestPage = 0;

	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}
	if (kIOMemoryTypeUPL == type) {
		ioGMDData *dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x\n", (int) options);
			break;
		}
		//       _wireCount++;	// UPLs start out life wired

		_length = count;
		_pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		ioPLBlock iopl;
		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);

		// Set the flag kIOPLOnDevice conveniently equal to 1
		iopl.fFlags = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				if (count == 1
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;
		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);
			if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
				break;
			}
			if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
				break;
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				ppnum_t highPage = atop_64(addr + len - 1);
				if (highPage > _highestPage) {
					_highestPage = highPage;
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false; /* overflow */
		}
		_length = totalLength;
		_pages  = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed

		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++;   // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned dataSize;

			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (reserved) {
		LOCK;
		reserved->dp.memory = 0;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries->release();
	}

	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}
	}

	if (reserved) {
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager);
		} else {
			IODelete(reserved, IOMemoryDescriptorReserved, 1);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
#ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}

void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
	if (_direction) {
		return _direction;
	}
#endif /* !__LP64__ */
	return (IODirection) (_flags & kIOMemoryDirectionMask);
}
/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	return _length;
}

void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}

IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}

uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}
#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
	addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;

	// Assert that this entire I/O is within the available range
	assert(offset <= _length);
	assert(offset + length <= _length);
	if ((offset >= _length)
	    || ((offset + length) > _length)) {
		return 0;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) {     // (process another target segment?)
		addr64_t    srcAddr64;
		IOByteCount srcLen;

		srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
		if (!srcAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (srcLen > remaining) {
			srcLen = remaining;
		}

		copypv(srcAddr64, dstAddr, srcLen,
		    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

		dstAddr   += srcLen;
		offset    += srcLen;
		remaining -= srcLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

	return length - remaining;
}
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
	addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
	IOByteCount remaining;
	IOByteCount offset = inoffset;

	// Assert that this entire I/O is within the available range
	assert(offset <= _length);
	assert(offset + length <= _length);

	assert( !(kIOMemoryPreparedReadOnly & _flags));

	if ((kIOMemoryPreparedReadOnly & _flags)
	    || (offset >= _length)
	    || ((offset + length) > _length)) {
		return 0;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}

	remaining = length = min(length, _length - offset);
	while (remaining) {     // (process another target segment?)
		addr64_t    dstAddr64;
		IOByteCount dstLen;

		dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
		if (!dstAddr64) {
			break;
		}

		// Clip segment length to remaining
		if (dstLen > remaining) {
			dstLen = remaining;
		}

		if (!srcAddr) {
			bzero_phys(dstAddr64, dstLen);
		} else {
			copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			srcAddr += dstLen;
		}
		offset    += dstLen;
		remaining -= dstLen;
	}

	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	assert(!remaining);

#if defined(__x86_64__)
	// copypv does not cppvFsnk on intel
#else
	if (!srcAddr) {
		performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
	}
#endif

	return length - remaining;
}
#ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
	panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
	ioGMDData *dataP;

	if (!_wireCount) {
		return kIOPreparationIDUnprepared;
	}

	if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
	    || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
		IOMemoryDescriptor::setPreparationID();
		return IOMemoryDescriptor::getPreparationID();
	}

	if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
		return kIOPreparationIDUnprepared;
	}

	if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
		dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
	}
	return dataP->fPreparationID;
}
IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
	if (!reserved) {
		reserved = IONew(IOMemoryDescriptorReserved, 1);
		if (reserved) {
			bzero(reserved, sizeof(IOMemoryDescriptorReserved));
		}
	}
	return reserved;
}

void
IOMemoryDescriptor::setPreparationID( void )
{
	if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
		reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
	}
}

uint64_t
IOMemoryDescriptor::getPreparationID( void )
{
	if (reserved) {
		return reserved->preparationID;
	} else {
		return kIOPreparationIDUnsupported;
	}
}
void
IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
{
	_kernelTag = kernelTag;
	_userTag   = userTag;
}

vm_tag_t
IOMemoryDescriptor::getVMTag(vm_map_t map)
{
	if (vm_kernel_map_is_kernel(map)) {
		if (VM_KERN_MEMORY_NONE != _kernelTag) {
			return _kernelTag;
		}
	} else {
		if (VM_KERN_MEMORY_NONE != _userTag) {
			return _userTag;
		}
	}
	return IOMemoryTag(map);
}
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOReturn err = kIOReturnSuccess;
	DMACommandOps params;
	IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
	ioGMDData *dataP;

	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries && data->fMapper) {
			bool remap, keepMap;
			dataP = getDataP(_memoryEntries);

			if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
			}
			if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->fMapSpec.alignment;
			}

			keepMap = (data->fMapper == gIOSystemMapper);
			keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockLock(_prepareLock);
			}

			remap = (!keepMap);
			remap |= (dataP->fDMAMapNumAddressBits < 64)
			    && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
			remap |= (dataP->fDMAMapAlignment > page_size);

			if (remap || !dataP->fMappedBaseValid) {
//		if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
				err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
				if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
					dataP->fMappedBase      = data->fAlloc;
					dataP->fMappedBaseValid = true;
					dataP->fMappedLength    = data->fAllocLength;
					data->fAllocLength      = 0;            // IOMD owns the alloc now
				}
			} else {
				data->fAlloc = dataP->fMappedBase;
				data->fAllocLength = 0;                         // give out IOMD map
				md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
			}
			data->fMapContig = !dataP->fDiscontig;

			if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
				IOLockUnlock(_prepareLock);
			}
		}
		return err;
	}
	if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		return kIOReturnSuccess;
	}
	if (kIOMDAddDMAMapSpec == op) {
		if (dataSize < sizeof(IODMAMapSpecification)) {
			return kIOReturnUnderrun;
		}

		IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		if (_memoryEntries) {
			dataP = getDataP(_memoryEntries);
			if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
				dataP->fDMAMapNumAddressBits = data->numAddressBits;
			}
			if (data->alignment > dataP->fDMAMapAlignment) {
				dataP->fDMAMapAlignment = data->alignment;
			}
		}
		return kIOReturnSuccess;
	}
	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = _length;
		data->fSGCount = _rangesCount;
		data->fPages = _pages;
		data->fDirection = getDirection();
		if (!_wireCount) {
			data->fIsPrepared = false;
		} else {
			data->fIsPrepared = true;
			data->fHighestPage = _highestPage;
			if (_memoryEntries) {
				dataP = getDataP(_memoryEntries);
				ioPLBlock *ioplList = getIOPLList(dataP);
				UInt count = getNumIOPL(_memoryEntries, dataP);
				if (count == 1) {
					data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
				}
			}
		}

		return kIOReturnSuccess;
	} else if (kIOMDDMAActive == op) {
		if (params) {
			int16_t prior;
			prior = OSAddAtomic16(1, &md->_dmaReferences);
			if (!prior) {
				md->_mapName = NULL;
			}
		} else {
			if (md->_dmaReferences) {
				OSAddAtomic16(-1, &md->_dmaReferences);
			} else {
				panic("_dmaReferences underflow");
			}
		}
	} else if (kIOMDWalkSegments != op) {
		return kIOReturnBadArgument;
	}
	// Get the next segment
	struct InternalState {
		IOMDDMAWalkSegmentArgs fIO;
		mach_vm_size_t fOffset2Index;
		mach_vm_size_t fNextOffset;
		UInt fIndex;
	} *isP;

	// Find the next segment
	if (dataSize < sizeof(*isP)) {
		return kIOReturnUnderrun;
	}

	isP = (InternalState *) vData;
	UInt offset = isP->fIO.fOffset;
	uint8_t mapped = isP->fIO.fMapped;
	uint64_t mappedBase;

	if (mapped && (kIOMemoryRemote & _flags)) {
		return kIOReturnNotAttached;
	}

	if (IOMapper::gSystem && mapped
	    && (!(kIOMemoryHostOnly & _flags))
	    && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
		if (!_memoryEntries
		    && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
			return kIOReturnNoMemory;
		}

		dataP = getDataP(_memoryEntries);
		if (dataP->fMapper) {
			IODMAMapSpecification mapSpec;
			bzero(&mapSpec, sizeof(mapSpec));
			mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
			mapSpec.alignment = dataP->fDMAMapAlignment;
			err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
			if (kIOReturnSuccess != err) {
				return err;
			}
			dataP->fMappedBaseValid = true;
		}
	}

	if (kIOMDDMAWalkMappedLocal == mapped) {
		mappedBase = isP->fIO.fMappedBase;
	} else if (mapped) {
		if (IOMapper::gSystem
		    && (!(kIOMemoryHostOnly & _flags))
		    && _memoryEntries
		    && (dataP = getDataP(_memoryEntries))
		    && dataP->fMappedBaseValid) {
			mappedBase = dataP->fMappedBase;
		} else {
			mapped = 0;
		}
	}

	if (offset >= _length) {
		return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
	}

	// Validate the previous offset
	UInt ind, off2Ind = isP->fOffset2Index;
	if (!params
	    && offset
	    && (offset == isP->fNextOffset || off2Ind <= offset)) {
		ind = isP->fIndex;
	} else {
		ind = off2Ind = 0;      // Start from beginning
	}
	UInt length;
	UInt64 address;
	if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
		// Physical address based memory descriptor
		const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length = off2Ind - offset;
		address = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}

		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#ifndef __LP64__
	else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
		// Physical address based memory descriptor
		const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

		// Find the range after the one that contains the offset
		mach_vm_size_t len;
		for (len = 0; off2Ind <= offset; ind++) {
			len = physP[ind].length;
			off2Ind += len;
		}

		// Calculate length within range and starting address
		length = off2Ind - offset;
		address = physP[ind - 1].address + len - length;

		if (true && mapped) {
			address = mappedBase + offset;
		} else {
			// see how far we can coalesce ranges
			while (ind < _rangesCount && address + length == physP[ind].address) {
				len = physP[ind].length;
				length += len;
				off2Ind += len;
				ind++;
			}
		}
		// correct contiguous check overshoot
		ind--;
		off2Ind -= len;
	}
#endif /* !__LP64__ */
	else {
		do {
			if (!_wireCount) {
				panic("IOGMD: not wired for the IODMACommand");
			}

			assert(_memoryEntries);

			dataP = getDataP(_memoryEntries);
			const ioPLBlock *ioplList = getIOPLList(dataP);
			UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
			upl_page_info_t *pageList = getPageList(dataP);

			assert(numIOPLs > 0);

			// Scan through iopl info blocks looking for block containing offset
			while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
				ind++;
			}

			// Go back to actual range as search goes past it
			ioPLBlock ioplInfo = ioplList[ind - 1];
			off2Ind = ioplInfo.fIOMDOffset;

			if (ind < numIOPLs) {
				length = ioplList[ind].fIOMDOffset;
			} else {
				length = _length;
			}
			length -= offset;               // Remainder within iopl

			// Subtract offset till this iopl in total list
			offset -= off2Ind;

			// If a mapped address is requested and this is a pre-mapped IOPL
			// then just need to compute an offset relative to the mapped base.
			if (mapped) {
				offset += (ioplInfo.fPageOffset & PAGE_MASK);
				address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
				continue;       // Done leave do/while(false) now
			}

			// The offset is rebased into the current iopl.
			// Now add the iopl 1st page offset.
			offset += ioplInfo.fPageOffset;

			// For external UPLs the fPageInfo field points directly to
			// the upl's upl_page_info_t array.
			if (ioplInfo.fFlags & kIOPLExternUPL) {
				pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
			} else {
				pageList = &pageList[ioplInfo.fPageInfo];
			}

			// Check for direct device non-paged memory
			if (ioplInfo.fFlags & kIOPLOnDevice) {
				address = ptoa_64(pageList->phys_addr) + offset;
				continue;       // Done leave do/while(false) now
			}

			// Now we need to compute the index into the pageList
			UInt pageInd = atop_32(offset);
			offset &= PAGE_MASK;

			// Compute the starting address of this segment
			IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
			if (!pageAddr) {
				panic("!pageList phys_addr");
			}

			address = ptoa_64(pageAddr) + offset;

			// length is currently set to the length of the remainder of the iopl.
			// We need to check that the remainder of the iopl is contiguous.
			// This is indicated by pageList[ind].phys_addr being sequential.
			IOByteCount contigLength = PAGE_SIZE - offset;
			while (contigLength < length
			    && ++pageAddr == pageList[++pageInd].phys_addr) {
				contigLength += PAGE_SIZE;
			}

			if (contigLength < length) {
				length = contigLength;
			}

			assert(address);
			assert(length);
		} while (false);
	}

	// Update return values and state
	isP->fIO.fIOVMAddr = address;
	isP->fIO.fLength   = length;
	isP->fIndex        = ind;
	isP->fOffset2Index = off2Ind;
	isP->fNextOffset   = isP->fIO.fOffset + length;

	return kIOReturnSuccess;
}
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	IOReturn          ret;
	mach_vm_address_t address = 0;
	mach_vm_size_t    length  = 0;
	IOMapper *        mapper  = gIOSystemMapper;
	IOOptionBits      type    = _flags & kIOMemoryTypeMask;

	if (lengthOfSegment) {
		*lengthOfSegment = 0;
	}

	if (offset >= _length) {
		return 0;
	}

	// IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
	// support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
	// map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
	// due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

	if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
		unsigned rangesIndex = 0;
		Ranges vec = _ranges;
		mach_vm_address_t addr;

		// Find starting address within the vector of ranges
		for (;;) {
			getAddrLenForInd(addr, length, type, vec, rangesIndex);
			if (offset < length) {
				break;
			}
			offset -= length; // (make offset relative)
			rangesIndex++;
		}

		// Now that we have the starting range,
		// lets find the last contiguous range
		addr   += offset;
		length -= offset;

		for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
			mach_vm_address_t newAddr;
			mach_vm_size_t    newLen;

			getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
			if (addr + length != newAddr) {
				break;
			}
			length += newLen;
		}
		if (addr) {
			address = (IOPhysicalAddress) addr; // Truncate address to 32bit
		}
	} else {
		IOMDDMAWalkSegmentState _state;
		IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

		state->fOffset = offset;
		state->fLength = _length - offset;
		state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);

		ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

		if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
			DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			    ret, this, state->fOffset,
			    state->fIOVMAddr, state->fLength);
		}
		if (kIOReturnSuccess == ret) {
			address = state->fIOVMAddr;
			length  = state->fLength;
		}

		// dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
		// with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

		if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
			if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
				addr64_t    origAddr = address;
				IOByteCount origLen  = length;

				address = mapper->mapToPhysicalAddress(origAddr);
				length = page_size - (address & (page_size - 1));
				while ((length < origLen)
				    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
					length += page_size;
				}
				if (length > origLen) {
					length = origLen;
				}
			}
		}
	}

	if (!address) {
		length = 0;
	}

	if (lengthOfSegment) {
		*lengthOfSegment = length;
	}

	return address;
}
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment) {
        address = getSourceSegment(offset, lengthOfSegment);
    } else if (options & kIOMemoryMapperNone) {
        address = getPhysicalSegment64(offset, lengthOfSegment);
        address = getPhysicalSegment(offset, lengthOfSegment);

#pragma clang diagnostic pop
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
    return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);

IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
    addr64_t address = 0;
    IOByteCount length = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);
    if (lengthOfSegment) {
        length = *lengthOfSegment;
    if ((address + length) > 0x100000000ULL) {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
            address, (long) length, (getMetaClass())->getClassName());
    return (IOPhysicalAddress) address;

IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
    IOPhysicalAddress phys32;
    IOMapper * mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (gIOSystemMapper) {
        mapper = gIOSystemMapper;
        IOByteCount origLen;

        phys64 = mapper->mapToPhysicalAddress(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
            length += page_size;
        if (length > origLen) {
        *lengthOfSegment = length;
        phys64 = (addr64_t) phys32;

IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
    return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);

IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
    return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
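/*
 * Editor's note: a minimal usage sketch (not part of the original file) showing how a
 * caller would typically walk a descriptor's physical segments with the accessors above.
 * The helper name and the IOLog output are illustrative assumptions only.
 */
#if 0 /* illustrative sketch, not compiled */
static void
DumpPhysicalSegments(IOMemoryDescriptor * md)
{
    // prepare() wires the pages so the physical addresses stay valid for the walk
    if (kIOReturnSuccess != md->prepare()) {
        return;
    }
    IOByteCount offset = 0;
    while (offset < md->getLength()) {
        IOByteCount segLen = 0;
        // kIOMemoryMapperNone asks for CPU physical addresses, bypassing any system mapper
        addr64_t segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!segAddr || !segLen) {
            break;
        }
        IOLog("segment 0x%qx len 0x%qx\n", segAddr, (uint64_t) segLen);
        offset += segLen;
    }
    md->complete();   // balance the prepare()
}
#endif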
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
    if (_task == kernel_task) {
        return (void *) getSourceSegment(offset, lengthOfSegment);
    panic("IOGMD::getVirtualSegment deprecated");

#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics)) {
            return kIOReturnUnderrun;
        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fDirection = getDirection();
        data->fIsPrepared = true; // Assume prepared - fails safe
    } else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
            return kIOReturnUnderrun;
        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;
        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem) {
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    } else if (kIOMDAddDMAMapSpec == op) {
        return kIOReturnUnsupported;
    } else if (kIOMDDMAMap == op) {
        if (dataSize < sizeof(IOMDDMAMapArgs)) {
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
        panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
    } else if (kIOMDDMAUnmap == op) {
        if (dataSize < sizeof(IOMDDMAMapArgs)) {
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
        err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
        return kIOReturnSuccess;
    return kIOReturnBadArgument;

    return kIOReturnSuccess;
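/*
 * Editor's note: a hedged sketch (not in the original source) of how IODMACommand-style
 * code queries a descriptor through dmaCommandOperation(), using only the operation codes
 * and IOMDDMACharacteristics fields referenced above. IOMDDMACharacteristics is an internal
 * structure; the local names and the IOLog output are assumptions for illustration.
 */
#if 0 /* illustrative sketch, not compiled */
static IOReturn
QueryDescriptorCharacteristics(const IOMemoryDescriptor * md)
{
    IOMDDMACharacteristics chars;
    bzero(&chars, sizeof(chars));

    IOReturn ret = md->dmaCommandOperation(kIOMDGetCharacteristics, &chars, sizeof(chars));
    if (kIOReturnSuccess != ret) {
        return ret;
    }
    // fLength/fDirection/fIsPrepared are filled in by the handler above
    IOLog("len 0x%qx dir %u prepared %d\n",
        (uint64_t) chars.fLength, (unsigned) chars.fDirection, chars.fIsPrepared);
    return kIOReturnSuccess;
}
#endif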
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
    IOReturn err = kIOReturnSuccess;
    vm_purgable_t control;

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return kIOReturnNotAttached;
    err = super::setPurgeable(newState, oldState);
    if (kIOMemoryThreadSafe & _flags) {
    // Find the appropriate vm_map for the given task
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
        err = kIOReturnNotReady;
    } else if (!_task) {
        err = kIOReturnUnsupported;
    curMap = get_task_map(_task);
    if (NULL == curMap) {
        err = KERN_INVALID_ARGUMENT;
    // can only do one range
    Ranges vec = _ranges;
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    mach_vm_address_t addr;

    getAddrLenForInd(addr, len, type, vec, 0);

    err = purgeableControlBits(newState, &control, &state);
    if (kIOReturnSuccess != err) {
    err = vm_map_purgable_control(curMap, addr, control, &state);
    if (kIOReturnSuccess == err) {
        err = purgeableStateBits(&state);
    if (kIOMemoryThreadSafe & _flags) {

IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) {
    err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) {
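/*
 * Editor's note: hedged usage sketch (not part of the original file). It assumes the
 * descriptor is backed by memory that supports purgeability; otherwise setPurgeable()
 * fails with one of the errors returned above. The helper name is an assumption.
 */
#if 0 /* illustrative sketch, not compiled */
static IOReturn
MarkBufferVolatile(IOMemoryDescriptor * md, bool makeVolatile)
{
    IOOptionBits oldState = 0;
    // kIOMemoryPurgeableVolatile lets the VM discard the pages under pressure;
    // kIOMemoryPurgeableNonVolatile pins the contents again before reuse.
    IOOptionBits newState = makeVolatile ? kIOMemoryPurgeableVolatile
        : kIOMemoryPurgeableNonVolatile;
    return md->setPurgeable(newState, &oldState);
}
#endif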
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
    IOReturn err = kIOReturnNotReady;

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return kIOReturnNotAttached;
    if (kIOMemoryThreadSafe & _flags) {
    err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    IOMultiMemoryDescriptor * mmd;
    IOSubMemoryDescriptor * smd;
    if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
        err = smd->getPageCounts(residentPageCount, dirtyPageCount);
    } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
        err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
    if (kIOMemoryThreadSafe & _flags) {
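/*
 * Editor's note: small sketch (not in the original source) of reading the resident and
 * dirty page counts reported by getPageCounts(); the IOLog output is illustrative.
 */
#if 0 /* illustrative sketch, not compiled */
static void
LogPageCounts(IOMemoryDescriptor * md)
{
    IOByteCount resident = 0;
    IOByteCount dirty    = 0;
    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
        IOLog("resident %llu dirty %llu\n",
            (unsigned long long) resident, (unsigned long long) dirty);
    }
}
#endif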
#if defined(__arm__) || defined(__arm64__)
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
#else /* defined(__arm__) || defined(__arm64__) */
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
#endif /* defined(__arm__) || defined(__arm64__) */

SetEncryptOp(addr64_t pa, unsigned int count)
    page = atop_64(round_page_64(pa));
    end = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++) {
        pmap_clear_noencrypt(page);

ClearEncryptOp(addr64_t pa, unsigned int count)
    page = atop_64(round_page_64(pa));
    end = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++) {
        pmap_set_noencrypt(page);
IOMemoryDescriptor::performOperation( IOOptionBits options,
    IOByteCount offset, IOByteCount length )
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;
#if defined(__arm__) || defined(__arm64__)
    void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return kIOReturnNotAttached;
    case kIOMemoryIncoherentIOFlush:
#if defined(__arm__) || defined(__arm64__)
        func_ext = &dcache_incoherent_io_flush64;
#if __ARM_COHERENT_IO__
        func_ext(0, 0, 0, &res);
        return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
        func = &dcache_incoherent_io_flush64;
#endif /* defined(__arm__) || defined(__arm64__) */
    case kIOMemoryIncoherentIOStore:
#if defined(__arm__) || defined(__arm64__)
        func_ext = &dcache_incoherent_io_store64;
#if __ARM_COHERENT_IO__
        func_ext(0, 0, 0, &res);
        return kIOReturnSuccess;
#else /* __ARM_COHERENT_IO__ */
#endif /* __ARM_COHERENT_IO__ */
#else /* defined(__arm__) || defined(__arm64__) */
        func = &dcache_incoherent_io_store64;
#endif /* defined(__arm__) || defined(__arm64__) */
    case kIOMemorySetEncrypted:
        func = &SetEncryptOp;
    case kIOMemoryClearEncrypted:
        func = &ClearEncryptOp;
#if defined(__arm__) || defined(__arm64__)
    if ((func == 0) && (func_ext == 0)) {
        return kIOReturnUnsupported;
#else /* defined(__arm__) || defined(__arm64__) */
        return kIOReturnUnsupported;
#endif /* defined(__arm__) || defined(__arm64__) */

    if (kIOMemoryThreadSafe & _flags) {
    remaining = length = min(length, getLength() - offset);
    // (process another target segment?)
        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        // Clip segment length to remaining
        if (dstLen > remaining) {
#if defined(__arm__) || defined(__arm64__)
            (*func)(dstAddr64, dstLen);
            (*func_ext)(dstAddr64, dstLen, remaining, &res);
#else /* defined(__arm__) || defined(__arm64__) */
        (*func)(dstAddr64, dstLen);
#endif /* defined(__arm__) || defined(__arm64__) */
        remaining -= dstLen;
    if (kIOMemoryThreadSafe & _flags) {
    return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
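/*
 * Editor's note: hedged sketch (not in the original source) of the non-coherent I/O
 * flush path above, as a driver might issue it after a device writes into a buffer.
 * The helper name is an assumption.
 */
#if 0 /* illustrative sketch, not compiled */
static IOReturn
FlushAfterDeviceWrite(IOMemoryDescriptor * md)
{
    // Walks the descriptor's physical segments and calls the per-architecture flush
    // helper selected in performOperation(); a no-op on fully coherent configurations.
    return md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
}
#endif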
3072 #if defined(__i386__) || defined(__x86_64__)
3074 #define io_kernel_static_start vm_kernel_stext
3075 #define io_kernel_static_end vm_kernel_etext
3077 #elif defined(__arm__) || defined(__arm64__)
3079 extern vm_offset_t static_memory_end
;
3081 #if defined(__arm64__)
3082 #define io_kernel_static_start vm_kext_base
3083 #else /* defined(__arm64__) */
3084 #define io_kernel_static_start vm_kernel_stext
3085 #endif /* defined(__arm64__) */
3087 #define io_kernel_static_end static_memory_end
3090 #error io_kernel_static_end is undefined for this architecture
3093 static kern_return_t
3094 io_get_kernel_static_upl(
3097 upl_size_t
*upl_size
,
3099 upl_page_info_array_t page_list
,
3100 unsigned int *count
,
3101 ppnum_t
*highest_page
)
3103 unsigned int pageCount
, page
;
3105 ppnum_t highestPage
= 0;
3107 pageCount
= atop_32(*upl_size
);
3108 if (pageCount
> *count
) {
3114 for (page
= 0; page
< pageCount
; page
++) {
3115 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
3119 page_list
[page
].phys_addr
= phys
;
3120 page_list
[page
].free_when_done
= 0;
3121 page_list
[page
].absent
= 0;
3122 page_list
[page
].dirty
= 0;
3123 page_list
[page
].precious
= 0;
3124 page_list
[page
].device
= 0;
3125 if (phys
> highestPage
) {
3130 *highest_page
= highestPage
;
3132 return (page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
;
3136 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
3138 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3139 IOReturn error
= kIOReturnSuccess
;
3141 upl_page_info_array_t pageInfo
;
3143 vm_tag_t tag
= VM_KERN_MEMORY_NONE
;
3145 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
3147 if ((kIODirectionOutIn
& forDirection
) == kIODirectionNone
) {
3148 forDirection
= (IODirection
) (forDirection
| getDirection());
3151 dataP
= getDataP(_memoryEntries
);
3152 upl_control_flags_t uplFlags
; // This Mem Desc's default flags for upl creation
3153 switch (kIODirectionOutIn
& forDirection
) {
3154 case kIODirectionOut
:
3155 // Pages do not need to be marked as dirty on commit
3156 uplFlags
= UPL_COPYOUT_FROM
;
3157 dataP
->fDMAAccess
= kIODMAMapReadAccess
;
3160 case kIODirectionIn
:
3161 dataP
->fDMAAccess
= kIODMAMapWriteAccess
;
3162 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3166 dataP
->fDMAAccess
= kIODMAMapReadAccess
| kIODMAMapWriteAccess
;
3167 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3172 if ((kIOMemoryPreparedReadOnly
& _flags
) && !(UPL_COPYOUT_FROM
& uplFlags
)) {
3173 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3174 error
= kIOReturnNotWritable
;
3179 mapper
= dataP
->fMapper
;
3180 dataP
->fMappedBaseValid
= dataP
->fMappedBase
= 0;
3182 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
3184 if (VM_KERN_MEMORY_NONE
== tag
) {
3185 tag
= IOMemoryTag(kernel_map
);
3188 if (kIODirectionPrepareToPhys32
& forDirection
) {
3190 uplFlags
|= UPL_NEED_32BIT_ADDR
;
3192 if (dataP
->fDMAMapNumAddressBits
> 32) {
3193 dataP
->fDMAMapNumAddressBits
= 32;
3196 if (kIODirectionPrepareNoFault
& forDirection
) {
3197 uplFlags
|= UPL_REQUEST_NO_FAULT
;
3199 if (kIODirectionPrepareNoZeroFill
& forDirection
) {
3200 uplFlags
|= UPL_NOZEROFILLIO
;
3202 if (kIODirectionPrepareNonCoherent
& forDirection
) {
3203 uplFlags
|= UPL_REQUEST_FORCE_COHERENCY
;
3208 // Note that appendBytes(NULL) zeros the data up to the desired length
3209 // and the length parameter is an unsigned int
3210 size_t uplPageSize
= dataP
->fPageCnt
* sizeof(upl_page_info_t
);
3211 if (uplPageSize
> ((unsigned int)uplPageSize
)) {
3212 return kIOReturnNoMemory
;
3214 if (!_memoryEntries
->appendBytes(0, uplPageSize
)) {
3215 return kIOReturnNoMemory
;
3219 // Find the appropriate vm_map for the given task
3221 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
)) {
3224 curMap
= get_task_map(_task
);
3227 // Iterate over the vector of virtual ranges
3228 Ranges vec
= _ranges
;
3229 unsigned int pageIndex
= 0;
3230 IOByteCount mdOffset
= 0;
3231 ppnum_t highestPage
= 0;
3233 IOMemoryEntry
* memRefEntry
= 0;
3235 memRefEntry
= &_memRef
->entries
[0];
3238 for (UInt range
= 0; range
< _rangesCount
; range
++) {
3240 mach_vm_address_t startPage
, startPageOffset
;
3241 mach_vm_size_t numBytes
;
3242 ppnum_t highPage
= 0;
3244 // Get the startPage address and length of vec[range]
3245 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
3246 startPageOffset
= startPage
& PAGE_MASK
;
3247 iopl
.fPageOffset
= startPageOffset
;
3248 numBytes
+= startPageOffset
;
3249 startPage
= trunc_page_64(startPage
);
3252 iopl
.fMappedPage
= mapBase
+ pageIndex
;
3254 iopl
.fMappedPage
= 0;
3257 // Iterate over the current range, creating UPLs
3259 vm_address_t kernelStart
= (vm_address_t
) startPage
;
3263 } else if (_memRef
) {
3266 assert(_task
== kernel_task
);
3267 theMap
= IOPageableMapForAddress(kernelStart
);
3270 // ioplFlags is an in/out parameter
3271 upl_control_flags_t ioplFlags
= uplFlags
;
3272 dataP
= getDataP(_memoryEntries
);
3273 pageInfo
= getPageList(dataP
);
3274 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
3276 mach_vm_size_t _ioplSize
= round_page(numBytes
);
3277 upl_size_t ioplSize
= (_ioplSize
<= MAX_UPL_SIZE_BYTES
) ? _ioplSize
: MAX_UPL_SIZE_BYTES
;
3278 unsigned int numPageInfo
= atop_32(ioplSize
);
3280 if ((theMap
== kernel_map
)
3281 && (kernelStart
>= io_kernel_static_start
)
3282 && (kernelStart
< io_kernel_static_end
)) {
3283 error
= io_get_kernel_static_upl(theMap
,
3290 } else if (_memRef
) {
3291 memory_object_offset_t entryOffset
;
3293 entryOffset
= mdOffset
;
3294 entryOffset
= (entryOffset
- iopl
.fPageOffset
- memRefEntry
->offset
);
3295 if (entryOffset
>= memRefEntry
->size
) {
3297 if (memRefEntry
>= &_memRef
->entries
[_memRef
->count
]) {
3298 panic("memRefEntry");
3302 if (ioplSize
> (memRefEntry
->size
- entryOffset
)) {
3303 ioplSize
= (memRefEntry
->size
- entryOffset
);
3305 error
= memory_object_iopl_request(memRefEntry
->entry
,
3315 error
= vm_map_create_upl(theMap
,
3317 (upl_size_t
*)&ioplSize
,
3325 if (error
!= KERN_SUCCESS
) {
3332 highPage
= upl_get_highest_page(iopl
.fIOPL
);
3334 if (highPage
> highestPage
) {
3335 highestPage
= highPage
;
3338 if (baseInfo
->device
) {
3340 iopl
.fFlags
= kIOPLOnDevice
;
3345 iopl
.fIOMDOffset
= mdOffset
;
3346 iopl
.fPageInfo
= pageIndex
;
3347 if (mapper
&& pageIndex
&& (page_mask
& (mdOffset
+ startPageOffset
))) {
3348 dataP
->fDiscontig
= true;
3351 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
                // Clean up partially created and unsaved iopl
3354 upl_abort(iopl
.fIOPL
, 0);
3355 upl_deallocate(iopl
.fIOPL
);
            // Check for multiple iopls in one virtual range
3362 pageIndex
+= numPageInfo
;
3363 mdOffset
-= iopl
.fPageOffset
;
3364 if (ioplSize
< numBytes
) {
3365 numBytes
-= ioplSize
;
3366 startPage
+= ioplSize
;
3367 mdOffset
+= ioplSize
;
3368 iopl
.fPageOffset
= 0;
3370 iopl
.fMappedPage
= mapBase
+ pageIndex
;
3373 mdOffset
+= numBytes
;
3379 _highestPage
= highestPage
;
3381 if (UPL_COPYOUT_FROM
& uplFlags
) {
3382 _flags
|= kIOMemoryPreparedReadOnly
;
3387 if (!(_flags
& kIOMemoryAutoPrepare
) && (kIOReturnSuccess
== error
)) {
3388 dataP
= getDataP(_memoryEntries
);
3389 if (!dataP
->fWireTracking
.link
.next
) {
3390 IOTrackingAdd(gIOWireTracking
, &dataP
->fWireTracking
, ptoa(_pages
), false, tag
);
3393 #endif /* IOTRACKING */
3399 dataP
= getDataP(_memoryEntries
);
3400 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
3401 ioPLBlock
*ioplList
= getIOPLList(dataP
);
3403 for (UInt range
= 0; range
< done
; range
++) {
3404 if (ioplList
[range
].fIOPL
) {
3405 upl_abort(ioplList
[range
].fIOPL
, 0);
3406 upl_deallocate(ioplList
[range
].fIOPL
);
3409 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
3412 if (error
== KERN_FAILURE
) {
3413 error
= kIOReturnCannotWire
;
3414 } else if (error
== KERN_MEMORY_ERROR
) {
3415 error
= kIOReturnNoResources
;
3422 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size
, IOMapper
* mapper
)
3425 unsigned dataSize
= size
;
3427 if (!_memoryEntries
) {
3428 _memoryEntries
= OSData::withCapacity(dataSize
);
3429 if (!_memoryEntries
) {
3432 } else if (!_memoryEntries
->initWithCapacity(dataSize
)) {
3436 _memoryEntries
->appendBytes(0, computeDataSize(0, 0));
3437 dataP
= getDataP(_memoryEntries
);
3439 if (mapper
== kIOMapperWaitSystem
) {
3440 IOMapper::checkForSystemMapper();
3441 mapper
= IOMapper::gSystem
;
3443 dataP
->fMapper
= mapper
;
3444 dataP
->fPageCnt
= 0;
3445 dataP
->fMappedBase
= 0;
3446 dataP
->fDMAMapNumAddressBits
= 64;
3447 dataP
->fDMAMapAlignment
= 0;
3448 dataP
->fPreparationID
= kIOPreparationIDUnprepared
;
3449 dataP
->fDiscontig
= false;
3450 dataP
->fCompletionError
= false;
3451 dataP
->fMappedBaseValid
= false;
3457 IOMemoryDescriptor::dmaMap(
3459 IODMACommand
* command
,
3460 const IODMAMapSpecification
* mapSpec
,
3463 uint64_t * mapAddress
,
3464 uint64_t * mapLength
)
3467 uint32_t mapOptions
;
3470 mapOptions
|= kIODMAMapReadAccess
;
3471 if (!(kIOMemoryPreparedReadOnly
& _flags
)) {
3472 mapOptions
|= kIODMAMapWriteAccess
;
3475 err
= mapper
->iovmMapMemory(this, offset
, length
, mapOptions
,
3476 mapSpec
, command
, NULL
, mapAddress
, mapLength
);
3478 if (kIOReturnSuccess
== err
) {
3479 dmaMapRecord(mapper
, command
, *mapLength
);
3486 IOMemoryDescriptor::dmaMapRecord(
3488 IODMACommand
* command
,
3491 kern_allocation_name_t alloc
;
3494 if ((alloc
= mapper
->fAllocName
) /* && mapper != IOMapper::gSystem */) {
3495 kern_allocation_update_size(mapper
->fAllocName
, mapLength
);
3501 prior
= OSAddAtomic16(1, &_dmaReferences
);
3503 if (alloc
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
3505 mapLength
= _length
;
3506 kern_allocation_update_subtotal(alloc
, _kernelTag
, mapLength
);
3514 IOMemoryDescriptor::dmaUnmap(
3516 IODMACommand
* command
,
3518 uint64_t mapAddress
,
3522 kern_allocation_name_t alloc
;
3523 kern_allocation_name_t mapName
;
3530 if (_dmaReferences
) {
3531 prior
= OSAddAtomic16(-1, &_dmaReferences
);
3533 panic("_dmaReferences underflow");
3538 return kIOReturnSuccess
;
3541 ret
= mapper
->iovmUnmapMemory(this, command
, mapAddress
, mapLength
);
3543 if ((alloc
= mapper
->fAllocName
)) {
3544 kern_allocation_update_size(alloc
, -mapLength
);
3545 if ((1 == prior
) && mapName
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
3546 mapLength
= _length
;
3547 kern_allocation_update_subtotal(mapName
, _kernelTag
, -mapLength
);
3555 IOGeneralMemoryDescriptor::dmaMap(
3557 IODMACommand
* command
,
3558 const IODMAMapSpecification
* mapSpec
,
3561 uint64_t * mapAddress
,
3562 uint64_t * mapLength
)
3564 IOReturn err
= kIOReturnSuccess
;
3566 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3569 if (kIOMemoryHostOnly
& _flags
) {
3570 return kIOReturnSuccess
;
3572 if (kIOMemoryRemote
& _flags
) {
3573 return kIOReturnNotAttached
;
3576 if ((type
== kIOMemoryTypePhysical
) || (type
== kIOMemoryTypePhysical64
)
3577 || offset
|| (length
!= _length
)) {
3578 err
= super::dmaMap(mapper
, command
, mapSpec
, offset
, length
, mapAddress
, mapLength
);
3579 } else if (_memoryEntries
&& _pages
&& (dataP
= getDataP(_memoryEntries
))) {
3580 const ioPLBlock
* ioplList
= getIOPLList(dataP
);
3581 upl_page_info_t
* pageList
;
3582 uint32_t mapOptions
= 0;
3584 IODMAMapSpecification mapSpec
;
3585 bzero(&mapSpec
, sizeof(mapSpec
));
3586 mapSpec
.numAddressBits
= dataP
->fDMAMapNumAddressBits
;
3587 mapSpec
.alignment
= dataP
->fDMAMapAlignment
;
3589 // For external UPLs the fPageInfo field points directly to
3590 // the upl's upl_page_info_t array.
3591 if (ioplList
->fFlags
& kIOPLExternUPL
) {
3592 pageList
= (upl_page_info_t
*) ioplList
->fPageInfo
;
3593 mapOptions
|= kIODMAMapPagingPath
;
3595 pageList
= getPageList(dataP
);
3598 if ((_length
== ptoa_64(_pages
)) && !(page_mask
& ioplList
->fPageOffset
)) {
3599 mapOptions
|= kIODMAMapPageListFullyOccupied
;
3602 assert(dataP
->fDMAAccess
);
3603 mapOptions
|= dataP
->fDMAAccess
;
3605 // Check for direct device non-paged memory
3606 if (ioplList
->fFlags
& kIOPLOnDevice
) {
3607 mapOptions
|= kIODMAMapPhysicallyContiguous
;
3610 IODMAMapPageList dmaPageList
=
3612 .pageOffset
= (uint32_t)(ioplList
->fPageOffset
& page_mask
),
3613 .pageListCount
= _pages
,
3614 .pageList
= &pageList
[0]
3616 err
= mapper
->iovmMapMemory(this, offset
, length
, mapOptions
, &mapSpec
,
3617 command
, &dmaPageList
, mapAddress
, mapLength
);
3619 if (kIOReturnSuccess
== err
) {
3620 dmaMapRecord(mapper
, command
, *mapLength
);
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.

IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
    IOReturn error = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
        return kIOReturnSuccess;
    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return kIOReturnNotAttached;
    IOLockLock(_prepareLock);
    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
        error = wireVirtual(forDirection);
    if (kIOReturnSuccess == error) {
        if (1 == ++_wireCount) {
            if (kIOMemoryClearEncrypt & _flags) {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
    IOLockUnlock(_prepareLock);
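/*
 * Editor's note: minimal sketch (not in the original source) of the prepare()/complete()
 * pairing described in the comment above. The helper name, its parameters, and the
 * "program the transfer" step are placeholders.
 */
#if 0 /* illustrative sketch, not compiled */
static IOReturn
WireForIO(task_t task, mach_vm_address_t address, mach_vm_size_t length)
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
        address, length, kIODirectionInOut, task);
    if (!md) {
        return kIOReturnNoMemory;
    }
    IOReturn ret = md->prepare();        // wires the pages for I/O
    if (kIOReturnSuccess == ret) {
        // ... program the DMA transfer here ...
        md->complete();                  // must balance the prepare()
    }
    md->release();
    return ret;
}
#endif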
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
3685 IOGeneralMemoryDescriptor::complete(IODirection forDirection
)
3687 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3690 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)) {
3691 return kIOReturnSuccess
;
3694 assert(!(kIOMemoryRemote
& _flags
));
3695 if (kIOMemoryRemote
& _flags
) {
3696 return kIOReturnNotAttached
;
3700 IOLockLock(_prepareLock
);
3707 dataP
= getDataP(_memoryEntries
);
3712 if (kIODirectionCompleteWithError
& forDirection
) {
3713 dataP
->fCompletionError
= true;
3716 if ((kIOMemoryClearEncrypt
& _flags
) && (1 == _wireCount
)) {
3717 performOperation(kIOMemorySetEncrypted
, 0, _length
);
3721 if (!_wireCount
|| (kIODirectionCompleteWithDataValid
& forDirection
)) {
3722 ioPLBlock
*ioplList
= getIOPLList(dataP
);
3723 UInt ind
, count
= getNumIOPL(_memoryEntries
, dataP
);
3726 // kIODirectionCompleteWithDataValid & forDirection
3727 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
3729 tag
= getVMTag(kernel_map
);
3730 for (ind
= 0; ind
< count
; ind
++) {
3731 if (ioplList
[ind
].fIOPL
) {
3732 iopl_valid_data(ioplList
[ind
].fIOPL
, tag
);
3737 if (_dmaReferences
) {
3738 panic("complete() while dma active");
3741 if (dataP
->fMappedBaseValid
) {
3742 dmaUnmap(dataP
->fMapper
, NULL
, 0, dataP
->fMappedBase
, dataP
->fMappedLength
);
3743 dataP
->fMappedBaseValid
= dataP
->fMappedBase
= 0;
3746 if (dataP
->fWireTracking
.link
.next
) {
3747 IOTrackingRemove(gIOWireTracking
, &dataP
->fWireTracking
, ptoa(_pages
));
3749 #endif /* IOTRACKING */
3750 // Only complete iopls that we created which are for TypeVirtual
3751 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
3752 for (ind
= 0; ind
< count
; ind
++) {
3753 if (ioplList
[ind
].fIOPL
) {
3754 if (dataP
->fCompletionError
) {
3755 upl_abort(ioplList
[ind
].fIOPL
, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3757 upl_commit(ioplList
[ind
].fIOPL
, 0, 0);
3759 upl_deallocate(ioplList
[ind
].fIOPL
);
3762 } else if (kIOMemoryTypeUPL
== type
) {
3763 upl_set_referenced(ioplList
[0].fIOPL
, false);
3766 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
3768 dataP
->fPreparationID
= kIOPreparationIDUnprepared
;
3769 _flags
&= ~kIOMemoryPreparedReadOnly
;
3775 IOLockUnlock(_prepareLock
);
3778 return kIOReturnSuccess
;
3782 IOGeneralMemoryDescriptor::doMap(
3783 vm_map_t __addressMap
,
3784 IOVirtualAddress
* __address
,
3785 IOOptionBits options
,
3786 IOByteCount __offset
,
3787 IOByteCount __length
)
3790 if (!(kIOMap64Bit
& options
)) {
3791 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3793 #endif /* !__LP64__ */
3797 IOMemoryMap
* mapping
= (IOMemoryMap
*) *__address
;
3798 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
3799 mach_vm_size_t length
= mapping
->fLength
;
3801 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3802 Ranges vec
= _ranges
;
3804 mach_vm_address_t range0Addr
= 0;
3805 mach_vm_size_t range0Len
= 0;
3807 if ((offset
>= _length
) || ((offset
+ length
) > _length
)) {
3808 return kIOReturnBadArgument
;
3811 assert(!(kIOMemoryRemote
& _flags
));
3812 if (kIOMemoryRemote
& _flags
) {
3817 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
3820 // mapping source == dest? (could be much better)
3822 && (mapping
->fAddressTask
== _task
)
3823 && (mapping
->fAddressMap
== get_task_map(_task
))
3824 && (options
& kIOMapAnywhere
)
3825 && (!(kIOMapUnique
& options
))
3826 && (1 == _rangesCount
)
3829 && (length
<= range0Len
)) {
3830 mapping
->fAddress
= range0Addr
;
3831 mapping
->fOptions
|= kIOMapStatic
;
3833 return kIOReturnSuccess
;
3837 IOOptionBits createOptions
= 0;
3838 if (!(kIOMapReadOnly
& options
)) {
3839 createOptions
|= kIOMemoryReferenceWrite
;
3840 #if DEVELOPMENT || DEBUG
3841 if (kIODirectionOut
== (kIODirectionOutIn
& _flags
)) {
3842 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3846 err
= memoryReferenceCreate(createOptions
, &_memRef
);
3847 if (kIOReturnSuccess
!= err
) {
3852 memory_object_t pager
;
3853 pager
= (memory_object_t
) (reserved
? reserved
->dp
.devicePager
: 0);
3855 // <upl_transpose //
3856 if ((kIOMapReference
| kIOMapUnique
) == ((kIOMapReference
| kIOMapUnique
) & options
)) {
3860 upl_control_flags_t flags
;
3861 unsigned int lock_count
;
3863 if (!_memRef
|| (1 != _memRef
->count
)) {
3864 err
= kIOReturnNotReadable
;
3868 size
= round_page(mapping
->fLength
);
3869 flags
= UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
3870 | UPL_SET_LITE
| UPL_SET_IO_WIRE
| UPL_BLOCK_ACCESS
;
3872 if (KERN_SUCCESS
!= memory_object_iopl_request(_memRef
->entries
[0].entry
, 0, &size
, &redirUPL2
,
3874 &flags
, getVMTag(kernel_map
))) {
3878 for (lock_count
= 0;
3879 IORecursiveLockHaveLock(gIOMemoryLock
);
3883 err
= upl_transpose(redirUPL2
, mapping
->fRedirUPL
);
3890 if (kIOReturnSuccess
!= err
) {
3891 IOLog("upl_transpose(%x)\n", err
);
3892 err
= kIOReturnSuccess
;
3896 upl_commit(redirUPL2
, NULL
, 0);
3897 upl_deallocate(redirUPL2
);
3901 // swap the memEntries since they now refer to different vm_objects
3902 IOMemoryReference
* me
= _memRef
;
3903 _memRef
= mapping
->fMemory
->_memRef
;
3904 mapping
->fMemory
->_memRef
= me
;
3907 err
= populateDevicePager( pager
, mapping
->fAddressMap
, mapping
->fAddress
, offset
, length
, options
);
3911 // upl_transpose> //
3913 err
= memoryReferenceMap(_memRef
, mapping
->fAddressMap
, offset
, length
, options
, &mapping
->fAddress
);
3915 if ((err
== KERN_SUCCESS
) && ((kIOTracking
& gIOKitDebug
) || _task
)) {
        // only dram maps in the default on development case
3917 IOTrackingAddUser(gIOMapTracking
, &mapping
->fTracking
, mapping
->fLength
);
3919 #endif /* IOTRACKING */
3920 if ((err
== KERN_SUCCESS
) && pager
) {
3921 err
= populateDevicePager(pager
, mapping
->fAddressMap
, mapping
->fAddress
, offset
, length
, options
);
3923 if (err
!= KERN_SUCCESS
) {
3924 doUnmap(mapping
->fAddressMap
, (IOVirtualAddress
) mapping
, 0);
3925 } else if (kIOMapDefaultCache
== (options
& kIOMapCacheMask
)) {
3926 mapping
->fOptions
|= ((_flags
& kIOMemoryBufferCacheMask
) >> kIOMemoryBufferCacheShift
);
3936 IOMemoryMapTracking(IOTrackingUser
* tracking
, task_t
* task
,
3937 mach_vm_address_t
* address
, mach_vm_size_t
* size
)
3939 #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3941 IOMemoryMap
* map
= (typeof(map
))(((uintptr_t) tracking
) - iomap_offsetof(IOMemoryMap
, fTracking
));
3943 if (!map
->fAddressMap
|| (map
->fAddressMap
!= get_task_map(map
->fAddressTask
))) {
3944 return kIOReturnNotReady
;
3947 *task
= map
->fAddressTask
;
3948 *address
= map
->fAddress
;
3949 *size
= map
->fLength
;
3951 return kIOReturnSuccess
;
3953 #endif /* IOTRACKING */
3956 IOGeneralMemoryDescriptor::doUnmap(
3957 vm_map_t addressMap
,
3958 IOVirtualAddress __address
,
3959 IOByteCount __length
)
3961 return super::doUnmap(addressMap
, __address
, __length
);
3964 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3967 #define super OSObject
3969 OSDefineMetaClassAndStructors( IOMemoryMap
, OSObject
)
3971 OSMetaClassDefineReservedUnused(IOMemoryMap
, 0);
3972 OSMetaClassDefineReservedUnused(IOMemoryMap
, 1);
3973 OSMetaClassDefineReservedUnused(IOMemoryMap
, 2);
3974 OSMetaClassDefineReservedUnused(IOMemoryMap
, 3);
3975 OSMetaClassDefineReservedUnused(IOMemoryMap
, 4);
3976 OSMetaClassDefineReservedUnused(IOMemoryMap
, 5);
3977 OSMetaClassDefineReservedUnused(IOMemoryMap
, 6);
3978 OSMetaClassDefineReservedUnused(IOMemoryMap
, 7);
3980 /* ex-inline function implementation */
3982 IOMemoryMap::getPhysicalAddress()
3984 return getPhysicalSegment( 0, 0 );
3987 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3992 mach_vm_address_t toAddress
,
3993 IOOptionBits _options
,
3994 mach_vm_size_t _offset
,
3995 mach_vm_size_t _length
)
4001 if (!super::init()) {
4005 fAddressMap
= get_task_map(intoTask
);
4009 vm_map_reference(fAddressMap
);
4011 fAddressTask
= intoTask
;
4012 fOptions
= _options
;
4015 fAddress
= toAddress
;
4021 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor
* _memory
, mach_vm_size_t _offset
)
4028 if ((_offset
+ fLength
) > _memory
->getLength()) {
4036 if (fMemory
!= _memory
) {
4037 fMemory
->removeMapping(this);
4047 IOMemoryDescriptor::doMap(
4048 vm_map_t __addressMap
,
4049 IOVirtualAddress
* __address
,
4050 IOOptionBits options
,
4051 IOByteCount __offset
,
4052 IOByteCount __length
)
4054 return kIOReturnUnsupported
;
4058 IOMemoryDescriptor::handleFault(
4060 mach_vm_size_t sourceOffset
,
4061 mach_vm_size_t length
)
4063 if (kIOMemoryRedirected
& _flags
) {
4065 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset
);
4069 } while (kIOMemoryRedirected
& _flags
);
4071 return kIOReturnSuccess
;
4075 IOMemoryDescriptor::populateDevicePager(
4077 vm_map_t addressMap
,
4078 mach_vm_address_t address
,
4079 mach_vm_size_t sourceOffset
,
4080 mach_vm_size_t length
,
4081 IOOptionBits options
)
4083 IOReturn err
= kIOReturnSuccess
;
4084 memory_object_t pager
= (memory_object_t
) _pager
;
4085 mach_vm_size_t size
;
4086 mach_vm_size_t bytes
;
4087 mach_vm_size_t page
;
4088 mach_vm_size_t pageOffset
;
4089 mach_vm_size_t pagerOffset
;
4090 IOPhysicalLength segLen
, chunk
;
4094 type
= _flags
& kIOMemoryTypeMask
;
4096 if (reserved
->dp
.pagerContig
) {
4101 physAddr
= getPhysicalSegment( sourceOffset
, &segLen
, kIOMemoryMapperNone
);
4103 pageOffset
= physAddr
- trunc_page_64( physAddr
);
4104 pagerOffset
= sourceOffset
;
4106 size
= length
+ pageOffset
;
4107 physAddr
-= pageOffset
;
4109 segLen
+= pageOffset
;
4112 // in the middle of the loop only map whole pages
4113 if (segLen
>= bytes
) {
4115 } else if (segLen
!= trunc_page(segLen
)) {
4116 err
= kIOReturnVMError
;
4118 if (physAddr
!= trunc_page_64(physAddr
)) {
4119 err
= kIOReturnBadArgument
;
4122 if (kIOReturnSuccess
!= err
) {
4126 #if DEBUG || DEVELOPMENT
4127 if ((kIOMemoryTypeUPL
!= type
)
4128 && pmap_has_managed_page(atop_64(physAddr
), atop_64(physAddr
+ segLen
- 1))) {
4129 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr
, segLen
);
4131 #endif /* DEBUG || DEVELOPMENT */
4133 chunk
= (reserved
->dp
.pagerContig
? round_page(segLen
) : page_size
);
4135 (page
< segLen
) && (KERN_SUCCESS
== err
);
4137 err
= device_pager_populate_object(pager
, pagerOffset
,
4138 (ppnum_t
)(atop_64(physAddr
+ page
)), chunk
);
4139 pagerOffset
+= chunk
;
4142 assert(KERN_SUCCESS
== err
);
4147 // This call to vm_fault causes an early pmap level resolution
4148 // of the mappings created above for kernel mappings, since
4149 // faulting in later can't take place from interrupt level.
4150 if ((addressMap
== kernel_map
) && !(kIOMemoryRedirected
& _flags
)) {
4151 err
= vm_fault(addressMap
,
4152 (vm_map_offset_t
)trunc_page_64(address
),
4153 options
& kIOMapReadOnly
? VM_PROT_READ
: VM_PROT_READ
| VM_PROT_WRITE
,
4154 FALSE
, VM_KERN_MEMORY_NONE
,
4156 (vm_map_offset_t
)0);
4158 if (KERN_SUCCESS
!= err
) {
4163 sourceOffset
+= segLen
- pageOffset
;
4167 }while (bytes
&& (physAddr
= getPhysicalSegment( sourceOffset
, &segLen
, kIOMemoryMapperNone
)));
4170 err
= kIOReturnBadArgument
;
4177 IOMemoryDescriptor::doUnmap(
4178 vm_map_t addressMap
,
4179 IOVirtualAddress __address
,
4180 IOByteCount __length
)
4183 IOMemoryMap
* mapping
;
4184 mach_vm_address_t address
;
4185 mach_vm_size_t length
;
4191 mapping
= (IOMemoryMap
*) __address
;
4192 addressMap
= mapping
->fAddressMap
;
4193 address
= mapping
->fAddress
;
4194 length
= mapping
->fLength
;
4196 if (kIOMapOverwrite
& mapping
->fOptions
) {
4199 if ((addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
)) {
4200 addressMap
= IOPageableMapForAddress( address
);
4203 if (kIOLogMapping
& gIOKitDebug
) {
4204 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4205 addressMap
, address
, length
);
4208 err
= mach_vm_deallocate( addressMap
, address
, length
);
4212 IOTrackingRemoveUser(gIOMapTracking
, &mapping
->fTracking
);
4213 #endif /* IOTRACKING */
4219 IOMemoryDescriptor::redirect( task_t safeTask
, bool doRedirect
)
4221 IOReturn err
= kIOReturnSuccess
;
4222 IOMemoryMap
* mapping
= 0;
4228 _flags
|= kIOMemoryRedirected
;
4230 _flags
&= ~kIOMemoryRedirected
;
4234 if ((iter
= OSCollectionIterator::withCollection( _mappings
))) {
4235 memory_object_t pager
;
4238 pager
= (memory_object_t
) reserved
->dp
.devicePager
;
4240 pager
= MACH_PORT_NULL
;
4243 while ((mapping
= (IOMemoryMap
*) iter
->getNextObject())) {
4244 mapping
->redirect( safeTask
, doRedirect
);
4245 if (!doRedirect
&& !safeTask
&& pager
&& (kernel_map
== mapping
->fAddressMap
)) {
4246 err
= populateDevicePager(pager
, mapping
->fAddressMap
, mapping
->fAddress
, mapping
->fOffset
, mapping
->fLength
, kIOMapDefaultCache
);
4261 // temporary binary compatibility
4262 IOSubMemoryDescriptor
* subMem
;
4263 if ((subMem
= OSDynamicCast( IOSubMemoryDescriptor
, this))) {
4264 err
= subMem
->redirect( safeTask
, doRedirect
);
4266 err
= kIOReturnSuccess
;
4268 #endif /* !__LP64__ */
4274 IOMemoryMap::redirect( task_t safeTask
, bool doRedirect
)
4276 IOReturn err
= kIOReturnSuccess
;
4279 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4291 if ((!safeTask
|| (get_task_map(safeTask
) != fAddressMap
))
4292 && (0 == (fOptions
& kIOMapStatic
))) {
4293 IOUnmapPages( fAddressMap
, fAddress
, fLength
);
4294 err
= kIOReturnSuccess
;
4296 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect
, this, fAddress
, fLength
, fAddressMap
);
4298 } else if (kIOMapWriteCombineCache
== (fOptions
& kIOMapCacheMask
)) {
4299 IOOptionBits newMode
;
4300 newMode
= (fOptions
& ~kIOMapCacheMask
) | (doRedirect
? kIOMapInhibitCache
: kIOMapWriteCombineCache
);
4301 IOProtectCacheMode(fAddressMap
, fAddress
, fLength
, newMode
);
4307 if ((((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4308 || ((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
))
4310 && (doRedirect
!= (0 != (fMemory
->_flags
& kIOMemoryRedirected
)))) {
4311 fMemory
->redirect(safeTask
, doRedirect
);
4318 IOMemoryMap::unmap( void )
4324 if (fAddress
&& fAddressMap
&& (0 == fSuperMap
) && fMemory
4325 && (0 == (kIOMapStatic
& fOptions
))) {
4326 err
= fMemory
->doUnmap(fAddressMap
, (IOVirtualAddress
) this, 0);
4328 err
= kIOReturnSuccess
;
4332 vm_map_deallocate(fAddressMap
);
4344 IOMemoryMap::taskDied( void )
4347 if (fUserClientUnmap
) {
4352 IOTrackingRemoveUser(gIOMapTracking
, &fTracking
);
4354 #endif /* IOTRACKING */
4357 vm_map_deallocate(fAddressMap
);
4366 IOMemoryMap::userClientUnmap( void )
4368 fUserClientUnmap
= true;
4369 return kIOReturnSuccess
;
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
4377 IOMemoryMap::taggedRelease(const void *tag
) const
4380 super::taggedRelease(tag
, 2);
4391 fMemory
->removeMapping(this);
4396 if (fOwner
&& (fOwner
!= fMemory
)) {
4398 fOwner
->removeMapping(this);
4403 fSuperMap
->release();
4407 upl_commit(fRedirUPL
, NULL
, 0);
4408 upl_deallocate(fRedirUPL
);
4415 IOMemoryMap::getLength()
4421 IOMemoryMap::getVirtualAddress()
4425 fSuperMap
->getVirtualAddress();
4426 } else if (fAddressMap
4427 && vm_map_is_64bit(fAddressMap
)
4428 && (sizeof(IOVirtualAddress
) < 8)) {
4429 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress
);
4431 #endif /* !__LP64__ */
4438 IOMemoryMap::getAddress()
4444 IOMemoryMap::getSize()
4448 #endif /* !__LP64__ */
4452 IOMemoryMap::getAddressTask()
4455 return fSuperMap
->getAddressTask();
4457 return fAddressTask
;
4462 IOMemoryMap::getMapOptions()
4467 IOMemoryDescriptor
*
4468 IOMemoryMap::getMemoryDescriptor()
4474 IOMemoryMap::copyCompatible(
4475 IOMemoryMap
* newMapping
)
4477 task_t task
= newMapping
->getAddressTask();
4478 mach_vm_address_t toAddress
= newMapping
->fAddress
;
4479 IOOptionBits _options
= newMapping
->fOptions
;
4480 mach_vm_size_t _offset
= newMapping
->fOffset
;
4481 mach_vm_size_t _length
= newMapping
->fLength
;
4483 if ((!task
) || (!fAddressMap
) || (fAddressMap
!= get_task_map(task
))) {
4486 if ((fOptions
^ _options
) & kIOMapReadOnly
) {
4489 if ((kIOMapDefaultCache
!= (_options
& kIOMapCacheMask
))
4490 && ((fOptions
^ _options
) & kIOMapCacheMask
)) {
4494 if ((0 == (_options
& kIOMapAnywhere
)) && (fAddress
!= toAddress
)) {
4498 if (_offset
< fOffset
) {
4504 if ((_offset
+ _length
) > fLength
) {
4509 if ((fLength
== _length
) && (!_offset
)) {
4512 newMapping
->fSuperMap
= this;
4513 newMapping
->fOffset
= fOffset
+ _offset
;
4514 newMapping
->fAddress
= fAddress
+ _offset
;
4521 IOMemoryMap::wireRange(
4523 mach_vm_size_t offset
,
4524 mach_vm_size_t length
)
4527 mach_vm_address_t start
= trunc_page_64(fAddress
+ offset
);
4528 mach_vm_address_t end
= round_page_64(fAddress
+ offset
+ length
);
4531 prot
= (kIODirectionOutIn
& options
);
4533 kr
= vm_map_wire_kernel(fAddressMap
, start
, end
, prot
, fMemory
->getVMTag(kernel_map
), FALSE
);
4535 kr
= vm_map_unwire(fAddressMap
, start
, end
, FALSE
);
4544 IOMemoryMap::getPhysicalSegment( IOByteCount _offset
, IOPhysicalLength
* _length
, IOOptionBits _options
)
4545 #else /* !__LP64__ */
4546 IOMemoryMap::getPhysicalSegment( IOByteCount _offset
, IOPhysicalLength
* _length
)
4547 #endif /* !__LP64__ */
4549 IOPhysicalAddress address
;
4553 address
= fMemory
->getPhysicalSegment( fOffset
+ _offset
, _length
, _options
);
4554 #else /* !__LP64__ */
4555 address
= fMemory
->getPhysicalSegment( fOffset
+ _offset
, _length
);
4556 #endif /* !__LP64__ */
4562 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4565 #define super OSObject
4567 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4570 IOMemoryDescriptor::initialize( void )
4572 if (0 == gIOMemoryLock
) {
4573 gIOMemoryLock
= IORecursiveLockAlloc();
4576 gIOLastPage
= IOGetLastPageNumber();
4580 IOMemoryDescriptor::free( void )
4583 _mappings
->release();
4587 IODelete(reserved
, IOMemoryDescriptorReserved
, 1);
4594 IOMemoryDescriptor::setMapping(
4596 IOVirtualAddress mapAddress
,
4597 IOOptionBits options
)
4599 return createMappingInTask( intoTask
, mapAddress
,
4600 options
| kIOMapStatic
,
4605 IOMemoryDescriptor::map(
4606 IOOptionBits options
)
4608 return createMappingInTask( kernel_task
, 0,
4609 options
| kIOMapAnywhere
,
4615 IOMemoryDescriptor::map(
4617 IOVirtualAddress atAddress
,
4618 IOOptionBits options
,
4620 IOByteCount length
)
4622 if ((!(kIOMapAnywhere
& options
)) && vm_map_is_64bit(get_task_map(intoTask
))) {
4623 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4627 return createMappingInTask(intoTask
, atAddress
,
4628 options
, offset
, length
);
4630 #endif /* !__LP64__ */
4633 IOMemoryDescriptor::createMappingInTask(
4635 mach_vm_address_t atAddress
,
4636 IOOptionBits options
,
4637 mach_vm_size_t offset
,
4638 mach_vm_size_t length
)
4640 IOMemoryMap
* result
;
4641 IOMemoryMap
* mapping
;
4644 length
= getLength();
4647 mapping
= new IOMemoryMap
;
4650 && !mapping
->init( intoTask
, atAddress
,
4651 options
, offset
, length
)) {
4657 result
= makeMapping(this, intoTask
, (IOVirtualAddress
) mapping
, options
| kIOMap64Bit
, 0, 0);
4664 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4665 this, atAddress
, (uint32_t) options
, offset
, length
);
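/*
 * Editor's note: hedged sketch (not part of the original file) of mapping a descriptor
 * into the kernel task with createMappingInTask() and reading back the address, mirroring
 * the arguments logged just above. The helper name and IOLog output are assumptions; the
 * caller owns the returned IOMemoryMap and must release it to drop the mapping.
 */
#if 0 /* illustrative sketch, not compiled */
static IOMemoryMap *
MapIntoKernel(IOMemoryDescriptor * md)
{
    // kIOMapAnywhere lets the kernel pick the virtual address; the returned
    // IOMemoryMap keeps the mapping alive until it is released.
    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
    if (map) {
        IOLog("mapped at 0x%qx len 0x%qx\n",
            map->getAddress(), (uint64_t) map->getLength());
    }
    return map;
}
#endif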
4672 #ifndef __LP64__ // there is only a 64 bit version for LP64
4674 IOMemoryMap::redirect(IOMemoryDescriptor
* newBackingMemory
,
4675 IOOptionBits options
,
4678 return redirect(newBackingMemory
, options
, (mach_vm_size_t
)offset
);
4683 IOMemoryMap::redirect(IOMemoryDescriptor
* newBackingMemory
,
4684 IOOptionBits options
,
4685 mach_vm_size_t offset
)
4687 IOReturn err
= kIOReturnSuccess
;
4688 IOMemoryDescriptor
* physMem
= 0;
4692 if (fAddress
&& fAddressMap
) {
4694 if (((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4695 || ((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
)) {
4700 if (!fRedirUPL
&& fMemory
->_memRef
&& (1 == fMemory
->_memRef
->count
)) {
4701 upl_size_t size
= round_page(fLength
);
4702 upl_control_flags_t flags
= UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
4703 | UPL_SET_LITE
| UPL_SET_IO_WIRE
| UPL_BLOCK_ACCESS
;
4704 if (KERN_SUCCESS
!= memory_object_iopl_request(fMemory
->_memRef
->entries
[0].entry
, 0, &size
, &fRedirUPL
,
4706 &flags
, fMemory
->getVMTag(kernel_map
))) {
4711 IOUnmapPages( fAddressMap
, fAddress
, fLength
);
4713 physMem
->redirect(0, true);
4718 if (newBackingMemory
) {
4719 if (newBackingMemory
!= fMemory
) {
4721 if (this != newBackingMemory
->makeMapping(newBackingMemory
, fAddressTask
, (IOVirtualAddress
) this,
4722 options
| kIOMapUnique
| kIOMapReference
| kIOMap64Bit
,
4724 err
= kIOReturnError
;
4728 upl_commit(fRedirUPL
, NULL
, 0);
4729 upl_deallocate(fRedirUPL
);
4732 if ((false) && physMem
) {
4733 physMem
->redirect(0, false);
4749 IOMemoryDescriptor::makeMapping(
4750 IOMemoryDescriptor
* owner
,
4752 IOVirtualAddress __address
,
4753 IOOptionBits options
,
4754 IOByteCount __offset
,
4755 IOByteCount __length
)
4758 if (!(kIOMap64Bit
& options
)) {
4759 panic("IOMemoryDescriptor::makeMapping !64bit");
4761 #endif /* !__LP64__ */
4763 IOMemoryDescriptor
* mapDesc
= 0;
4764 __block IOMemoryMap
* result
= 0;
4766 IOMemoryMap
* mapping
= (IOMemoryMap
*) __address
;
4767 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
4768 mach_vm_size_t length
= mapping
->fLength
;
4770 mapping
->fOffset
= offset
;
4775 if (kIOMapStatic
& options
) {
4777 addMapping(mapping
);
4778 mapping
->setMemoryDescriptor(this, 0);
4782 if (kIOMapUnique
& options
) {
4784 IOByteCount physLen
;
4786 // if (owner != this) continue;
4788 if (((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4789 || ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
)) {
4790 phys
= getPhysicalSegment(offset
, &physLen
, kIOMemoryMapperNone
);
4791 if (!phys
|| (physLen
< length
)) {
4795 mapDesc
= IOMemoryDescriptor::withAddressRange(
4796 phys
, length
, getDirection() | kIOMemoryMapperNone
, NULL
);
4801 mapping
->fOffset
= offset
;
4804 // look for a compatible existing mapping
4806 _mappings
->iterateObjects(^(OSObject
* object
)
4808 IOMemoryMap
* lookMapping
= (IOMemoryMap
*) object
;
4809 if ((result
= lookMapping
->copyCompatible(mapping
))) {
4811 result
->setMemoryDescriptor(this, offset
);
4817 if (result
|| (options
& kIOMapReference
)) {
4818 if (result
!= mapping
) {
4831 kr
= mapDesc
->doMap( 0, (IOVirtualAddress
*) &mapping
, options
, 0, 0 );
4832 if (kIOReturnSuccess
== kr
) {
4834 mapDesc
->addMapping(result
);
4835 result
->setMemoryDescriptor(mapDesc
, offset
);
4852 IOMemoryDescriptor::addMapping(
4853 IOMemoryMap
* mapping
)
4856 if (0 == _mappings
) {
4857 _mappings
= OSSet::withCapacity(1);
4860 _mappings
->setObject( mapping
);
4866 IOMemoryDescriptor::removeMapping(
4867 IOMemoryMap
* mapping
)
4870 _mappings
->removeObject( mapping
);
4875 // obsolete initializers
4876 // - initWithOptions is the designated initializer
4878 IOMemoryDescriptor::initWithAddress(void * address
,
4880 IODirection direction
)
4886 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address
,
4888 IODirection direction
,
4895 IOMemoryDescriptor::initWithPhysicalAddress(
4896 IOPhysicalAddress address
,
4898 IODirection direction
)
4904 IOMemoryDescriptor::initWithRanges(
4905 IOVirtualRange
* ranges
,
4907 IODirection direction
,
4915 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange
* ranges
,
4917 IODirection direction
,
4924 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
4925 IOByteCount
* lengthOfSegment
)
4929 #endif /* !__LP64__ */
4931 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4934 IOGeneralMemoryDescriptor::serialize(OSSerialize
* s
) const
4936 OSSymbol
const *keys
[2] = {0};
4937 OSObject
*values
[2] = {0};
4939 vm_size_t vcopy_size
;
4942 user_addr_t address
;
4945 unsigned int index
, nRanges
;
4946 bool result
= false;
4948 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
4954 array
= OSArray::withCapacity(4);
4959 nRanges
= _rangesCount
;
4960 if (os_mul_overflow(sizeof(SerData
), nRanges
, &vcopy_size
)) {
4964 vcopy
= (SerData
*) IOMalloc(vcopy_size
);
4970 keys
[0] = OSSymbol::withCString("address");
4971 keys
[1] = OSSymbol::withCString("length");
4973 // Copy the volatile data so we don't have to allocate memory
4974 // while the lock is held.
4976 if (nRanges
== _rangesCount
) {
4977 Ranges vec
= _ranges
;
4978 for (index
= 0; index
< nRanges
; index
++) {
4979 mach_vm_address_t addr
; mach_vm_size_t len
;
4980 getAddrLenForInd(addr
, len
, type
, vec
, index
);
4981 vcopy
[index
].address
= addr
;
4982 vcopy
[index
].length
= len
;
4985 // The descriptor changed out from under us. Give up.
4992 for (index
= 0; index
< nRanges
; index
++) {
4993 user_addr_t addr
= vcopy
[index
].address
;
4994 IOByteCount len
= (IOByteCount
) vcopy
[index
].length
;
4995 values
[0] = OSNumber::withNumber(addr
, sizeof(addr
) * 8);
4996 if (values
[0] == 0) {
5000 values
[1] = OSNumber::withNumber(len
, sizeof(len
) * 8);
5001 if (values
[1] == 0) {
5005 OSDictionary
*dict
= OSDictionary::withObjects((const OSObject
**)values
, (const OSSymbol
**)keys
, 2);
5010 array
->setObject(dict
);
5012 values
[0]->release();
5013 values
[1]->release();
5014 values
[0] = values
[1] = 0;
5017 result
= array
->serialize(s
);
5024 values
[0]->release();
5027 values
[1]->release();
5036 IOFree(vcopy
, vcopy_size
);
5042 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5044 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 0);
5046 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 1);
5047 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 2);
5048 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 3);
5049 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 4);
5050 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 5);
5051 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 6);
5052 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 7);
5053 #else /* !__LP64__ */
5054 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 1);
5055 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 2);
5056 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 3);
5057 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 4);
5058 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 5);
5059 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 6);
5060 OSMetaClassDefineReservedUsed(IOMemoryDescriptor
, 7);
5061 #endif /* !__LP64__ */
5062 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 8);
5063 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 9);
5064 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 10);
5065 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 11);
5066 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 12);
5067 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 13);
5068 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 14);
5069 OSMetaClassDefineReservedUnused(IOMemoryDescriptor
, 15);
5071 /* ex-inline function implementation */
5073 IOMemoryDescriptor::getPhysicalAddress()
5075 return getPhysicalSegment( 0, 0 );