/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

// osfmk/device/iokit_rpc.c
unsigned int IODefaultCacheBits(addr64_t pa);
unsigned int IOTranslateCacheBits(struct phys_entry *pp);
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// function below.

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData
{
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;     // Pointer to page list or index into it
    uint32_t     fIOMDOffset;   // The offset of this iopl in descriptor
    ppnum_t      fMappedPage;   // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData
{
    IOMapper * fMapper;
    uint64_t   fDMAMapAlignment;
    uint64_t   fMappedBase;
    uint64_t   fMappedLength;
    uint64_t   fPreparationID;
#if IOTRACKING
    IOTracking              fWireTracking;
    struct vm_tag_set       fWireTags;
    struct vm_tag_set_entry fWireTagsEntries[kMaxWireTags];
#endif /* IOTRACKING */
    unsigned int  fPageCnt;
    uint8_t       fDMAMapNumAddressBits;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char _resv:6;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
        // align fPageList as for ioPLBlock
        __attribute__((aligned(sizeof(upl_t))));
    ioPLBlock fBlocks[1];
};
#define getDataP(osd)       ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)      ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)  \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)      (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
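
/*
 * Illustrative note (not from the original source): the OSData buffer backing an
 * IOGeneralMemoryDescriptor is laid out as one ioGMDData header, then fPageCnt
 * upl_page_info_t entries, then the ioPLBlock array, which is what the macros
 * above recover. A hypothetical descriptor spanning 4 pages with 1 IOPL would be
 * sized as:
 *
 *     unsigned int dataSize = computeDataSize(4, 1);
 *     // == offsetof(ioGMDData, fPageList)
 *     //    + 4 * sizeof(upl_page_info_t)
 *     //    + 1 * sizeof(ioPLBlock)
 *
 * and getIOPLList()/getNumIOPL() recover the ioPLBlock array from that same layout.
 */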
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
extern "C" {

kern_return_t device_data_action(
               uintptr_t            device_handle,
               ipc_port_t           device_pager,
               vm_prot_t            protection,
               vm_object_offset_t   offset,
               vm_size_t            size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    memDesc = ref->dp.memory;
    kr = memDesc->handleFault(device_pager, offset, size);

    return (kr);
}

kern_return_t device_close(
               uintptr_t device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64  == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
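
/*
 * Illustrative usage (a sketch, not part of the original file): callers walk the
 * descriptor's ranges by index, letting the helper hide the uio / 32-bit / 64-bit
 * representation differences:
 *
 *     mach_vm_address_t addr;
 *     mach_vm_size_t    len;
 *     for (UInt32 i = 0; i < _rangesCount; i++) {
 *         getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, i);
 *         // addr/len now describe range i
 *     }
 */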
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}
static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
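
/*
 * Example of the round trip performed by the two helpers above (illustrative,
 * not from the original source):
 *
 *     vm_purgable_t control; int state;
 *     purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state);
 *     // control == VM_PURGABLE_SET_STATE, state == VM_PURGABLE_VOLATILE | ...
 *     purgeableStateBits(&state);
 *     // state is translated back to kIOMemoryPurgeableVolatile
 */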
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    vm_prot_t prot = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, prot);
            break;

        case kIOWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, prot);
            break;

        case kIOWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, prot);
            break;

        case kIOCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
            break;

        case kIOCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
            break;

        case kIODefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, prot);
            break;
    }

    return (prot);
}
static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteThruCache:
            pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteCombineCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackInnerCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIODefaultCache:
        default:
            pagerFlags = -1U;
            break;
    }
    return (pagerFlags);
}
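
/*
 * Illustrative note (not from the original source): the two tables above translate
 * a single IOKit cache mode for its two consumers, e.g.
 *
 *     vm_prot_t    prot  = vmProtForCacheMode(kIOWriteCombineCache);    // MAP_MEM_WCOMB bits
 *     unsigned int flags = pagerFlagsForCacheMode(kIOWriteCombineCache);
 *     // flags == DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT
 *
 * A result of -1U from pagerFlagsForCacheMode() means the caller must resolve
 * kIODefaultCache to a concrete mode first (see the panic in memoryReferenceCreate).
 */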
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
struct IOMemoryEntry
{
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference
{
    volatile SInt32            refCount;
    vm_prot_t                  prot;
    uint32_t                   capacity;
    uint32_t                   count;
    struct IOMemoryReference * mapRef;
    IOMemoryEntry              entries[0];
};

enum
{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
    kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref)) IOMalloc(newSize);
    if (realloc)
    {
        oldSize = (sizeof(IOMemoryReference)
                     - sizeof(realloc->entries)
                     + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) copySize = newSize;
        if (ref) bcopy(realloc, ref, copySize);
        IOFree(realloc, oldSize);
    }
    else if (ref)
    {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) return (0);
    ref->capacity = capacity;
    return (ref);
}
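
/*
 * Illustrative note (not from the original source): memoryReferenceAlloc() doubles
 * as a realloc. Growing a full reference looks like:
 *
 *     ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
 *     // existing entries are copied, the old allocation is freed,
 *     // and ref->capacity reflects the new size
 */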
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    if (ref->mapRef)
    {
        memoryReferenceFree(ref->mapRef);
        ref->mapRef = 0;
    }

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
              - sizeof(ref->entries)
              + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}
void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
                        IOOptionBits         options,
                        IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t        err;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;
    vm_tag_t             tag;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) return (kIOReturnNoMemory);

    tag = getVMTag(kernel_map);
    entries = &ref->entries[0];
    count = 0;
    err = KERN_SUCCESS;

    offset = 0;
    rangeIdx = 0;
    if (_task)
    {
        getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    }
    else
    {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
        {
            IOOptionBits mode;
            pagerFlags = IODefaultCacheBits(nextAddr);
            if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
            {
                if (DEVICE_PAGER_GUARDED & pagerFlags)
                    mode = kIOInhibitCache;
                else
                    mode = kIOWriteCombineCache;
            }
            else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
                mode = kIOWriteThruCache;
            else
                mode = kIOCopybackCache;
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceCOW   & options)               prot |= MAP_MEM_VM_COPY;

    cloneEntries = NULL;
    if ((kIOMemoryReferenceReuse & options) && _memRef)
    {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task)
    {
        // virtual ranges
        if (kIOMemoryBufferPageable & _flags)
        {
            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;
            if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
            if (kIOMemoryUseReserve & _flags)      prot |= MAP_MEM_GRAB_SECLUDED;
            prot |= VM_PROT_WRITE;
            map = NULL;
        }
        else map = get_task_map(_task);

        remain = _length;
        while (remain)
        {
            srcAddr  = nextAddr;
            srcLen   = nextLen;
            nextAddr = 0;
            nextLen  = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
            {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) break;
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do
            {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) break;
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
                    else                                                  prot &= ~MAP_MEM_NAMED_REUSE;
                }

                err = mach_make_memory_entry_64(map,
                        &actualSize, entryAddr, prot, &entry, cloneEntry);

                if (KERN_SUCCESS != err) break;
                if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");

                if (count >= ref->capacity)
                {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if ((cloneEntries->entry  == entries->entry)
                     && (cloneEntries->size   == entries->size)
                     && (cloneEntries->offset == entries->offset)) cloneEntries++;
                    else prot &= ~MAP_MEM_NAMED_REUSE;
                }
                entries++;
                count++;
            }
            while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    }
    else
    {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t       size = ptoa_32(_pages);

        if (!getKernelReserved()) panic("getKernelReserved");

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
        if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;

        pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
                                   size, pagerFlags);
        if (!pager) err = kIOReturnVMError;
        else
        {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert (KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) device_pager_deallocate(pager);
            else
            {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot  = prot;

    if (_task && (KERN_SUCCESS == err)
      && (kIOMemoryMapCopyOnWrite & _flags)
      && !(kIOMemoryReferenceCOW & options))
    {
        err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
    }

    if (KERN_SUCCESS == err)
    {
        if (MAP_MEM_NAMED_REUSE & prot)
        {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    }
    else
    {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return (err);
}
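
/*
 * Illustrative note (not from the original source): memoryReferenceCreate() turns
 * the descriptor's ranges into named mach memory entries. A sketch of how the
 * class itself uses it (mirroring withPersistentMemoryDescriptor below):
 *
 *     IOMemoryReference * memRef;
 *     if (kIOReturnSuccess == md->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
 *         // memRef->entries[0..count) name the backing VM objects
 *         md->memoryReferenceRelease(memRef);
 *     }
 */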
static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
                                  (vm_map_offset_t) 0,
                                  (((ref->options & kIOMapAnywhere)
                                    ? VM_FLAGS_ANYWHERE
                                    : VM_FLAGS_FIXED)
                                   | VM_MAKE_TAG(ref->tag)),
                                  IPC_PORT_NULL,
                                  (memory_object_offset_t) 0,
                                  false,
                                  ref->prot,
                                  ref->prot,
                                  VM_INHERIT_NONE);
    if (KERN_SUCCESS == err)
    {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return( err );
}
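
/*
 * Illustrative note (not from the original source): this callback is handed to
 * IOIteratePageableMaps() for pageable buffer allocations and called directly
 * otherwise, as memoryReferenceMap() does below:
 *
 *     if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
 *         err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
 *     else
 *         err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
 */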
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
                     IOMemoryReference * ref,
                     vm_map_t            map,
                     mach_vm_size_t      inoffset,
                     mach_vm_size_t      size,
                     IOOptionBits        options,
                     mach_vm_address_t * inaddr)
{
    IOReturn          err;
    int64_t           offset = inoffset;
    uint32_t          rangeIdx, entryIdx;
    vm_map_offset_t   addr, mapAddr;
    vm_map_offset_t   pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t    nextLen;
    IOByteCount       physLen;
    IOMemoryEntry *   entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;
    vm_tag_t          tag;
    // for the kIOMapPrefault option.
    upl_page_info_t * pageList = NULL;
    UInt              currentPageIndex = 0;
    bool              didAlloc;

    if (ref->mapRef)
    {
        err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
        return (err);
    }

    type = _flags & kIOMemoryTypeMask;

    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode)
    {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    tag = getVMTag(map);

    if (_task)
    {
        // Find first range for offset
        if (!_rangesCount) return (kIOReturnBadArgument);
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) break;
            remain -= nextLen;
        }
    }
    else
    {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) return (kIOReturnBadArgument);

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr       = 0;
    didAlloc   = false;

    if (!(options & kIOMapAnywhere))
    {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
         (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
         entryIdx++) {}
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options)
    {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
        {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    }
    else
    {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.tag     = tag;
        ref.options = options;
        ref.size    = size;
        ref.prot    = prot;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = addr;
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        if (KERN_SUCCESS == err)
        {
            addr     = ref.mapped;
            map      = ref.map;
            didAlloc = true;
        }
    }

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault)
    {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(map != kernel_map);
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((map == kernel_map) ||
            (_wireCount == 0) ||
            (_memoryEntries == NULL))
        {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData * dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to go back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
            ioplIndex++;
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
         * array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Rebase [offset] into the IOPL in order to look for the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && (KERN_SUCCESS == err))
    {
        entryOffset = offset - entry->offset;
        if ((page_mask & entryOffset) != pageOffset)
        {
            err = kIOReturnNotAligned;
            break;
        }

        if (kIODefaultCache != cacheMode)
        {
            vm_size_t unused = 0;
            err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                                         memEntryCacheMode, NULL, entry->entry);
            assert (KERN_SUCCESS == err);
        }

        entryOffset -= pageOffset;
        if (entryOffset >= entry->size) panic("entryOffset");
        chunk = entry->size - entryOffset;
        if (chunk)
        {
            if (chunk > remain) chunk = remain;
            if (options & kIOMapPrefault)
            {
                UInt nb_pages = round_page(chunk) / PAGE_SIZE;
                err = vm_map_enter_mem_object_prefault(map,
                                                       &mapAddr,
                                                       chunk, 0 /* mask */,
                                                        (VM_FLAGS_FIXED
                                                       | VM_FLAGS_OVERWRITE
                                                       | VM_MAKE_TAG(tag)
                                                       | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                                       entry->entry,
                                                       entryOffset,
                                                       prot, // cur
                                                       prot, // max
                                                       &pageList[currentPageIndex],
                                                       nb_pages);

                // Compute the next index in the page list.
                currentPageIndex += nb_pages;
                assert(currentPageIndex <= _pages);
            }
            else
            {
                err = vm_map_enter_mem_object(map,
                                              &mapAddr,
                                              chunk, 0 /* mask */,
                                               (VM_FLAGS_FIXED
                                              | VM_FLAGS_OVERWRITE
                                              | VM_MAKE_TAG(tag)
                                              | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                              entry->entry,
                                              entryOffset,
                                              false, // copy
                                              prot, // cur
                                              prot, // max
                                              VM_INHERIT_NONE);
            }
            if (KERN_SUCCESS != err) break;
            remain -= chunk;
            if (!remain) break;
            mapAddr += chunk;
            offset  += chunk - pageOffset;
        }
        pageOffset = 0;
        entry++;
        entryIdx++;
        if (entryIdx >= ref->count)
        {
            err = kIOReturnOverrun;
            break;
        }
    }

    if ((KERN_SUCCESS != err) && didAlloc)
    {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return (err);
}
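
/*
 * Illustrative note (not from the original source): kIOMapPrefault is only honoured
 * for wired descriptors, because the pre-fault path feeds the UPL page list gathered
 * by ::prepare() into vm_map_enter_mem_object_prefault(). A typical client therefore
 * prepares first, then maps, e.g.:
 *
 *     md->prepare();
 *     IOMemoryMap * map = md->createMappingInTask(task, 0, kIOMapAnywhere | kIOMapPrefault);
 */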
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
                        IOMemoryReference * ref,
                        IOByteCount       * residentPageCount,
                        IOByteCount       * dirtyPageCount)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    unsigned int    resident, dirty;
    unsigned int    totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) break;
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) *residentPageCount = totalResident;
    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
    return (err);
}
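
/*
 * Illustrative usage (not from the original source): descriptors that carry an
 * IOMemoryReference end up here when a client asks for page statistics:
 *
 *     IOByteCount resident, dirty;
 *     md->getPageCounts(&resident, &dirty);
 */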
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
                        IOMemoryReference * ref,
                        IOOptionBits        newState,
                        IOOptionBits      * oldState)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) break;
        err = mach_memory_entry_purgable_control(entries->entry, control, &state);
        if (KERN_SUCCESS != err) break;
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;
    return (err);
}
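
/*
 * Illustrative usage (not from the original source): this helper backs
 * IOMemoryDescriptor::setPurgeable() for descriptors that own a memory reference;
 * the accumulated totalState reports the "most volatile" state across entries:
 *
 *     IOOptionBits oldState;
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 */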
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                        IOPhysicalAddress address,
                        IOByteCount       length,
                        IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
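
/*
 * Illustrative driver-side usage of the factories above (a sketch, not from the
 * original source); the task argument selects virtual (task != NULL) versus
 * physical ranges, and userBuffer/userLength/userTask are hypothetical names:
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddressRange(userBuffer, userLength,
 *                                              kIODirectionOutIn, userTask);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... DMA or copy ...
 *         md->complete();
 *     }
 *     if (md) md->release();
 */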
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}
bool IOMemoryDescriptor::initWithOptions(void * buffers, UInt32 count, UInt32 offset,
                                         task_t task, IOOptionBits options, IOMapper * mapper)
{
    return( false );
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}
IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);

    if (memRef == originalMD->_memRef)
    {
        originalMD->retain();               // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           count,
                                IODirection      direction,
                                task_t           task,
                                bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges, from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */
    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        IOMDPersistentInitData *initData = (typeof(initData)) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memRef = initData->fMemRef;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type    = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count   = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task   = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }
    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task) return false;
        break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memRef)
            {
                memoryReferenceRelease(_memRef);
                _memRef = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }
    // Grab the appropriate mapper
    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }
    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;

 //     _wireCount++;   // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo   = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work in C++.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_address_t endAddr;
            mach_vm_size_t    len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
            if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
            if (os_add_overflow(totalLength, len, &totalLength)) break;
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        if ((ind < count)
         || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */

        _length      = totalLength;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed.
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize;

            if (_pages > atop_64(max_mem)) return false;

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memRef)
            {
                IOReturn
                err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) return false;
            }

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (reserved)
    {
        reserved->dp.memory = 0;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memRef)      memoryReferenceRelease(_memRef);
    if (_prepareLock) IOLockFree(_prepareLock);

    super::free();
}
#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}

#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);
    if ((offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount offset = inoffset;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags)
     || (offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        if (!srcAddr) bzero_phys(dstAddr64, dstLen);
        else
        {
            copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                                cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);

    return length - remaining;
}
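
/*
 * Illustrative usage (not from the original source): readBytes()/writeBytes() copy
 * through the physical segments, so they work on prepared (wired) descriptors
 * without requiring a kernel virtual mapping:
 *
 *     uint8_t header[64];
 *     IOByteCount got = md->readBytes(0, header, sizeof(header));  // returns bytes copied
 *     md->writeBytes(0, header, got);
 */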
#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}
IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}
void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
{
    if (!getKernelReserved()) return;
    reserved->kernelTag = kernelTag;
    reserved->userTag   = userTag;
}

vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
{
    if (!reserved
      || (VM_KERN_MEMORY_NONE == reserved->kernelTag)
      || (VM_KERN_MEMORY_NONE == reserved->userTag))
    {
        return (IOMemoryTag(map));
    }

    if (vm_kernel_map_is_kernel(map)) return (reserved->kernelTag);
    return (reserved->userTag);
}
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap, keepMap;
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            keepMap = (data->fMapper == gIOSystemMapper);
            keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

            remap = (!keepMap);
            remap |= (dataP->fDMAMapNumAddressBits < 64)
                  && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBase)
            {
//              if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
                if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
                {
                    dataP->fMappedBase   = data->fAlloc;
                    dataP->fMappedLength = data->fAllocLength;
                    data->fAllocLength   = 0;           // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocLength = 0;                 // give out IOMD map
            }
            data->fMapContig = !dataP->fDiscontig;
        }

        return (err);
    }
    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength    = _length;
        data->fSGCount   = _rangesCount;
        data->fPages     = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared  = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;

    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment      = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
            if (kIOReturnSuccess != err) return (err);
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt   length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length  += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length  += len;
                off2Ind += len;
                ind++;
            }
        }
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
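
/*
 * Illustrative note (not from the original source): kIOMDWalkSegments is a cursor
 * style interface. IODMACommand repeatedly passes the same InternalState buffer,
 * and the fOffset2Index / fIndex / fNextOffset fields let the next call resume the
 * range scan where the previous one stopped instead of rescanning from offset 0,
 * e.g. a sequential walk only pays O(1) per segment after the first lookup.
 */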
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn          ret;
    mach_vm_address_t address = 0;
    mach_vm_size_t    length  = 0;
    IOMapper *        mapper  = gIOSystemMapper;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        mach_vm_address_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            mach_vm_address_t newAddr;
            mach_vm_size_t    newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr; // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up.

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapToPhysicalAddress(origAddr);
                length  = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}
#pragma clang diagnostic pop
2442 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
2444 return (getPhysicalSegment(offset
, lengthOfSegment
, kIOMemoryMapperNone
));
2448 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
2450 addr64_t address
= 0;
2451 IOByteCount length
= 0;
2453 address
= getPhysicalSegment(offset
, lengthOfSegment
, 0);
2455 if (lengthOfSegment
)
2456 length
= *lengthOfSegment
;
2458 if ((address
+ length
) > 0x100000000ULL
)
2460 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2461 address
, (long) length
, (getMetaClass())->getClassName());
2464 return ((IOPhysicalAddress
) address
);
2468 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
2470 IOPhysicalAddress phys32
;
2473 IOMapper
* mapper
= 0;
2475 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
2479 if (gIOSystemMapper
)
2480 mapper
= gIOSystemMapper
;
2484 IOByteCount origLen
;
2486 phys64
= mapper
->mapToPhysicalAddress(phys32
);
2487 origLen
= *lengthOfSegment
;
2488 length
= page_size
- (phys64
& (page_size
- 1));
2489 while ((length
< origLen
)
2490 && ((phys64
+ length
) == mapper
->mapToPhysicalAddress(phys32
+ length
)))
2491 length
+= page_size
;
2492 if (length
> origLen
)
2495 *lengthOfSegment
= length
;
2498 phys64
= (addr64_t
) phys32
;
2504 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
2506 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, 0));
2510 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
2512 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, _kIOMemorySourceSegment
));
2515 #pragma clang diagnostic push
2516 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2518 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
2519 IOByteCount
* lengthOfSegment
)
2521 if (_task
== kernel_task
)
2522 return (void *) getSourceSegment(offset
, lengthOfSegment
);
2524 panic("IOGMD::getVirtualSegment deprecated");
2528 #pragma clang diagnostic pop
2529 #endif /* !__LP64__ */
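
/*
 * Illustrative sketch (not part of the original file): one common way a driver
 * walks a descriptor's physical ranges with getPhysicalSegment() after prepare().
 * The helper name is hypothetical; kIOMemoryMapperNone asks for CPU physical
 * addresses rather than system-mapper (IOMMU) addresses.
 */
#if 0
static void
DumpPhysicalSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;

    while (offset < md->getLength()) {
        IOByteCount segLen  = 0;
        addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);

        if (!segAddr || !segLen) break;     // no further contiguous physical range
        IOLog("segment @ 0x%llx, length 0x%llx\n",
              (unsigned long long) segAddr, (unsigned long long) segLen);
        offset += segLen;
    }
}
#endif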
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = getDirection();
        data->fIsPrepared = true;	// Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
        return (err);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
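
/*
 * Illustrative sketch (not part of the original file): drivers normally reach
 * kIOMDWalkSegments / kIOMDDMAMap indirectly through IODMACommand rather than
 * calling dmaCommandOperation() themselves. A minimal, hedged example of that
 * path (error handling abbreviated; the descriptor "md" is assumed valid):
 */
#if 0
static IOReturn
GenerateSegments(IOMemoryDescriptor * md)
{
    IODMACommand * dma = IODMACommand::withSpecification(kIODMACommandOutputHost64,
                                                         64,   // address bits
                                                         0);   // no segment size limit
    if (!dma) return kIOReturnNoMemory;

    IOReturn ret = dma->setMemoryDescriptor(md);   // retains and (auto)prepares md
    if (kIOReturnSuccess == ret) {
        UInt64 offset = 0;
        while (offset < md->getLength()) {
            IODMACommand::Segment64 seg;
            UInt32 numSeg = 1;
            ret = dma->genIOVMSegments(&offset, &seg, &numSeg);
            if ((kIOReturnSuccess != ret) || !numSeg) break;
            // seg.fIOVMAddr / seg.fLength describe one DMA segment
        }
        dma->clearMemoryDescriptor();
    }
    dma->release();
    return ret;
}
#endif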
IOReturn IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                                  IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;

    vm_purgable_t control;
    int           state;

    if (_memRef)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            mach_vm_address_t addr;
            mach_vm_size_t    len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }

    return (err);
}
IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
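
/*
 * Illustrative sketch (not part of the original file): a typical use of
 * setPurgeable() on a purgeable buffer. The buffer size and owning task are
 * hypothetical; the states (kIOMemoryPurgeableVolatile / NonVolatile / Empty)
 * are the ones translated by purgeableControlBits()/purgeableStateBits() above.
 */
#if 0
static void
PurgeableExample(task_t task)
{
    IOBufferMemoryDescriptor * buf =
        IOBufferMemoryDescriptor::inTaskWithOptions(task,
                                                    kIODirectionInOut | kIOMemoryPurgeable,
                                                    page_size, page_size);
    if (!buf) return;

    IOOptionBits oldState = 0;

    // Mark the pages volatile while the cached contents are reproducible.
    buf->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

    // ... later, reclaim the buffer; Empty means the VM discarded the pages.
    buf->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    if (kIOMemoryPurgeableEmpty == oldState) {
        // contents were purged and must be regenerated
    }
    buf->release();
}
#endif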
IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                            IOByteCount * dirtyPageCount )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    else
    {
        IOMultiMemoryDescriptor * mmd;
        IOSubMemoryDescriptor   * smd;
        if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
        {
            err = smd->getPageCounts(residentPageCount, dirtyPageCount);
        }
        else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
        {
            err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
        }
    }
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
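
/*
 * Illustrative sketch (not part of the original file): querying how many pages
 * of a descriptor are resident and dirty, e.g. for accounting. The out-parameters
 * carry page counts despite their IOByteCount type.
 */
#if 0
static void
ReportPageCounts(IOMemoryDescriptor * md)
{
    IOByteCount resident = 0, dirty = 0;

    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
        IOLog("resident pages %llu, dirty pages %llu\n",
              (unsigned long long) resident, (unsigned long long) dirty);
    }
}
#endif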
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
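
/*
 * Illustrative sketch (not part of the original file): flushing a descriptor's
 * backing pages before handing them to a non-coherent device. The offset/length
 * split is hypothetical; the operation codes are the ones dispatched by
 * performOperation() above.
 */
#if 0
static void
FlushForDevice(IOMemoryDescriptor * md)
{
    // Write back CPU cache lines covering the whole descriptor.
    md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());

    // Or flush just the first page ahead of an incoming DMA.
    md->performOperation(kIOMemoryIncoherentIOFlush, 0, page_size);
}
#endif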
#if defined(__i386__) || defined(__x86_64__)

#define io_kernel_static_start	vm_kernel_stext
#define io_kernel_static_end	vm_kernel_etext

#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
	vm_map_t		/* map */,
	uintptr_t		offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	ppnum_t			*highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr      = phys;
        page_list[page].free_when_done = 0;
        page_list[page].absent         = 0;
        page_list[page].dirty          = 0;
        page_list[page].precious       = 0;
        page_list[page].device         = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
#if IOTRACKING
static void
IOMemoryDescriptorUpdateWireOwner(ioGMDData * dataP, OSData * memoryEntries, vm_tag_t tag)
{
    ioPLBlock *ioplList;
    UInt       ind, count;
    vm_tag_t   prior;

    count = getNumIOPL(memoryEntries, dataP);
    if (!count) return;
    ioplList = getIOPLList(dataP);

    if (VM_KERN_MEMORY_NONE == tag) tag = dataP->fAllocTag;
    assert(VM_KERN_MEMORY_NONE != tag);

    for (ind = 0; ind < count; ind++)
    {
        if (!ioplList[ind].fIOPL) continue;
        prior = iopl_set_tag(ioplList[ind].fIOPL, tag);
        if (VM_KERN_MEMORY_NONE == dataP->fAllocTag) dataP->fAllocTag = prior;

        if (tag != prior)
        {
            char name[2][48];
            vm_tag_get_kext(prior, &name[0][0], sizeof(name[0]));
            vm_tag_get_kext(tag,   &name[1][0], sizeof(name[1]));
            IOLog("switched %48s to %48s\n", name[0], name[1]);
        }
    }
}
#endif /* IOTRACKING */
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type  = _flags & kIOMemoryTypeMask;
    IOReturn     error = kIOReturnSuccess;
    ioGMDData *  dataP;
    upl_page_info_array_t pageInfo;
    ppnum_t      mapBase = 0;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    upl_control_flags_t uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
        case kIODirectionOut:
            // Pages do not need to be marked as dirty on commit
            uplFlags = UPL_COPYOUT_FROM;
            break;

        case kIODirectionIn:
        default:
            uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
            break;
    }
    dataP = getDataP(_memoryEntries);

    if (kIODirectionDMACommand & forDirection) assert(_wireCount);

    if (_wireCount)
    {
        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
        {
            OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
            error = kIOReturnNotWritable;
        }
    }
    else
    {
        IOMapper *mapper;
        mapper = dataP->fMapper;
        dataP->fMappedBase = 0;

        uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
        uplFlags |= UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));

        if (kIODirectionPrepareToPhys32 & forDirection)
        {
            if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
            if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
        }
        if (kIODirectionPrepareNoFault     & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
        if (kIODirectionPrepareNoZeroFill  & forDirection) uplFlags |= UPL_NOZEROFILLIO;
        if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

        // Note that appendBytes(NULL) zeros the data up to the desired length
        //           and the length parameter is an unsigned int
        size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
        if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
        if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
        dataP = 0;

        // Find the appropriate vm_map for the given task
        vm_map_t curMap;
        if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
        else                                                            curMap = get_task_map(_task);

        // Iterate over the vector of virtual ranges
        Ranges vec = _ranges;
        unsigned int pageIndex  = 0;
        IOByteCount mdOffset    = 0;
        ppnum_t highestPage     = 0;

        IOMemoryEntry * memRefEntry = 0;
        if (_memRef) memRefEntry = &_memRef->entries[0];

        for (UInt range = 0; range < _rangesCount; range++) {
            ioPLBlock iopl;
            mach_vm_address_t startPage;
            mach_vm_size_t    numBytes;
            ppnum_t highPage = 0;

            // Get the startPage address and length of vec[range]
            getAddrLenForInd(startPage, numBytes, type, vec, range);
            iopl.fPageOffset = startPage & PAGE_MASK;
            numBytes += iopl.fPageOffset;
            startPage = trunc_page_64(startPage);

            if (mapper)
                iopl.fMappedPage = mapBase + pageIndex;
            else
                iopl.fMappedPage = 0;

            // Iterate over the current range, creating UPLs
            while (numBytes) {
                vm_address_t kernelStart = (vm_address_t) startPage;
                vm_map_t theMap;
                if (curMap) theMap = curMap;
                else if (_memRef)
                {
                    theMap = NULL;
                }
                else
                {
                    assert(_task == kernel_task);
                    theMap = IOPageableMapForAddress(kernelStart);
                }

                // ioplFlags is an in/out parameter
                upl_control_flags_t ioplFlags = uplFlags;
                dataP = getDataP(_memoryEntries);
                pageInfo = getPageList(dataP);
                upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

                mach_vm_size_t _ioplSize   = round_page(numBytes);
                upl_size_t     ioplSize    = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
                unsigned int   numPageInfo = atop_32(ioplSize);

                if ((theMap == kernel_map)
                 && (kernelStart >= io_kernel_static_start)
                 && (kernelStart <  io_kernel_static_end)) {
                    error = io_get_kernel_static_upl(theMap,
                                                     kernelStart,
                                                     &ioplSize,
                                                     &iopl.fIOPL,
                                                     baseInfo,
                                                     &numPageInfo,
                                                     &highPage);
                }
                else if (_memRef) {
                    memory_object_offset_t entryOffset;

                    entryOffset = mdOffset;
                    entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
                    if (entryOffset >= memRefEntry->size) {
                        memRefEntry++;
                        if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
                        entryOffset = 0;
                    }
                    if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
                    error = memory_object_iopl_request(memRefEntry->entry,
                                                       entryOffset,
                                                       &ioplSize,
                                                       &iopl.fIOPL,
                                                       baseInfo,
                                                       &numPageInfo,
                                                       &ioplFlags);
                }
                else {
                    assert(theMap);
                    error = vm_map_create_upl(theMap,
                                              startPage,
                                              (upl_size_t*)&ioplSize,
                                              &iopl.fIOPL,
                                              baseInfo,
                                              &numPageInfo,
                                              &ioplFlags);
                }

                if (error != KERN_SUCCESS) goto abortExit;

                assert(ioplSize);

                if (iopl.fIOPL)
                    highPage = upl_get_highest_page(iopl.fIOPL);
                if (highPage > highestPage)
                    highestPage = highPage;

                if (baseInfo->device) {
                    numPageInfo = 1;
                    iopl.fFlags = kIOPLOnDevice;
                }
                else {
                    iopl.fFlags = 0;
                }

                iopl.fIOMDOffset = mdOffset;
                iopl.fPageInfo   = pageIndex;
                if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;

                if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                    // Clean up partial created and unsaved iopl
                    if (iopl.fIOPL) {
                        upl_abort(iopl.fIOPL, 0);
                        upl_deallocate(iopl.fIOPL);
                    }
                    goto abortExit;
                }
                dataP = 0;

                // Check for a multiple iopl's in one virtual range
                pageIndex += numPageInfo;
                mdOffset  -= iopl.fPageOffset;
                if (ioplSize < numBytes) {
                    numBytes  -= ioplSize;
                    startPage += ioplSize;
                    mdOffset  += ioplSize;
                    iopl.fPageOffset = 0;
                    if (mapper) iopl.fMappedPage = mapBase + pageIndex;
                }
                else {
                    mdOffset += numBytes;
                    break;
                }
            }
        }

        _highestPage = highestPage;

        if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
    }

#if IOTRACKING
    if (kIOReturnSuccess == error)
    {
        vm_tag_t tag;

        dataP = getDataP(_memoryEntries);
        if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
        else                                       tag = IOMemoryTag(kernel_map);

        if (!_wireCount) vm_tag_set_init(&dataP->fWireTags, kMaxWireTags);
        vm_tag_set_enter(&dataP->fWireTags, kMaxWireTags, tag);

        IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
        if (!_wireCount)
        {
            //if (!(_flags & kIOMemoryAutoPrepare))
            IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
        }
    }
#endif /* IOTRACKING */

    return (error);

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return error;
}
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }
    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;
    dataP->fCompletionError      = false;

    return (true);
}
IOReturn IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn ret;
    uint32_t mapOptions;

    mapOptions = 0;
    mapOptions |= kIODMAMapReadAccess;
    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
                                mapSpec, command, NULL, mapAddress, mapLength);

    return (ret);
}
IOReturn IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn     err = kIOReturnSuccess;
    ioGMDData *  dataP;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
     || offset || (length != _length))
    {
        err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment      = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else pageList = getPageList(dataP);

        if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
        {
            mapOptions |= kIODMAMapPageListFullyOccupied;
        }

        mapOptions |= kIODMAMapReadAccess;
        if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        IODMAMapPageList dmaPageList =
        {
            .pageOffset    = (uint32_t)(ioplList->fPageOffset & page_mask),
            .pageListCount = _pages,
            .pageList      = &pageList[0]
        };
        err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
                                    command, &dmaPageList, mapAddress, mapLength);
    }

    return (err);
}
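
/*
 * Illustrative sketch (not part of the original file): the IODMAMapSpecification
 * consumed above normally originates from an IODMACommand created with explicit
 * address-width and alignment limits, e.g. for a hypothetical 32-bit device:
 */
#if 0
static IODMACommand *
MakeConstrainedDMACommand(void)
{
    return IODMACommand::withSpecification(kIODMACommandOutputHost32,
                                           32,          // device addresses 32 bits
                                           65536,       // max segment size
                                           IODMACommand::kMapped,
                                           0,           // no max transfer size
                                           4096);       // segment alignment
}
#endif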
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error = kIOReturnSuccess;
    IOOptionBits type  = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock) IOLockLock(_prepareLock);

    if (kIODirectionDMACommand & forDirection)
    {
#if IOMD_DEBUG_DMAACTIVE
        OSIncrementAtomic(&__iomd_reservedA);
#endif /* IOMD_DEBUG_DMAACTIVE */
    }

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if ((kIOReturnSuccess == error) && !(kIODirectionDMACommand & forDirection))
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return error;
}
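
/*
 * Illustrative sketch (not part of the original file): the usual prepare()/
 * complete() pairing around an I/O. The descriptor construction parameters
 * (user task, address, length) are hypothetical.
 */
#if 0
static IOReturn
WireAndUse(task_t task, mach_vm_address_t addr, mach_vm_size_t len)
{
    IOMemoryDescriptor * md =
        IOMemoryDescriptor::withAddressRange(addr, len, kIODirectionInOut, task);
    if (!md) return kIOReturnNoMemory;

    IOReturn ret = md->prepare();          // pages in and wires the memory
    if (kIOReturnSuccess == ret) {
        // ... program DMA / touch the pages here ...
        md->complete();                    // must balance every successful prepare()
    }
    md->release();
    return ret;
}
#endif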
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    ioGMDData  * dataP;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock) IOLockLock(_prepareLock);

    do
    {
        if (!_wireCount) break;
        dataP = getDataP(_memoryEntries);
        if (!dataP)      break;

#if IOMD_DEBUG_DMAACTIVE
        if (kIODirectionDMACommand & forDirection)
        {
            if (__iomd_reservedA) OSDecrementAtomic(&__iomd_reservedA);
            else                  panic("kIOMDSetDMAInactive");
        }
#endif /* IOMD_DEBUG_DMAACTIVE */

#if IOTRACKING
        if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
        {
            vm_tag_t tag;

            if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
            else                                       tag = IOMemoryTag(kernel_map);
            vm_tag_set_remove(&dataP->fWireTags, kMaxWireTags, tag, &tag);
            IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
        }
        if (kIODirectionDMACommand & forDirection) break;
#endif /* IOTRACKING */

        if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;

        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
        {
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, count = getNumIOPL(_memoryEntries, dataP);

            if (_wireCount)
            {
                // kIODirectionCompleteWithDataValid & forDirection
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    for (ind = 0; ind < count; ind++)
                    {
                        if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
                    }
                }
            }
            else
            {
#if IOMD_DEBUG_DMAACTIVE
                if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

                if (dataP->fMappedBase) {
                    dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
                    dataP->fMappedBase = 0;
                }
                // Only complete iopls that we created which are for TypeVirtual
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
#if IOTRACKING
                    //if (!(_flags & kIOMemoryAutoPrepare))
                    {
                        IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
                    }
#endif /* IOTRACKING */
                    for (ind = 0; ind < count; ind++)
                        if (ioplList[ind].fIOPL) {
                            if (dataP->fCompletionError)
                                upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
                            else
                                upl_commit(ioplList[ind].fIOPL, 0, 0);
                            upl_deallocate(ioplList[ind].fIOPL);
                        }
                } else if (kIOMemoryTypeUPL == type) {
                    upl_set_referenced(ioplList[0].fIOPL, false);
                }

                (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

                dataP->fPreparationID = kIOPreparationIDUnprepared;
                dataP->fAllocTag = VM_KERN_MEMORY_NONE;
            }
        }
    }
    while (false);

    if (_prepareLock) IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
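
/*
 * Illustrative sketch (not part of the original file): the completion flags
 * handled above. kIODirectionCompleteWithError aborts rather than commits the
 * wired UPLs; kIODirectionCompleteWithDataValid marks pages valid without
 * dropping the wiring. The helper below is a hedged example only.
 */
#if 0
static void
FinishIO(IOMemoryDescriptor * md, bool ioFailed)
{
    if (ioFailed)
        md->complete(kIODirectionCompleteWithError);   // discard, don't commit, the pages
    else
        md->complete();                                // normal commit path
}
#endif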
IOReturn IOGeneralMemoryDescriptor::doMap(
	vm_map_t		__addressMap,
	IOVirtualAddress *	__address,
	IOOptionBits		options,
	IOByteCount		__offset,
	IOByteCount		__length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    kern_return_t  err;

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    mach_vm_address_t range0Addr = 0;
    mach_vm_size_t    range0Len  = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if (_task && (mapping->fAddressTask == _task)
     && (mapping->fAddressMap == get_task_map(_task))
     && (options & kIOMapAnywhere)
     && (1 == _rangesCount)
     && (0 == offset)
     && range0Addr
     && (length <= range0Len))
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if (!_memRef)
    {
        IOOptionBits createOptions = 0;
        if (!(kIOMapReadOnly & options))
        {
            createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
            if (kIODirectionOut == (kIODirectionOutIn & _flags))
            {
                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
            }
#endif
        }
        err = memoryReferenceCreate(createOptions, &_memRef);
        if (kIOReturnSuccess != err) return (err);
    }

    memory_object_t pager;
    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);

    // <upl_transpose //
    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        do
        {
            upl_t               redirUPL2;
            upl_size_t          size;
            upl_control_flags_t flags;
            unsigned int        lock_count;

            if (!_memRef || (1 != _memRef->count))
            {
                err = kIOReturnNotReadable;
                break;
            }

            size  = round_page(mapping->fLength);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
                        | UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));

            if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = NULL;
            }

            {
                // swap the memEntries since they now refer to different vm_objects
                IOMemoryReference * me = _memRef;
                _memRef = mapping->fMemory->_memRef;
                mapping->fMemory->_memRef = me;
            }
            if (pager)
                err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        while (false);
    }
    // upl_transpose> //
    else
    {
        err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
#if IOTRACKING
        if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
        {
            // only dram maps in the default on development case
            IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
        }
#endif /* IOTRACKING */
        if ((err == KERN_SUCCESS) && pager)
        {
            err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

            if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
            else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
            {
                mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
            }
        }
    }

    return (err);
}
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
                    mach_vm_address_t * address, mach_vm_size_t * size)
{
#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))

    IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

    if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);

    *task    = map->fAddressTask;
    *address = map->fAddress;
    *size    = map->fLength;

    return (kIOReturnSuccess);
}
#endif /* IOTRACKING */

IOReturn IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	__address,
	IOByteCount		__length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOMemoryMap::init(
        task_t			intoTask,
        mach_vm_address_t	toAddress,
        IOOptionBits		_options,
        mach_vm_size_t		_offset,
        mach_vm_size_t		_length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
IOReturn IOMemoryDescriptor::doMap(
	vm_map_t		__addressMap,
	IOVirtualAddress *	__address,
	IOOptionBits		options,
	IOByteCount		__offset,
	IOByteCount		__length )
{
    return (kIOReturnUnsupported);
}

IOReturn IOMemoryDescriptor::handleFault(
        void *			_pager,
	mach_vm_size_t		sourceOffset,
	mach_vm_size_t		length)
{
    if( kIOMemoryRedirected & _flags)
    {
#if DEBUG
	IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
	do {
	    SLEEP;
	} while( kIOMemoryRedirected & _flags );
    }

    return (kIOReturnSuccess);
}
IOReturn IOMemoryDescriptor::populateDevicePager(
        void *			_pager,
	vm_map_t		addressMap,
	mach_vm_address_t	address,
	mach_vm_size_t		sourceOffset,
	mach_vm_size_t		length,
        IOOptionBits		options )
{
    IOReturn		err = kIOReturnSuccess;
    memory_object_t	pager = (memory_object_t) _pager;
    mach_vm_size_t	size;
    mach_vm_size_t	bytes;
    mach_vm_size_t	page;
    mach_vm_size_t	pageOffset;
    mach_vm_size_t	pagerOffset;
    IOPhysicalLength	segLen, chunk;
    addr64_t		physAddr;
    IOOptionBits	type;

    type = _flags & kIOMemoryTypeMask;

    if (reserved->dp.pagerContig)
    {
	sourceOffset = 0;
	pagerOffset  = 0;
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
	// in the middle of the loop only map whole pages
	if( segLen >= bytes) segLen = bytes;
	else if (segLen != trunc_page(segLen))   err = kIOReturnVMError;
	if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;

	if (kIOReturnSuccess != err) break;

#if DEBUG || DEVELOPMENT
	if ((kIOMemoryTypeUPL != type)
	    && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
	{
	    OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
	}
#endif /* DEBUG || DEVELOPMENT */

	chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);

	for (page = 0;
	     (page < segLen) && (KERN_SUCCESS == err);
	     page += chunk)
	{
	    err = device_pager_populate_object(pager, pagerOffset,
		    (ppnum_t)(atop_64(physAddr + page)), chunk);
	    pagerOffset += chunk;
	}

	assert (KERN_SUCCESS == err);
	if (err) break;

	// This call to vm_fault causes an early pmap level resolution
	// of the mappings created above for kernel mappings, since
	// faulting in later can't take place from interrupt level.
	if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
	{
	    vm_fault(addressMap,
		     (vm_map_offset_t)trunc_page_64(address),
		     VM_PROT_READ|VM_PROT_WRITE,
		     FALSE, THREAD_UNINT, NULL,
		     (vm_map_offset_t)0);
	}

	sourceOffset += segLen - pageOffset;
	address += segLen;
	bytes -= segLen;
	pageOffset = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
IOReturn IOMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	__address,
	IOByteCount		__length )
{
    IOReturn	      err;
    IOMemoryMap *     mapping;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length) panic("doUnmap");

    mapping    = (IOMemoryMap *) __address;
    addressMap = mapping->fAddressMap;
    address    = mapping->fAddress;
    length     = mapping->fLength;

    if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
    else
    {
	if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
	    addressMap = IOPageableMapForAddress( address );
#if DEBUG
	if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
					       addressMap, address, length );
#endif
	err = mach_vm_deallocate( addressMap, address, length );
    }

#if IOTRACKING
    IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

    return (err);
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn		err = kIOReturnSuccess;
    IOMemoryMap *	mapping = 0;
    OSIterator *	iter;

    LOCK;

    if (doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
	if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            memory_object_t   pager;

            if( reserved)
                pager = (memory_object_t) reserved->dp.devicePager;
            else
                pager = MACH_PORT_NULL;

	    while( (mapping = (IOMemoryMap *) iter->getNextObject()))
	    {
		mapping->redirect( safeTask, doRedirect );
		if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
		{
		    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
		}
	    }

	    iter->release();
	}
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
	err = subMem->redirect( safeTask, doRedirect );
    else
	err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//      err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

	LOCK;

	do
	{
	    if (!fAddress)
		break;
	    if (!fAddressMap)
		break;

	    if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
	      && (0 == (fOptions & kIOMapStatic)))
	    {
		IOUnmapPages( fAddressMap, fAddress, fLength );
		err = kIOReturnSuccess;
#if DEBUG
		IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
	    }
	    else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
	    {
		IOOptionBits newMode;
		newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
		IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
	    }
	}
	while (false);
	UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	|| ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
	fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn IOMemoryMap::unmap( void )
{
    IOReturn	err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
	&& (0 == (kIOMapStatic & fOptions))) {

	err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
	err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap) unmap();
#if IOTRACKING
    else                  IOTrackingRemoveUser(gIOMapTracking, &fTracking);
#endif /* IOTRACKING */

    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress	 = 0;
    UNLOCK;
}

IOReturn IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}

// Overload the release mechanism. All mappings must be a member
// of a memory descriptor's _mappings set. This means that we
// always have 2 references on a mapping. When either of those
// references is released we need to free ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
	fMemory->removeMapping(this);
	UNLOCK;
	fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
	fOwner->removeMapping(this);
	UNLOCK;
    }

    if (fSuperMap)
	fSuperMap->release();

    if (fRedirUPL) {
	upl_commit(fRedirUPL, NULL, 0);
	upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
	fSuperMap->getVirtualAddress();
    else if (fAddressMap
		&& vm_map_is_64bit(fAddressMap)
		&& (sizeof(IOVirtualAddress) < 8))
    {
	OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t 	IOMemoryMap::getAddress()
{
    return( fAddress);
}

mach_vm_size_t 	IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */


task_t IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
	return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits IOMemoryMap::getMapOptions()
{
    return( fOptions);
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
IOMemoryMap * IOMemoryMap::copyCompatible(
		IOMemoryMap * newMapping )
{
    task_t		task      = newMapping->getAddressTask();
    mach_vm_address_t	toAddress = newMapping->fAddress;
    IOOptionBits	_options  = newMapping->fOptions;
    mach_vm_size_t	_offset   = newMapping->fOffset;
    mach_vm_size_t	_length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
	return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
	return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
	return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
	return( 0 );

    if( _offset < fOffset)
	return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
	return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
	newMapping = this;
    }
    else
    {
	newMapping->fSuperMap = this;
	newMapping->fOffset   = fOffset + _offset;
	newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}

IOReturn IOMemoryMap::wireRange(
    	uint32_t		options,
        mach_vm_size_t		offset,
        mach_vm_size_t		length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
    vm_prot_t prot;

    prot = (kIODirectionOutIn & options);
    if (prot)
    {
	prot |= VM_PROT_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
	kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
    }
    else
    {
	kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return (kr);
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress	address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
	gIOMemoryLock = IORecursiveLockAlloc();

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings) _mappings->release();

    if (reserved)
    {
	IODelete(reserved, IOMemoryDescriptorReserved, 1);
	reserved = NULL;
    }
    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
	task_t			intoTask,
	IOVirtualAddress	mapAddress,
	IOOptionBits		options )
{
    return (createMappingInTask( intoTask, mapAddress,
				    options | kIOMapStatic,
				    0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
	IOOptionBits		options )
{
    return (createMappingInTask( kernel_task, 0,
				options | kIOMapAnywhere,
				0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
	task_t		        intoTask,
	IOVirtualAddress	atAddress,
	IOOptionBits		options,
	IOByteCount		offset,
	IOByteCount		length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
	OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
	return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
				options, offset, length));
}
#endif /* !__LP64__ */

IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
	task_t			intoTask,
	mach_vm_address_t	atAddress,
	IOOptionBits		options,
	mach_vm_size_t		offset,
	mach_vm_size_t		length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
	length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
			options, offset, length )) {
	mapping->release();
	mapping = 0;
    }

    if (mapping)
	result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
	result = 0;

#if DEBUG
    if (!result)
	IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
	      this, atAddress, (uint32_t) options, offset, length);
#endif

    return (result);
}
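
/*
 * Illustrative sketch (not part of the original file): mapping a descriptor
 * into a user task with createMappingInTask() and reading the result. The
 * owning task and the read-only choice are hypothetical.
 */
#if 0
static IOMemoryMap *
MapIntoUserTask(IOMemoryDescriptor * md, task_t userTask)
{
    IOMemoryMap * map = md->createMappingInTask(userTask,
                                                0,      // let the kernel pick the address
                                                kIOMapAnywhere | kIOMapReadOnly);
    if (map) {
        IOLog("mapped at 0x%qx, length 0x%qx\n",
              (uint64_t) map->getAddress(), (uint64_t) map->getLength());
        // Releasing the map (map->release()) removes the mapping.
    }
    return map;
}
#endif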
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
			        IOOptionBits         options,
			        IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
			        IOOptionBits         options,
			        mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
	if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	{
	    physMem = fMemory;
	    physMem->retain();
	}

	if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
	{
	    upl_size_t          size = round_page(fLength);
	    upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
					| UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
					| UPL_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
	    if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
							   NULL, NULL,
							   &flags))
		fRedirUPL = 0;

	    if (physMem)
	    {
		IOUnmapPages( fAddressMap, fAddress, fLength );
		if ((false))
		    physMem->redirect(0, true);
	    }
	}

	if (newBackingMemory)
	{
	    if (newBackingMemory != fMemory)
	    {
		fOffset = 0;
		if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
							    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
							    offset, fLength))
		    err = kIOReturnError;
	    }
	    if (fRedirUPL)
	    {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
		fRedirUPL = 0;
	    }
	    if ((false) && physMem)
		physMem->redirect(0, false);
	}
    }
    while (false);

    UNLOCK;

    if (physMem)
	physMem->release();

    return (err);
}
IOMemoryMap * IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor *	owner,
	task_t			__intoTask,
	IOVirtualAddress	__address,
	IOOptionBits		options,
	IOByteCount		__offset,
	IOByteCount		__length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *	 result  = 0;
    OSIterator *	 iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
	if (kIOMapStatic & options)
	{
	    result = mapping;
	    addMapping(mapping);
	    mapping->setMemoryDescriptor(this, 0);
	    continue;
	}

	if (kIOMapUnique & options)
	{
	    addr64_t    phys;
	    IOByteCount physLen;

//	    if (owner != this)		continue;

	    if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
		|| ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    {
		phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		if (!phys || (physLen < length))
		    continue;

		mapDesc = IOMemoryDescriptor::withAddressRange(
				phys, length, getDirection() | kIOMemoryMapperNone, NULL);
		if (!mapDesc)
		    continue;
		offset = 0;
		mapping->fOffset = offset;
	    }
	}
	else
	{
	    // look for a compatible existing mapping
	    if( (iter = OSCollectionIterator::withCollection(_mappings)))
	    {
		IOMemoryMap * lookMapping;
		while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
		{
		    if ((result = lookMapping->copyCompatible(mapping)))
		    {
			addMapping(result);
			result->setMemoryDescriptor(this, offset);
			break;
		    }
		}
		iter->release();
	    }
	    if (result || (options & kIOMapReference))
	    {
		if (result != mapping)
		{
		    mapping->release();
		    mapping = NULL;
		}
		continue;
	    }
	}

	if (!mapDesc)
	{
	    mapDesc = this;
	    mapDesc->retain();
	}
	IOReturn kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
	if (kIOReturnSuccess == kr)
	{
	    result = mapping;
	    mapDesc->addMapping(result);
	    result->setMemoryDescriptor(mapDesc, offset);
	}
	else
	{
	    mapping->release();
	    mapping = NULL;
	}
    }
    while( false );

    UNLOCK;

    if (mapDesc)
	mapDesc->release();

    return (result);
}

void IOMemoryDescriptor::addMapping(
	IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
	if( _mappings )
	    _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
				 IOPhysicalAddress	address,
				 IOByteCount		length,
				 IODirection      	direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                   	IOVirtualRange * ranges,
                                   	UInt32           withCount,
                                   	IODirection      direction,
                                   	task_t           task,
                                  	bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(	IOPhysicalRange * ranges,
                                        	UInt32            withCount,
                                        	IODirection       direction,
                                        	bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
					IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    OSArray * array;

    struct SerData {
	user_addr_t address;
	user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;

    array = OSArray::withCapacity(4);
    if (!array)  return (false);

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
	Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
	    mach_vm_address_t addr; mach_vm_size_t len;
	    getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
	// The descriptor changed out from under us.  Give up.
        UNLOCK;
	result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
	user_addr_t addr = vcopy[index].address;
	IOByteCount len = (IOByteCount) vcopy[index].length;
	values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
	if (values[0] == 0) {
	    result = false;
	    goto bail;
	}
	values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
	if (values[1] == 0) {
	    result = false;
	    goto bail;
	}
	OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
	if (dict == 0) {
	    result = false;
	    goto bail;
	}
	array->setObject(dict);
	dict->release();
	values[0]->release();
	values[1]->release();
	values[0] = values[1] = 0;
    }

    result = array->serialize(s);

 bail:
    if (array)
	array->release();
    if (values[0])
	values[0]->release();
    if (values[1])
	values[1]->release();
    if (keys[0])
	keys[0]->release();
    if (keys[1])
	keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);

    return result;
}
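
/*
 * Illustrative sketch (not part of the original file): what the serialization
 * above produces - an array with one {address, length} dictionary per range.
 * The byte values shown in the comment are hypothetical.
 */
#if 0
static void
SerializeExample(IOGeneralMemoryDescriptor * md)
{
    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (s && md->serialize(s)) {
        // s->text() now holds XML of the form:
        //   <array>
        //     <dict>
        //       <key>address</key><integer size="64">0x12340000</integer>
        //       <key>length</key><integer size="64">0x4000</integer>
        //     </dict>
        //   </array>
        IOLog("%s\n", s->text());
    }
    if (s) s->release();
}
#endif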
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }