/*
 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define IOKIT_ENABLE_SHARED_PTR

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <IOKit/IOTimeStamp.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>
#include <os/cpp_util.h>
#include <os/base_private.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/memory_entry.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);
#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
    IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions

enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
	upl_t        fIOPL;
	vm_address_t fPageInfo;   // Pointer to page list or index into it
	uint64_t     fIOMDOffset; // The offset of this iopl in descriptor
	ppnum_t      fMappedPage; // Page number of first page in this iopl
	unsigned int fPageOffset; // Offset within first page of iopl
	unsigned int fFlags;      // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData {
	IOMapper *   fMapper;
	uint64_t     fDMAMapAlignment;
	uint64_t     fMappedBase;
	uint64_t     fMappedLength;
	uint64_t     fPreparationID;
#if IOTRACKING
	IOTracking   fWireTracking;
#endif /* IOTRACKING */
	unsigned int  fPageCnt;
	uint8_t       fDMAMapNumAddressBits;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:4;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))));
	//ioPLBlock fBlocks[1];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
	((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
	(offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
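// Illustrative sketch (not part of the original source): the ioGMDData blob is
// carried in the descriptor's _memoryEntries OSData, sized so that the page
// list and the ioPLBlock array share one variable-length allocation. Assuming
// a descriptor with `_pages` pages and `count` ranges, the accessors above
// compose roughly as follows:
//
//	unsigned int dataSize = computeDataSize(_pages, /* upls */ count * 2);
//	ioGMDData *  dataP    = getDataP(_memoryEntries);          // header of the blob
//	ioPLBlock *  ioplList = getIOPLList(dataP);                // after fPageList[fPageCnt]
//	UInt         numIOPLs = getNumIOPL(_memoryEntries, dataP);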
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
kern_return_t
device_data_action(
	uintptr_t            device_handle,
	ipc_port_t           device_pager,
	vm_prot_t            protection,
	vm_object_offset_t   offset,
	vm_size_t            size)
{
	kern_return_t        kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	OSSharedPtr<IOMemoryDescriptor> memDesc;

	LOCK;
	if (ref->dp.memory) {
		memDesc.reset(ref->dp.memory, OSRetain);
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc.reset();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

kern_return_t
device_close(
	uintptr_t     device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IODelete( ref, IOMemoryDescriptorReserved, 1 );

	return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len  = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len  = cur.length;
	}
}
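// Illustrative sketch (not part of the original source): callers walk the
// descriptor's range vector with getAddrLenForInd(), receiving each range's
// base and length through the reference arguments, e.g.:
//
//	mach_vm_address_t addr;
//	mach_vm_size_t    len;
//	for (UInt32 ind = 0; ind < _rangesCount; ind++) {
//		getAddrLenForInd(addr, len, type, _ranges, ind);
//		// ... accumulate lengths, look up physical pages, etc.
//	}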
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
	IOReturn err = kIOReturnSuccess;

	*control = VM_PURGABLE_SET_STATE;

	enum { kIOMemoryPurgeableControlMask = 15 };

	switch (kIOMemoryPurgeableControlMask & newState) {
	case kIOMemoryPurgeableKeepCurrent:
		*control = VM_PURGABLE_GET_STATE;
		break;

	case kIOMemoryPurgeableNonVolatile:
		*state = VM_PURGABLE_NONVOLATILE;
		break;
	case kIOMemoryPurgeableVolatile:
		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	case kIOMemoryPurgeableEmpty:
		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	default:
		err = kIOReturnBadArgument;
		break;
	}

	if (*control == VM_PURGABLE_SET_STATE) {
		// let VM know this call is from the kernel and is allowed to alter
		// the volatility of the memory entry even if it was created with
		// MAP_MEM_PURGABLE_KERNEL_ONLY
		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
	}

	return err;
}

static IOReturn
purgeableStateBits(int * state)
{
	IOReturn err = kIOReturnSuccess;

	switch (VM_PURGABLE_STATE_MASK & *state) {
	case VM_PURGABLE_NONVOLATILE:
		*state = kIOMemoryPurgeableNonVolatile;
		break;
	case VM_PURGABLE_VOLATILE:
		*state = kIOMemoryPurgeableVolatile;
		break;
	case VM_PURGABLE_EMPTY:
		*state = kIOMemoryPurgeableEmpty;
		break;
	default:
		*state = kIOMemoryPurgeableNonVolatile;
		err = kIOReturnNotReady;
		break;
	}
	return err;
}
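// Illustrative sketch (not part of the original source): a driver toggles the
// purgeability of a buffer it owns through the public setPurgeable() call,
// which funnels into the helpers above. Assuming `md` is a purgeable
// IOBufferMemoryDescriptor the driver created earlier:
//
//	IOOptionBits oldState;
//	IOReturn ret = md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//	// ... later, reclaim it and learn whether the contents survived:
//	ret = md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//	bool contentsLost = (kIOMemoryPurgeableEmpty == oldState);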
typedef struct {
	unsigned int wimg;
	unsigned int object_type;
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache]       = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache]       = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache]     = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache]  = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache]      = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite]        = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache]      = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered]    = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	vm_prot_t prot = 0;
	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
	return prot;
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	if (cacheMode == kIODefaultCache) {
		return -1U;
	}
	return iomd_mem_types[cacheMode].wimg;
}

static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)
{
	pagerFlags &= VM_WIMG_MASK;
	IOOptionBits cacheMode = kIODefaultCache;
	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
		if (iomd_mem_types[i].wimg == pagerFlags) {
			cacheMode = i;
			break;
		}
	}
	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
}
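// Illustrative sketch (not part of the original source): the table above is
// what translates the kIOMapCacheMask bits a driver passes at map time into
// VM WIMG / named-entry flags. Assuming `md` is an IOMemoryDescriptor the
// driver already holds, a write-combined kernel mapping could be requested as:
//
//	// kIOMapWriteCombineCache selects the VM_WIMG_WCOMB row above
//	IOMemoryMap * mapping = md->map(kIOMapAnywhere | kIOMapWriteCombineCache);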
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOMemoryEntry {
	ipc_port_t entry;
	int64_t    offset;
	uint64_t   size;
	uint64_t   start;
};

struct IOMemoryReference {
	volatile SInt32            refCount;
	vm_prot_t                  prot;
	uint32_t                   capacity;
	uint32_t                   count;
	struct IOMemoryReference * mapRef;
	IOMemoryEntry              entries[0];
};

enum{
	kIOMemoryReferenceReuse = 0x00000001,
	kIOMemoryReferenceWrite = 0x00000002,
	kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
	IOMemoryReference * ref;
	size_t              newSize, oldSize, copySize;

	newSize = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + capacity * sizeof(ref->entries[0]));
	ref = (typeof(ref))IOMalloc(newSize);
	if (realloc) {
		oldSize = (sizeof(IOMemoryReference)
		    - sizeof(realloc->entries)
		    + realloc->capacity * sizeof(realloc->entries[0]));
		copySize = oldSize;
		if (copySize > newSize) {
			copySize = newSize;
		}
		if (ref) {
			bcopy(realloc, ref, copySize);
		}
		IOFree(realloc, oldSize);
	} else if (ref) {
		bzero(ref, sizeof(*ref));
		ref->refCount = 1;
		OSIncrementAtomic(&gIOMemoryReferenceCount);
	}
	if (!ref) {
		return NULL;
	}
	ref->capacity = capacity;
	return ref;
}

void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
	IOMemoryEntry * entries;
	size_t          size;

	if (ref->mapRef) {
		memoryReferenceFree(ref->mapRef);
		ref->mapRef = NULL;
	}

	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		ipc_port_release_send(entries->entry);
	}
	size = (sizeof(IOMemoryReference)
	    - sizeof(ref->entries)
	    + ref->capacity * sizeof(ref->entries[0]));
	IOFree(ref, size);

	OSDecrementAtomic(&gIOMemoryReferenceCount);
}

void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
	if (1 == OSDecrementAtomic(&ref->refCount)) {
		memoryReferenceFree(ref);
	}
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
	IOOptionBits         options,
	IOMemoryReference ** reference)
{
	enum { kCapacity = 4, kCapacityInc = 4 };

	IOReturn             err;
	IOMemoryReference *  ref;
	IOMemoryEntry *      entries;
	IOMemoryEntry *      cloneEntries;
	vm_map_t             map;
	ipc_port_t           entry, cloneEntry;
	vm_prot_t            prot;
	memory_object_size_t actualSize;
	uint32_t             rangeIdx;
	uint32_t             count;
	mach_vm_address_t    entryAddr, endAddr, entrySize;
	mach_vm_size_t       srcAddr, srcLen;
	mach_vm_size_t       nextAddr, nextLen;
	mach_vm_size_t       offset, remain;
	vm_map_offset_t      overmap_start = 0, overmap_end = 0;
	int                  misaligned_start = 0, misaligned_end = 0;
	IOByteCount          physLen;
	IOOptionBits         type = (_flags & kIOMemoryTypeMask);
	IOOptionBits         cacheMode;
	unsigned int         pagerFlags;
	vm_tag_t             tag;
	vm_named_entry_kernel_flags_t vmne_kflags;

	ref = memoryReferenceAlloc(kCapacity, NULL);
	if (!ref) {
		return kIOReturnNoMemory;
	}

	tag = (vm_tag_t) getVMTag(kernel_map);
	vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
	entries = &ref->entries[0];
	count = 0;
	err = KERN_SUCCESS;

	offset = 0;
	rangeIdx = 0;
	if (_task) {
		getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
	} else {
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen = physLen;

		// default cache mode for physical
		if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
			IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
			_flags |= (mode << kIOMemoryBufferCacheShift);
		}
	}

	// cache mode & vm_prot
	prot = VM_PROT_READ;
	cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
	prot |= vmProtForCacheMode(cacheMode);
	// VM system requires write access to change cache mode
	if (kIODefaultCache != cacheMode) {
		prot |= VM_PROT_WRITE;
	}
	if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceWrite & options) {
		prot |= VM_PROT_WRITE;
	}
	if (kIOMemoryReferenceCOW & options) {
		prot |= MAP_MEM_VM_COPY;
	}

	if (kIOMemoryUseReserve & _flags) {
		prot |= MAP_MEM_GRAB_SECLUDED;
	}

	if ((kIOMemoryReferenceReuse & options) && _memRef) {
		cloneEntries = &_memRef->entries[0];
		prot |= MAP_MEM_NAMED_REUSE;
	}

	if (_task) {
		// virtual ranges
		if (kIOMemoryBufferPageable & _flags) {
			int ledger_tag, ledger_no_footprint;

			// IOBufferMemoryDescriptor alloc - set flags for entry + object create
			prot |= MAP_MEM_NAMED_CREATE;

			// default accounting settings:
			//   + "none" ledger tag
			//   + include in footprint
			// can be changed later with ::setOwnership()
			ledger_tag = VM_LEDGER_TAG_NONE;
			ledger_no_footprint = 0;

			if (kIOMemoryBufferPurgeable & _flags) {
				prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
				if (VM_KERN_MEMORY_SKYWALK == tag) {
					// Skywalk purgeable memory accounting:
					//   + "network" ledger tag
					//   + not included in footprint
					ledger_tag = VM_LEDGER_TAG_NETWORK;
					ledger_no_footprint = 1;
				} else {
					// regular purgeable memory accounting:
					//   + "none" ledger tag
					//   + included in footprint
					ledger_tag = VM_LEDGER_TAG_NONE;
					ledger_no_footprint = 0;
				}
			}
			vmne_kflags.vmnekf_ledger_tag = ledger_tag;
			vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
			if (kIOMemoryUseReserve & _flags) {
				prot |= MAP_MEM_GRAB_SECLUDED;
			}

			prot |= VM_PROT_WRITE;
			map = NULL;
		} else {
			prot |= MAP_MEM_USE_DATA_ADDR;
			map = get_task_map(_task);
		}
		DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);

		remain = _length;
		while (remain) {
			srcAddr = nextAddr;
			srcLen  = nextLen;
			// coalesce addr range
			for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
				getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
				if ((srcAddr + srcLen) != nextAddr) {
					break;
				}
				srcLen += nextLen;
			}

			if (MAP_MEM_USE_DATA_ADDR & prot) {
				entryAddr = srcAddr;
				endAddr   = srcAddr + srcLen;
			} else {
				entryAddr = trunc_page_64(srcAddr);
				endAddr   = round_page_64(srcAddr + srcLen);
			}
			if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
				DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
			}

			do {
				entrySize = (endAddr - entryAddr);
				if (!entrySize) {
					break;
				}
				actualSize = entrySize;

				cloneEntry = MACH_PORT_NULL;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if (cloneEntries < &_memRef->entries[_memRef->count]) {
						cloneEntry = cloneEntries->entry;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}

				err = mach_make_memory_entry_internal(map,
				    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

				if (KERN_SUCCESS != err) {
					DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
					break;
				}
				if (MAP_MEM_USE_DATA_ADDR & prot) {
					if (actualSize > entrySize) {
						actualSize = entrySize;
					}
				} else if (actualSize > entrySize) {
					panic("mach_make_memory_entry_64 actualSize");
				}

				memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);

				if (count && overmap_start) {
					/*
					 * Track misaligned start for all
					 * except the first entry.
					 */
					misaligned_start++;
				}
				if (overmap_end) {
					/*
					 * Ignore misaligned end for the
					 * last entry.
					 */
					if ((entryAddr + actualSize) != endAddr) {
						misaligned_end++;
					}
				}

				if (misaligned_start || misaligned_end) {
					DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
					ipc_port_release_send(entry);
					err = KERN_NOT_SUPPORTED;
					break;
				}

				if (count >= ref->capacity) {
					ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
					entries = &ref->entries[count];
				}
				entries->entry  = entry;
				entries->size   = actualSize;
				entries->offset = offset + (entryAddr - srcAddr);
				entries->start  = entryAddr;
				entryAddr += actualSize;
				if (MAP_MEM_NAMED_REUSE & prot) {
					if ((cloneEntries->entry == entries->entry)
					    && (cloneEntries->size == entries->size)
					    && (cloneEntries->offset == entries->offset)) {
						cloneEntries++;
					} else {
						prot &= ~MAP_MEM_NAMED_REUSE;
					}
				}
				entries++;
				count++;
			} while (true);

			offset += srcLen;
			remain -= srcLen;
		}
	} else {
		// _task == 0, physical or kIOMemoryTypeUPL
		memory_object_t pager;
		vm_size_t       size = ptoa_64(_pages);

		if (!getKernelReserved()) {
			panic("getKernelReserved");
		}

		reserved->dp.pagerContig = (1 == _rangesCount);
		reserved->dp.memory      = this;

		pagerFlags = pagerFlagsForCacheMode(cacheMode);
		if (-1U == pagerFlags) {
			panic("phys is kIODefaultCache");
		}
		if (reserved->dp.pagerContig) {
			pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
		}

		pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
		    size, pagerFlags);
		assert(pager);
		if (!pager) {
			DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
			err = kIOReturnVMError;
		} else {
			srcAddr   = nextAddr;
			entryAddr = trunc_page_64(srcAddr);
			err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
			    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
			assert(KERN_SUCCESS == err);
			if (KERN_SUCCESS != err) {
				device_pager_deallocate(pager);
			} else {
				reserved->dp.devicePager = pager;
				entries->entry  = entry;
				entries->size   = size;
				entries->offset = offset + (entryAddr - srcAddr);
				entries++;
				count++;
			}
		}
	}

	ref->count = count;
	ref->prot  = prot;

	if (_task && (KERN_SUCCESS == err)
	    && (kIOMemoryMapCopyOnWrite & _flags)
	    && !(kIOMemoryReferenceCOW & options)) {
		err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
		if (KERN_SUCCESS != err) {
			DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
		}
	}

	if (KERN_SUCCESS == err) {
		if (MAP_MEM_NAMED_REUSE & prot) {
			memoryReferenceFree(ref);
			OSIncrementAtomic(&_memRef->refCount);
			ref = _memRef;
		}
	} else {
		DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
		memoryReferenceFree(ref);
		ref = NULL;
	}

	*reference = ref;

	return err;
}
static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
	IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
	IOReturn        err;
	vm_map_offset_t addr;

	addr = ref->mapped;

	err = vm_map_enter_mem_object(map, &addr, ref->size,
#if __ARM_MIXED_PAGE_SIZE__
	    // TODO4K this should not be necessary...
	    (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
#else /* __ARM_MIXED_PAGE_SIZE__ */
	    (vm_map_offset_t) 0,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	    (((ref->options & kIOMapAnywhere)
	    ? VM_FLAGS_ANYWHERE
	    : VM_FLAGS_FIXED)),
	    VM_MAP_KERNEL_FLAGS_NONE,
	    ref->tag,
	    IPC_PORT_NULL,
	    (memory_object_offset_t) 0,
	    false,
	    ref->prot,
	    ref->prot,
	    VM_INHERIT_NONE);
	if (KERN_SUCCESS == err) {
		ref->mapped = (mach_vm_address_t) addr;
		ref->map = map;
	}

	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn        err;
	int64_t         offset = inoffset;
	uint32_t        rangeIdx, entryIdx;
	vm_map_offset_t addr, mapAddr;
	vm_map_offset_t pageOffset, entryOffset, remain, chunk;

	mach_vm_address_t nextAddr;
	mach_vm_size_t    nextLen;
	IOByteCount       physLen;
	IOMemoryEntry *   entry;
	vm_prot_t         prot, memEntryCacheMode;
	IOOptionBits      type;
	IOOptionBits      cacheMode;
	vm_tag_t          tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt              currentPageIndex = 0;
	bool              didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

	if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
		err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
		return err;
	}

	type = _flags & kIOMemoryTypeMask;

	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (typeof(tag))getVMTag(map);

	if (_task) {
		// Find first range for offset
		if (!_rangesCount) {
			return kIOReturnBadArgument;
		}
		for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
			getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
			if (remain < nextLen) {
				break;
			}
			remain -= nextLen;
		}
	} else {
		rangeIdx = 0;
		remain   = 0;
		nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
		nextLen  = physLen;
	}

	assert(remain < nextLen);
	if (remain >= nextLen) {
		DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
		return kIOReturnBadArgument;
	}

	nextAddr += remain;
	nextLen  -= remain;

#if __ARM_MIXED_PAGE_SIZE__
	pageOffset = (vm_map_page_mask(map) & nextAddr);
#else /* __ARM_MIXED_PAGE_SIZE__ */
	pageOffset = (page_mask & nextAddr);
#endif /* __ARM_MIXED_PAGE_SIZE__ */

	addr = 0;
	didAlloc = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
		if (pageOffset != (vm_map_page_mask(map) & addr)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
		}
		addr -= pageOffset;
	}

	// find first entry for offset
	for (entryIdx = 0;
	    (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
	    entryIdx++) {
	}
	entryIdx--;
	entry = &ref->entries[entryIdx];

	// allocate VM
	size = round_page_64(size + pageOffset);
	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = size;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}
		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * types as well.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted in.
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData * dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain  = size;
	mapAddr = addr;
	addr   += pageOffset;

	while (remain && (KERN_SUCCESS == err)) {
		entryOffset = offset - entry->offset;
		if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
			err = kIOReturnNotAligned;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
			break;
		}

		if (kIODefaultCache != cacheMode) {
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}

		entryOffset -= pageOffset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags;

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

			if (chunk > remain) {
				chunk = remain;
			}
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    prot, // cur
				    prot, // max
				    &pageList[currentPageIndex],
				    nb_pages);

				if (err || vm_map_page_mask(map) < PAGE_MASK) {
					DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				}

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
				err = vm_map_enter_mem_object(map,
				    &mapAddr,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    false, // copy
				    prot, // cur
				    prot, // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
				break;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			mapAddr += chunk;
			offset  += chunk - pageOffset;
		}
		pageOffset = 0;
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
			break;
		}
	}

	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) mach_vm_deallocate(map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
		DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
	}

	return err;
}
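// Illustrative sketch (not part of the original source): kIOMapPrefault only
// works on memory that was wired with prepare() first, because the UPL page
// list built there is what gets handed to vm_map_enter_mem_object_prefault()
// above. Assuming `md` is a driver-held IOMemoryDescriptor (return type shown
// in its raw-pointer form), a prefaulted kernel mapping might be requested as:
//
//	if (kIOReturnSuccess == md->prepare()) {
//		IOMemoryMap * mapping = md->createMappingInTask(kernel_task, 0,
//		    kIOMapAnywhere | kIOMapPrefault);
//		// pages are entered up front, so first access takes no fault
//	}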
#define LOGUNALIGN 0

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMapNew(
	IOMemoryReference * ref,
	vm_map_t            map,
	mach_vm_size_t      inoffset,
	mach_vm_size_t      size,
	IOOptionBits        options,
	mach_vm_address_t * inaddr)
{
	IOReturn        err;
	int64_t         offset = inoffset;
	uint32_t        entryIdx, firstEntryIdx;
	vm_map_offset_t addr, mapAddr, mapAddrOut;
	vm_map_offset_t entryOffset, remain, chunk;

	IOMemoryEntry *   entry;
	vm_prot_t         prot, memEntryCacheMode;
	IOOptionBits      type;
	IOOptionBits      cacheMode;
	vm_tag_t          tag;
	// for the kIOMapPrefault option.
	upl_page_info_t * pageList = NULL;
	UInt              currentPageIndex = 0;
	bool              didAlloc;

	DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);

	if (ref->mapRef) {
		err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
		return err;
	}

#if LOGUNALIGN
	printf("MAP offset %qx, %qx\n", inoffset, size);
#endif

	type = _flags & kIOMemoryTypeMask;

	prot = VM_PROT_READ;
	if (!(kIOMapReadOnly & options)) {
		prot |= VM_PROT_WRITE;
	}
	prot &= ref->prot;

	cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
	if (kIODefaultCache != cacheMode) {
		// VM system requires write access to update named entry cache mode
		memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
	}

	tag = (vm_tag_t) getVMTag(map);

	addr = 0;
	didAlloc = false;

	if (!(options & kIOMapAnywhere)) {
		addr = *inaddr;
	}

	// find first entry for offset
	for (firstEntryIdx = 0;
	    (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
	    firstEntryIdx++) {
	}
	firstEntryIdx--;

	// calculate required VM space

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	remain = size;
	int64_t iteroffset = offset;
	uint64_t mapSize = 0;
	while (remain) {
		entryOffset = iteroffset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
#if LOGUNALIGN
		printf("[%d] size %qx offset %qx start %qx iter %qx\n",
		    entryIdx, entry->size, entry->offset, entry->start, iteroffset);
#endif

		chunk = entry->size - entryOffset;
		if (chunk) {
			if (chunk > remain) {
				chunk = remain;
			}
			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapSize += entrySize;

			remain -= chunk;
			if (!remain) {
				break;
			}
			iteroffset += chunk; // - pageOffset;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (kIOMapOverwrite & options) {
		if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			map = IOPageableMapForAddress(addr);
		}
		err = KERN_SUCCESS;
	} else {
		IOMemoryDescriptorMapAllocRef ref;
		ref.map     = map;
		ref.tag     = tag;
		ref.options = options;
		ref.size    = mapSize;
		ref.prot    = prot;
		if (options & kIOMapAnywhere) {
			// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
			ref.mapped = 0;
		} else {
			ref.mapped = addr;
		}
		if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
		} else {
			err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
		}

		if (KERN_SUCCESS == err) {
			addr     = ref.mapped;
			map      = ref.map;
			didAlloc = true;
		}
#if LOGUNALIGN
		IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
#endif
	}

	/*
	 * If the memory is associated with a device pager but doesn't have a UPL,
	 * it will be immediately faulted in through the pager via populateDevicePager().
	 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
	 * types as well.
	 */
	if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
		options &= ~kIOMapPrefault;
	}

	/*
	 * Prefaulting is only possible if we wired the memory earlier. Check the
	 * memory type, and the underlying data.
	 */
	if (options & kIOMapPrefault) {
		/*
		 * The memory must have been wired by calling ::prepare(), otherwise
		 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted in.
		 */
		assert(_wireCount != 0);
		assert(_memoryEntries != NULL);
		if ((_wireCount == 0) ||
		    (_memoryEntries == NULL)) {
			return kIOReturnBadArgument;
		}

		// Get the page list.
		ioGMDData * dataP = getDataP(_memoryEntries);
		ioPLBlock const* ioplList = getIOPLList(dataP);
		pageList = getPageList(dataP);

		// Get the number of IOPLs.
		UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The research will go past it, so we'll need to go back to the
		 * right range at the end.
		 */
		UInt ioplIndex = 0;
		while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
			ioplIndex++;
		}
		ioplIndex--;

		// Retrieve the IOPL info block.
		ioPLBlock ioplInfo = ioplList[ioplIndex];

		/*
		 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
		 * array.
		 */
		if (ioplInfo.fFlags & kIOPLExternUPL) {
			pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
		} else {
			pageList = &pageList[ioplInfo.fPageInfo];
		}

		// Rebase [offset] into the IOPL in order to looks for the first page index.
		mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

		// Retrieve the index of the first page corresponding to the offset.
		currentPageIndex = atop_32(offsetInIOPL);
	}

	// enter mappings
	remain  = size;
	mapAddr = addr;

	entryIdx = firstEntryIdx;
	entry = &ref->entries[entryIdx];

	while (remain && (KERN_SUCCESS == err)) {
#if LOGUNALIGN
		printf("offset %qx, %qx\n", offset, entry->offset);
#endif
		if (kIODefaultCache != cacheMode) {
			vm_size_t unused = 0;
			err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
			    memEntryCacheMode, NULL, entry->entry);
			assert(KERN_SUCCESS == err);
		}
		entryOffset = offset - entry->offset;
		if (entryOffset >= entry->size) {
			panic("entryOffset");
		}
		chunk = entry->size - entryOffset;
#if LOGUNALIGN
		printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
#endif
		if (chunk) {
			vm_map_kernel_flags_t vmk_flags;

			vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
			vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

			if (chunk > remain) {
				chunk = remain;
			}
			mapAddrOut = mapAddr;
			if (options & kIOMapPrefault) {
				UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;

				err = vm_map_enter_mem_object_prefault(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    prot, // cur
				    prot, // max
				    &pageList[currentPageIndex],
				    nb_pages);

				// Compute the next index in the page list.
				currentPageIndex += nb_pages;
				assert(currentPageIndex <= _pages);
			} else {
#if LOGUNALIGN
				printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
#endif
				err = vm_map_enter_mem_object(map,
				    &mapAddrOut,
				    chunk, 0 /* mask */,
				    (VM_FLAGS_FIXED
				    | VM_FLAGS_OVERWRITE
				    | VM_FLAGS_RETURN_DATA_ADDR),
				    vmk_flags,
				    tag,
				    entry->entry,
				    entryOffset,
				    false, // copy
				    prot, // cur
				    prot, // max
				    VM_INHERIT_NONE);
			}
			if (KERN_SUCCESS != err) {
				panic("map enter err %x", err);
				break;
			}
#if LOGUNALIGN
			printf("mapAddr o %qx\n", mapAddrOut);
#endif
			if (entryIdx == firstEntryIdx) {
				addr = mapAddrOut;
			}
			remain -= chunk;
			if (!remain) {
				break;
			}
			offset += chunk;

			mach_vm_size_t entrySize;
			err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
			assert(KERN_SUCCESS == err);
			mapAddr += entrySize;
		}
		entry++;
		entryIdx++;
		if (entryIdx >= ref->count) {
			err = kIOReturnOverrun;
			break;
		}
	}

	if (KERN_SUCCESS != err) {
		DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
	}

	if ((KERN_SUCCESS != err) && didAlloc) {
		(void) mach_vm_deallocate(map, trunc_page_64(addr), size);
		addr = 0;
	}
	*inaddr = addr;

	return err;
}
uint64_t
IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
	IOMemoryReference * ref,
	uint64_t          * offset)
{
	kern_return_t      kr;
	vm_object_offset_t data_offset = 0;
	uint64_t           total;
	uint32_t           idx;

	if (offset) {
		*offset = (uint64_t) data_offset;
	}
	total = 0;
	for (idx = 0; idx < ref->count; idx++) {
		kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
		    &data_offset);
		if (KERN_SUCCESS != kr) {
			DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
		} else if (0 != data_offset) {
			DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
		}
		if (offset && !idx) {
			*offset = (uint64_t) data_offset;
		}
		total += round_page(data_offset + ref->entries[idx].size);
	}

	DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
	    (offset ? *offset : (vm_object_offset_t)-1), total);

	return total;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
	IOMemoryReference * ref,
	IOByteCount       * residentPageCount,
	IOByteCount       * dirtyPageCount)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	unsigned int    resident, dirty;
	unsigned int    totalResident, totalDirty;

	totalResident = totalDirty = 0;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;
		err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
		if (KERN_SUCCESS != err) {
			break;
		}
		totalResident += resident;
		totalDirty    += dirty;
	}

	if (residentPageCount) {
		*residentPageCount = totalResident;
	}
	if (dirtyPageCount) {
		*dirtyPageCount = totalDirty;
	}
	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
	IOMemoryReference * ref,
	IOOptionBits        newState,
	IOOptionBits      * oldState)
{
	IOReturn        err;
	IOMemoryEntry * entries;
	vm_purgable_t   control;
	int             totalState, state;

	totalState = kIOMemoryPurgeableNonVolatile;
	err = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		err = purgeableControlBits(newState, &control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
		if (KERN_SUCCESS != err) {
			break;
		}
		err = purgeableStateBits(&state);
		if (KERN_SUCCESS != err) {
			break;
		}

		if (kIOMemoryPurgeableEmpty == state) {
			totalState = kIOMemoryPurgeableEmpty;
		} else if (kIOMemoryPurgeableEmpty == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == totalState) {
			continue;
		} else if (kIOMemoryPurgeableVolatile == state) {
			totalState = kIOMemoryPurgeableVolatile;
		} else {
			totalState = kIOMemoryPurgeableNonVolatile;
		}
	}

	if (oldState) {
		*oldState = totalState;
	}
	return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
	IOMemoryReference * ref,
	task_t              newOwner,
	int                 newLedgerTag,
	IOOptionBits        newLedgerOptions)
{
	IOReturn        err, totalErr;
	IOMemoryEntry * entries;

	totalErr = kIOReturnSuccess;
	entries = ref->entries + ref->count;
	while (entries > &ref->entries[0]) {
		entries--;

		err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
		if (KERN_SUCCESS != err) {
			totalErr = err;
		}
	}

	return totalErr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(void *      address,
    IOByteCount   length,
    IODirection direction)
{
	return IOMemoryDescriptor::
	       withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (that) {
		if (that->initWithAddress(address, length, direction, task)) {
			return os::move(that);
		}
	}
	return nullptr;
}
#endif /* !__LP64__ */

OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withPhysicalAddress(
	IOPhysicalAddress       address,
	IOByteCount             length,
	IODirection             direction )
{
	return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
}

#ifndef __LP64__
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32           withCount,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (that) {
		if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
			return os::move(that);
		}
	}
	return nullptr;
}
#endif /* !__LP64__ */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
    mach_vm_size_t    length,
    IOOptionBits      options,
    task_t            task)
{
	IOAddressRange range = { address, length };
	return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
}

OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
    UInt32           rangeCount,
    IOOptionBits     options,
    task_t           task)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (that) {
		if (task) {
			options |= kIOMemoryTypeVirtual64;
		} else {
			options |= kIOMemoryTypePhysical64;
		}

		if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
			return os::move(that);
		}
	}

	return nullptr;
}
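// Illustrative sketch (not part of the original source): the usual driver-side
// path through these factories is withAddressRange() on a client task's buffer,
// followed by prepare()/complete() to wire and unwire it. Here task, userAddr
// and userLen are assumed to come from the client request:
//
//	OSSharedPtr<IOMemoryDescriptor> md = IOMemoryDescriptor::withAddressRange(
//	    userAddr, userLen, kIODirectionOutIn, task);
//	if (md && (kIOReturnSuccess == md->prepare())) {
//		// ... program DMA against the wired pages ...
//		md->complete();
//	}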
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits opts,
    IOMapper *   mapper)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();

	if (self
	    && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
		return nullptr;
	}

	return os::move(self);
}

bool
IOMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	return false;
}

OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
    UInt32          withCount,
    IODirection     direction,
    bool            asReference)
{
	OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
	if (that) {
		if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
			return os::move(that);
		}
	}
	return nullptr;
}

#ifndef __LP64__
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
    IOByteCount         offset,
    IOByteCount         length,
    IODirection         direction)
{
	return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
#endif /* !__LP64__ */
OSSharedPtr<IOMemoryDescriptor>
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
	IOGeneralMemoryDescriptor *origGenMD =
	    OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

	if (origGenMD) {
		return IOGeneralMemoryDescriptor::
		       withPersistentMemoryDescriptor(origGenMD);
	} else {
		return nullptr;
	}
}

OSSharedPtr<IOMemoryDescriptor>
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
	IOMemoryReference * memRef;
	OSSharedPtr<IOGeneralMemoryDescriptor> self;

	if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
		return nullptr;
	}

	if (memRef == originalMD->_memRef) {
		self.reset(originalMD, OSRetain);
		originalMD->memoryReferenceRelease(memRef);
		return os::move(self);
	}

	self = OSMakeShared<IOGeneralMemoryDescriptor>();
	IOMDPersistentInitData initData = { originalMD, memRef };

	if (self
	    && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
		return nullptr;
	}
	return os::move(self);
}
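// Illustrative sketch (not part of the original source): a persistent
// descriptor lets a later caller obtain a descriptor backed by the same named
// memory entries as an existing one. Assuming `md` is an IOMemoryDescriptor
// created with kIOMemoryPersistent:
//
//	OSSharedPtr<IOMemoryDescriptor> persistent =
//	    IOMemoryDescriptor::withPersistentMemoryDescriptor(md.get());
//	// `persistent` either retains `md` (its named entries were reused) or
//	// wraps a new descriptor sharing md's IOMemoryReference.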
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount   withLength,
    IODirection withDirection)
{
	_singleRange.v.address = (vm_offset_t) address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount    withLength,
    IODirection  withDirection,
    task_t       withTask)
{
	_singleRange.v.address = address;
	_singleRange.v.length  = withLength;

	return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress       address,
	IOByteCount             withLength,
	IODirection             withDirection )
{
	_singleRange.p.address = address;
	_singleRange.p.length  = withLength;

	return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
	IOPhysicalRange * ranges,
	UInt32            count,
	IODirection       direction,
	bool              reference)
{
	IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

	if (reference) {
		mdOpts |= kIOMemoryAsReference;
	}

	return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           count,
	IODirection      direction,
	task_t           task,
	bool             reference)
{
	IOOptionBits mdOpts = direction;

	if (reference) {
		mdOpts |= kIOMemoryAsReference;
	}

	if (task) {
		mdOpts |= kIOMemoryTypeVirtual;

		// Auto-prepare if this is a kernel memory descriptor as very few
		// clients bother to prepare() kernel memory.
		// But it was not enforced so what are you going to do?
		if (task == kernel_task) {
			mdOpts |= kIOMemoryAutoPrepare;
		}
	} else {
		mdOpts |= kIOMemoryTypePhysical;
	}

	return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 *  from a given task, several physical ranges, an UPL from the ubc
 *  system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */
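// Illustrative sketch (not part of the original source): because re-init is
// supported here, an existing IOGeneralMemoryDescriptor can be retargeted at a
// new set of ranges instead of being destroyed and reallocated. Assuming
// `gmd`, `newAddr`, `newLen` and `task` are the caller's:
//
//	IOAddressRange range = { newAddr, newLen };
//	bool ok = gmd->initWithOptions(&range, 1, 0, task,
//	    kIOMemoryTypeVirtual64 | kIODirectionOutIn, /* mapper */ NULL);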
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
	IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
	if (task
	    && (kIOMemoryTypeVirtual == type)
	    && vm_map_is_64bit(get_task_map(task))
	    && ((IOVirtualRange *) buffers)->address) {
		OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
		return false;
	}
#endif /* !__LP64__ */

	// Grab the original MD's configuation data to initialse the
	// arguments to this function.
	if (kIOMemoryTypePersistentMD == type) {
		IOMDPersistentInitData *initData = (typeof(initData))buffers;
		const IOGeneralMemoryDescriptor *orig = initData->fMD;
		ioGMDData *dataP = getDataP(orig->_memoryEntries);

		// Only accept persistent memory descriptors with valid dataP data.
		assert(orig->_rangesCount == 1);
		if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
			return false;
		}

		_memRef = initData->fMemRef; // Grab the new named entry
		options = orig->_flags & ~kIOMemoryAsReference;
		type    = options & kIOMemoryTypeMask;
		buffers = orig->_ranges.v;
		count   = orig->_rangesCount;

		// Now grab the original task and whatever mapper was previously used
		task   = orig->_task;
		mapper = dataP->fMapper;

		// We are ready to go through the original initialisation now
	}

	switch (type) {
	case kIOMemoryTypeUIO:
	case kIOMemoryTypeVirtual:
#ifndef __LP64__
	case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
		assert(task);
		if (!task) {
			return false;
		}
		break;

	case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
#ifndef __LP64__
	case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
	case kIOMemoryTypeUPL:
		assert(!task);
		break;
	default:
		return false; /* bad argument */
	}

	assert(buffers);
	assert(count);

	/*
	 * We can check the _initialized instance variable before having ever set
	 * it to an initial value because I/O Kit guarantees that all our instance
	 * variables are zeroed on an object's allocation.
	 */

	if (_initialized) {
		/*
		 * An existing memory descriptor is being retargeted to point to
		 * somewhere else.  Clean up our present state.
		 */
		IOOptionBits type = _flags & kIOMemoryTypeMask;
		if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
			while (_wireCount) {
				complete();
			}
		}
		if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
			if (kIOMemoryTypeUIO == type) {
				uio_free((uio_t) _ranges.v);
			}
#ifndef __LP64__
			else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
				IODelete(_ranges.v64, IOAddressRange, _rangesCount);
			}
#endif /* !__LP64__ */
			else {
				IODelete(_ranges.v, IOVirtualRange, _rangesCount);
			}
		}

		options |= (kIOMemoryRedirected & _flags);
		if (!(kIOMemoryRedirected & options)) {
			if (_memRef) {
				memoryReferenceRelease(_memRef);
				_memRef = NULL;
			}
			if (_mappings) {
				_mappings->flushCollection();
			}
		}
	} else {
		if (!super::init()) {
			return false;
		}
		_initialized = true;
	}
	// Grab the appropriate mapper
	if (kIOMemoryHostOrRemote & options) {
		options |= kIOMemoryMapperNone;
	}
	if (kIOMemoryMapperNone & options) {
		mapper = NULL; // No Mapper
	} else if (mapper == kIOMapperSystem) {
		IOMapper::checkForSystemMapper();
		gIOSystemMapper = mapper = IOMapper::gSystem;
	}

	// Remove the dynamic internal use flags from the initial setting
	options &= ~(kIOMemoryPreparedReadOnly);
	_flags   = options;
	_task    = task;

#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

	__iomd_reservedA = 0;
	__iomd_reservedB = 0;

	if (kIOMemoryThreadSafe & options) {
		if (!_prepareLock) {
			_prepareLock = IOLockAlloc();
		}
	} else if (_prepareLock) {
		IOLockFree(_prepareLock);
		_prepareLock = NULL;
	}

	if (kIOMemoryTypeUPL == type) {
		ioPLBlock iopl;
		ioGMDData * dataP;
		unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

		if (!initMemoryEntries(dataSize, mapper)) {
			return false;
		}
		dataP = getDataP(_memoryEntries);
		dataP->fPageCnt = 0;
		switch (kIOMemoryDirectionMask & options) {
		case kIODirectionOut:
			dataP->fDMAAccess = kIODMAMapReadAccess;
			break;
		case kIODirectionIn:
			dataP->fDMAAccess = kIODMAMapWriteAccess;
			break;
		case kIODirectionNone:
		case kIODirectionOutIn:
		default:
			panic("bad dir for upl 0x%x\n", (int) options);
			break;
		}
		//       _wireCount++;   // UPLs start out life wired

		_pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

		iopl.fIOPL = (upl_t) buffers;
		upl_set_referenced(iopl.fIOPL, true);
		upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

		if (upl_get_size(iopl.fIOPL) < (count + offset)) {
			panic("short external upl");
		}

		_highestPage = upl_get_highest_page(iopl.fIOPL);
		DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);

		// Set the flag kIOPLOnDevice convieniently equal to 1
		iopl.fFlags = pageList->device | kIOPLExternUPL;
		if (!pageList->device) {
			// Pre-compute the offset into the UPL's page list
			pageList = &pageList[atop_32(offset)];
			offset &= PAGE_MASK;
		}
		iopl.fIOMDOffset = 0;
		iopl.fMappedPage = 0;
		iopl.fPageInfo   = (vm_address_t) pageList;
		iopl.fPageOffset = offset;
		_memoryEntries->appendBytes(&iopl, sizeof(iopl));
	} else {
		// kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
		// kIOMemoryTypePhysical | kIOMemoryTypePhysical64

		// Initialize the memory descriptor
		if (options & kIOMemoryAsReference) {
#ifndef __LP64__
			_rangesIsAllocated = false;
#endif /* !__LP64__ */

			// Hack assignment to get the buffer arg into _ranges.
			// I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
			// work, C++ sigh.
			// This also initialises the uio & physical ranges.
			_ranges.v = (IOVirtualRange *) buffers;
		} else {
#ifndef __LP64__
			_rangesIsAllocated = true;
#endif /* !__LP64__ */
			switch (type) {
			case kIOMemoryTypeUIO:
				_ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
				break;

#ifndef __LP64__
			case kIOMemoryTypeVirtual64:
			case kIOMemoryTypePhysical64:
				if (count == 1
				    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
				    ) {
					if (kIOMemoryTypeVirtual64 == type) {
						type = kIOMemoryTypeVirtual;
					} else {
						type = kIOMemoryTypePhysical;
					}
					_flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
					_rangesIsAllocated = false;
					_ranges.v = &_singleRange.v;
					_singleRange.v.address = ((IOAddressRange *) buffers)->address;
					_singleRange.v.length  = ((IOAddressRange *) buffers)->length;
					break;
				}
				_ranges.v64 = IONew(IOAddressRange, count);
				if (!_ranges.v64) {
					return false;
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
				break;
#endif /* !__LP64__ */
			case kIOMemoryTypeVirtual:
			case kIOMemoryTypePhysical:
				if (count == 1) {
					_flags |= kIOMemoryAsReference;
#ifndef __LP64__
					_rangesIsAllocated = false;
#endif /* !__LP64__ */
					_ranges.v = &_singleRange.v;
				} else {
					_ranges.v = IONew(IOVirtualRange, count);
					if (!_ranges.v) {
						return false;
					}
				}
				bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
				break;
			}
		}
		_rangesCount = count;

		// Find starting address within the vector of ranges
		Ranges vec = _ranges;
		mach_vm_size_t totalLength = 0;
		unsigned int ind, pages = 0;
		for (ind = 0; ind < count; ind++) {
			mach_vm_address_t addr;
			mach_vm_address_t endAddr;
			mach_vm_size_t    len;

			// addr & len are returned by this function
			getAddrLenForInd(addr, len, type, vec, ind);

			if (_task) {
				mach_vm_size_t phys_size;
				kern_return_t kret;
				kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
				if (KERN_SUCCESS != kret) {
					break;
				}
				if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
					break;
				}
			} else {
				if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
					break;
				}
				if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
					break;
				}
				if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
					break;
				}
			}
			if (os_add_overflow(totalLength, len, &totalLength)) {
				break;
			}
			if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
				uint64_t highPage = atop_64(addr + len - 1);
				if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
					_highestPage = (ppnum_t) highPage;
					DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
				}
			}
		}
		if ((ind < count)
		    || (totalLength != ((IOByteCount) totalLength))) {
			return false; /* overflow */
		}

		_length = totalLength;
		_pages  = pages;

		// Auto-prepare memory at creation time.
		// Implied completion when descriptor is free-ed

		if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
			_wireCount++; // Physical MDs are, by definition, wired
		} else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
			ioGMDData *dataP;
			unsigned int dataSize;

			if (_pages > atop_64(max_mem)) {
				return false;
			}

			dataSize = computeDataSize(_pages, /* upls */ count * 2);
			if (!initMemoryEntries(dataSize, mapper)) {
				return false;
			}
			dataP = getDataP(_memoryEntries);
			dataP->fPageCnt = _pages;

			if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
			    && (VM_KERN_MEMORY_NONE == _kernelTag)) {
				_kernelTag = IOMemoryTag(kernel_map);
				if (_kernelTag == gIOSurfaceTag) {
					_userTag = VM_MEMORY_IOSURFACE;
				}
			}

			if ((kIOMemoryPersistent & _flags) && !_memRef) {
				IOReturn
				    err = memoryReferenceCreate(0, &_memRef);
				if (kIOReturnSuccess != err) {
					return false;
				}
			}

			if ((_flags & kIOMemoryAutoPrepare)
			    && prepare() != kIOReturnSuccess) {
				return false;
			}
		}
	}

	return true;
}
void
IOGeneralMemoryDescriptor::free()
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (reserved && reserved->dp.memory) {
		LOCK;
		reserved->dp.memory = NULL;
		UNLOCK;
	}
	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		ioGMDData * dataP;
		if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
			dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
			dataP->fMappedBaseValid = dataP->fMappedBase = 0;
		}
	} else {
		while (_wireCount) {
			complete();
		}
	}

	if (_memoryEntries) {
		_memoryEntries.reset();
	}

	if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
		if (kIOMemoryTypeUIO == type) {
			uio_free((uio_t) _ranges.v);
		}
#ifndef __LP64__
		else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
			IODelete(_ranges.v64, IOAddressRange, _rangesCount);
		}
#endif /* !__LP64__ */
		else {
			IODelete(_ranges.v, IOVirtualRange, _rangesCount);
		}
		_ranges.v = NULL;
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		if (reserved->dp.devicePager) {
			// memEntry holds a ref on the device pager which owns reserved
			// (IOMemoryDescriptorReserved) so no reserved access after this point
			device_pager_deallocate((memory_object_t) reserved->dp.devicePager);
		} else {
			IODelete(reserved, IOMemoryDescriptorReserved, 1);
		}
		reserved = NULL;
	}

	if (_memRef) {
		memoryReferenceRelease(_memRef);
	}
	if (_prepareLock) {
		IOLockFree(_prepareLock);
	}

	super::free();
}
#ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
	panic("IOGMD::unmapFromKernel deprecated");
}

void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
	panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
	if (_direction) {
		return _direction;
	}
#endif /* !__LP64__ */
	return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
	return _length;
}

void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
	_tag = tag;
}

IOOptionBits
IOMemoryDescriptor::getTag( void )
{
	return _tag;
}

uint64_t
IOMemoryDescriptor::getFlags(void)
{
	return _flags;
}
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
	addr64_t physAddr = 0;

	if (prepare() == kIOReturnSuccess) {
		physAddr = getPhysicalSegment64( offset, length );
		complete();
	}

	return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}

#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount endoffset;
    IOByteCount remaining;

    // Check that this entire I/O is within the available range
    if ((offset > _length)
        || os_add_overflow(length, offset, &endoffset)
        || (endoffset > _length)) {
        assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
        return 0;
    }
    if (offset >= _length) {
        return 0;
    }

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags) {
        LOCK;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {     // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64) {
            break;
        }

        // Clip segment length to remaining
        if (srcLen > remaining) {
            srcLen = remaining;
        }
        if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
            srcLen = (UINT_MAX - PAGE_SIZE + 1);
        }
        copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags) {
        UNLOCK;
    }

    assert(!remaining);

    return length - remaining;
}
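/*
 * Illustrative sketch (not part of the build): a caller copying the first
 * bytes of a descriptor into a local buffer with readBytes(). The descriptor
 * pointer "md" and the buffer size are hypothetical. readBytes() clips the
 * copy to the descriptor length and returns the number of bytes actually
 * copied.
 *
 *    uint8_t     header[64];
 *    IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *    if (copied != sizeof(header)) {
 *        // offset + length exceeded getLength(); only 'copied' bytes are valid
 *    }
 */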
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount endoffset;
    IOByteCount offset = inoffset;

    assert( !(kIOMemoryPreparedReadOnly & _flags));

    // Check that this entire I/O is within the available range
    if ((offset > _length)
        || os_add_overflow(length, offset, &endoffset)
        || (endoffset > _length)) {
        assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
        return 0;
    }
    if (kIOMemoryPreparedReadOnly & _flags) {
        return 0;
    }
    if (offset >= _length) {
        return 0;
    }

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags) {
        LOCK;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {     // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64) {
            break;
        }

        // Clip segment length to remaining
        if (dstLen > remaining) {
            dstLen = remaining;
        }
        if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
            dstLen = (UINT_MAX - PAGE_SIZE + 1);
        }
        if (!srcAddr) {
            bzero_phys(dstAddr64, (unsigned int) dstLen);
        } else {
            copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
                cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags) {
        UNLOCK;
    }

    assert(!remaining);

#if defined(__x86_64__)
    // copypv does not cppvFsnk on intel
#else
    if (!srcAddr) {
        performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
    }
#endif

    return length - remaining;
}
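/*
 * Illustrative sketch (not part of the build): filling a descriptor from a
 * kernel buffer with writeBytes(). The names below are hypothetical. The
 * routine asserts that the descriptor was not prepared read-only
 * (kIOMemoryPreparedReadOnly) and returns the byte count actually written.
 *
 *    static const uint8_t pattern[16] = { 0xA5 };
 *    IOByteCount written = md->writeBytes(0, pattern, sizeof(pattern));
 *    assert(written == sizeof(pattern));
 */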
2534 IOGeneralMemoryDescriptor::setPosition(IOByteCount position
)
2536 panic("IOGMD::setPosition deprecated");
2538 #endif /* !__LP64__ */
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount) {
        return kIOPreparationIDUnprepared;
    }

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
        || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
        IOMemoryDescriptor::setPreparationID();
        return IOMemoryDescriptor::getPreparationID();
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
        return kIOPreparationIDUnprepared;
    }

    if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
        SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
        OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
    }
    return dataP->fPreparationID;
}
void
IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
{
    if (reserved->creator) {
        task_deallocate(reserved->creator);
        reserved->creator = NULL;
    }
}

IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved) {
        reserved = IONewZero(IOMemoryDescriptorReserved, 1);
    }
    return reserved;
}
void
IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
        SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
        OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
    }
}

uint64_t
IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved) {
        return reserved->preparationID;
    }
    return kIOPreparationIDUnsupported;
}
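/*
 * Illustrative sketch (not part of the build) of the lock-free ID assignment
 * used above: OSIncrementAtomic64() hands out a unique candidate and
 * OSCompareAndSwap64() publishes it only if the field is still in its invalid
 * state, so concurrent callers all observe the same winner. The counter and
 * helper names below are hypothetical.
 *
 *    static volatile SInt64 gExampleID __attribute__((aligned(8))) = 1;
 *
 *    static uint64_t
 *    assignOnce(volatile UInt64 * field, uint64_t invalidValue)
 *    {
 *        if (invalidValue == *field) {
 *            SInt64 candidate = OSIncrementAtomic64(&gExampleID);
 *            OSCompareAndSwap64(invalidValue, candidate, field);
 *        }
 *        return *field;    // losers of the race read the winner's value
 *    }
 */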
void
IOMemoryDescriptor::setDescriptorID( void )
{
    if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
        SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
        OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
    }
}

uint64_t
IOMemoryDescriptor::getDescriptorID( void )
{
    setDescriptorID();

    if (reserved) {
        return reserved->descriptorID;
    }
    return kIODescriptorIDInvalid;
}
2628 IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2630 if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED
))) {
2631 return kIOReturnSuccess
;
2634 assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared
);
2635 if (getPreparationID() < kIOPreparationIDAlwaysPrepared
) {
2636 return kIOReturnBadArgument
;
2639 uint64_t descriptorID
= getDescriptorID();
2640 assert(descriptorID
!= kIODescriptorIDInvalid
);
2641 if (getDescriptorID() == kIODescriptorIDInvalid
) {
2642 return kIOReturnBadArgument
;
2645 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED
), descriptorID
, VM_KERNEL_ADDRHIDE(this), getLength());
2648 static const uint8_t num_segments_page
= 8;
2650 static const uint8_t num_segments_page
= 4;
2652 static const uint8_t num_segments_long
= 2;
2654 IOPhysicalAddress segments_page
[num_segments_page
];
2655 IOPhysicalRange segments_long
[num_segments_long
];
2656 memset(segments_page
, UINT32_MAX
, sizeof(segments_page
));
2657 memset(segments_long
, 0, sizeof(segments_long
));
2659 uint8_t segment_page_idx
= 0;
2660 uint8_t segment_long_idx
= 0;
2662 IOPhysicalRange physical_segment
;
2663 for (IOByteCount offset
= 0; offset
< getLength(); offset
+= physical_segment
.length
) {
2664 physical_segment
.address
= getPhysicalSegment(offset
, &physical_segment
.length
);
2666 if (physical_segment
.length
== 0) {
/*
 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
 * buffer memory, pack segment events according to the following.
 *
 * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
 * IOMDPA_MAPPED event emitted by the current thread_id.
 *
 * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page-aligned mappings of PAGE_SIZE length
 *      - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
 *      - unmapped pages will have a ppn of UINT32_MAX
 * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
 *      - address_0, length_0, address_1, length_1
 *      - unmapped pages will have an address of 0
 *
 * During each iteration, do the following depending on the length of the mapping:
 * 1. add the current segment to the appropriate queue of pending segments
 * 2. check whether we are operating on the same type of segment (PAGE/LONG) as the previous pass
 *    2a. if not, emit and reset all events in the previous queue
 * 3. check whether we have filled up the current queue of pending events
 *    3a. if so, emit and reset all events in the pending queue
 * 4. after completing all iterations, emit any events left in the pending queues
 */
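/*
 * Illustrative sketch (not part of the build): how two page-aligned physical
 * segments are packed into a single 64-bit trace argument for
 * IOMDPA_SEGMENTS_PAGE, matching the shifts used below. The addresses are
 * hypothetical.
 *
 *    IOPhysicalAddress seg0 = 0x123450000ULL;    // page-aligned, PAGE_SIZE long
 *    IOPhysicalAddress seg1 = 0x123460000ULL;
 *    uint64_t arg = ((uintptr_t) atop_64(seg0) << 32) | (ppnum_t) atop_64(seg1);
 *    // high 32 bits: ppn of seg0; low 32 bits: ppn of seg1
 */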
2693 bool emit_page
= false;
2694 bool emit_long
= false;
2695 if ((physical_segment
.address
& PAGE_MASK
) == 0 && physical_segment
.length
== PAGE_SIZE
) {
2696 segments_page
[segment_page_idx
] = physical_segment
.address
;
2699 emit_long
= segment_long_idx
!= 0;
2700 emit_page
= segment_page_idx
== num_segments_page
;
2702 if (os_unlikely(emit_long
)) {
2703 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG
),
2704 segments_long
[0].address
, segments_long
[0].length
,
2705 segments_long
[1].address
, segments_long
[1].length
);
2708 if (os_unlikely(emit_page
)) {
2710 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2711 ((uintptr_t) atop_64(segments_page
[0]) << 32) | (ppnum_t
) atop_64(segments_page
[1]),
2712 ((uintptr_t) atop_64(segments_page
[2]) << 32) | (ppnum_t
) atop_64(segments_page
[3]),
2713 ((uintptr_t) atop_64(segments_page
[4]) << 32) | (ppnum_t
) atop_64(segments_page
[5]),
2714 ((uintptr_t) atop_64(segments_page
[6]) << 32) | (ppnum_t
) atop_64(segments_page
[7]));
2716 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2717 (ppnum_t
) atop_32(segments_page
[1]),
2718 (ppnum_t
) atop_32(segments_page
[2]),
2719 (ppnum_t
) atop_32(segments_page
[3]),
2720 (ppnum_t
) atop_32(segments_page
[4]));
2724 segments_long
[segment_long_idx
] = physical_segment
;
2727 emit_page
= segment_page_idx
!= 0;
2728 emit_long
= segment_long_idx
== num_segments_long
;
2730 if (os_unlikely(emit_page
)) {
2732 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2733 ((uintptr_t) atop_64(segments_page
[0]) << 32) | (ppnum_t
) atop_64(segments_page
[1]),
2734 ((uintptr_t) atop_64(segments_page
[2]) << 32) | (ppnum_t
) atop_64(segments_page
[3]),
2735 ((uintptr_t) atop_64(segments_page
[4]) << 32) | (ppnum_t
) atop_64(segments_page
[5]),
2736 ((uintptr_t) atop_64(segments_page
[6]) << 32) | (ppnum_t
) atop_64(segments_page
[7]));
2738 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2739 (ppnum_t
) atop_32(segments_page
[1]),
2740 (ppnum_t
) atop_32(segments_page
[2]),
2741 (ppnum_t
) atop_32(segments_page
[3]),
2742 (ppnum_t
) atop_32(segments_page
[4]));
2747 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG
),
2748 segments_long
[0].address
, segments_long
[0].length
,
2749 segments_long
[1].address
, segments_long
[1].length
);
2753 if (os_unlikely(emit_page
)) {
2754 memset(segments_page
, UINT32_MAX
, sizeof(segments_page
));
2755 segment_page_idx
= 0;
2758 if (os_unlikely(emit_long
)) {
2759 memset(segments_long
, 0, sizeof(segments_long
));
2760 segment_long_idx
= 0;
2764 if (segment_page_idx
!= 0) {
2765 assert(segment_long_idx
== 0);
2767 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2768 ((uintptr_t) atop_64(segments_page
[0]) << 32) | (ppnum_t
) atop_64(segments_page
[1]),
2769 ((uintptr_t) atop_64(segments_page
[2]) << 32) | (ppnum_t
) atop_64(segments_page
[3]),
2770 ((uintptr_t) atop_64(segments_page
[4]) << 32) | (ppnum_t
) atop_64(segments_page
[5]),
2771 ((uintptr_t) atop_64(segments_page
[6]) << 32) | (ppnum_t
) atop_64(segments_page
[7]));
2773 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE
),
2774 (ppnum_t
) atop_32(segments_page
[1]),
2775 (ppnum_t
) atop_32(segments_page
[2]),
2776 (ppnum_t
) atop_32(segments_page
[3]),
2777 (ppnum_t
) atop_32(segments_page
[4]));
2779 } else if (segment_long_idx
!= 0) {
2780 assert(segment_page_idx
== 0);
2781 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG
),
2782 segments_long
[0].address
, segments_long
[0].length
,
2783 segments_long
[1].address
, segments_long
[1].length
);
2786 return kIOReturnSuccess
;
void
IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
{
    _kernelTag = (vm_tag_t) kernelTag;
    _userTag   = (vm_tag_t) userTag;
}

uint32_t
IOMemoryDescriptor::getVMTag(vm_map_t map)
{
    if (vm_kernel_map_is_kernel(map)) {
        if (VM_KERN_MEMORY_NONE != _kernelTag) {
            return (uint32_t) _kernelTag;
        }
    } else {
        if (VM_KERN_MEMORY_NONE != _userTag) {
            return (uint32_t) _userTag;
        }
    }
    return IOMemoryTag(map);
}
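/*
 * Illustrative sketch (not part of the build): the tag selection above prefers
 * an explicitly set kernel or user tag and falls back to the map's default. A
 * hypothetical caller about to wire pages might fetch the tag like this:
 *
 *    vm_tag_t tag = (vm_tag_t) md->getVMTag(kernel_map);
 *    // pass 'tag' along with the wiring / IOPL request so the pages are
 *    // charged to the right ledger (e.g. VM_MEMORY_IOSURFACE for IOSurface)
 */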
2812 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
2814 IOReturn err
= kIOReturnSuccess
;
2815 DMACommandOps params
;
2816 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
2819 params
= (op
& ~kIOMDDMACommandOperationMask
& op
);
2820 op
&= kIOMDDMACommandOperationMask
;
2822 if (kIOMDDMAMap
== op
) {
2823 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
2824 return kIOReturnUnderrun
;
2827 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
2830 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2831 return kIOReturnNoMemory
;
2834 if (_memoryEntries
&& data
->fMapper
) {
2835 bool remap
, keepMap
;
2836 dataP
= getDataP(_memoryEntries
);
2838 if (data
->fMapSpec
.numAddressBits
< dataP
->fDMAMapNumAddressBits
) {
2839 dataP
->fDMAMapNumAddressBits
= data
->fMapSpec
.numAddressBits
;
2841 if (data
->fMapSpec
.alignment
> dataP
->fDMAMapAlignment
) {
2842 dataP
->fDMAMapAlignment
= data
->fMapSpec
.alignment
;
2845 keepMap
= (data
->fMapper
== gIOSystemMapper
);
2846 keepMap
&= ((data
->fOffset
== 0) && (data
->fLength
== _length
));
2848 if ((data
->fMapper
== gIOSystemMapper
) && _prepareLock
) {
2849 IOLockLock(_prepareLock
);
2853 remap
|= (dataP
->fDMAMapNumAddressBits
< 64)
2854 && ((dataP
->fMappedBase
+ _length
) > (1ULL << dataP
->fDMAMapNumAddressBits
));
2855 remap
|= (dataP
->fDMAMapAlignment
> page_size
);
2857 if (remap
|| !dataP
->fMappedBaseValid
) {
2858 err
= md
->dmaMap(data
->fMapper
, md
, data
->fCommand
, &data
->fMapSpec
, data
->fOffset
, data
->fLength
, &data
->fAlloc
, &data
->fAllocLength
);
2859 if (keepMap
&& (kIOReturnSuccess
== err
) && !dataP
->fMappedBaseValid
) {
2860 dataP
->fMappedBase
= data
->fAlloc
;
2861 dataP
->fMappedBaseValid
= true;
2862 dataP
->fMappedLength
= data
->fAllocLength
;
2863 data
->fAllocLength
= 0; // IOMD owns the alloc now
2866 data
->fAlloc
= dataP
->fMappedBase
;
2867 data
->fAllocLength
= 0; // give out IOMD map
2868 md
->dmaMapRecord(data
->fMapper
, data
->fCommand
, dataP
->fMappedLength
);
2871 if ((data
->fMapper
== gIOSystemMapper
) && _prepareLock
) {
2872 IOLockUnlock(_prepareLock
);
2877 if (kIOMDDMAUnmap
== op
) {
2878 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
2879 return kIOReturnUnderrun
;
2881 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
2883 err
= md
->dmaUnmap(data
->fMapper
, data
->fCommand
, data
->fOffset
, data
->fAlloc
, data
->fAllocLength
);
2885 return kIOReturnSuccess
;
2888 if (kIOMDAddDMAMapSpec
== op
) {
2889 if (dataSize
< sizeof(IODMAMapSpecification
)) {
2890 return kIOReturnUnderrun
;
2893 IODMAMapSpecification
* data
= (IODMAMapSpecification
*) vData
;
2896 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2897 return kIOReturnNoMemory
;
2900 if (_memoryEntries
) {
2901 dataP
= getDataP(_memoryEntries
);
2902 if (data
->numAddressBits
< dataP
->fDMAMapNumAddressBits
) {
2903 dataP
->fDMAMapNumAddressBits
= data
->numAddressBits
;
2905 if (data
->alignment
> dataP
->fDMAMapAlignment
) {
2906 dataP
->fDMAMapAlignment
= data
->alignment
;
2909 return kIOReturnSuccess
;
2912 if (kIOMDGetCharacteristics
== op
) {
2913 if (dataSize
< sizeof(IOMDDMACharacteristics
)) {
2914 return kIOReturnUnderrun
;
2917 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
2918 data
->fLength
= _length
;
2919 data
->fSGCount
= _rangesCount
;
2920 data
->fPages
= _pages
;
2921 data
->fDirection
= getDirection();
2923 data
->fIsPrepared
= false;
2925 data
->fIsPrepared
= true;
2926 data
->fHighestPage
= _highestPage
;
2927 if (_memoryEntries
) {
2928 dataP
= getDataP(_memoryEntries
);
2929 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2930 UInt count
= getNumIOPL(_memoryEntries
, dataP
);
2932 data
->fPageAlign
= (ioplList
[0].fPageOffset
& PAGE_MASK
) | ~PAGE_MASK
;
2937 return kIOReturnSuccess
;
2938 } else if (kIOMDDMAActive
== op
) {
2941 prior
= OSAddAtomic16(1, &md
->_dmaReferences
);
2943 md
->_mapName
= NULL
;
2946 if (md
->_dmaReferences
) {
2947 OSAddAtomic16(-1, &md
->_dmaReferences
);
2949 panic("_dmaReferences underflow");
2952 } else if (kIOMDWalkSegments
!= op
) {
2953 return kIOReturnBadArgument
;
2956 // Get the next segment
2957 struct InternalState
{
2958 IOMDDMAWalkSegmentArgs fIO
;
2959 mach_vm_size_t fOffset2Index
;
2960 mach_vm_size_t fNextOffset
;
2964 // Find the next segment
2965 if (dataSize
< sizeof(*isP
)) {
2966 return kIOReturnUnderrun
;
2969 isP
= (InternalState
*) vData
;
2970 uint64_t offset
= isP
->fIO
.fOffset
;
2971 uint8_t mapped
= isP
->fIO
.fMapped
;
2972 uint64_t mappedBase
;
2974 if (mapped
&& (kIOMemoryRemote
& _flags
)) {
2975 return kIOReturnNotAttached
;
2978 if (IOMapper::gSystem
&& mapped
2979 && (!(kIOMemoryHostOnly
& _flags
))
2980 && (!_memoryEntries
|| !getDataP(_memoryEntries
)->fMappedBaseValid
)) {
2981 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2983 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2984 return kIOReturnNoMemory
;
2987 dataP
= getDataP(_memoryEntries
);
2988 if (dataP
->fMapper
) {
2989 IODMAMapSpecification mapSpec
;
2990 bzero(&mapSpec
, sizeof(mapSpec
));
2991 mapSpec
.numAddressBits
= dataP
->fDMAMapNumAddressBits
;
2992 mapSpec
.alignment
= dataP
->fDMAMapAlignment
;
2993 err
= md
->dmaMap(dataP
->fMapper
, md
, NULL
, &mapSpec
, 0, _length
, &dataP
->fMappedBase
, &dataP
->fMappedLength
);
2994 if (kIOReturnSuccess
!= err
) {
2997 dataP
->fMappedBaseValid
= true;
3002 if (IOMapper::gSystem
3003 && (!(kIOMemoryHostOnly
& _flags
))
3005 && (dataP
= getDataP(_memoryEntries
))
3006 && dataP
->fMappedBaseValid
) {
3007 mappedBase
= dataP
->fMappedBase
;
3013 if (offset
>= _length
) {
3014 return (offset
== _length
)? kIOReturnOverrun
: kIOReturnInternalError
;
3017 // Validate the previous offset
3019 mach_vm_size_t off2Ind
= isP
->fOffset2Index
;
3022 && (offset
== isP
->fNextOffset
|| off2Ind
<= offset
)) {
3025 ind
= off2Ind
= 0; // Start from beginning
3027 mach_vm_size_t length
;
3030 if ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
3031 // Physical address based memory descriptor
3032 const IOPhysicalRange
*physP
= (IOPhysicalRange
*) &_ranges
.p
[0];
3034 // Find the range after the one that contains the offset
3036 for (len
= 0; off2Ind
<= offset
; ind
++) {
3037 len
= physP
[ind
].length
;
3041 // Calculate length within range and starting address
3042 length
= off2Ind
- offset
;
3043 address
= physP
[ind
- 1].address
+ len
- length
;
3045 if (true && mapped
) {
3046 address
= mappedBase
+ offset
;
3048 // see how far we can coalesce ranges
3049 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
3050 len
= physP
[ind
].length
;
3057 // correct contiguous check overshoot
3062 else if ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
) {
3063 // Physical address based memory descriptor
3064 const IOAddressRange
*physP
= (IOAddressRange
*) &_ranges
.v64
[0];
3066 // Find the range after the one that contains the offset
3068 for (len
= 0; off2Ind
<= offset
; ind
++) {
3069 len
= physP
[ind
].length
;
3073 // Calculate length within range and starting address
3074 length
= off2Ind
- offset
;
3075 address
= physP
[ind
- 1].address
+ len
- length
;
3077 if (true && mapped
) {
3078 address
= mappedBase
+ offset
;
3080 // see how far we can coalesce ranges
3081 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
3082 len
= physP
[ind
].length
;
3088 // correct contiguous check overshoot
3092 #endif /* !__LP64__ */
3096 panic("IOGMD: not wired for the IODMACommand");
3099 assert(_memoryEntries
);
3101 dataP
= getDataP(_memoryEntries
);
3102 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
3103 UInt numIOPLs
= getNumIOPL(_memoryEntries
, dataP
);
3104 upl_page_info_t
*pageList
= getPageList(dataP
);
3106 assert(numIOPLs
> 0);
3108 // Scan through iopl info blocks looking for block containing offset
3109 while (ind
< numIOPLs
&& offset
>= ioplList
[ind
].fIOMDOffset
) {
3113 // Go back to actual range as search goes past it
3114 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
3115 off2Ind
= ioplInfo
.fIOMDOffset
;
3117 if (ind
< numIOPLs
) {
3118 length
= ioplList
[ind
].fIOMDOffset
;
3122 length
-= offset
; // Remainder within iopl
3124 // Subtract offset till this iopl in total list
3127 // If a mapped address is requested and this is a pre-mapped IOPL
3128 // then just need to compute an offset relative to the mapped base.
3130 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
3131 address
= trunc_page_64(mappedBase
) + ptoa_64(ioplInfo
.fMappedPage
) + offset
;
3132 continue; // Done leave do/while(false) now
3135 // The offset is rebased into the current iopl.
3136 // Now add the iopl 1st page offset.
3137 offset
+= ioplInfo
.fPageOffset
;
3139 // For external UPLs the fPageInfo field points directly to
3140 // the upl's upl_page_info_t array.
3141 if (ioplInfo
.fFlags
& kIOPLExternUPL
) {
3142 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
3144 pageList
= &pageList
[ioplInfo
.fPageInfo
];
3147 // Check for direct device non-paged memory
3148 if (ioplInfo
.fFlags
& kIOPLOnDevice
) {
3149 address
= ptoa_64(pageList
->phys_addr
) + offset
;
3150 continue; // Done leave do/while(false) now
3153 // Now we need compute the index into the pageList
3154 UInt pageInd
= atop_32(offset
);
3155 offset
&= PAGE_MASK
;
3157 // Compute the starting address of this segment
3158 IOPhysicalAddress pageAddr
= pageList
[pageInd
].phys_addr
;
3160 panic("!pageList phys_addr");
3163 address
= ptoa_64(pageAddr
) + offset
;
// length is currently set to the length of the remainder of the iopl.
3166 // We need to check that the remainder of the iopl is contiguous.
3167 // This is indicated by pageList[ind].phys_addr being sequential.
3168 IOByteCount contigLength
= PAGE_SIZE
- offset
;
3169 while (contigLength
< length
3170 && ++pageAddr
== pageList
[++pageInd
].phys_addr
) {
3171 contigLength
+= PAGE_SIZE
;
3174 if (contigLength
< length
) {
3175 length
= contigLength
;
3184 // Update return values and state
3185 isP
->fIO
.fIOVMAddr
= address
;
3186 isP
->fIO
.fLength
= length
;
3188 isP
->fOffset2Index
= off2Ind
;
3189 isP
->fNextOffset
= isP
->fIO
.fOffset
+ length
;
3191 return kIOReturnSuccess
;
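/*
 * Illustrative sketch (not part of the build): walking segments through
 * dmaCommandOperation(), the same pattern getPhysicalSegment() uses below.
 * kIOMDFirstSegment starts the walk and kIOMDWalkSegments continues it; the
 * state block carries fOffset in and fIOVMAddr/fLength out. "md" is a
 * hypothetical prepared descriptor.
 *
 *    IOMDDMAWalkSegmentState _state;
 *    IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *) &_state;
 *    state->fOffset = 0;
 *    state->fMapped = false;
 *    IOReturn ret = md->dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
 *    while (kIOReturnSuccess == ret) {
 *        // [state->fIOVMAddr, state->fIOVMAddr + state->fLength) is one run
 *        state->fOffset += state->fLength;
 *        ret = md->dmaCommandOperation(kIOMDWalkSegments, _state, sizeof(_state));
 *    }
 */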
3195 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
3198 mach_vm_address_t address
= 0;
3199 mach_vm_size_t length
= 0;
3200 IOMapper
* mapper
= gIOSystemMapper
;
3201 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3203 if (lengthOfSegment
) {
3204 *lengthOfSegment
= 0;
3207 if (offset
>= _length
) {
3211 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3212 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3213 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3214 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3216 if ((options
& _kIOMemorySourceSegment
) && (kIOMemoryTypeUPL
!= type
)) {
3217 unsigned rangesIndex
= 0;
3218 Ranges vec
= _ranges
;
3219 mach_vm_address_t addr
;
3221 // Find starting address within the vector of ranges
3223 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
3224 if (offset
< length
) {
3227 offset
-= length
; // (make offset relative)
3231 // Now that we have the starting range,
3232 // lets find the last contiguous range
3236 for (++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++) {
3237 mach_vm_address_t newAddr
;
3238 mach_vm_size_t newLen
;
3240 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
3241 if (addr
+ length
!= newAddr
) {
3247 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
3250 IOMDDMAWalkSegmentState _state
;
3251 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) (void *)&_state
;
3253 state
->fOffset
= offset
;
3254 state
->fLength
= _length
- offset
;
3255 state
->fMapped
= (0 == (options
& kIOMemoryMapperNone
)) && !(_flags
& kIOMemoryHostOrRemote
);
3257 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
3259 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
)) {
3260 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3261 ret
, this, state
->fOffset
,
3262 state
->fIOVMAddr
, state
->fLength
);
3264 if (kIOReturnSuccess
== ret
) {
3265 address
= state
->fIOVMAddr
;
3266 length
= state
->fLength
;
3269 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3270 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3272 if (mapper
&& ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))) {
3273 if ((options
& kIOMemoryMapperNone
) && !(_flags
& kIOMemoryMapperNone
)) {
3274 addr64_t origAddr
= address
;
3275 IOByteCount origLen
= length
;
3277 address
= mapper
->mapToPhysicalAddress(origAddr
);
3278 length
= page_size
- (address
& (page_size
- 1));
3279 while ((length
< origLen
)
3280 && ((address
+ length
) == mapper
->mapToPhysicalAddress(origAddr
+ length
))) {
3281 length
+= page_size
;
3283 if (length
> origLen
) {
3294 if (lengthOfSegment
) {
3295 *lengthOfSegment
= length
;
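/*
 * Illustrative sketch (not part of the build): iterating every physical
 * segment of a prepared descriptor with getPhysicalSegment(), the same loop
 * shape used by ktraceEmitPhysicalSegments() above. "md" is a hypothetical
 * prepared IOGeneralMemoryDescriptor.
 *
 *    IOByteCount segLen;
 *    for (IOByteCount off = 0; off < md->getLength(); off += segLen) {
 *        addr64_t phys = md->getPhysicalSegment(off, &segLen, kIOMemoryMapperNone);
 *        if (!phys || !segLen) {
 *            break;        // hole or error; stop walking
 *        }
 *        // [phys, phys + segLen) is physically contiguous
 *    }
 */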
3302 #pragma clang diagnostic push
3303 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3306 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
3308 addr64_t address
= 0;
3310 if (options
& _kIOMemorySourceSegment
) {
3311 address
= getSourceSegment(offset
, lengthOfSegment
);
3312 } else if (options
& kIOMemoryMapperNone
) {
3313 address
= getPhysicalSegment64(offset
, lengthOfSegment
);
3315 address
= getPhysicalSegment(offset
, lengthOfSegment
);
3320 #pragma clang diagnostic pop
3323 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
3325 return getPhysicalSegment(offset
, lengthOfSegment
, kIOMemoryMapperNone
);
3329 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
3331 addr64_t address
= 0;
3332 IOByteCount length
= 0;
3334 address
= getPhysicalSegment(offset
, lengthOfSegment
, 0);
3336 if (lengthOfSegment
) {
3337 length
= *lengthOfSegment
;
3340 if ((address
+ length
) > 0x100000000ULL
) {
3341 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3342 address
, (long) length
, (getMetaClass())->getClassName());
3345 return (IOPhysicalAddress
) address
;
3349 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
3351 IOPhysicalAddress phys32
;
3354 IOMapper
* mapper
= NULL
;
3356 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
3361 if (gIOSystemMapper
) {
3362 mapper
= gIOSystemMapper
;
3366 IOByteCount origLen
;
3368 phys64
= mapper
->mapToPhysicalAddress(phys32
);
3369 origLen
= *lengthOfSegment
;
3370 length
= page_size
- (phys64
& (page_size
- 1));
3371 while ((length
< origLen
)
3372 && ((phys64
+ length
) == mapper
->mapToPhysicalAddress(phys32
+ length
))) {
3373 length
+= page_size
;
3375 if (length
> origLen
) {
3379 *lengthOfSegment
= length
;
3381 phys64
= (addr64_t
) phys32
;
3388 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
3390 return (IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, 0);
3394 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
3396 return (IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, _kIOMemorySourceSegment
);
3399 #pragma clang diagnostic push
3400 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
3403 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
3404 IOByteCount
* lengthOfSegment
)
3406 if (_task
== kernel_task
) {
3407 return (void *) getSourceSegment(offset
, lengthOfSegment
);
3409 panic("IOGMD::getVirtualSegment deprecated");
3414 #pragma clang diagnostic pop
3415 #endif /* !__LP64__ */
3418 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
3420 IOMemoryDescriptor
*md
= const_cast<IOMemoryDescriptor
*>(this);
3421 DMACommandOps params
;
3424 params
= (op
& ~kIOMDDMACommandOperationMask
& op
);
3425 op
&= kIOMDDMACommandOperationMask
;
3427 if (kIOMDGetCharacteristics
== op
) {
3428 if (dataSize
< sizeof(IOMDDMACharacteristics
)) {
3429 return kIOReturnUnderrun
;
3432 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
3433 data
->fLength
= getLength();
3435 data
->fDirection
= getDirection();
3436 data
->fIsPrepared
= true; // Assume prepared - fails safe
3437 } else if (kIOMDWalkSegments
== op
) {
3438 if (dataSize
< sizeof(IOMDDMAWalkSegmentArgs
)) {
3439 return kIOReturnUnderrun
;
3442 IOMDDMAWalkSegmentArgs
*data
= (IOMDDMAWalkSegmentArgs
*) vData
;
3443 IOByteCount offset
= (IOByteCount
) data
->fOffset
;
3444 IOPhysicalLength length
, nextLength
;
3445 addr64_t addr
, nextAddr
;
3447 if (data
->fMapped
) {
3448 panic("fMapped %p %s %qx\n", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3450 addr
= md
->getPhysicalSegment(offset
, &length
, kIOMemoryMapperNone
);
3452 while (offset
< getLength()) {
3453 nextAddr
= md
->getPhysicalSegment(offset
, &nextLength
, kIOMemoryMapperNone
);
3454 if ((addr
+ length
) != nextAddr
) {
3457 length
+= nextLength
;
3458 offset
+= nextLength
;
3460 data
->fIOVMAddr
= addr
;
3461 data
->fLength
= length
;
3462 } else if (kIOMDAddDMAMapSpec
== op
) {
3463 return kIOReturnUnsupported
;
3464 } else if (kIOMDDMAMap
== op
) {
3465 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
3466 return kIOReturnUnderrun
;
3468 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
3470 err
= md
->dmaMap(data
->fMapper
, md
, data
->fCommand
, &data
->fMapSpec
, data
->fOffset
, data
->fLength
, &data
->fAlloc
, &data
->fAllocLength
);
3473 } else if (kIOMDDMAUnmap
== op
) {
3474 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
3475 return kIOReturnUnderrun
;
3477 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
3479 err
= md
->dmaUnmap(data
->fMapper
, data
->fCommand
, data
->fOffset
, data
->fAlloc
, data
->fAllocLength
);
3481 return kIOReturnSuccess
;
3483 return kIOReturnBadArgument
;
3486 return kIOReturnSuccess
;
3490 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState
,
3491 IOOptionBits
* oldState
)
3493 IOReturn err
= kIOReturnSuccess
;
3495 vm_purgable_t control
;
3498 assert(!(kIOMemoryRemote
& _flags
));
3499 if (kIOMemoryRemote
& _flags
) {
3500 return kIOReturnNotAttached
;
3504 err
= super::setPurgeable(newState
, oldState
);
3506 if (kIOMemoryThreadSafe
& _flags
) {
3510 // Find the appropriate vm_map for the given task
3512 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
)) {
3513 err
= kIOReturnNotReady
;
3515 } else if (!_task
) {
3516 err
= kIOReturnUnsupported
;
3519 curMap
= get_task_map(_task
);
3520 if (NULL
== curMap
) {
3521 err
= KERN_INVALID_ARGUMENT
;
3526 // can only do one range
3527 Ranges vec
= _ranges
;
3528 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3529 mach_vm_address_t addr
;
3531 getAddrLenForInd(addr
, len
, type
, vec
, 0);
3533 err
= purgeableControlBits(newState
, &control
, &state
);
3534 if (kIOReturnSuccess
!= err
) {
3537 err
= vm_map_purgable_control(curMap
, addr
, control
, &state
);
3539 if (kIOReturnSuccess
== err
) {
3540 err
= purgeableStateBits(&state
);
3545 if (kIOMemoryThreadSafe
& _flags
) {
3554 IOMemoryDescriptor::setPurgeable( IOOptionBits newState
,
3555 IOOptionBits
* oldState
)
3557 IOReturn err
= kIOReturnNotReady
;
3559 if (kIOMemoryThreadSafe
& _flags
) {
3563 err
= IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef
, newState
, oldState
);
3565 if (kIOMemoryThreadSafe
& _flags
) {
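/*
 * Illustrative sketch (not part of the build): marking a purgeable buffer
 * volatile while it is idle, then reclaiming it and checking whether the
 * contents survived. The state constants are the kIOMemoryPurgeable* values
 * this routine accepts; the descriptor "md" is hypothetical.
 *
 *    IOOptionBits oldState;
 *    md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *    // ... buffer not needed for a while ...
 *    IOReturn ret = md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *    if ((kIOReturnSuccess == ret) && (kIOMemoryPurgeableEmpty == oldState)) {
 *        // pages were reclaimed while volatile; contents must be regenerated
 *    }
 */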
3573 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner
,
3575 IOOptionBits newLedgerOptions
)
3577 IOReturn err
= kIOReturnSuccess
;
3579 assert(!(kIOMemoryRemote
& _flags
));
3580 if (kIOMemoryRemote
& _flags
) {
3581 return kIOReturnNotAttached
;
3584 if (iokit_iomd_setownership_enabled
== FALSE
) {
3585 return kIOReturnUnsupported
;
3589 err
= super::setOwnership(newOwner
, newLedgerTag
, newLedgerOptions
);
3591 err
= kIOReturnUnsupported
;
3598 IOMemoryDescriptor::setOwnership( task_t newOwner
,
3600 IOOptionBits newLedgerOptions
)
3602 IOReturn err
= kIOReturnNotReady
;
3604 assert(!(kIOMemoryRemote
& _flags
));
3605 if (kIOMemoryRemote
& _flags
) {
3606 return kIOReturnNotAttached
;
3609 if (iokit_iomd_setownership_enabled
== FALSE
) {
3610 return kIOReturnUnsupported
;
3613 if (kIOMemoryThreadSafe
& _flags
) {
3617 err
= IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef
, newOwner
, newLedgerTag
, newLedgerOptions
);
3619 IOMultiMemoryDescriptor
* mmd
;
3620 IOSubMemoryDescriptor
* smd
;
3621 if ((smd
= OSDynamicCast(IOSubMemoryDescriptor
, this))) {
3622 err
= smd
->setOwnership(newOwner
, newLedgerTag
, newLedgerOptions
);
3623 } else if ((mmd
= OSDynamicCast(IOMultiMemoryDescriptor
, this))) {
3624 err
= mmd
->setOwnership(newOwner
, newLedgerTag
, newLedgerOptions
);
3627 if (kIOMemoryThreadSafe
& _flags
) {
3636 IOMemoryDescriptor::getDMAMapLength(uint64_t * offset
)
3641 length
= IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef
, offset
);
3643 IOByteCount iterate
, segLen
;
3644 IOPhysicalAddress sourceAddr
, sourceAlign
;
3646 if (kIOMemoryThreadSafe
& _flags
) {
3651 while ((sourceAddr
= getPhysicalSegment(iterate
, &segLen
, _kIOMemorySourceSegment
))) {
3652 sourceAlign
= (sourceAddr
& page_mask
);
3653 if (offset
&& !iterate
) {
3654 *offset
= sourceAlign
;
3656 length
+= round_page(sourceAddr
+ segLen
) - trunc_page(sourceAddr
);
3659 if (kIOMemoryThreadSafe
& _flags
) {
3669 IOMemoryDescriptor::getPageCounts( IOByteCount
* residentPageCount
,
3670 IOByteCount
* dirtyPageCount
)
3672 IOReturn err
= kIOReturnNotReady
;
3674 assert(!(kIOMemoryRemote
& _flags
));
3675 if (kIOMemoryRemote
& _flags
) {
3676 return kIOReturnNotAttached
;
3679 if (kIOMemoryThreadSafe
& _flags
) {
3683 err
= IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef
, residentPageCount
, dirtyPageCount
);
3685 IOMultiMemoryDescriptor
* mmd
;
3686 IOSubMemoryDescriptor
* smd
;
3687 if ((smd
= OSDynamicCast(IOSubMemoryDescriptor
, this))) {
3688 err
= smd
->getPageCounts(residentPageCount
, dirtyPageCount
);
3689 } else if ((mmd
= OSDynamicCast(IOMultiMemoryDescriptor
, this))) {
3690 err
= mmd
->getPageCounts(residentPageCount
, dirtyPageCount
);
3693 if (kIOMemoryThreadSafe
& _flags
) {
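/*
 * Illustrative sketch (not part of the build): querying residency with
 * getPageCounts(). Both results are page counts, not byte counts; "md" is a
 * hypothetical descriptor.
 *
 *    IOByteCount resident = 0, dirty = 0;
 *    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *        // 'resident' pages are present; 'dirty' pages have been modified
 *    }
 */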
3701 #if defined(__arm__) || defined(__arm64__)
3702 extern "C" void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
, unsigned int remaining
, unsigned int *res
);
3703 extern "C" void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
, unsigned int remaining
, unsigned int *res
);
3704 #else /* defined(__arm__) || defined(__arm64__) */
3705 extern "C" void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
);
3706 extern "C" void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
);
3707 #endif /* defined(__arm__) || defined(__arm64__) */
static void
SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = (ppnum_t) atop_64(round_page_64(pa));
    end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
    for (; page < end; page++) {
        pmap_clear_noencrypt(page);
    }
}

static void
ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = (ppnum_t) atop_64(round_page_64(pa));
    end  = (ppnum_t) atop_64(trunc_page_64(pa + count));
    for (; page < end; page++) {
        pmap_set_noencrypt(page);
    }
}
3734 IOMemoryDescriptor::performOperation( IOOptionBits options
,
3735 IOByteCount offset
, IOByteCount length
)
3737 IOByteCount remaining
;
3739 void (*func
)(addr64_t pa
, unsigned int count
) = NULL
;
3740 #if defined(__arm__) || defined(__arm64__)
3741 void (*func_ext
)(addr64_t pa
, unsigned int count
, unsigned int remaining
, unsigned int *result
) = NULL
;
3744 assert(!(kIOMemoryRemote
& _flags
));
3745 if (kIOMemoryRemote
& _flags
) {
3746 return kIOReturnNotAttached
;
3750 case kIOMemoryIncoherentIOFlush
:
3751 #if defined(__arm__) || defined(__arm64__)
3752 func_ext
= &dcache_incoherent_io_flush64
;
3753 #if __ARM_COHERENT_IO__
3754 func_ext(0, 0, 0, &res
);
3755 return kIOReturnSuccess
;
3756 #else /* __ARM_COHERENT_IO__ */
3758 #endif /* __ARM_COHERENT_IO__ */
3759 #else /* defined(__arm__) || defined(__arm64__) */
3760 func
= &dcache_incoherent_io_flush64
;
3762 #endif /* defined(__arm__) || defined(__arm64__) */
3763 case kIOMemoryIncoherentIOStore
:
3764 #if defined(__arm__) || defined(__arm64__)
3765 func_ext
= &dcache_incoherent_io_store64
;
3766 #if __ARM_COHERENT_IO__
3767 func_ext(0, 0, 0, &res
);
3768 return kIOReturnSuccess
;
3769 #else /* __ARM_COHERENT_IO__ */
3771 #endif /* __ARM_COHERENT_IO__ */
3772 #else /* defined(__arm__) || defined(__arm64__) */
3773 func
= &dcache_incoherent_io_store64
;
3775 #endif /* defined(__arm__) || defined(__arm64__) */
3777 case kIOMemorySetEncrypted
:
3778 func
= &SetEncryptOp
;
3780 case kIOMemoryClearEncrypted
:
3781 func
= &ClearEncryptOp
;
3785 #if defined(__arm__) || defined(__arm64__)
3786 if ((func
== NULL
) && (func_ext
== NULL
)) {
3787 return kIOReturnUnsupported
;
3789 #else /* defined(__arm__) || defined(__arm64__) */
3791 return kIOReturnUnsupported
;
3793 #endif /* defined(__arm__) || defined(__arm64__) */
3795 if (kIOMemoryThreadSafe
& _flags
) {
3800 remaining
= length
= min(length
, getLength() - offset
);
3802 // (process another target segment?)
3806 dstAddr64
= getPhysicalSegment(offset
, &dstLen
, kIOMemoryMapperNone
);
3811 // Clip segment length to remaining
3812 if (dstLen
> remaining
) {
3815 if (dstLen
> (UINT_MAX
- PAGE_SIZE
+ 1)) {
3816 dstLen
= (UINT_MAX
- PAGE_SIZE
+ 1);
3818 if (remaining
> UINT_MAX
) {
3819 remaining
= UINT_MAX
;
3822 #if defined(__arm__) || defined(__arm64__)
3824 (*func
)(dstAddr64
, (unsigned int) dstLen
);
3827 (*func_ext
)(dstAddr64
, (unsigned int) dstLen
, (unsigned int) remaining
, &res
);
3833 #else /* defined(__arm__) || defined(__arm64__) */
3834 (*func
)(dstAddr64
, (unsigned int) dstLen
);
3835 #endif /* defined(__arm__) || defined(__arm64__) */
3838 remaining
-= dstLen
;
3841 if (kIOMemoryThreadSafe
& _flags
) {
3845 return remaining
? kIOReturnUnderrun
: kIOReturnSuccess
;
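/*
 * Illustrative sketch (not part of the build): flushing a buffer's cache
 * lines before handing it to a non-coherent device, using the operation
 * dispatch above. The offset and length cover the whole descriptor here;
 * "md" is hypothetical.
 *
 *    IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 *    // kIOMemoryIncoherentIOStore writes back without the full flush; on fully
 *    // coherent ARM configurations these calls return success without work
 */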
3852 #if defined(__i386__) || defined(__x86_64__)
3854 extern vm_offset_t kc_highest_nonlinkedit_vmaddr
;
3856 /* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
3857 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
3858 * kernel non-text data -- should we just add another range instead?
3860 #define io_kernel_static_start vm_kernel_stext
3861 #define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
3863 #elif defined(__arm__) || defined(__arm64__)
3865 extern vm_offset_t static_memory_end
;
3867 #if defined(__arm64__)
3868 #define io_kernel_static_start vm_kext_base
3869 #else /* defined(__arm64__) */
3870 #define io_kernel_static_start vm_kernel_stext
3871 #endif /* defined(__arm64__) */
3873 #define io_kernel_static_end static_memory_end
3876 #error io_kernel_static_end is undefined for this architecture
3879 static kern_return_t
3880 io_get_kernel_static_upl(
3883 upl_size_t
*upl_size
,
3884 unsigned int *page_offset
,
3886 upl_page_info_array_t page_list
,
3887 unsigned int *count
,
3888 ppnum_t
*highest_page
)
3890 unsigned int pageCount
, page
;
3892 ppnum_t highestPage
= 0;
3894 pageCount
= atop_32(round_page(*upl_size
+ (page_mask
& offset
)));
3895 if (pageCount
> *count
) {
3898 *upl_size
= (upl_size_t
) ptoa_64(pageCount
);
3901 *page_offset
= ((unsigned int) page_mask
& offset
);
3903 for (page
= 0; page
< pageCount
; page
++) {
3904 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
3908 page_list
[page
].phys_addr
= phys
;
3909 page_list
[page
].free_when_done
= 0;
3910 page_list
[page
].absent
= 0;
3911 page_list
[page
].dirty
= 0;
3912 page_list
[page
].precious
= 0;
3913 page_list
[page
].device
= 0;
3914 if (phys
> highestPage
) {
3919 *highest_page
= highestPage
;
3921 return (page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
;
3925 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
3927 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3928 IOReturn error
= kIOReturnSuccess
;
3930 upl_page_info_array_t pageInfo
;
3932 vm_tag_t tag
= VM_KERN_MEMORY_NONE
;
3933 mach_vm_size_t numBytesWired
= 0;
3935 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
3937 if ((kIODirectionOutIn
& forDirection
) == kIODirectionNone
) {
3938 forDirection
= (IODirection
) (forDirection
| getDirection());
3941 dataP
= getDataP(_memoryEntries
);
3942 upl_control_flags_t uplFlags
; // This Mem Desc's default flags for upl creation
3943 switch (kIODirectionOutIn
& forDirection
) {
3944 case kIODirectionOut
:
3945 // Pages do not need to be marked as dirty on commit
3946 uplFlags
= UPL_COPYOUT_FROM
;
3947 dataP
->fDMAAccess
= kIODMAMapReadAccess
;
3950 case kIODirectionIn
:
3951 dataP
->fDMAAccess
= kIODMAMapWriteAccess
;
3952 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3956 dataP
->fDMAAccess
= kIODMAMapReadAccess
| kIODMAMapWriteAccess
;
3957 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3962 if ((kIOMemoryPreparedReadOnly
& _flags
) && !(UPL_COPYOUT_FROM
& uplFlags
)) {
3963 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3964 error
= kIOReturnNotWritable
;
3967 IOTimeStampIntervalConstantFiltered
traceInterval(IODBG_MDESC(IOMDESC_WIRE
), VM_KERNEL_ADDRHIDE(this), forDirection
);
3970 mapper
= dataP
->fMapper
;
3971 dataP
->fMappedBaseValid
= dataP
->fMappedBase
= 0;
3973 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
3975 if (VM_KERN_MEMORY_NONE
== tag
) {
3976 tag
= IOMemoryTag(kernel_map
);
3979 if (kIODirectionPrepareToPhys32
& forDirection
) {
3981 uplFlags
|= UPL_NEED_32BIT_ADDR
;
3983 if (dataP
->fDMAMapNumAddressBits
> 32) {
3984 dataP
->fDMAMapNumAddressBits
= 32;
3987 if (kIODirectionPrepareNoFault
& forDirection
) {
3988 uplFlags
|= UPL_REQUEST_NO_FAULT
;
3990 if (kIODirectionPrepareNoZeroFill
& forDirection
) {
3991 uplFlags
|= UPL_NOZEROFILLIO
;
3993 if (kIODirectionPrepareNonCoherent
& forDirection
) {
3994 uplFlags
|= UPL_REQUEST_FORCE_COHERENCY
;
3999 // Note that appendBytes(NULL) zeros the data up to the desired length
4000 // and the length parameter is an unsigned int
4001 size_t uplPageSize
= dataP
->fPageCnt
* sizeof(upl_page_info_t
);
4002 if (uplPageSize
> ((unsigned int)uplPageSize
)) {
4003 error
= kIOReturnNoMemory
;
4004 traceInterval
.setEndArg2(error
);
4007 if (!_memoryEntries
->appendBytes(NULL
, (unsigned int) uplPageSize
)) {
4008 error
= kIOReturnNoMemory
;
4009 traceInterval
.setEndArg2(error
);
4014 // Find the appropriate vm_map for the given task
4016 if ((NULL
!= _memRef
) || ((_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
)))) {
4019 curMap
= get_task_map(_task
);
4022 // Iterate over the vector of virtual ranges
4023 Ranges vec
= _ranges
;
4024 unsigned int pageIndex
= 0;
4025 IOByteCount mdOffset
= 0;
4026 ppnum_t highestPage
= 0;
4029 IOMemoryEntry
* memRefEntry
= NULL
;
4031 memRefEntry
= &_memRef
->entries
[0];
4032 byteAlignUPL
= (0 != (MAP_MEM_USE_DATA_ADDR
& _memRef
->prot
));
4034 byteAlignUPL
= true;
4037 for (UInt range
= 0; mdOffset
< _length
; range
++) {
4039 mach_vm_address_t startPage
, startPageOffset
;
4040 mach_vm_size_t numBytes
;
4041 ppnum_t highPage
= 0;
4044 if (range
>= _memRef
->count
) {
4045 panic("memRefEntry");
4047 memRefEntry
= &_memRef
->entries
[range
];
4048 numBytes
= memRefEntry
->size
;
4051 startPageOffset
= 0;
4053 startPageOffset
= (memRefEntry
->start
& PAGE_MASK
);
4056 // Get the startPage address and length of vec[range]
4057 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
4059 startPageOffset
= 0;
4061 startPageOffset
= startPage
& PAGE_MASK
;
4062 startPage
= trunc_page_64(startPage
);
4065 iopl
.fPageOffset
= (typeof(iopl
.fPageOffset
))startPageOffset
;
4066 numBytes
+= startPageOffset
;
4069 iopl
.fMappedPage
= mapBase
+ pageIndex
;
4071 iopl
.fMappedPage
= 0;
4074 // Iterate over the current range, creating UPLs
4076 vm_address_t kernelStart
= (vm_address_t
) startPage
;
4080 } else if (_memRef
) {
4083 assert(_task
== kernel_task
);
4084 theMap
= IOPageableMapForAddress(kernelStart
);
4087 // ioplFlags is an in/out parameter
4088 upl_control_flags_t ioplFlags
= uplFlags
;
4089 dataP
= getDataP(_memoryEntries
);
4090 pageInfo
= getPageList(dataP
);
4091 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
4093 mach_vm_size_t ioplPhysSize
;
4094 upl_size_t ioplSize
;
4095 unsigned int numPageInfo
;
4098 error
= mach_memory_entry_map_size(memRefEntry
->entry
, NULL
/*physical*/, 0, memRefEntry
->size
, &ioplPhysSize
);
4099 DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef
, memRefEntry
, memRefEntry
->entry
, startPage
, numBytes
, ioplPhysSize
);
4101 error
= vm_map_range_physical_size(theMap
, startPage
, numBytes
, &ioplPhysSize
);
4102 DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef
, theMap
, startPage
, numBytes
, ioplPhysSize
);
4104 if (error
!= KERN_SUCCESS
) {
4106 DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef
, memRefEntry
, memRefEntry
->entry
, theMap
, startPage
, numBytes
, error
);
4108 DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef
, theMap
, startPage
, numBytes
, error
);
4110 printf("entry size error %d\n", error
);
4113 ioplPhysSize
= (ioplPhysSize
<= MAX_UPL_SIZE_BYTES
) ? ioplPhysSize
: MAX_UPL_SIZE_BYTES
;
4114 numPageInfo
= atop_32(ioplPhysSize
);
4116 if (numBytes
> ioplPhysSize
) {
4117 ioplSize
= ((typeof(ioplSize
))ioplPhysSize
);
4119 ioplSize
= ((typeof(ioplSize
))numBytes
);
4122 ioplSize
= ((typeof(ioplSize
))ioplPhysSize
);
4126 memory_object_offset_t entryOffset
;
4128 entryOffset
= mdOffset
;
4130 entryOffset
= (entryOffset
- memRefEntry
->offset
);
4132 entryOffset
= (entryOffset
- iopl
.fPageOffset
- memRefEntry
->offset
);
4134 if (ioplSize
> (memRefEntry
->size
- entryOffset
)) {
4135 ioplSize
= ((typeof(ioplSize
))(memRefEntry
->size
- entryOffset
));
4137 error
= memory_object_iopl_request(memRefEntry
->entry
,
4145 } else if ((theMap
== kernel_map
)
4146 && (kernelStart
>= io_kernel_static_start
)
4147 && (kernelStart
< io_kernel_static_end
)) {
4148 error
= io_get_kernel_static_upl(theMap
,
4158 error
= vm_map_create_upl(theMap
,
4160 (upl_size_t
*)&ioplSize
,
4168 if (error
!= KERN_SUCCESS
) {
4169 traceInterval
.setEndArg2(error
);
4170 DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error
, theMap
, (theMap
== kernel_map
), _memRef
, startPage
, ioplSize
);
4177 highPage
= upl_get_highest_page(iopl
.fIOPL
);
4179 if (highPage
> highestPage
) {
4180 highestPage
= highPage
;
4183 if (baseInfo
->device
) {
4185 iopl
.fFlags
= kIOPLOnDevice
;
4192 DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage
, numBytes
, iopl
.fPageOffset
, iopl
.fIOPL
, upl_get_data_offset(iopl
.fIOPL
));
4193 iopl
.fPageOffset
= (typeof(iopl
.fPageOffset
))upl_get_data_offset(iopl
.fIOPL
);
4195 if (startPage
!= (mach_vm_address_t
)-1) {
4196 // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4197 startPage
-= iopl
.fPageOffset
;
4199 ioplSize
= ((typeof(ioplSize
))ptoa_64(numPageInfo
));
4200 numBytes
+= iopl
.fPageOffset
;
4203 iopl
.fIOMDOffset
= mdOffset
;
4204 iopl
.fPageInfo
= pageIndex
;
4206 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
4207 // Clean up partial created and unsaved iopl
4209 upl_abort(iopl
.fIOPL
, 0);
4210 upl_deallocate(iopl
.fIOPL
);
4212 error
= kIOReturnNoMemory
;
4213 traceInterval
.setEndArg2(error
);
// Check for multiple iopls in one virtual range
4219 pageIndex
+= numPageInfo
;
4220 mdOffset
-= iopl
.fPageOffset
;
4221 numBytesWired
+= ioplSize
;
4222 if (ioplSize
< numBytes
) {
4223 numBytes
-= ioplSize
;
4224 if (startPage
!= (mach_vm_address_t
)-1) {
4225 startPage
+= ioplSize
;
4227 mdOffset
+= ioplSize
;
4228 iopl
.fPageOffset
= 0;
4230 iopl
.fMappedPage
= mapBase
+ pageIndex
;
4233 mdOffset
+= numBytes
;
4239 _highestPage
= highestPage
;
4240 DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage
);
4242 if (UPL_COPYOUT_FROM
& uplFlags
) {
4243 _flags
|= kIOMemoryPreparedReadOnly
;
4245 traceInterval
.setEndCodes(numBytesWired
, error
);
4249 if (!(_flags
& kIOMemoryAutoPrepare
) && (kIOReturnSuccess
== error
)) {
4250 dataP
= getDataP(_memoryEntries
);
4251 if (!dataP
->fWireTracking
.link
.next
) {
4252 IOTrackingAdd(gIOWireTracking
, &dataP
->fWireTracking
, ptoa(_pages
), false, tag
);
4255 #endif /* IOTRACKING */
4261 dataP
= getDataP(_memoryEntries
);
4262 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
4263 ioPLBlock
*ioplList
= getIOPLList(dataP
);
4265 for (UInt ioplIdx
= 0; ioplIdx
< done
; ioplIdx
++) {
4266 if (ioplList
[ioplIdx
].fIOPL
) {
4267 upl_abort(ioplList
[ioplIdx
].fIOPL
, 0);
4268 upl_deallocate(ioplList
[ioplIdx
].fIOPL
);
4271 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
4274 if (error
== KERN_FAILURE
) {
4275 error
= kIOReturnCannotWire
;
4276 } else if (error
== KERN_MEMORY_ERROR
) {
4277 error
= kIOReturnNoResources
;
bool
IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize;

    if (size > UINT_MAX) {
        return false;
    }
    dataSize = (unsigned int) size;
    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries) {
            return false;
        }
    } else if (!_memoryEntries->initWithCapacity(dataSize)) {
        return false;
    }

    _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }
    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fCompletionError      = false;
    dataP->fMappedBaseValid      = false;

    return true;
}
4322 IOMemoryDescriptor::dmaMap(
4324 IOMemoryDescriptor
* memory
,
4325 IODMACommand
* command
,
4326 const IODMAMapSpecification
* mapSpec
,
4329 uint64_t * mapAddress
,
4330 uint64_t * mapLength
)
4333 uint32_t mapOptions
;
4336 mapOptions
|= kIODMAMapReadAccess
;
4337 if (!(kIOMemoryPreparedReadOnly
& _flags
)) {
4338 mapOptions
|= kIODMAMapWriteAccess
;
4341 err
= mapper
->iovmMapMemory(memory
, offset
, length
, mapOptions
,
4342 mapSpec
, command
, NULL
, mapAddress
, mapLength
);
4344 if (kIOReturnSuccess
== err
) {
4345 dmaMapRecord(mapper
, command
, *mapLength
);
4352 IOMemoryDescriptor::dmaMapRecord(
4354 IODMACommand
* command
,
4357 IOTimeStampIntervalConstantFiltered
traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP
), VM_KERNEL_ADDRHIDE(this));
4358 kern_allocation_name_t alloc
;
4361 if ((alloc
= mapper
->fAllocName
) /* && mapper != IOMapper::gSystem */) {
4362 kern_allocation_update_size(mapper
->fAllocName
, mapLength
);
4368 prior
= OSAddAtomic16(1, &_dmaReferences
);
4370 if (alloc
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
4372 mapLength
= _length
;
4373 kern_allocation_update_subtotal(alloc
, _kernelTag
, mapLength
);
4381 IOMemoryDescriptor::dmaUnmap(
4383 IODMACommand
* command
,
4385 uint64_t mapAddress
,
4388 IOTimeStampIntervalConstantFiltered
traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP
), VM_KERNEL_ADDRHIDE(this));
4390 kern_allocation_name_t alloc
;
4391 kern_allocation_name_t mapName
;
4398 if (_dmaReferences
) {
4399 prior
= OSAddAtomic16(-1, &_dmaReferences
);
4401 panic("_dmaReferences underflow");
4406 traceInterval
.setEndArg1(kIOReturnSuccess
);
4407 return kIOReturnSuccess
;
4410 ret
= mapper
->iovmUnmapMemory(this, command
, mapAddress
, mapLength
);
4412 if ((alloc
= mapper
->fAllocName
)) {
4413 kern_allocation_update_size(alloc
, -mapLength
);
4414 if ((1 == prior
) && mapName
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
4415 mapLength
= _length
;
4416 kern_allocation_update_subtotal(mapName
, _kernelTag
, -mapLength
);
4420 traceInterval
.setEndArg1(ret
);
4425 IOGeneralMemoryDescriptor::dmaMap(
4427 IOMemoryDescriptor
* memory
,
4428 IODMACommand
* command
,
4429 const IODMAMapSpecification
* mapSpec
,
4432 uint64_t * mapAddress
,
4433 uint64_t * mapLength
)
4435 IOReturn err
= kIOReturnSuccess
;
4437 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
4440 if (kIOMemoryHostOnly
& _flags
) {
4441 return kIOReturnSuccess
;
4443 if (kIOMemoryRemote
& _flags
) {
4444 return kIOReturnNotAttached
;
4447 if ((type
== kIOMemoryTypePhysical
) || (type
== kIOMemoryTypePhysical64
)
4448 || offset
|| (length
!= _length
)) {
4449 err
= super::dmaMap(mapper
, memory
, command
, mapSpec
, offset
, length
, mapAddress
, mapLength
);
4450 } else if (_memoryEntries
&& _pages
&& (dataP
= getDataP(_memoryEntries
))) {
4451 const ioPLBlock
* ioplList
= getIOPLList(dataP
);
4452 upl_page_info_t
* pageList
;
4453 uint32_t mapOptions
= 0;
4455 IODMAMapSpecification mapSpec
;
4456 bzero(&mapSpec
, sizeof(mapSpec
));
4457 mapSpec
.numAddressBits
= dataP
->fDMAMapNumAddressBits
;
4458 mapSpec
.alignment
= dataP
->fDMAMapAlignment
;
4460 // For external UPLs the fPageInfo field points directly to
4461 // the upl's upl_page_info_t array.
4462 if (ioplList
->fFlags
& kIOPLExternUPL
) {
4463 pageList
= (upl_page_info_t
*) ioplList
->fPageInfo
;
4464 mapOptions
|= kIODMAMapPagingPath
;
4466 pageList
= getPageList(dataP
);
4469 if ((_length
== ptoa_64(_pages
)) && !(page_mask
& ioplList
->fPageOffset
)) {
4470 mapOptions
|= kIODMAMapPageListFullyOccupied
;
4473 assert(dataP
->fDMAAccess
);
4474 mapOptions
|= dataP
->fDMAAccess
;
4476 // Check for direct device non-paged memory
4477 if (ioplList
->fFlags
& kIOPLOnDevice
) {
4478 mapOptions
|= kIODMAMapPhysicallyContiguous
;
4481 IODMAMapPageList dmaPageList
=
4483 .pageOffset
= (uint32_t)(ioplList
->fPageOffset
& page_mask
),
4484 .pageListCount
= _pages
,
4485 .pageList
= &pageList
[0]
4487 err
= mapper
->iovmMapMemory(memory
, offset
, length
, mapOptions
, &mapSpec
,
4488 command
, &dmaPageList
, mapAddress
, mapLength
);
4490 if (kIOReturnSuccess
== err
) {
4491 dmaMapRecord(mapper
, command
, *mapLength
);
/*
 * Prepare the memory for an I/O transfer. This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer. The complete() method completes the processing of
 * the memory after the I/O transfer finishes. This method need not
 * be called for non-pageable memory.
 */
4509 IOGeneralMemoryDescriptor::prepare(IODirection forDirection
)
4511 IOReturn error
= kIOReturnSuccess
;
4512 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
4513 IOTimeStampIntervalConstantFiltered
traceInterval(IODBG_MDESC(IOMDESC_PREPARE
), VM_KERNEL_ADDRHIDE(this), forDirection
);
4515 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)) {
4516 traceInterval
.setEndArg1(kIOReturnSuccess
);
4517 return kIOReturnSuccess
;
4520 assert(!(kIOMemoryRemote
& _flags
));
4521 if (kIOMemoryRemote
& _flags
) {
4522 traceInterval
.setEndArg1(kIOReturnNotAttached
);
4523 return kIOReturnNotAttached
;
4527 IOLockLock(_prepareLock
);
4530 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
4531 if ((forDirection
& kIODirectionPrepareAvoidThrottling
) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4532 error
= kIOReturnNotReady
;
4535 error
= wireVirtual(forDirection
);
4538 if (kIOReturnSuccess
== error
) {
4539 if (1 == ++_wireCount
) {
4540 if (kIOMemoryClearEncrypt
& _flags
) {
4541 performOperation(kIOMemoryClearEncrypted
, 0, _length
);
4544 ktraceEmitPhysicalSegments();
4551 IOLockUnlock(_prepareLock
);
4553 traceInterval
.setEndArg1(error
);
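/*
 * Illustrative sketch (not part of the build): the prepare()/complete()
 * pairing this routine implements. Every successful prepare() must be
 * balanced by a complete() once the I/O finishes; "md" is hypothetical.
 *
 *    if (kIOReturnSuccess == md->prepare(kIODirectionOut)) {
 *        // pages are wired; safe to build a scatter/gather list and start DMA
 *        // ... issue the transfer and wait for it ...
 *        md->complete(kIODirectionOut);
 *    }
 */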
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */
IOReturn
IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
	IOOptionBits type = _flags & kIOMemoryTypeMask;
	ioGMDData  * dataP;
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		traceInterval.setEndArg1(kIOReturnSuccess);
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		traceInterval.setEndArg1(kIOReturnNotAttached);
		return kIOReturnNotAttached;
	}

	IOLockLock(_prepareLock);

	do {
		dataP = getDataP(_memoryEntries);
		if (!dataP) {
			break;
		}

		if (kIODirectionCompleteWithError & forDirection) {
			dataP->fCompletionError = true;
		}

		if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
			performOperation(kIOMemorySetEncrypted, 0, _length);
		}

		_wireCount--;
		if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
			ioPLBlock *ioplList = getIOPLList(dataP);
			UInt ind, count = getNumIOPL(_memoryEntries, dataP);

			if (_wireCount) {
				// kIODirectionCompleteWithDataValid & forDirection
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					vm_tag_t tag;
					tag = (typeof(tag))getVMTag(kernel_map);
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							iopl_valid_data(ioplList[ind].fIOPL, tag);
						}
					}
				}
			} else {
				if (_dmaReferences) {
					panic("complete() while dma active");
				}

				if (dataP->fMappedBaseValid) {
					dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
					dataP->fMappedBaseValid = dataP->fMappedBase = 0;
				}
#if IOTRACKING
				if (dataP->fWireTracking.link.next) {
					IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
				}
#endif /* IOTRACKING */

				// Only complete iopls that we created which are for TypeVirtual
				if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
					for (ind = 0; ind < count; ind++) {
						if (ioplList[ind].fIOPL) {
							if (dataP->fCompletionError) {
								upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
							} else {
								upl_commit(ioplList[ind].fIOPL, NULL, 0);
							}
							upl_deallocate(ioplList[ind].fIOPL);
						}
					}
				} else if (kIOMemoryTypeUPL == type) {
					upl_set_referenced(ioplList[0].fIOPL, false);
				}

				(void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

				dataP->fPreparationID = kIOPreparationIDUnprepared;
				_flags &= ~kIOMemoryPreparedReadOnly;

				if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
					IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
				}
			}
		}
	} while (false);

	IOLockUnlock(_prepareLock);

	traceInterval.setEndArg1(kIOReturnSuccess);
	return kIOReturnSuccess;
}
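// Illustrative sketch (not part of the original file): how a caller would
// typically pair prepare() and complete() around a transfer, per the
// comments above. The wrapper function name and its error handling are
// hypothetical; only the prepare()/complete() pairing comes from this
// file's documented contract.
#if 0
static IOReturn
ExampleWireForIO(IOMemoryDescriptor * md)
{
	// Page in and wire the (possibly pageable) memory before any I/O.
	IOReturn ret = md->prepare();
	if (kIOReturnSuccess != ret) {
		return ret;
	}

	// ... perform the DMA / I/O transfer against the wired pages here ...

	// Every successful prepare() must be balanced by a complete().
	return md->complete();
}
#endif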
IOReturn
IOGeneralMemoryDescriptor::doMap(
	vm_map_t           __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits       options,
	IOByteCount        __offset,
	IOByteCount        __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
	traceInterval.setEndArg1(kIOReturnSuccess);
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOGeneralMemoryDescriptor::doMap !64bit");
	}
#endif /* !__LP64__ */

	kern_return_t err;

	IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	IOOptionBits type = _flags & kIOMemoryTypeMask;
	Ranges vec = _ranges;

	mach_vm_address_t range0Addr = 0;
	mach_vm_size_t    range0Len  = 0;

	if ((offset >= _length) || ((offset + length) > _length)) {
		traceInterval.setEndArg1(kIOReturnBadArgument);
		DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
		// assert(offset == 0 && _length == 0 && length == 0);
		return kIOReturnBadArgument;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return 0;
	}

	getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

	// mapping source == dest? (could be much better)
	if (_task
	    && (mapping->fAddressTask == _task)
	    && (mapping->fAddressMap == get_task_map(_task))
	    && (options & kIOMapAnywhere)
	    && (!(kIOMapUnique & options))
	    && (1 == _rangesCount)
	    && (0 == offset)
	    && range0Addr
	    && (length <= range0Len)) {
		mapping->fAddress = range0Addr;
		mapping->fOptions |= kIOMapStatic;

		return kIOReturnSuccess;
	}

	if (!_memRef) {
		IOOptionBits createOptions = 0;
		if (!(kIOMapReadOnly & options)) {
			createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
			if ((kIODirectionOut == (kIODirectionOutIn & _flags))
			    && (!reserved || (reserved->creator != mapping->fAddressTask))) {
				OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
			}
#endif
		}
		err = memoryReferenceCreate(createOptions, &_memRef);
		if (kIOReturnSuccess != err) {
			traceInterval.setEndArg1(err);
			DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
			return err;
		}
	}

	memory_object_t pager;
	pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);

	// <upl_transpose //
	if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
		do {
			upl_t               redirUPL2;
			upl_size_t          size;
			upl_control_flags_t flags;
			unsigned int        lock_count;

			if (!_memRef || (1 != _memRef->count)) {
				err = kIOReturnNotReadable;
				DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
				break;
			}

			size = (upl_size_t) round_page(mapping->fLength);
			flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

			if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
			    NULL, NULL,
			    &flags, (vm_tag_t) getVMTag(kernel_map))) {
				redirUPL2 = NULL;
			}

			for (lock_count = 0;
			    IORecursiveLockHaveLock(gIOMemoryLock);
			    lock_count++) {
				UNLOCK;
			}
			err = upl_transpose(redirUPL2, mapping->fRedirUPL);
			for (; lock_count; lock_count--) {
				LOCK;
			}

			if (kIOReturnSuccess != err) {
				IOLog("upl_transpose(%x)\n", err);
				err = kIOReturnSuccess;
			}

			if (redirUPL2) {
				upl_commit(redirUPL2, NULL, 0);
				upl_deallocate(redirUPL2);
			}

			// swap the memEntries since they now refer to different vm_objects
			IOMemoryReference * me = _memRef;
			_memRef = mapping->fMemory->_memRef;
			mapping->fMemory->_memRef = me;

			if (pager) {
				err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
			}
		} while (false);
		// upl_transpose> //
	} else {
		err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
		if (err) {
			DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
		}
#if IOTRACKING
		if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
			// only dram maps in the default on development case
			IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
		}
#endif /* IOTRACKING */
		if ((err == KERN_SUCCESS) && pager) {
			err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

			if (err != KERN_SUCCESS) {
				doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
			} else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
				mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
			}
		}
	}

	traceInterval.setEndArg1(err);
	if (err) {
		DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
	}
	return err;
}
#if IOTRACKING
IOReturn
IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
    mach_vm_address_t * address, mach_vm_size_t * size)
{
#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))

	IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));

	if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
		return kIOReturnNotReady;
	}

	*task    = map->fAddressTask;
	*address = map->fAddress;
	*size    = map->fLength;

	return kIOReturnSuccess;
}
#endif /* IOTRACKING */
IOReturn
IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t         addressMap,
	IOVirtualAddress __address,
	IOByteCount      __length )
{
	IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
	IOReturn ret;
	ret = super::doUnmap(addressMap, __address, __length);
	traceInterval.setEndArg1(ret);
	return ret;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool
IOMemoryMap::init(
	task_t            intoTask,
	mach_vm_address_t toAddress,
	IOOptionBits      _options,
	mach_vm_size_t    _offset,
	mach_vm_size_t    _length )
{
	if (!intoTask) {
		return false;
	}

	if (!super::init()) {
		return false;
	}

	fAddressMap = get_task_map(intoTask);
	if (!fAddressMap) {
		return false;
	}
	vm_map_reference(fAddressMap);

	fAddressTask = intoTask;
	fOptions     = _options;
	fLength      = _length;
	fOffset      = _offset;
	fAddress     = toAddress;

	return true;
}

bool
IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
	if (!_memory) {
		return false;
	}

	if ((_offset + fLength) > _memory->getLength()) {
		return false;
	}

	OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
	if (fMemory) {
		if (fMemory != _memory) {
			fMemory->removeMapping(this);
		}
	}
	fMemory = os::move(tempval);

	return true;
}
IOReturn
IOMemoryDescriptor::doMap(
	vm_map_t           __addressMap,
	IOVirtualAddress * __address,
	IOOptionBits       options,
	IOByteCount        __offset,
	IOByteCount        __length )
{
	return kIOReturnUnsupported;
}
IOReturn
IOMemoryDescriptor::handleFault(
	void *         _pager,
	mach_vm_size_t sourceOffset,
	mach_vm_size_t length)
{
	if (kIOMemoryRedirected & _flags) {
#if DEBUG
		IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
		do {
			SLEEP;
		} while (kIOMemoryRedirected & _flags);
	}
	return kIOReturnSuccess;
}
IOReturn
IOMemoryDescriptor::populateDevicePager(
	void *            _pager,
	vm_map_t          addressMap,
	mach_vm_address_t address,
	mach_vm_size_t    sourceOffset,
	mach_vm_size_t    length,
	IOOptionBits      options )
{
	IOReturn         err = kIOReturnSuccess;
	memory_object_t  pager = (memory_object_t) _pager;
	mach_vm_size_t   size;
	mach_vm_size_t   bytes;
	mach_vm_size_t   page;
	mach_vm_size_t   pageOffset;
	mach_vm_size_t   pagerOffset;
	IOPhysicalLength segLen, chunk;
	addr64_t         physAddr;
	IOOptionBits     type;

	type = _flags & kIOMemoryTypeMask;

	if (reserved->dp.pagerContig) {
		sourceOffset = 0;
		pagerOffset  = 0;
	}

	physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
	assert( physAddr );
	pageOffset  = physAddr - trunc_page_64( physAddr );
	pagerOffset = sourceOffset;

	size = length + pageOffset;
	physAddr -= pageOffset;

	segLen += pageOffset;
	bytes = size;
	do {
		// in the middle of the loop only map whole pages
		if (segLen >= bytes) {
			segLen = bytes;
		} else if (segLen != trunc_page_64(segLen)) {
			err = kIOReturnVMError;
		}
		if (physAddr != trunc_page_64(physAddr)) {
			err = kIOReturnBadArgument;
		}

		if (kIOReturnSuccess != err) {
			break;
		}

#if DEBUG || DEVELOPMENT
		if ((kIOMemoryTypeUPL != type)
		    && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
			OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
		}
#endif /* DEBUG || DEVELOPMENT */

		chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);

		for (page = 0;
		    (page < segLen) && (KERN_SUCCESS == err);
		    page += chunk) {
			err = device_pager_populate_object(pager, pagerOffset,
			    (ppnum_t)(atop_64(physAddr + page)), chunk);
			pagerOffset += chunk;
		}

		assert(KERN_SUCCESS == err);
		if (err) {
			break;
		}

		// This call to vm_fault causes an early pmap level resolution
		// of the mappings created above for kernel mappings, since
		// faulting in later can't take place from interrupt level.
		if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
			err = vm_fault(addressMap,
			    (vm_map_offset_t)trunc_page_64(address),
			    options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
			    FALSE, VM_KERN_MEMORY_NONE,
			    THREAD_UNINT, NULL,
			    (vm_map_offset_t)0);

			if (KERN_SUCCESS != err) {
				break;
			}
		}

		sourceOffset += segLen - pageOffset;
		address += segLen;
		bytes -= segLen;
		pageOffset = 0;
	} while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

	if (bytes) {
		err = kIOReturnBadArgument;
	}

	return err;
}
IOReturn
IOMemoryDescriptor::doUnmap(
	vm_map_t         addressMap,
	IOVirtualAddress __address,
	IOByteCount      __length )
{
	IOReturn          err;
	IOMemoryMap *     mapping;
	mach_vm_address_t address;
	mach_vm_size_t    length;

	mapping = (IOMemoryMap *) __address;
	addressMap = mapping->fAddressMap;
	address    = mapping->fAddress;
	length     = mapping->fLength;

	if (kIOMapOverwrite & mapping->fOptions) {
		err = KERN_SUCCESS;
	} else {
		if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
			addressMap = IOPageableMapForAddress( address );
		}
#if DEBUG
		if (kIOLogMapping & gIOKitDebug) {
			IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
			    addressMap, address, length );
		}
#endif
		err = mach_vm_deallocate( addressMap, address, length );
		if (vm_map_page_mask(addressMap) < PAGE_MASK) {
			DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
		}
	}

#if IOTRACKING
	IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
#endif /* IOTRACKING */

	return err;
}
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn                err = kIOReturnSuccess;
	IOMemoryMap *           mapping = NULL;
	OSSharedPtr<OSIterator> iter;

	LOCK;

	if (doRedirect) {
		_flags |= kIOMemoryRedirected;
	} else {
		_flags &= ~kIOMemoryRedirected;
	}

	if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
		memory_object_t pager;

		if (reserved) {
			pager = (memory_object_t) reserved->dp.devicePager;
		} else {
			pager = MACH_PORT_NULL;
		}

		while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
			mapping->redirect( safeTask, doRedirect );
			if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
				err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
			}
		}
	}

	if (!doRedirect) {
		WAKEUP;
	}

	UNLOCK;

#ifndef __LP64__
	// temporary binary compatibility
	IOSubMemoryDescriptor * subMem;
	if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
		err = subMem->redirect( safeTask, doRedirect );
	} else {
		err = kIOReturnSuccess;
	}
#endif /* !__LP64__ */

	return err;
}
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
	IOReturn err = kIOReturnSuccess;

	if (fSuperMap) {
//		err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
	} else {
		LOCK;

		if (fAddress && fAddressMap) {
			if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
			    && (0 == (fOptions & kIOMapStatic))) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				err = kIOReturnSuccess;
#if DEBUG
				IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
			} else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
				IOOptionBits newMode;
				newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
				IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
			}
		}

		UNLOCK;
	}

	if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
	    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
	    && safeTask
	    && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
		fMemory->redirect(safeTask, doRedirect);
	}

	return err;
}
IOReturn
IOMemoryMap::unmap( void )
{
	IOReturn err;

	LOCK;

	if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
	    && (0 == (kIOMapStatic & fOptions))) {
		err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
	} else {
		err = kIOReturnSuccess;
	}

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}

	fAddress = 0;

	UNLOCK;

	return err;
}
void
IOMemoryMap::taskDied( void )
{
	LOCK;
	if (fUserClientUnmap) {
		unmap();
	}
#if IOTRACKING
	else {
		IOTrackingRemoveUser(gIOMapTracking, &fTracking);
	}
#endif /* IOTRACKING */

	if (fAddressMap) {
		vm_map_deallocate(fAddressMap);
		fAddressMap = NULL;
	}
	fAddressTask = NULL;
	fAddress     = 0;
	UNLOCK;
}
IOReturn
IOMemoryMap::userClientUnmap( void )
{
	fUserClientUnmap = true;
	return kIOReturnSuccess;
}
// Overload the release mechanism. All mappings must be a member
// of a memory descriptor's _mappings set. This means that we
// always have 2 references on a mapping. When either of these
// references is released we need to free ourselves.
void
IOMemoryMap::taggedRelease(const void *tag) const
{
	LOCK;
	super::taggedRelease(tag, 2);
	UNLOCK;
}

void
IOMemoryMap::free()
{
	unmap();

	if (fMemory) {
		LOCK;
		fMemory->removeMapping(this);
		UNLOCK;
		fMemory.reset();
	}

	if (fRedirUPL) {
		upl_commit(fRedirUPL, NULL, 0);
		upl_deallocate(fRedirUPL);
	}

	super::free();
}
mach_vm_size_t
IOMemoryMap::getLength()
{
	return fLength;
}

IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
	if (fSuperMap) {
		fSuperMap->getVirtualAddress();
	} else if (fAddressMap
	    && vm_map_is_64bit(fAddressMap)
	    && (sizeof(IOVirtualAddress) < 8)) {
		OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
	}
#endif /* !__LP64__ */

	return fAddress;
}

#ifndef __LP64__
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return fAddress;
}

mach_vm_size_t
IOMemoryMap::getSize()
{
	return fLength;
}
#endif /* !__LP64__ */
task_t
IOMemoryMap::getAddressTask()
{
	if (fSuperMap) {
		return fSuperMap->getAddressTask();
	} else {
		return fAddressTask;
	}
}

IOOptionBits
IOMemoryMap::getMapOptions()
{
	return fOptions;
}

IOMemoryDescriptor *
IOMemoryMap::getMemoryDescriptor()
{
	return fMemory.get();
}
IOMemoryMap *
IOMemoryMap::copyCompatible(
	IOMemoryMap * newMapping )
{
	task_t            task      = newMapping->getAddressTask();
	mach_vm_address_t toAddress = newMapping->fAddress;
	IOOptionBits      _options  = newMapping->fOptions;
	mach_vm_size_t    _offset   = newMapping->fOffset;
	mach_vm_size_t    _length   = newMapping->fLength;

	if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
		return NULL;
	}
	if ((fOptions ^ _options) & kIOMapReadOnly) {
		return NULL;
	}
	if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
	    && ((fOptions ^ _options) & kIOMapCacheMask)) {
		return NULL;
	}

	if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
		return NULL;
	}

	if (_offset < fOffset) {
		return NULL;
	}

	_offset -= fOffset;

	if ((_offset + _length) > fLength) {
		return NULL;
	}

	if ((fLength == _length) && (!_offset)) {
		retain();
		newMapping = this;
	} else {
		newMapping->fSuperMap.reset(this, OSRetain);
		newMapping->fOffset  = fOffset + _offset;
		newMapping->fAddress = fAddress + _offset;
	}

	return newMapping;
}
IOReturn
IOMemoryMap::wireRange(
	uint32_t       options,
	mach_vm_size_t offset,
	mach_vm_size_t length)
{
	IOReturn kr;
	mach_vm_address_t start = trunc_page_64(fAddress + offset);
	mach_vm_address_t end   = round_page_64(fAddress + offset + length);
	vm_prot_t prot;

	prot = (kIODirectionOutIn & options);
	if (prot) {
		kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
	} else {
		kr = vm_map_unwire(fAddressMap, start, end, FALSE);
	}

	return kr;
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
	IOPhysicalAddress address;

	LOCK;
#ifdef __LP64__
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
	address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
	UNLOCK;

	return address;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void
IOMemoryDescriptor::initialize( void )
{
	if (NULL == gIOMemoryLock) {
		gIOMemoryLock = IORecursiveLockAlloc();
	}

	gIOLastPage = IOGetLastPageNumber();
}
void
IOMemoryDescriptor::free( void )
{
	if (reserved) {
		cleanKernelReserved(reserved);
		IODelete(reserved, IOMemoryDescriptorReserved, 1);
		reserved = NULL;
	}

	super::free();
}
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::setMapping(
	task_t           intoTask,
	IOVirtualAddress mapAddress,
	IOOptionBits     options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}

OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	IOOptionBits options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}

#ifndef __LP64__
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::map(
	task_t           intoTask,
	IOVirtualAddress atAddress,
	IOOptionBits     options,
	IOByteCount      offset,
	IOByteCount      length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
OSSharedPtr<IOMemoryMap>
IOMemoryDescriptor::createMappingInTask(
	task_t            intoTask,
	mach_vm_address_t atAddress,
	IOOptionBits      options,
	mach_vm_size_t    offset,
	mach_vm_size_t    length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = NULL;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	// already retained through makeMapping
	OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);

	return retval;
}
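// Illustrative sketch (not part of the original file): creating a mapping of
// a descriptor in the kernel task with createMappingInTask(), the interface
// recommended over the legacy map() overload for 64-bit tasks. The wrapper
// name is hypothetical and error handling is minimal by design.
#if 0
static IOReturn
ExampleMapIntoKernel(IOMemoryDescriptor * md)
{
	OSSharedPtr<IOMemoryMap> map = md->createMappingInTask(kernel_task,
	    0,                               // atAddress is ignored with kIOMapAnywhere
	    kIOMapAnywhere | kIOMapReadOnly,
	    0,                               // offset into the descriptor
	    0);                              // length 0 maps the whole descriptor
	if (!map) {
		return kIOReturnVMError;
	}

	// getAddress()/getLength() describe the mapped range; the mapping is
	// torn down when the last reference to the IOMemoryMap goes away.
	mach_vm_address_t va  = map->getAddress();
	mach_vm_size_t    len = map->getLength();
	(void) va;
	(void) len;
	return kIOReturnSuccess;
}
#endif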
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits options,
    IOByteCount  offset)
{
	return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
}
#endif
IOReturn
IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
    IOOptionBits   options,
    mach_vm_size_t offset)
{
	IOReturn err = kIOReturnSuccess;
	OSSharedPtr<IOMemoryDescriptor> physMem;

	LOCK;

	if (fAddress && fAddressMap) {
		if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
		    || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
			physMem = fMemory;
		}

		if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
			upl_size_t          size  = (typeof(size))round_page(fLength);
			upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
			    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
			if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
			    NULL, NULL,
			    &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
				fRedirUPL = NULL;
			}

			if (physMem) {
				IOUnmapPages( fAddressMap, fAddress, fLength );
				if ((false)) {
					physMem->redirect(NULL, true);
				}
			}
		}
	}

	if (newBackingMemory) {
		if (newBackingMemory != fMemory) {
			fOffset = 0;
			if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
			    options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
			    offset, fLength)) {
				err = kIOReturnError;
			}
		}
		if (fRedirUPL) {
			upl_commit(fRedirUPL, NULL, 0);
			upl_deallocate(fRedirUPL);
			fRedirUPL = NULL;
		}

		if ((false) && physMem) {
			physMem->redirect(NULL, false);
		}
	}

	UNLOCK;

	return err;
}
IOMemoryMap *
IOMemoryDescriptor::makeMapping(
	IOMemoryDescriptor * owner,
	task_t               __intoTask,
	IOVirtualAddress     __address,
	IOOptionBits         options,
	IOByteCount          __offset,
	IOByteCount          __length )
{
#ifndef __LP64__
	if (!(kIOMap64Bit & options)) {
		panic("IOMemoryDescriptor::makeMapping !64bit");
	}
#endif /* !__LP64__ */

	OSSharedPtr<IOMemoryDescriptor> mapDesc;
	__block IOMemoryMap * result = NULL;

	IOMemoryMap *  mapping = (IOMemoryMap *) __address;
	mach_vm_size_t offset  = mapping->fOffset + __offset;
	mach_vm_size_t length  = mapping->fLength;

	mapping->fOffset = offset;

	LOCK;

	do {
		if (kIOMapStatic & options) {
			result = mapping;
			addMapping(mapping);
			mapping->setMemoryDescriptor(this, 0);
			continue;
		}

		if (kIOMapUnique & options) {
			addr64_t    phys;
			IOByteCount physLen;

			// if (owner != this) continue;

			if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
			    || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
				phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
				if (!phys || (physLen < length)) {
					continue;
				}

				mapDesc = IOMemoryDescriptor::withAddressRange(
					phys, length, getDirection() | kIOMemoryMapperNone, NULL);
				if (!mapDesc) {
					continue;
				}
				offset = 0;
				mapping->fOffset = offset;
			}
		} else {
			// look for a compatible existing mapping
			if (_mappings) {
				_mappings->iterateObjects(^(OSObject * object)
				{
					IOMemoryMap * lookMapping = (IOMemoryMap *) object;
					if ((result = lookMapping->copyCompatible(mapping))) {
						addMapping(result);
						result->setMemoryDescriptor(this, offset);
						return true;
					}
					return false;
				});
			}
			if (result || (options & kIOMapReference)) {
				if (result != mapping) {
					mapping->release();
					mapping = NULL;
				}
				continue;
			}
		}

		if (!mapDesc) {
			mapDesc.reset(this, OSRetain);
		}
		IOReturn kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
		if (kIOReturnSuccess == kr) {
			result = mapping;
			mapDesc->addMapping(result);
			result->setMemoryDescriptor(mapDesc.get(), offset);
		} else {
			mapping->release();
			mapping = NULL;
		}
	} while (false);

	UNLOCK;

	return result;
}
void
IOMemoryDescriptor::addMapping(
	IOMemoryMap * mapping )
{
	if (mapping) {
		if (NULL == _mappings) {
			_mappings = OSSet::withCapacity(1);
		}
		if (_mappings) {
			_mappings->setObject( mapping );
		}
	}
}

void
IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
	if (_mappings) {
		_mappings->removeObject( mapping );
	}
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
    IOByteCount length,
    IODirection direction)
{
	return false;
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t      task)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
	IOPhysicalAddress address,
	IOByteCount       length,
	IODirection       direction )
{
	return false;
}

bool
IOMemoryDescriptor::initWithRanges(
	IOVirtualRange * ranges,
	UInt32           withCount,
	IODirection      direction,
	task_t           task,
	bool             asReference)
{
	return false;
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
    UInt32      withCount,
    IODirection direction,
    bool        asReference)
{
	return false;
}

void *
IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	return NULL;
}
#endif /* !__LP64__ */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool
IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
	OSSharedPtr<OSSymbol const> keys[2]   = {NULL};
	OSSharedPtr<OSObject>       values[2] = {NULL};
	OSSharedPtr<OSArray>        array;
	vm_size_t                   vcopy_size;

	struct SerData {
		user_addr_t address;
		user_size_t length;
	} *vcopy = NULL;

	unsigned int index, nRanges;
	bool result = false;

	IOOptionBits type = _flags & kIOMemoryTypeMask;

	if (s == NULL) {
		return false;
	}

	array = OSArray::withCapacity(4);
	if (!array) {
		return false;
	}

	nRanges = _rangesCount;
	if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
		result = false;
		goto bail;
	}
	vcopy = (SerData *) IOMalloc(vcopy_size);
	if (vcopy == NULL) {
		result = false;
		goto bail;
	}

	keys[0] = OSSymbol::withCString("address");
	keys[1] = OSSymbol::withCString("length");

	// Copy the volatile data so we don't have to allocate memory
	// while the lock is held.
	LOCK;
	if (nRanges == _rangesCount) {
		Ranges vec = _ranges;
		for (index = 0; index < nRanges; index++) {
			mach_vm_address_t addr; mach_vm_size_t len;
			getAddrLenForInd(addr, len, type, vec, index);
			vcopy[index].address = addr;
			vcopy[index].length  = len;
		}
	} else {
		// The descriptor changed out from under us. Give up.
		UNLOCK;
		result = false;
		goto bail;
	}
	UNLOCK;

	for (index = 0; index < nRanges; index++) {
		user_addr_t addr = vcopy[index].address;
		IOByteCount len  = (IOByteCount) vcopy[index].length;
		values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
		if (values[0] == NULL) {
			result = false;
			goto bail;
		}
		values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
		if (values[1] == NULL) {
			result = false;
			goto bail;
		}
		OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
		if (dict == NULL) {
			result = false;
			goto bail;
		}
		array->setObject(dict.get());
		dict.reset();
		values[0].reset();
		values[1].reset();
	}

	result = array->serialize(s);

bail:
	if (vcopy) {
		IOFree(vcopy, vcopy_size);
	}

	return result;
}
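// Illustrative sketch (not part of the original file): what a caller of
// serialize() might look like. OSSerialize::withCapacity() and text() are
// standard libkern interfaces; the capacity value and the wrapper name are
// arbitrary choices made for this example.
#if 0
static void
ExampleSerializeDescriptor(IOGeneralMemoryDescriptor * md)
{
	OSSharedPtr<OSSerialize> s = OSSerialize::withCapacity(4096);
	if (s && md->serialize(s.get())) {
		// The serializer now holds an array of {address, length}
		// dictionaries, one per range in the descriptor.
		kprintf("%s\n", s->text());
	}
}
#endif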
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}