/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/memory_entry.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData {
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;    // Pointer to page list or index into it
    uint32_t     fIOMDOffset;  // The offset of this iopl in descriptor
    ppnum_t      fMappedPage;  // Page number of first page in this iopl
    unsigned int fPageOffset;  // Offset within first page of iopl
    unsigned int fFlags;       // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData {
    IOMapper *    fMapper;
    uint64_t      fDMAMapAlignment;
    uint64_t      fMappedBase;
    uint64_t      fMappedLength;
    uint64_t      fPreparationID;
#if IOTRACKING
    IOTracking    fWireTracking;
#endif /* IOTRACKING */
    unsigned int  fPageCnt;
    uint8_t       fDMAMapNumAddressBits;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char fMappedBaseValid:1;
    unsigned char _resv:3;
    unsigned char fDMAAccess:2;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
#if __LP64__
    // align fPageList as for ioPLBlock
    __attribute__((aligned(sizeof(upl_t))))
#endif
    ;
    //ioPLBlock fBlocks[1];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
extern "C" kern_return_t
device_data_action(
    uintptr_t          device_handle,
    ipc_port_t         device_pager,
    vm_prot_t          protection,
    vm_object_offset_t offset,
    vm_size_t          size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if (memDesc) {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    } else {
        kr = KERN_ABORTED;
    }
    UNLOCK;

    return kr;
}

extern "C" kern_return_t
device_close(
    uintptr_t device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return kIOReturnSuccess;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState) {
    case kIOMemoryPurgeableKeepCurrent:
        *control = VM_PURGABLE_GET_STATE;
        break;

    case kIOMemoryPurgeableNonVolatile:
        *state = VM_PURGABLE_NONVOLATILE;
        break;
    case kIOMemoryPurgeableVolatile:
        *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
        break;
    case kIOMemoryPurgeableEmpty:
        *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
        break;
    default:
        err = kIOReturnBadArgument;
        break;
    }

    if (*control == VM_PURGABLE_SET_STATE) {
        // let VM know this call is from the kernel and is allowed to alter
        // the volatility of the memory entry even if it was created with
        // MAP_MEM_PURGABLE_KERNEL_ONLY
        *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
    }

    return err;
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state) {
    case VM_PURGABLE_NONVOLATILE:
        *state = kIOMemoryPurgeableNonVolatile;
        break;
    case VM_PURGABLE_VOLATILE:
        *state = kIOMemoryPurgeableVolatile;
        break;
    case VM_PURGABLE_EMPTY:
        *state = kIOMemoryPurgeableEmpty;
        break;
    default:
        *state = kIOMemoryPurgeableNonVolatile;
        err = kIOReturnNotReady;
        break;
    }
    return err;
}
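
// Illustrative sketch (not part of the original source): how a driver might
// exercise the purgeable-state translation above through the public
// IOMemoryDescriptor::setPurgeable() API. The buffer size and option mix are
// assumptions for the example only; the code is compiled out.
#if 0
#include <IOKit/IOBufferMemoryDescriptor.h>

static void
PurgeableUsageSketch(void)
{
    IOBufferMemoryDescriptor * buf;
    IOOptionBits               oldState;

    // kIOMemoryPurgeable asks memoryReferenceCreate() for a purgeable named entry.
    buf = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task,
        kIODirectionOutIn | kIOMemoryPurgeable, PAGE_SIZE, PAGE_SIZE);
    if (!buf) {
        return;
    }

    // Mark the pages volatile; purgeableControlBits() converts this request
    // into VM_PURGABLE_SET_STATE(_FROM_KERNEL) / VM_PURGABLE_VOLATILE for the VM.
    buf->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

    // Reclaim before reuse; kIOMemoryPurgeableEmpty reported back here (via
    // purgeableStateBits()) means the VM discarded the contents while volatile.
    buf->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    if (kIOMemoryPurgeableEmpty == oldState) {
        // contents were purged; reinitialize the buffer before use
    }

    buf->release();
}
#endif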
typedef struct {
    unsigned int wimg;
    unsigned int object_type;
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
    [kIODefaultCache]             = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
    [kIOInhibitCache]             = {VM_WIMG_IO, MAP_MEM_IO},
    [kIOWriteThruCache]           = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
    [kIOWriteCombineCache]        = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
    [kIOCopybackCache]            = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
    [kIOCopybackInnerCache]       = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
    [kIOPostedWrite]              = {VM_WIMG_POSTED, MAP_MEM_POSTED},
    [kIORealTimeCache]            = {VM_WIMG_RT, MAP_MEM_RT},
    [kIOPostedReordered]          = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
    [kIOPostedCombinedReordered]  = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};

static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
    vm_prot_t prot = 0;
    SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
    return prot;
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
    if (cacheMode == kIODefaultCache) {
        return -1U;
    }
    return iomd_mem_types[cacheMode].wimg;
}

static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)
{
    pagerFlags &= VM_WIMG_MASK;
    IOOptionBits cacheMode = kIODefaultCache;
    for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
        if (iomd_mem_types[i].wimg == pagerFlags) {
            cacheMode = i;
            break;
        }
    }
    return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
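
// Illustrative sketch (not part of the original source): the cache-mode table
// above is what ultimately services the kIOMap* cache options a driver passes
// when mapping a descriptor. The helper below is an assumed usage pattern,
// compiled out.
#if 0
static IOMemoryMap *
MapUncachedSketch(IOMemoryDescriptor * md)
{
    // kIOMapInhibitCache selects the kIOInhibitCache row of iomd_mem_types,
    // i.e. VM_WIMG_IO for the pmap and MAP_MEM_IO for the named entry.
    return md->createMappingInTask(kernel_task, 0,
               kIOMapAnywhere | kIOMapInhibitCache);
}
#endif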
struct IOMemoryEntry {
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference {
    volatile SInt32            refCount;
    vm_prot_t                  prot;
    uint32_t                   capacity;
    uint32_t                   count;
    struct IOMemoryReference * mapRef;
    IOMemoryEntry              entries[0];
};

enum {
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
    kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
        - sizeof(ref->entries)
        + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref))IOMalloc(newSize);
    if (realloc) {
        oldSize = (sizeof(IOMemoryReference)
            - sizeof(realloc->entries)
            + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) {
            copySize = newSize;
        }
        if (ref) {
            bcopy(realloc, ref, copySize);
        }
        IOFree(realloc, oldSize);
    } else if (ref) {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (ref) {
        ref->capacity = capacity;
    }
    return ref;
}
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    if (ref->mapRef) {
        memoryReferenceFree(ref->mapRef);
        ref->mapRef = NULL;
    }

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
        - sizeof(ref->entries)
        + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}
void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) {
        memoryReferenceFree(ref);
    }
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
    IOOptionBits         options,
    IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t        err;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;
    vm_tag_t             tag;
    vm_named_entry_kernel_flags_t vmne_kflags;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) {
        return kIOReturnNoMemory;
    }

    tag = getVMTag(kernel_map);
    vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
    entries = &ref->entries[0];
    count = 0;
    err = KERN_SUCCESS;

    offset = 0;
    rangeIdx = 0;
    if (_task) {
        getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    } else {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
            IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode) {
        prot |= VM_PROT_WRITE;
    }
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
        prot |= VM_PROT_WRITE;
    }
    if (kIOMemoryReferenceWrite & options) {
        prot |= VM_PROT_WRITE;
    }
    if (kIOMemoryReferenceCOW & options) {
        prot |= MAP_MEM_VM_COPY;
    }

    if (kIOMemoryUseReserve & _flags) {
        prot |= MAP_MEM_GRAB_SECLUDED;
    }

    if ((kIOMemoryReferenceReuse & options) && _memRef) {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task) {
        // virtual ranges alloc or map
        if (kIOMemoryBufferPageable & _flags) {
            int ledger_tag, ledger_no_footprint;

            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;

            // default accounting settings:
            //   + "none" ledger tag
            //   + include in footprint
            // can be changed later with ::setOwnership()
            ledger_tag = VM_LEDGER_TAG_NONE;
            ledger_no_footprint = 0;

            if (kIOMemoryBufferPurgeable & _flags) {
                prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
                if (VM_KERN_MEMORY_SKYWALK == tag) {
                    // Skywalk purgeable memory accounting:
                    //   + "network" ledger tag
                    //   + not included in footprint
                    ledger_tag = VM_LEDGER_TAG_NETWORK;
                    ledger_no_footprint = 1;
                } else {
                    // regular purgeable memory accounting:
                    //   + "none" ledger tag
                    //   + included in footprint
                    ledger_tag = VM_LEDGER_TAG_NONE;
                    ledger_no_footprint = 0;
                }
            }
            vmne_kflags.vmnekf_ledger_tag = ledger_tag;
            vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
            if (kIOMemoryUseReserve & _flags) {
                prot |= MAP_MEM_GRAB_SECLUDED;
            }

            prot |= VM_PROT_WRITE;
            map = NULL;
        } else {
            map = get_task_map(_task);
        }

        remain = _length;
        while (remain) {
            srcAddr = nextAddr;
            srcLen  = nextLen;

            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) {
                    break;
                }
                srcLen += nextLen;
            }

            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) {
                    break;
                }
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot) {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) {
                        cloneEntry = cloneEntries->entry;
                    } else {
                        prot &= ~MAP_MEM_NAMED_REUSE;
                    }
                }

                err = mach_make_memory_entry_internal(map,
                    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

                if (KERN_SUCCESS != err) {
                    break;
                }
                if (actualSize > entrySize) {
                    panic("mach_make_memory_entry_64 actualSize");
                }

                if (count >= ref->capacity) {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot) {
                    if ((cloneEntries->entry == entries->entry)
                        && (cloneEntries->size == entries->size)
                        && (cloneEntries->offset == entries->offset)) {
                        cloneEntries++;
                    } else {
                        prot &= ~MAP_MEM_NAMED_REUSE;
                    }
                }
                entries++;
                count++;
            } while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    } else {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t       size = ptoa_64(_pages);

        if (!getKernelReserved()) {
            panic("getKernelReserved");
        }
        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) {
            panic("phys is kIODefaultCache");
        }
        if (reserved->dp.pagerContig) {
            pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
        }

        pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
            size, pagerFlags);
        assert(pager);
        if (!pager) {
            err = kIOReturnVMError;
        } else {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert(KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) {
                device_pager_deallocate(pager);
            } else {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot  = prot;

    if (_task && (KERN_SUCCESS == err)
        && (kIOMemoryMapCopyOnWrite & _flags)
        && !(kIOMemoryReferenceCOW & options)) {
        err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
    }

    if (KERN_SUCCESS == err) {
        if (MAP_MEM_NAMED_REUSE & prot) {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    } else {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return err;
}
static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
        (vm_map_offset_t) 0,
        (((ref->options & kIOMapAnywhere)
        ? VM_FLAGS_ANYWHERE
        : VM_FLAGS_FIXED)),
        VM_MAP_KERNEL_FLAGS_NONE,
        ref->tag,
        IPC_PORT_NULL,
        (memory_object_offset_t) 0,
        false,
        ref->prot,
        ref->prot,
        VM_INHERIT_NONE);
    if (KERN_SUCCESS == err) {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
    IOMemoryReference * ref,
    vm_map_t            map,
    mach_vm_size_t      inoffset,
    mach_vm_size_t      size,
    IOOptionBits        options,
    mach_vm_address_t * inaddr)
{
    IOReturn        err;
    int64_t         offset = inoffset;
    uint32_t        rangeIdx, entryIdx;
    vm_map_offset_t addr, mapAddr;
    vm_map_offset_t pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t    nextLen;
    IOByteCount       physLen;
    IOMemoryEntry *   entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;
    vm_tag_t          tag;
    // for the kIOMapPrefault option.
    upl_page_info_t * pageList = NULL;
    UInt              currentPageIndex = 0;
    bool              didAlloc;

    if (ref->mapRef) {
        err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
        return err;
    }

    type = _flags & kIOMemoryTypeMask;

    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) {
        prot |= VM_PROT_WRITE;
    }
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode) {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    tag = getVMTag(map);

    if (_task) {
        // Find first range for offset
        if (!_rangesCount) {
            return kIOReturnBadArgument;
        }
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) {
                break;
            }
            remain -= nextLen;
        }
    } else {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) {
        return kIOReturnBadArgument;
    }

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr       = 0;
    didAlloc   = false;

    if (!(options & kIOMapAnywhere)) {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) {
            return kIOReturnNotAligned;
        }
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
        (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
        entryIdx++) {
    }
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options) {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    } else {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.tag     = tag;
        ref.options = options;
        ref.size    = size;
        ref.prot    = prot;
        if (options & kIOMapAnywhere) {
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        } else {
            ref.mapped = addr;
        }
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        } else {
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        }
        if (KERN_SUCCESS == err) {
            addr     = ref.mapped;
            map      = ref.map;
            didAlloc = true;
        }
    }

    /*
     * If the memory is associated with a device pager but doesn't have a UPL,
     * it will be immediately faulted in through the pager via populateDevicePager().
     * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
     * operations.
     */
    if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
        options &= ~kIOMapPrefault;
    }

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault) {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((_wireCount == 0) ||
            (_memoryEntries == NULL)) {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData * dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to go back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
            ioplIndex++;
        }
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
         * array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL) {
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        } else {
            pageList = &pageList[ioplInfo.fPageInfo];
        }

        // Rebase [offset] into the IOPL in order to look for the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && (KERN_SUCCESS == err)) {
        entryOffset = offset - entry->offset;
        if ((page_mask & entryOffset) != pageOffset) {
            err = kIOReturnNotAligned;
            break;
        }

        if (kIODefaultCache != cacheMode) {
            vm_size_t unused = 0;
            err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                memEntryCacheMode, NULL, entry->entry);
            assert(KERN_SUCCESS == err);
        }

        entryOffset -= pageOffset;
        if (entryOffset >= entry->size) {
            panic("entryOffset");
        }
        chunk = entry->size - entryOffset;
        if (chunk) {
            vm_map_kernel_flags_t vmk_flags;

            vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
            vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

            if (chunk > remain) {
                chunk = remain;
            }
            if (options & kIOMapPrefault) {
                UInt nb_pages = round_page(chunk) / PAGE_SIZE;

                err = vm_map_enter_mem_object_prefault(map,
                    &mapAddr,
                    chunk, 0 /* mask */,
                    (VM_FLAGS_FIXED
                    | VM_FLAGS_OVERWRITE),
                    vmk_flags,
                    tag,
                    entry->entry,
                    entryOffset,
                    prot, // cur
                    prot, // max
                    &pageList[currentPageIndex],
                    nb_pages);

                // Compute the next index in the page list.
                currentPageIndex += nb_pages;
                assert(currentPageIndex <= _pages);
            } else {
                err = vm_map_enter_mem_object(map,
                    &mapAddr,
                    chunk, 0 /* mask */,
                    (VM_FLAGS_FIXED
                    | VM_FLAGS_OVERWRITE),
                    vmk_flags,
                    tag,
                    entry->entry,
                    entryOffset,
                    false, // copy
                    prot, // cur
                    prot, // max
                    VM_INHERIT_NONE);
            }
            if (KERN_SUCCESS != err) {
                break;
            }
            remain -= chunk;
            if (!remain) {
                break;
            }
            mapAddr += chunk;
            offset  += chunk - pageOffset;
        }
        pageOffset = 0;
        entry++;
        entryIdx++;
        if (entryIdx >= ref->count) {
            err = kIOReturnOverrun;
            break;
        }
    }

    if ((KERN_SUCCESS != err) && didAlloc) {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
    IOMemoryReference * ref,
    IOByteCount       * residentPageCount,
    IOByteCount       * dirtyPageCount)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    unsigned int    resident, dirty;
    unsigned int    totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) {
            break;
        }
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) {
        *residentPageCount = totalResident;
    }
    if (dirtyPageCount) {
        *dirtyPageCount = totalDirty;
    }
    return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
    IOMemoryReference * ref,
    IOOptionBits        newState,
    IOOptionBits      * oldState)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) {
            break;
        }
        err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
        if (KERN_SUCCESS != err) {
            break;
        }
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) {
            break;
        }

        if (kIOMemoryPurgeableEmpty == state) {
            totalState = kIOMemoryPurgeableEmpty;
        } else if (kIOMemoryPurgeableEmpty == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == state) {
            totalState = kIOMemoryPurgeableVolatile;
        } else {
            totalState = kIOMemoryPurgeableNonVolatile;
        }
    }

    if (oldState) {
        *oldState = totalState;
    }
    return err;
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
    IOMemoryReference * ref,
    task_t              newOwner,
    int                 newLedgerTag,
    IOOptionBits        newLedgerOptions)
{
    IOReturn        err, totalErr;
    IOMemoryEntry * entries;

    totalErr = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;

        err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
        if (KERN_SUCCESS != err) {
            totalErr = err;
        }
    }

    return totalErr;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
    IOByteCount length,
    IODirection direction)
{
    return IOMemoryDescriptor::
           withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount  length,
    IODirection  direction,
    task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, length, direction, task)) {
            return that;
        }

        that->release();
    }
    return NULL;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
    IOPhysicalAddress address,
    IOByteCount       length,
    IODirection       direction )
{
    return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32      withCount,
    IODirection direction,
    task_t      task,
    bool        asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
            return that;
        }

        that->release();
    }
    return NULL;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
    mach_vm_size_t length,
    IOOptionBits   options,
    task_t         task)
{
    IOAddressRange range = { address, length };
    return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
}
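
// Illustrative sketch (not part of the original source): typical client use of
// the withAddressRange() factory above, wrapping a user task's buffer for I/O.
// The function and its parameters are assumptions for the example; compiled out.
#if 0
static IOReturn
DescribeUserBufferSketch(mach_vm_address_t uAddr, mach_vm_size_t uLen, task_t userTask)
{
    IOReturn             ret;
    IOMemoryDescriptor * md;

    md = IOMemoryDescriptor::withAddressRange(uAddr, uLen,
        kIODirectionOutIn, userTask);
    if (!md) {
        return kIOReturnNoMemory;
    }

    // Wire the user pages for the duration of the I/O.
    ret = md->prepare();
    if (kIOReturnSuccess == ret) {
        // ... program DMA, or copy via readBytes()/writeBytes() ...
        md->complete();
    }
    md->release();
    return ret;
}
#endif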
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
    UInt32       rangeCount,
    IOOptionBits options,
    task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (task) {
            options |= kIOMemoryTypeVirtual64;
        } else {
            options |= kIOMemoryTypePhysical64;
        }

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
            return that;
        }

        that->release();
    }

    return NULL;
}
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void * buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits opts,
    IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
        self->release();
        return NULL;
    }

    return self;
}

bool
IOMemoryDescriptor::initWithOptions(void * buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
    return false;
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
    UInt32      withCount,
    IODirection direction,
    bool        asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
            return that;
        }

        that->release();
    }
    return NULL;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
    IOByteCount offset,
    IOByteCount length,
    IODirection direction)
{
    return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD) {
        return IOGeneralMemoryDescriptor::
               withPersistentMemoryDescriptor(origGenMD);
    } else {
        return NULL;
    }
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
        return NULL;
    }

    if (memRef == originalMD->_memRef) {
        originalMD->retain();               // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
        && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
        self->release();
        self = NULL;
    }
    return self;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void * address,
    IOByteCount withLength,
    IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount withLength,
    IODirection withDirection,
    task_t      withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
    IOPhysicalAddress address,
    IOByteCount       withLength,
    IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
    IOPhysicalRange * ranges,
    UInt32            count,
    IODirection       direction,
    bool              asReference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (asReference) {
        mdOpts |= kIOMemoryAsReference;
    }

    return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
    IOVirtualRange * ranges,
    UInt32           count,
    IODirection      direction,
    task_t           task,
    bool             asReference)
{
    IOOptionBits mdOpts = direction;

    if (asReference) {
        mdOpts |= kIOMemoryAsReference;
    }

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task) {
            mdOpts |= kIOMemoryAutoPrepare;
        }
    } else {
        mdOpts |= kIOMemoryTypePhysical;
    }

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 *  from a given task, several physical ranges, an UPL from the ubc
 *  system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
    UInt32       count,
    UInt32       offset,
    task_t       task,
    IOOptionBits options,
    IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address) {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {
        IOMDPersistentInitData *initData = (typeof(initData))buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
            return false;
        }

        _memRef = initData->fMemRef; // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type    = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count   = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task   = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        if (!task) {
            return false;
        }
        break;

    case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        break;
    default:
        return false;         /* bad argument */
    }

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
            while (_wireCount) {
                complete();
            }
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
            if (kIOMemoryTypeUIO == type) {
                uio_free((uio_t) _ranges.v);
            }
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            }
#endif /* !__LP64__ */
            else {
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
            }
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options)) {
            if (_memRef) {
                memoryReferenceRelease(_memRef);
                _memRef = NULL;
            }
            if (_mappings) {
                _mappings->flushCollection();
            }
        }
    } else {
        if (!super::init()) {
            return false;
        }
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOrRemote & options) {
        options |= kIOMemoryMapperNone;
    }
    if (kIOMemoryMapperNone & options) {
        mapper = NULL;  // No Mapper
    } else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags = options;
    _task  = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;

    if (kIOMemoryThreadSafe & options) {
        if (!_prepareLock) {
            _prepareLock = IOLockAlloc();
        }
    } else if (_prepareLock) {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {
        ioPLBlock   iopl;
        ioGMDData * dataP;

        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) {
            return false;
        }
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;
        switch (kIOMemoryDirectionMask & options) {
        case kIODirectionOut:
            dataP->fDMAAccess = kIODMAMapReadAccess;
            break;
        case kIODirectionIn:
            dataP->fDMAAccess = kIODMAMapWriteAccess;
            break;
        case kIODirectionNone:
        case kIODirectionOutIn:
        default:
            panic("bad dir for upl 0x%x\n", (int) options);
            break;
        }
        //  _wireCount++;       // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset)) {
            panic("short external upl");
        }

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo   = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    } else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        } else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type) {
            case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
            case kIOMemoryTypeVirtual64:
            case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type) {
                        type = kIOMemoryTypeVirtual;
                    } else {
                        type = kIOMemoryTypePhysical;
                    }
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64) {
                    return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
            case kIOMemoryTypeVirtual:
            case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v) {
                        return false;
                    }
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        _rangesCount = count;

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_address_t endAddr;
            mach_vm_size_t    len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
                break;
            }
            if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
                break;
            }
            if (os_add_overflow(totalLength, len, &totalLength)) {
                break;
            }
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage) {
                    _highestPage = highPage;
                }
            }
        }
        if ((ind < count)
            || (totalLength != ((IOByteCount) totalLength))) {
            return false; /* overflow */
        }
        _length = totalLength;
        _pages  = pages;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed

        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
            _wireCount++;  // Physical MDs are, by definition, wired
        } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned   dataSize;

            if (_pages > atop_64(max_mem)) {
                return false;
            }

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) {
                return false;
            }
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
                && (VM_KERN_MEMORY_NONE == _kernelTag)) {
                _kernelTag = IOMemoryTag(kernel_map);
                if (_kernelTag == gIOSurfaceTag) {
                    _userTag = VM_MEMORY_IOSURFACE;
                }
            }

            if ((kIOMemoryPersistent & _flags) && !_memRef) {
                IOReturn
                    err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) {
                    return false;
                }
            }

            if ((_flags & kIOMemoryAutoPrepare)
                && prepare() != kIOReturnSuccess) {
                return false;
            }
        }
    }

    return true;
}
void
IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (reserved) {
        LOCK;
        reserved->dp.memory = NULL;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
            dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBaseValid = dataP->fMappedBase = 0;
        }
    } else {
        while (_wireCount) {
            complete();
        }
    }

    if (_memoryEntries) {
        _memoryEntries->release();
    }

    if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
        if (kIOMemoryTypeUIO == type) {
            uio_free((uio_t) _ranges.v);
        }
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        }
#endif /* !__LP64__ */
        else {
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        _ranges.v = NULL;
    }

    if (reserved) {
        cleanKernelReserved(reserved);
        if (reserved->dp.devicePager) {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate((memory_object_t) reserved->dp.devicePager);
        } else {
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        }
        reserved = NULL;
    }

    if (_memRef) {
        memoryReferenceRelease(_memRef);
    }
    if (_prepareLock) {
        IOLockFree(_prepareLock);
    }

    super::free();
}

#ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction) {
        return _direction;
    }
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
    return _length;
}

void
IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits
IOMemoryDescriptor::getTag( void )
{
    return _tag;
}

uint64_t
IOMemoryDescriptor::getFlags(void)
{
    return _flags;
}

#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if (prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
}

#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOByteCount
IOMemoryDescriptor::readBytes
(IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t    dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);
    if ((offset >= _length)
        || ((offset + length) > _length)) {
        return 0;
    }

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags) {
        LOCK;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {     // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64) {
            break;
        }

        // Clip segment length to remaining
        if (srcLen > remaining) {
            srcLen = remaining;
        }

        copypv(srcAddr64, dstAddr, srcLen,
            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags) {
        UNLOCK;
    }

    assert(!remaining);

    return length - remaining;
}
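
// Illustrative sketch (not part of the original source): readBytes()/writeBytes()
// copy through the physical pages, so they work even when no kernel mapping of
// the descriptor exists. The function, offsets, and field patched below are
// assumptions for the example; compiled out.
#if 0
static void
CopyHeaderSketch(IOMemoryDescriptor * md)
{
    uint32_t header[4];

    // Pull the first 16 bytes of the descriptor's memory into a local buffer.
    IOByteCount got = md->readBytes(0, header, sizeof(header));
    if (got == sizeof(header)) {
        // Patch one field and push it back (the descriptor must not be
        // prepared read-only for writeBytes() to succeed).
        header[0] |= 0x1;
        md->writeBytes(0, header, sizeof(header));
    }
}
#endif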
IOByteCount
IOMemoryDescriptor::writeBytes
(IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t    srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount offset = inoffset;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags));

    if ((kIOMemoryPreparedReadOnly & _flags)
        || (offset >= _length)
        || ((offset + length) > _length)) {
        return 0;
    }

    assert(!(kIOMemoryRemote & _flags));
    if (kIOMemoryRemote & _flags) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags) {
        LOCK;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {     // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64) {
            break;
        }

        // Clip segment length to remaining
        if (dstLen > remaining) {
            dstLen = remaining;
        }

        if (!srcAddr) {
            bzero_phys(dstAddr64, dstLen);
        } else {
            copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags) {
        UNLOCK;
    }

    assert(!remaining);

#if defined(__x86_64__)
    // copypv does not cppvFsnk on intel
#else
    if (!srcAddr) {
        performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
    }
#endif

    return length - remaining;
}

#ifndef __LP64__
void
IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount) {
        return kIOPreparationIDUnprepared;
    }

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
        || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
        IOMemoryDescriptor::setPreparationID();
        return IOMemoryDescriptor::getPreparationID();
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
        return kIOPreparationIDUnprepared;
    }

    if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
        SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
        OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
    }
    return dataP->fPreparationID;
}

void
IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
{
    if (reserved->creator) {
        task_deallocate(reserved->creator);
        reserved->creator = NULL;
    }
}

IOMemoryDescriptorReserved *
IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved) {
        reserved = IONewZero(IOMemoryDescriptorReserved, 1);
    }
    return reserved;
}

void
IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
        SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
        OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
    }
}

uint64_t
IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved) {
        return reserved->preparationID;
    } else {
        return kIOPreparationIDUnsupported;
    }
}
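
// Illustrative sketch (not part of the original source): how a caller might use
// the preparation ID to notice that a descriptor has been re-prepared, so any
// cached physical translations may be stale. Assumed usage pattern; compiled out.
#if 0
static bool
DescriptorStillCurrentSketch(IOMemoryDescriptor * md, uint64_t cachedID)
{
    // IDs are monotonically increasing and unique per preparation,
    // so equality means a previously cached translation is still valid.
    return md->getPreparationID() == cachedID;
}
#endif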
void
IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
{
    _kernelTag = (vm_tag_t) kernelTag;
    _userTag   = (vm_tag_t) userTag;
}

uint32_t
IOMemoryDescriptor::getVMTag(vm_map_t map)
{
    if (vm_kernel_map_is_kernel(map)) {
        if (VM_KERN_MEMORY_NONE != _kernelTag) {
            return (uint32_t) _kernelTag;
        }
    } else {
        if (VM_KERN_MEMORY_NONE != _userTag) {
            return (uint32_t) _userTag;
        }
    }
    return IOMemoryTag(map);
}
2159 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
2161 IOReturn err
= kIOReturnSuccess
;
2162 DMACommandOps params
;
2163 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
2166 params
= (op
& ~kIOMDDMACommandOperationMask
& op
);
2167 op
&= kIOMDDMACommandOperationMask
;
2169 if (kIOMDDMAMap
== op
) {
2170 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
2171 return kIOReturnUnderrun
;
2174 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
2177 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2178 return kIOReturnNoMemory
;
2181 if (_memoryEntries
&& data
->fMapper
) {
2182 bool remap
, keepMap
;
2183 dataP
= getDataP(_memoryEntries
);
2185 if (data
->fMapSpec
.numAddressBits
< dataP
->fDMAMapNumAddressBits
) {
2186 dataP
->fDMAMapNumAddressBits
= data
->fMapSpec
.numAddressBits
;
2188 if (data
->fMapSpec
.alignment
> dataP
->fDMAMapAlignment
) {
2189 dataP
->fDMAMapAlignment
= data
->fMapSpec
.alignment
;
2192 keepMap
= (data
->fMapper
== gIOSystemMapper
);
2193 keepMap
&= ((data
->fOffset
== 0) && (data
->fLength
== _length
));
2195 if ((data
->fMapper
== gIOSystemMapper
) && _prepareLock
) {
2196 IOLockLock(_prepareLock
);
2200 remap
|= (dataP
->fDMAMapNumAddressBits
< 64)
2201 && ((dataP
->fMappedBase
+ _length
) > (1ULL << dataP
->fDMAMapNumAddressBits
));
2202 remap
|= (dataP
->fDMAMapAlignment
> page_size
);
2204 if (remap
|| !dataP
->fMappedBaseValid
) {
2205 // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2206 err
= md
->dmaMap(data
->fMapper
, data
->fCommand
, &data
->fMapSpec
, data
->fOffset
, data
->fLength
, &data
->fAlloc
, &data
->fAllocLength
);
2207 if (keepMap
&& (kIOReturnSuccess
== err
) && !dataP
->fMappedBaseValid
) {
2208 dataP
->fMappedBase
= data
->fAlloc
;
2209 dataP
->fMappedBaseValid
= true;
2210 dataP
->fMappedLength
= data
->fAllocLength
;
2211 data
->fAllocLength
= 0; // IOMD owns the alloc now
2214 data
->fAlloc
= dataP
->fMappedBase
;
2215 data
->fAllocLength
= 0; // give out IOMD map
2216 md
->dmaMapRecord(data
->fMapper
, data
->fCommand
, dataP
->fMappedLength
);
2218 data
->fMapContig
= !dataP
->fDiscontig
;
2220 if ((data
->fMapper
== gIOSystemMapper
) && _prepareLock
) {
2221 IOLockUnlock(_prepareLock
);
2226 if (kIOMDDMAUnmap
== op
) {
2227 if (dataSize
< sizeof(IOMDDMAMapArgs
)) {
2228 return kIOReturnUnderrun
;
2230 IOMDDMAMapArgs
* data
= (IOMDDMAMapArgs
*) vData
;
2232 err
= md
->dmaUnmap(data
->fMapper
, data
->fCommand
, data
->fOffset
, data
->fAlloc
, data
->fAllocLength
);
2234 return kIOReturnSuccess
;
2237 if (kIOMDAddDMAMapSpec
== op
) {
2238 if (dataSize
< sizeof(IODMAMapSpecification
)) {
2239 return kIOReturnUnderrun
;
2242 IODMAMapSpecification
* data
= (IODMAMapSpecification
*) vData
;
2245 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2246 return kIOReturnNoMemory
;
2249 if (_memoryEntries
) {
2250 dataP
= getDataP(_memoryEntries
);
2251 if (data
->numAddressBits
< dataP
->fDMAMapNumAddressBits
) {
2252 dataP
->fDMAMapNumAddressBits
= data
->numAddressBits
;
2254 if (data
->alignment
> dataP
->fDMAMapAlignment
) {
2255 dataP
->fDMAMapAlignment
= data
->alignment
;
2258 return kIOReturnSuccess
;
2261 if (kIOMDGetCharacteristics
== op
) {
2262 if (dataSize
< sizeof(IOMDDMACharacteristics
)) {
2263 return kIOReturnUnderrun
;
2266 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
2267 data
->fLength
= _length
;
2268 data
->fSGCount
= _rangesCount
;
2269 data
->fPages
= _pages
;
2270 data
->fDirection
= getDirection();
2272 data
->fIsPrepared
= false;
2274 data
->fIsPrepared
= true;
2275 data
->fHighestPage
= _highestPage
;
2276 if (_memoryEntries
) {
2277 dataP
= getDataP(_memoryEntries
);
2278 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2279 UInt count
= getNumIOPL(_memoryEntries
, dataP
);
2281 data
->fPageAlign
= (ioplList
[0].fPageOffset
& PAGE_MASK
) | ~PAGE_MASK
;
2286 return kIOReturnSuccess
;
2287 } else if (kIOMDDMAActive
== op
) {
2290 prior
= OSAddAtomic16(1, &md
->_dmaReferences
);
2292 md
->_mapName
= NULL
;
2295 if (md
->_dmaReferences
) {
2296 OSAddAtomic16(-1, &md
->_dmaReferences
);
2298 panic("_dmaReferences underflow");
2301 } else if (kIOMDWalkSegments
!= op
) {
2302 return kIOReturnBadArgument
;
2305 // Get the next segment
2306 struct InternalState
{
2307 IOMDDMAWalkSegmentArgs fIO
;
2308 mach_vm_size_t fOffset2Index
;
2309 mach_vm_size_t fNextOffset
;
2313 // Find the next segment
2314 if (dataSize
< sizeof(*isP
)) {
2315 return kIOReturnUnderrun
;
2318 isP
= (InternalState
*) vData
;
2319 mach_vm_size_t offset
= isP
->fIO
.fOffset
;
2320 uint8_t mapped
= isP
->fIO
.fMapped
;
2321 uint64_t mappedBase
;
2323 if (mapped
&& (kIOMemoryRemote
& _flags
)) {
2324 return kIOReturnNotAttached
;
2327 if (IOMapper::gSystem
&& mapped
2328 && (!(kIOMemoryHostOnly
& _flags
))
2329 && (!_memoryEntries
|| !getDataP(_memoryEntries
)->fMappedBaseValid
)) {
2330 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2332 && !md
->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem
)) {
2333 return kIOReturnNoMemory
;
2336 dataP
= getDataP(_memoryEntries
);
2337 if (dataP
->fMapper
) {
2338 IODMAMapSpecification mapSpec
;
2339 bzero(&mapSpec
, sizeof(mapSpec
));
2340 mapSpec
.numAddressBits
= dataP
->fDMAMapNumAddressBits
;
2341 mapSpec
.alignment
= dataP
->fDMAMapAlignment
;
2342 err
= md
->dmaMap(dataP
->fMapper
, NULL
, &mapSpec
, 0, _length
, &dataP
->fMappedBase
, &dataP
->fMappedLength
);
2343 if (kIOReturnSuccess
!= err
) {
2346 dataP
->fMappedBaseValid
= true;
2350 if (kIOMDDMAWalkMappedLocal
== mapped
) {
2351 mappedBase
= isP
->fIO
.fMappedBase
;
2352 } else if (mapped
) {
2353 if (IOMapper::gSystem
2354 && (!(kIOMemoryHostOnly
& _flags
))
2356 && (dataP
= getDataP(_memoryEntries
))
2357 && dataP
->fMappedBaseValid
) {
2358 mappedBase
= dataP
->fMappedBase
;
2364 if (offset
>= _length
) {
2365 return (offset
== _length
)? kIOReturnOverrun
: kIOReturnInternalError
;
2368 // Validate the previous offset
2370 mach_vm_size_t off2Ind
= isP
->fOffset2Index
;
2373 && (offset
== isP
->fNextOffset
|| off2Ind
<= offset
)) {
2376 ind
= off2Ind
= 0; // Start from beginning
2378 mach_vm_size_t length
;
2381 if ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
2382 // Physical address based memory descriptor
2383 const IOPhysicalRange
*physP
= (IOPhysicalRange
*) &_ranges
.p
[0];
2385 // Find the range after the one that contains the offset
2387 for (len
= 0; off2Ind
<= offset
; ind
++) {
2388 len
= physP
[ind
].length
;
2392 // Calculate length within range and starting address
2393 length
= off2Ind
- offset
;
2394 address
= physP
[ind
- 1].address
+ len
- length
;
2396 if (true && mapped
) {
2397 address
= mappedBase
+ offset
;
2399 // see how far we can coalesce ranges
2400 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
2401 len
= physP
[ind
].length
;
2408 // correct contiguous check overshoot
2413 else if ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
) {
2414 // Physical address based memory descriptor
2415 const IOAddressRange
*physP
= (IOAddressRange
*) &_ranges
.v64
[0];
2417 // Find the range after the one that contains the offset
2419 for (len
= 0; off2Ind
<= offset
; ind
++) {
2420 len
= physP
[ind
].length
;
2424 // Calculate length within range and starting address
2425 length
= off2Ind
- offset
;
2426 address
= physP
[ind
- 1].address
+ len
- length
;
2428 if (true && mapped
) {
2429 address
= mappedBase
+ offset
;
2431 // see how far we can coalesce ranges
2432 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
2433 len
= physP
[ind
].length
;
2439 // correct contiguous check overshoot
2443 #endif /* !__LP64__ */
2447 panic("IOGMD: not wired for the IODMACommand");
2450 assert(_memoryEntries
);
2452 dataP
= getDataP(_memoryEntries
);
2453 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
2454 UInt numIOPLs
= getNumIOPL(_memoryEntries
, dataP
);
2455 upl_page_info_t
*pageList
= getPageList(dataP
);
2457 assert(numIOPLs
> 0);
2459 // Scan through iopl info blocks looking for block containing offset
2460 while (ind
< numIOPLs
&& offset
>= ioplList
[ind
].fIOMDOffset
) {
2464 // Go back to actual range as search goes past it
2465 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
2466 off2Ind
= ioplInfo
.fIOMDOffset
;
2468 if (ind
< numIOPLs
) {
2469 length
= ioplList
[ind
].fIOMDOffset
;
2473 length
-= offset
; // Remainder within iopl
2475 // Subtract offset till this iopl in total list
2478 // If a mapped address is requested and this is a pre-mapped IOPL
2479 // then just need to compute an offset relative to the mapped base.
2481 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
2482 address
= trunc_page_64(mappedBase
) + ptoa_64(ioplInfo
.fMappedPage
) + offset
;
2483 continue; // Done leave do/while(false) now
2486 // The offset is rebased into the current iopl.
2487 // Now add the iopl 1st page offset.
2488 offset
+= ioplInfo
.fPageOffset
;
2490 // For external UPLs the fPageInfo field points directly to
2491 // the upl's upl_page_info_t array.
2492 if (ioplInfo
.fFlags
& kIOPLExternUPL
) {
2493 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
2495 pageList
= &pageList
[ioplInfo
.fPageInfo
];
2498 // Check for direct device non-paged memory
2499 if (ioplInfo
.fFlags
& kIOPLOnDevice
) {
2500 address
= ptoa_64(pageList
->phys_addr
) + offset
;
2501 continue; // Done leave do/while(false) now
2504 // Now we need compute the index into the pageList
2505 UInt pageInd
= atop_32(offset
);
2506 offset
&= PAGE_MASK
;
2508 // Compute the starting address of this segment
2509 IOPhysicalAddress pageAddr
= pageList
[pageInd
].phys_addr
;
2511 panic("!pageList phys_addr");
2514 address
= ptoa_64(pageAddr
) + offset
;
// length is currently set to the length of the remainder of the iopl.
// We need to check that the remainder of the iopl is contiguous.
// This is indicated by pageList[ind].phys_addr being sequential.
2519 IOByteCount contigLength
= PAGE_SIZE
- offset
;
2520 while (contigLength
< length
2521 && ++pageAddr
== pageList
[++pageInd
].phys_addr
) {
2522 contigLength
+= PAGE_SIZE
;
2525 if (contigLength
< length
) {
2526 length
= contigLength
;
2535 // Update return values and state
2536 isP
->fIO
.fIOVMAddr
= address
;
2537 isP
->fIO
.fLength
= length
;
2539 isP
->fOffset2Index
= off2Ind
;
2540 isP
->fNextOffset
= isP
->fIO
.fOffset
+ length
;
2542 return kIOReturnSuccess
;
2546 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
2549 mach_vm_address_t address
= 0;
2550 mach_vm_size_t length
= 0;
2551 IOMapper
* mapper
= gIOSystemMapper
;
2552 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2554 if (lengthOfSegment
) {
2555 *lengthOfSegment
= 0;
2558 if (offset
>= _length
) {
2562 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2563 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2564 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2565 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2567 if ((options
& _kIOMemorySourceSegment
) && (kIOMemoryTypeUPL
!= type
)) {
2568 unsigned rangesIndex
= 0;
2569 Ranges vec
= _ranges
;
2570 mach_vm_address_t addr
;
2572 // Find starting address within the vector of ranges
2574 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
2575 if (offset
< length
) {
2578 offset
-= length
; // (make offset relative)
2582 // Now that we have the starting range,
2583 // lets find the last contiguous range
2587 for (++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++) {
2588 mach_vm_address_t newAddr
;
2589 mach_vm_size_t newLen
;
2591 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
2592 if (addr
+ length
!= newAddr
) {
2598 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
2601 IOMDDMAWalkSegmentState _state
;
2602 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) (void *)&_state
;
2604 state
->fOffset
= offset
;
2605 state
->fLength
= _length
- offset
;
2606 state
->fMapped
= (0 == (options
& kIOMemoryMapperNone
)) && !(_flags
& kIOMemoryHostOrRemote
);
2608 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
2610 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
)) {
2611 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2612 ret
, this, state
->fOffset
,
2613 state
->fIOVMAddr
, state
->fLength
);
2615 if (kIOReturnSuccess
== ret
) {
2616 address
= state
->fIOVMAddr
;
2617 length
= state
->fLength
;
2620 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2621 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2623 if (mapper
&& ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))) {
2624 if ((options
& kIOMemoryMapperNone
) && !(_flags
& kIOMemoryMapperNone
)) {
2625 addr64_t origAddr
= address
;
2626 IOByteCount origLen
= length
;
2628 address
= mapper
->mapToPhysicalAddress(origAddr
);
2629 length
= page_size
- (address
& (page_size
- 1));
2630 while ((length
< origLen
)
2631 && ((address
+ length
) == mapper
->mapToPhysicalAddress(origAddr
+ length
))) {
2632 length
+= page_size
;
2634 if (length
> origLen
) {
2645 if (lengthOfSegment
) {
2646 *lengthOfSegment
= length
;
#ifndef __LP64__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
	addr64_t address = 0;

	if (options & _kIOMemorySourceSegment) {
		address = getSourceSegment(offset, lengthOfSegment);
	} else if (options & kIOMemoryMapperNone) {
		address = getPhysicalSegment64(offset, lengthOfSegment);
	} else {
		address = getPhysicalSegment(offset, lengthOfSegment);
	}

	return address;
}
#pragma clang diagnostic pop

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
}
IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	addr64_t    address = 0;
	IOByteCount length  = 0;

	address = getPhysicalSegment(offset, lengthOfSegment, 0);

	if (lengthOfSegment) {
		length = *lengthOfSegment;
	}

	if ((address + length) > 0x100000000ULL) {
		panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
		    address, (long) length, (getMetaClass())->getClassName());
	}

	return (IOPhysicalAddress) address;
}
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	IOPhysicalAddress phys32;
	IOByteCount       length;
	addr64_t          phys64;
	IOMapper        * mapper = NULL;

	phys32 = getPhysicalSegment(offset, lengthOfSegment);
	if (!phys32) {
		return 0;
	}

	if (gIOSystemMapper) {
		mapper = gIOSystemMapper;
	}

	if (mapper) {
		IOByteCount origLen;

		phys64  = mapper->mapToPhysicalAddress(phys32);
		origLen = *lengthOfSegment;
		length  = page_size - (phys64 & (page_size - 1));
		while ((length < origLen)
		    && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
			length += page_size;
		}
		if (length > origLen) {
			length = origLen;
		}

		*lengthOfSegment = length;
	} else {
		phys64 = (addr64_t) phys32;
	}

	return phys64;
}
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
	return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
}
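/*
 * Editor's note: a minimal usage sketch, not part of the original file, showing
 * how a caller might walk the physical segments of a prepared descriptor with
 * getPhysicalSegment(). The descriptor "md" and the commented-out per-segment
 * handler are hypothetical names used only for illustration.
 */
#if 0
static IOReturn
walkSegmentsExample(IOMemoryDescriptor * md)
{
	IOByteCount offset = 0;
	IOByteCount total  = md->getLength();

	while (offset < total) {
		IOByteCount segLen  = 0;
		addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
		if (!segAddr || !segLen) {
			return kIOReturnVMError;            // ran off the end or unmapped page
		}
		// handleSegment(segAddr, segLen);          // hypothetical per-segment work
		offset += segLen;
	}
	return kIOReturnSuccess;
}
#endif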
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"

void *
IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
    IOByteCount * lengthOfSegment)
{
	if (_task == kernel_task) {
		return (void *) getSourceSegment(offset, lengthOfSegment);
	}

	panic("IOGMD::getVirtualSegment deprecated");

	return NULL;
}
#pragma clang diagnostic pop
#endif /* !__LP64__ */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
	IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
	DMACommandOps       params;
	IOReturn            err;

	params = (op & ~kIOMDDMACommandOperationMask & op);
	op &= kIOMDDMACommandOperationMask;

	if (kIOMDGetCharacteristics == op) {
		if (dataSize < sizeof(IOMDDMACharacteristics)) {
			return kIOReturnUnderrun;
		}

		IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
		data->fLength = getLength();
		data->fDirection = getDirection();
		data->fIsPrepared = true; // Assume prepared - fails safe
	} else if (kIOMDWalkSegments == op) {
		if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
			return kIOReturnUnderrun;
		}

		IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
		IOByteCount offset = (IOByteCount) data->fOffset;

		IOPhysicalLength length;
		if (data->fMapped && IOMapper::gSystem) {
			data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
		} else {
			data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
		}
		data->fLength = length;
	} else if (kIOMDAddDMAMapSpec == op) {
		return kIOReturnUnsupported;
	} else if (kIOMDDMAMap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		if (params) {
			panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
		}

		data->fMapContig = true;
		err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);

		return err;
	} else if (kIOMDDMAUnmap == op) {
		if (dataSize < sizeof(IOMDDMAMapArgs)) {
			return kIOReturnUnderrun;
		}
		IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

		err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);

		return kIOReturnSuccess;
	} else {
		return kIOReturnBadArgument;
	}

	return kIOReturnSuccess;
}
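/*
 * Editor's note: illustrative sketch, not part of the original file. Drivers do
 * not normally call dmaCommandOperation() directly; IODMACommand issues these
 * kIOMD* operations internally. A typical client looks roughly like the code
 * below; the descriptor "md" and the hardware-programming step are assumptions,
 * and the IODMACommand names are quoted from IODMACommand.h as the editor
 * recalls them.
 */
#if 0
static IOReturn
dmaCommandExample(IOMemoryDescriptor * md)
{
	IODMACommand * cmd = IODMACommand::withSpecification(
		kIODMACommandOutputHost64,          // segment output function
		64,                                 // supported address bits
		0,                                  // no maximum segment size
		IODMACommand::kMapped,              // use the system mapper when present
		0, 1, NULL, NULL);
	if (!cmd) {
		return kIOReturnNoMemory;
	}

	IOReturn ret = cmd->setMemoryDescriptor(md, true /* auto prepare */);
	if (kIOReturnSuccess == ret) {
		UInt64 offset = 0;
		while (offset < md->getLength()) {
			IODMACommand::Segment64 seg;
			UInt32                  numSeg = 1;
			ret = cmd->gen64IOVMSegments(&offset, &seg, &numSeg);
			if ((kIOReturnSuccess != ret) || !numSeg) {
				break;
			}
			// program hardware with seg.fIOVMAddr / seg.fLength (hypothetical step)
		}
		cmd->clearMemoryDescriptor(true /* auto complete */);
	}
	cmd->release();
	return ret;
}
#endif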
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn      err = kIOReturnSuccess;

	vm_purgable_t control;
	int           state;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_memRef) {
		err = super::setPurgeable(newState, oldState);
	} else {
		if (kIOMemoryThreadSafe & _flags) {
			LOCK;
		}
		do {
			// Find the appropriate vm_map for the given task
			vm_map_t curMap;
			if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
				err = kIOReturnNotReady;
				break;
			} else if (!_task) {
				err = kIOReturnUnsupported;
				break;
			} else {
				curMap = get_task_map(_task);
				if (NULL == curMap) {
					err = KERN_INVALID_ARGUMENT;
					break;
				}
			}

			// can only do one range
			Ranges            vec  = _ranges;
			IOOptionBits      type = _flags & kIOMemoryTypeMask;
			mach_vm_address_t addr;
			mach_vm_size_t    len;
			getAddrLenForInd(addr, len, type, vec, 0);

			err = purgeableControlBits(newState, &control, &state);
			if (kIOReturnSuccess != err) {
				break;
			}
			err = vm_map_purgable_control(curMap, addr, control, &state);
			if (oldState) {
				if (kIOReturnSuccess == err) {
					err = purgeableStateBits(&state);
					*oldState = state;
				}
			}
		} while (false);

		if (kIOMemoryThreadSafe & _flags) {
			UNLOCK;
		}
	}

	return err;
}
IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
    IOOptionBits * oldState )
{
	IOReturn err = kIOReturnNotReady;

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
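/*
 * Editor's note: illustrative sketch, not part of the original file, of the
 * purgeable-memory protocol from a client's point of view: mark a buffer
 * volatile while it is idle, then reclaim it and check whether the pager
 * emptied it in the meantime. Assumes <IOKit/IOBufferMemoryDescriptor.h> and a
 * buffer created with the kIOMemoryPurgeable option.
 */
#if 0
static IOReturn
purgeableExample(void)
{
	IOBufferMemoryDescriptor * buf = IOBufferMemoryDescriptor::inTaskWithOptions(
		kernel_task, kIODirectionOutIn | kIOMemoryPurgeable, page_size, page_size);
	if (!buf) {
		return kIOReturnNoMemory;
	}

	IOOptionBits oldState = 0;

	// Done with the contents for now; let the VM discard the pages under pressure.
	buf->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

	// ... later: take the buffer back and find out whether it survived.
	buf->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
	bool contentsLost = (kIOMemoryPurgeableEmpty == oldState);

	buf->release();
	return contentsLost ? kIOReturnNotFound : kIOReturnSuccess;
}
#endif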
IOReturn
IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn err = kIOReturnSuccess;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (_memRef) {
		err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
	} else {
		err = kIOReturnUnsupported;
	}

	return err;
}
IOReturn
IOMemoryDescriptor::setOwnership( task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions )
{
	IOReturn err = kIOReturnNotReady;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (iokit_iomd_setownership_enabled == FALSE) {
		return kIOReturnUnsupported;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
	} else {
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount )
{
	IOReturn err = kIOReturnNotReady;

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (kIOMemoryThreadSafe & _flags) {
		LOCK;
	}
	if (_memRef) {
		err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
	} else {
		IOMultiMemoryDescriptor * mmd;
		IOSubMemoryDescriptor   * smd;
		if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
			err = smd->getPageCounts(residentPageCount, dirtyPageCount);
		} else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
			err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
		}
	}
	if (kIOMemoryThreadSafe & _flags) {
		UNLOCK;
	}

	return err;
}
#if defined(__arm__) || defined(__arm64__)
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
#else /* defined(__arm__) || defined(__arm64__) */
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
#endif /* defined(__arm__) || defined(__arm64__) */
static void
SetEncryptOp(addr64_t pa, unsigned int count)
{
	ppnum_t page, end;

	page = atop_64(round_page_64(pa));
	end  = atop_64(trunc_page_64(pa + count));
	for (; page < end; page++) {
		pmap_clear_noencrypt(page);
	}
}

static void
ClearEncryptOp(addr64_t pa, unsigned int count)
{
	ppnum_t page, end;

	page = atop_64(round_page_64(pa));
	end  = atop_64(trunc_page_64(pa + count));
	for (; page < end; page++) {
		pmap_set_noencrypt(page);
	}
}
3046 IOMemoryDescriptor::performOperation( IOOptionBits options
,
3047 IOByteCount offset
, IOByteCount length
)
3049 IOByteCount remaining
;
3051 void (*func
)(addr64_t pa
, unsigned int count
) = NULL
;
3052 #if defined(__arm__) || defined(__arm64__)
3053 void (*func_ext
)(addr64_t pa
, unsigned int count
, unsigned int remaining
, unsigned int *result
) = NULL
;
3056 assert(!(kIOMemoryRemote
& _flags
));
3057 if (kIOMemoryRemote
& _flags
) {
3058 return kIOReturnNotAttached
;
3062 case kIOMemoryIncoherentIOFlush
:
3063 #if defined(__arm__) || defined(__arm64__)
3064 func_ext
= &dcache_incoherent_io_flush64
;
3065 #if __ARM_COHERENT_IO__
3066 func_ext(0, 0, 0, &res
);
3067 return kIOReturnSuccess
;
3068 #else /* __ARM_COHERENT_IO__ */
3070 #endif /* __ARM_COHERENT_IO__ */
3071 #else /* defined(__arm__) || defined(__arm64__) */
3072 func
= &dcache_incoherent_io_flush64
;
3074 #endif /* defined(__arm__) || defined(__arm64__) */
3075 case kIOMemoryIncoherentIOStore
:
3076 #if defined(__arm__) || defined(__arm64__)
3077 func_ext
= &dcache_incoherent_io_store64
;
3078 #if __ARM_COHERENT_IO__
3079 func_ext(0, 0, 0, &res
);
3080 return kIOReturnSuccess
;
3081 #else /* __ARM_COHERENT_IO__ */
3083 #endif /* __ARM_COHERENT_IO__ */
3084 #else /* defined(__arm__) || defined(__arm64__) */
3085 func
= &dcache_incoherent_io_store64
;
3087 #endif /* defined(__arm__) || defined(__arm64__) */
3089 case kIOMemorySetEncrypted
:
3090 func
= &SetEncryptOp
;
3092 case kIOMemoryClearEncrypted
:
3093 func
= &ClearEncryptOp
;
3097 #if defined(__arm__) || defined(__arm64__)
3098 if ((func
== NULL
) && (func_ext
== NULL
)) {
3099 return kIOReturnUnsupported
;
3101 #else /* defined(__arm__) || defined(__arm64__) */
3103 return kIOReturnUnsupported
;
3105 #endif /* defined(__arm__) || defined(__arm64__) */
3107 if (kIOMemoryThreadSafe
& _flags
) {
3112 remaining
= length
= min(length
, getLength() - offset
);
3114 // (process another target segment?)
3118 dstAddr64
= getPhysicalSegment(offset
, &dstLen
, kIOMemoryMapperNone
);
3123 // Clip segment length to remaining
3124 if (dstLen
> remaining
) {
3128 #if defined(__arm__) || defined(__arm64__)
3130 (*func
)(dstAddr64
, dstLen
);
3133 (*func_ext
)(dstAddr64
, dstLen
, remaining
, &res
);
3139 #else /* defined(__arm__) || defined(__arm64__) */
3140 (*func
)(dstAddr64
, dstLen
);
3141 #endif /* defined(__arm__) || defined(__arm64__) */
3144 remaining
-= dstLen
;
3147 if (kIOMemoryThreadSafe
& _flags
) {
3151 return remaining
? kIOReturnUnderrun
: kIOReturnSuccess
;
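/*
 * Editor's note: illustrative sketch, not part of the original file. On
 * hardware without coherent DMA a driver can ask the descriptor to perform
 * cache maintenance on the byte range it is about to transfer; the descriptor
 * "md" is assumed to exist and to be prepared.
 */
#if 0
static IOReturn
cacheMaintenanceExample(IOMemoryDescriptor * md, IOByteCount offset, IOByteCount length)
{
	// Push dirty CPU cache lines to memory before the device reads the buffer.
	return md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
}
#endif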
#if defined(__i386__) || defined(__x86_64__)

#define io_kernel_static_start  vm_kernel_stext
#define io_kernel_static_end    vm_kernel_etext

#elif defined(__arm__) || defined(__arm64__)

extern vm_offset_t static_memory_end;

#if defined(__arm64__)
#define io_kernel_static_start vm_kext_base
#else /* defined(__arm64__) */
#define io_kernel_static_start vm_kernel_stext
#endif /* defined(__arm64__) */

#define io_kernel_static_end    static_memory_end

#else
#error io_kernel_static_end is undefined for this architecture
#endif
static kern_return_t
io_get_kernel_static_upl(
	vm_map_t                /* map */,
	uintptr_t               offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	ppnum_t                 *highest_page)
{
	unsigned int pageCount, page;
	ppnum_t      phys;
	ppnum_t      highestPage = 0;

	pageCount = atop_32(*upl_size);
	if (pageCount > *count) {
		pageCount = *count;
	}

	*upl = NULL;

	for (page = 0; page < pageCount; page++) {
		phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
		if (!phys) {
			break;
		}
		page_list[page].phys_addr      = phys;
		page_list[page].free_when_done = 0;
		page_list[page].absent         = 0;
		page_list[page].dirty          = 0;
		page_list[page].precious       = 0;
		page_list[page].device         = 0;
		if (phys > highestPage) {
			highestPage = phys;
		}
	}

	*highest_page = highestPage;

	return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
}
3222 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
3224 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3225 IOReturn error
= kIOReturnSuccess
;
3227 upl_page_info_array_t pageInfo
;
3229 vm_tag_t tag
= VM_KERN_MEMORY_NONE
;
3231 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
3233 if ((kIODirectionOutIn
& forDirection
) == kIODirectionNone
) {
3234 forDirection
= (IODirection
) (forDirection
| getDirection());
3237 dataP
= getDataP(_memoryEntries
);
3238 upl_control_flags_t uplFlags
; // This Mem Desc's default flags for upl creation
3239 switch (kIODirectionOutIn
& forDirection
) {
3240 case kIODirectionOut
:
3241 // Pages do not need to be marked as dirty on commit
3242 uplFlags
= UPL_COPYOUT_FROM
;
3243 dataP
->fDMAAccess
= kIODMAMapReadAccess
;
3246 case kIODirectionIn
:
3247 dataP
->fDMAAccess
= kIODMAMapWriteAccess
;
3248 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3252 dataP
->fDMAAccess
= kIODMAMapReadAccess
| kIODMAMapWriteAccess
;
3253 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
3258 if ((kIOMemoryPreparedReadOnly
& _flags
) && !(UPL_COPYOUT_FROM
& uplFlags
)) {
3259 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3260 error
= kIOReturnNotWritable
;
3265 mapper
= dataP
->fMapper
;
3266 dataP
->fMappedBaseValid
= dataP
->fMappedBase
= 0;
3268 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
3270 if (VM_KERN_MEMORY_NONE
== tag
) {
3271 tag
= IOMemoryTag(kernel_map
);
3274 if (kIODirectionPrepareToPhys32
& forDirection
) {
3276 uplFlags
|= UPL_NEED_32BIT_ADDR
;
3278 if (dataP
->fDMAMapNumAddressBits
> 32) {
3279 dataP
->fDMAMapNumAddressBits
= 32;
3282 if (kIODirectionPrepareNoFault
& forDirection
) {
3283 uplFlags
|= UPL_REQUEST_NO_FAULT
;
3285 if (kIODirectionPrepareNoZeroFill
& forDirection
) {
3286 uplFlags
|= UPL_NOZEROFILLIO
;
3288 if (kIODirectionPrepareNonCoherent
& forDirection
) {
3289 uplFlags
|= UPL_REQUEST_FORCE_COHERENCY
;
3294 // Note that appendBytes(NULL) zeros the data up to the desired length
3295 // and the length parameter is an unsigned int
3296 size_t uplPageSize
= dataP
->fPageCnt
* sizeof(upl_page_info_t
);
3297 if (uplPageSize
> ((unsigned int)uplPageSize
)) {
3298 return kIOReturnNoMemory
;
3300 if (!_memoryEntries
->appendBytes(NULL
, uplPageSize
)) {
3301 return kIOReturnNoMemory
;
3305 // Find the appropriate vm_map for the given task
3307 if ((NULL
!= _memRef
) || ((_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
)))) {
3310 curMap
= get_task_map(_task
);
3313 // Iterate over the vector of virtual ranges
3314 Ranges vec
= _ranges
;
3315 unsigned int pageIndex
= 0;
3316 IOByteCount mdOffset
= 0;
3317 ppnum_t highestPage
= 0;
3319 IOMemoryEntry
* memRefEntry
= NULL
;
3321 memRefEntry
= &_memRef
->entries
[0];
3324 for (UInt range
= 0; range
< _rangesCount
; range
++) {
3326 mach_vm_address_t startPage
, startPageOffset
;
3327 mach_vm_size_t numBytes
;
3328 ppnum_t highPage
= 0;
3330 // Get the startPage address and length of vec[range]
3331 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
3332 startPageOffset
= startPage
& PAGE_MASK
;
3333 iopl
.fPageOffset
= startPageOffset
;
3334 numBytes
+= startPageOffset
;
3335 startPage
= trunc_page_64(startPage
);
3338 iopl
.fMappedPage
= mapBase
+ pageIndex
;
3340 iopl
.fMappedPage
= 0;
3343 // Iterate over the current range, creating UPLs
3345 vm_address_t kernelStart
= (vm_address_t
) startPage
;
3349 } else if (_memRef
) {
3352 assert(_task
== kernel_task
);
3353 theMap
= IOPageableMapForAddress(kernelStart
);
3356 // ioplFlags is an in/out parameter
3357 upl_control_flags_t ioplFlags
= uplFlags
;
3358 dataP
= getDataP(_memoryEntries
);
3359 pageInfo
= getPageList(dataP
);
3360 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
3362 mach_vm_size_t _ioplSize
= round_page(numBytes
);
3363 upl_size_t ioplSize
= (_ioplSize
<= MAX_UPL_SIZE_BYTES
) ? _ioplSize
: MAX_UPL_SIZE_BYTES
;
3364 unsigned int numPageInfo
= atop_32(ioplSize
);
3366 if ((theMap
== kernel_map
)
3367 && (kernelStart
>= io_kernel_static_start
)
3368 && (kernelStart
< io_kernel_static_end
)) {
3369 error
= io_get_kernel_static_upl(theMap
,
3376 } else if (_memRef
) {
3377 memory_object_offset_t entryOffset
;
3379 entryOffset
= mdOffset
;
3380 entryOffset
= (entryOffset
- iopl
.fPageOffset
- memRefEntry
->offset
);
3381 if (entryOffset
>= memRefEntry
->size
) {
3383 if (memRefEntry
>= &_memRef
->entries
[_memRef
->count
]) {
3384 panic("memRefEntry");
3388 if (ioplSize
> (memRefEntry
->size
- entryOffset
)) {
3389 ioplSize
= (memRefEntry
->size
- entryOffset
);
3391 error
= memory_object_iopl_request(memRefEntry
->entry
,
3401 error
= vm_map_create_upl(theMap
,
3403 (upl_size_t
*)&ioplSize
,
3411 if (error
!= KERN_SUCCESS
) {
3418 highPage
= upl_get_highest_page(iopl
.fIOPL
);
3420 if (highPage
> highestPage
) {
3421 highestPage
= highPage
;
3424 if (baseInfo
->device
) {
3426 iopl
.fFlags
= kIOPLOnDevice
;
3431 iopl
.fIOMDOffset
= mdOffset
;
3432 iopl
.fPageInfo
= pageIndex
;
3433 if (mapper
&& pageIndex
&& (page_mask
& (mdOffset
+ startPageOffset
))) {
3434 dataP
->fDiscontig
= true;
3437 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
// Clean up partially created and unsaved iopl
3440 upl_abort(iopl
.fIOPL
, 0);
3441 upl_deallocate(iopl
.fIOPL
);
3443 error
= kIOReturnNoMemory
;
3448 // Check for a multiple iopl's in one virtual range
3449 pageIndex
+= numPageInfo
;
3450 mdOffset
-= iopl
.fPageOffset
;
3451 if (ioplSize
< numBytes
) {
3452 numBytes
-= ioplSize
;
3453 startPage
+= ioplSize
;
3454 mdOffset
+= ioplSize
;
3455 iopl
.fPageOffset
= 0;
3457 iopl
.fMappedPage
= mapBase
+ pageIndex
;
3460 mdOffset
+= numBytes
;
3466 _highestPage
= highestPage
;
3468 if (UPL_COPYOUT_FROM
& uplFlags
) {
3469 _flags
|= kIOMemoryPreparedReadOnly
;
3474 if (!(_flags
& kIOMemoryAutoPrepare
) && (kIOReturnSuccess
== error
)) {
3475 dataP
= getDataP(_memoryEntries
);
3476 if (!dataP
->fWireTracking
.link
.next
) {
3477 IOTrackingAdd(gIOWireTracking
, &dataP
->fWireTracking
, ptoa(_pages
), false, tag
);
3480 #endif /* IOTRACKING */
3486 dataP
= getDataP(_memoryEntries
);
3487 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
3488 ioPLBlock
*ioplList
= getIOPLList(dataP
);
3490 for (UInt range
= 0; range
< done
; range
++) {
3491 if (ioplList
[range
].fIOPL
) {
3492 upl_abort(ioplList
[range
].fIOPL
, 0);
3493 upl_deallocate(ioplList
[range
].fIOPL
);
3496 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
3499 if (error
== KERN_FAILURE
) {
3500 error
= kIOReturnCannotWire
;
3501 } else if (error
== KERN_MEMORY_ERROR
) {
3502 error
= kIOReturnNoResources
;
bool
IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
	ioGMDData * dataP;
	unsigned    dataSize = size;

	if (!_memoryEntries) {
		_memoryEntries = OSData::withCapacity(dataSize);
		if (!_memoryEntries) {
			return false;
		}
	} else if (!_memoryEntries->initWithCapacity(dataSize)) {
		return false;
	}

	_memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
	dataP = getDataP(_memoryEntries);

	if (mapper == kIOMapperWaitSystem) {
		IOMapper::checkForSystemMapper();
		mapper = IOMapper::gSystem;
	}
	dataP->fMapper               = mapper;
	dataP->fPageCnt              = 0;
	dataP->fMappedBase           = 0;
	dataP->fDMAMapNumAddressBits = 64;
	dataP->fDMAMapAlignment      = 0;
	dataP->fPreparationID        = kIOPreparationIDUnprepared;
	dataP->fDiscontig            = false;
	dataP->fCompletionError      = false;
	dataP->fMappedBaseValid      = false;

	return true;
}
IOReturn
IOMemoryDescriptor::dmaMap(
	IOMapper                    * mapper,
	IODMACommand                * command,
	const IODMAMapSpecification * mapSpec,
	uint64_t                      offset,
	uint64_t                      length,
	uint64_t                    * mapAddress,
	uint64_t                    * mapLength)
{
	IOReturn err;
	uint32_t mapOptions;

	mapOptions  = 0;
	mapOptions |= kIODMAMapReadAccess;
	if (!(kIOMemoryPreparedReadOnly & _flags)) {
		mapOptions |= kIODMAMapWriteAccess;
	}

	err = mapper->iovmMapMemory(this, offset, length, mapOptions,
	    mapSpec, command, NULL, mapAddress, mapLength);

	if (kIOReturnSuccess == err) {
		dmaMapRecord(mapper, command, *mapLength);
	}

	return err;
}
3573 IOMemoryDescriptor::dmaMapRecord(
3575 IODMACommand
* command
,
3578 kern_allocation_name_t alloc
;
3581 if ((alloc
= mapper
->fAllocName
) /* && mapper != IOMapper::gSystem */) {
3582 kern_allocation_update_size(mapper
->fAllocName
, mapLength
);
3588 prior
= OSAddAtomic16(1, &_dmaReferences
);
3590 if (alloc
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
3592 mapLength
= _length
;
3593 kern_allocation_update_subtotal(alloc
, _kernelTag
, mapLength
);
3601 IOMemoryDescriptor::dmaUnmap(
3603 IODMACommand
* command
,
3605 uint64_t mapAddress
,
3609 kern_allocation_name_t alloc
;
3610 kern_allocation_name_t mapName
;
3617 if (_dmaReferences
) {
3618 prior
= OSAddAtomic16(-1, &_dmaReferences
);
3620 panic("_dmaReferences underflow");
3625 return kIOReturnSuccess
;
3628 ret
= mapper
->iovmUnmapMemory(this, command
, mapAddress
, mapLength
);
3630 if ((alloc
= mapper
->fAllocName
)) {
3631 kern_allocation_update_size(alloc
, -mapLength
);
3632 if ((1 == prior
) && mapName
&& (VM_KERN_MEMORY_NONE
!= _kernelTag
)) {
3633 mapLength
= _length
;
3634 kern_allocation_update_subtotal(mapName
, _kernelTag
, -mapLength
);
3642 IOGeneralMemoryDescriptor::dmaMap(
3644 IODMACommand
* command
,
3645 const IODMAMapSpecification
* mapSpec
,
3648 uint64_t * mapAddress
,
3649 uint64_t * mapLength
)
3651 IOReturn err
= kIOReturnSuccess
;
3653 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3656 if (kIOMemoryHostOnly
& _flags
) {
3657 return kIOReturnSuccess
;
3659 if (kIOMemoryRemote
& _flags
) {
3660 return kIOReturnNotAttached
;
3663 if ((type
== kIOMemoryTypePhysical
) || (type
== kIOMemoryTypePhysical64
)
3664 || offset
|| (length
!= _length
)) {
3665 err
= super::dmaMap(mapper
, command
, mapSpec
, offset
, length
, mapAddress
, mapLength
);
3666 } else if (_memoryEntries
&& _pages
&& (dataP
= getDataP(_memoryEntries
))) {
3667 const ioPLBlock
* ioplList
= getIOPLList(dataP
);
3668 upl_page_info_t
* pageList
;
3669 uint32_t mapOptions
= 0;
3671 IODMAMapSpecification mapSpec
;
3672 bzero(&mapSpec
, sizeof(mapSpec
));
3673 mapSpec
.numAddressBits
= dataP
->fDMAMapNumAddressBits
;
3674 mapSpec
.alignment
= dataP
->fDMAMapAlignment
;
3676 // For external UPLs the fPageInfo field points directly to
3677 // the upl's upl_page_info_t array.
3678 if (ioplList
->fFlags
& kIOPLExternUPL
) {
3679 pageList
= (upl_page_info_t
*) ioplList
->fPageInfo
;
3680 mapOptions
|= kIODMAMapPagingPath
;
3682 pageList
= getPageList(dataP
);
3685 if ((_length
== ptoa_64(_pages
)) && !(page_mask
& ioplList
->fPageOffset
)) {
3686 mapOptions
|= kIODMAMapPageListFullyOccupied
;
3689 assert(dataP
->fDMAAccess
);
3690 mapOptions
|= dataP
->fDMAAccess
;
3692 // Check for direct device non-paged memory
3693 if (ioplList
->fFlags
& kIOPLOnDevice
) {
3694 mapOptions
|= kIODMAMapPhysicallyContiguous
;
3697 IODMAMapPageList dmaPageList
=
3699 .pageOffset
= (uint32_t)(ioplList
->fPageOffset
& page_mask
),
3700 .pageListCount
= _pages
,
3701 .pageList
= &pageList
[0]
3703 err
= mapper
->iovmMapMemory(this, offset
, length
, mapOptions
, &mapSpec
,
3704 command
, &dmaPageList
, mapAddress
, mapLength
);
3706 if (kIOReturnSuccess
== err
) {
3707 dmaMapRecord(mapper
, command
, *mapLength
);
/*
 * Prepare the memory for an I/O transfer. This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer. The complete() method completes the processing of
 * the memory after the I/O transfer finishes. This method needn't be
 * called for non-pageable memory.
 */

IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
	IOReturn     error = kIOReturnSuccess;
	IOOptionBits type  = _flags & kIOMemoryTypeMask;

	if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
		return kIOReturnSuccess;
	}

	assert(!(kIOMemoryRemote & _flags));
	if (kIOMemoryRemote & _flags) {
		return kIOReturnNotAttached;
	}

	if (_prepareLock) {
		IOLockLock(_prepareLock);
	}

	if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
		if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
			error = kIOReturnNotReady;
			goto finish;
		}
		error = wireVirtual(forDirection);
	}

	if (kIOReturnSuccess == error) {
		if (1 == ++_wireCount) {
			if (kIOMemoryClearEncrypt & _flags) {
				performOperation(kIOMemoryClearEncrypted, 0, _length);
			}
		}
	}

finish:
	if (_prepareLock) {
		IOLockUnlock(_prepareLock);
	}

	return error;
}
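/*
 * Editor's note: illustrative sketch, not part of the original file, of the
 * prepare()/complete() pairing described in the comment above. The descriptor
 * "md" and the commented-out transfer step are hypothetical.
 */
#if 0
static IOReturn
preparedTransferExample(IOMemoryDescriptor * md)
{
	IOReturn ret = md->prepare();               // wire the pages for I/O
	if (kIOReturnSuccess != ret) {
		return ret;
	}

	// ret = doTransfer(md);                    // hypothetical DMA or copy step

	md->complete();                             // always balance the prepare()
	return ret;
}
#endif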
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
3778 IOGeneralMemoryDescriptor::complete(IODirection forDirection
)
3780 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3783 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)) {
3784 return kIOReturnSuccess
;
3787 assert(!(kIOMemoryRemote
& _flags
));
3788 if (kIOMemoryRemote
& _flags
) {
3789 return kIOReturnNotAttached
;
3793 IOLockLock(_prepareLock
);
3800 dataP
= getDataP(_memoryEntries
);
3805 if (kIODirectionCompleteWithError
& forDirection
) {
3806 dataP
->fCompletionError
= true;
3809 if ((kIOMemoryClearEncrypt
& _flags
) && (1 == _wireCount
)) {
3810 performOperation(kIOMemorySetEncrypted
, 0, _length
);
3814 if (!_wireCount
|| (kIODirectionCompleteWithDataValid
& forDirection
)) {
3815 ioPLBlock
*ioplList
= getIOPLList(dataP
);
3816 UInt ind
, count
= getNumIOPL(_memoryEntries
, dataP
);
3819 // kIODirectionCompleteWithDataValid & forDirection
3820 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
3822 tag
= getVMTag(kernel_map
);
3823 for (ind
= 0; ind
< count
; ind
++) {
3824 if (ioplList
[ind
].fIOPL
) {
3825 iopl_valid_data(ioplList
[ind
].fIOPL
, tag
);
3830 if (_dmaReferences
) {
3831 panic("complete() while dma active");
3834 if (dataP
->fMappedBaseValid
) {
3835 dmaUnmap(dataP
->fMapper
, NULL
, 0, dataP
->fMappedBase
, dataP
->fMappedLength
);
3836 dataP
->fMappedBaseValid
= dataP
->fMappedBase
= 0;
3839 if (dataP
->fWireTracking
.link
.next
) {
3840 IOTrackingRemove(gIOWireTracking
, &dataP
->fWireTracking
, ptoa(_pages
));
3842 #endif /* IOTRACKING */
3843 // Only complete iopls that we created which are for TypeVirtual
3844 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
3845 for (ind
= 0; ind
< count
; ind
++) {
3846 if (ioplList
[ind
].fIOPL
) {
3847 if (dataP
->fCompletionError
) {
3848 upl_abort(ioplList
[ind
].fIOPL
, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3850 upl_commit(ioplList
[ind
].fIOPL
, NULL
, 0);
3852 upl_deallocate(ioplList
[ind
].fIOPL
);
3855 } else if (kIOMemoryTypeUPL
== type
) {
3856 upl_set_referenced(ioplList
[0].fIOPL
, false);
3859 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
3861 dataP
->fPreparationID
= kIOPreparationIDUnprepared
;
3862 _flags
&= ~kIOMemoryPreparedReadOnly
;
3868 IOLockUnlock(_prepareLock
);
3871 return kIOReturnSuccess
;
3875 IOGeneralMemoryDescriptor::doMap(
3876 vm_map_t __addressMap
,
3877 IOVirtualAddress
* __address
,
3878 IOOptionBits options
,
3879 IOByteCount __offset
,
3880 IOByteCount __length
)
3883 if (!(kIOMap64Bit
& options
)) {
3884 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3886 #endif /* !__LP64__ */
3890 IOMemoryMap
* mapping
= (IOMemoryMap
*) *__address
;
3891 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
3892 mach_vm_size_t length
= mapping
->fLength
;
3894 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
3895 Ranges vec
= _ranges
;
3897 mach_vm_address_t range0Addr
= 0;
3898 mach_vm_size_t range0Len
= 0;
3900 if ((offset
>= _length
) || ((offset
+ length
) > _length
)) {
3901 return kIOReturnBadArgument
;
3904 assert(!(kIOMemoryRemote
& _flags
));
3905 if (kIOMemoryRemote
& _flags
) {
3910 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
3913 // mapping source == dest? (could be much better)
3915 && (mapping
->fAddressTask
== _task
)
3916 && (mapping
->fAddressMap
== get_task_map(_task
))
3917 && (options
& kIOMapAnywhere
)
3918 && (!(kIOMapUnique
& options
))
3919 && (1 == _rangesCount
)
3922 && (length
<= range0Len
)) {
3923 mapping
->fAddress
= range0Addr
;
3924 mapping
->fOptions
|= kIOMapStatic
;
3926 return kIOReturnSuccess
;
3930 IOOptionBits createOptions
= 0;
3931 if (!(kIOMapReadOnly
& options
)) {
3932 createOptions
|= kIOMemoryReferenceWrite
;
3933 #if DEVELOPMENT || DEBUG
3934 if ((kIODirectionOut
== (kIODirectionOutIn
& _flags
))
3935 && (!reserved
|| (reserved
->creator
!= mapping
->fAddressTask
))) {
3936 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3940 err
= memoryReferenceCreate(createOptions
, &_memRef
);
3941 if (kIOReturnSuccess
!= err
) {
3946 memory_object_t pager
;
3947 pager
= (memory_object_t
) (reserved
? reserved
->dp
.devicePager
: NULL
);
3949 // <upl_transpose //
3950 if ((kIOMapReference
| kIOMapUnique
) == ((kIOMapReference
| kIOMapUnique
) & options
)) {
3954 upl_control_flags_t flags
;
3955 unsigned int lock_count
;
3957 if (!_memRef
|| (1 != _memRef
->count
)) {
3958 err
= kIOReturnNotReadable
;
3962 size
= round_page(mapping
->fLength
);
3963 flags
= UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
3964 | UPL_SET_LITE
| UPL_SET_IO_WIRE
| UPL_BLOCK_ACCESS
;
3966 if (KERN_SUCCESS
!= memory_object_iopl_request(_memRef
->entries
[0].entry
, 0, &size
, &redirUPL2
,
3968 &flags
, getVMTag(kernel_map
))) {
3972 for (lock_count
= 0;
3973 IORecursiveLockHaveLock(gIOMemoryLock
);
3977 err
= upl_transpose(redirUPL2
, mapping
->fRedirUPL
);
3984 if (kIOReturnSuccess
!= err
) {
3985 IOLog("upl_transpose(%x)\n", err
);
3986 err
= kIOReturnSuccess
;
3990 upl_commit(redirUPL2
, NULL
, 0);
3991 upl_deallocate(redirUPL2
);
3995 // swap the memEntries since they now refer to different vm_objects
3996 IOMemoryReference
* me
= _memRef
;
3997 _memRef
= mapping
->fMemory
->_memRef
;
3998 mapping
->fMemory
->_memRef
= me
;
4001 err
= populateDevicePager( pager
, mapping
->fAddressMap
, mapping
->fAddress
, offset
, length
, options
);
4005 // upl_transpose> //
4007 err
= memoryReferenceMap(_memRef
, mapping
->fAddressMap
, offset
, length
, options
, &mapping
->fAddress
);
4009 if ((err
== KERN_SUCCESS
) && ((kIOTracking
& gIOKitDebug
) || _task
)) {
// only dram maps in the default on development case
4011 IOTrackingAddUser(gIOMapTracking
, &mapping
->fTracking
, mapping
->fLength
);
4013 #endif /* IOTRACKING */
4014 if ((err
== KERN_SUCCESS
) && pager
) {
4015 err
= populateDevicePager(pager
, mapping
->fAddressMap
, mapping
->fAddress
, offset
, length
, options
);
4017 if (err
!= KERN_SUCCESS
) {
4018 doUnmap(mapping
->fAddressMap
, (IOVirtualAddress
) mapping
, 0);
4019 } else if (kIOMapDefaultCache
== (options
& kIOMapCacheMask
)) {
4020 mapping
->fOptions
|= ((_flags
& kIOMemoryBufferCacheMask
) >> kIOMemoryBufferCacheShift
);
4030 IOMemoryMapTracking(IOTrackingUser
* tracking
, task_t
* task
,
4031 mach_vm_address_t
* address
, mach_vm_size_t
* size
)
4033 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4035 IOMemoryMap
* map
= (typeof(map
))(((uintptr_t) tracking
) - iomap_offsetof(IOMemoryMap
, fTracking
));
4037 if (!map
->fAddressMap
|| (map
->fAddressMap
!= get_task_map(map
->fAddressTask
))) {
4038 return kIOReturnNotReady
;
4041 *task
= map
->fAddressTask
;
4042 *address
= map
->fAddress
;
4043 *size
= map
->fLength
;
4045 return kIOReturnSuccess
;
4047 #endif /* IOTRACKING */
IOReturn
IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t                addressMap,
	IOVirtualAddress        __address,
	IOByteCount             __length )
{
	return super::doUnmap(addressMap, __address, __length);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}
4081 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4086 mach_vm_address_t toAddress
,
4087 IOOptionBits _options
,
4088 mach_vm_size_t _offset
,
4089 mach_vm_size_t _length
)
4095 if (!super::init()) {
4099 fAddressMap
= get_task_map(intoTask
);
4103 vm_map_reference(fAddressMap
);
4105 fAddressTask
= intoTask
;
4106 fOptions
= _options
;
4109 fAddress
= toAddress
;
4115 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor
* _memory
, mach_vm_size_t _offset
)
4122 if ((_offset
+ fLength
) > _memory
->getLength()) {
4130 if (fMemory
!= _memory
) {
4131 fMemory
->removeMapping(this);
4141 IOMemoryDescriptor::doMap(
4142 vm_map_t __addressMap
,
4143 IOVirtualAddress
* __address
,
4144 IOOptionBits options
,
4145 IOByteCount __offset
,
4146 IOByteCount __length
)
4148 return kIOReturnUnsupported
;
4152 IOMemoryDescriptor::handleFault(
4154 mach_vm_size_t sourceOffset
,
4155 mach_vm_size_t length
)
4157 if (kIOMemoryRedirected
& _flags
) {
4159 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset
);
4163 } while (kIOMemoryRedirected
& _flags
);
4165 return kIOReturnSuccess
;
4169 IOMemoryDescriptor::populateDevicePager(
4171 vm_map_t addressMap
,
4172 mach_vm_address_t address
,
4173 mach_vm_size_t sourceOffset
,
4174 mach_vm_size_t length
,
4175 IOOptionBits options
)
4177 IOReturn err
= kIOReturnSuccess
;
4178 memory_object_t pager
= (memory_object_t
) _pager
;
4179 mach_vm_size_t size
;
4180 mach_vm_size_t bytes
;
4181 mach_vm_size_t page
;
4182 mach_vm_size_t pageOffset
;
4183 mach_vm_size_t pagerOffset
;
4184 IOPhysicalLength segLen
, chunk
;
4188 type
= _flags
& kIOMemoryTypeMask
;
4190 if (reserved
->dp
.pagerContig
) {
4195 physAddr
= getPhysicalSegment( sourceOffset
, &segLen
, kIOMemoryMapperNone
);
4197 pageOffset
= physAddr
- trunc_page_64( physAddr
);
4198 pagerOffset
= sourceOffset
;
4200 size
= length
+ pageOffset
;
4201 physAddr
-= pageOffset
;
4203 segLen
+= pageOffset
;
4206 // in the middle of the loop only map whole pages
4207 if (segLen
>= bytes
) {
4209 } else if (segLen
!= trunc_page_64(segLen
)) {
4210 err
= kIOReturnVMError
;
4212 if (physAddr
!= trunc_page_64(physAddr
)) {
4213 err
= kIOReturnBadArgument
;
4216 if (kIOReturnSuccess
!= err
) {
4220 #if DEBUG || DEVELOPMENT
4221 if ((kIOMemoryTypeUPL
!= type
)
4222 && pmap_has_managed_page(atop_64(physAddr
), atop_64(physAddr
+ segLen
- 1))) {
4223 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr
, segLen
);
4225 #endif /* DEBUG || DEVELOPMENT */
4227 chunk
= (reserved
->dp
.pagerContig
? round_page(segLen
) : page_size
);
4229 (page
< segLen
) && (KERN_SUCCESS
== err
);
4231 err
= device_pager_populate_object(pager
, pagerOffset
,
4232 (ppnum_t
)(atop_64(physAddr
+ page
)), chunk
);
4233 pagerOffset
+= chunk
;
4236 assert(KERN_SUCCESS
== err
);
4241 // This call to vm_fault causes an early pmap level resolution
4242 // of the mappings created above for kernel mappings, since
4243 // faulting in later can't take place from interrupt level.
4244 if ((addressMap
== kernel_map
) && !(kIOMemoryRedirected
& _flags
)) {
4245 err
= vm_fault(addressMap
,
4246 (vm_map_offset_t
)trunc_page_64(address
),
4247 options
& kIOMapReadOnly
? VM_PROT_READ
: VM_PROT_READ
| VM_PROT_WRITE
,
4248 FALSE
, VM_KERN_MEMORY_NONE
,
4250 (vm_map_offset_t
)0);
4252 if (KERN_SUCCESS
!= err
) {
4257 sourceOffset
+= segLen
- pageOffset
;
4261 }while (bytes
&& (physAddr
= getPhysicalSegment( sourceOffset
, &segLen
, kIOMemoryMapperNone
)));
4264 err
= kIOReturnBadArgument
;
4271 IOMemoryDescriptor::doUnmap(
4272 vm_map_t addressMap
,
4273 IOVirtualAddress __address
,
4274 IOByteCount __length
)
4277 IOMemoryMap
* mapping
;
4278 mach_vm_address_t address
;
4279 mach_vm_size_t length
;
4285 mapping
= (IOMemoryMap
*) __address
;
4286 addressMap
= mapping
->fAddressMap
;
4287 address
= mapping
->fAddress
;
4288 length
= mapping
->fLength
;
4290 if (kIOMapOverwrite
& mapping
->fOptions
) {
4293 if ((addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
)) {
4294 addressMap
= IOPageableMapForAddress( address
);
4297 if (kIOLogMapping
& gIOKitDebug
) {
4298 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4299 addressMap
, address
, length
);
4302 err
= mach_vm_deallocate( addressMap
, address
, length
);
4306 IOTrackingRemoveUser(gIOMapTracking
, &mapping
->fTracking
);
4307 #endif /* IOTRACKING */
4313 IOMemoryDescriptor::redirect( task_t safeTask
, bool doRedirect
)
4315 IOReturn err
= kIOReturnSuccess
;
4316 IOMemoryMap
* mapping
= NULL
;
4322 _flags
|= kIOMemoryRedirected
;
4324 _flags
&= ~kIOMemoryRedirected
;
4328 if ((iter
= OSCollectionIterator::withCollection( _mappings
))) {
4329 memory_object_t pager
;
4332 pager
= (memory_object_t
) reserved
->dp
.devicePager
;
4334 pager
= MACH_PORT_NULL
;
4337 while ((mapping
= (IOMemoryMap
*) iter
->getNextObject())) {
4338 mapping
->redirect( safeTask
, doRedirect
);
4339 if (!doRedirect
&& !safeTask
&& pager
&& (kernel_map
== mapping
->fAddressMap
)) {
4340 err
= populateDevicePager(pager
, mapping
->fAddressMap
, mapping
->fAddress
, mapping
->fOffset
, mapping
->fLength
, kIOMapDefaultCache
);
4355 // temporary binary compatibility
4356 IOSubMemoryDescriptor
* subMem
;
4357 if ((subMem
= OSDynamicCast( IOSubMemoryDescriptor
, this))) {
4358 err
= subMem
->redirect( safeTask
, doRedirect
);
4360 err
= kIOReturnSuccess
;
4362 #endif /* !__LP64__ */
4368 IOMemoryMap::redirect( task_t safeTask
, bool doRedirect
)
4370 IOReturn err
= kIOReturnSuccess
;
4373 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4385 if ((!safeTask
|| (get_task_map(safeTask
) != fAddressMap
))
4386 && (0 == (fOptions
& kIOMapStatic
))) {
4387 IOUnmapPages( fAddressMap
, fAddress
, fLength
);
4388 err
= kIOReturnSuccess
;
4390 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect
, this, fAddress
, fLength
, fAddressMap
);
4392 } else if (kIOMapWriteCombineCache
== (fOptions
& kIOMapCacheMask
)) {
4393 IOOptionBits newMode
;
4394 newMode
= (fOptions
& ~kIOMapCacheMask
) | (doRedirect
? kIOMapInhibitCache
: kIOMapWriteCombineCache
);
4395 IOProtectCacheMode(fAddressMap
, fAddress
, fLength
, newMode
);
4401 if ((((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4402 || ((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
))
4404 && (doRedirect
!= (0 != (fMemory
->_flags
& kIOMemoryRedirected
)))) {
4405 fMemory
->redirect(safeTask
, doRedirect
);
4412 IOMemoryMap::unmap( void )
4418 if (fAddress
&& fAddressMap
&& (NULL
== fSuperMap
) && fMemory
4419 && (0 == (kIOMapStatic
& fOptions
))) {
4420 err
= fMemory
->doUnmap(fAddressMap
, (IOVirtualAddress
) this, 0);
4422 err
= kIOReturnSuccess
;
4426 vm_map_deallocate(fAddressMap
);
4438 IOMemoryMap::taskDied( void )
4441 if (fUserClientUnmap
) {
4446 IOTrackingRemoveUser(gIOMapTracking
, &fTracking
);
4448 #endif /* IOTRACKING */
4451 vm_map_deallocate(fAddressMap
);
4454 fAddressTask
= NULL
;
4460 IOMemoryMap::userClientUnmap( void )
4462 fUserClientUnmap
= true;
4463 return kIOReturnSuccess
;
4466 // Overload the release mechanism. All mappings must be a member
4467 // of a memory descriptors _mappings set. This means that we
4468 // always have 2 references on a mapping. When either of these mappings
4469 // are released we need to free ourselves.
4471 IOMemoryMap::taggedRelease(const void *tag
) const
4474 super::taggedRelease(tag
, 2);
4485 fMemory
->removeMapping(this);
4490 if (fOwner
&& (fOwner
!= fMemory
)) {
4492 fOwner
->removeMapping(this);
4497 fSuperMap
->release();
4501 upl_commit(fRedirUPL
, NULL
, 0);
4502 upl_deallocate(fRedirUPL
);
4509 IOMemoryMap::getLength()
4515 IOMemoryMap::getVirtualAddress()
4519 fSuperMap
->getVirtualAddress();
4520 } else if (fAddressMap
4521 && vm_map_is_64bit(fAddressMap
)
4522 && (sizeof(IOVirtualAddress
) < 8)) {
4523 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress
);
4525 #endif /* !__LP64__ */
4532 IOMemoryMap::getAddress()
4538 IOMemoryMap::getSize()
4542 #endif /* !__LP64__ */
4546 IOMemoryMap::getAddressTask()
4549 return fSuperMap
->getAddressTask();
4551 return fAddressTask
;
4556 IOMemoryMap::getMapOptions()
4561 IOMemoryDescriptor
*
4562 IOMemoryMap::getMemoryDescriptor()
4568 IOMemoryMap::copyCompatible(
4569 IOMemoryMap
* newMapping
)
4571 task_t task
= newMapping
->getAddressTask();
4572 mach_vm_address_t toAddress
= newMapping
->fAddress
;
4573 IOOptionBits _options
= newMapping
->fOptions
;
4574 mach_vm_size_t _offset
= newMapping
->fOffset
;
4575 mach_vm_size_t _length
= newMapping
->fLength
;
4577 if ((!task
) || (!fAddressMap
) || (fAddressMap
!= get_task_map(task
))) {
4580 if ((fOptions
^ _options
) & kIOMapReadOnly
) {
4583 if ((kIOMapDefaultCache
!= (_options
& kIOMapCacheMask
))
4584 && ((fOptions
^ _options
) & kIOMapCacheMask
)) {
4588 if ((0 == (_options
& kIOMapAnywhere
)) && (fAddress
!= toAddress
)) {
4592 if (_offset
< fOffset
) {
4598 if ((_offset
+ _length
) > fLength
) {
4603 if ((fLength
== _length
) && (!_offset
)) {
4606 newMapping
->fSuperMap
= this;
4607 newMapping
->fOffset
= fOffset
+ _offset
;
4608 newMapping
->fAddress
= fAddress
+ _offset
;
4615 IOMemoryMap::wireRange(
4617 mach_vm_size_t offset
,
4618 mach_vm_size_t length
)
4621 mach_vm_address_t start
= trunc_page_64(fAddress
+ offset
);
4622 mach_vm_address_t end
= round_page_64(fAddress
+ offset
+ length
);
4625 prot
= (kIODirectionOutIn
& options
);
4627 kr
= vm_map_wire_kernel(fAddressMap
, start
, end
, prot
, fMemory
->getVMTag(kernel_map
), FALSE
);
4629 kr
= vm_map_unwire(fAddressMap
, start
, end
, FALSE
);
4638 IOMemoryMap::getPhysicalSegment( IOByteCount _offset
, IOPhysicalLength
* _length
, IOOptionBits _options
)
4639 #else /* !__LP64__ */
4640 IOMemoryMap::getPhysicalSegment( IOByteCount _offset
, IOPhysicalLength
* _length
)
4641 #endif /* !__LP64__ */
4643 IOPhysicalAddress address
;
4647 address
= fMemory
->getPhysicalSegment( fOffset
+ _offset
, _length
, _options
);
4648 #else /* !__LP64__ */
4649 address
= fMemory
->getPhysicalSegment( fOffset
+ _offset
, _length
);
4650 #endif /* !__LP64__ */
4656 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4659 #define super OSObject
4661 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void
IOMemoryDescriptor::initialize( void )
{
	if (NULL == gIOMemoryLock) {
		gIOMemoryLock = IORecursiveLockAlloc();
	}

	gIOLastPage = IOGetLastPageNumber();
}

void
IOMemoryDescriptor::free( void )
{
	if (_mappings) {
		_mappings->release();
	}

	if (reserved) {
		cleanKernelReserved(reserved);
		IODelete(reserved, IOMemoryDescriptorReserved, 1);
		reserved = NULL;
	}
	super::free();
}
IOMemoryMap *
IOMemoryDescriptor::setMapping(
	task_t                  intoTask,
	IOVirtualAddress        mapAddress,
	IOOptionBits            options )
{
	return createMappingInTask( intoTask, mapAddress,
	           options | kIOMapStatic,
	           0, getLength());
}

IOMemoryMap *
IOMemoryDescriptor::map(
	IOOptionBits            options )
{
	return createMappingInTask( kernel_task, 0,
	           options | kIOMapAnywhere,
	           0, getLength());
}

#ifndef __LP64__
IOMemoryMap *
IOMemoryDescriptor::map(
	task_t                  intoTask,
	IOVirtualAddress        atAddress,
	IOOptionBits            options,
	IOByteCount             offset,
	IOByteCount             length )
{
	if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
		OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
		return NULL;
	}

	return createMappingInTask(intoTask, atAddress,
	           options, offset, length);
}
#endif /* !__LP64__ */
IOMemoryMap *
IOMemoryDescriptor::createMappingInTask(
	task_t                  intoTask,
	mach_vm_address_t       atAddress,
	IOOptionBits            options,
	mach_vm_size_t          offset,
	mach_vm_size_t          length)
{
	IOMemoryMap * result;
	IOMemoryMap * mapping;

	if (0 == length) {
		length = getLength();
	}

	mapping = new IOMemoryMap;

	if (mapping
	    && !mapping->init( intoTask, atAddress,
	    options, offset, length )) {
		mapping->release();
		mapping = NULL;
	}

	if (mapping) {
		result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
	} else {
		result = NULL;
	}

#if DEBUG
	if (!result) {
		IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
		    this, atAddress, (uint32_t) options, offset, length);
	}
#endif

	return result;
}
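/*
 * Editor's note: illustrative sketch, not part of the original file, showing
 * the common way a client obtains a kernel virtual mapping of a descriptor via
 * createMappingInTask(); "md" is assumed to exist and to be prepared as needed.
 */
#if 0
static IOReturn
mappingExample(IOMemoryDescriptor * md)
{
	IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
	if (!map) {
		return kIOReturnVMError;
	}

	void * cpuPtr = (void *) map->getAddress();   // kernel virtual address of the buffer
	(void) cpuPtr;                                // e.g. copy data in or out here

	map->release();                               // tears down the mapping
	return kIOReturnSuccess;
}
#endif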
4767 #ifndef __LP64__ // there is only a 64 bit version for LP64
4769 IOMemoryMap::redirect(IOMemoryDescriptor
* newBackingMemory
,
4770 IOOptionBits options
,
4773 return redirect(newBackingMemory
, options
, (mach_vm_size_t
)offset
);
4778 IOMemoryMap::redirect(IOMemoryDescriptor
* newBackingMemory
,
4779 IOOptionBits options
,
4780 mach_vm_size_t offset
)
4782 IOReturn err
= kIOReturnSuccess
;
4783 IOMemoryDescriptor
* physMem
= NULL
;
4787 if (fAddress
&& fAddressMap
) {
4789 if (((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4790 || ((fMemory
->_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
)) {
4795 if (!fRedirUPL
&& fMemory
->_memRef
&& (1 == fMemory
->_memRef
->count
)) {
4796 upl_size_t size
= round_page(fLength
);
4797 upl_control_flags_t flags
= UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
4798 | UPL_SET_LITE
| UPL_SET_IO_WIRE
| UPL_BLOCK_ACCESS
;
4799 if (KERN_SUCCESS
!= memory_object_iopl_request(fMemory
->_memRef
->entries
[0].entry
, 0, &size
, &fRedirUPL
,
4801 &flags
, fMemory
->getVMTag(kernel_map
))) {
4806 IOUnmapPages( fAddressMap
, fAddress
, fLength
);
4808 physMem
->redirect(NULL
, true);
4813 if (newBackingMemory
) {
4814 if (newBackingMemory
!= fMemory
) {
4816 if (this != newBackingMemory
->makeMapping(newBackingMemory
, fAddressTask
, (IOVirtualAddress
) this,
4817 options
| kIOMapUnique
| kIOMapReference
| kIOMap64Bit
,
4819 err
= kIOReturnError
;
4823 upl_commit(fRedirUPL
, NULL
, 0);
4824 upl_deallocate(fRedirUPL
);
4827 if ((false) && physMem
) {
4828 physMem
->redirect(NULL
, false);
4844 IOMemoryDescriptor::makeMapping(
4845 IOMemoryDescriptor
* owner
,
4847 IOVirtualAddress __address
,
4848 IOOptionBits options
,
4849 IOByteCount __offset
,
4850 IOByteCount __length
)
4853 if (!(kIOMap64Bit
& options
)) {
4854 panic("IOMemoryDescriptor::makeMapping !64bit");
4856 #endif /* !__LP64__ */
4858 IOMemoryDescriptor
* mapDesc
= NULL
;
4859 __block IOMemoryMap
* result
= NULL
;
4861 IOMemoryMap
* mapping
= (IOMemoryMap
*) __address
;
4862 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
4863 mach_vm_size_t length
= mapping
->fLength
;
4865 mapping
->fOffset
= offset
;
4870 if (kIOMapStatic
& options
) {
4872 addMapping(mapping
);
4873 mapping
->setMemoryDescriptor(this, 0);
4877 if (kIOMapUnique
& options
) {
4879 IOByteCount physLen
;
4881 // if (owner != this) continue;
4883 if (((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
4884 || ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
)) {
4885 phys
= getPhysicalSegment(offset
, &physLen
, kIOMemoryMapperNone
);
4886 if (!phys
|| (physLen
< length
)) {
4890 mapDesc
= IOMemoryDescriptor::withAddressRange(
4891 phys
, length
, getDirection() | kIOMemoryMapperNone
, NULL
);
4896 mapping
->fOffset
= offset
;
4899 // look for a compatible existing mapping
4901 _mappings
->iterateObjects(^(OSObject
* object
)
4903 IOMemoryMap
* lookMapping
= (IOMemoryMap
*) object
;
4904 if ((result
= lookMapping
->copyCompatible(mapping
))) {
4906 result
->setMemoryDescriptor(this, offset
);
4912 if (result
|| (options
& kIOMapReference
)) {
4913 if (result
!= mapping
) {
4926 kr
= mapDesc
->doMap( NULL
, (IOVirtualAddress
*) &mapping
, options
, 0, 0 );
4927 if (kIOReturnSuccess
== kr
) {
4929 mapDesc
->addMapping(result
);
4930 result
->setMemoryDescriptor(mapDesc
, offset
);
void
IOMemoryDescriptor::addMapping(
	IOMemoryMap * mapping )
{
	if (mapping) {
		if (NULL == _mappings) {
			_mappings = OSSet::withCapacity(1);
		}
		if (_mappings) {
			_mappings->setObject( mapping );
		}
	}
}

void
IOMemoryDescriptor::removeMapping(
	IOMemoryMap * mapping )
{
	if (_mappings) {
		_mappings->removeObject( mapping );
	}
}
4971 // - initWithOptions is the designated initializer
4973 IOMemoryDescriptor::initWithAddress(void * address
,
4975 IODirection direction
)
4981 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address
,
4983 IODirection direction
,
4990 IOMemoryDescriptor::initWithPhysicalAddress(
4991 IOPhysicalAddress address
,
4993 IODirection direction
)
4999 IOMemoryDescriptor::initWithRanges(
5000 IOVirtualRange
* ranges
,
5002 IODirection direction
,
5010 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange
* ranges
,
5012 IODirection direction
,
5019 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
5020 IOByteCount
* lengthOfSegment
)
5024 #endif /* !__LP64__ */
5026 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5029 IOGeneralMemoryDescriptor::serialize(OSSerialize
* s
) const
5031 OSSymbol
const *keys
[2] = {NULL
};
5032 OSObject
*values
[2] = {NULL
};
5034 vm_size_t vcopy_size
;
5037 user_addr_t address
;
5040 unsigned int index
, nRanges
;
5041 bool result
= false;
5043 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
5049 array
= OSArray::withCapacity(4);
5054 nRanges
= _rangesCount
;
5055 if (os_mul_overflow(sizeof(SerData
), nRanges
, &vcopy_size
)) {
5059 vcopy
= (SerData
*) IOMalloc(vcopy_size
);
5060 if (vcopy
== NULL
) {
5065 keys
[0] = OSSymbol::withCString("address");
5066 keys
[1] = OSSymbol::withCString("length");
5068 // Copy the volatile data so we don't have to allocate memory
5069 // while the lock is held.
5071 if (nRanges
== _rangesCount
) {
5072 Ranges vec
= _ranges
;
5073 for (index
= 0; index
< nRanges
; index
++) {
5074 mach_vm_address_t addr
; mach_vm_size_t len
;
5075 getAddrLenForInd(addr
, len
, type
, vec
, index
);
5076 vcopy
[index
].address
= addr
;
5077 vcopy
[index
].length
= len
;
5080 // The descriptor changed out from under us. Give up.
5087 for (index
= 0; index
< nRanges
; index
++) {
5088 user_addr_t addr
= vcopy
[index
].address
;
5089 IOByteCount len
= (IOByteCount
) vcopy
[index
].length
;
5090 values
[0] = OSNumber::withNumber(addr
, sizeof(addr
) * 8);
5091 if (values
[0] == NULL
) {
5095 values
[1] = OSNumber::withNumber(len
, sizeof(len
) * 8);
5096 if (values
[1] == NULL
) {
5100 OSDictionary
*dict
= OSDictionary::withObjects((const OSObject
**)values
, (const OSSymbol
**)keys
, 2);
5105 array
->setObject(dict
);
5107 values
[0]->release();
5108 values
[1]->release();
5109 values
[0] = values
[1] = NULL
;
5112 result
= array
->serialize(s
);
5119 values
[0]->release();
5122 values
[1]->release();
5131 IOFree(vcopy
, vcopy_size
);
5137 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
{
	return getPhysicalSegment( 0, NULL );
}