/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

// osfmk/device/iokit_rpc.c
unsigned int IODefaultCacheBits(addr64_t pa);
unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMapperWaitSystem	((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif

#define IOMD_DEBUG_DMAACTIVE	1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// instance variable initialisation code.

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData
{
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;    // Pointer to page list or index into it
    uint32_t     fIOMDOffset;  // The offset of this iopl in descriptor
    ppnum_t      fMappedPage;  // Page number of first page in this iopl
    unsigned int fPageOffset;  // Offset within first page of iopl
    unsigned int fFlags;       // Flags
};

struct ioGMDData {
    IOMapper *    fMapper;
    uint8_t       fDMAMapNumAddressBits;
    uint64_t      fDMAMapAlignment;
    addr64_t      fMappedBase;
    uint64_t      fPreparationID;
    unsigned int  fPageCnt;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char _resv:6;
#if __LP64__
    // align arrays to 8 bytes so following macros work
    unsigned char fPad[3];
#endif
    upl_page_info_t fPageList[1]; /* variable length */
    ioPLBlock       fBlocks[1];   /* variable length */
};

#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)	((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)	\
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
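// Layout note: a single OSData (_memoryEntries) holds one ioGMDData header,
// followed by fPageCnt upl_page_info_t entries, followed by the ioPLBlock
// array. getIOPLList() finds the ioPLBlock array by stepping past the page
// list, and getNumIOPL() derives the block count from the OSData length.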
#define getPageList(d)	(&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
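// device_data_action() and device_close() below are the C entry points used
// by the device pager to call back into IOKit: a fault on a device-memory
// mapping is forwarded to the owning IOMemoryDescriptor::handleFault(), and
// device_close() tears down the IOMemoryDescriptorReserved bookkeeping.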
extern "C" {

kern_return_t device_data_action(
               uintptr_t          device_handle,
               ipc_port_t         device_pager,
               vm_prot_t          protection,
               vm_object_offset_t offset,
               vm_size_t          size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if (memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};	// end extern "C"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64  == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
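// The four static helpers below translate between IOKit constants and the
// Mach VM equivalents: purgeableControlBits()/purgeableStateBits() map the
// kIOMemoryPurgeable* states to and from the VM_PURGABLE_* values, while
// vmProtForCacheMode()/pagerFlagsForCacheMode() encode an IOKit cache mode
// into vm_prot_t bits (via SET_MAP_MEM) and DEVICE_PAGER_* flags.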
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY;
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    vm_prot_t prot = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, prot);
            break;

        case kIOWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, prot);
            break;

        case kIOWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, prot);
            break;

        case kIOCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
            break;

        case kIOCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
            break;

        case kIODefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, prot);
            break;
    }

    return (prot);
}
static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteThruCache:
            pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteCombineCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackInnerCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIODefaultCache:
        default:
            pagerFlags = -1U;
            break;
    }
    return (pagerFlags);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
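// An IOMemoryReference is a reference-counted array of IOMemoryEntry records,
// one per Mach named entry (ipc_port_t) covering the descriptor's ranges. It
// backs kIOMemoryPersistent descriptors and is what memoryReferenceMap()
// enters into a target vm_map.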
struct IOMemoryEntry
{
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference
{
    volatile SInt32 refCount;
    uint32_t        capacity;
    uint32_t        count;
    IOMemoryEntry   entries[0];
};

enum
{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
};

SInt32 gIOMemoryReferenceCount;

IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref)) IOMalloc(newSize);
    if (realloc)
    {
        oldSize = (sizeof(IOMemoryReference)
                     - sizeof(realloc->entries)
                     + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) copySize = newSize;
        if (ref) bcopy(realloc, ref, copySize);
        IOFree(realloc, oldSize);
    }
    else if (ref)
    {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) return (0);
    ref->capacity = capacity;
    return (ref);
}
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}

void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
}
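// memoryReferenceCreate() builds an IOMemoryReference for this descriptor:
// for task-backed ranges it coalesces adjacent ranges and wraps each run in a
// Mach named entry via mach_make_memory_entry_64(); for physical descriptors
// it sets up a device pager and a single memory-object entry. With
// kIOMemoryReferenceReuse it hands back the existing _memRef when the newly
// created entries match it.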
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
                        IOOptionBits         options,
                        IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t        err = KERN_SUCCESS;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) return (kIOReturnNoMemory);
    entries = &ref->entries[0];
    count   = 0;

    offset   = 0;
    rangeIdx = 0;
    if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    else
    {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
        {
            IOOptionBits mode;
            pagerFlags = IODefaultCacheBits(nextAddr);
            if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
            {
                if (DEVICE_PAGER_GUARDED & pagerFlags)
                    mode = kIOInhibitCache;
                else
                    mode = kIOWriteCombineCache;
            }
            else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
                mode = kIOWriteThruCache;
            else
                mode = kIOCopybackCache;
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;

    if ((kIOMemoryReferenceReuse & options) && _memRef)
    {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task)
    {
        // virtual ranges
        if (kIOMemoryBufferPageable & _flags)
        {
            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;
            if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
            prot |= VM_PROT_WRITE;
            map = NULL;
        }
        else map = get_task_map(_task);

        remain = _length;
        while (remain)
        {
            srcAddr  = nextAddr;
            srcLen   = nextLen;
            nextAddr = 0;
            nextLen  = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
            {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) break;
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do
            {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) break;
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
                    else                                                  prot &= ~MAP_MEM_NAMED_REUSE;
                }

                err = mach_make_memory_entry_64(map,
                        &actualSize, entryAddr, prot, &entry, cloneEntry);

                if (KERN_SUCCESS != err) break;
                if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");

                if (count >= ref->capacity)
                {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if ((cloneEntries->entry  == entries->entry)
                     && (cloneEntries->size   == entries->size)
                     && (cloneEntries->offset == entries->offset)) cloneEntries++;
                    else prot &= ~MAP_MEM_NAMED_REUSE;
                }
                entries++;
                count++;
            }
            while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    }
    else
    {
        // _task == 0, physical
        memory_object_t pager;
        vm_size_t       size = ptoa_32(_pages);

        if (!getKernelReserved()) panic("getKernelReserved");

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
        if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;

        pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
                                   size, pagerFlags);
        assert (pager);
        if (!pager) err = kIOReturnVMError;
        else
        {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert (KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) device_pager_deallocate(pager);
            else
            {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;

    if (KERN_SUCCESS == err)
    {
        if (MAP_MEM_NAMED_REUSE & prot)
        {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    }
    else
    {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return (err);
}
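// IOMemoryDescriptorMapAlloc() is the per-map allocation callback handed to
// IOIteratePageableMaps() (and also called directly for non-pageable maps);
// it reserves VM space in the target map and records the resulting address in
// the IOMemoryDescriptorMapAllocRef.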
struct IOMemoryDescriptorMapAllocRef
{
    vm_map_t          map;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    IOOptionBits      options;
};

static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;
    err = vm_map_enter_mem_object(map, &addr, ref->size,
                                  (vm_map_offset_t) 0,
                                  (((ref->options & kIOMapAnywhere)
                                    ? VM_FLAGS_ANYWHERE
                                    : VM_FLAGS_FIXED)
                                   | VM_MAKE_TAG(VM_MEMORY_IOKIT)
                                   | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                  IPC_PORT_NULL,
                                  (memory_object_offset_t) 0,
                                  false, /* copy */
                                  VM_PROT_DEFAULT,
                                  VM_PROT_DEFAULT,
                                  VM_INHERIT_NONE);
    if (KERN_SUCCESS == err)
    {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map    = map;
    }

    return( err );
}
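// memoryReferenceMap() enters an IOMemoryReference into a target vm_map: it
// locates the entry covering the requested offset, allocates (or validates)
// VM space, optionally updates the named-entry cache mode, and then enters
// each chunk with vm_map_enter_mem_object(). With kIOMapPrefault the pages
// already wired by prepare() are entered up front from the descriptor's UPL
// page lists instead of being faulted in on first touch.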
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
                     IOMemoryReference * ref,
                     vm_map_t            map,
                     mach_vm_size_t      inoffset,
                     mach_vm_size_t      size,
                     IOOptionBits        options,
                     mach_vm_address_t * inaddr)
{
    IOReturn          err = kIOReturnSuccess;
    int64_t           offset = inoffset;
    uint32_t          rangeIdx, entryIdx;
    vm_map_offset_t   addr, mapAddr;
    vm_map_offset_t   pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t srcAddr, nextAddr;
    mach_vm_size_t    srcLen, nextLen;
    IOByteCount       physLen;
    IOMemoryEntry *   entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;

    /*
     * For the kIOMapPrefault option.
     */
    upl_page_info_t *pageList = NULL;
    UInt currentPageIndex = 0;

    type = _flags & kIOMemoryTypeMask;
    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode)
    {
        // VM system requires write access to change cache mode
        prot |= VM_PROT_WRITE;
        // update named entries cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | prot | vmProtForCacheMode(cacheMode));
    }

    if (_task)
    {
        // Find first range for offset
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) break;
            remain -= nextLen;
        }
    }
    else
    {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) return (kIOReturnBadArgument);

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr       = 0;
    if (!(options & kIOMapAnywhere))
    {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
         (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
         entryIdx++) {}
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.options = options;
        ref.size    = size;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = addr;

        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        if (KERN_SUCCESS == err)
        {
            addr = ref.mapped;
            map  = ref.map;
        }
    }

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault) {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(map != kernel_map);
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((map == kernel_map) ||
            (_wireCount == 0) ||
            (_memoryEntries == NULL))
        {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData * dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to go back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
            ioplIndex++;
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
         * array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Rebase [offset] into the IOPL in order to look up the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && nextLen && (KERN_SUCCESS == err))
    {
        srcAddr  = nextAddr;
        srcLen   = nextLen;
        nextAddr = 0;
        nextLen  = 0;
        // coalesce addr range
        for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if ((srcAddr + srcLen) != nextAddr) break;
            srcLen += nextLen;
        }

        while (srcLen && (KERN_SUCCESS == err))
        {
            entryOffset = offset - entry->offset;
            if ((page_mask & entryOffset) != pageOffset)
            {
                err = kIOReturnNotAligned;
                break;
            }

            if (kIODefaultCache != cacheMode)
            {
                vm_size_t unused = 0;
                err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                                             memEntryCacheMode, NULL, entry->entry);
                assert (KERN_SUCCESS == err);
            }

            entryOffset -= pageOffset;
            if (entryOffset >= entry->size) panic("entryOffset");
            chunk = entry->size - entryOffset;
            if (chunk)
            {
                if (chunk > remain) chunk = remain;

                if (options & kIOMapPrefault) {
                    UInt nb_pages = round_page(chunk) / PAGE_SIZE;
                    err = vm_map_enter_mem_object_prefault(map,
                                                           &mapAddr,
                                                           chunk, 0 /* mask */,
                                                            (VM_FLAGS_FIXED
                                                           | VM_FLAGS_OVERWRITE
                                                           | VM_MAKE_TAG(VM_MEMORY_IOKIT)
                                                           | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                                           entry->entry,
                                                           entryOffset,
                                                           prot, // cur
                                                           prot, // max
                                                           &pageList[currentPageIndex],
                                                           nb_pages);

                    // Compute the next index in the page list.
                    currentPageIndex += nb_pages;
                    assert(currentPageIndex <= _pages);
                } else {
                    err = vm_map_enter_mem_object(map,
                                                  &mapAddr,
                                                  chunk, 0 /* mask */,
                                                   (VM_FLAGS_FIXED
                                                  | VM_FLAGS_OVERWRITE
                                                  | VM_MAKE_TAG(VM_MEMORY_IOKIT)
                                                  | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                                  entry->entry,
                                                  entryOffset,
                                                  false, // copy
                                                  prot, // cur
                                                  prot, // max
                                                  VM_INHERIT_NONE);
                }
                if (KERN_SUCCESS != err) break;
                remain -= chunk;
                if (!remain) break;
                mapAddr += chunk;
                offset  += chunk - pageOffset;
            }
            pageOffset = 0;
            entry++;
            entryIdx++;
            if (entryIdx >= ref->count)
            {
                err = kIOReturnOverrun;
                break;
            }
        }
    }

    if ((KERN_SUCCESS != err) && addr)
    {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return (err);
}
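// The next two helpers walk every named entry in an IOMemoryReference:
// memoryReferenceGetPageCounts() accumulates resident/dirty page totals, and
// memoryReferenceSetPurgeable() applies a purgeable state change entry by
// entry, folding the individual results into a single aggregate state.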
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
                               IOMemoryReference * ref,
                               IOByteCount       * residentPageCount,
                               IOByteCount       * dirtyPageCount)
{
    IOReturn        err = kIOReturnSuccess;
    IOMemoryEntry * entries;
    unsigned int    resident, dirty;
    unsigned int    totalResident, totalDirty;

    totalResident = totalDirty = 0;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) break;
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) *residentPageCount = totalResident;
    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
    return (err);
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
                               IOMemoryReference * ref,
                               IOOptionBits        newState,
                               IOOptionBits      * oldState)
{
    IOReturn        err = kIOReturnSuccess;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    entries = ref->entries + ref->count;
    totalState = kIOMemoryPurgeableNonVolatile;
    while (entries > &ref->entries[0])
    {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) break;
        err = mach_memory_entry_purgable_control(entries->entry, control, &state);
        if (KERN_SUCCESS != err) break;
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;
    return (err);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
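// The factory methods below are the public constructors for memory
// descriptors. A minimal usage sketch (illustrative only; error handling and
// the chosen direction/options depend on the driver):
//
//   IOMemoryDescriptor * md =
//       IOMemoryDescriptor::withAddressRange(vaddr, size, kIODirectionOut, task);
//   if (md && (kIOReturnSuccess == md->prepare()))   // wire the pages for I/O
//   {
//       // ... program the transfer using the descriptor ...
//       md->complete();                              // unwire when done
//   }
//   if (md) md->release();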
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}

/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
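// withOptions() is the most general factory; the other with* variants above
// are convenience wrappers that select the type bits (virtual, physical,
// 64-bit) and then funnel into initWithOptions() on an
// IOGeneralMemoryDescriptor.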
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

/* Must be implemented by the subclass */
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);

    if (memRef == originalMD->_memRef)
    {
        originalMD->retain();		    // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */

/*
 * initWithOptions:
 *
 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 *  from a given task, several physical ranges, a UPL from the ubc
 *  system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */
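// initWithOptions() dispatches on the memory type encoded in the options word
// (kIOMemoryTypeMask): UIO, virtual, physical, persistent and UPL sources are
// each unpacked into the common _ranges / _memoryEntries representation below.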
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        IOMDPersistentInitData *initData = (typeof(initData)) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memRef = initData->fMemRef;	// Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type    = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count   = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task   = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;	/* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memRef)
            {
                memoryReferenceRelease(_memRef);
                _memRef = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0;	// No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Temp binary compatibility for kIOMemoryThreadSafe
    if (kIOMemoryReserved6156215 & options)
    {
        options &= ~kIOMemoryReserved6156215;
        options |= kIOMemoryThreadSafe;
    }
    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;

 //       _wireCount++;	// UPLs start out life wired

        _length  = count;
        _pages  += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo   = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_size_t len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);	// Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;	// Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memRef)
            {
                IOReturn
                err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) return false;
            }

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
            dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memRef)      memoryReferenceRelease(_memRef);
    if (_prepareLock) IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

#ifndef __LP64__
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount   offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount offset = inoffset;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        if (!srcAddr) bzero_phys(dstAddr64, dstLen);
        else
        {
            copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                   cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);

    return length - remaining;
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}

IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}

void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
#if defined(__ppc__ )
        reserved->preparationID = gIOMDPreparationID++;
#else
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
#endif
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
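// dmaCommandOperation() is the private interface IODMACommand uses to drive a
// descriptor: kIOMDDMAMap establishes (or reuses) a system-mapper mapping,
// kIOMDGetCharacteristics reports length/page/direction information, and
// kIOMDWalkSegments generates physical segments one at a time, keeping its
// cursor in the caller-supplied InternalState.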
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap;
            bool whole = ((data->fOffset == 0) && (data->fLength == _length));
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            remap = (dataP->fDMAMapNumAddressBits < 64)
                 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBase)
            {
//		if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
                if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
                {
                    dataP->fMappedBase = data->fAlloc;
                    data->fAllocCount = 0; 		// IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocCount = 0; 			// IOMD owns the alloc
            }
            data->fMapContig = !dataP->fDiscontig;
        }

        return (err);
    }

    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;

#if IOMD_DEBUG_DMAACTIVE
    } else if (kIOMDDMAActive == op) {
        if (params) OSIncrementAtomic(&md->__iomd_reservedA);
        else {
            if (md->__iomd_reservedA)
                OSDecrementAtomic(&md->__iomd_reservedA);
            else
                panic("kIOMDSetDMAInactive");
        }
#endif /* IOMD_DEBUG_DMAACTIVE */

    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
//	&& (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
            if (kIOReturnSuccess != err) return (err);
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;	// Start from beginning

    UInt length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;			// Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;	// Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;	// Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;

        assert(address);
        assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
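// getPhysicalSegment() is the generic segment walker built on top of
// dmaCommandOperation(kIOMDFirstSegment); the LP64 and legacy 32-bit wrappers
// below differ only in how they treat mapped (IOMMU) versus raw physical
// addresses.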
2273 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
2276 mach_vm_address_t address
= 0;
2277 mach_vm_size_t length
= 0;
2278 IOMapper
* mapper
= gIOSystemMapper
;
2279 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2281 if (lengthOfSegment
)
2282 *lengthOfSegment
= 0;
2284 if (offset
>= _length
)
2287 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2288 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2289 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2290 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2292 if ((options
& _kIOMemorySourceSegment
) && (kIOMemoryTypeUPL
!= type
))
2294 unsigned rangesIndex
= 0;
2295 Ranges vec
= _ranges
;
2296 mach_vm_address_t addr
;
2298 // Find starting address within the vector of ranges
2300 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
2301 if (offset
< length
)
2303 offset
-= length
; // (make offset relative)
2307 // Now that we have the starting range,
2308 // lets find the last contiguous range
2312 for ( ++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++ ) {
2313 mach_vm_address_t newAddr
;
2314 mach_vm_size_t newLen
;
2316 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
2317 if (addr
+ length
!= newAddr
)
2322 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
2326 IOMDDMAWalkSegmentState _state
;
2327 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) (void *)&_state
;
2329 state
->fOffset
= offset
;
2330 state
->fLength
= _length
- offset
;
2331 state
->fMapped
= (0 == (options
& kIOMemoryMapperNone
)) && !(_flags
& kIOMemoryHostOnly
);
2333 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
2335 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
))
2336 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2337 ret
, this, state
->fOffset
,
2338 state
->fIOVMAddr
, state
->fLength
);
2339 if (kIOReturnSuccess
== ret
)
2341 address
= state
->fIOVMAddr
;
2342 length
= state
->fLength
;
2345 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2346 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2348 if (mapper
&& ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)))
2350 if ((options
& kIOMemoryMapperNone
) && !(_flags
& kIOMemoryMapperNone
))
2352 addr64_t origAddr
= address
;
2353 IOByteCount origLen
= length
;
2355 address
= mapper
->mapAddr(origAddr
);
2356 length
= page_size
- (address
& (page_size
- 1));
2357 while ((length
< origLen
)
2358 && ((address
+ length
) == mapper
->mapAddr(origAddr
+ length
)))
2359 length
+= page_size
;
2360 if (length
> origLen
)
2369 if (lengthOfSegment
)
2370 *lengthOfSegment
= length
;
#ifndef __LP64__
addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);
    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
              address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64  = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length  = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
               && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return (phys64);
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#endif /* !__LP64__ */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fDirection = getDirection();
        data->fIsPrepared = true;   // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
        return (err);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
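
/*
 * Illustrative sketch (not compiled as part of this file): these operations are
 * normally exercised indirectly through IODMACommand rather than called by
 * drivers directly. A minimal, hypothetical driver-side use might look like the
 * following; the specification values (64-bit host segments, no segment-size
 * limit) are assumptions and not taken from this file.
 *
 *     IODMACommand * dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, 64, 0);
 *     if (dma && (kIOReturnSuccess == dma->setMemoryDescriptor(md, true)))   // true => prepare md
 *     {
 *         UInt64 offset = 0;
 *         while (offset < md->getLength())
 *         {
 *             IODMACommand::Segment64 seg;
 *             UInt32 numSeg = 1;
 *             if ((kIOReturnSuccess != dma->genIOVMSegments(&offset, &seg, &numSeg)) || !numSeg) break;
 *             // program seg.fIOVMAddr / seg.fLength into the hardware
 *         }
 *         dma->clearMemoryDescriptor(true);                                  // true => complete md
 *     }
 *     if (dma) dma->release();
 */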
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                         IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;

    vm_purgable_t control;
    int           state;

    if (_memRef)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges            vec  = _ranges;
            IOOptionBits      type = _flags & kIOMemoryTypeMask;
            mach_vm_address_t addr;
            mach_vm_size_t    len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }

    return (err);
}

IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                  IOOptionBits * oldState )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}

IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                   IOByteCount * dirtyPageCount )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
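
/*
 * Illustrative usage sketch (not compiled here): changing and querying the
 * purgeable state of a buffer-backed descriptor. "bmd" is a hypothetical
 * descriptor created with kIOMemoryPurgeable; only the setPurgeable() and
 * getPageCounts() entry points implemented above are assumed.
 *
 *     IOOptionBits oldState;
 *     IOByteCount  residentPages, dirtyPages;
 *
 *     bmd->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);       // contents may be reclaimed
 *     // ... later, before touching the memory again ...
 *     bmd->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState)
 *     {
 *         // the pages were reclaimed while volatile; regenerate the contents
 *     }
 *     bmd->getPageCounts(&residentPages, &dirtyPages);
 */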
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
                                      IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
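
/*
 * Illustrative usage sketch (not compiled here): a driver whose device has a
 * non-coherent DMA path can use performOperation() to maintain the CPU cache
 * over a descriptor. The subrange chosen here (the whole descriptor) is arbitrary.
 *
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());  // push dirty lines before the device reads
 *     // ... device DMA ...
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());  // flush before the CPU reads the result
 */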
#if defined(__i386__) || defined(__x86_64__)
extern vm_offset_t first_avail;
#define io_kernel_static_end    first_avail
#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
    vm_map_t                /* map */,
    uintptr_t               offset,
    vm_size_t               *upl_size,
    upl_t                   *upl,
    upl_page_info_array_t   page_list,
    unsigned int            *count,
    ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t      phys;
    ppnum_t      highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type  = _flags & kIOMemoryTypeMask;
    IOReturn     error = kIOReturnCannotWire;
    ioGMDData *  dataP;
    upl_page_info_array_t pageInfo;
    ppnum_t      mapBase = 0;
    IOMapper *   mapper;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;    // i.e. ~UPL_COPYOUT_FROM
        break;
    }

    if (_wireCount)
    {
        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
        {
            OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
            error = kIOReturnNotWritable;
        }
        else error = kIOReturnSuccess;
        return (error);
    }

    dataP  = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    dataP->fMappedBase = 0;

    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
    if (kIODirectionPrepareToPhys32 & forDirection)
    {
        if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
        if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
    }
    if (kIODirectionPrepareNoFault    & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
    if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

    // Note that appendBytes(NULL) zeros the data up to the desired length
    // and the length parameter is an unsigned int
    size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
    if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
    if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
    dataP = 0;

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
    else                                                            curMap = get_task_map(_task);

    // Iterate over the vector of virtual ranges
    Ranges       vec         = _ranges;
    unsigned int pageIndex   = 0;
    IOByteCount  mdOffset    = 0;
    ppnum_t      highestPage = 0;

    IOMemoryEntry * memRefEntry = 0;
    if (_memRef) memRefEntry = &_memRef->entries[0];

    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        mach_vm_address_t startPage;
        mach_vm_size_t    numBytes;
        ppnum_t           highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedPage = mapBase + pageIndex;
        else
            iopl.fMappedPage = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t     theMap;
            if (curMap) theMap = curMap;
            else if (_memRef)
            {
                theMap = NULL;
            }
            else
            {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }

            int ioplFlags = uplFlags;
            dataP    = getDataP(_memoryEntries);
            pageInfo = getPageList(dataP);
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t    ioplSize    = round_page(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &highPage);
            }
            else if (_memRef) {
                memory_object_offset_t entryOffset;

                entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset);
                if (entryOffset >= memRefEntry->size) {
                    memRefEntry++;
                    if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
                    entryOffset = 0;
                }
                if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
                error = memory_object_iopl_request(memRefEntry->entry,
                                                   entryOffset,
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          (upl_size_t *)&ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            if (iopl.fIOPL)
                highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnCannotWire;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
            }
            else {
                iopl.fFlags = 0;
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo   = pageIndex;
            if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;

            // used to remove the upl for auto prepares here, for some errant code
            // that freed memory before the descriptor pointing at it
            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partially created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }
            dataP = 0;

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset  -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes  -= ioplSize;
                startPage += ioplSize;
                mdOffset  += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper) iopl.fMappedPage = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return (error);
}
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }
    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;
    dataP->fCompletionError      = false;

    return (true);
}
IOReturn
IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * address,
    ppnum_t                     * mapPages)
{
    IOMDDMAWalkSegmentState  walkState;
    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
    IOOptionBits             mdOp;
    IOReturn                 ret;
    IOPhysicalLength         segLen;
    addr64_t                 phys, align, pageOffset;
    ppnum_t                  base, pageIndex, pageCount;
    uint64_t                 index;
    uint32_t                 mapOptions = 0;

    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    walkArgs->fMapped = false;
    mdOp = kIOMDFirstSegment;
    pageCount  = 0;
    pageOffset = 0;
    for (index = 0; index < length; )
    {
        if (index && (page_mask & (index + pageOffset))) break;

        walkArgs->fOffset = offset + index;
        ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
        mdOp = kIOMDWalkSegments;
        if (ret != kIOReturnSuccess) break;
        phys   = walkArgs->fIOVMAddr;
        segLen = walkArgs->fLength;

        align = (phys & page_mask);
        if (!index) pageOffset = align;
        else if (align) break;
        pageCount += atop_64(round_page_64(align + segLen));
        index += segLen;
    }

    if (index < length) return (kIOReturnVMError);

    base = mapper->iovmMapMemory(this, offset, pageCount,
                                 mapOptions, NULL, mapSpec);

    if (!base) return (kIOReturnNoResources);

    mdOp = kIOMDFirstSegment;
    for (pageIndex = 0, index = 0; index < length; )
    {
        walkArgs->fOffset = offset + index;
        ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
        mdOp = kIOMDWalkSegments;
        if (ret != kIOReturnSuccess) break;
        phys   = walkArgs->fIOVMAddr;
        segLen = walkArgs->fLength;

        ppnum_t page  = atop_64(phys);
        ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
        while (count--)
        {
            mapper->iovmInsert(base, pageIndex, page);
            page++;
            pageIndex++;
        }
        index += segLen;
    }
    if (pageIndex != pageCount) panic("pageIndex");

    *address = ptoa_64(base) + pageOffset;
    if (mapPages) *mapPages = pageCount;

    return (kIOReturnSuccess);
}
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * address,
    ppnum_t                     * mapPages)
{
    IOReturn     err = kIOReturnSuccess;
    ioGMDData *  dataP;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
        || offset || (length != _length))
    {
        err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;
        ppnum_t           base;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment      = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else
            pageList = getPageList(dataP);

        if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        base = mapper->iovmMapMemory(
                   this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
        *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
        if (mapPages) *mapPages = _pages;
    }

    return (err);
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error = kIOReturnSuccess;
    IOOptionBits type  = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    ioGMDData  * dataP;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    do
    {
        assert(_wireCount);
        if (!_wireCount)
            break;

        if ((kIODirectionCompleteWithError & forDirection)
            && (dataP = getDataP(_memoryEntries)))
            dataP->fCompletionError = true;

        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, count = getNumIOPL(_memoryEntries, dataP);

            if (_wireCount)
            {
                // kIODirectionCompleteWithDataValid & forDirection
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    for (ind = 0; ind < count; ind++)
                    {
                        if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
                    }
                }
            }
            else
            {
#if IOMD_DEBUG_DMAACTIVE
                if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

                if (dataP->fMappedBase) {
                    dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
                    dataP->fMappedBase = 0;
                }
                // Only complete iopls that we created which are for TypeVirtual
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                    for (ind = 0; ind < count; ind++)
                        if (ioplList[ind].fIOPL) {
                            if (dataP->fCompletionError)
                                upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
                            else
                                upl_commit(ioplList[ind].fIOPL, 0, 0);
                            upl_deallocate(ioplList[ind].fIOPL);
                        }
                } else if (kIOMemoryTypeUPL == type) {
                    upl_set_referenced(ioplList[0].fIOPL, false);
                }

                (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

                dataP->fPreparationID = kIOPreparationIDUnprepared;
            }
        }
    }
    while (false);

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
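
/*
 * Illustrative usage sketch (not compiled here): the prepare()/complete() pair
 * as a driver would typically use it around a transfer. "md" is a hypothetical
 * descriptor handed to the driver; only prepare() and complete() as implemented
 * above are assumed.
 *
 *     IOReturn rc = md->prepare(kIODirectionOutIn);       // page in and wire down
 *     if (kIOReturnSuccess == rc)
 *     {
 *         // ... perform the I/O against the wired pages ...
 *         md->complete(kIODirectionOutIn);                // must balance the prepare()
 *     }
 */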
IOReturn
IOGeneralMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    kern_return_t err;

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges       vec  = _ranges;

    mach_vm_address_t range0Addr = 0;
    mach_vm_size_t    range0Len  = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if (_task
        && (mapping->fAddressTask == _task)
        && (mapping->fAddressMap == get_task_map(_task))
        && (options & kIOMapAnywhere)
        && (1 == _rangesCount)
        && (0 == offset)
        && range0Addr
        && (length <= range0Len))
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if (!_memRef)
    {
        IOOptionBits createOptions = 0;
        if (!(kIOMapReadOnly & options))
        {
            createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
            if (kIODirectionOut == (kIODirectionOutIn & _flags))
            {
                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
            }
#endif
        }
        err = memoryReferenceCreate(createOptions, &_memRef);
        if (kIOReturnSuccess != err) return (err);
    }

    memory_object_t pager;
    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);

    // <upl_transpose //
    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        do
        {
            upl_t        redirUPL2;
            upl_size_t   size;
            int          flags;
            unsigned int lock_count;

            if (!_memRef || (1 != _memRef->count))
            {
                err = kIOReturnNotReadable;
                break;
            }

            size = round_page(mapping->fLength);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                  | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                IOMemoryReference * me = _memRef;
                _memRef = mapping->fMemory->_memRef;
                mapping->fMemory->_memRef = me;
            }
            if (pager)
                err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        while (false);
    }
    // upl_transpose> //
    else
    {
        err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);

        if ((err == KERN_SUCCESS) && pager)
        {
            err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
            if (err != KERN_SUCCESS)
            {
                doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
            }
            else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
            {
                mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
            }
        }
    }

    return (err);
}

IOReturn
IOGeneralMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOMemoryMap::init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      _options,
        mach_vm_size_t    _offset,
        mach_vm_size_t    _length )
{
    if (!intoTask)
        return( false );

    if (!super::init())
        return( false );

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return( false );
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return( false );

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false );
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}

IOReturn
IOMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )
{
    return (kIOReturnUnsupported);
}

IOReturn
IOMemoryDescriptor::handleFault(
    void *         _pager,
    mach_vm_size_t sourceOffset,
    mach_vm_size_t length)
{
    if( kIOMemoryRedirected & _flags)
    {
        IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
        do {
            SLEEP;
        } while( kIOMemoryRedirected & _flags );
    }

    return (kIOReturnSuccess);
}
IOReturn
IOMemoryDescriptor::populateDevicePager(
    void *            _pager,
    vm_map_t          addressMap,
    mach_vm_address_t address,
    mach_vm_size_t    sourceOffset,
    mach_vm_size_t    length,
    IOOptionBits      options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    mach_vm_size_t   size;
    mach_vm_size_t   bytes;
    mach_vm_size_t   page;
    mach_vm_size_t   pageOffset;
    mach_vm_size_t   pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );

    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes) segLen = bytes;
        else if (segLen != trunc_page(segLen))   err = kIOReturnVMError;
        if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;

        if (kIOReturnSuccess != err) break;

        if (reserved && reserved->dp.pagerContig)
        {
            IOPhysicalLength allLen;
            addr64_t         allPhys;

            allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
            assert( allPhys );
            err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
        }
        else
        {
            for( page = 0;
                 (page < segLen) && (KERN_SUCCESS == err);
                 page += page_size)
            {
                err = device_pager_populate_object(pager, pagerOffset,
                        (ppnum_t)(atop_64(physAddr + page)), page_size);
                pagerOffset += page_size;
            }
        }
        assert (KERN_SUCCESS == err);
        if (err)
            break;

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)trunc_page_64(address),
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        sourceOffset += segLen - pageOffset;
        address      += segLen;
        bytes        -= segLen;
        pageOffset    = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
IOReturn
IOMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((IOMemoryMap *) __address)->fAddressMap;
        address    = ((IOMemoryMap *) __address)->fAddress;
        length     = ((IOMemoryMap *) __address)->fLength;
    }

    if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
        addressMap = IOPageableMapForAddress( address );

    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length );

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn      err     = kIOReturnSuccess;
    IOMemoryMap * mapping = 0;
    OSIterator *  iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            memory_object_t pager;

            if( reserved)
                pager = (memory_object_t) reserved->dp.devicePager;
            else
                pager = MACH_PORT_NULL;

            while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            {
                mapping->redirect( safeTask, doRedirect );
                if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
                {
                    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
                }
            }

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//      err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)
                break;
            if (!fAddressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
                && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                err = kIOReturnSuccess;
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);
        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        && safeTask
        && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn
IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap) unmap();

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}

IOReturn
IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}

IOByteCount
IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress
IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
             && vm_map_is_64bit(fAddressMap)
             && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t
IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t
IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */

task_t
IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask );
}
IOOptionBits
IOMemoryMap::getMapOptions()
{
    return( fOptions );
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}

IOMemoryMap * IOMemoryMap::copyCompatible(
        IOMemoryMap * newMapping )
{
    task_t            task      = newMapping->getAddressTask();
    mach_vm_address_t toAddress = newMapping->fAddress;
    IOOptionBits      _options  = newMapping->fOptions;
    mach_vm_size_t    _offset   = newMapping->fOffset;
    mach_vm_size_t    _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
        && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = fOffset + _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
IOReturn
IOMemoryMap::wireRange(
        uint32_t       options,
        mach_vm_size_t offset,
        mach_vm_size_t length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);

    if (kIODirectionOutIn & options)
    {
        kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
    }
    else
    {
        kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return (kr);
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t           intoTask,
        IOVirtualAddress mapAddress,
        IOOptionBits     options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
        task_t           intoTask,
        IOVirtualAddress atAddress,
        IOOptionBits     options,
        IOByteCount      offset,
        IOByteCount      length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t            intoTask,
        mach_vm_address_t atAddress,
        IOOptionBits      options,
        mach_vm_size_t    offset,
        mach_vm_size_t    length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
        && !mapping->init( intoTask, atAddress,
                           options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
              this, atAddress, (uint32_t) options, offset, length);

    return (result);
}
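
/*
 * Illustrative usage sketch (not compiled here): creating and tearing down a
 * kernel mapping of a descriptor. Names are hypothetical; only
 * createMappingInTask(), getAddress() and getLength() as implemented above
 * are assumed.
 *
 *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapReadOnly);
 *     if (map)
 *     {
 *         const void * p = (const void *) map->getAddress();
 *         // ... read up to map->getLength() bytes at p ...
 *         map->release();        // releasing the last reference undoes the mapping
 *     }
 */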
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
        {
            vm_size_t size = round_page(fLength);
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                      | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
                                                           NULL, NULL,
                                                           &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if ((false))
                    physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if ((false) && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               __intoTask,
    IOVirtualAddress     __address,
    IOOptionBits         options,
    IOByteCount          __offset,
    IOByteCount          __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this) continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                              phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                    IOPhysicalAddress address,
                                    IOByteCount       length,
                                    IODirection       direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                    IOVirtualRange * ranges,
                                    UInt32           withCount,
                                    IODirection      direction,
                                    task_t           task,
                                    bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                            UInt32            withCount,
                                            IODirection       direction,
                                            bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    OSArray * array;
    bool result = false;

    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;

    array = OSArray::withCapacity(4);
    if (!array)  return (false);

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            mach_vm_address_t addr; mach_vm_size_t len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        array->setObject(dict);
        dict->release();
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;
    }

    result = array->serialize(s);

 bail:
    if (array)
        array->release();
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);

    return result;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#if DEVELOPMENT || DEBUG

extern "C" void IOMemoryDescriptorTest(int x)
{
    IOGeneralMemoryDescriptor * md;

    vm_offset_t   data[2];
    vm_size_t     bsize = 16*1024*1024;
    vm_size_t     srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
    vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE);

    kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
    {
        ((uint32_t*)data[0])[idx] = idx;
    }

    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c))
    {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc))
        {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length  = srcsize;

            if (srcsize > 5*page_size)
            {
                rangeCount = 3;
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
            }
            else if ((srcsize > 2*page_size) && !(page_mask & srcoffset))
            {
                rangeCount = 3;
                ranges[0].length = 4096;
                ranges[1].length = 4096;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + 4096;
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                 IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));

            kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                    (long) srcsize, (long) srcoffset,
                    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                    (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            if (kIOReturnSuccess == kr)
            {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
                {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20))
                    {
                        IOMemoryMap     * map;
                        mach_vm_address_t addr = 0;
                        uint32_t          data;

                        kprintf("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) addr = map->getAddress();
                        else kr = kIOReturnError;

                        kprintf(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) break;
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr)
                        {
                            kprintf("prepare() fail 0x%x\n", kr);
                            break;
                        }
                        for (idx = 0; idx < size; idx += sizeof(uint32_t))
                        {
                            offidx = (idx + mapoffset + srcoffset);
                            if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset))
                            {
                                if (offidx < 8192) offidx ^= 0x1000;
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
                            {
                                kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            }
                            else
                            {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
                                if (offidx != data)
                                {
                                    kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        kprintf("unmapRef %llx\n", addr);
                        map->release();
                        if (kIOReturnSuccess != kr) break;
                    }
                    if (kIOReturnSuccess != kr) break;
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) break;
        }
        if (kIOReturnSuccess != kr) break;
    }

    if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
                                        (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);

    vm_deallocate(kernel_map, data[0], bsize);
//  vm_deallocate(kernel_map, data[1], size);
}

#endif /* DEVELOPMENT || DEBUG */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }