/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

// osfmk/device/iokit_rpc.c
unsigned int IODefaultCacheBits(addr64_t pa);
unsigned int IOTranslateCacheBits(struct phys_entry *pp);

#define kIOMapperWaitSystem    ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData
{
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;    // Pointer to page list or index into it
    uint32_t     fIOMDOffset;  // The offset of this iopl in descriptor
    ppnum_t      fMappedPage;  // Page number of first page in this iopl
    unsigned int fPageOffset;  // Offset within first page of iopl
    unsigned int fFlags;       // Flags
};

struct ioGMDData
{
    IOMapper *    fMapper;
    uint8_t       fDMAMapNumAddressBits;
    uint64_t      fDMAMapAlignment;
    uint64_t      fMappedBase;
    uint64_t      fMappedLength;
    uint64_t      fPreparationID;
#if IOTRACKING
    IOTracking    fWireTracking;
#endif /* IOTRACKING */
    unsigned int  fPageCnt;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char _resv:6;
#if __LP64__
    // align arrays to 8 bytes so following macros work
    unsigned char fPad[3];
#endif
    upl_page_info_t fPageList[1]; /* variable length */
    ioPLBlock       fBlocks[1];   /* variable length */
};

#define getDataP(osd)    ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)   ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)    \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)   (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
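/*
 * Illustrative sketch (not part of the original source): how the accessor
 * macros above fit together. The OSData allocation and the count variables
 * below are assumptions for illustration only.
 *
 *   unsigned int dataSize = computeDataSize(pageCount, uplCount);
 *   OSData *          md      = OSData::withCapacity(dataSize);
 *   ioGMDData *       dataP   = getDataP(md);           // header at the start of the buffer
 *   upl_page_info_t * pl      = getPageList(dataP);     // fPageCnt page entries follow the header
 *   ioPLBlock *       iopls   = getIOPLList(dataP);     // ioPLBlocks follow the page list
 *   UInt              numIOPL = getNumIOPL(md, dataP);  // derived from the OSData length
 */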
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )

extern "C" {

kern_return_t device_data_action(
               uintptr_t            device_handle,
               ipc_port_t           device_pager,
               vm_prot_t            protection,
               vm_object_offset_t   offset,
               vm_size_t            size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t     device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY;
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
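/*
 * Illustrative sketch (not in the original source): how the two helpers above
 * are typically paired when changing purgeable state. The local variables are
 * assumptions for illustration only.
 *
 *   vm_purgable_t control;
 *   int           state;
 *   if (kIOReturnSuccess == purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state)) {
 *       // hand (control, state) to mach_memory_entry_purgable_control(), then
 *       // translate the VM state that comes back into IOKit constants:
 *       purgeableStateBits(&state);   // state now holds a kIOMemoryPurgeable* value
 *   }
 */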
static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    vm_prot_t prot = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, prot);
            break;

        case kIOWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, prot);
            break;

        case kIOWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, prot);
            break;

        case kIOCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
            break;

        case kIOCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
            break;

        case kIODefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, prot);
            break;
    }

    return (prot);
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteThruCache:
            pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteCombineCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackInnerCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIODefaultCache:
        default:
            pagerFlags = -1U;
            break;
    }
    return (pagerFlags);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOMemoryEntry
{
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference
{
    volatile SInt32 refCount;
    vm_prot_t       prot;
    uint32_t        capacity;
    uint32_t        count;
    IOMemoryEntry   entries[0];
};

enum
{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
};

SInt32 gIOMemoryReferenceCount;
IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
                 - sizeof(ref->entries)
                 + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref)) IOMalloc(newSize);
    if (realloc)
    {
        oldSize = (sizeof(IOMemoryReference)
                     - sizeof(realloc->entries)
                     + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) copySize = newSize;
        if (ref) bcopy(realloc, ref, copySize);
        IOFree(realloc, oldSize);
    }
    else if (ref)
    {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) return (0);
    ref->capacity = capacity;
    return (ref);
}
void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
              - sizeof(ref->entries)
              + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}

void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
                        IOOptionBits         options,
                        IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t        err;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;
    vm_tag_t             tag;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) return (kIOReturnNoMemory);

    tag = IOMemoryTag(kernel_map);
    entries = &ref->entries[0];
    count = 0;

    offset = 0;
    rangeIdx = 0;
    if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    else
    {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
        {
            IOOptionBits mode;
            pagerFlags = IODefaultCacheBits(nextAddr);
            if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
            {
                if (DEVICE_PAGER_GUARDED & pagerFlags)
                    mode = kIOInhibitCache;
                else
                    mode = kIOWriteCombineCache;
            }
            else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
                mode = kIOWriteThruCache;
            else
                mode = kIOCopybackCache;
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;

    if ((kIOMemoryReferenceReuse & options) && _memRef)
    {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task)
    {
        // virtual ranges

        if (kIOMemoryBufferPageable & _flags)
        {
            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;
            if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
            prot |= VM_PROT_WRITE;
            map = NULL;
        }
        else map = get_task_map(_task);

        remain = _length;
        while (remain)
        {
            srcAddr  = nextAddr;
            srcLen   = nextLen;
            nextAddr = 0;
            nextLen  = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
            {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) break;
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do
            {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) break;
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
                    else                                                  prot &= ~MAP_MEM_NAMED_REUSE;
                }

                err = mach_make_memory_entry_64(map,
                        &actualSize, entryAddr, prot, &entry, cloneEntry);

                if (KERN_SUCCESS != err) break;
                if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");

                if (count >= ref->capacity)
                {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if ((cloneEntries->entry == entries->entry)
                     && (cloneEntries->size == entries->size)
                     && (cloneEntries->offset == entries->offset)) cloneEntries++;
                    else prot &= ~MAP_MEM_NAMED_REUSE;
                }
                entries++;
                count++;
            }
            while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    }
    else
    {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t       size = ptoa_32(_pages);

        if (!getKernelReserved()) panic("getKernelReserved");

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
        if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;

        pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
                                   size, pagerFlags);
        assert (pager);
        if (!pager) err = kIOReturnVMError;
        else
        {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert (KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) device_pager_deallocate(pager);
            else
            {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot  = prot;

    if (KERN_SUCCESS == err)
    {
        if (MAP_MEM_NAMED_REUSE & prot)
        {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    }
    else
    {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return (err);
}
struct IOMemoryDescriptorMapAllocRef
{
    vm_map_t          map;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    vm_prot_t         prot;
    vm_tag_t          tag;
    IOOptionBits      options;
};

static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
                                  (vm_map_offset_t) 0,
                                  (((ref->options & kIOMapAnywhere)
                                    ? VM_FLAGS_ANYWHERE
                                    : VM_FLAGS_FIXED)
                                   | VM_MAKE_TAG(ref->tag)),
                                  IPC_PORT_NULL,
                                  (memory_object_offset_t) 0,
                                  false, /* copy */
                                  ref->prot,
                                  ref->prot,
                                  VM_INHERIT_NONE);
    if (KERN_SUCCESS == err)
    {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return( err );
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
                     IOMemoryReference * ref,
                     vm_map_t            map,
                     mach_vm_size_t      inoffset,
                     mach_vm_size_t      size,
                     IOOptionBits        options,
                     mach_vm_address_t * inaddr)
{
    IOReturn          err;
    int64_t           offset = inoffset;
    uint32_t          rangeIdx, entryIdx;
    vm_map_offset_t   addr, mapAddr;
    vm_map_offset_t   pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t    nextLen;
    IOByteCount       physLen;
    IOMemoryEntry *   entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;
    vm_tag_t          tag;

    /*
     * For the kIOMapPrefault option.
     */
    upl_page_info_t * pageList = NULL;
    UInt              currentPageIndex = 0;

    type = _flags & kIOMemoryTypeMask;
    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode)
    {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    tag = IOMemoryTag(map);

    if (_task)
    {
        // Find first range for offset
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) break;
            remain -= nextLen;
        }
    }
    else
    {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) return (kIOReturnBadArgument);

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr = 0;
    if (!(options & kIOMapAnywhere))
    {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
        (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
        entryIdx++) {}
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options)
    {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
        {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    }
    else
    {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.tag     = tag;
        ref.options = options;
        ref.size    = size;
        ref.prot    = prot;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = addr;

        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        if (KERN_SUCCESS == err)
        {
            addr = ref.mapped;
            map  = ref.map;
        }
    }

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault)
    {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted in.
         */
        assert(map != kernel_map);
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((map == kernel_map) ||
            (_wireCount == 0) ||
            (_memoryEntries == NULL))
        {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData* dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to go back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
            ioplIndex++;
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
         * array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Rebase [offset] into the IOPL in order to look for the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && (KERN_SUCCESS == err))
    {
        entryOffset = offset - entry->offset;
        if ((page_mask & entryOffset) != pageOffset)
        {
            err = kIOReturnNotAligned;
            break;
        }

        if (kIODefaultCache != cacheMode)
        {
            vm_size_t unused = 0;
            err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                                         memEntryCacheMode, NULL, entry->entry);
            assert (KERN_SUCCESS == err);
        }

        entryOffset -= pageOffset;
        if (entryOffset >= entry->size) panic("entryOffset");
        chunk = entry->size - entryOffset;
        if (chunk)
        {
            if (chunk > remain) chunk = remain;
            if (options & kIOMapPrefault)
            {
                UInt nb_pages = round_page(chunk) / PAGE_SIZE;
                err = vm_map_enter_mem_object_prefault(map,
                                                       &mapAddr,
                                                       chunk, 0 /* mask */,
                                                       (VM_FLAGS_FIXED
                                                        | VM_FLAGS_OVERWRITE
                                                        | VM_MAKE_TAG(tag)
                                                        | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                                       entry->entry,
                                                       entryOffset,
                                                       prot, // cur
                                                       prot, // max
                                                       &pageList[currentPageIndex],
                                                       nb_pages);

                // Compute the next index in the page list.
                currentPageIndex += nb_pages;
                assert(currentPageIndex <= _pages);
            }
            else
            {
                err = vm_map_enter_mem_object(map,
                                              &mapAddr,
                                              chunk, 0 /* mask */,
                                              (VM_FLAGS_FIXED
                                               | VM_FLAGS_OVERWRITE
                                               | VM_MAKE_TAG(tag)
                                               | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
                                              entry->entry,
                                              entryOffset,
                                              false, // copy
                                              prot, // cur
                                              prot, // max
                                              VM_INHERIT_NONE);
            }
            if (KERN_SUCCESS != err) break;
            remain -= chunk;
            if (!remain) break;
            mapAddr += chunk;
            offset  += chunk - pageOffset;
        }
        pageOffset = 0;
        entry++;
        entryIdx++;
        if (entryIdx >= ref->count)
        {
            err = kIOReturnOverrun;
            break;
        }
    }

    if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
    {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return (err);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
                       IOMemoryReference * ref,
                       IOByteCount       * residentPageCount,
                       IOByteCount       * dirtyPageCount)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    unsigned int    resident, dirty;
    unsigned int    totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) break;
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) *residentPageCount = totalResident;
    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
    return (err);
}
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
                        IOMemoryReference * ref,
                        IOOptionBits        newState,
                        IOOptionBits      * oldState)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    entries = ref->entries + ref->count;
    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    while (entries > &ref->entries[0])
    {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) break;
        err = mach_memory_entry_purgable_control(entries->entry, control, &state);
        if (KERN_SUCCESS != err) break;
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;
    return (err);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
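/*
 * Illustrative sketch (not in the original source): typical driver-side use of
 * the factory above. The buffer variables are assumptions for illustration only.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *           (mach_vm_address_t) buffer, bufferLength,
 *           kIODirectionOut, current_task());
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... program DMA against the descriptor ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */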
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);

    if (memRef == originalMD->_memRef)
    {
        originalMD->retain();               // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
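/*
 * Illustrative sketch (not in the original source): how a persistent clone is
 * typically obtained and released. "origMD" is an assumption for illustration.
 *
 *   IOMemoryDescriptor * clone =
 *       IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD);
 *   if (clone) {
 *       // clone shares origMD's named memory entries (or is origMD itself,
 *       // retained, when the entries could be reused verbatim).
 *       clone->release();
 *   }
 */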
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             asReference)
{
    IOOptionBits mdOpts = direction;

    if (asReference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 *  IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 *  from a given task, several physical ranges, an UPL from the ubc
 *  system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        IOMDPersistentInitData *initData = (typeof(initData)) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memRef = initData->fMemRef;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memRef)
            {
                memoryReferenceRelease(_memRef);
                _memRef = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Temp binary compatibility for kIOMemoryThreadSafe
    if (kIOMemoryReserved6156215 & options)
    {
        options &= ~kIOMemoryReserved6156215;
        options |= kIOMemoryThreadSafe;
    }
    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;

 //       _wireCount++;  // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_size_t len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if ((addr + len + PAGE_MASK) < addr) break;    /* overflow */
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            totalLength += len;
            if (totalLength < len) break;                  /* overflow */
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        if ((ind < count)
         || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */

        _length      = totalLength;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize;

            if (_pages > atop_64(max_mem)) return false;

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memRef)
            {
                IOReturn
                err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) return false;
            }

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memRef)      memoryReferenceRelease(_memRef);
    if (_prepareLock) IOLockFree(_prepareLock);

    super::free();
}
#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}
#ifndef __LP64__
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);
    if ((offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {  // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount inoffset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;
    IOByteCount offset = inoffset;

    // Assert that this entire I/O is within the available range
    assert(offset <= _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags)
     || (offset >= _length)
     || ((offset + length) > _length)) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {  // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        if (!srcAddr) bzero_phys(dstAddr64, dstLen);
        else
        {
            copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                   cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            srcAddr += dstLen;
        }
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);

    return length - remaining;
}
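/*
 * Illustrative sketch (not in the original source): copying data through a
 * prepared descriptor with the accessors above. "md" and "scratch" are
 * assumptions for illustration only.
 *
 *   char scratch[64];
 *   IOByteCount got = md->readBytes(0, scratch, sizeof(scratch));
 *   if (got == sizeof(scratch)) {
 *       md->writeBytes(0, scratch, got);   // requires a writable, non-read-only descriptor
 *   }
 */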
#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */
static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}
IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}

void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
#if defined(__ppc__ )
        reserved->preparationID = gIOMDPreparationID++;
#else
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
#endif
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap, keepMap;
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            keepMap = (data->fMapper == gIOSystemMapper);
            keepMap &= ((data->fOffset == 0) && (data->fLength == _length));

            remap = (!keepMap);
            remap |= (dataP->fDMAMapNumAddressBits < 64)
                  && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);

            if (remap || !dataP->fMappedBase)
            {
//              if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
                if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
                {
                    dataP->fMappedBase   = data->fAlloc;
                    dataP->fMappedLength = data->fAllocLength;
                    data->fAllocLength   = 0;           // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocLength = 0;                 // give out IOMD map
            }
            data->fMapContig = !dataP->fDiscontig;
        }

        return (err);
    }

    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;

#if IOMD_DEBUG_DMAACTIVE
    } else if (kIOMDDMAActive == op) {
        if (params) OSIncrementAtomic(&md->__iomd_reservedA);
        else {
            if (md->__iomd_reservedA)
                OSDecrementAtomic(&md->__iomd_reservedA);
            else
                panic("kIOMDSetDMAInactive");
        }
#endif /* IOMD_DEBUG_DMAACTIVE */

    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
            if (kIOReturnSuccess != err) return (err);
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt length;
    UInt64 address;

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn          ret;
    mach_vm_address_t address = 0;
    mach_vm_size_t    length  = 0;
    IOMapper *        mapper  = gIOSystemMapper;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        mach_vm_address_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            mach_vm_address_t newAddr;
            mach_vm_size_t    newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr; // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapToPhysicalAddress(origAddr);
                length = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
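/*
 * Illustrative sketch (not in the original source): walking a descriptor's
 * physical segments with the accessor above. "md" is an assumption for
 * illustration only.
 *
 *   IOByteCount offset = 0, segLen = 0;
 *   while (addr64_t segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone)) {
 *       // segAddr/segLen describe one physically contiguous run
 *       offset += segLen;
 *   }
 */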
#ifndef __LP64__
addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);
    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
              address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64  = mapper->mapToPhysicalAddress(phys32);
        origLen = *lengthOfSegment;
        length  = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#endif /* !__LP64__ */
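
/*
 * Editor's note: illustrative sketch only, not part of the original file.
 * Shows how a caller can walk a descriptor segment by segment with
 * getPhysicalSegment(offset, &length, kIOMemoryMapperNone). The helper name
 * and the logging are hypothetical; guarded out of the build.
 */
#if 0
static void
ExampleDumpPhysicalSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength()) {
        IOByteCount seglen = 0;
        addr64_t    paddr  = md->getPhysicalSegment(offset, &seglen, kIOMemoryMapperNone);
        if (!paddr || !seglen) break;           // no further contiguous segment
        IOLog("segment @0x%qx len 0x%x\n", paddr, (uint32_t) seglen);
        offset += seglen;                       // advance by the contiguous run just returned
    }
}
#endif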
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
    DMACommandOps params;
    IOReturn err;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fDirection = getDirection();
        data->fIsPrepared = true; // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments == op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
    else if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;
        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());

        data->fMapContig = true;
        err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
        return (err);
    }
    else return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
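
/*
 * Editor's note: illustrative sketch only (not original code). It queries a
 * descriptor's DMA characteristics through dmaCommandOperation(), much as
 * IODMACommand does internally; the wrapper name is hypothetical.
 */
#if 0
static IOReturn
ExampleGetDMACharacteristics(IOMemoryDescriptor * md, IOByteCount * outLength)
{
    IOMDDMACharacteristics chars;
    bzero(&chars, sizeof(chars));
    IOReturn ret = md->dmaCommandOperation(kIOMDGetCharacteristics, &chars, sizeof(chars));
    if (kIOReturnSuccess == ret) *outLength = chars.fLength;   // total bytes described
    return ret;
}
#endif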
IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                         IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;

    vm_purgable_t control;
    int           state;

    if (_memRef)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            vm_map_t curMap;

            // Find the appropriate vm_map for the given task
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else if (!_task)
            {
                err = kIOReturnUnsupported;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            mach_vm_address_t addr;
            mach_vm_size_t    len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }

    return (err);
}
IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                  IOOptionBits * oldState )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
IOReturn
IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
                                   IOByteCount * dirtyPageCount )
{
    IOReturn err = kIOReturnNotReady;

    if (kIOMemoryThreadSafe & _flags) LOCK;
    if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
    else
    {
        IOMultiMemoryDescriptor * mmd;
        IOSubMemoryDescriptor   * smd;
        if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
        {
            err = smd->getPageCounts(residentPageCount, dirtyPageCount);
        }
        else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
        {
            err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
        }
    }
    if (kIOMemoryThreadSafe & _flags) UNLOCK;

    return (err);
}
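
/*
 * Editor's note: illustrative sketch only (not original code). Typical use of
 * setPurgeable() followed by getPageCounts() on a purgeable-backed descriptor.
 * The wrapper name is hypothetical; the constants are the public states.
 */
#if 0
static IOReturn
ExampleVolatileThenQuery(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;
    IOReturn ret = md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
    if (kIOReturnSuccess != ret) return ret;

    IOByteCount resident = 0, dirty = 0;
    ret = md->getPageCounts(&resident, &dirty);
    if (kIOReturnSuccess == ret)
        IOLog("resident 0x%x dirty 0x%x bytes\n", (uint32_t) resident, (uint32_t) dirty);
    return ret;
}
#endif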
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}
IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options,
                                      IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
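
/*
 * Editor's note: illustrative sketch only (not original code). Flushing the
 * start of a buffer for a non-coherent device via performOperation(); the
 * chosen range is hypothetical.
 */
#if 0
static IOReturn
ExampleFlushForDevice(IOMemoryDescriptor * md)
{
    // Write back the first page's worth of the buffer before the device reads it.
    return md->performOperation(kIOMemoryIncoherentIOStore, 0, page_size);
}
#endif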
#if defined(__i386__) || defined(__x86_64__)

#define io_kernel_static_start  vm_kernel_stext
#define io_kernel_static_end    vm_kernel_etext

#else
#error io_kernel_static_end is undefined for this architecture
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        uintptr_t               offset,
        upl_size_t              *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnCannotWire;
    ioGMDData *dataP;
    IOMapper *mapper;
    upl_page_info_array_t pageInfo;
    ppnum_t mapBase = 0;

    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
        forDirection = (IODirection) (forDirection | getDirection());

    upl_control_flags_t uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
        case kIODirectionOut:
            // Pages do not need to be marked as dirty on commit
            uplFlags = UPL_COPYOUT_FROM;
            break;

        case kIODirectionIn:
        default:
            uplFlags = 0;            // i.e. ~UPL_COPYOUT_FROM
            break;
    }

    if (_wireCount)
    {
        if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
        {
            OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
            error = kIOReturnNotWritable;
        }
        else error = kIOReturnSuccess;
        return (error);
    }

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    dataP->fMappedBase = 0;

    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
    uplFlags |= UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));

    if (kIODirectionPrepareToPhys32 & forDirection)
    {
        if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
        if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
    }
    if (kIODirectionPrepareNoFault     & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
    if (kIODirectionPrepareNoZeroFill  & forDirection) uplFlags |= UPL_NOZEROFILLIO;
    if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;

    // Note that appendBytes(NULL) zeros the data up to the desired length
    // and the length parameter is an unsigned int
    size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
    if (uplPageSize > ((unsigned int)uplPageSize))    return (kIOReturnNoMemory);
    if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
    dataP = 0;

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
    else                                                            curMap = get_task_map(_task);

    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset   = 0;
    ppnum_t highestPage    = 0;

    IOMemoryEntry * memRefEntry = 0;
    if (_memRef) memRefEntry = &_memRef->entries[0];

    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        mach_vm_address_t startPage;
        mach_vm_size_t    numBytes;
        ppnum_t highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedPage = mapBase + pageIndex;
        else
            iopl.fMappedPage = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
            if (curMap) theMap = curMap;
            else if (_memRef) theMap = NULL;
            else
            {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }

            // ioplFlags is an in/out parameter
            upl_control_flags_t ioplFlags = uplFlags;
            dataP = getDataP(_memoryEntries);
            pageInfo = getPageList(dataP);
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            mach_vm_size_t _ioplSize   = round_page(numBytes);
            upl_size_t     ioplSize    = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
            unsigned int   numPageInfo = atop_32(ioplSize);

            if ((theMap == kernel_map)
             && (kernelStart >= io_kernel_static_start)
             && (kernelStart <  io_kernel_static_end)) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &highPage);
            }
            else if (_memRef) {
                memory_object_offset_t entryOffset;

                entryOffset = mdOffset;
                entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
                if (entryOffset >= memRefEntry->size) {
                    memRefEntry++;
                    if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
                    entryOffset = 0;
                }
                if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
                error = memory_object_iopl_request(memRefEntry->entry,
                                                   entryOffset,
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          (upl_size_t*)&ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            if (iopl.fIOPL)
                highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnCannotWire;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
            }
            else {
                iopl.fFlags = 0;
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;
            if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;

#if 0
            // used to remove the upl for auto prepares here, for some errant code
            // that freed memory before the descriptor pointing at it
            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }
#endif

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partial created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }
            dataP = 0;

            // Check for a multiple iopl's in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper) iopl.fMappedPage = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;

#if IOTRACKING
    if ((kIOTracking & gIOKitDebug)
     //&& !(_flags & kIOMemoryAutoPrepare)
     )
    {
        dataP = getDataP(_memoryEntries);
        IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
    }
#endif

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;
    else if (error == KERN_MEMORY_ERROR)
        error = kIOReturnNoResources;

    return error;
}
bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
{
    ioGMDData * dataP;
    unsigned    dataSize = size;

    if (!_memoryEntries) {
        _memoryEntries = OSData::withCapacity(dataSize);
        if (!_memoryEntries)
            return false;
    }
    else if (!_memoryEntries->initWithCapacity(dataSize))
        return false;

    _memoryEntries->appendBytes(0, computeDataSize(0, 0));
    dataP = getDataP(_memoryEntries);

    if (mapper == kIOMapperWaitSystem) {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    dataP->fMapper               = mapper;
    dataP->fPageCnt              = 0;
    dataP->fMappedBase           = 0;
    dataP->fDMAMapNumAddressBits = 64;
    dataP->fDMAMapAlignment      = 0;
    dataP->fPreparationID        = kIOPreparationIDUnprepared;
    dataP->fDiscontig            = false;
    dataP->fCompletionError      = false;

    return (true);
}
IOReturn
IOMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn ret;
    uint32_t mapOptions = 0;

    mapOptions |= kIODMAMapReadAccess;
    if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

    ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
                                mapSpec, command, NULL, mapAddress, mapLength);

    return (ret);
}
IOReturn
IOGeneralMemoryDescriptor::dmaMap(
    IOMapper                    * mapper,
    IODMACommand                * command,
    const IODMAMapSpecification * mapSpec,
    uint64_t                      offset,
    uint64_t                      length,
    uint64_t                    * mapAddress,
    uint64_t                    * mapLength)
{
    IOReturn     err = kIOReturnSuccess;
    ioGMDData *  dataP;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);

    if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
     || offset || (length != _length))
    {
        err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
    }
    else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
    {
        const ioPLBlock * ioplList = getIOPLList(dataP);
        upl_page_info_t * pageList;
        uint32_t          mapOptions = 0;

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
        mapSpec.alignment      = dataP->fDMAMapAlignment;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplList->fFlags & kIOPLExternUPL)
        {
            pageList = (upl_page_info_t *) ioplList->fPageInfo;
            mapOptions |= kIODMAMapPagingPath;
        }
        else pageList = getPageList(dataP);

        if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
        {
            mapOptions |= kIODMAMapPageListFullyOccupied;
        }

        mapOptions |= kIODMAMapReadAccess;
        if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;

        // Check for direct device non-paged memory
        if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;

        IODMAMapPageList dmaPageList =
        {
            .pageOffset    = ioplList->fPageOffset & page_mask,
            .pageListCount = _pages,
            .pageList      = &pageList[0]
        };
        err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
                                    command, &dmaPageList, mapAddress, mapLength);
    }

    return (err);
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
    {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
    {
        if (1 == ++_wireCount)
        {
            if (kIOMemoryClearEncrypt & _flags)
            {
                performOperation(kIOMemoryClearEncrypted, 0, _length);
            }
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare() was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    ioGMDData * dataP;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    do
    {
        if (!_wireCount)
            break;

        if ((kIODirectionCompleteWithError & forDirection)
         && (dataP = getDataP(_memoryEntries)))
            dataP->fCompletionError = true;

        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, count = getNumIOPL(_memoryEntries, dataP);

            if (_wireCount)
            {
                // kIODirectionCompleteWithDataValid & forDirection
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
                {
                    for (ind = 0; ind < count; ind++)
                    {
                        if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
                    }
                }
            }
            else
            {
#if IOMD_DEBUG_DMAACTIVE
                if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

                if (dataP->fMappedBase) {
                    dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
                    dataP->fMappedBase = 0;
                }
                // Only complete iopls that we created which are for TypeVirtual
                if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
#if IOTRACKING
                    if ((kIOTracking & gIOKitDebug)
                     //&& !(_flags & kIOMemoryAutoPrepare)
                     )
                    {
                        IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
                    }
#endif
                    for (ind = 0; ind < count; ind++)
                        if (ioplList[ind].fIOPL) {
                            if (dataP->fCompletionError)
                                upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
                            else
                                upl_commit(ioplList[ind].fIOPL, 0, 0);
                            upl_deallocate(ioplList[ind].fIOPL);
                        }
                } else if (kIOMemoryTypeUPL == type) {
                    upl_set_referenced(ioplList[0].fIOPL, false);
                }

                (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

                dataP->fPreparationID = kIOPreparationIDUnprepared;
            }
        }
    }
    while (false);

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
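
/*
 * Editor's note: illustrative sketch only (not original code). The
 * prepare()/complete() pairing described above, as a driver would use it
 * around a DMA transfer; the hardware-programming step is elided and the
 * function name is hypothetical.
 */
#if 0
static IOReturn
ExampleWiredTransfer(IOMemoryDescriptor * md)
{
    IOReturn ret = md->prepare(kIODirectionOutIn);   // page in and wire
    if (kIOReturnSuccess != ret) return ret;

    // ... program the device with the descriptor's physical segments ...

    md->complete(kIODirectionOutIn);                  // must balance prepare()
    return kIOReturnSuccess;
}
#endif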
IOReturn
IOGeneralMemoryDescriptor::doMap(
        vm_map_t           __addressMap,
        IOVirtualAddress * __address,
        IOOptionBits       options,
        IOByteCount        __offset,
        IOByteCount        __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOReturn err;

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    mach_vm_address_t range0Addr = 0;
    mach_vm_size_t    range0Len  = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if (_task
     && (mapping->fAddressTask == _task)
     && (mapping->fAddressMap == get_task_map(_task))
     && (options & kIOMapAnywhere)
     && (1 == _rangesCount)
     && (0 == offset)
     && range0Addr
     && (length <= range0Len))
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if (!_memRef)
    {
        IOOptionBits createOptions = 0;
        if (!(kIOMapReadOnly & options))
        {
            createOptions |= kIOMemoryReferenceWrite;
#if DEVELOPMENT || DEBUG
            if (kIODirectionOut == (kIODirectionOutIn & _flags))
            {
                OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
            }
#endif
        }
        err = memoryReferenceCreate(createOptions, &_memRef);
        if (kIOReturnSuccess != err) return (err);
    }

    memory_object_t pager;
    pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);

    // <upl_transpose //
    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        do
        {
            upl_t               redirUPL2;
            upl_size_t          size;
            upl_control_flags_t flags;
            unsigned int        lock_count;

            if (!_memRef || (1 != _memRef->count))
            {
                err = kIOReturnNotReadable;
                break;
            }

            size = round_page(mapping->fLength);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
                        | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));

            if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }

            {
                // swap the memEntries since they now refer to different vm_objects
                IOMemoryReference * me = _memRef;
                _memRef = mapping->fMemory->_memRef;
                mapping->fMemory->_memRef = me;
            }
            if (pager)
                err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        while (false);
    }
    // upl_transpose> //
    else
    {
        err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
#if IOTRACKING
        if (err == KERN_SUCCESS) IOTrackingAdd(gIOMapTracking, &mapping->fTracking, length, false);
#endif
        if ((err == KERN_SUCCESS) && pager)
        {
            err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);

            if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
            else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
            {
                mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
            }
        }
    }

    return (err);
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress __address,
        IOByteCount      __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOMemoryMap::init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      _options,
        mach_vm_size_t    _offset,
        mach_vm_size_t    _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
IOReturn
IOMemoryDescriptor::doMap(
        vm_map_t           __addressMap,
        IOVirtualAddress * __address,
        IOOptionBits       options,
        IOByteCount        __offset,
        IOByteCount        __length )
{
    return (kIOReturnUnsupported);
}

IOReturn
IOMemoryDescriptor::handleFault(
        void *         _pager,
        mach_vm_size_t sourceOffset,
        mach_vm_size_t length)
{
    if( kIOMemoryRedirected & _flags)
    {
#if DEBUG
        IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
        do {
            SLEEP;
        } while( kIOMemoryRedirected & _flags );
    }

    return (kIOReturnSuccess);
}
IOReturn
IOMemoryDescriptor::populateDevicePager(
        void *            _pager,
        vm_map_t          addressMap,
        mach_vm_address_t address,
        mach_vm_size_t    sourceOffset,
        mach_vm_size_t    length,
        IOOptionBits      options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    mach_vm_size_t   size;
    mach_vm_size_t   bytes;
    mach_vm_size_t   page;
    mach_vm_size_t   pageOffset;
    mach_vm_size_t   pagerOffset;
    IOPhysicalLength segLen, chunk;
    addr64_t         physAddr;
    IOOptionBits     type;

    type = _flags & kIOMemoryTypeMask;

    if (reserved->dp.pagerContig)
    {
        sourceOffset = 0;
        pagerOffset  = 0;
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes) segLen = bytes;
        else if (segLen != trunc_page(segLen))   err = kIOReturnVMError;
        if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;

        if (kIOReturnSuccess != err) break;

#if DEBUG || DEVELOPMENT
        if ((kIOMemoryTypeUPL != type)
         && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
        {
            OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
        }
#endif /* DEBUG || DEVELOPMENT */

        chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);

        for (page = 0;
             (page < segLen) && (KERN_SUCCESS == err);
             page += chunk)
        {
            err = device_pager_populate_object(pager, pagerOffset,
                    (ppnum_t)(atop_64(physAddr + page)), chunk);
            pagerOffset += chunk;
        }

        assert (KERN_SUCCESS == err);
        if (err) break;

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)trunc_page_64(address),
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        sourceOffset += segLen - pageOffset;
        address      += segLen;
        bytes        -= segLen;
        pageOffset    = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
IOReturn
IOMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress __address,
        IOByteCount      __length )
{
    IOReturn          err;
    IOMemoryMap *     mapping;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length) panic("doUnmap");

    mapping    = (IOMemoryMap *) __address;
    addressMap = mapping->fAddressMap;
    address    = mapping->fAddress;
    length     = mapping->fLength;

    if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
    else
    {
        if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( address );
#if DEBUG
        if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
                                               addressMap, address, length );
#endif
        err = mach_vm_deallocate( addressMap, address, length );
    }

#if IOTRACKING
    IOTrackingRemove(gIOMapTracking, &mapping->fTracking, length);
#endif

    return (err);
}
IOReturn
IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn       err = kIOReturnSuccess;
    IOMemoryMap *  mapping = 0;
    OSIterator *   iter;

    LOCK;

    if (doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            memory_object_t pager;

            if( reserved)
                pager = (memory_object_t) reserved->dp.devicePager;
            else
                pager = MACH_PORT_NULL;

            while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            {
                mapping->redirect( safeTask, doRedirect );
                if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
                {
                    err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
                }
            }

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn
IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if (fSuperMap) {
//      err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)
                break;
            if (!fAddressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
              && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                err = kIOReturnSuccess;
#if DEBUG
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);
        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}

IOReturn
IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (kIOMapStatic & fOptions))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap) unmap();
#if IOTRACKING
    else                  IOTrackingRemove(gIOMapTracking, &fTracking, fLength);
#endif

    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}

IOReturn
IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}

IOByteCount IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
                && vm_map_is_64bit(fAddressMap)
                && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */

task_t IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits IOMemoryMap::getMapOptions()
{
    return( fOptions);
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}

IOMemoryMap * IOMemoryMap::copyCompatible(
                IOMemoryMap * newMapping )
{
    task_t            task      = newMapping->getAddressTask();
    mach_vm_address_t toAddress = newMapping->fAddress;
    IOOptionBits      _options  = newMapping->fOptions;
    mach_vm_size_t    _offset   = newMapping->fOffset;
    mach_vm_size_t    _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = fOffset + _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}

IOReturn
IOMemoryMap::wireRange(
        uint32_t       options,
        mach_vm_size_t offset,
        mach_vm_size_t length)
{
    IOReturn kr;
    mach_vm_address_t start = trunc_page_64(fAddress + offset);
    mach_vm_address_t end   = round_page_64(fAddress + offset + length);
    vm_prot_t prot;

    prot = (kIODirectionOutIn & options);
    if (prot)
    {
        prot |= VM_PROT_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
        kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
    }
    else
    {
        kr = vm_map_unwire(fAddressMap, start, end, FALSE);
    }

    return (kr);
}

IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}
4002 #define super OSObject
4004 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4006 void IOMemoryDescriptor::initialize( void )
4008 if( 0 == gIOMemoryLock
)
4009 gIOMemoryLock
= IORecursiveLockAlloc();
4011 gIOLastPage
= IOGetLastPageNumber();
4014 void IOMemoryDescriptor::free( void )
4016 if( _mappings
) _mappings
->release();
4020 IODelete(reserved
, IOMemoryDescriptorReserved
, 1);
4026 IOMemoryMap
* IOMemoryDescriptor::setMapping(
4028 IOVirtualAddress mapAddress
,
4029 IOOptionBits options
)
4031 return (createMappingInTask( intoTask
, mapAddress
,
4032 options
| kIOMapStatic
,
4036 IOMemoryMap
* IOMemoryDescriptor::map(
4037 IOOptionBits options
)
4039 return (createMappingInTask( kernel_task
, 0,
4040 options
| kIOMapAnywhere
,
4045 IOMemoryMap
* IOMemoryDescriptor::map(
4047 IOVirtualAddress atAddress
,
4048 IOOptionBits options
,
4050 IOByteCount length
)
4052 if ((!(kIOMapAnywhere
& options
)) && vm_map_is_64bit(get_task_map(intoTask
)))
4054 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4058 return (createMappingInTask(intoTask
, atAddress
,
4059 options
, offset
, length
));
4061 #endif /* !__LP64__ */
4063 IOMemoryMap
* IOMemoryDescriptor::createMappingInTask(
4065 mach_vm_address_t atAddress
,
4066 IOOptionBits options
,
4067 mach_vm_size_t offset
,
4068 mach_vm_size_t length
)
4070 IOMemoryMap
* result
;
4071 IOMemoryMap
* mapping
;
4074 length
= getLength();
4076 mapping
= new IOMemoryMap
;
4079 && !mapping
->init( intoTask
, atAddress
,
4080 options
, offset
, length
)) {
4086 result
= makeMapping(this, intoTask
, (IOVirtualAddress
) mapping
, options
| kIOMap64Bit
, 0, 0);
4092 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4093 this, atAddress
, (uint32_t) options
, offset
, length
);
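
/*
 * Editor's note: illustrative sketch only (not original code). Creating a
 * kernel-virtual mapping of a descriptor with createMappingInTask() and
 * releasing it when done; the wrapper name is hypothetical.
 */
#if 0
static IOReturn
ExampleMapIntoKernel(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
    if (!map) return kIOReturnVMError;

    void * cpuPtr = (void *) map->getAddress();   // valid while the map object is retained
    (void) cpuPtr;                                // ... access the buffer here ...

    map->release();                               // dropping the map undoes the mapping
    return kIOReturnSuccess;
}
#endif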
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
        {
            upl_size_t          size = round_page(fLength);
            upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
                        | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
            if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
                                                           NULL, NULL,
                                                           &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if ((false))
                    physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if ((false) && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}

IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor * owner,
        task_t               __intoTask,
        IOVirtualAddress     __address,
        IOOptionBits         options,
        IOByteCount          __offset,
        IOByteCount          __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn kr;
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                    IOPhysicalAddress address,
                                    IOByteCount       length,
                                    IODirection       direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(IOPhysicalRange * ranges,
                                           UInt32            withCount,
                                           IODirection       direction,
                                           bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    OSArray * array;

    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;

    array = OSArray::withCapacity(4);
    if (!array)  return (false);

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            mach_vm_address_t addr; mach_vm_size_t len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len  = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        array->setObject(dict);
        dict->release();
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;
    }

    result = array->serialize(s);

 bail:
    if (array)
        array->release();
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);

    return result;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }