/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
	memory_object_t		pager,
	uintptr_t		device_handle,
	vm_size_t		size,
	int			flags);
void
device_pager_deallocate(
	memory_object_t);
kern_return_t
device_pager_populate_object(
	memory_object_t		pager,
	vm_object_offset_t	offset,
	ppnum_t			phys_addr,
	vm_size_t		size);
kern_return_t
memory_object_iopl_request(
	ipc_port_t		port,
	memory_object_offset_t	offset,
	vm_size_t		*upl_size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	int			*flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);
#define kIOMaximumMappedIOByteCount	(512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

IOCopyMapper *	  gIOCopyMapper   = NULL;

static ppnum_t	  gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if DEBUG
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif

#define IOMD_DEBUG_DMAACTIVE	1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// function.

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;   // Pointer to page list or index into it
    uint32_t fIOMDOffset;     // The offset of this iopl in descriptor
    ppnum_t fMappedBase;      // Page number of first page in this iopl
    unsigned int fPageOffset; // Offset within first page of iopl
    unsigned int fFlags;      // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    uint64_t fPreparationID;
    unsigned int fPageCnt;
    // align arrays to 8 bytes so following macros work
    upl_page_info_t fPageList[];
};

#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)	((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)	\
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)	(&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
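/*
 * Illustrative sketch (hypothetical values, not from the original source):
 * the ioGMDData blob kept in _memoryEntries is laid out as the header above,
 * then fPageCnt upl_page_info_t entries, then the appended ioPLBlock records,
 * which is exactly what the accessor macros decode. A descriptor spanning
 * four pages in a single range would be sized roughly like this:
 *
 *   unsigned int pages = 4, upls = 1;
 *   unsigned int dataSize = computeDataSize(pages, upls);
 *   OSData * entries = OSData::withCapacity(dataSize);
 *   entries->appendBytes(0, sizeof(ioGMDData));     // zeroed header
 *   ioGMDData * dataP = getDataP(entries);
 *   dataP->fPageCnt = pages;                        // page list length
 *   // getIOPLList(dataP) now points just past the page list, and
 *   // getNumIOPL(entries, dataP) counts the ioPLBlocks appended so far.
 */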
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
kern_return_t device_data_action(
               uintptr_t               device_handle,
               ipc_port_t              device_pager,
               vm_prot_t               protection,
               vm_object_offset_t      offset,
               vm_size_t               size)
{
    struct ExpansionData {
        void *			devicePager;
        unsigned int		pagerContig:1;
        unsigned int		unused:31;
        IOMemoryDescriptor *	memory;
    };
    kern_return_t	 kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t     device_handle)
{
    struct ExpansionData {
        void *			devicePager;
        unsigned int		pagerContig:1;
        unsigned int		unused:31;
        IOMemoryDescriptor *	memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed and NULLs don't have to be
// checked for, as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64  == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
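/*
 * Illustrative sketch: getAddrLenForInd() is how the rest of this file walks
 * the polymorphic Ranges union without caring which flavour it holds. A
 * hypothetical caller inside the class, summing pages over all ranges:
 *
 *   user_addr_t      addr;
 *   IOPhysicalLength len;
 *   UInt64           pages = 0;
 *   for (UInt32 ind = 0; ind < _rangesCount; ind++) {
 *       getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, ind);
 *       pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
 *   }
 */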
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
#ifdef __LP64__
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
#else /* !__LP64__ */
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
        && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
#endif /* !__LP64__ */
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}
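/*
 * Illustrative sketch (hypothetical buffer and names): a typical kernel
 * client wraps an existing buffer with withAddressRange(), prepares it for
 * I/O, and releases it when done.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       (mach_vm_address_t) buffer, bufferLen, kIODirectionInOut, kernel_task);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... program the hardware using md->getPhysicalSegment(...) ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */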
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}
#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
#endif /* !__LP64__ */
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();		    // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
        && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ;

    if (kIODirectionOut != (kIODirectionOutIn & _flags))
        prot |= VM_PROT_WRITE;

    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
                  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount withLength,
                                    IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount withLength,
                                    IODirection withDirection,
                                    task_t      withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */
/*
 * initWithOptions:
 *
 * (Re)initialize an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, a UPL
 * from the ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
627 IOGeneralMemoryDescriptor::initWithOptions(void * buffers
,
631 IOOptionBits options
,
634 IOOptionBits type
= options
& kIOMemoryTypeMask
;
636 // Grab the original MD's configuation data to initialse the
637 // arguments to this function.
638 if (kIOMemoryTypePersistentMD
== type
) {
640 typePersMDData
*initData
= (typePersMDData
*) buffers
;
641 const IOGeneralMemoryDescriptor
*orig
= initData
->fMD
;
642 ioGMDData
*dataP
= getDataP(orig
->_memoryEntries
);
644 // Only accept persistent memory descriptors with valid dataP data.
645 assert(orig
->_rangesCount
== 1);
646 if ( !(orig
->_flags
& kIOMemoryPersistent
) || !dataP
)
649 _memEntry
= initData
->fMemEntry
; // Grab the new named entry
650 options
= orig
->_flags
| kIOMemoryAsReference
;
651 _singleRange
= orig
->_singleRange
; // Initialise our range
652 buffers
= &_singleRange
;
655 // Now grab the original task and whatever mapper was previously used
657 mapper
= dataP
->fMapper
;
659 // We are ready to go through the original initialisation now
663 case kIOMemoryTypeUIO
:
664 case kIOMemoryTypeVirtual
:
666 case kIOMemoryTypeVirtual64
:
667 #endif /* !__LP64__ */
673 if (vm_map_is_64bit(get_task_map(task
))
674 && (kIOMemoryTypeVirtual
== type
)
675 && ((IOVirtualRange
*) buffers
)->address
)
677 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
680 #endif /* !__LP64__ */
683 case kIOMemoryTypePhysical
: // Neither Physical nor UPL should have a task
685 case kIOMemoryTypePhysical64
:
686 #endif /* !__LP64__ */
687 case kIOMemoryTypeUPL
:
691 return false; /* bad argument */
698 * We can check the _initialized instance variable before having ever set
699 * it to an initial value because I/O Kit guarantees that all our instance
700 * variables are zeroed on an object's allocation.
705 * An existing memory descriptor is being retargeted to point to
706 * somewhere else. Clean up our present state.
708 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
709 if ((kIOMemoryTypePhysical
!= type
) && (kIOMemoryTypePhysical64
!= type
))
714 if (_ranges
.v
&& !(kIOMemoryAsReference
& _flags
))
716 if (kIOMemoryTypeUIO
== type
)
717 uio_free((uio_t
) _ranges
.v
);
719 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
))
720 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
);
721 #endif /* !__LP64__ */
723 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
727 { ipc_port_release_send((ipc_port_t
) _memEntry
); _memEntry
= 0; }
729 _mappings
->flushCollection();
737 // Grab the appropriate mapper
738 if (kIOMemoryMapperNone
& options
)
739 mapper
= 0; // No Mapper
740 else if (mapper
== kIOMapperSystem
) {
741 IOMapper::checkForSystemMapper();
742 gIOSystemMapper
= mapper
= IOMapper::gSystem
;
745 // Temp binary compatibility for kIOMemoryThreadSafe
746 if (kIOMemoryReserved6156215
& options
)
748 options
&= ~kIOMemoryReserved6156215
;
749 options
|= kIOMemoryThreadSafe
;
751 // Remove the dynamic internal use flags from the initial setting
752 options
&= ~(kIOMemoryPreparedReadOnly
);
757 _direction
= (IODirection
) (_flags
& kIOMemoryDirectionMask
);
758 #endif /* !__LP64__ */
760 __iomd_reservedA
= 0;
761 __iomd_reservedB
= 0;
764 if (kIOMemoryThreadSafe
& options
)
767 _prepareLock
= IOLockAlloc();
769 else if (_prepareLock
)
771 IOLockFree(_prepareLock
);
775 if (kIOMemoryTypeUPL
== type
) {
778 unsigned int dataSize
= computeDataSize(/* pages */ 0, /* upls */ 1);
780 if (!_memoryEntries
) {
781 _memoryEntries
= OSData::withCapacity(dataSize
);
785 else if (!_memoryEntries
->initWithCapacity(dataSize
))
788 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
789 dataP
= getDataP(_memoryEntries
);
790 dataP
->fMapper
= mapper
;
793 // _wireCount++; // UPLs start out life wired
796 _pages
+= atop_32(offset
+ count
+ PAGE_MASK
) - atop_32(offset
);
799 iopl
.fIOPL
= (upl_t
) buffers
;
800 upl_page_info_t
*pageList
= UPL_GET_INTERNAL_PAGE_LIST(iopl
.fIOPL
);
802 if (upl_get_size(iopl
.fIOPL
) < (count
+ offset
))
803 panic("short external upl");
805 // Set the flag kIOPLOnDevice convieniently equal to 1
806 iopl
.fFlags
= pageList
->device
| kIOPLExternUPL
;
807 iopl
.fIOMDOffset
= 0;
809 _highestPage
= upl_get_highest_page(iopl
.fIOPL
);
811 if (!pageList
->device
) {
812 // Pre-compute the offset into the UPL's page list
813 pageList
= &pageList
[atop_32(offset
)];
816 iopl
.fMappedBase
= mapper
->iovmAlloc(_pages
);
817 mapper
->iovmInsert(iopl
.fMappedBase
, 0, pageList
, _pages
);
820 iopl
.fMappedBase
= 0;
823 iopl
.fMappedBase
= 0;
824 iopl
.fPageInfo
= (vm_address_t
) pageList
;
825 iopl
.fPageOffset
= offset
;
827 _memoryEntries
->appendBytes(&iopl
, sizeof(iopl
));
830 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
831 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
833 // Initialize the memory descriptor
834 if (options
& kIOMemoryAsReference
) {
836 _rangesIsAllocated
= false;
837 #endif /* !__LP64__ */
839 // Hack assignment to get the buffer arg into _ranges.
840 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
842 // This also initialises the uio & physical ranges.
843 _ranges
.v
= (IOVirtualRange
*) buffers
;
847 _rangesIsAllocated
= true;
848 #endif /* !__LP64__ */
851 case kIOMemoryTypeUIO
:
852 _ranges
.v
= (IOVirtualRange
*) uio_duplicate((uio_t
) buffers
);
856 case kIOMemoryTypeVirtual64
:
857 case kIOMemoryTypePhysical64
:
859 && (((IOAddressRange
*) buffers
)->address
+ ((IOAddressRange
*) buffers
)->length
) <= 0x100000000ULL
) {
860 if (kIOMemoryTypeVirtual64
== type
)
861 type
= kIOMemoryTypeVirtual
;
863 type
= kIOMemoryTypePhysical
;
864 _flags
= (_flags
& ~kIOMemoryTypeMask
) | type
| kIOMemoryAsReference
;
865 _rangesIsAllocated
= false;
866 _ranges
.v
= &_singleRange
.v
;
867 _singleRange
.v
.address
= ((IOAddressRange
*) buffers
)->address
;
868 _singleRange
.v
.length
= ((IOAddressRange
*) buffers
)->length
;
871 _ranges
.v64
= IONew(IOAddressRange
, count
);
874 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOAddressRange
));
876 #endif /* !__LP64__ */
877 case kIOMemoryTypeVirtual
:
878 case kIOMemoryTypePhysical
:
880 _flags
|= kIOMemoryAsReference
;
882 _rangesIsAllocated
= false;
883 #endif /* !__LP64__ */
884 _ranges
.v
= &_singleRange
.v
;
886 _ranges
.v
= IONew(IOVirtualRange
, count
);
890 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOVirtualRange
));
895 // Find starting address within the vector of ranges
896 Ranges vec
= _ranges
;
899 for (unsigned ind
= 0; ind
< count
; ind
++) {
901 IOPhysicalLength len
;
903 // addr & len are returned by this function
904 getAddrLenForInd(addr
, len
, type
, vec
, ind
);
905 pages
+= (atop_64(addr
+ len
+ PAGE_MASK
) - atop_64(addr
));
907 assert(len
>= length
); // Check for 32 bit wrap around
910 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
912 ppnum_t highPage
= atop_64(addr
+ len
- 1);
913 if (highPage
> _highestPage
)
914 _highestPage
= highPage
;
919 _rangesCount
= count
;
921 // Auto-prepare memory at creation time.
922 // Implied completion when descriptor is free-ed
923 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
924 _wireCount
++; // Physical MDs are, by definition, wired
925 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
927 unsigned dataSize
= computeDataSize(_pages
, /* upls */ count
* 2);
929 if (!_memoryEntries
) {
930 _memoryEntries
= OSData::withCapacity(dataSize
);
934 else if (!_memoryEntries
->initWithCapacity(dataSize
))
937 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
938 dataP
= getDataP(_memoryEntries
);
939 dataP
->fMapper
= mapper
;
940 dataP
->fPageCnt
= _pages
;
942 if ( (kIOMemoryPersistent
& _flags
) && !_memEntry
)
943 _memEntry
= createNamedEntry();
945 if ((_flags
& kIOMemoryAutoPrepare
)
946 && prepare() != kIOReturnSuccess
)
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->memory = 0;
        UNLOCK;
    }

    if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
    {
        while (_wireCount)
            complete();
    }
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    if (_prepareLock)
        IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection
IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount
IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

#ifndef __LP64__
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return length - remaining;
}
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
        return (kIOPreparationIDAlwaysPrepared);

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
#if defined(__ppc__ )
        dataP->fPreparationID = gIOMDPreparationID++;
#else
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
#endif
    }
    return (dataP->fPreparationID);
}

uint64_t
IOMemoryDescriptor::getPreparationID( void )
{
    return (kIOPreparationIDUnsupported);
}
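/*
 * Illustrative note (sketch, not from the original source): the preparation
 * ID lets a DMA client detect when wired memory has been re-prepared and any
 * cached physical mappings must be rebuilt. A hypothetical consumer:
 *
 *   uint64_t id = md->getPreparationID();
 *   if (id != cachedID) {
 *       cachedID = id;
 *       // re-derive the segment list before reusing it
 *   }
 */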
1204 IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1206 if (kIOMDGetCharacteristics
== op
) {
1208 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1209 return kIOReturnUnderrun
;
1211 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1212 data
->fLength
= _length
;
1213 data
->fSGCount
= _rangesCount
;
1214 data
->fPages
= _pages
;
1215 data
->fDirection
= getDirection();
1217 data
->fIsPrepared
= false;
1219 data
->fIsPrepared
= true;
1220 data
->fHighestPage
= _highestPage
;
1221 if (_memoryEntries
) {
1222 ioGMDData
*gmdData
= getDataP(_memoryEntries
);
1223 ioPLBlock
*ioplList
= getIOPLList(gmdData
);
1224 UInt count
= getNumIOPL(_memoryEntries
, gmdData
);
1226 data
->fIsMapped
= (gmdData
->fMapper
&& _pages
&& (count
> 0)
1227 && ioplList
[0].fMappedBase
);
1229 data
->fPageAlign
= (ioplList
[0].fPageOffset
& PAGE_MASK
) | ~PAGE_MASK
;
1232 data
->fIsMapped
= false;
1235 return kIOReturnSuccess
;
1237 #if IOMD_DEBUG_DMAACTIVE
1238 } else if (kIOMDSetDMAActive
== op
) {
1239 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
1240 OSIncrementAtomic(&md
->__iomd_reservedA
);
1241 } else if (kIOMDSetDMAInactive
== op
) {
1242 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
1243 if (md
->__iomd_reservedA
)
1244 OSDecrementAtomic(&md
->__iomd_reservedA
);
1246 panic("kIOMDSetDMAInactive");
1247 #endif /* IOMD_DEBUG_DMAACTIVE */
1249 } else if (!(kIOMDWalkSegments
& op
))
1250 return kIOReturnBadArgument
;
1252 // Get the next segment
1253 struct InternalState
{
1254 IOMDDMAWalkSegmentArgs fIO
;
1260 // Find the next segment
1261 if (dataSize
< sizeof(*isP
))
1262 return kIOReturnUnderrun
;
1264 isP
= (InternalState
*) vData
;
1265 UInt offset
= isP
->fIO
.fOffset
;
1266 bool mapped
= isP
->fIO
.fMapped
;
1268 if (offset
>= _length
)
1269 return (offset
== _length
)? kIOReturnOverrun
: kIOReturnInternalError
;
1271 // Validate the previous offset
1272 UInt ind
, off2Ind
= isP
->fOffset2Index
;
1273 if ((kIOMDFirstSegment
!= op
)
1275 && (offset
== isP
->fNextOffset
|| off2Ind
<= offset
))
1278 ind
= off2Ind
= 0; // Start from beginning
1282 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
1284 // Physical address based memory descriptor
1285 const IOPhysicalRange
*physP
= (IOPhysicalRange
*) &_ranges
.p
[0];
1287 // Find the range after the one that contains the offset
1289 for (len
= 0; off2Ind
<= offset
; ind
++) {
1290 len
= physP
[ind
].length
;
1294 // Calculate length within range and starting address
1295 length
= off2Ind
- offset
;
1296 address
= physP
[ind
- 1].address
+ len
- length
;
1298 // see how far we can coalesce ranges
1299 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1300 len
= physP
[ind
].length
;
1306 // correct contiguous check overshoot
1311 else if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
) {
1313 // Physical address based memory descriptor
1314 const IOAddressRange
*physP
= (IOAddressRange
*) &_ranges
.v64
[0];
1316 // Find the range after the one that contains the offset
1318 for (len
= 0; off2Ind
<= offset
; ind
++) {
1319 len
= physP
[ind
].length
;
1323 // Calculate length within range and starting address
1324 length
= off2Ind
- offset
;
1325 address
= physP
[ind
- 1].address
+ len
- length
;
1327 // see how far we can coalesce ranges
1328 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1329 len
= physP
[ind
].length
;
1335 // correct contiguous check overshoot
1339 #endif /* !__LP64__ */
1342 panic("IOGMD: not wired for the IODMACommand");
1344 assert(_memoryEntries
);
1346 ioGMDData
* dataP
= getDataP(_memoryEntries
);
1347 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
1348 UInt numIOPLs
= getNumIOPL(_memoryEntries
, dataP
);
1349 upl_page_info_t
*pageList
= getPageList(dataP
);
1351 assert(numIOPLs
> 0);
1353 // Scan through iopl info blocks looking for block containing offset
1354 while (ind
< numIOPLs
&& offset
>= ioplList
[ind
].fIOMDOffset
)
1357 // Go back to actual range as search goes past it
1358 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
1359 off2Ind
= ioplInfo
.fIOMDOffset
;
1362 length
= ioplList
[ind
].fIOMDOffset
;
1365 length
-= offset
; // Remainder within iopl
1367 // Subtract offset till this iopl in total list
1370 // If a mapped address is requested and this is a pre-mapped IOPL
1371 // then just need to compute an offset relative to the mapped base.
1372 if (mapped
&& ioplInfo
.fMappedBase
) {
1373 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
1374 address
= ptoa_64(ioplInfo
.fMappedBase
) + offset
;
1375 continue; // Done leave do/while(false) now
1378 // The offset is rebased into the current iopl.
1379 // Now add the iopl 1st page offset.
1380 offset
+= ioplInfo
.fPageOffset
;
1382 // For external UPLs the fPageInfo field points directly to
1383 // the upl's upl_page_info_t array.
1384 if (ioplInfo
.fFlags
& kIOPLExternUPL
)
1385 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
1387 pageList
= &pageList
[ioplInfo
.fPageInfo
];
1389 // Check for direct device non-paged memory
1390 if ( ioplInfo
.fFlags
& kIOPLOnDevice
) {
1391 address
= ptoa_64(pageList
->phys_addr
) + offset
;
1392 continue; // Done leave do/while(false) now
1395 // Now we need compute the index into the pageList
1396 UInt pageInd
= atop_32(offset
);
1397 offset
&= PAGE_MASK
;
1399 // Compute the starting address of this segment
1400 IOPhysicalAddress pageAddr
= pageList
[pageInd
].phys_addr
;
1402 panic("!pageList phys_addr");
1405 address
= ptoa_64(pageAddr
) + offset
;
1407 // length is currently set to the length of the remainider of the iopl.
1408 // We need to check that the remainder of the iopl is contiguous.
1409 // This is indicated by pageList[ind].phys_addr being sequential.
1410 IOByteCount contigLength
= PAGE_SIZE
- offset
;
1411 while (contigLength
< length
1412 && ++pageAddr
== pageList
[++pageInd
].phys_addr
)
1414 contigLength
+= PAGE_SIZE
;
1417 if (contigLength
< length
)
1418 length
= contigLength
;
1426 // Update return values and state
1427 isP
->fIO
.fIOVMAddr
= address
;
1428 isP
->fIO
.fLength
= length
;
1430 isP
->fOffset2Index
= off2Ind
;
1431 isP
->fNextOffset
= isP
->fIO
.fOffset
+ length
;
1433 return kIOReturnSuccess
;
1437 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
1440 addr64_t address
= 0;
1441 IOByteCount length
= 0;
1442 IOMapper
* mapper
= gIOSystemMapper
;
1443 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1445 if (lengthOfSegment
)
1446 *lengthOfSegment
= 0;
1448 if (offset
>= _length
)
1451 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1452 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1453 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1454 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1456 if ((options
& _kIOMemorySourceSegment
) && (kIOMemoryTypeUPL
!= type
))
1458 unsigned rangesIndex
= 0;
1459 Ranges vec
= _ranges
;
1462 // Find starting address within the vector of ranges
1464 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
1465 if (offset
< length
)
1467 offset
-= length
; // (make offset relative)
1471 // Now that we have the starting range,
1472 // lets find the last contiguous range
1476 for ( ++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++ ) {
1477 user_addr_t newAddr
;
1478 IOPhysicalLength newLen
;
1480 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
1481 if (addr
+ length
!= newAddr
)
1486 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
1490 IOMDDMAWalkSegmentState _state
;
1491 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) &_state
;
1493 state
->fOffset
= offset
;
1494 state
->fLength
= _length
- offset
;
1495 state
->fMapped
= (0 == (options
& kIOMemoryMapperNone
));
1497 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
1499 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
))
1500 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1501 ret
, this, state
->fOffset
,
1502 state
->fIOVMAddr
, state
->fLength
);
1503 if (kIOReturnSuccess
== ret
)
1505 address
= state
->fIOVMAddr
;
1506 length
= state
->fLength
;
1509 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1510 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1512 if (mapper
&& ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)))
1514 if ((options
& kIOMemoryMapperNone
) && !(_flags
& kIOMemoryMapperNone
))
1516 addr64_t origAddr
= address
;
1517 IOByteCount origLen
= length
;
1519 address
= mapper
->mapAddr(origAddr
);
1520 length
= page_size
- (address
& (page_size
- 1));
1521 while ((length
< origLen
)
1522 && ((address
+ length
) == mapper
->mapAddr(origAddr
+ length
)))
1523 length
+= page_size
;
1524 if (length
> origLen
)
1528 else if (!(options
& kIOMemoryMapperNone
) && (_flags
& kIOMemoryMapperNone
))
1530 panic("getPhysicalSegment not mapped for I/O");
1532 #endif /* __LP64__ */
1539 if (lengthOfSegment
)
1540 *lengthOfSegment
= length
;
1547 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
1549 addr64_t address
= 0;
1551 if (options
& _kIOMemorySourceSegment
)
1553 address
= getSourceSegment(offset
, lengthOfSegment
);
1555 else if (options
& kIOMemoryMapperNone
)
1557 address
= getPhysicalSegment64(offset
, lengthOfSegment
);
1561 address
= getPhysicalSegment(offset
, lengthOfSegment
);
1568 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1570 return (getPhysicalSegment(offset
, lengthOfSegment
, kIOMemoryMapperNone
));
1574 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1576 addr64_t address
= 0;
1577 IOByteCount length
= 0;
1579 address
= getPhysicalSegment(offset
, lengthOfSegment
, 0);
1581 if (lengthOfSegment
)
1582 length
= *lengthOfSegment
;
1584 if ((address
+ length
) > 0x100000000ULL
)
1586 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1587 address
, (long) length
, (getMetaClass())->getClassName());
1590 return ((IOPhysicalAddress
) address
);
1594 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1596 IOPhysicalAddress phys32
;
1599 IOMapper
* mapper
= 0;
1601 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
1605 if (gIOSystemMapper
)
1606 mapper
= gIOSystemMapper
;
1610 IOByteCount origLen
;
1612 phys64
= mapper
->mapAddr(phys32
);
1613 origLen
= *lengthOfSegment
;
1614 length
= page_size
- (phys64
& (page_size
- 1));
1615 while ((length
< origLen
)
1616 && ((phys64
+ length
) == mapper
->mapAddr(phys32
+ length
)))
1617 length
+= page_size
;
1618 if (length
> origLen
)
1621 *lengthOfSegment
= length
;
1624 phys64
= (addr64_t
) phys32
;
1630 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1632 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, 0));
1636 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1638 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, _kIOMemorySourceSegment
));
1641 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
1642 IOByteCount
* lengthOfSegment
)
1644 if (_task
== kernel_task
)
1645 return (void *) getSourceSegment(offset
, lengthOfSegment
);
1647 panic("IOGMD::getVirtualSegment deprecated");
1651 #endif /* !__LP64__ */
1654 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1656 if (kIOMDGetCharacteristics
== op
) {
1657 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1658 return kIOReturnUnderrun
;
1660 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1661 data
->fLength
= getLength();
1663 data
->fDirection
= getDirection();
1664 if (IOMapper::gSystem
)
1665 data
->fIsMapped
= true;
1666 data
->fIsPrepared
= true; // Assume prepared - fails safe
1668 else if (kIOMDWalkSegments
& op
) {
1669 if (dataSize
< sizeof(IOMDDMAWalkSegmentArgs
))
1670 return kIOReturnUnderrun
;
1672 IOMDDMAWalkSegmentArgs
*data
= (IOMDDMAWalkSegmentArgs
*) vData
;
1673 IOByteCount offset
= (IOByteCount
) data
->fOffset
;
1675 IOPhysicalLength length
;
1676 IOMemoryDescriptor
*ncmd
= const_cast<IOMemoryDescriptor
*>(this);
1677 if (data
->fMapped
&& IOMapper::gSystem
)
1678 data
->fIOVMAddr
= ncmd
->getPhysicalSegment(offset
, &length
);
1680 data
->fIOVMAddr
= ncmd
->getPhysicalSegment(offset
, &length
, kIOMemoryMapperNone
);
1681 data
->fLength
= length
;
1684 return kIOReturnBadArgument
;
1686 return kIOReturnSuccess
;
static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;
    switch (newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE;
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY;
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (*state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}
IOReturn IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                                  IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (_memEntry)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges vec = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            user_addr_t addr;
            IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }
    return (err);
}

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }
        err = purgeableControlBits(newState, &control, &state);
        if (kIOReturnSuccess != err)
            break;
        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                err = purgeableStateBits(&state);
                *oldState = state;
            }
        }
    }
    while (false);

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (err);
}
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    {
        // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
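/*
 * Illustrative sketch: on hardware without coherent DMA a driver pushes the
 * processor caches out to memory before the device reads the range, and
 * flushes them after the device has written it (names hypothetical).
 *
 *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *   // ... device reads the buffer ...
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 *   // ... CPU now sees what the device wrote ...
 */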
1886 #if defined(__ppc__) || defined(__arm__)
1887 extern vm_offset_t static_memory_end
;
1888 #define io_kernel_static_end static_memory_end
1890 extern vm_offset_t first_avail
;
1891 #define io_kernel_static_end first_avail
1894 static kern_return_t
1895 io_get_kernel_static_upl(
1898 vm_size_t
*upl_size
,
1900 upl_page_info_array_t page_list
,
1901 unsigned int *count
,
1902 ppnum_t
*highest_page
)
1904 unsigned int pageCount
, page
;
1906 ppnum_t highestPage
= 0;
1908 pageCount
= atop_32(*upl_size
);
1909 if (pageCount
> *count
)
1914 for (page
= 0; page
< pageCount
; page
++)
1916 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
1919 page_list
[page
].phys_addr
= phys
;
1920 page_list
[page
].pageout
= 0;
1921 page_list
[page
].absent
= 0;
1922 page_list
[page
].dirty
= 0;
1923 page_list
[page
].precious
= 0;
1924 page_list
[page
].device
= 0;
1925 if (phys
> highestPage
)
1929 *highest_page
= highestPage
;
1931 return ((page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
);
1934 IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
1936 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1937 IOReturn error
= kIOReturnCannotWire
;
1939 ppnum_t mapBase
= 0;
1941 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1943 assert(!_wireCount
);
1944 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
1946 if (_pages
>= gIOMaximumMappedIOPageCount
)
1947 return kIOReturnNoResources
;
1949 dataP
= getDataP(_memoryEntries
);
1950 mapper
= dataP
->fMapper
;
1951 if (mapper
&& _pages
)
1952 mapBase
= mapper
->iovmAlloc(_pages
);
1954 // Note that appendBytes(NULL) zeros the data up to the
1956 _memoryEntries
->appendBytes(0, dataP
->fPageCnt
* sizeof(upl_page_info_t
));
1957 dataP
= 0; // May no longer be valid so lets not get tempted.
1959 if (forDirection
== kIODirectionNone
)
1960 forDirection
= getDirection();
1962 int uplFlags
; // This Mem Desc's default flags for upl creation
1963 switch (kIODirectionOutIn
& forDirection
)
1965 case kIODirectionOut
:
1966 // Pages do not need to be marked as dirty on commit
1967 uplFlags
= UPL_COPYOUT_FROM
;
1968 _flags
|= kIOMemoryPreparedReadOnly
;
1971 case kIODirectionIn
:
1973 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
1976 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
1978 #ifdef UPL_NEED_32BIT_ADDR
1979 if (kIODirectionPrepareToPhys32
& forDirection
)
1980 uplFlags
|= UPL_NEED_32BIT_ADDR
;
1983 // Find the appropriate vm_map for the given task
1985 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1988 { curMap
= get_task_map(_task
); }
1990 // Iterate over the vector of virtual ranges
1991 Ranges vec
= _ranges
;
1992 unsigned int pageIndex
= 0;
1993 IOByteCount mdOffset
= 0;
1994 ppnum_t highestPage
= 0;
1995 for (UInt range
= 0; range
< _rangesCount
; range
++) {
1997 user_addr_t startPage
;
1998 IOByteCount numBytes
;
1999 ppnum_t highPage
= 0;
2001 // Get the startPage address and length of vec[range]
2002 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
2003 iopl
.fPageOffset
= startPage
& PAGE_MASK
;
2004 numBytes
+= iopl
.fPageOffset
;
2005 startPage
= trunc_page_64(startPage
);
2008 iopl
.fMappedBase
= mapBase
+ pageIndex
;
2010 iopl
.fMappedBase
= 0;
2012 // Iterate over the current range, creating UPLs
2014 dataP
= getDataP(_memoryEntries
);
2015 vm_address_t kernelStart
= (vm_address_t
) startPage
;
2019 else if (!sharedMem
) {
2020 assert(_task
== kernel_task
);
2021 theMap
= IOPageableMapForAddress(kernelStart
);
2026 upl_page_info_array_t pageInfo
= getPageList(dataP
);
2027 int ioplFlags
= uplFlags
;
2028 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
2030 vm_size_t ioplSize
= round_page(numBytes
);
2031 unsigned int numPageInfo
= atop_32(ioplSize
);
2033 if (theMap
== kernel_map
&& kernelStart
< io_kernel_static_end
) {
2034 error
= io_get_kernel_static_upl(theMap
,
2042 else if (sharedMem
) {
2043 error
= memory_object_iopl_request(sharedMem
,
2053 error
= vm_map_create_upl(theMap
,
2055 (upl_size_t
*)&ioplSize
,
2063 if (error
!= KERN_SUCCESS
)
2067 highPage
= upl_get_highest_page(iopl
.fIOPL
);
2068 if (highPage
> highestPage
)
2069 highestPage
= highPage
;
2071 error
= kIOReturnCannotWire
;
2073 if (baseInfo
->device
) {
2075 iopl
.fFlags
= kIOPLOnDevice
;
2076 // Don't translate device memory at all
2077 if (mapper
&& mapBase
) {
2078 mapper
->iovmFree(mapBase
, _pages
);
2080 iopl
.fMappedBase
= 0;
2086 mapper
->iovmInsert(mapBase
, pageIndex
,
2087 baseInfo
, numPageInfo
);
2090 iopl
.fIOMDOffset
= mdOffset
;
2091 iopl
.fPageInfo
= pageIndex
;
2093 if ((_flags
& kIOMemoryAutoPrepare
) && iopl
.fIOPL
)
2095 upl_commit(iopl
.fIOPL
, 0, 0);
2096 upl_deallocate(iopl
.fIOPL
);
2100 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
2101 // Clean up partial created and unsaved iopl
2103 upl_abort(iopl
.fIOPL
, 0);
2104 upl_deallocate(iopl
.fIOPL
);
2109 // Check for a multiple iopl's in one virtual range
2110 pageIndex
+= numPageInfo
;
2111 mdOffset
-= iopl
.fPageOffset
;
2112 if (ioplSize
< numBytes
) {
2113 numBytes
-= ioplSize
;
2114 startPage
+= ioplSize
;
2115 mdOffset
+= ioplSize
;
2116 iopl
.fPageOffset
= 0;
2118 iopl
.fMappedBase
= mapBase
+ pageIndex
;
2121 mdOffset
+= numBytes
;
2127 _highestPage
= highestPage
;
2129 return kIOReturnSuccess
;
2133 dataP
= getDataP(_memoryEntries
);
2134 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
2135 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2137 for (UInt range
= 0; range
< done
; range
++)
2139 if (ioplList
[range
].fIOPL
) {
2140 upl_abort(ioplList
[range
].fIOPL
, 0);
2141 upl_deallocate(ioplList
[range
].fIOPL
);
2144 (void) _memoryEntries
->initWithBytes(dataP
, sizeof(ioGMDData
)); // == setLength()
2146 if (mapper
&& mapBase
)
2147 mapper
->iovmFree(mapBase
, _pages
);
2150 if (error
== KERN_FAILURE
)
2151 error
= kIOReturnCannotWire
;
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer. This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer. The complete() method completes the processing of
 * the memory after the I/O transfer finishes. This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (!_wireCount
        && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
        _wireCount++;

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    assert(_wireCount);

    if (_wireCount)
    {
        _wireCount--;
        if (!_wireCount)
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

#if IOMD_DEBUG_DMAACTIVE
            if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }
            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

            dataP->fPreparationID = kIOPreparationIDUnprepared;
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
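/*
 * Illustrative sketch: prepare() and complete() must be balanced around each
 * I/O involving pageable memory; a common driver pattern (hypothetical names):
 *
 *   if (kIOReturnSuccess == md->prepare(kIODirectionIn)) {
 *       startDMA(md);          // memory stays wired while the transfer runs
 *       waitForCompletion();
 *       md->complete(kIODirectionIn);
 *   }
 */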
2248 IOReturn
IOGeneralMemoryDescriptor::doMap(
2249 vm_map_t __addressMap
,
2250 IOVirtualAddress
* __address
,
2251 IOOptionBits options
,
2252 IOByteCount __offset
,
2253 IOByteCount __length
)
2257 if (!(kIOMap64Bit
& options
)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2258 #endif /* !__LP64__ */
2260 IOMemoryMap
* mapping
= (IOMemoryMap
*) *__address
;
2261 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
2262 mach_vm_size_t length
= mapping
->fLength
;
2264 kern_return_t kr
= kIOReturnVMError
;
2265 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
2267 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2268 Ranges vec
= _ranges
;
2270 user_addr_t range0Addr
= 0;
2271 IOByteCount range0Len
= 0;
2274 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
2276 // mapping source == dest? (could be much better)
2278 && (mapping
->fAddressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
2279 && (1 == _rangesCount
) && (0 == offset
)
2280 && range0Addr
&& (length
<= range0Len
) )
2282 mapping
->fAddress
= range0Addr
;
2283 mapping
->fOptions
|= kIOMapStatic
;
2285 return( kIOReturnSuccess
);
2288 if( 0 == sharedMem
) {
2290 vm_size_t size
= ptoa_32(_pages
);
2294 memory_object_size_t actualSize
= size
;
2295 vm_prot_t prot
= VM_PROT_READ
;
2296 if (!(kIOMapReadOnly
& options
))
2297 prot
|= VM_PROT_WRITE
;
2298 else if (kIOMapDefaultCache
!= (options
& kIOMapCacheMask
))
2299 prot
|= VM_PROT_WRITE
;
2301 kr
= mach_make_memory_entry_64(get_task_map(_task
),
2302 &actualSize
, range0Addr
,
2306 if( (KERN_SUCCESS
== kr
) && (actualSize
!= round_page(size
)))
2308 // map will cross vm objects
2310 IOLog("mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
2311 range0Addr
, (UInt64
)actualSize
, (UInt64
)size
);
2313 kr
= kIOReturnVMError
;
2314 ipc_port_release_send( sharedMem
);
2315 sharedMem
= MACH_PORT_NULL
;
2317 mach_vm_address_t address
;
2318 mach_vm_size_t pageOffset
= (range0Addr
& PAGE_MASK
);
2320 address
= trunc_page_64(mapping
->fAddress
);
2321 if ((options
& kIOMapAnywhere
) || ((mapping
->fAddress
- address
) == pageOffset
))
2323 kr
= IOMemoryDescriptorMapCopy(mapping
->fAddressMap
,
2324 get_task_map(_task
), range0Addr
,
2326 offset
, &address
, round_page_64(length
+ pageOffset
));
2327 if (kr
== KERN_SUCCESS
)
2328 mapping
->fAddress
= address
+ pageOffset
;
2330 mapping
->fAddress
= NULL
;
2335 { // _task == 0, must be physical
2337 memory_object_t pager
;
2338 unsigned int flags
= 0;
2340 IOPhysicalLength segLen
;
2342 pa
= getPhysicalSegment( offset
, &segLen
, kIOMemoryMapperNone
);
2345 reserved
= IONew( ExpansionData
, 1 );
2349 reserved
->pagerContig
= (1 == _rangesCount
);
2350 reserved
->memory
= this;
2352 /*What cache mode do we need*/
2353 switch(options
& kIOMapCacheMask
) {
2355 case kIOMapDefaultCache
:
2357 flags
= IODefaultCacheBits(pa
);
2358 if (DEVICE_PAGER_CACHE_INHIB
& flags
)
2360 if (DEVICE_PAGER_GUARDED
& flags
)
2361 mapping
->fOptions
|= kIOMapInhibitCache
;
2363 mapping
->fOptions
|= kIOMapWriteCombineCache
;
2365 else if (DEVICE_PAGER_WRITE_THROUGH
& flags
)
2366 mapping
->fOptions
|= kIOMapWriteThruCache
;
2368 mapping
->fOptions
|= kIOMapCopybackCache
;
2371 case kIOMapInhibitCache
:
2372 flags
= DEVICE_PAGER_CACHE_INHIB
|
2373 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2376 case kIOMapWriteThruCache
:
2377 flags
= DEVICE_PAGER_WRITE_THROUGH
|
2378 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2381 case kIOMapCopybackCache
:
2382 flags
= DEVICE_PAGER_COHERENT
;
2385 case kIOMapWriteCombineCache
:
2386 flags
= DEVICE_PAGER_CACHE_INHIB
|
2387 DEVICE_PAGER_COHERENT
;
2391 flags
|= reserved
->pagerContig
? DEVICE_PAGER_CONTIGUOUS
: 0;
2393 pager
= device_pager_setup( (memory_object_t
) 0, (uintptr_t) reserved
,
2398 kr
= mach_memory_object_memory_entry_64( (host_t
) 1, false /*internal*/,
2399 size
, VM_PROT_READ
| VM_PROT_WRITE
, pager
, &sharedMem
);
2401 assert( KERN_SUCCESS
== kr
);
2402 if( KERN_SUCCESS
!= kr
)
2404 device_pager_deallocate( pager
);
2405 pager
= MACH_PORT_NULL
;
2406 sharedMem
= MACH_PORT_NULL
;
2409 if( pager
&& sharedMem
)
2410 reserved
->devicePager
= pager
;
2412 IODelete( reserved
, ExpansionData
, 1 );
2418 _memEntry
= (void *) sharedMem
;
2425 result
= super::doMap( __addressMap
, __address
,
2426 options
, __offset
, __length
);
IOReturn
IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	__address,
	IOByteCount		__length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOMemoryMap::init(
        task_t                  intoTask,
        mach_vm_address_t       toAddress,
        IOOptionBits            _options,
        mach_vm_size_t          _offset,
        mach_vm_size_t          _length )
{
    fAddressMap = get_task_map(intoTask);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return( true );
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if( (_offset + fLength) > _memory->getLength())
        return( false );

    if (fMemory != _memory)
        fMemory->removeMapping(this);
    fMemory = _memory;

    return( true );
}
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t          sharedMem;
    vm_map_t            src_map;
    mach_vm_offset_t    src_address;
    mach_vm_address_t   mapped;
    mach_vm_size_t      size;
    mach_vm_size_t      sourceOffset;
    IOOptionBits        options;
};
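/*
 * IOMemoryDescriptorMapAllocRef carries the parameters and the result of a
 * single mapping request into IOMemoryDescriptorMapAlloc(), both when that
 * routine is called directly and when it is handed to IOIteratePageableMaps()
 * as the per-map callback (see below).
 */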
static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if (ref->sharedMem)
        {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // VM system requires write access to change cache mode
            if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = mach_vm_map( map,
                               &ref->mapped,
                               ref->size, 0 /* mask */,
                               (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                               ref->sharedMem, ref->sourceOffset,
                               false /* copy */,
                               prot, prot,
                               VM_INHERIT_NONE );

            if( KERN_SUCCESS != err ) {
                ref->mapped = 0;
                continue;
            }
        }
        else if (ref->src_map)
        {
            vm_prot_t cur_prot, max_prot;
            err = mach_vm_remap(map, &ref->mapped, ref->size, PAGE_MASK,
                                (ref->options & kIOMapAnywhere) ? TRUE : FALSE,
                                ref->src_map, ref->src_address,
                                FALSE /* copy */,
                                &cur_prot, &max_prot,
                                VM_INHERIT_NONE);
            if (KERN_SUCCESS == err)
            {
                if ((!(VM_PROT_READ & cur_prot))
                    || (!(kIOMapReadOnly & ref->options) && !(VM_PROT_WRITE & cur_prot)))
                {
                    mach_vm_deallocate(map, ref->mapped, ref->size);
                    err = KERN_PROTECTION_FAILURE;
                }
            }
            if (KERN_SUCCESS != err)
                ref->mapped = 0;
        }
        else
        {
            err = mach_vm_allocate( map, &ref->mapped, ref->size,
                                    ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                                    | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != err ) {
                ref->mapped = 0;
                continue;
            }
            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    }
    while( false );

    return( err );
}
kern_return_t
IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
                              mach_vm_size_t offset,
                              mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn                      err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = entry;
    ref.src_map      = NULL;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if( ref.sharedMem && (map == kernel_map) && pageable)
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( map, &ref );

    *address = ref.mapped;
    return (err);
}
kern_return_t
IOMemoryDescriptorMapCopy(vm_map_t map,
                          vm_map_t src_map,
                          mach_vm_offset_t src_address,
                          IOOptionBits options,
                          mach_vm_size_t offset,
                          mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn                      err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = NULL;
    ref.src_map      = src_map;
    ref.src_address  = src_address;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if (map == kernel_map)
        err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
    else
        err = IOMemoryDescriptorMapAlloc(map, &ref);

    *address = ref.mapped;
    return (err);
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;
    unsigned int      lock_count;

    do
    {
        sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t       redirUPL2;
            vm_size_t   size;
            int         flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size  = round_page(mapping->fLength + pageOffset);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                            NULL, NULL,
                                            &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
            }

            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->fMemory->_memEntry;
                mapping->fMemory->_memEntry = me;
            }

            if (pager)
                err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
                      err, this, sourceAddr, mapping, address, offset, length);

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}
IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        mach_vm_address_t       address,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    mach_vm_size_t      size;
    mach_vm_size_t      bytes;
    mach_vm_size_t      page;
    mach_vm_size_t      pageOffset;
    mach_vm_size_t      pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength        allLen;
                addr64_t                allPhys;

                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
            }
            else
            {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
        }

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        /*  *** Temporary Workaround *** */
        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }
        /*  *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    IOReturn            err;
    mach_vm_address_t   address;
    mach_vm_size_t      length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((IOMemoryMap *) __address)->fAddressMap;
        address    = ((IOMemoryMap *) __address)->fAddress;
        length     = ((IOMemoryMap *) __address)->fLength;
    }

    if ((addressMap == kernel_map)
        && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
        addressMap = IOPageableMapForAddress( address );

    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length );

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn            err = kIOReturnSuccess;
    IOMemoryMap *       mapping = 0;
    OSIterator *        iter;

    LOCK;

    if (doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, doRedirect );

        iter->release();
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
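/*
 * How the redirect machinery ties together: IOMemoryDescriptor::redirect()
 * above sets or clears kIOMemoryRedirected and forwards the request to every
 * IOMemoryMap in _mappings. While the flag is set, handleFault() (earlier in
 * this file) sleeps on the memory lock rather than populating new kernel
 * mappings, and the early vm_fault() workaround is skipped, so a redirected
 * descriptor is left untouched until it is redirected back.
 */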
IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

//  err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );

    LOCK;

    if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
        && (0 == (fOptions & kIOMapStatic)))
    {
        IOUnmapPages( fAddressMap, fAddress, fLength );
        err = kIOReturnSuccess;
        IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
    }
    else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
    {
        IOOptionBits newMode;
        newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
        IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
    }

    UNLOCK;

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}
void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap)
        unmap();

    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddress = 0;
    UNLOCK;
}

IOReturn IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}
// Overload the release mechanism.  All mappings must be a member of a memory
// descriptor's _mappings set.  This means that we always have 2 references on
// a mapping.  When either of these references is released we need to free
// ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}
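/*
 * Illustrative sketch (not part of this file) of how the two references play
 * out for a client that created a mapping; the descriptor "md" and the option
 * bits are assumptions for the example:
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, 0);
 *   // Two references now exist: the client's, and the one held by md's
 *   // _mappings set.
 *   map->release();
 *   // Only the set's reference would remain, so taggedRelease(tag, 2) lets
 *   // free() run: the mapping is torn down and removed from _mappings.
 */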
void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL)
    {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
                && vm_map_is_64bit(fAddressMap)
                && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */
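/*
 * Illustrative note (not part of this file): the backtrace warning above
 * steers 64-bit callers toward the full-width accessors, e.g. for an assumed
 * descriptor "md" mapped into the current task:
 *
 *   IOMemoryMap *     map = md->createMappingInTask(current_task(), 0, kIOMapAnywhere, 0, 0);
 *   mach_vm_address_t va  = map->getAddress();   // full-width address
 *   mach_vm_size_t    len = map->getLength();
 *
 * rather than getVirtualAddress(), whose IOVirtualAddress return type can be
 * narrower than a 64-bit task's address space on 32-bit kernels.
 */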
task_t IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits IOMemoryMap::getMapOptions()
{
    return( fOptions );
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
IOMemoryMap * IOMemoryMap::copyCompatible(
                IOMemoryMap * newMapping )
{
    task_t              task      = newMapping->getAddressTask();
    mach_vm_address_t   toAddress = newMapping->fAddress;
    IOOptionBits        _options  = newMapping->fOptions;
    mach_vm_size_t      _offset   = newMapping->fOffset;
    mach_vm_size_t      _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
        && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping->release();
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper * mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}
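/*
 * Sketch (assumed usage, not part of this file): the property published above
 * can be read back from the registry root inside the kernel, e.g.
 *
 *   OSObject * obj =
 *       IORegistryEntry::getRegistryRoot()->copyProperty(kIOMaximumMappedIOByteCountKey);
 *   OSNumber * num = OSDynamicCast(OSNumber, obj);
 *   if (num)
 *       IOLog("max mapped IO bytes: %llu\n", num->unsigned64BitValue());
 *   if (obj)
 *       obj->release();
 */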
void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        atAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t                  intoTask,
        mach_vm_address_t       atAddress,
        IOOptionBits            options,
        mach_vm_size_t          offset,
        mach_vm_size_t          length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
              this, atAddress, options, offset, length);

    return (result);
}
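/*
 * Illustrative sketch (not part of this file) of a typical caller of
 * createMappingInTask(); the descriptor construction, the physical range and
 * the option bits are assumptions for the example:
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                 physAddr, byteCount,
 *                                 kIODirectionInOut | kIOMemoryMapperNone, NULL);
 *   if (md && (kIOReturnSuccess == md->prepare()))
 *   {
 *       IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                 kIOMapAnywhere | kIOMapInhibitCache, 0, 0);
 *       if (map)
 *       {
 *           volatile UInt32 * regs = (volatile UInt32 *) map->getAddress();
 *           // ... access the mapped range through "regs" ...
 *           map->release();        // tears the mapping down (see IOMemoryMap::free)
 *       }
 *       md->complete();
 *   }
 *   if (md)
 *       md->release();
 */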
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = round_page(fLength);
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                            NULL, NULL,
                                            &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (false && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
                continue;
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            length,
                                 IODirection            direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges(IOPhysicalRange * ranges,
                                           UInt32            withCount,
                                           IODirection       direction,
                                           bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
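/*
 * Sketch (assumed usage, not part of this file): new code goes through
 * initWithOptions() or, more commonly, the factory methods instead of the
 * obsolete initializers above, e.g.
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddressRange(clientAddr, clientLen,
 *                                            kIODirectionOutIn, clientTask);
 */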
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result)
            goto bail;
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }