2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
36 #include <sys/cdefs.h>
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
52 #include "IOKitKernelInternal.h"
54 #include <libkern/c++/OSContainers.h>
55 #include <libkern/c++/OSDictionary.h>
56 #include <libkern/c++/OSArray.h>
57 #include <libkern/c++/OSSymbol.h>
58 #include <libkern/c++/OSNumber.h>
64 #include <vm/vm_pageout.h>
65 #include <mach/memory_object_types.h>
66 #include <device/device_port.h>
68 #include <mach/vm_prot.h>
69 #include <mach/mach_vm.h>
70 #include <vm/vm_fault.h>
71 #include <vm/vm_protos.h>
73 extern ppnum_t
pmap_find_phys(pmap_t pmap
, addr64_t va
);
74 extern void ipc_port_release_send(ipc_port_t port
);
77 memory_object_iopl_request(
79 memory_object_offset_t offset
,
82 upl_page_info_array_t user_page_list
,
83 unsigned int *page_list_count
,
86 unsigned int IOTranslateCacheBits(struct phys_entry
*pp
);
90 #define kIOMaximumMappedIOByteCount (512*1024*1024)
92 static IOMapper
* gIOSystemMapper
= NULL
;
94 static ppnum_t gIOMaximumMappedIOPageCount
= atop_32(kIOMaximumMappedIOByteCount
);
98 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor
, OSObject
)
102 #define super IOMemoryDescriptor
104 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor
, IOMemoryDescriptor
)
106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108 static IORecursiveLock
* gIOMemoryLock
;
110 #define LOCK IORecursiveLockLock( gIOMemoryLock)
111 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
112 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
114 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
117 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
119 #define DEBG(fmt, args...) {}
122 #define IOMD_DEBUG_DMAACTIVE 1
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126 // Some data structures and accessor macros used by the initWithOptions
129 enum ioPLBlockFlags
{
130 kIOPLOnDevice
= 0x00000001,
131 kIOPLExternUPL
= 0x00000002,
134 struct typePersMDData
136 const IOGeneralMemoryDescriptor
*fMD
;
137 ipc_port_t fMemEntry
;
142 vm_address_t fPageInfo
; // Pointer to page list or index into it
143 uint32_t fIOMDOffset
; // The offset of this iopl in descriptor
144 ppnum_t fMappedBase
; // Page number of first page in this iopl
145 unsigned int fPageOffset
; // Offset within first page of iopl
146 unsigned int fFlags
; // Flags
151 uint64_t fPreparationID
;
152 unsigned int fPageCnt
;
154 // align arrays to 8 bytes so following macros work
157 upl_page_info_t fPageList
[1]; /* variable length */
158 ioPLBlock fBlocks
[1]; /* variable length */
161 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
162 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
163 #define getNumIOPL(osd, d) \
164 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
165 #define getPageList(d) (&(d->fPageList[0]))
166 #define computeDataSize(p, u) \
167 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
170 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
172 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
177 kern_return_t
device_data_action(
178 uintptr_t device_handle
,
179 ipc_port_t device_pager
,
180 vm_prot_t protection
,
181 vm_object_offset_t offset
,
184 struct ExpansionData
{
186 unsigned int pagerContig
:1;
187 unsigned int unused
:31;
188 IOMemoryDescriptor
* memory
;
191 ExpansionData
* ref
= (ExpansionData
*) device_handle
;
192 IOMemoryDescriptor
* memDesc
;
195 memDesc
= ref
->memory
;
199 kr
= memDesc
->handleFault( device_pager
, 0, 0,
200 offset
, size
, kIOMapDefaultCache
/*?*/);
210 kern_return_t
device_close(
211 uintptr_t device_handle
)
213 struct ExpansionData
{
215 unsigned int pagerContig
:1;
216 unsigned int unused
:31;
217 IOMemoryDescriptor
* memory
;
219 ExpansionData
* ref
= (ExpansionData
*) device_handle
;
221 IODelete( ref
, ExpansionData
, 1 );
223 return( kIOReturnSuccess
);
227 // Note this inline function uses C++ reference arguments to return values
228 // This means that pointers are not passed and NULLs don't have to be
229 // checked for as a NULL reference is illegal.
231 getAddrLenForInd(user_addr_t
&addr
, IOPhysicalLength
&len
, // Output variables
232 UInt32 type
, IOGeneralMemoryDescriptor::Ranges r
, UInt32 ind
)
234 assert(kIOMemoryTypeUIO
== type
235 || kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
236 || kIOMemoryTypePhysical
== type
|| kIOMemoryTypePhysical64
== type
);
237 if (kIOMemoryTypeUIO
== type
) {
239 uio_getiov((uio_t
) r
.uio
, ind
, &addr
, &us
); len
= us
;
242 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
)) {
243 IOAddressRange cur
= r
.v64
[ind
];
247 #endif /* !__LP64__ */
249 IOVirtualRange cur
= r
.v
[ind
];
255 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
258 IOMemoryDescriptor::withAddress(void * address
,
260 IODirection direction
)
262 return IOMemoryDescriptor::
263 withAddressRange((IOVirtualAddress
) address
, length
, direction
| kIOMemoryAutoPrepare
, kernel_task
);
268 IOMemoryDescriptor::withAddress(IOVirtualAddress address
,
270 IODirection direction
,
273 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
276 if (that
->initWithAddress(address
, length
, direction
, task
))
283 #endif /* !__LP64__ */
286 IOMemoryDescriptor::withPhysicalAddress(
287 IOPhysicalAddress address
,
289 IODirection direction
)
291 return (IOMemoryDescriptor::withAddressRange(address
, length
, direction
, TASK_NULL
));
296 IOMemoryDescriptor::withRanges( IOVirtualRange
* ranges
,
298 IODirection direction
,
302 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
305 if (that
->initWithRanges(ranges
, withCount
, direction
, task
, asReference
))
312 #endif /* !__LP64__ */
315 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address
,
316 mach_vm_size_t length
,
317 IOOptionBits options
,
320 IOAddressRange range
= { address
, length
};
321 return (IOMemoryDescriptor::withAddressRanges(&range
, 1, options
, task
));
325 IOMemoryDescriptor::withAddressRanges(IOAddressRange
* ranges
,
327 IOOptionBits options
,
330 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
334 options
|= kIOMemoryTypeVirtual64
;
336 options
|= kIOMemoryTypePhysical64
;
338 if (that
->initWithOptions(ranges
, rangeCount
, 0, task
, options
, /* mapper */ 0))
351 * Create a new IOMemoryDescriptor. The buffer is made up of several
352 * virtual address ranges, from a given task.
354 * Passing the ranges as a reference will avoid an extra allocation.
357 IOMemoryDescriptor::withOptions(void * buffers
,
364 IOGeneralMemoryDescriptor
*self
= new IOGeneralMemoryDescriptor
;
367 && !self
->initWithOptions(buffers
, count
, offset
, task
, opts
, mapper
))
376 bool IOMemoryDescriptor::initWithOptions(void * buffers
,
380 IOOptionBits options
,
388 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange
* ranges
,
390 IODirection direction
,
393 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
396 if (that
->initWithPhysicalRanges(ranges
, withCount
, direction
, asReference
))
405 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor
* of
,
408 IODirection direction
)
410 return (IOSubMemoryDescriptor::withSubRange(of
, offset
, length
, direction
| kIOMemoryThreadSafe
));
412 #endif /* !__LP64__ */
415 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor
*originalMD
)
417 IOGeneralMemoryDescriptor
*origGenMD
=
418 OSDynamicCast(IOGeneralMemoryDescriptor
, originalMD
);
421 return IOGeneralMemoryDescriptor::
422 withPersistentMemoryDescriptor(origGenMD
);
428 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor
*originalMD
)
430 ipc_port_t sharedMem
= (ipc_port_t
) originalMD
->createNamedEntry();
435 if (sharedMem
== originalMD
->_memEntry
) {
436 originalMD
->retain(); // Add a new reference to ourselves
437 ipc_port_release_send(sharedMem
); // Remove extra send right
441 IOGeneralMemoryDescriptor
* self
= new IOGeneralMemoryDescriptor
;
442 typePersMDData initData
= { originalMD
, sharedMem
};
445 && !self
->initWithOptions(&initData
, 1, 0, 0, kIOMemoryTypePersistentMD
, 0)) {
452 void *IOGeneralMemoryDescriptor::createNamedEntry()
455 ipc_port_t sharedMem
;
457 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
459 user_addr_t range0Addr
;
460 IOByteCount range0Len
;
461 getAddrLenForInd(range0Addr
, range0Len
, type
, _ranges
, 0);
462 range0Addr
= trunc_page_64(range0Addr
);
464 vm_size_t size
= ptoa_32(_pages
);
465 vm_address_t kernelPage
= (vm_address_t
) range0Addr
;
467 vm_map_t theMap
= ((_task
== kernel_task
)
468 && (kIOMemoryBufferPageable
& _flags
))
469 ? IOPageableMapForAddress(kernelPage
)
470 : get_task_map(_task
);
472 memory_object_size_t actualSize
= size
;
473 vm_prot_t prot
= VM_PROT_READ
;
474 if (kIODirectionOut
!= (kIODirectionOutIn
& _flags
))
475 prot
|= VM_PROT_WRITE
;
478 prot
|= MAP_MEM_NAMED_REUSE
;
480 error
= mach_make_memory_entry_64(theMap
,
481 &actualSize
, range0Addr
, prot
, &sharedMem
, (ipc_port_t
) _memEntry
);
483 if (KERN_SUCCESS
== error
) {
484 if (actualSize
== size
) {
488 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
489 (UInt64
)range0Addr
, (UInt64
)actualSize
, (UInt64
)size
);
491 ipc_port_release_send( sharedMem
);
495 return MACH_PORT_NULL
;
500 IOGeneralMemoryDescriptor::initWithAddress(void * address
,
501 IOByteCount withLength
,
502 IODirection withDirection
)
504 _singleRange
.v
.address
= (vm_offset_t
) address
;
505 _singleRange
.v
.length
= withLength
;
507 return initWithRanges(&_singleRange
.v
, 1, withDirection
, kernel_task
, true);
511 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address
,
512 IOByteCount withLength
,
513 IODirection withDirection
,
516 _singleRange
.v
.address
= address
;
517 _singleRange
.v
.length
= withLength
;
519 return initWithRanges(&_singleRange
.v
, 1, withDirection
, withTask
, true);
523 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
524 IOPhysicalAddress address
,
525 IOByteCount withLength
,
526 IODirection withDirection
)
528 _singleRange
.p
.address
= address
;
529 _singleRange
.p
.length
= withLength
;
531 return initWithPhysicalRanges( &_singleRange
.p
, 1, withDirection
, true);
535 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
536 IOPhysicalRange
* ranges
,
538 IODirection direction
,
541 IOOptionBits mdOpts
= direction
| kIOMemoryTypePhysical
;
544 mdOpts
|= kIOMemoryAsReference
;
546 return initWithOptions(ranges
, count
, 0, 0, mdOpts
, /* mapper */ 0);
550 IOGeneralMemoryDescriptor::initWithRanges(
551 IOVirtualRange
* ranges
,
553 IODirection direction
,
557 IOOptionBits mdOpts
= direction
;
560 mdOpts
|= kIOMemoryAsReference
;
563 mdOpts
|= kIOMemoryTypeVirtual
;
565 // Auto-prepare if this is a kernel memory descriptor as very few
566 // clients bother to prepare() kernel memory.
567 // But it was not enforced so what are you going to do?
568 if (task
== kernel_task
)
569 mdOpts
|= kIOMemoryAutoPrepare
;
572 mdOpts
|= kIOMemoryTypePhysical
;
574 return initWithOptions(ranges
, count
, 0, task
, mdOpts
, /* mapper */ 0);
576 #endif /* !__LP64__ */
581 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
582 * from a given task, several physical ranges, an UPL from the ubc
583 * system or a uio (may be 64bit) from the BSD subsystem.
585 * Passing the ranges as a reference will avoid an extra allocation.
587 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
588 * existing instance -- note this behavior is not commonly supported in other
589 * I/O Kit classes, although it is supported here.
593 IOGeneralMemoryDescriptor::initWithOptions(void * buffers
,
597 IOOptionBits options
,
600 IOOptionBits type
= options
& kIOMemoryTypeMask
;
604 && (kIOMemoryTypeVirtual
== type
)
605 && vm_map_is_64bit(get_task_map(task
))
606 && ((IOVirtualRange
*) buffers
)->address
)
608 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
611 #endif /* !__LP64__ */
613 // Grab the original MD's configuation data to initialse the
614 // arguments to this function.
615 if (kIOMemoryTypePersistentMD
== type
) {
617 typePersMDData
*initData
= (typePersMDData
*) buffers
;
618 const IOGeneralMemoryDescriptor
*orig
= initData
->fMD
;
619 ioGMDData
*dataP
= getDataP(orig
->_memoryEntries
);
621 // Only accept persistent memory descriptors with valid dataP data.
622 assert(orig
->_rangesCount
== 1);
623 if ( !(orig
->_flags
& kIOMemoryPersistent
) || !dataP
)
626 _memEntry
= initData
->fMemEntry
; // Grab the new named entry
627 options
= orig
->_flags
& ~kIOMemoryAsReference
;
628 type
= options
& kIOMemoryTypeMask
;
629 buffers
= orig
->_ranges
.v
;
630 count
= orig
->_rangesCount
;
632 // Now grab the original task and whatever mapper was previously used
634 mapper
= dataP
->fMapper
;
636 // We are ready to go through the original initialisation now
640 case kIOMemoryTypeUIO
:
641 case kIOMemoryTypeVirtual
:
643 case kIOMemoryTypeVirtual64
:
644 #endif /* !__LP64__ */
650 case kIOMemoryTypePhysical
: // Neither Physical nor UPL should have a task
652 case kIOMemoryTypePhysical64
:
653 #endif /* !__LP64__ */
654 case kIOMemoryTypeUPL
:
658 return false; /* bad argument */
665 * We can check the _initialized instance variable before having ever set
666 * it to an initial value because I/O Kit guarantees that all our instance
667 * variables are zeroed on an object's allocation.
672 * An existing memory descriptor is being retargeted to point to
673 * somewhere else. Clean up our present state.
675 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
676 if ((kIOMemoryTypePhysical
!= type
) && (kIOMemoryTypePhysical64
!= type
))
681 if (_ranges
.v
&& !(kIOMemoryAsReference
& _flags
))
683 if (kIOMemoryTypeUIO
== type
)
684 uio_free((uio_t
) _ranges
.v
);
686 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
))
687 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
);
688 #endif /* !__LP64__ */
690 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
695 ipc_port_release_send((ipc_port_t
) _memEntry
);
699 _mappings
->flushCollection();
707 // Grab the appropriate mapper
708 if (kIOMemoryMapperNone
& options
)
709 mapper
= 0; // No Mapper
710 else if (mapper
== kIOMapperSystem
) {
711 IOMapper::checkForSystemMapper();
712 gIOSystemMapper
= mapper
= IOMapper::gSystem
;
715 // Temp binary compatibility for kIOMemoryThreadSafe
716 if (kIOMemoryReserved6156215
& options
)
718 options
&= ~kIOMemoryReserved6156215
;
719 options
|= kIOMemoryThreadSafe
;
721 // Remove the dynamic internal use flags from the initial setting
722 options
&= ~(kIOMemoryPreparedReadOnly
);
727 _direction
= (IODirection
) (_flags
& kIOMemoryDirectionMask
);
728 #endif /* !__LP64__ */
730 __iomd_reservedA
= 0;
731 __iomd_reservedB
= 0;
734 if (kIOMemoryThreadSafe
& options
)
737 _prepareLock
= IOLockAlloc();
739 else if (_prepareLock
)
741 IOLockFree(_prepareLock
);
745 if (kIOMemoryTypeUPL
== type
) {
748 unsigned int dataSize
= computeDataSize(/* pages */ 0, /* upls */ 1);
750 if (!_memoryEntries
) {
751 _memoryEntries
= OSData::withCapacity(dataSize
);
755 else if (!_memoryEntries
->initWithCapacity(dataSize
))
758 _memoryEntries
->appendBytes(0, computeDataSize(0, 0));
759 dataP
= getDataP(_memoryEntries
);
760 dataP
->fMapper
= mapper
;
763 // _wireCount++; // UPLs start out life wired
766 _pages
+= atop_32(offset
+ count
+ PAGE_MASK
) - atop_32(offset
);
769 iopl
.fIOPL
= (upl_t
) buffers
;
770 upl_set_referenced(iopl
.fIOPL
, true);
771 upl_page_info_t
*pageList
= UPL_GET_INTERNAL_PAGE_LIST(iopl
.fIOPL
);
773 if (upl_get_size(iopl
.fIOPL
) < (count
+ offset
))
774 panic("short external upl");
776 // Set the flag kIOPLOnDevice convieniently equal to 1
777 iopl
.fFlags
= pageList
->device
| kIOPLExternUPL
;
778 iopl
.fIOMDOffset
= 0;
780 _highestPage
= upl_get_highest_page(iopl
.fIOPL
);
782 if (!pageList
->device
) {
783 // Pre-compute the offset into the UPL's page list
784 pageList
= &pageList
[atop_32(offset
)];
787 iopl
.fMappedBase
= mapper
->iovmAlloc(_pages
);
788 mapper
->iovmInsert(iopl
.fMappedBase
, 0, pageList
, _pages
);
791 iopl
.fMappedBase
= 0;
794 iopl
.fMappedBase
= 0;
795 iopl
.fPageInfo
= (vm_address_t
) pageList
;
796 iopl
.fPageOffset
= offset
;
798 _memoryEntries
->appendBytes(&iopl
, sizeof(iopl
));
801 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
802 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
804 // Initialize the memory descriptor
805 if (options
& kIOMemoryAsReference
) {
807 _rangesIsAllocated
= false;
808 #endif /* !__LP64__ */
810 // Hack assignment to get the buffer arg into _ranges.
811 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
813 // This also initialises the uio & physical ranges.
814 _ranges
.v
= (IOVirtualRange
*) buffers
;
818 _rangesIsAllocated
= true;
819 #endif /* !__LP64__ */
822 case kIOMemoryTypeUIO
:
823 _ranges
.v
= (IOVirtualRange
*) uio_duplicate((uio_t
) buffers
);
827 case kIOMemoryTypeVirtual64
:
828 case kIOMemoryTypePhysical64
:
830 && (((IOAddressRange
*) buffers
)->address
+ ((IOAddressRange
*) buffers
)->length
) <= 0x100000000ULL
832 if (kIOMemoryTypeVirtual64
== type
)
833 type
= kIOMemoryTypeVirtual
;
835 type
= kIOMemoryTypePhysical
;
836 _flags
= (_flags
& ~kIOMemoryTypeMask
) | type
| kIOMemoryAsReference
;
837 _rangesIsAllocated
= false;
838 _ranges
.v
= &_singleRange
.v
;
839 _singleRange
.v
.address
= ((IOAddressRange
*) buffers
)->address
;
840 _singleRange
.v
.length
= ((IOAddressRange
*) buffers
)->length
;
843 _ranges
.v64
= IONew(IOAddressRange
, count
);
846 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOAddressRange
));
848 #endif /* !__LP64__ */
849 case kIOMemoryTypeVirtual
:
850 case kIOMemoryTypePhysical
:
852 _flags
|= kIOMemoryAsReference
;
854 _rangesIsAllocated
= false;
855 #endif /* !__LP64__ */
856 _ranges
.v
= &_singleRange
.v
;
858 _ranges
.v
= IONew(IOVirtualRange
, count
);
862 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOVirtualRange
));
867 // Find starting address within the vector of ranges
868 Ranges vec
= _ranges
;
871 for (unsigned ind
= 0; ind
< count
; ind
++) {
873 IOPhysicalLength len
;
875 // addr & len are returned by this function
876 getAddrLenForInd(addr
, len
, type
, vec
, ind
);
877 pages
+= (atop_64(addr
+ len
+ PAGE_MASK
) - atop_64(addr
));
879 assert(len
>= length
); // Check for 32 bit wrap around
882 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
884 ppnum_t highPage
= atop_64(addr
+ len
- 1);
885 if (highPage
> _highestPage
)
886 _highestPage
= highPage
;
891 _rangesCount
= count
;
893 // Auto-prepare memory at creation time.
894 // Implied completion when descriptor is free-ed
895 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
896 _wireCount
++; // Physical MDs are, by definition, wired
897 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
899 unsigned dataSize
= computeDataSize(_pages
, /* upls */ count
* 2);
901 if (!_memoryEntries
) {
902 _memoryEntries
= OSData::withCapacity(dataSize
);
906 else if (!_memoryEntries
->initWithCapacity(dataSize
))
909 _memoryEntries
->appendBytes(0, computeDataSize(0, 0));
910 dataP
= getDataP(_memoryEntries
);
911 dataP
->fMapper
= mapper
;
912 dataP
->fPageCnt
= _pages
;
914 if ( (kIOMemoryPersistent
& _flags
) && !_memEntry
)
915 _memEntry
= createNamedEntry();
917 if ((_flags
& kIOMemoryAutoPrepare
)
918 && prepare() != kIOReturnSuccess
)
931 void IOGeneralMemoryDescriptor::free()
933 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
938 reserved
->memory
= 0;
942 if ((kIOMemoryTypePhysical
!= type
) && (kIOMemoryTypePhysical64
!= type
))
948 _memoryEntries
->release();
950 if (_ranges
.v
&& !(kIOMemoryAsReference
& _flags
))
952 if (kIOMemoryTypeUIO
== type
)
953 uio_free((uio_t
) _ranges
.v
);
955 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
))
956 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
);
957 #endif /* !__LP64__ */
959 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
964 if (reserved
&& reserved
->devicePager
)
965 device_pager_deallocate( (memory_object_t
) reserved
->devicePager
);
967 // memEntry holds a ref on the device pager which owns reserved
968 // (ExpansionData) so no reserved access after this point
970 ipc_port_release_send( (ipc_port_t
) _memEntry
);
973 IOLockFree(_prepareLock
);
979 void IOGeneralMemoryDescriptor::unmapFromKernel()
981 panic("IOGMD::unmapFromKernel deprecated");
984 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex
)
986 panic("IOGMD::mapIntoKernel deprecated");
988 #endif /* !__LP64__ */
993 * Get the direction of the transfer.
995 IODirection
IOMemoryDescriptor::getDirection() const
1000 #endif /* !__LP64__ */
1001 return (IODirection
) (_flags
& kIOMemoryDirectionMask
);
1007 * Get the length of the transfer (over all ranges).
1009 IOByteCount
IOMemoryDescriptor::getLength() const
1014 void IOMemoryDescriptor::setTag( IOOptionBits tag
)
1019 IOOptionBits
IOMemoryDescriptor::getTag( void )
1025 // @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
1027 IOMemoryDescriptor::getSourceSegment( IOByteCount offset
, IOByteCount
* length
)
1029 addr64_t physAddr
= 0;
1031 if( prepare() == kIOReturnSuccess
) {
1032 physAddr
= getPhysicalSegment64( offset
, length
);
1036 return( (IOPhysicalAddress
) physAddr
); // truncated but only page offset is used
1038 #endif /* !__LP64__ */
1040 IOByteCount
IOMemoryDescriptor::readBytes
1041 (IOByteCount offset
, void *bytes
, IOByteCount length
)
1043 addr64_t dstAddr
= CAST_DOWN(addr64_t
, bytes
);
1044 IOByteCount remaining
;
1046 // Assert that this entire I/O is withing the available range
1047 assert(offset
< _length
);
1048 assert(offset
+ length
<= _length
);
1049 if (offset
>= _length
) {
1053 if (kIOMemoryThreadSafe
& _flags
)
1056 remaining
= length
= min(length
, _length
- offset
);
1057 while (remaining
) { // (process another target segment?)
1061 srcAddr64
= getPhysicalSegment(offset
, &srcLen
, kIOMemoryMapperNone
);
1065 // Clip segment length to remaining
1066 if (srcLen
> remaining
)
1069 copypv(srcAddr64
, dstAddr
, srcLen
,
1070 cppvPsrc
| cppvNoRefSrc
| cppvFsnk
| cppvKmap
);
1074 remaining
-= srcLen
;
1077 if (kIOMemoryThreadSafe
& _flags
)
1082 return length
- remaining
;
1085 IOByteCount
IOMemoryDescriptor::writeBytes
1086 (IOByteCount offset
, const void *bytes
, IOByteCount length
)
1088 addr64_t srcAddr
= CAST_DOWN(addr64_t
, bytes
);
1089 IOByteCount remaining
;
1091 // Assert that this entire I/O is withing the available range
1092 assert(offset
< _length
);
1093 assert(offset
+ length
<= _length
);
1095 assert( !(kIOMemoryPreparedReadOnly
& _flags
) );
1097 if ( (kIOMemoryPreparedReadOnly
& _flags
) || offset
>= _length
) {
1101 if (kIOMemoryThreadSafe
& _flags
)
1104 remaining
= length
= min(length
, _length
- offset
);
1105 while (remaining
) { // (process another target segment?)
1109 dstAddr64
= getPhysicalSegment(offset
, &dstLen
, kIOMemoryMapperNone
);
1113 // Clip segment length to remaining
1114 if (dstLen
> remaining
)
1117 copypv(srcAddr
, (addr64_t
) dstAddr64
, dstLen
,
1118 cppvPsnk
| cppvFsnk
| cppvNoRefSrc
| cppvNoModSnk
| cppvKmap
);
1122 remaining
-= dstLen
;
1125 if (kIOMemoryThreadSafe
& _flags
)
1130 return length
- remaining
;
1133 // osfmk/device/iokit_rpc.c
1134 extern "C" unsigned int IODefaultCacheBits(addr64_t pa
);
1137 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position
)
1139 panic("IOGMD::setPosition deprecated");
1141 #endif /* !__LP64__ */
1143 static volatile SInt64 gIOMDPreparationID
__attribute__((aligned(8))) = (1ULL << 32);
1146 IOGeneralMemoryDescriptor::getPreparationID( void )
1151 return (kIOPreparationIDUnprepared
);
1153 if (_flags
& (kIOMemoryTypePhysical
| kIOMemoryTypePhysical64
))
1154 return (kIOPreparationIDAlwaysPrepared
);
1156 if (!_memoryEntries
|| !(dataP
= getDataP(_memoryEntries
)))
1157 return (kIOPreparationIDUnprepared
);
1159 if (kIOPreparationIDUnprepared
== dataP
->fPreparationID
)
1161 dataP
->fPreparationID
= OSIncrementAtomic64(&gIOMDPreparationID
);
1163 return (dataP
->fPreparationID
);
1167 IOMemoryDescriptor::getPreparationID( void )
1169 return (kIOPreparationIDUnsupported
);
1172 IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1174 if (kIOMDGetCharacteristics
== op
) {
1176 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1177 return kIOReturnUnderrun
;
1179 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1180 data
->fLength
= _length
;
1181 data
->fSGCount
= _rangesCount
;
1182 data
->fPages
= _pages
;
1183 data
->fDirection
= getDirection();
1185 data
->fIsPrepared
= false;
1187 data
->fIsPrepared
= true;
1188 data
->fHighestPage
= _highestPage
;
1189 if (_memoryEntries
) {
1190 ioGMDData
*gmdData
= getDataP(_memoryEntries
);
1191 ioPLBlock
*ioplList
= getIOPLList(gmdData
);
1192 UInt count
= getNumIOPL(_memoryEntries
, gmdData
);
1194 data
->fIsMapped
= (gmdData
->fMapper
&& _pages
&& (count
> 0)
1195 && ioplList
[0].fMappedBase
);
1197 data
->fPageAlign
= (ioplList
[0].fPageOffset
& PAGE_MASK
) | ~PAGE_MASK
;
1200 data
->fIsMapped
= false;
1203 return kIOReturnSuccess
;
1205 #if IOMD_DEBUG_DMAACTIVE
1206 } else if (kIOMDSetDMAActive
== op
) {
1207 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
1208 OSIncrementAtomic(&md
->__iomd_reservedA
);
1209 } else if (kIOMDSetDMAInactive
== op
) {
1210 IOGeneralMemoryDescriptor
* md
= const_cast<IOGeneralMemoryDescriptor
*>(this);
1211 if (md
->__iomd_reservedA
)
1212 OSDecrementAtomic(&md
->__iomd_reservedA
);
1214 panic("kIOMDSetDMAInactive");
1215 #endif /* IOMD_DEBUG_DMAACTIVE */
1217 } else if (!(kIOMDWalkSegments
& op
))
1218 return kIOReturnBadArgument
;
1220 // Get the next segment
1221 struct InternalState
{
1222 IOMDDMAWalkSegmentArgs fIO
;
1228 // Find the next segment
1229 if (dataSize
< sizeof(*isP
))
1230 return kIOReturnUnderrun
;
1232 isP
= (InternalState
*) vData
;
1233 UInt offset
= isP
->fIO
.fOffset
;
1234 bool mapped
= isP
->fIO
.fMapped
;
1236 if (offset
>= _length
)
1237 return (offset
== _length
)? kIOReturnOverrun
: kIOReturnInternalError
;
1239 // Validate the previous offset
1240 UInt ind
, off2Ind
= isP
->fOffset2Index
;
1241 if ((kIOMDFirstSegment
!= op
)
1243 && (offset
== isP
->fNextOffset
|| off2Ind
<= offset
))
1246 ind
= off2Ind
= 0; // Start from beginning
1250 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
1252 // Physical address based memory descriptor
1253 const IOPhysicalRange
*physP
= (IOPhysicalRange
*) &_ranges
.p
[0];
1255 // Find the range after the one that contains the offset
1257 for (len
= 0; off2Ind
<= offset
; ind
++) {
1258 len
= physP
[ind
].length
;
1262 // Calculate length within range and starting address
1263 length
= off2Ind
- offset
;
1264 address
= physP
[ind
- 1].address
+ len
- length
;
1266 // see how far we can coalesce ranges
1267 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1268 len
= physP
[ind
].length
;
1274 // correct contiguous check overshoot
1279 else if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
) {
1281 // Physical address based memory descriptor
1282 const IOAddressRange
*physP
= (IOAddressRange
*) &_ranges
.v64
[0];
1284 // Find the range after the one that contains the offset
1286 for (len
= 0; off2Ind
<= offset
; ind
++) {
1287 len
= physP
[ind
].length
;
1291 // Calculate length within range and starting address
1292 length
= off2Ind
- offset
;
1293 address
= physP
[ind
- 1].address
+ len
- length
;
1295 // see how far we can coalesce ranges
1296 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1297 len
= physP
[ind
].length
;
1303 // correct contiguous check overshoot
1307 #endif /* !__LP64__ */
1310 panic("IOGMD: not wired for the IODMACommand");
1312 assert(_memoryEntries
);
1314 ioGMDData
* dataP
= getDataP(_memoryEntries
);
1315 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
1316 UInt numIOPLs
= getNumIOPL(_memoryEntries
, dataP
);
1317 upl_page_info_t
*pageList
= getPageList(dataP
);
1319 assert(numIOPLs
> 0);
1321 // Scan through iopl info blocks looking for block containing offset
1322 while (ind
< numIOPLs
&& offset
>= ioplList
[ind
].fIOMDOffset
)
1325 // Go back to actual range as search goes past it
1326 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
1327 off2Ind
= ioplInfo
.fIOMDOffset
;
1330 length
= ioplList
[ind
].fIOMDOffset
;
1333 length
-= offset
; // Remainder within iopl
1335 // Subtract offset till this iopl in total list
1338 // If a mapped address is requested and this is a pre-mapped IOPL
1339 // then just need to compute an offset relative to the mapped base.
1340 if (mapped
&& ioplInfo
.fMappedBase
) {
1341 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
1342 address
= ptoa_64(ioplInfo
.fMappedBase
) + offset
;
1343 continue; // Done leave do/while(false) now
1346 // The offset is rebased into the current iopl.
1347 // Now add the iopl 1st page offset.
1348 offset
+= ioplInfo
.fPageOffset
;
1350 // For external UPLs the fPageInfo field points directly to
1351 // the upl's upl_page_info_t array.
1352 if (ioplInfo
.fFlags
& kIOPLExternUPL
)
1353 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
1355 pageList
= &pageList
[ioplInfo
.fPageInfo
];
1357 // Check for direct device non-paged memory
1358 if ( ioplInfo
.fFlags
& kIOPLOnDevice
) {
1359 address
= ptoa_64(pageList
->phys_addr
) + offset
;
1360 continue; // Done leave do/while(false) now
1363 // Now we need compute the index into the pageList
1364 UInt pageInd
= atop_32(offset
);
1365 offset
&= PAGE_MASK
;
1367 // Compute the starting address of this segment
1368 IOPhysicalAddress pageAddr
= pageList
[pageInd
].phys_addr
;
1370 panic("!pageList phys_addr");
1373 address
= ptoa_64(pageAddr
) + offset
;
1375 // length is currently set to the length of the remainider of the iopl.
1376 // We need to check that the remainder of the iopl is contiguous.
1377 // This is indicated by pageList[ind].phys_addr being sequential.
1378 IOByteCount contigLength
= PAGE_SIZE
- offset
;
1379 while (contigLength
< length
1380 && ++pageAddr
== pageList
[++pageInd
].phys_addr
)
1382 contigLength
+= PAGE_SIZE
;
1385 if (contigLength
< length
)
1386 length
= contigLength
;
1394 // Update return values and state
1395 isP
->fIO
.fIOVMAddr
= address
;
1396 isP
->fIO
.fLength
= length
;
1398 isP
->fOffset2Index
= off2Ind
;
1399 isP
->fNextOffset
= isP
->fIO
.fOffset
+ length
;
1401 return kIOReturnSuccess
;
1405 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
1408 addr64_t address
= 0;
1409 IOByteCount length
= 0;
1410 IOMapper
* mapper
= gIOSystemMapper
;
1411 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1413 if (lengthOfSegment
)
1414 *lengthOfSegment
= 0;
1416 if (offset
>= _length
)
1419 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1420 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1421 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1422 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1424 if ((options
& _kIOMemorySourceSegment
) && (kIOMemoryTypeUPL
!= type
))
1426 unsigned rangesIndex
= 0;
1427 Ranges vec
= _ranges
;
1430 // Find starting address within the vector of ranges
1432 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
1433 if (offset
< length
)
1435 offset
-= length
; // (make offset relative)
1439 // Now that we have the starting range,
1440 // lets find the last contiguous range
1444 for ( ++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++ ) {
1445 user_addr_t newAddr
;
1446 IOPhysicalLength newLen
;
1448 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
1449 if (addr
+ length
!= newAddr
)
1454 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
1458 IOMDDMAWalkSegmentState _state
;
1459 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) &_state
;
1461 state
->fOffset
= offset
;
1462 state
->fLength
= _length
- offset
;
1463 state
->fMapped
= (0 == (options
& kIOMemoryMapperNone
));
1465 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
1467 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
))
1468 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1469 ret
, this, state
->fOffset
,
1470 state
->fIOVMAddr
, state
->fLength
);
1471 if (kIOReturnSuccess
== ret
)
1473 address
= state
->fIOVMAddr
;
1474 length
= state
->fLength
;
1477 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1478 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1480 if (mapper
&& ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
)))
1482 if ((options
& kIOMemoryMapperNone
) && !(_flags
& kIOMemoryMapperNone
))
1484 addr64_t origAddr
= address
;
1485 IOByteCount origLen
= length
;
1487 address
= mapper
->mapAddr(origAddr
);
1488 length
= page_size
- (address
& (page_size
- 1));
1489 while ((length
< origLen
)
1490 && ((address
+ length
) == mapper
->mapAddr(origAddr
+ length
)))
1491 length
+= page_size
;
1492 if (length
> origLen
)
1496 else if (!(options
& kIOMemoryMapperNone
) && (_flags
& kIOMemoryMapperNone
))
1498 panic("getPhysicalSegment not mapped for I/O");
1500 #endif /* __LP64__ */
1507 if (lengthOfSegment
)
1508 *lengthOfSegment
= length
;
1515 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
, IOOptionBits options
)
1517 addr64_t address
= 0;
1519 if (options
& _kIOMemorySourceSegment
)
1521 address
= getSourceSegment(offset
, lengthOfSegment
);
1523 else if (options
& kIOMemoryMapperNone
)
1525 address
= getPhysicalSegment64(offset
, lengthOfSegment
);
1529 address
= getPhysicalSegment(offset
, lengthOfSegment
);
1536 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1538 return (getPhysicalSegment(offset
, lengthOfSegment
, kIOMemoryMapperNone
));
1542 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1544 addr64_t address
= 0;
1545 IOByteCount length
= 0;
1547 address
= getPhysicalSegment(offset
, lengthOfSegment
, 0);
1549 if (lengthOfSegment
)
1550 length
= *lengthOfSegment
;
1552 if ((address
+ length
) > 0x100000000ULL
)
1554 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1555 address
, (long) length
, (getMetaClass())->getClassName());
1558 return ((IOPhysicalAddress
) address
);
1562 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1564 IOPhysicalAddress phys32
;
1567 IOMapper
* mapper
= 0;
1569 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
1573 if (gIOSystemMapper
)
1574 mapper
= gIOSystemMapper
;
1578 IOByteCount origLen
;
1580 phys64
= mapper
->mapAddr(phys32
);
1581 origLen
= *lengthOfSegment
;
1582 length
= page_size
- (phys64
& (page_size
- 1));
1583 while ((length
< origLen
)
1584 && ((phys64
+ length
) == mapper
->mapAddr(phys32
+ length
)))
1585 length
+= page_size
;
1586 if (length
> origLen
)
1589 *lengthOfSegment
= length
;
1592 phys64
= (addr64_t
) phys32
;
1598 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1600 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, 0));
1604 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1606 return ((IOPhysicalAddress
) getPhysicalSegment(offset
, lengthOfSegment
, _kIOMemorySourceSegment
));
1609 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
1610 IOByteCount
* lengthOfSegment
)
1612 if (_task
== kernel_task
)
1613 return (void *) getSourceSegment(offset
, lengthOfSegment
);
1615 panic("IOGMD::getVirtualSegment deprecated");
1619 #endif /* !__LP64__ */
1622 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1624 if (kIOMDGetCharacteristics
== op
) {
1625 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1626 return kIOReturnUnderrun
;
1628 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1629 data
->fLength
= getLength();
1631 data
->fDirection
= getDirection();
1632 if (IOMapper::gSystem
)
1633 data
->fIsMapped
= true;
1634 data
->fIsPrepared
= true; // Assume prepared - fails safe
1636 else if (kIOMDWalkSegments
& op
) {
1637 if (dataSize
< sizeof(IOMDDMAWalkSegmentArgs
))
1638 return kIOReturnUnderrun
;
1640 IOMDDMAWalkSegmentArgs
*data
= (IOMDDMAWalkSegmentArgs
*) vData
;
1641 IOByteCount offset
= (IOByteCount
) data
->fOffset
;
1643 IOPhysicalLength length
;
1644 IOMemoryDescriptor
*ncmd
= const_cast<IOMemoryDescriptor
*>(this);
1645 if (data
->fMapped
&& IOMapper::gSystem
)
1646 data
->fIOVMAddr
= ncmd
->getPhysicalSegment(offset
, &length
);
1648 data
->fIOVMAddr
= ncmd
->getPhysicalSegment(offset
, &length
, kIOMemoryMapperNone
);
1649 data
->fLength
= length
;
1652 return kIOReturnBadArgument
;
1654 return kIOReturnSuccess
;
1658 purgeableControlBits(IOOptionBits newState
, vm_purgable_t
* control
, int * state
)
1660 IOReturn err
= kIOReturnSuccess
;
1662 *control
= VM_PURGABLE_SET_STATE
;
1665 case kIOMemoryPurgeableKeepCurrent
:
1666 *control
= VM_PURGABLE_GET_STATE
;
1669 case kIOMemoryPurgeableNonVolatile
:
1670 *state
= VM_PURGABLE_NONVOLATILE
;
1672 case kIOMemoryPurgeableVolatile
:
1673 *state
= VM_PURGABLE_VOLATILE
;
1675 case kIOMemoryPurgeableEmpty
:
1676 *state
= VM_PURGABLE_EMPTY
;
1679 err
= kIOReturnBadArgument
;
1686 purgeableStateBits(int * state
)
1688 IOReturn err
= kIOReturnSuccess
;
1692 case VM_PURGABLE_NONVOLATILE
:
1693 *state
= kIOMemoryPurgeableNonVolatile
;
1695 case VM_PURGABLE_VOLATILE
:
1696 *state
= kIOMemoryPurgeableVolatile
;
1698 case VM_PURGABLE_EMPTY
:
1699 *state
= kIOMemoryPurgeableEmpty
;
1702 *state
= kIOMemoryPurgeableNonVolatile
;
1703 err
= kIOReturnNotReady
;
1710 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState
,
1711 IOOptionBits
* oldState
)
1713 IOReturn err
= kIOReturnSuccess
;
1714 vm_purgable_t control
;
1719 err
= super::setPurgeable(newState
, oldState
);
1723 if (kIOMemoryThreadSafe
& _flags
)
1727 // Find the appropriate vm_map for the given task
1729 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1731 err
= kIOReturnNotReady
;
1735 curMap
= get_task_map(_task
);
1737 // can only do one range
1738 Ranges vec
= _ranges
;
1739 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1742 getAddrLenForInd(addr
, len
, type
, vec
, 0);
1744 err
= purgeableControlBits(newState
, &control
, &state
);
1745 if (kIOReturnSuccess
!= err
)
1747 err
= mach_vm_purgable_control(curMap
, addr
, control
, &state
);
1750 if (kIOReturnSuccess
== err
)
1752 err
= purgeableStateBits(&state
);
1758 if (kIOMemoryThreadSafe
& _flags
)
1764 IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState
,
1765 IOOptionBits
* oldState
)
1767 IOReturn err
= kIOReturnSuccess
;
1768 vm_purgable_t control
;
1771 if (kIOMemoryThreadSafe
& _flags
)
1778 err
= kIOReturnNotReady
;
1781 err
= purgeableControlBits(newState
, &control
, &state
);
1782 if (kIOReturnSuccess
!= err
)
1784 err
= mach_memory_entry_purgable_control((ipc_port_t
) _memEntry
, control
, &state
);
1787 if (kIOReturnSuccess
== err
)
1789 err
= purgeableStateBits(&state
);
1796 if (kIOMemoryThreadSafe
& _flags
)
1802 extern "C" void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
);
1803 extern "C" void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
);
1805 static void SetEncryptOp(addr64_t pa
, unsigned int count
)
1809 page
= atop_64(round_page_64(pa
));
1810 end
= atop_64(trunc_page_64(pa
+ count
));
1811 for (; page
< end
; page
++)
1813 pmap_clear_noencrypt(page
);
1817 static void ClearEncryptOp(addr64_t pa
, unsigned int count
)
1821 page
= atop_64(round_page_64(pa
));
1822 end
= atop_64(trunc_page_64(pa
+ count
));
1823 for (; page
< end
; page
++)
1825 pmap_set_noencrypt(page
);
1829 IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options
,
1830 IOByteCount offset
, IOByteCount length
)
1832 IOByteCount remaining
;
1833 void (*func
)(addr64_t pa
, unsigned int count
) = 0;
1837 case kIOMemoryIncoherentIOFlush
:
1838 func
= &dcache_incoherent_io_flush64
;
1840 case kIOMemoryIncoherentIOStore
:
1841 func
= &dcache_incoherent_io_store64
;
1844 case kIOMemorySetEncrypted
:
1845 func
= &SetEncryptOp
;
1847 case kIOMemoryClearEncrypted
:
1848 func
= &ClearEncryptOp
;
1853 return (kIOReturnUnsupported
);
1855 if (kIOMemoryThreadSafe
& _flags
)
1858 remaining
= length
= min(length
, getLength() - offset
);
1860 // (process another target segment?)
1865 dstAddr64
= getPhysicalSegment(offset
, &dstLen
, kIOMemoryMapperNone
);
1869 // Clip segment length to remaining
1870 if (dstLen
> remaining
)
1873 (*func
)(dstAddr64
, dstLen
);
1876 remaining
-= dstLen
;
1879 if (kIOMemoryThreadSafe
& _flags
)
1882 return (remaining
? kIOReturnUnderrun
: kIOReturnSuccess
);
1885 extern vm_offset_t first_avail
;
1886 #define io_kernel_static_end first_avail
1888 static kern_return_t
1889 io_get_kernel_static_upl(
1892 vm_size_t
*upl_size
,
1894 upl_page_info_array_t page_list
,
1895 unsigned int *count
,
1896 ppnum_t
*highest_page
)
1898 unsigned int pageCount
, page
;
1900 ppnum_t highestPage
= 0;
1902 pageCount
= atop_32(*upl_size
);
1903 if (pageCount
> *count
)
1908 for (page
= 0; page
< pageCount
; page
++)
1910 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
1913 page_list
[page
].phys_addr
= phys
;
1914 page_list
[page
].pageout
= 0;
1915 page_list
[page
].absent
= 0;
1916 page_list
[page
].dirty
= 0;
1917 page_list
[page
].precious
= 0;
1918 page_list
[page
].device
= 0;
1919 if (phys
> highestPage
)
1923 *highest_page
= highestPage
;
1925 return ((page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
);
1928 IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
1930 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1931 IOReturn error
= kIOReturnCannotWire
;
1933 ppnum_t mapBase
= 0;
1935 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1937 assert(!_wireCount
);
1938 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
1940 if (_pages
>= gIOMaximumMappedIOPageCount
)
1941 return kIOReturnNoResources
;
1943 dataP
= getDataP(_memoryEntries
);
1944 mapper
= dataP
->fMapper
;
1945 if (mapper
&& _pages
)
1946 mapBase
= mapper
->iovmAlloc(_pages
);
1948 // Note that appendBytes(NULL) zeros the data up to the
1950 _memoryEntries
->appendBytes(0, dataP
->fPageCnt
* sizeof(upl_page_info_t
));
1951 dataP
= 0; // May no longer be valid so lets not get tempted.
1953 if (forDirection
== kIODirectionNone
)
1954 forDirection
= getDirection();
1956 int uplFlags
; // This Mem Desc's default flags for upl creation
1957 switch (kIODirectionOutIn
& forDirection
)
1959 case kIODirectionOut
:
1960 // Pages do not need to be marked as dirty on commit
1961 uplFlags
= UPL_COPYOUT_FROM
;
1962 _flags
|= kIOMemoryPreparedReadOnly
;
1965 case kIODirectionIn
:
1967 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
1970 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
1972 #ifdef UPL_NEED_32BIT_ADDR
1973 if (kIODirectionPrepareToPhys32
& forDirection
)
1974 uplFlags
|= UPL_NEED_32BIT_ADDR
;
1977 // Find the appropriate vm_map for the given task
1979 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1982 { curMap
= get_task_map(_task
); }
1984 // Iterate over the vector of virtual ranges
1985 Ranges vec
= _ranges
;
1986 unsigned int pageIndex
= 0;
1987 IOByteCount mdOffset
= 0;
1988 ppnum_t highestPage
= 0;
1989 for (UInt range
= 0; range
< _rangesCount
; range
++) {
1991 user_addr_t startPage
;
1992 IOByteCount numBytes
;
1993 ppnum_t highPage
= 0;
1995 // Get the startPage address and length of vec[range]
1996 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
1997 iopl
.fPageOffset
= startPage
& PAGE_MASK
;
1998 numBytes
+= iopl
.fPageOffset
;
1999 startPage
= trunc_page_64(startPage
);
2002 iopl
.fMappedBase
= mapBase
+ pageIndex
;
2004 iopl
.fMappedBase
= 0;
2006 // Iterate over the current range, creating UPLs
2008 dataP
= getDataP(_memoryEntries
);
2009 vm_address_t kernelStart
= (vm_address_t
) startPage
;
2013 else if (!sharedMem
) {
2014 assert(_task
== kernel_task
);
2015 theMap
= IOPageableMapForAddress(kernelStart
);
2020 upl_page_info_array_t pageInfo
= getPageList(dataP
);
2021 int ioplFlags
= uplFlags
;
2022 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
2024 vm_size_t ioplSize
= round_page(numBytes
);
2025 unsigned int numPageInfo
= atop_32(ioplSize
);
2027 if (theMap
== kernel_map
&& kernelStart
< io_kernel_static_end
) {
2028 error
= io_get_kernel_static_upl(theMap
,
2036 else if (sharedMem
) {
2037 error
= memory_object_iopl_request(sharedMem
,
2047 error
= vm_map_create_upl(theMap
,
2049 (upl_size_t
*)&ioplSize
,
2057 if (error
!= KERN_SUCCESS
)
2061 highPage
= upl_get_highest_page(iopl
.fIOPL
);
2062 if (highPage
> highestPage
)
2063 highestPage
= highPage
;
2065 error
= kIOReturnCannotWire
;
2067 if (baseInfo
->device
) {
2069 iopl
.fFlags
= kIOPLOnDevice
;
2070 // Don't translate device memory at all
2071 if (mapper
&& mapBase
) {
2072 mapper
->iovmFree(mapBase
, _pages
);
2074 iopl
.fMappedBase
= 0;
2080 mapper
->iovmInsert(mapBase
, pageIndex
,
2081 baseInfo
, numPageInfo
);
2084 iopl
.fIOMDOffset
= mdOffset
;
2085 iopl
.fPageInfo
= pageIndex
;
2088 // used to remove the upl for auto prepares here, for some errant code
2089 // that freed memory before the descriptor pointing at it
2090 if ((_flags
& kIOMemoryAutoPrepare
) && iopl
.fIOPL
)
2092 upl_commit(iopl
.fIOPL
, 0, 0);
2093 upl_deallocate(iopl
.fIOPL
);
2098 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
2099 // Clean up partial created and unsaved iopl
2101 upl_abort(iopl
.fIOPL
, 0);
2102 upl_deallocate(iopl
.fIOPL
);
2107 // Check for a multiple iopl's in one virtual range
2108 pageIndex
+= numPageInfo
;
2109 mdOffset
-= iopl
.fPageOffset
;
2110 if (ioplSize
< numBytes
) {
2111 numBytes
-= ioplSize
;
2112 startPage
+= ioplSize
;
2113 mdOffset
+= ioplSize
;
2114 iopl
.fPageOffset
= 0;
2116 iopl
.fMappedBase
= mapBase
+ pageIndex
;
2119 mdOffset
+= numBytes
;
2125 _highestPage
= highestPage
;
2127 return kIOReturnSuccess
;
2131 dataP
= getDataP(_memoryEntries
);
2132 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
2133 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2135 for (UInt range
= 0; range
< done
; range
++)
2137 if (ioplList
[range
].fIOPL
) {
2138 upl_abort(ioplList
[range
].fIOPL
, 0);
2139 upl_deallocate(ioplList
[range
].fIOPL
);
2142 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
2144 if (mapper
&& mapBase
)
2145 mapper
->iovmFree(mapBase
, _pages
);
2148 if (error
== KERN_FAILURE
)
2149 error
= kIOReturnCannotWire
;
2157 * Prepare the memory for an I/O transfer. This involves paging in
2158 * the memory, if necessary, and wiring it down for the duration of
2159 * the transfer. The complete() method completes the processing of
2160 * the memory after the I/O transfer finishes. This method needn't
2161 * called for non-pageable memory.
2163 IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection
)
2165 IOReturn error
= kIOReturnSuccess
;
2166 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2168 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
2169 return kIOReturnSuccess
;
2172 IOLockLock(_prepareLock
);
2175 && (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) ) {
2176 error
= wireVirtual(forDirection
);
2179 if (kIOReturnSuccess
== error
)
2182 if (1 == _wireCount
)
2184 if (kIOMemoryClearEncrypt
& _flags
)
2186 performOperation(kIOMemoryClearEncrypted
, 0, _length
);
2191 IOLockUnlock(_prepareLock
);
2199 * Complete processing of the memory after an I/O transfer finishes.
2200 * This method should not be called unless a prepare was previously
2201 * issued; the prepare() and complete() must occur in pairs, before
2202 * before and after an I/O transfer involving pageable memory.
2205 IOReturn
IOGeneralMemoryDescriptor::complete(IODirection
/* forDirection */)
2207 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2209 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
2210 return kIOReturnSuccess
;
2213 IOLockLock(_prepareLock
);
2219 if ((kIOMemoryClearEncrypt
& _flags
) && (1 == _wireCount
))
2221 performOperation(kIOMemorySetEncrypted
, 0, _length
);
2227 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2228 ioGMDData
* dataP
= getDataP(_memoryEntries
);
2229 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2230 UInt count
= getNumIOPL(_memoryEntries
, dataP
);
2232 #if IOMD_DEBUG_DMAACTIVE
2233 if (__iomd_reservedA
) panic("complete() while dma active");
2234 #endif /* IOMD_DEBUG_DMAACTIVE */
2236 if (dataP
->fMapper
&& _pages
&& ioplList
[0].fMappedBase
)
2237 dataP
->fMapper
->iovmFree(ioplList
[0].fMappedBase
, _pages
);
2239 // Only complete iopls that we created which are for TypeVirtual
2240 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
2241 for (UInt ind
= 0; ind
< count
; ind
++)
2242 if (ioplList
[ind
].fIOPL
) {
2243 upl_commit(ioplList
[ind
].fIOPL
, 0, 0);
2244 upl_deallocate(ioplList
[ind
].fIOPL
);
2246 } else if (kIOMemoryTypeUPL
== type
) {
2247 upl_set_referenced(ioplList
[0].fIOPL
, false);
2250 (void) _memoryEntries
->initWithBytes(dataP
, computeDataSize(0, 0)); // == setLength()
2252 dataP
->fPreparationID
= kIOPreparationIDUnprepared
;
2257 IOLockUnlock(_prepareLock
);
2259 return kIOReturnSuccess
;
2262 IOReturn
IOGeneralMemoryDescriptor::doMap(
2263 vm_map_t __addressMap
,
2264 IOVirtualAddress
* __address
,
2265 IOOptionBits options
,
2266 IOByteCount __offset
,
2267 IOByteCount __length
)
2271 if (!(kIOMap64Bit
& options
)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2272 #endif /* !__LP64__ */
2274 IOMemoryMap
* mapping
= (IOMemoryMap
*) *__address
;
2275 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
2276 mach_vm_size_t length
= mapping
->fLength
;
2278 kern_return_t kr
= kIOReturnVMError
;
2279 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
2281 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2282 Ranges vec
= _ranges
;
2284 user_addr_t range0Addr
= 0;
2285 IOByteCount range0Len
= 0;
2287 if ((offset
>= _length
) || ((offset
+ length
) > _length
))
2288 return( kIOReturnBadArgument
);
2291 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
2293 // mapping source == dest? (could be much better)
2295 && (mapping
->fAddressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
2296 && (1 == _rangesCount
) && (0 == offset
)
2297 && range0Addr
&& (length
<= range0Len
) )
2299 mapping
->fAddress
= range0Addr
;
2300 mapping
->fOptions
|= kIOMapStatic
;
2302 return( kIOReturnSuccess
);
2305 if( 0 == sharedMem
) {
2307 vm_size_t size
= ptoa_32(_pages
);
2311 memory_object_size_t actualSize
= size
;
2312 vm_prot_t prot
= VM_PROT_READ
;
2313 if (!(kIOMapReadOnly
& options
))
2314 prot
|= VM_PROT_WRITE
;
2315 else if (kIOMapDefaultCache
!= (options
& kIOMapCacheMask
))
2316 prot
|= VM_PROT_WRITE
;
2318 if (_rangesCount
== 1)
2320 kr
= mach_make_memory_entry_64(get_task_map(_task
),
2321 &actualSize
, range0Addr
,
2325 if( (_rangesCount
!= 1)
2326 || ((KERN_SUCCESS
== kr
) && (actualSize
!= round_page(size
))))
2330 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2331 _rangesCount
, (UInt64
)actualSize
, (UInt64
)size
);
2333 kr
= kIOReturnVMError
;
2336 ipc_port_release_send(sharedMem
);
2337 sharedMem
= MACH_PORT_NULL
;
2340 mach_vm_address_t address
, segDestAddr
;
2341 mach_vm_size_t mapLength
;
2342 unsigned rangesIndex
;
2343 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2344 user_addr_t srcAddr
;
2345 IOPhysicalLength segLen
= 0;
2347 // Find starting address within the vector of ranges
2348 for (rangesIndex
= 0; rangesIndex
< _rangesCount
; rangesIndex
++) {
2349 getAddrLenForInd(srcAddr
, segLen
, type
, _ranges
, rangesIndex
);
2350 if (offset
< segLen
)
2352 offset
-= segLen
; // (make offset relative)
2355 mach_vm_size_t pageOffset
= (srcAddr
& PAGE_MASK
);
2356 address
= trunc_page_64(mapping
->fAddress
);
2358 if ((options
& kIOMapAnywhere
) || ((mapping
->fAddress
- address
) == pageOffset
))
2360 vm_map_t map
= mapping
->fAddressMap
;
2361 kr
= IOMemoryDescriptorMapCopy(&map
,
2363 offset
, &address
, round_page_64(length
+ pageOffset
));
2364 if (kr
== KERN_SUCCESS
)
2366 segDestAddr
= address
;
2372 vm_prot_t cur_prot
, max_prot
;
2373 kr
= mach_vm_remap(map
, &segDestAddr
, round_page_64(segLen
), PAGE_MASK
,
2374 VM_FLAGS_FIXED
| VM_FLAGS_OVERWRITE
,
2375 get_task_map(_task
), trunc_page_64(srcAddr
),
2380 if (KERN_SUCCESS
== kr
)
2382 if ((!(VM_PROT_READ
& cur_prot
))
2383 || (!(kIOMapReadOnly
& options
) && !(VM_PROT_WRITE
& cur_prot
)))
2385 kr
= KERN_PROTECTION_FAILURE
;
2388 if (KERN_SUCCESS
!= kr
)
2390 segDestAddr
+= segLen
;
2391 mapLength
-= segLen
;
2395 if (rangesIndex
>= _rangesCount
)
2397 kr
= kIOReturnBadArgument
;
2400 getAddrLenForInd(srcAddr
, segLen
, type
, vec
, rangesIndex
);
2401 if (srcAddr
& PAGE_MASK
)
2403 kr
= kIOReturnBadArgument
;
2406 if (segLen
> mapLength
)
2409 if (KERN_SUCCESS
!= kr
)
2411 mach_vm_deallocate(mapping
->fAddressMap
, address
, round_page_64(length
+ pageOffset
));
2415 if (KERN_SUCCESS
== kr
)
2416 mapping
->fAddress
= address
+ pageOffset
;
2418 mapping
->fAddress
= NULL
;
    else
    {   // _task == 0, must be physical

        memory_object_t     pager;
        unsigned int        flags = 0;
        addr64_t            pa;
        IOPhysicalLength    segLen;

        pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );

        if (!reserved)
            reserved = IONew( ExpansionData, 1 );
        reserved->pagerContig = (1 == _rangesCount);
        reserved->memory      = this;

        /* What cache mode do we need? */
        switch (options & kIOMapCacheMask) {

            case kIOMapDefaultCache:
            default:
                flags = IODefaultCacheBits(pa);
                if (DEVICE_PAGER_CACHE_INHIB & flags)
                {
                    if (DEVICE_PAGER_GUARDED & flags)
                        mapping->fOptions |= kIOMapInhibitCache;
                    else
                        mapping->fOptions |= kIOMapWriteCombineCache;
                }
                else if (DEVICE_PAGER_WRITE_THROUGH & flags)
                    mapping->fOptions |= kIOMapWriteThruCache;
                else
                    mapping->fOptions |= kIOMapCopybackCache;
                break;

            case kIOMapInhibitCache:
                flags = DEVICE_PAGER_CACHE_INHIB |
                        DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                break;

            case kIOMapWriteThruCache:
                flags = DEVICE_PAGER_WRITE_THROUGH |
                        DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                break;

            case kIOMapCopybackCache:
                flags = DEVICE_PAGER_COHERENT;
                break;

            case kIOMapWriteCombineCache:
                flags = DEVICE_PAGER_CACHE_INHIB |
                        DEVICE_PAGER_COHERENT;
                break;
        }

        flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
        pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
                                    size, flags );
        assert( pager );

        if (pager)
        {
            kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

            assert( KERN_SUCCESS == kr );
            if( KERN_SUCCESS != kr)
            {
                device_pager_deallocate( pager );
                pager     = MACH_PORT_NULL;
                sharedMem = MACH_PORT_NULL;
            }
        }

        if( pager && sharedMem)
            reserved->devicePager = pager;
        else
        {
            IODelete( reserved, ExpansionData, 1 );
            reserved = 0;
        }
    }

    _memEntry = (void *) sharedMem;

    IOReturn result;
    if (0 == _memEntry)
        result = kIOReturnVMError;
    else
        result = super::doMap( __addressMap, __address,
                               options, __offset, __length );

    return (result);
}
IOReturn
IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
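/*
 * IOMemoryMap::init - capture the target task's vm_map (taking a reference),
 * the requested address, options, offset and length.  The backing
 * IOMemoryDescriptor is attached separately via setMemoryDescriptor().
 */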
bool IOMemoryMap::init(
        task_t                  intoTask,
        mach_vm_address_t       toAddress,
        IOOptionBits            _options,
        mach_vm_size_t          _offset,
        mach_vm_size_t          _length )
{
    if (!intoTask)
        return( false );
    if (!super::init())
        return( false );

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return( false );
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return( false );

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false );
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}
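/*
 * IOMemoryDescriptorMapAllocRef / IOMemoryDescriptorMapAlloc - parameter
 * block and callback used to enter a named memory entry into a vm_map.
 * When a shared memory entry is present, its cache mode is set first
 * (MAP_MEM_ONLY) and the entry is mapped with mach_vm_map(); otherwise
 * plain anonymous memory is allocated with mach_vm_allocate() and marked
 * VM_INHERIT_NONE so it is not copied across fork().
 */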
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t          sharedMem;
    vm_map_t            map;
    mach_vm_address_t   mapped;
    mach_vm_size_t      size;
    mach_vm_size_t      sourceOffset;
    IOOptionBits        options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem)
        {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // VM system requires write access to change cache mode
            if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = mach_vm_map( map,
                               &ref->mapped,
                               ref->size, 0 /* mask */,
                               (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                               ref->sharedMem, ref->sourceOffset,
                               false, // copy
                               prot,  // cur
                               prot,  // max
                               VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
        }
        else
        {
            err = mach_vm_allocate(map, &ref->mapped, ref->size,
                                   ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                                   | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    } while( false );

    return( err );
}
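/*
 * IOMemoryDescriptorMapMemEntry - map a named memory entry into *map at
 * *address.  Pageable kernel buffers are routed through
 * IOIteratePageableMaps(); the resulting address and map are returned to the
 * caller through the in/out parameters.
 */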
kern_return_t
IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
                              mach_vm_size_t offset,
                              mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = entry;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if( ref.sharedMem && (ref.map == kernel_map) && pageable)
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( ref.map, &ref );

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}
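/*
 * IOMemoryDescriptorMapCopy - like IOMemoryDescriptorMapMemEntry but with no
 * backing memory entry; it simply reserves address space in *map (again via
 * IOIteratePageableMaps for pageable kernel maps) so that mach_vm_remap()
 * can later populate it.
 */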
kern_return_t
IOMemoryDescriptorMapCopy(vm_map_t * map,
                          IOOptionBits options,
                          mach_vm_size_t offset,
                          mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = NULL;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if (ref.map == kernel_map)
        err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
    else
        err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}
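/*
 * IOMemoryDescriptor::doMap - 64-bit mapping entry point.  The IOMemoryMap
 * to populate is passed in *__address.  kIOMapReference|kIOMapUnique requests
 * retarget an existing mapping by transposing UPLs and swapping memory
 * entries; all other requests enter the descriptor's memory entry into the
 * target map and, when a device pager is involved, fault the pages in via
 * handleFault().
 */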
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;
    unsigned int      lock_count;

    do
    {
        sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size  = round_page(mapping->fLength + pageOffset);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            // drop the recursive lock while transposing, then reacquire it
            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }

            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->fMemory->_memEntry;
                mapping->fMemory->_memEntry = me;
            }
            if (pager)
                err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            vm_map_t map = mapping->fAddressMap;
            err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
                      err, this, sourceAddr, mapping, address, offset, length);

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}
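/*
 * handleFault - populate the device pager (and, for kernel mappings, the
 * pmap) for the physical segments backing [sourceOffset, sourceOffset +
 * length).  With no addressMap the call simply blocks while the memory is
 * redirected.
 */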
IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        mach_vm_address_t       address,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    mach_vm_size_t      size;
    mach_vm_size_t      bytes;
    mach_vm_size_t      page;
    mach_vm_size_t      pageOffset;
    mach_vm_size_t      pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes   = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength    allLen;
                addr64_t            allPhys;

                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
            }
            else
            {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        /*  *** Temporary Workaround *** */

        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /*  *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address      += segLen;
        bytes        -= segLen;
        pageOffset    = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
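/*
 * doUnmap - tear down a mapping.  A zero __length means __address is really
 * the IOMemoryMap being destroyed, so the map, address and length are taken
 * from it before the range is deallocated.
 */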
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((IOMemoryMap *) __address)->fAddressMap;
        address    = ((IOMemoryMap *) __address)->fAddress;
        length     = ((IOMemoryMap *) __address)->fLength;
    }

    if ((addressMap == kernel_map)
        && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
        addressMap = IOPageableMapForAddress( address );

    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length );

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
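/*
 * redirect - mark the descriptor (and each of its mappings) redirected or
 * restored.  While redirected, faults on the memory sleep in handleFault()
 * until the redirection is lifted.
 */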
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn            err = kIOReturnSuccess;
    IOMemoryMap *       mapping = 0;
    OSIterator *        iter;

    LOCK;

    if (doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, doRedirect );
        iter->release();
    }

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

#ifndef __LP64__
    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;
#endif /* !__LP64__ */

    return( err );
}
IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//      err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)
                break;
            if (!fAddressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
                && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                err = kIOReturnSuccess;
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);

        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        && safeTask
        && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
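/*
 * IOMemoryMap::unmap - undo the vm mapping (unless the map is static or a
 * submap) and drop the vm_map reference taken in init().
 */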
IOReturn IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void IOMemoryMap::taskDied( void )
{
    LOCK;
    if (fUserClientUnmap)
        unmap();
    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}

IOReturn IOMemoryMap::userClientUnmap( void )
{
    fUserClientUnmap = true;
    return (kIOReturnSuccess);
}
// Overload the release mechanism.  All mappings must be a member of a memory
// descriptor's _mappings set.  This means that we always have 2 references on
// a mapping.  When either of these references is released we need to free
// ourselves.
void IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress IOMemoryMap::getVirtualAddress()
{
#ifndef __LP64__
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap
                && vm_map_is_64bit(fAddressMap)
                && (sizeof(IOVirtualAddress) < 8))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }
#endif /* !__LP64__ */

    return (fAddress);
}

#ifndef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t IOMemoryMap::getSize()
{
    return( fLength );
}
#endif /* !__LP64__ */

task_t IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask );
}

IOOptionBits IOMemoryMap::getMapOptions()
{
    return( fOptions );
}

IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
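/*
 * copyCompatible - return an existing mapping that can satisfy newMapping:
 * same task map, compatible protection and cache mode, and a contained
 * offset/length.  A partial match is returned as a submap of this mapping.
 */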
IOMemoryMap * IOMemoryMap::copyCompatible(
        IOMemoryMap * newMapping )
{
    task_t            task      = newMapping->getAddressTask();
    mach_vm_address_t toAddress = newMapping->fAddress;
    IOOptionBits      _options  = newMapping->fOptions;
    mach_vm_size_t    _offset   = newMapping->fOffset;
    mach_vm_size_t    _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
        && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = fOffset + _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
IOPhysicalAddress
#ifdef __LP64__
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
#else /* !__LP64__ */
IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
#endif /* !__LP64__ */
{
    IOPhysicalAddress address;

    LOCK;
#ifdef __LP64__
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options);
#else /* !__LP64__ */
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length);
#endif /* !__LP64__ */
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

#ifndef __LP64__
IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        atAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
#endif /* !__LP64__ */
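/*
 * createMappingInTask - primary mapping entry point.  A new IOMemoryMap is
 * initialized and handed to makeMapping() with kIOMap64Bit set; a zero
 * length maps the whole descriptor.  Illustrative usage from a hypothetical
 * caller with a prepared descriptor "md":
 *
 *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                 kIOMapAnywhere | kIOMapReadOnly, 0, 0);
 *     if (map) {
 *         mach_vm_address_t va = map->getAddress();
 *         // ... use va ...
 *         map->release();
 *     }
 */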
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t                  intoTask,
        mach_vm_address_t       atAddress,
        IOOptionBits            options,
        mach_vm_size_t          offset,
        mach_vm_size_t          length)
{
    IOMemoryMap * result;
    IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new IOMemoryMap;

    if( mapping
        && !mapping->init( intoTask, atAddress,
                           options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
              this, atAddress, options, offset, length);

    return (result);
}
#ifndef __LP64__ // there is only a 64 bit version for LP64
IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
#endif /* !__LP64__ */

IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                               IOOptionBits         options,
                               mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = round_page(fLength);
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                                           NULL, NULL,
                                                           &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (false && physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
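/*
 * makeMapping - back an IOMemoryMap (passed via __address with kIOMap64Bit)
 * with this descriptor: reuse a compatible existing mapping when possible,
 * honor kIOMapStatic/kIOMapUnique/kIOMapReference, and otherwise call doMap()
 * on the chosen backing descriptor.
 */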
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  /* intoTask */,
        IOVirtualAddress        __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
#endif /* !__LP64__ */

    IOMemoryDescriptor * mapDesc = 0;
    IOMemoryMap *        result  = 0;
    OSIterator *         iter;

    IOMemoryMap *  mapping = (IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            addr64_t    phys;
            IOByteCount physLen;

//          if (owner != this) continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withAddressRange(
                                phys, length, getDirection() | kIOMemoryMapperNone, NULL);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                IOMemoryMap * lookMapping;
                while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
            {
                if (result != mapping)
                {
                    mapping->release();
                    mapping = NULL;
                }
                continue;
            }
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }

        IOReturn kr;
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}
#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
bool
IOMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount length,
                                    IODirection direction)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                    IOByteCount      length,
                                    IODirection      direction,
                                    task_t           task)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalAddress(
                                    IOPhysicalAddress address,
                                    IOByteCount       length,
                                    IODirection       direction )
{
    return( false );
}

bool
IOMemoryDescriptor::initWithRanges(
                                    IOVirtualRange * ranges,
                                    UInt32           withCount,
                                    IODirection      direction,
                                    task_t           task,
                                    bool             asReference)
{
    return( false );
}

bool
IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                            UInt32            withCount,
                                            IODirection       direction,
                                            bool              asReference)
{
    return( false );
}

void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                             IOByteCount * lengthOfSegment)
{
    return( 0 );
}
#endif /* !__LP64__ */
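/*
 * serialize - emit the descriptor's ranges as an array of
 * { address, length } dictionaries.  The ranges are copied under the lock
 * first so that no allocations happen while it is held.
 */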
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject       *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    }              *vcopy;
    unsigned int    index, nRanges;
    bool            result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len  = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (result == false)
            goto bail;
    }

    result = s->addXMLEndTag("array");

bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);

    return result;
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }