/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <vm/vm_fault.h>
__BEGIN_DECLS

struct phys_entry *pmap_find_physentry(ppnum_t pa);

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
    memory_object_t        pager,
    int                    device_handle,
    vm_size_t              size,
    int                    flags);
void
device_pager_deallocate(
    memory_object_t        pager);
kern_return_t
device_pager_populate_object(
    memory_object_t        pager,
    vm_object_offset_t     offset,
    ppnum_t                phys_addr,
    vm_size_t              size);
kern_return_t
memory_object_iopl_request(
    ipc_port_t             port,
    memory_object_offset_t offset,
    vm_size_t              *upl_size,
    upl_t                  *upl_ptr,
    upl_page_info_array_t  user_page_list,
    unsigned int           *page_list_count,
    int                    *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS
#define kIOMaximumMappedIOByteCount	(512*1024*1024)

static IOMapper * gIOSystemMapper;
static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)
public:
    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;
    upl_t                redirUPL;
    ipc_port_t           redirEntry;
    IOMemoryDescriptor * owner;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:
    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits          options,
                                          IOByteCount           offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount   offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor * owner,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );

    bool initCompatible(
                IOMemoryDescriptor * memory,
                IOMemoryMap *        superMap,
                IOByteCount          offset,
                IOByteCount          length );

    bool initWithDescriptor(
                IOMemoryDescriptor * memory,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );

    IOReturn redirect(
                task_t intoTask, bool redirect );
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;	// The offset of this iopl in descriptor
    vm_offset_t fPageInfo;	// Pointer to page list or index into it
    ppnum_t fMappedBase;	// Page number of first page in this iopl
    unsigned int fPageOffset;	// Offset within first page of iopl
    unsigned int fFlags;	// Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[];
    ioPLBlock fBlocks[];
};

#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)	((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)	\
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)	(&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
extern "C" {

kern_return_t device_data_action(
               int device_handle, ipc_port_t device_pager,
               vm_prot_t protection, vm_object_offset_t offset,
               vm_size_t size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    memDesc = ref->memory;
    kr = memDesc->handleFault( device_pager, 0, 0,
                               offset, size, kIOMapDefaultCache /*?*/);

    return( kr );
}

kern_return_t device_close(
               int device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData * ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}

}	// extern "C"
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual  == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
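/*
 * Illustrative sketch (not exercised by this file): how a driver might wrap a
 * kernel buffer with withAddress() and bracket a transfer with prepare() and
 * complete().  The buffer, its length and the transfer itself are hypothetical.
 */
#if 0
static void exampleDescribeKernelBuffer(void)
{
    enum { kExampleLen = 4096 };
    void * buf = IOMalloc(kExampleLen);                     // hypothetical buffer
    if (!buf)
        return;

    IOMemoryDescriptor * md =
        IOMemoryDescriptor::withAddress(buf, kExampleLen, kIODirectionOut);
    if (md) {
        if (kIOReturnSuccess == md->prepare()) {            // wire the pages down
            // ... program hardware with md->getPhysicalSegment(0, &segLen) ...
            md->complete();                                 // balance the prepare()
        }
        md->release();
    }
    IOFree(buf, kExampleLen);
}
#endif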
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
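/*
 * Illustrative sketch: describing a scatter/gather buffer as several virtual
 * ranges in one descriptor.  The two ranges below are hypothetical; passing
 * false for asReference makes the descriptor copy the range array.
 */
#if 0
static IOMemoryDescriptor * exampleDescribeTwoRanges(vm_address_t a0, vm_address_t a1)
{
    IOVirtualRange ranges[2];
    ranges[0].address = a0;  ranges[0].length = PAGE_SIZE;
    ranges[1].address = a1;  ranges[1].length = PAGE_SIZE;

    return IOMemoryDescriptor::withRanges(ranges, 2, kIODirectionOutIn,
                                          kernel_task, false /* asReference */);
}
#endif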
/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}
// Can't leave abstract but this should never be used directly,
// so we fail it.
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
        self->release();
        self = 0;
    }
    return self;
}
IOMemoryDescriptor * IOMemoryDescriptor::
    withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor * IOGeneralMemoryDescriptor::
    withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
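/*
 * Illustrative sketch: obtaining a descriptor that stays attached to the same
 * physical pages as an existing persistent descriptor.  Whether the original
 * (retained) object or a new one comes back, the caller owns one reference.
 */
#if 0
static IOMemoryDescriptor * examplePersistentAlias(IOMemoryDescriptor * original)
{
    return IOMemoryDescriptor::withPersistentMemoryDescriptor(original);
}
#endif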
void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ | VM_PROT_WRITE;
    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                        (UInt64)range0Addr, (UInt32)actualSize, size);
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                 IOPhysicalRange * ranges,
                                 UInt32            count,
                                 IODirection       direction,
                                 bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                 IOVirtualRange * ranges,
                                 UInt32           count,
                                 IODirection      direction,
                                 task_t           task,
                                 bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * initWithOptions:
 *
 * initWithOptions is the designated initializer. Initialize an
 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 * from a given task, several physical ranges, a UPL from the ubc
 * system or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;
        count = 1;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:	// Neither Physical nor UPL should have a task
        mapper = kIOMapperNone;
        /* fall thru */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;	/* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        while (_wireCount)
            complete();
        if (_kernPtrAligned)
            unmapFromKernel();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        if (_memEntry)
            { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;	// No Mapper
    else if (!mapper) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

    // DEPRECATED variable initialisation
    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;

        _wireCount++;	// UPLs start out life wired

        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags  = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
            assert(kIOMemoryTypeUIO != type);

            _rangesIsAllocated = true;
            _ranges.v = IONew(IOVirtualRange, count);
            if (!_ranges.v)
                return false;
            bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            UInt32 len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len > length);	// Check for 32 bit wrap around
            length = len;
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if (kIOMemoryTypePhysical == type)
            _wireCount++;	// Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    if( reserved)
        reserved->memory = 0;

    while (_wireCount)
        complete();
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
        complete();
    }

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);	// @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    assert(!remaining);

    return length - remaining;
}
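/*
 * Illustrative sketch: copying the first bytes of a descriptor into a local
 * buffer with readBytes().  The descriptor and sizes are hypothetical; the
 * return value is the number of bytes actually copied.
 */
#if 0
static IOByteCount examplePeekHeader(IOMemoryDescriptor * md, void * hdr, IOByteCount hdrLen)
{
    if (hdrLen > md->getLength())
        hdrLen = md->getLength();
    return md->readBytes(0, hdr, hdrLen);
}
#endif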
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);	// @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);
    if (offset < _length) // (within bounds?)
    {
        if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
            unsigned int ind;

            // Physical address based memory descriptor

            // Find offset within descriptor and make it relative
            // to the current _range.
            for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
                offset -= _ranges.p[ind].length;

            IOPhysicalRange cur = _ranges.p[ind];
            address = cur.address + offset;
            length  = cur.length  - offset;

            // see how far we can coalesce ranges
            for (++ind; ind < _rangesCount; ind++) {
                cur = _ranges.p[ind];

                if (address + length != cur.address)
                    break;

                length += cur.length;
            }

            // @@@ gvdl: should be assert(address);
            // but can't as NVidia GeForce creates a bogus physical mem
            assert(address
                || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
        }
        else do {

            // We need wiring & we are wired.
            assert(_wireCount);
            if (!_wireCount)
            {
                panic("IOGMD: not wired for getPhysicalSegment()");
                continue;
            }

            assert(_memoryEntries);

            ioGMDData * dataP = getDataP(_memoryEntries);
            const ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
            upl_page_info_t *pageList = getPageList(dataP);

            assert(numIOPLs > 0);

            // Scan through iopl info blocks looking for block containing offset
            for (ind = 1; ind < numIOPLs; ind++) {
                if (offset < ioplList[ind].fIOMDOffset)
                    break;
            }

            // Go back to actual range as search goes past it
            ioPLBlock ioplInfo = ioplList[ind - 1];

            if (ind < numIOPLs)
                length = ioplList[ind].fIOMDOffset;
            else
                length = _length;
            length -= offset;	// Remainder within iopl

            // Subtract offset till this iopl in total list
            offset -= ioplInfo.fIOMDOffset;

            // This is a mapped IOPL so we just need to compute an offset
            // relative to the mapped base.
            if (ioplInfo.fMappedBase) {
                offset += (ioplInfo.fPageOffset & PAGE_MASK);
                address = ptoa_32(ioplInfo.fMappedBase) + offset;
                continue;
            }

            // Currently the offset is rebased into the current iopl.
            // Now add the iopl 1st page offset.
            offset += ioplInfo.fPageOffset;

            // For external UPLs the fPageInfo field points directly to
            // the upl's upl_page_info_t array.
            if (ioplInfo.fFlags & kIOPLExternUPL)
                pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
            else
                pageList = &pageList[ioplInfo.fPageInfo];

            // Check for direct device non-paged memory
            if ( ioplInfo.fFlags & kIOPLOnDevice ) {
                address = ptoa_32(pageList->phys_addr) + offset;
                continue;
            }

            // Now we need to compute the index into the pageList
            ind = atop_32(offset);
            offset &= PAGE_MASK;

            IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
            address = ptoa_32(pageAddr) + offset;

            // Check for the remaining data in this upl being longer than the
            // remainder on the current page. This should be checked for
            // contiguous pages
            if (length > PAGE_SIZE - offset) {
                // See if the next page is contiguous. Stop looking when we hit
                // the end of this upl, which is indicated by the
                // contigLength >= length.
                IOByteCount contigLength = PAGE_SIZE - offset;

                // Look for contiguous segment
                while (contigLength < length
                &&     ++pageAddr == pageList[++ind].phys_addr) {
                    contigLength += PAGE_SIZE;
                }
                if (length > contigLength)
                    length = contigLength;
            }
        } while (false);
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return address;
}
addr64_t IOMemoryDescriptor::getPhysicalSegment64
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
    {
        IOByteCount origLen;

        phys64 = gIOSystemMapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}
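/*
 * Illustrative sketch: walking a prepared descriptor segment by segment with
 * getPhysicalSegment64(), the way a DMA engine's scatter/gather list would be
 * built.  What is done with each segment is hypothetical.
 */
#if 0
static void exampleWalkSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength()) {
        IOByteCount segLen;
        addr64_t    segAddr = md->getPhysicalSegment64(offset, &segLen);
        if (!segAddr)
            break;
        // ... append (segAddr, segLen) to a hardware scatter/gather list ...
        offset += segLen;
    }
}
#endif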
IOPhysicalAddress IOGeneralMemoryDescriptor::
getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    assert(offset <= _length);

    if ( type == kIOMemoryTypeUPL)
        return super::getSourceSegment( offset, lengthOfSegment );
    else if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        user_addr_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
        else
            length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }

        control = VM_PURGABLE_SET_STATE;
        switch (newState)
        {
            case kIOMemoryPurgeableKeepCurrent:
                control = VM_PURGABLE_GET_STATE;
                break;

            case kIOMemoryPurgeableNonVolatile:
                state = VM_PURGABLE_NONVOLATILE;
                break;
            case kIOMemoryPurgeableVolatile:
                state = VM_PURGABLE_VOLATILE;
                break;
            case kIOMemoryPurgeableEmpty:
                state = VM_PURGABLE_EMPTY;
                break;
            default:
                err = kIOReturnBadArgument;
                break;
        }

        if (kIOReturnSuccess != err)
            break;

        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                switch (state)
                {
                    case VM_PURGABLE_NONVOLATILE:
                        state = kIOMemoryPurgeableNonVolatile;
                        break;
                    case VM_PURGABLE_VOLATILE:
                        state = kIOMemoryPurgeableVolatile;
                        break;
                    case VM_PURGABLE_EMPTY:
                        state = kIOMemoryPurgeableEmpty;
                        break;
                    default:
                        state = kIOMemoryPurgeableNonVolatile;
                        err = kIOReturnNotReady;
                        break;
                }
            }
            *oldState = state;
        }
    }
    while (false);

    return (err);
}
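/*
 * Illustrative sketch: marking a cache buffer volatile while idle and
 * reclaiming it before reuse.  kIOMemoryPurgeableEmpty coming back from the
 * second call means the pages were discarded and must be regenerated.
 */
#if 0
static bool exampleReclaimCache(IOMemoryDescriptor * md)
{
    IOOptionBits oldState;

    md->setPurgeable(kIOMemoryPurgeableVolatile, NULL);          // while idle
    // ... later, before touching the data again ...
    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    return (oldState != kIOMemoryPurgeableEmpty);                // still intact?
}
#endif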
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
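/*
 * Illustrative sketch: flushing the processor cache for a buffer that a device
 * has written by incoherent DMA, before the CPU reads it.  Flushing the whole
 * descriptor here is a simplification; a real driver would pass the dirty
 * offset and length.
 */
#if 0
static void exampleFlushAfterDeviceWrite(IOMemoryDescriptor * md)
{
    md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
}
#endif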
#ifdef __ppc__
extern vm_offset_t		static_memory_end;
#define io_kernel_static_end	static_memory_end
#else
extern vm_offset_t		first_avail;
#define io_kernel_static_end	first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
	vm_map_t		/* map */,
	vm_address_t		offset,
	vm_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count)
{
    unsigned int pageCount, page;
    ppnum_t phys;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
    }

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;	// May no longer be valid so let's not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        user_addr_t startPage;
        IOByteCount numBytes;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = (short) startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
            if (curMap)
                theMap = curMap;
            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }
            else
                theMap = NULL;

            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo);
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          &ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            error = kIOReturnNoMemory;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else {
                iopl.fFlags = 0;
                if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partial created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for a multiple iopl's in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    return error;
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method need not
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (!_wireCount
    &&  (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}
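/*
 * Illustrative sketch: prepare() and complete() nest via _wireCount, so each
 * successful prepare() must be balanced by exactly one complete().  A typical
 * pairing in a driver's I/O start and finish paths (names hypothetical):
 */
#if 0
static IOReturn exampleStartIO(IOMemoryDescriptor * md)
{
    return md->prepare(kIODirectionNone);     // wire before handing to hardware
}

static void exampleFinishIO(IOMemoryDescriptor * md)
{
    md->complete();                           // unwire when the transfer is done
}
#endif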
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        IOOptionBits type = _flags & kIOMemoryTypeMask;

        if (kIOMemoryTypePhysical == type) {
            /* kIOMemoryTypePhysical */
            // DO NOTHING
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }

            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
IOReturn IOGeneralMemoryDescriptor::doMap(
	vm_map_t		addressMap,
	IOVirtualAddress *	atAddress,
	IOOptionBits		options,
	IOByteCount		sourceOffset,
	IOByteCount		length )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len = 0;

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task
     && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
     && (1 == _rangesCount) && (0 == sourceOffset)
     && range0Addr && (length <= range0Len) ) {
        if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
            return kIOReturnOverrun;	// Doesn't fit in 32bit return field
        else {
            *atAddress = range0Addr;
            return( kIOReturnSuccess );
        }
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            kr = mach_make_memory_entry_64(get_task_map(_task),
                        &actualSize, range0Addr,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
                IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                        range0Addr, (UInt32) actualSize, size);
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( sourceOffset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /*What cache mode do we need*/
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    if( 0 == sharedMem)
        kr = kIOReturnVMError;
    else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	logical,
	IOByteCount		length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {

        IOOptionBits type = _flags & kIOMemoryTypeMask;
        user_addr_t range0Addr;
        IOByteCount range0Len;

        getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
        if (logical == range0Addr && length <= range0Len)
            return( kIOReturnSuccess );
    }

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::initCompatible(
	IOMemoryDescriptor *	_memory,
	IOMemoryMap *		_superMap,
	IOByteCount		_offset,
	IOByteCount		_length )
{
    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory	= _memory;
    _superMap->retain();
    superMap	= _superMap;

    offset	= _offset;
    if( _length)
        length	= _length;
    else
        length	= _memory->getLength();

    options	= superMap->getMapOptions();
    logical	= superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::initWithDescriptor(
	IOMemoryDescriptor *	_memory,
	task_t			intoTask,
	IOVirtualAddress	toAddress,
	IOOptionBits		_options,
	IOByteCount		_offset,
	IOByteCount		_length )
{
    bool ok;
    bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));

    if ((!_memory) || (!intoTask))
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    if (!redir)
    {
        if (!super::init())
            return(false);
        addressMap  = get_task_map(intoTask);
        if( !addressMap)
            return( false);
        vm_map_reference(addressMap);
        addressTask = intoTask;
        logical     = toAddress;
        options     = _options;
    }

    _memory->retain();

    offset	= _offset;
    if( _length)
        length	= _length;
    else
        length	= _memory->getLength();

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
                                                  _options, offset, length ));
    if (ok || redir)
    {
        if (memory)
            memory->release();
        memory	= _memory;
        logical = toAddress;
    }
    else
    {
        _memory->release();
        if (!redir)
        {
            logical = 0;
            memory  = 0;
            vm_map_deallocate(addressMap);
            addressMap = 0;
        }
    }

    return( ok );
}
/* LP64todo - these need to expand */
struct IOMemoryDescriptorMapAllocRef {
    ipc_port_t		sharedMem;
    vm_size_t		size;
    vm_offset_t		mapped;
    IOByteCount		sourceOffset;
    IOOptionBits	options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn			    err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot, // cur
                          prot, // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}
IOReturn IOMemoryDescriptor::doMap(
	vm_map_t		addressMap,
	IOVirtualAddress *	atAddress,
	IOOptionBits		options,
	IOByteCount		sourceOffset,
	IOByteCount		length )
{
    IOReturn		err = kIOReturnSuccess;
    memory_object_t	pager;
    vm_address_t	logical;
    IOByteCount		pageOffset;
    IOPhysicalAddress	sourceAddr;
    IOMemoryDescriptorMapAllocRef	ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
            ref.mapped = mapping->getVirtualAddress();

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = length;
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                  | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            err = upl_transpose(redirUPL2, mapping->redirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }

            // swap the memEntries since they now refer to different vm_objects
            void * me = _memEntry;
            _memEntry = mapping->memory->_memEntry;
            mapping->memory->_memEntry = me;
        }
        else
        {
            logical = *atAddress;
            if( options & kIOMapAnywhere)
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            else {
                ref.mapped = trunc_page_32( logical );
                if( (logical - ref.mapped) != pageOffset) {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
            else
                err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
        }

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}
enum {
    kIOMemoryRedirected	= 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
	void *			_pager,
	vm_map_t		addressMap,
	IOVirtualAddress	address,
	IOByteCount		sourceOffset,
	IOByteCount		length,
	IOOptionBits		options )
{
    IOReturn		err = kIOReturnSuccess;
    memory_object_t	pager = (memory_object_t) _pager;
    vm_size_t		size;
    vm_size_t		bytes;
    vm_size_t		page;
    IOByteCount		pageOffset;
    IOByteCount		pagerOffset;
    IOPhysicalLength	segLen;
    addr64_t		physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
            } else {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
        if( err)
            break;

        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution */
        /* of the mappings created above.  Need for this is in absolute */
        /* violation of the basic tenet that the pmap layer is a cache. */
        /* Further, it implies a serious I/O architectural violation on */
        /* the part of some user of the mapping.  As of this writing, */
        /* the call to vm_fault is needed because the NVIDIA driver */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be */
        /* fixed as soon as possible.  The NVIDIA driver should not */
        /* need to query for this info as it should know from the doMap */
        /* call where the physical memory is mapped.  When a query is */
        /* necessary to find a physical mapping, it should be done */
        /* through an iokit call which includes the mapped memory */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /* *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	logical,
	IOByteCount		length )
{
    IOReturn	err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn		err = kIOReturnSuccess;
    _IOMemoryMap *	mapping = 0;
    OSIterator *	iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, doRedirect );
            iter->release();
        }
    } while( false );

    if (!doRedirect)
        WAKEUP;

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;
        if( logical && addressMap
        && (!safeTask || (get_task_map(safeTask) != addressMap))
        && (0 == (options & kIOMapStatic)))
        {
            IOUnmapPages( addressMap, logical, length );
            if(!doRedirect && safeTask
             && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
            {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
        }
        UNLOCK;
    }

    if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
     && safeTask
     && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
        memory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn	err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}
void _IOMemoryMap::taskDied( void )
{
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    super::taggedRelease(tag, 2);
}
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        memory->removeMapping( this);
        memory->release();
    }

    if (owner && (owner != memory))
        owner->removeMapping(this);

    if( superMap)
        superMap->release();

    if (redirUPL) {
        upl_commit(redirUPL, NULL, 0);
        upl_deallocate(redirUPL);
    }

    super::free();
}
IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::copyCompatible(
		IOMemoryDescriptor *	owner,
		task_t			task,
		IOVirtualAddress	toAddress,
		IOOptionBits		_options,
		IOByteCount		_offset,
		IOByteCount		_length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( options & kIOMapUnique)
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

    address = memory->getPhysicalSegment( offset + _offset, _length );

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
}
void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
	task_t			intoTask,
	IOVirtualAddress	mapAddress,
	IOOptionBits		options )
{
    _IOMemoryMap * newMap;

    newMap = new _IOMemoryMap;

    LOCK;

    if( newMap
     && !newMap->initWithDescriptor( this, intoTask, mapAddress,
                                     options | kIOMapStatic, 0, getLength() )) {
        newMap->release();
        newMap = 0;
    }

    addMapping( newMap);

    UNLOCK;

    return( newMap);
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return( makeMapping( this, kernel_task, 0,
                options | kIOMapAnywhere,
                0, getLength() ));
}
IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
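// Retarget an existing kIOMapUnique mapping at different backing memory:
// access to the pages is first blocked with a UPL, the old pages are
// unmapped, and the mapping is then rebuilt against newBackingMemory via
// makeMapping(... kIOMapUnique | kIOMapReference ...).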
IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    IOReturn             err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    if (logical && addressMap) do
    {
        if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
        {
            physMem = memory;
            physMem->retain();
        }

        if (!redirUPL)
        {
            vm_size_t size = length;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
                                            NULL, NULL,
                                            &flags))
                redirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( addressMap, logical, length );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != memory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference,
                                                          offset, length))
                    err = kIOReturnError;
            }
            if (redirUPL)
            {
                upl_commit(redirUPL, NULL, 0);
                upl_deallocate(redirUPL);
                redirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    if (physMem)
        physMem->release();

    return (err);
}
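// makeMapping() is the common back end for map()/setMapping(): it either
// reuses a compatible existing mapping (copyCompatible), rebinds a caller-
// supplied mapping to new backing memory (kIOMapReference/kIOMapUnique),
// or constructs and registers a brand-new _IOMemoryMap.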
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       mapping = 0;
    OSIterator *         iter;

    do
    {
        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

            if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
            }

            if (kIOMapReference & options)
            {
                mapping = (_IOMemoryMap *) toAddress;
                mapping->retain();

                uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
                pageOffset1 -= trunc_page_32( pageOffset1 );

                uint32_t pageOffset2 = mapping->getVirtualAddress();
                pageOffset2 -= trunc_page_32( pageOffset2 );

                if (pageOffset1 != pageOffset2)
                    IOLog("::redirect can't map offset %x to addr %x\n",
                          pageOffset1, mapping->getVirtualAddress());

                if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                                  offset, length ))
                {
                    IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
                }

                if (mapping->owner)
                    mapping->owner->removeMapping(mapping);
                continue;
            }
        }
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                    break;
            }
            iter->release();
        }

        if( mapping || (options & kIOMapReference))
            continue;

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                    offset, length )) {
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }
    }
    while( false );

    if (mapping)
    {
        mapping->owner = owner;
        owner->addMapping( mapping);
    }

    if (mapDesc)
        mapDesc->release();

    return( mapping);
}
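// _mappings is created lazily on the first addMapping() call and holds a
// reference on each _IOMemoryMap registered against this descriptor.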
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
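// An IOSubMemoryDescriptor exposes a window (_start .. _start + _length)
// onto a parent descriptor: every offset a client passes in is shifted by
// _start and every returned length is clipped to the remaining sub-range.
//
// Illustrative only (not part of this file): clients normally obtain one via
//
//     IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
//             parent, start, size, parent->getDirection());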
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( (offset + length) > parent->getLength())
        return( false );

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */
        _parent->release();
        _parent = 0;
    }

    _parent    = parent;
    _parent->retain();
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                           IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOReturn IOSubMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    if( sourceOffset >= _length)
        return( kIOReturnOverrun );
    return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
}
IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                         IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
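// readBytes/writeBytes (and performOperation below) simply shift the caller's
// offset by _start and clip the length to what remains of the sub-range
// before forwarding the request to the parent descriptor.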
IOByteCount
IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                 void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );

    return( byteCount );
}
IOByteCount
IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                  const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );

    return( byteCount );
}
IOReturn
IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                     IOOptionBits * oldState )
{
    IOReturn err;

    err = _parent->setPurgeable( newState, oldState );

    return( err );
}

IOReturn
IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                         IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );

    return( err );
}
IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection )
{
    IOReturn err;

    err = _parent->prepare( forDirection );

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection )
{
    IOReturn err;

    err = _parent->complete( forDirection );

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMapUnique & options))
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
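// The standard initWith* initializers are disabled for sub-descriptors;
// the only supported setup path is initSubRange() above.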
bool
IOSubMemoryDescriptor::initWithAddress(void * address,
                                       IOByteCount withLength, IODirection direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount withLength, IODirection direction,
                                       task_t withTask )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                       IOPhysicalAddress address,
                                       IOByteCount withLength, IODirection direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                       IOVirtualRange * ranges,
                                       UInt32 withCount, IODirection direction,
                                       task_t withTask, bool asReference )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                       UInt32 withCount, IODirection direction,
                                       bool asReference )
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
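// Serialization: an IOGeneralMemoryDescriptor is emitted as an XML array of
// { "address", "length" } dictionaries, one per range.  The range data is
// copied out first so that no allocation happens while the descriptor's
// volatile state is being read.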
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        goto bail;
    }
    // Serialize the copied ranges; the addresses are user addresses.
    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result)
            goto bail;
    }
    result = s->addXMLEndTag("array");

bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);

    return result;
}
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s)
        return( false );
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }