/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <vm/vm_fault.h>

struct phys_entry  *pmap_find_physentry(ppnum_t pa);
extern ppnum_t      pmap_find_phys(pmap_t pmap, addr64_t va);
void                ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
    memory_object_t        pager,

device_pager_deallocate(

device_pager_populate_object(
    memory_object_t        pager,
    vm_object_offset_t     offset,

memory_object_iopl_request(
    memory_object_offset_t offset,
    upl_page_info_array_t  user_page_list,
    unsigned int           *page_list_count,

unsigned int  IOTranslateCacheBits(struct phys_entry *pp);
#define kIOMaximumMappedIOByteCount    (512*1024*1024)

static IOMapper * gIOSystemMapper;
static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOVirtualAddress     logical;
    IOOptionBits         options;
    ipc_port_t           redirEntry;
    IOMemoryDescriptor * owner;

    virtual void taggedRelease(const void *tag = 0) const;

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits          options,
                                          IOByteCount           offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor * owner,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          length );

                IOMemoryDescriptor * memory,
                IOMemoryMap *        superMap,
                IOByteCount          length );

    bool initWithDescriptor(
                IOMemoryDescriptor * memory,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          length );

                task_t               intoTask, bool redirect );
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t  fPageInfo;     // Pointer to page list or index into it
    ppnum_t      fMappedBase;   // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags

    unsigned int    fPageCnt;
    upl_page_info_t fPageList[];

#define getDataP(osd)    ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)   ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)    \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)   (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
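//
// Layout note (illustrative, inferred from the accessor macros above): the
// OSData buffer returned by getBytesNoCopy() holds a single ioGMDData header,
// immediately followed by fPageCnt upl_page_info_t entries (fPageList), and
// then a variable number of ioPLBlock records, e.g.
//
//   ioGMDData * dataP   = getDataP(_memoryEntries);
//   ioPLBlock * iopls   = getIOPLList(dataP);              // after the page list
//   UInt        numIOPL = getNumIOPL(_memoryEntries, dataP);
//
// computeDataSize(pages, upls) sizes that buffer for a given page and iopl count.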
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )

kern_return_t device_data_action(
               ipc_port_t              device_pager,
               vm_prot_t               protection,
               vm_object_offset_t      offset,

    struct ExpansionData {
        unsigned int pagerContig:1;
        unsigned int unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    memDesc = ref->memory;
    kr = memDesc->handleFault( device_pager, 0, 0,
                               offset, size, kIOMapDefaultCache /*?*/);
kern_return_t device_close(

    struct ExpansionData {
        unsigned int pagerContig:1;
        unsigned int unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData * ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual  == type);
    if (kIOMemoryTypeUIO == type) {
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else {
        IOVirtualRange cur = r.v[ind];
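// Typical call (illustrative): for range index 0 of a virtual or UIO
// descriptor this returns the user address and byte length of that range:
//
//   user_addr_t      addr;
//   IOPhysicalLength len;
//   getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, 0);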
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Create a new IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is assumed.
 */
IOMemoryDescriptor::withAddress(void *      address,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor::withAddress(vm_address_t address,
                                IODirection  direction,

    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithAddress(address, length, direction, task))
IOMemoryDescriptor::withPhysicalAddress(
                        IOPhysicalAddress address,
                        IODirection       direction )
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithPhysicalAddress(address, length, direction)) {

IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                IODirection      direction,

    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithRanges(ranges, withCount, direction, task, asReference))
/*
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor::withOptions(void * buffers,

    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper))

// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         IOOptionBits options,

    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        IODirection       direction,

    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))

IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {

IOMemoryDescriptor * IOMemoryDescriptor::
    withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
IOMemoryDescriptor * IOGeneralMemoryDescriptor::
    withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
        && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {

        IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                    (UInt64)range0Addr, (UInt32)actualSize, size);
        ipc_port_release_send( sharedMem );

    return MACH_PORT_NULL;
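// Note (descriptive, based on the code above): createNamedEntry() wraps the
// descriptor's first virtual range in a Mach named memory entry
// (mach_make_memory_entry_64 with MAP_MEM_NAMED_REUSE). If the entry returned
// matches the existing _memEntry, withPersistentMemoryDescriptor() simply
// retains the original descriptor and drops the extra send right; otherwise a
// new descriptor is initialised against the shared entry.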
/*
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is assumed.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,

    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                IODirection       direction,

    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

IOGeneralMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                IODirection      direction,

    IOOptionBits mdOpts = direction;

        mdOpts |= kIOMemoryAsReference;

        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;

        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 * from a given task, several physical ranges, an UPL from the ubc
 * system or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           IOOptionBits options,

    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;

        // Now grab the original task and whatever mapper was previously used
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:

    case kIOMemoryTypePhysical:  // Neither Physical nor UPL should have a task
        mapper = kIOMapperNone;

    case kIOMemoryTypeUPL:

        return false;   /* bad argument */
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */

        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;     // No Mapper
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    // DEPRECATED variable initialisation
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);

    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;

    if (kIOMemoryTypeUPL == type) {

        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;

        _wireCount++;   // UPLs start out life wired

        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];

            iopl.fMappedBase = mapper->iovmAlloc(_pages);
            mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);

            iopl.fMappedBase = 0;

        iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;

            assert(kIOMemoryTypeUIO != type);

            _rangesIsAllocated = true;
            _ranges.v = IONew(IOVirtualRange, count);
            bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;

        for (unsigned ind = 0; ind < count; ind++) {

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));

            assert(len > length);    // Check for 32 bit wrap around

        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed
        if (kIOMemoryTypePhysical == type)
            _wireCount++;    // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */

            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
                && prepare() != kIOReturnSuccess)
void IOGeneralMemoryDescriptor::free()
{
        reserved->memory = 0;

        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
        ipc_port_release_send( (ipc_port_t) _memEntry );
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}

/*
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const

/*
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const

void IOMemoryDescriptor::setTag( IOOptionBits tag )

IOOptionBits IOMemoryDescriptor::getTag( void )
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl

    remaining = length = min(length, _length - offset);
    while (remaining) {    // (process another target segment?)

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);

        // Clip segment length to remaining
        if (srcLen > remaining)

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

    return length - remaining;
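// Illustrative use from a kernel client (names hypothetical): copy the first
// 512 bytes described by a prepared descriptor into a local buffer.
//
//   UInt8       buf[512];
//   IOByteCount got = md->readBytes(0, buf, sizeof(buf));
//   // 'got' is the byte count actually copied, clipped to getLength() - offset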
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl

    remaining = length = min(length, _length - offset);
    while (remaining) {    // (process another target segment?)

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);

        // Clip segment length to remaining
        if (dstLen > remaining)

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        remaining -= dstLen;

    return length - remaining;
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}

IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);
    if (offset < _length) // (within bounds?)

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor

        // Find offset within descriptor and make it relative
        // to the current _range.
        for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
            offset -= _ranges.p[ind].length;

        IOPhysicalRange cur = _ranges.p[ind];
        address = cur.address + offset;
        length  = cur.length  - offset;

        // see how far we can coalesce ranges
        for (++ind; ind < _rangesCount; ind++) {
            cur = _ranges.p[ind];

            if (address + length != cur.address)

            length += cur.length;

        // @@@ gvdl: should be assert(address);
        // but can't as NVidia GeForce creates a bogus physical mem
            || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));

        // We need wiring & we are wired.
            panic("IOGMD: not wired for getPhysicalSegment()");

        assert(_memoryEntries);

        ioGMDData * dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);
        // Scan through iopl info blocks looking for block containing offset
        for (ind = 1; ind < numIOPLs; ind++) {
            if (offset < ioplList[ind].fIOMDOffset)

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];

            length = ioplList[ind].fIOMDOffset;
        length -= offset;    // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= ioplInfo.fIOMDOffset;

        // This is a mapped IOPL so we just need to compute an offset
        // relative to the mapped base.
        if (ioplInfo.fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = ptoa_32(ioplInfo.fMappedBase) + offset;

        // Currently the offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_32(pageList->phys_addr) + offset;

        // Now we need to compute the index into the pageList
        ind = atop_32(offset);
        offset &= PAGE_MASK;

        IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
        address = ptoa_32(pageAddr) + offset;

        // Check for the remaining data in this upl being longer than the
        // remainder on the current page. This should be checked for
        if (length > PAGE_SIZE - offset) {
            // See if the next page is contiguous. Stop looking when we hit
            // the end of this upl, which is indicated by the
            // contigLength >= length.
            IOByteCount contigLength = PAGE_SIZE - offset;

            // Look for contiguous segment
            while (contigLength < length
                && ++pageAddr == pageList[++ind].phys_addr) {
                contigLength += PAGE_SIZE;

            if (length > contigLength)
                length = contigLength;

    if (lengthOfSegment)
        *lengthOfSegment = length;
addr64_t IOMemoryDescriptor::getPhysicalSegment64
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);

    if (gIOSystemMapper)
    {
        IOByteCount origLen;

        phys64 = gIOSystemMapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;
IOPhysicalAddress IOGeneralMemoryDescriptor::
        getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    assert(offset <= _length);

    if ( type == kIOMemoryTypeUPL)
        return super::getSourceSegment( offset, lengthOfSegment );
    else if ( offset < _length) // (within bounds?)
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;

        // Find starting address within the vector of ranges
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
            offset -= length; // (make offset relative)

        // Now that we have the starting range,
        // lets find the last contiguous range
        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)

        address = (IOPhysicalAddress) addr;    // Truncate address to 32bit

    if ( lengthOfSegment )  *lengthOfSegment = length;
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);

    panic("IOGMD::getVirtualSegment deprecated");
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits   newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;

        err = kIOReturnNotReady;

            control = VM_PURGABLE_SET_STATE;

            case kIOMemoryPurgeableKeepCurrent:
                control = VM_PURGABLE_GET_STATE;

            case kIOMemoryPurgeableNonVolatile:
                state = VM_PURGABLE_NONVOLATILE;
            case kIOMemoryPurgeableVolatile:
                state = VM_PURGABLE_VOLATILE;
            case kIOMemoryPurgeableEmpty:
                state = VM_PURGABLE_EMPTY;

                err = kIOReturnBadArgument;

        if (kIOReturnSuccess != err)

        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

        if (kIOReturnSuccess == err)

                case VM_PURGABLE_NONVOLATILE:
                    state = kIOMemoryPurgeableNonVolatile;
                case VM_PURGABLE_VOLATILE:
                    state = kIOMemoryPurgeableVolatile;
                case VM_PURGABLE_EMPTY:
                    state = kIOMemoryPurgeableEmpty;

                    state = kIOMemoryPurgeableNonVolatile;
                    err = kIOReturnNotReady;
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;

        return (kIOReturnUnsupported);

    remaining = length = min(length, getLength() - offset);
    // (process another target segment?)

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);

        // Clip segment length to remaining
        if (dstLen > remaining)

        (*func)(dstAddr64, dstLen);

        remaining -= dstLen;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
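// Illustrative use (names hypothetical): flush processor caches for a range
// that a non-coherent DMA engine is about to read:
//
//   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
//
// kIOMemoryIncoherentIOFlush and kIOMemoryIncoherentIOStore dispatch to the
// dcache_incoherent_io_* routines declared above; any other option returns
// kIOReturnUnsupported.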
extern vm_offset_t            static_memory_end;
#define io_kernel_static_end  static_memory_end

extern vm_offset_t            first_avail;
#define io_kernel_static_end  first_avail

static kern_return_t
io_get_kernel_static_upl(
        vm_address_t            offset,
        vm_size_t               *upl_size,
        upl_page_info_array_t   page_list,
        unsigned int            *count)
{
    unsigned int pageCount, page;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));

        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
    }

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnNoMemory;
    ppnum_t mapBase = 0;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;  // May no longer be valid so lets not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;

    case kIODirectionIn:
        uplFlags = 0;    // i.e. ~UPL_COPYOUT_FROM
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

    // Find the appropriate vm_map for the given task
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        { curMap = get_task_map(_task); }

    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        user_addr_t startPage;
        IOByteCount numBytes;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = (short) startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

            iopl.fMappedBase = mapBase + pageIndex;

            iopl.fMappedBase = 0;
        // Iterate over the current range, creating UPLs
            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;

            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);

            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
            }
            else {
                error = vm_map_create_upl(theMap,

            if (error != KERN_SUCCESS)
                error = kIOReturnNoMemory;

            if (baseInfo->device) {
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    iopl.fMappedBase = 0;

                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partially created and unsaved iopl
                upl_abort(iopl.fIOPL, 0);
                upl_deallocate(iopl.fIOPL);
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                    iopl.fMappedBase = mapBase + pageIndex;

                mdOffset += numBytes;

    return kIOReturnSuccess;
    dataP = getDataP(_memoryEntries);
    UInt done = getNumIOPL(_memoryEntries, dataP);
    ioPLBlock *ioplList = getIOPLList(dataP);

    for (UInt range = 0; range < done; range++)
    {
        if (ioplList[range].fIOPL) {
            upl_abort(ioplList[range].fIOPL, 0);
            upl_deallocate(ioplList[range].fIOPL);
        }
    }
    (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

    if (mapper && mapBase)
        mapper->iovmFree(mapBase, _pages);
/*
 * Prepare the memory for an I/O transfer. This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer. The complete() method completes the processing of
 * the memory after the I/O transfer finishes. This method needn't be
 * called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

        && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);

    return kIOReturnSuccess;
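// Illustrative pairing (names hypothetical): wire a buffer for the lifetime
// of an I/O, then undo the wiring once the transfer is done:
//
//   if (kIOReturnSuccess == md->prepare(kIODirectionOut)) {
//       /* program the DMA engine, wait for completion */
//       md->complete(kIODirectionOut);
//   }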
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
        return kIOReturnSuccess;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (kIOMemoryTypePhysical == type) {
        /* kIOMemoryTypePhysical */

        ioGMDData * dataP = getDataP(_memoryEntries);
        ioPLBlock *ioplList = getIOPLList(dataP);
        UInt count = getNumIOPL(_memoryEntries, dataP);

        if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
            dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

        // Only complete iopls that we created which are for TypeVirtual
        if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
            for (UInt ind = 0; ind < count; ind++)
                if (ioplList[ind].fIOPL) {
                    upl_commit(ioplList[ind].fIOPL, 0, 0);
                    upl_deallocate(ioplList[ind].fIOPL);
                }
        }

        (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

    return kIOReturnSuccess;
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset,
        IOByteCount        length )
{
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len = 0;

        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
        && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && range0Addr && (length <= range0Len) ) {
        if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
            return kIOReturnOverrun;    // Doesn't fit in 32bit return field

        *atAddress = range0Addr;
        return( kIOReturnSuccess );

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

            memory_object_size_t actualSize = size;
            kr = mach_make_memory_entry_64(get_task_map(_task),
                        &actualSize, range0Addr,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
                IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                            range0Addr, (UInt32) actualSize, size);

                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;
            memory_object_t pager;
            unsigned int    flags = 0;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( sourceOffset, &segLen );

                reserved = IONew( ExpansionData, 1 );

            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                    flags = IODefaultCacheBits(pa);

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,

                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                        size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }

            if( pager && sharedMem)
                reserved->devicePager = pager;

                IODelete( reserved, ExpansionData, 1 );

        _memEntry = (void *) sharedMem;

            kr = kIOReturnVMError;

        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {

        IOOptionBits type = _flags & kIOMemoryTypeMask;
        user_addr_t range0Addr;
        IOByteCount range0Len;

        getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
        if (logical == range0Addr && length <= range0Len)
            return( kIOReturnSuccess );
    }

    return( super::doUnmap( addressMap, logical, length ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor * _memory,
        IOMemoryMap *        _superMap,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    if( (_offset + _length) > _superMap->getLength())

    _superMap->retain();
    superMap = _superMap;

    length  = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor * _memory,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));

    if ((!_memory) || (!intoTask))

    if( (_offset + _length) > _memory->getLength())

        addressMap = get_task_map(intoTask);

        vm_map_reference(addressMap);
        addressTask = intoTask;
        logical     = toAddress;

    length = _memory->getLength();

    if( options & kIOMapStatic)

        ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
                                                  _options, offset, length ));

        logical = toAddress;

            vm_map_deallocate(addressMap);
/* LP64todo - these need to expand */
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t   sharedMem;
    IOByteCount  sourceOffset;
    IOOptionBits options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;

    if( ref->sharedMem) {

        vm_prot_t prot = VM_PROT_READ
                        | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

        // set memory entry cache
        vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
        switch (ref->options & kIOMapCacheMask)
        {
            case kIOMapInhibitCache:
                SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);

            case kIOMapWriteThruCache:
                SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);

            case kIOMapWriteCombineCache:
                SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);

            case kIOMapCopybackCache:
                SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);

            case kIOMapDefaultCache:
                SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
        }

        vm_size_t unused = 0;

        err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                      memEntryCacheMode, NULL, ref->sharedMem );
        if (KERN_SUCCESS != err)
            IOLog("MAP_MEM_ONLY failed %d\n", err);

                        ref->size, 0 /* mask */,
                        (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                        | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                        ref->sharedMem, ref->sourceOffset,

        if( KERN_SUCCESS != err) {

        err = vm_allocate( map, &ref->mapped, ref->size,
                           ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                           | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

        if( KERN_SUCCESS != err) {

        // we have to make sure that these guys don't get copied if we fork.
        err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
        assert( KERN_SUCCESS == err );
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset,
        IOByteCount        length )
{
    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    vm_address_t      logical;
    IOByteCount       pageOffset;
    IOPhysicalAddress sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

        length = getLength();

    sourceAddr = getSourceSegment( sourceOffset, NULL );
    pageOffset = sourceAddr - trunc_page_32( sourceAddr );

    ref.size = round_page_32( length + pageOffset );

    if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
    {
        _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
        ref.mapped = mapping->getVirtualAddress();

            err = kIOReturnNotReadable;

            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,

            err = upl_transpose(redirUPL2, mapping->redirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);

            // swap the memEntries since they now refer to different vm_objects
            void * me = _memEntry;
            _memEntry = mapping->memory->_memEntry;
            mapping->memory->_memEntry = me;

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE

            ref.mapped = trunc_page_32( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)

            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    if( err != KERN_SUCCESS) {

        doUnmap( addressMap, ref.mapped, ref.size );

        *atAddress = ref.mapped + pageOffset;
enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        vm_map_t         addressMap,
        IOVirtualAddress address,
        IOByteCount      sourceOffset,
        IOOptionBits     options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;

    IOByteCount      pageOffset;
    IOByteCount      pagerOffset;
    IOPhysicalLength segLen;

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
        } while( kIOMemoryRedirected & _flags );

        return( kIOReturnSuccess );

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );

    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

        segLen += pageOffset;

        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
        assert( KERN_SUCCESS == err );

            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;

                allPhys = getPhysicalSegment64( 0, &allLen );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

                    (page < segLen) && (KERN_SUCCESS == err);
                    page += page_size) {
                        err = device_pager_populate_object(pager, pagerOffset,
                                (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                        pagerOffset += page_size;

            assert( KERN_SUCCESS == err );
            /* *** Temporary Workaround *** */

            /* This call to vm_fault causes an early pmap level resolution    */
            /* of the mappings created above. Need for this is in absolute    */
            /* violation of the basic tenet that the pmap layer is a cache.   */
            /* Further, it implies a serious I/O architectural violation on   */
            /* the part of some user of the mapping. As of this writing,      */
            /* the call to vm_fault is needed because the NVIDIA driver       */
            /* makes a call to pmap_extract. The NVIDIA driver needs to be    */
            /* fixed as soon as possible. The NVIDIA driver should not        */
            /* need to query for this info as it should know from the doMap   */
            /* call where the physical memory is mapped. When a query is      */
            /* necessary to find a physical mapping, it should be done        */
            /* through an iokit call which includes the mapped memory         */
            /* handle. This is required for machine architecture independence.*/

            if(!(kIOMemoryRedirected & _flags)) {
                vm_fault(addressMap,
                         (vm_map_offset_t)address,
                         VM_PROT_READ|VM_PROT_WRITE,
                         FALSE, THREAD_UNINT, NULL,
                         (vm_map_offset_t)0);
            }

            /* *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;

            && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

        err = kIOReturnBadArgument;
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

        err = kIOReturnSuccess;
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn       err = kIOReturnSuccess;
    _IOMemoryMap * mapping = 0;

        _flags |= kIOMemoryRedirected;

        _flags &= ~kIOMemoryRedirected;

        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, doRedirect );

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

        // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );

        if( logical && addressMap
            && (!safeTask || (get_task_map(safeTask) != addressMap))
            && (0 == (options & kIOMapStatic)))
        {
            IOUnmapPages( addressMap, logical, length );
            if(!doRedirect && safeTask
             && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
            {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);

    if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
        memory->redirect(safeTask, doRedirect);
IOReturn _IOMemoryMap::unmap( void )
{
    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);

        err = kIOReturnSuccess;

void _IOMemoryMap::taskDied( void )
{
        vm_map_deallocate(addressMap);

// Overload the release mechanism. All mappings must be a member
// of a memory descriptor's _mappings set. This means that we
// always have 2 references on a mapping. When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    super::taggedRelease(tag, 2);
}

void _IOMemoryMap::free()
{
        memory->removeMapping( this);

    if (owner && (owner != memory))
        owner->removeMapping(this);

        superMap->release();

        upl_commit(redirUPL, NULL, 0);
        upl_deallocate(redirUPL);
IOByteCount _IOMemoryMap::getLength()

IOVirtualAddress _IOMemoryMap::getVirtualAddress()

task_t _IOMemoryMap::getAddressTask()
{
        return( superMap->getAddressTask());

        return( addressTask);

IOOptionBits _IOMemoryMap::getMapOptions()

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()

_IOMemoryMap * _IOMemoryMap::copyCompatible(
        IOMemoryDescriptor * owner,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
    if( options & kIOMapUnique)
    if( (options ^ _options) & kIOMapReadOnly)
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))

    if( _offset < offset)

    if( (_offset + _length) > length)

    if( (length == _length) && (!_offset)) {

        mapping = new _IOMemoryMap;
            && !mapping->initCompatible( owner, this, _offset, _length )) {

IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount        _offset,
                                                    IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

        address = memory->getPhysicalSegment( offset + _offset, _length );

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2611 #define super OSObject
2613 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2615 void IOMemoryDescriptor::initialize( void )
2617 if( 0 == gIOMemoryLock
)
2618 gIOMemoryLock
= IORecursiveLockAlloc();
2620 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey
,
2621 ptoa_64(gIOMaximumMappedIOPageCount
), 64);
2624 void IOMemoryDescriptor::free( void )
2627 _mappings
->release();
2632 IOMemoryMap
* IOMemoryDescriptor::setMapping(
2634 IOVirtualAddress mapAddress
,
2635 IOOptionBits options
)
2637 _IOMemoryMap
* newMap
;
2639 newMap
= new _IOMemoryMap
;
2644 && !newMap
->initWithDescriptor( this, intoTask
, mapAddress
,
2645 options
| kIOMapStatic
, 0, getLength() )) {
2650 addMapping( newMap
);
2657 IOMemoryMap
* IOMemoryDescriptor::map(
2658 IOOptionBits options
)
2661 return( makeMapping( this, kernel_task
, 0,
2662 options
| kIOMapAnywhere
,
2666 IOMemoryMap
* IOMemoryDescriptor::map(
2668 IOVirtualAddress toAddress
,
2669 IOOptionBits options
,
2671 IOByteCount length
)
2674 length
= getLength();
2676 return( makeMapping( this, intoTask
, toAddress
, options
, offset
, length
));
IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (logical && addressMap) do
    {
        if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
        {
            physMem = memory;
            physMem->retain();
        }

        if (!redirUPL)
        {
            vm_size_t size = length;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
                                                           NULL, NULL, &flags))
                redirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( addressMap, logical, length );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != memory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference,
                                                          offset, length))
                    err = kIOReturnError;
            }
            if (redirUPL)
            {
                upl_commit(redirUPL, NULL, 0);
                upl_deallocate(redirUPL);
                redirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    return (err);
}
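
//
// Added commentary (not in the original source): redirect() is called twice.
// With newBackingMemory == 0 it parks the mapping: it takes a UPL with
// UPL_BLOCK_ACCESS and unmaps the pages from the address map.  A later call
// that supplies newBackingMemory retargets the mapping onto the new descriptor
// (via makeMapping with kIOMapUnique | kIOMapReference) and then commits and
// deallocates the UPL, unblocking access again.
//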
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       mapping = 0;
    OSIterator *         iter;

    LOCK;

    do
    {
        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

            if (owner != this)
                continue;

            if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
                if (!mapDesc)
                    continue;
                offset = 0;
            }
            else
            {
                mapDesc = this;
                mapDesc->retain();
            }

            if (kIOMapReference & options)
            {
                mapping = (_IOMemoryMap *) toAddress;
                mapping->retain();

                uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
                pageOffset1 -= trunc_page_32( pageOffset1 );

                uint32_t pageOffset2 = mapping->getVirtualAddress();
                pageOffset2 -= trunc_page_32( pageOffset2 );

                if (pageOffset1 != pageOffset2)
                    IOLog("::redirect can't map offset %x to addr %x\n",
                            pageOffset1, mapping->getVirtualAddress());

                if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                                  offset, length ))
                {
                    IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
                }

                if (mapping->owner)
                    mapping->owner->removeMapping(mapping);
                continue;
            }
        }
        else
        {
            // look for an existing mapping
            if( (iter = OSCollectionIterator::withCollection( _mappings))) {

                while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                    if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                        break;
                }
                iter->release();
            }

            if( mapping || (options & kIOMapReference))
                continue;

            mapDesc = owner;
            mapDesc->retain();
        }
        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                          offset, length )) {
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    if (mapping)
    {
        mapping->owner = owner;
        owner->addMapping( mapping);
    }

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return( mapping);
}
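
//
// Added commentary (not in the original source): makeMapping() first honours
// kIOMapUnique / kIOMapReference requests, then walks the descriptor's
// _mappings set looking for an existing _IOMemoryMap whose options, cache mode
// and range are compatible (via copyCompatible()) before it finally allocates
// a fresh _IOMemoryMap.  Compatible requests therefore share one mapping.
//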
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}
void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
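
//
// Usage sketch (illustrative only, not part of the original source): a
// sub-range descriptor is normally obtained through the
// IOMemoryDescriptor::withSubRange() factory, which calls through to
// initSubRange() above.  Offsets passed to the sub-descriptor are relative to
// _start within the parent.  The helper name below is an assumption.
//
#if 0
static IOMemoryDescriptor * exampleSubRange(IOMemoryDescriptor * parent)
{
    // View the second page of the parent descriptor, inheriting its
    // direction.  initSubRange() fails if the range exceeds the parent.
    return IOMemoryDescriptor::withSubRange(parent, page_size, page_size,
                                            parent->getDirection());
}
#endif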
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                           IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOReturn IOSubMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    if( sourceOffset >= _length)
        return( kIOReturnOverrun );
    return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
}
IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                         IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
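
//
// Illustrative sketch (not part of the original source): readBytes() and
// writeBytes() clamp the transfer to the sub-range, so callers should check
// the returned count rather than assume the full length was copied.  The
// buffer and helper name below are assumptions made for the example.
//
#if 0
static bool exampleCopyOut(IOMemoryDescriptor * subDesc, void * buf, IOByteCount len)
{
    IOByteCount done = subDesc->readBytes(0, buf, len);
    return (done == len);       // false if the request ran past the sub-range
}
#endif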
IOReturn
IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                     IOOptionBits * oldState )
{
    IOReturn err;

    LOCK;
    err = _parent->setPurgeable( newState, oldState );
    UNLOCK;

    return( err );
}
IOReturn
IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                         IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    LOCK;
    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );
    UNLOCK;

    return( err );
}
IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}
IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMapUnique & options))
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        owner,
                        intoTask,
                        toAddress - (_start + offset),
                        options | kIOMapReference,
                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        owner,
                        intoTask,
                        toAddress,
                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *       address,
                                       IOByteCount  length,
                                       IODirection  direction)
{
    return( false );
}
bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}
bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress  address,
                                 IOByteCount        length,
                                 IODirection        direction )
{
    return( false );
}
bool
IOSubMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    return( false );
}
bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result)
            goto bail;
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);   // free with the same size used for IOMalloc above
    return result;
}
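
//
// Added commentary (not in the original source): the serialized form produced
// above is an XML array with one dictionary per range, roughly
//
//   <array>
//     <dict><key>address</key><integer>...</integer>
//           <key>length</key><integer>...</integer></dict>
//     ...
//   </array>
//
// where the address is emitted as a 64-bit number only when it does not fit
// in 32 bits.
//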
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s)
        return (false);
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }