/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
31 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
32 #include <sys/cdefs.h>
34 #include <IOKit/assert.h>
35 #include <IOKit/system.h>
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOMapper.h>
40 #include <IOKit/IOKitDebug.h>
42 #include <libkern/c++/OSContainers.h>
43 #include <libkern/c++/OSDictionary.h>
44 #include <libkern/c++/OSArray.h>
45 #include <libkern/c++/OSSymbol.h>
46 #include <libkern/c++/OSNumber.h>
47 #include <sys/cdefs.h>
51 #include <mach/memory_object_types.h>
52 #include <device/device_port.h>
struct phys_entry      *pmap_find_physentry(ppnum_t pa);

void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
memory_object_t
device_pager_setup(
	memory_object_t		pager,
	int			device_handle,
	vm_size_t		size,
	int			flags);
void
device_pager_deallocate(
	memory_object_t);
kern_return_t
device_pager_populate_object(
	memory_object_t		pager,
	vm_object_offset_t	offset,
	ppnum_t			phys_addr,
	vm_size_t		size);
/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
		vm_map_t	map,
		vm_offset_t	vaddr,
		vm_prot_t	fault_type,
		boolean_t	change_wiring,
		int		interruptible,
		pmap_t		caller_pmap,
		vm_offset_t	caller_pmap_addr);
unsigned int  IOTranslateCacheBits(struct phys_entry *pp);

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

static IOMapper * gIOSystemMapper;
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
kern_return_t device_data_action(
               int                     device_handle,
               ipc_port_t              device_pager,
               vm_prot_t               protection,
               vm_object_offset_t      offset,
               vm_size_t               size)
{
    struct ExpansionData {
        void *				devicePager;
        unsigned int			pagerContig:1;
        unsigned int			unused:31;
	IOMemoryDescriptor *		memory;
    };
    kern_return_t	 kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
	kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
    else
	kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}
kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *				devicePager;
        unsigned int			pagerContig:1;
        unsigned int			unused:31;
	IOMemoryDescriptor *		memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
	withAddress((vm_address_t) address, length, direction, kernel_task);
}
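/*
 * Illustrative sketch (not part of the original source): typical driver-side
 * use of the factory above together with prepare()/complete().  The names
 * `myBuffer' and `MY_LEN' are hypothetical.
 *
 *    IOMemoryDescriptor * md =
 *        IOMemoryDescriptor::withAddress(myBuffer, MY_LEN, kIODirectionOut);
 *    if (md && (kIOReturnSuccess == md->prepare())) {
 *        IOByteCount       segLen;
 *        IOPhysicalAddress phys = md->getPhysicalSegment(0, &segLen);
 *        // ... program the DMA engine with phys/segLen ...
 *        md->complete();
 *    }
 *    if (md)
 *        md->release();
 */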
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
	if (that->initWithAddress(address, length, direction, task))
	    return that;

	that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
				IOPhysicalAddress	address,
				IOByteCount		length,
				IODirection      	direction )
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
	self->release();
	return 0;
    }

    return self;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges(	IOVirtualRange * ranges,
				UInt32           withCount,
				IODirection      direction,
				task_t           task,
				bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
	if (that->initWithRanges(ranges, withCount, direction, task, asReference))
	    return that;

	that->release();
    }
    return 0;
}
/*
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *		buffers,
                                UInt32		count,
                                UInt32		offset,
                                task_t		task,
                                IOOptionBits	opts,
                                IOMapper *	mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
	self->release();
	return 0;
    }

    return self;
}
// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void *		buffers,
                                         UInt32		count,
                                         UInt32		offset,
                                         task_t		task,
                                         IOOptionBits	options,
                                         IOMapper *	mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges(	IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
	if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
	    return that;

	that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *	of,
				 IOByteCount		offset,
				 IOByteCount		length,
				 IODirection		direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
	self->release();
	self = 0;
    }
    return self;
}
/*
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
				 IOPhysicalAddress	address,
				 IOByteCount		withLength,
				 IODirection      	withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    // @@@ gvdl: Need to remove this
    // Auto-prepare if this is a kernel memory descriptor as very few
    // clients bother to prepare() kernel memory.
    // But it has been enforced so what are you going to do?

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 * from a given task or several physical ranges or finally an UPL from the ubc
 * system.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};
struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;	// The offset of this iopl in descriptor
    vm_offset_t fPageInfo;	// Pointer to page list or index into it
    ppnum_t fMappedBase;	// Page number of first page in this iopl
    unsigned int fPageOffset;	// Offset within first page of iopl
    unsigned int fFlags;	// Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[0];	// @@@ gvdl need to get rid of this
					//  should be able to use upl directly
    ioPLBlock fBlocks[0];
};
#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)	((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(d,len)	\
    ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)	(&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
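/*
 * Illustrative sketch (not part of the original source): how the flexible
 * ioGMDData buffer is sized and walked with the macros above.  The page and
 * upl counts are hypothetical.
 *
 *    unsigned int pages = 4, upls = 1;
 *    unsigned int dataSize = computeDataSize(pages, upls);
 *    OSData *entries = OSData::withCapacity(dataSize);
 *    entries->appendBytes(0, sizeof(ioGMDData));     // zero-filled header
 *    ioGMDData *dataP = getDataP(entries);
 *    dataP->fPageCnt = pages;                        // fPageList[] holds 'pages' entries
 *    ioPLBlock *iopls = getIOPLList(dataP);          // iopl blocks follow the page list
 *    UInt nIOPL = getNumIOPL(dataP, entries->getLength());
 */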
bool
IOGeneralMemoryDescriptor::initWithOptions(void *	buffers,
                                           UInt32	count,
                                           UInt32	offset,
                                           task_t	task,
                                           IOOptionBits	options,
                                           IOMapper *	mapper)
{
    switch (options & kIOMemoryTypeMask) {
    case kIOMemoryTypeVirtual:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
	mapper = kIOMapperNone;
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        panic("IOGMD::iWO(): bad type");	// @@@ gvdl: for testing
        return false;	/* bad argument */
    }
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        while (_wireCount)
            complete();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }
    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;	// No Mapper
    else if (!mapper) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    _flags = options;
    _task  = task;

    // DEPRECATED variable initialisation
    _direction             = (IODirection) (_flags & kIOMemoryDirectionMask);
    _position              = 0;
    _kernPtrAligned        = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;
    if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL ) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

        _wireCount++;	// UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags  = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;
        if (!pageList->device) {
            // @@@ gvdl: Ask JoeS: are the pages contiguous with the list?
            //    or is there a chance that we may be inserting 0 phys_addrs?
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];

            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {	/* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
        IOVirtualRange *ranges = (IOVirtualRange *) buffers;

        /*
         * Initialize the memory descriptor.
         */

        _length = 0;
        _pages  = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            IOVirtualRange cur = ranges[ind];

            _length += cur.length;
            _pages += atop_32(cur.address + cur.length + PAGE_MASK)
                   -  atop_32(cur.address);
        }

        _rangesIsAllocated = !(options & kIOMemoryAsReference);
        _rangesCount       = count;

        if (options & kIOMemoryAsReference)
            _ranges.v = ranges;
        else {
            _ranges.v = IONew(IOVirtualRange, count);
            if (!_ranges.v)
                return false;
            bcopy(/* from */ ranges, _ranges.v,
                  count * sizeof(IOVirtualRange));
        }

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed
        if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            _wireCount++;	// Physical MDs start out wired
        else { /* kIOMemoryTypeVirtual */
            ioGMDData *dataP;
            unsigned int dataSize =
                computeDataSize(_pages, /* upls */ _rangesCount * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
	reserved->memory = 0;
    UNLOCK;

    while (_wireCount)
        complete();
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if (reserved && reserved->devicePager)
	device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}

/*
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
        complete();
    }

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);	// @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t	srcAddr64;
        IOByteCount	srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    assert(!remaining);

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);	// @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}
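/*
 * Illustrative sketch (not part of the original source): copying through a
 * descriptor with the two methods above.  `md' and `localBuf' are
 * hypothetical.
 *
 *    UInt8       localBuf[64];
 *    IOByteCount done;
 *    done = md->readBytes(0, localBuf, sizeof(localBuf));   // descriptor -> kernel buffer
 *    localBuf[0] ^= 0xFF;
 *    done = md->writeBytes(0, localBuf, sizeof(localBuf));  // kernel buffer -> descriptor
 *    // Both return the number of bytes actually copied, clipped to the
 *    // descriptor's remaining length at the given offset.
 */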
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
815 IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment
816 (IOByteCount offset
, IOByteCount
*lengthOfSegment
)
818 IOPhysicalAddress address
= 0;
819 IOPhysicalLength length
= 0;
821 // assert(offset <= _length);
822 if (offset
< _length
) // (within bounds?)
824 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
827 // Physical address based memory descriptor
829 // Find offset within descriptor and make it relative
830 // to the current _range.
831 for (ind
= 0 ; offset
>= _ranges
.p
[ind
].length
; ind
++ )
832 offset
-= _ranges
.p
[ind
].length
;
834 IOPhysicalRange cur
= _ranges
.p
[ind
];
835 address
= cur
.address
+ offset
;
836 length
= cur
.length
- offset
;
838 // see how far we can coalesce ranges
839 for (++ind
; ind
< _rangesCount
; ind
++) {
840 cur
= _ranges
.p
[ind
];
842 if (address
+ length
!= cur
.address
)
845 length
+= cur
.length
;
848 // @@@ gvdl: should assert(address);
849 // but can't as NVidia GeForce creates a bogus physical mem
851 assert(address
|| /*nvidia*/(!_ranges
.p
[0].address
&& 1 == _rangesCount
));
856 // We need wiring & we are wired.
861 panic("IOGMD: not wired for getPhysicalSegment()");
865 assert(_memoryEntries
);
867 ioGMDData
* dataP
= getDataP(_memoryEntries
);
868 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
869 UInt ind
, numIOPLs
= getNumIOPL(dataP
, _memoryEntries
->getLength());
870 upl_page_info_t
*pageList
= getPageList(dataP
);
872 assert(numIOPLs
> 0);
874 // Scan through iopl info blocks looking for block containing offset
875 for (ind
= 1; ind
< numIOPLs
; ind
++) {
876 if (offset
< ioplList
[ind
].fIOMDOffset
)
880 // Go back to actual range as search goes past it
881 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
884 length
= ioplList
[ind
].fIOMDOffset
;
887 length
-= offset
; // Remainder within iopl
889 // Subtract offset till this iopl in total list
890 offset
-= ioplInfo
.fIOMDOffset
;
892 // This is a mapped IOPL so we just need to compute an offset
893 // relative to the mapped base.
894 if (ioplInfo
.fMappedBase
) {
895 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
896 address
= ptoa_32(ioplInfo
.fMappedBase
) + offset
;
900 // Currently the offset is rebased into the current iopl.
901 // Now add the iopl 1st page offset.
902 offset
+= ioplInfo
.fPageOffset
;
904 // For external UPLs the fPageInfo field points directly to
905 // the upl's upl_page_info_t array.
906 if (ioplInfo
.fFlags
& kIOPLExternUPL
)
907 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
909 pageList
= &pageList
[ioplInfo
.fPageInfo
];
911 // Check for direct device non-paged memory
912 if ( ioplInfo
.fFlags
& kIOPLOnDevice
) {
913 address
= ptoa_32(pageList
->phys_addr
) + offset
;
917 // Now we need compute the index into the pageList
918 ind
= atop_32(offset
);
921 IOPhysicalAddress pageAddr
= pageList
[ind
].phys_addr
;
922 address
= ptoa_32(pageAddr
) + offset
;
924 // Check for the remaining data in this upl being longer than the
925 // remainder on the current page. This should be checked for
927 if (length
> PAGE_SIZE
- offset
) {
928 // See if the next page is contiguous. Stop looking when we hit
929 // the end of this upl, which is indicated by the
930 // contigLength >= length.
931 IOByteCount contigLength
= PAGE_SIZE
- offset
;
933 // Look for contiguous segment
934 while (contigLength
< length
935 && ++pageAddr
== pageList
[++ind
].phys_addr
) {
936 contigLength
+= PAGE_SIZE
;
938 if (length
> contigLength
)
939 length
= contigLength
;
952 *lengthOfSegment
= length
;
addr64_t IOMemoryDescriptor::getPhysicalSegment64
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
	return 0;

    if (gIOSystemMapper)
    {
	IOByteCount origLen;

	phys64  = gIOSystemMapper->mapAddr(phys32);
	origLen = *lengthOfSegment;
	length  = page_size - (phys64 & (page_size - 1));
	while ((length < origLen)
	    && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
	    length += page_size;
	if (length > origLen)
	    length = origLen;

	*lengthOfSegment = length;
    }
    else
	phys64 = (addr64_t) phys32;

    return phys64;
}
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment
                            (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL)
	return super::getSourceSegment( offset, lengthOfSegment );

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return( address );
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */ 					IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
/* DEPRECATED */     if (_task == kernel_task)
/* DEPRECATED */         return (void *) getSourceSegment(offset, lengthOfSegment);
/* DEPRECATED */     else
/* DEPRECATED */         panic("IOGMD::getVirtualSegment deprecated");
/* DEPRECATED */
/* DEPRECATED */     return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1041 IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
1043 IOReturn error
= kIOReturnNoMemory
;
1045 ppnum_t mapBase
= 0;
1048 assert(!_wireCount
);
1050 dataP
= getDataP(_memoryEntries
);
1051 mapper
= dataP
->fMapper
;
1052 if (mapper
&& _pages
)
1053 mapBase
= mapper
->iovmAlloc(_pages
);
1055 // Note that appendBytes(NULL) zeros the data up to the
1057 _memoryEntries
->appendBytes(0, dataP
->fPageCnt
* sizeof(upl_page_info_t
));
1058 dataP
= 0; // May no longer be valid so lets not get tempted.
1060 if (forDirection
== kIODirectionNone
)
1061 forDirection
= _direction
;
1063 int uplFlags
; // This Mem Desc's default flags for upl creation
1064 switch (forDirection
)
1066 case kIODirectionOut
:
1067 // Pages do not need to be marked as dirty on commit
1068 uplFlags
= UPL_COPYOUT_FROM
;
1069 _flags
|= kIOMemoryPreparedReadOnly
;
1072 case kIODirectionIn
:
1074 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
1077 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
1080 // Check user read/write access to the data buffer.
1082 unsigned int pageIndex
= 0;
1083 IOByteCount mdOffset
= 0;
1085 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1088 { curMap
= get_task_map(_task
); }
1090 for (UInt range
= 0; range
< _rangesCount
; range
++) {
1092 IOVirtualRange curRange
= _ranges
.v
[range
];
1093 vm_address_t startPage
;
1094 IOByteCount numBytes
;
1096 startPage
= trunc_page_32(curRange
.address
);
1097 iopl
.fPageOffset
= (short) curRange
.address
& PAGE_MASK
;
1099 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1101 iopl
.fMappedBase
= 0;
1102 numBytes
= iopl
.fPageOffset
+ curRange
.length
;
1105 dataP
= getDataP(_memoryEntries
);
1108 : IOPageableMapForAddress(startPage
);
1109 upl_page_info_array_t pageInfo
= getPageList(dataP
);
1110 int ioplFlags
= uplFlags
;
1111 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
1113 vm_size_t ioplSize
= round_page_32(numBytes
);
1114 unsigned int numPageInfo
= atop_32(ioplSize
);
1115 error
= vm_map_get_upl(theMap
,
1124 if (error
!= KERN_SUCCESS
)
1127 error
= kIOReturnNoMemory
;
1129 if (baseInfo
->device
) {
1131 iopl
.fFlags
= kIOPLOnDevice
;
1132 // Don't translate device memory at all
1133 if (mapper
&& mapBase
) {
1134 mapper
->iovmFree(mapBase
, _pages
);
1136 iopl
.fMappedBase
= 0;
1142 mapper
->iovmInsert(mapBase
, pageIndex
,
1143 baseInfo
, numPageInfo
);
1146 iopl
.fIOMDOffset
= mdOffset
;
1147 iopl
.fPageInfo
= pageIndex
;
1149 if (_flags
& kIOMemoryAutoPrepare
)
1151 kernel_upl_commit(iopl
.fIOPL
, 0, 0);
1155 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
1156 // Clean up partial created and unsaved iopl
1158 kernel_upl_abort(iopl
.fIOPL
, 0);
1162 // Check for a multiple iopl's in one virtual range
1163 pageIndex
+= numPageInfo
;
1164 mdOffset
-= iopl
.fPageOffset
;
1165 if (ioplSize
< numBytes
) {
1166 numBytes
-= ioplSize
;
1167 startPage
+= ioplSize
;
1168 mdOffset
+= ioplSize
;
1169 iopl
.fPageOffset
= 0;
1171 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1174 mdOffset
+= numBytes
;
1180 return kIOReturnSuccess
;
1184 dataP
= getDataP(_memoryEntries
);
1185 UInt done
= getNumIOPL(dataP
, _memoryEntries
->getLength());
1186 ioPLBlock
*ioplList
= getIOPLList(dataP
);
1188 for (UInt range
= 0; range
< done
; range
++)
1190 if (ioplList
[range
].fIOPL
)
1191 kernel_upl_abort(ioplList
[range
].fIOPL
, 0);
1194 if (mapper
&& mapBase
)
1195 mapper
->iovmFree(mapBase
, _pages
);
/*
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;

    if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
            /* kIOMemoryTypePhysical */
            // DO NOTHING
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(dataP, _memoryEntries->getLength());

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL)
                        kernel_upl_commit(ioplList[ind].fIOPL, 0, 0);
            }

            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
1268 IOReturn
IOGeneralMemoryDescriptor::doMap(
1269 vm_map_t addressMap
,
1270 IOVirtualAddress
* atAddress
,
1271 IOOptionBits options
,
1272 IOByteCount sourceOffset
,
1273 IOByteCount length
)
1276 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1278 // mapping source == dest? (could be much better)
1279 if( _task
&& (addressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
1280 && (1 == _rangesCount
) && (0 == sourceOffset
)
1281 && (length
<= _ranges
.v
[0].length
) ) {
1282 *atAddress
= _ranges
.v
[0].address
;
1283 return( kIOReturnSuccess
);
1286 if( 0 == sharedMem
) {
1288 vm_size_t size
= _pages
<< PAGE_SHIFT
;
1292 vm_size_t actualSize
= size
;
1293 kr
= mach_make_memory_entry( get_task_map(_task
),
1294 &actualSize
, _ranges
.v
[0].address
,
1295 VM_PROT_READ
| VM_PROT_WRITE
, &sharedMem
,
1298 if( (KERN_SUCCESS
== kr
) && (actualSize
!= round_page_32(size
))) {
1300 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
1301 _ranges
.v
[0].address
, (UInt32
)actualSize
, size
);
1303 kr
= kIOReturnVMError
;
1304 ipc_port_release_send( sharedMem
);
1307 if( KERN_SUCCESS
!= kr
)
1309 sharedMem
= MACH_PORT_NULL
;
1313 memory_object_t pager
;
1314 unsigned int flags
= 0;
1316 IOPhysicalLength segLen
;
1318 pa
= getPhysicalSegment64( sourceOffset
, &segLen
);
1321 reserved
= IONew( ExpansionData
, 1 );
1325 reserved
->pagerContig
= (1 == _rangesCount
);
1326 reserved
->memory
= this;
1328 /*What cache mode do we need*/
1329 switch(options
& kIOMapCacheMask
) {
1331 case kIOMapDefaultCache
:
1333 flags
= IODefaultCacheBits(pa
);
1336 case kIOMapInhibitCache
:
1337 flags
= DEVICE_PAGER_CACHE_INHIB
|
1338 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
1341 case kIOMapWriteThruCache
:
1342 flags
= DEVICE_PAGER_WRITE_THROUGH
|
1343 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
1346 case kIOMapCopybackCache
:
1347 flags
= DEVICE_PAGER_COHERENT
;
1350 case kIOMapWriteCombineCache
:
1351 flags
= DEVICE_PAGER_CACHE_INHIB
|
1352 DEVICE_PAGER_COHERENT
;
1356 flags
|= reserved
->pagerContig
? DEVICE_PAGER_CONTIGUOUS
: 0;
1358 pager
= device_pager_setup( (memory_object_t
) 0, (int) reserved
,
1363 kr
= mach_memory_object_memory_entry_64( (host_t
) 1, false /*internal*/,
1364 size
, VM_PROT_READ
| VM_PROT_WRITE
, pager
, &sharedMem
);
1366 assert( KERN_SUCCESS
== kr
);
1367 if( KERN_SUCCESS
!= kr
) {
1368 device_pager_deallocate( pager
);
1369 pager
= MACH_PORT_NULL
;
1370 sharedMem
= MACH_PORT_NULL
;
1373 if( pager
&& sharedMem
)
1374 reserved
->devicePager
= pager
;
1376 IODelete( reserved
, ExpansionData
, 1 );
1382 _memEntry
= (void *) sharedMem
;
1387 kr
= kIOReturnVMError
;
1390 kr
= super::doMap( addressMap
, atAddress
,
1391 options
, sourceOffset
, length
);
IOReturn IOGeneralMemoryDescriptor::doUnmap(
	vm_map_t		addressMap,
	IOVirtualAddress	logical,
	IOByteCount		length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)
	 && (logical == _ranges.v[0].address)
	 && (length <= _ranges.v[0].length) )
	return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1429 class _IOMemoryMap
: public IOMemoryMap
1431 OSDeclareDefaultStructors(_IOMemoryMap
)
1433 IOMemoryDescriptor
* memory
;
1434 IOMemoryMap
* superMap
;
1437 IOVirtualAddress logical
;
1439 vm_map_t addressMap
;
1440 IOOptionBits options
;
1443 virtual void taggedRelease(const void *tag
= 0) const;
1444 virtual void free();
1448 // IOMemoryMap methods
1449 virtual IOVirtualAddress
getVirtualAddress();
1450 virtual IOByteCount
getLength();
1451 virtual task_t
getAddressTask();
1452 virtual IOMemoryDescriptor
* getMemoryDescriptor();
1453 virtual IOOptionBits
getMapOptions();
1455 virtual IOReturn
unmap();
1456 virtual void taskDied();
1458 virtual IOPhysicalAddress
getPhysicalSegment(IOByteCount offset
,
1459 IOByteCount
* length
);
1461 // for IOMemoryDescriptor use
1462 _IOMemoryMap
* copyCompatible(
1463 IOMemoryDescriptor
* owner
,
1465 IOVirtualAddress toAddress
,
1466 IOOptionBits options
,
1468 IOByteCount length
);
1470 bool initCompatible(
1471 IOMemoryDescriptor
* memory
,
1472 IOMemoryMap
* superMap
,
1474 IOByteCount length
);
1476 bool initWithDescriptor(
1477 IOMemoryDescriptor
* memory
,
1479 IOVirtualAddress toAddress
,
1480 IOOptionBits options
,
1482 IOByteCount length
);
1485 task_t intoTask
, bool redirect
);
1488 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1491 #define super IOMemoryMap
1493 OSDefineMetaClassAndStructors(_IOMemoryMap
, IOMemoryMap
)
1495 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1497 bool _IOMemoryMap::initCompatible(
1498 IOMemoryDescriptor
* _memory
,
1499 IOMemoryMap
* _superMap
,
1500 IOByteCount _offset
,
1501 IOByteCount _length
)
1507 if( (_offset
+ _length
) > _superMap
->getLength())
1512 _superMap
->retain();
1513 superMap
= _superMap
;
1519 length
= _memory
->getLength();
1521 options
= superMap
->getMapOptions();
1522 logical
= superMap
->getVirtualAddress() + offset
;
1527 bool _IOMemoryMap::initWithDescriptor(
1528 IOMemoryDescriptor
* _memory
,
1530 IOVirtualAddress toAddress
,
1531 IOOptionBits _options
,
1532 IOByteCount _offset
,
1533 IOByteCount _length
)
1537 if( (!_memory
) || (!intoTask
) || !super::init())
1540 if( (_offset
+ _length
) > _memory
->getLength())
1543 addressMap
= get_task_map(intoTask
);
1546 vm_map_reference(addressMap
);
1555 length
= _memory
->getLength();
1557 addressTask
= intoTask
;
1558 logical
= toAddress
;
1561 if( options
& kIOMapStatic
)
1564 ok
= (kIOReturnSuccess
== memory
->doMap( addressMap
, &logical
,
1565 options
, offset
, length
));
1570 vm_map_deallocate(addressMap
);
1576 struct IOMemoryDescriptorMapAllocRef
1578 ipc_port_t sharedMem
;
1581 IOByteCount sourceOffset
;
1582 IOOptionBits options
;
1585 static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map
, void * _ref
)
1587 IOMemoryDescriptorMapAllocRef
* ref
= (IOMemoryDescriptorMapAllocRef
*)_ref
;
1591 if( ref
->sharedMem
) {
1592 vm_prot_t prot
= VM_PROT_READ
1593 | ((ref
->options
& kIOMapReadOnly
) ? 0 : VM_PROT_WRITE
);
1595 // set memory entry cache
1596 vm_prot_t memEntryCacheMode
= prot
| MAP_MEM_ONLY
;
1597 switch (ref
->options
& kIOMapCacheMask
)
1599 case kIOMapInhibitCache
:
1600 SET_MAP_MEM(MAP_MEM_IO
, memEntryCacheMode
);
1603 case kIOMapWriteThruCache
:
1604 SET_MAP_MEM(MAP_MEM_WTHRU
, memEntryCacheMode
);
1607 case kIOMapWriteCombineCache
:
1608 SET_MAP_MEM(MAP_MEM_WCOMB
, memEntryCacheMode
);
1611 case kIOMapCopybackCache
:
1612 SET_MAP_MEM(MAP_MEM_COPYBACK
, memEntryCacheMode
);
1615 case kIOMapDefaultCache
:
1617 SET_MAP_MEM(MAP_MEM_NOOP
, memEntryCacheMode
);
1621 vm_size_t unused
= 0;
1623 err
= mach_make_memory_entry( NULL
/*unused*/, &unused
, 0 /*unused*/,
1624 memEntryCacheMode
, NULL
, ref
->sharedMem
);
1625 if (KERN_SUCCESS
!= err
)
1626 IOLog("MAP_MEM_ONLY failed %d\n", err
);
1630 ref
->size
, 0 /* mask */,
1631 (( ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
1632 | VM_MAKE_TAG(VM_MEMORY_IOKIT
),
1633 ref
->sharedMem
, ref
->sourceOffset
,
1639 if( KERN_SUCCESS
!= err
) {
1646 err
= vm_allocate( map
, &ref
->mapped
, ref
->size
,
1647 ((ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
1648 | VM_MAKE_TAG(VM_MEMORY_IOKIT
) );
1650 if( KERN_SUCCESS
!= err
) {
1655 // we have to make sure that these guys don't get copied if we fork.
1656 err
= vm_inherit( map
, ref
->mapped
, ref
->size
, VM_INHERIT_NONE
);
1657 assert( KERN_SUCCESS
== err
);
1666 IOReturn
IOMemoryDescriptor::doMap(
1667 vm_map_t addressMap
,
1668 IOVirtualAddress
* atAddress
,
1669 IOOptionBits options
,
1670 IOByteCount sourceOffset
,
1671 IOByteCount length
)
1673 IOReturn err
= kIOReturnSuccess
;
1674 memory_object_t pager
;
1675 vm_address_t logical
;
1676 IOByteCount pageOffset
;
1677 IOPhysicalAddress sourceAddr
;
1678 IOMemoryDescriptorMapAllocRef ref
;
1680 ref
.sharedMem
= (ipc_port_t
) _memEntry
;
1681 ref
.sourceOffset
= sourceOffset
;
1682 ref
.options
= options
;
1687 length
= getLength();
1689 sourceAddr
= getSourceSegment( sourceOffset
, NULL
);
1690 assert( sourceAddr
);
1691 pageOffset
= sourceAddr
- trunc_page_32( sourceAddr
);
1693 ref
.size
= round_page_32( length
+ pageOffset
);
1695 logical
= *atAddress
;
1696 if( options
& kIOMapAnywhere
)
1697 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1700 ref
.mapped
= trunc_page_32( logical
);
1701 if( (logical
- ref
.mapped
) != pageOffset
) {
1702 err
= kIOReturnVMError
;
1707 if( ref
.sharedMem
&& (addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
))
1708 err
= IOIteratePageableMaps( ref
.size
, &IOMemoryDescriptorMapAlloc
, &ref
);
1710 err
= IOMemoryDescriptorMapAlloc( addressMap
, &ref
);
1712 if( err
!= KERN_SUCCESS
)
1716 pager
= (memory_object_t
) reserved
->devicePager
;
1718 pager
= MACH_PORT_NULL
;
1720 if( !ref
.sharedMem
|| pager
)
1721 err
= handleFault( pager
, addressMap
, ref
.mapped
, sourceOffset
, length
, options
);
1725 if( err
!= KERN_SUCCESS
) {
1727 doUnmap( addressMap
, ref
.mapped
, ref
.size
);
1730 *atAddress
= ref
.mapped
+ pageOffset
;
1736 kIOMemoryRedirected
= 0x00010000
1739 IOReturn
IOMemoryDescriptor::handleFault(
1741 vm_map_t addressMap
,
1742 IOVirtualAddress address
,
1743 IOByteCount sourceOffset
,
1745 IOOptionBits options
)
1747 IOReturn err
= kIOReturnSuccess
;
1748 memory_object_t pager
= (memory_object_t
) _pager
;
1752 IOByteCount pageOffset
;
1753 IOPhysicalLength segLen
;
1758 if( kIOMemoryRedirected
& _flags
) {
1760 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset
);
1764 } while( kIOMemoryRedirected
& _flags
);
1767 return( kIOReturnSuccess
);
1770 physAddr
= getPhysicalSegment64( sourceOffset
, &segLen
);
1772 pageOffset
= physAddr
- trunc_page_64( physAddr
);
1774 size
= length
+ pageOffset
;
1775 physAddr
-= pageOffset
;
1777 segLen
+= pageOffset
;
1780 // in the middle of the loop only map whole pages
1781 if( segLen
>= bytes
)
1783 else if( segLen
!= trunc_page_32( segLen
))
1784 err
= kIOReturnVMError
;
1785 if( physAddr
!= trunc_page_64( physAddr
))
1786 err
= kIOReturnBadArgument
;
1789 if( kIOLogMapping
& gIOKitDebug
)
1790 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
1791 addressMap
, address
+ pageOffset
, physAddr
+ pageOffset
,
1792 segLen
- pageOffset
);
1800 /* i386 doesn't support faulting on device memory yet */
1801 if( addressMap
&& (kIOReturnSuccess
== err
))
1802 err
= IOMapPages( addressMap
, address
, (IOPhysicalAddress
) physAddr
, segLen
, options
);
1803 assert( KERN_SUCCESS
== err
);
1809 if( reserved
&& reserved
->pagerContig
) {
1810 IOPhysicalLength allLen
;
1813 allPhys
= getPhysicalSegment64( 0, &allLen
);
1815 err
= device_pager_populate_object( pager
, 0, allPhys
>> PAGE_SHIFT
, round_page_32(allLen
) );
1820 (page
< segLen
) && (KERN_SUCCESS
== err
);
1821 page
+= page_size
) {
1822 err
= device_pager_populate_object(pager
, sourceOffset
+ page
,
1823 (ppnum_t
)((physAddr
+ page
) >> PAGE_SHIFT
), page_size
);
1826 assert( KERN_SUCCESS
== err
);
	    /* *** Temporary Workaround *** */

	    /* This call to vm_fault causes an early pmap level resolution	*/
	    /* of the mappings created above.  Need for this is in absolute	*/
	    /* violation of the basic tenet that the pmap layer is a cache.	*/
	    /* Further, it implies a serious I/O architectural violation on	*/
	    /* the part of some user of the mapping.  As of this writing, 	*/
	    /* the call to vm_fault is needed because the NVIDIA driver 	*/
	    /* makes a call to pmap_extract.  The NVIDIA driver needs to be	*/
	    /* fixed as soon as possible.  The NVIDIA driver should not 	*/
	    /* need to query for this info as it should know from the doMap	*/
	    /* call where the physical memory is mapped.  When a query is 	*/
	    /* necessary to find a physical mapping, it should be done 	*/
	    /* through an iokit call which includes the mapped memory 	*/
	    /* handle.  This is required for machine architecture independence.*/
1848 if(!(kIOMemoryRedirected
& _flags
)) {
1849 vm_fault(addressMap
, address
, 3, FALSE
, FALSE
, NULL
, 0);
1852 /* *** Temporary Workaround *** */
1855 sourceOffset
+= segLen
- pageOffset
;
1861 && (physAddr
= getPhysicalSegment64( sourceOffset
, &segLen
)));
1864 err
= kIOReturnBadArgument
;
1869 IOReturn
IOMemoryDescriptor::doUnmap(
1870 vm_map_t addressMap
,
1871 IOVirtualAddress logical
,
1872 IOByteCount length
)
1877 if( kIOLogMapping
& gIOKitDebug
)
1878 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1879 addressMap
, logical
, length
);
1882 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
1884 if( _memEntry
&& (addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
))
1885 addressMap
= IOPageableMapForAddress( logical
);
1887 err
= vm_deallocate( addressMap
, logical
, length
);
1890 err
= kIOReturnSuccess
;
1895 IOReturn
IOMemoryDescriptor::redirect( task_t safeTask
, bool redirect
)
1898 _IOMemoryMap
* mapping
= 0;
1904 if( (iter
= OSCollectionIterator::withCollection( _mappings
))) {
1905 while( (mapping
= (_IOMemoryMap
*) iter
->getNextObject()))
1906 mapping
->redirect( safeTask
, redirect
);
1913 _flags
|= kIOMemoryRedirected
;
1915 _flags
&= ~kIOMemoryRedirected
;
1921 // temporary binary compatibility
1922 IOSubMemoryDescriptor
* subMem
;
1923 if( (subMem
= OSDynamicCast( IOSubMemoryDescriptor
, this)))
1924 err
= subMem
->redirect( safeTask
, redirect
);
1926 err
= kIOReturnSuccess
;
1931 IOReturn
IOSubMemoryDescriptor::redirect( task_t safeTask
, bool redirect
)
1933 return( _parent
->redirect( safeTask
, redirect
));
1936 IOReturn
_IOMemoryMap::redirect( task_t safeTask
, bool redirect
)
1938 IOReturn err
= kIOReturnSuccess
;
1941 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
1945 if( logical
&& addressMap
1946 && (get_task_map( safeTask
) != addressMap
)
1947 && (0 == (options
& kIOMapStatic
))) {
1949 IOUnmapPages( addressMap
, logical
, length
);
1951 err
= vm_deallocate( addressMap
, logical
, length
);
1952 err
= memory
->doMap( addressMap
, &logical
,
1953 (options
& ~kIOMapAnywhere
) /*| kIOMapReserve*/,
1956 err
= kIOReturnSuccess
;
1958 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect
, this, logical
, length
, addressMap
);
1967 IOReturn
_IOMemoryMap::unmap( void )
1973 if( logical
&& addressMap
&& (0 == superMap
)
1974 && (0 == (options
& kIOMapStatic
))) {
1976 err
= memory
->doUnmap( addressMap
, logical
, length
);
1977 vm_map_deallocate(addressMap
);
1981 err
= kIOReturnSuccess
;
1990 void _IOMemoryMap::taskDied( void )
1994 vm_map_deallocate(addressMap
);
2002 // Overload the release mechanism. All mappings must be a member
2003 // of a memory descriptors _mappings set. This means that we
2004 // always have 2 references on a mapping. When either of these mappings
2005 // are released we need to free ourselves.
2006 void _IOMemoryMap::taggedRelease(const void *tag
) const
2009 super::taggedRelease(tag
, 2);
2013 void _IOMemoryMap::free()
2019 memory
->removeMapping( this);
2025 superMap
->release();
2030 IOByteCount
_IOMemoryMap::getLength()
2035 IOVirtualAddress
_IOMemoryMap::getVirtualAddress()
2040 task_t
_IOMemoryMap::getAddressTask()
2043 return( superMap
->getAddressTask());
2045 return( addressTask
);
2048 IOOptionBits
_IOMemoryMap::getMapOptions()
2053 IOMemoryDescriptor
* _IOMemoryMap::getMemoryDescriptor()
2058 _IOMemoryMap
* _IOMemoryMap::copyCompatible(
2059 IOMemoryDescriptor
* owner
,
2061 IOVirtualAddress toAddress
,
2062 IOOptionBits _options
,
2063 IOByteCount _offset
,
2064 IOByteCount _length
)
2066 _IOMemoryMap
* mapping
;
2068 if( (!task
) || (task
!= getAddressTask()))
2070 if( (options
^ _options
) & kIOMapReadOnly
)
2072 if( (kIOMapDefaultCache
!= (_options
& kIOMapCacheMask
))
2073 && ((options
^ _options
) & kIOMapCacheMask
))
2076 if( (0 == (_options
& kIOMapAnywhere
)) && (logical
!= toAddress
))
2079 if( _offset
< offset
)
2084 if( (_offset
+ _length
) > length
)
2087 if( (length
== _length
) && (!_offset
)) {
2092 mapping
= new _IOMemoryMap
;
2094 && !mapping
->initCompatible( owner
, this, _offset
, _length
)) {
2103 IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset
,
2104 IOPhysicalLength
* length
)
2106 IOPhysicalAddress address
;
2109 address
= memory
->getPhysicalSegment( offset
+ _offset
, length
);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
	gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
	_mappings->release();

    super::free();
}
2136 IOMemoryMap
* IOMemoryDescriptor::setMapping(
2138 IOVirtualAddress mapAddress
,
2139 IOOptionBits options
)
2143 map
= new _IOMemoryMap
;
2148 && !map
->initWithDescriptor( this, intoTask
, mapAddress
,
2149 options
| kIOMapStatic
, 0, getLength() )) {
IOMemoryMap * IOMemoryDescriptor::map(
	IOOptionBits		options )
{
    return( makeMapping( this, kernel_task, 0,
			options | kIOMapAnywhere,
			0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
	task_t			intoTask,
	IOVirtualAddress	toAddress,
	IOOptionBits		options,
	IOByteCount		offset,
	IOByteCount		length )
{
    if( 0 == length)
	length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
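/*
 * Illustrative sketch (not part of the original source): establishing a
 * kernel mapping of a descriptor with the convenience overload above.
 * `md' is hypothetical.
 *
 *    IOMemoryMap * map = md->map(kIOMapDefaultCache);
 *    if (map) {
 *        IOVirtualAddress va  = map->getVirtualAddress();
 *        IOByteCount      len = map->getLength();
 *        // ... access the memory through va/len ...
 *        map->release();   // drop the mapping when done
 *    }
 */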
2183 IOMemoryMap
* IOMemoryDescriptor::makeMapping(
2184 IOMemoryDescriptor
* owner
,
2186 IOVirtualAddress toAddress
,
2187 IOOptionBits options
,
2189 IOByteCount length
)
2191 _IOMemoryMap
* mapping
= 0;
2197 // look for an existing mapping
2198 if( (iter
= OSCollectionIterator::withCollection( _mappings
))) {
2200 while( (mapping
= (_IOMemoryMap
*) iter
->getNextObject())) {
2202 if( (mapping
= mapping
->copyCompatible(
2203 owner
, intoTask
, toAddress
,
2204 options
| kIOMapReference
,
2214 if( mapping
|| (options
& kIOMapReference
))
2219 mapping
= new _IOMemoryMap
;
2221 && !mapping
->initWithDescriptor( owner
, intoTask
, toAddress
, options
,
2224 IOLog("Didn't make map %08lx : %08lx\n", offset
, length
);
2232 owner
->addMapping( mapping
);
2239 void IOMemoryDescriptor::addMapping(
2240 IOMemoryMap
* mapping
)
2244 _mappings
= OSSet::withCapacity(1);
2246 _mappings
->setObject( mapping
);
2250 void IOMemoryDescriptor::removeMapping(
2251 IOMemoryMap
* mapping
)
2254 _mappings
->removeObject( mapping
);
2257 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2260 #define super IOMemoryDescriptor
2262 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor
, IOMemoryDescriptor
)
2264 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2266 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor
* parent
,
2267 IOByteCount offset
, IOByteCount length
,
2268 IODirection direction
)
2273 if( (offset
+ length
) > parent
->getLength())
2277 * We can check the _parent instance variable before having ever set it
2278 * to an initial value because I/O Kit guarantees that all our instance
2279 * variables are zeroed on an object's allocation.
2287 * An existing memory descriptor is being retargeted to
2288 * point to somewhere else. Clean up our present state.
2299 _direction
= direction
;
2300 _tag
= parent
->getTag();
2305 void IOSubMemoryDescriptor::free( void )
2314 IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset
,
2315 IOByteCount
* length
)
2317 IOPhysicalAddress address
;
2318 IOByteCount actualLength
;
2320 assert(offset
<= _length
);
2325 if( offset
>= _length
)
2328 address
= _parent
->getPhysicalSegment( offset
+ _start
, &actualLength
);
2330 if( address
&& length
)
2331 *length
= min( _length
- offset
, actualLength
);
2336 IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset
,
2337 IOByteCount
* length
)
2339 IOPhysicalAddress address
;
2340 IOByteCount actualLength
;
2342 assert(offset
<= _length
);
2347 if( offset
>= _length
)
2350 address
= _parent
->getSourceSegment( offset
+ _start
, &actualLength
);
2352 if( address
&& length
)
2353 *length
= min( _length
- offset
, actualLength
);
2358 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
2359 IOByteCount
* lengthOfSegment
)
2364 IOByteCount
IOSubMemoryDescriptor::readBytes(IOByteCount offset
,
2365 void * bytes
, IOByteCount length
)
2367 IOByteCount byteCount
;
2369 assert(offset
<= _length
);
2371 if( offset
>= _length
)
2375 byteCount
= _parent
->readBytes( _start
+ offset
, bytes
,
2376 min(length
, _length
- offset
) );
2379 return( byteCount
);
2382 IOByteCount
IOSubMemoryDescriptor::writeBytes(IOByteCount offset
,
2383 const void* bytes
, IOByteCount length
)
2385 IOByteCount byteCount
;
2387 assert(offset
<= _length
);
2389 if( offset
>= _length
)
2393 byteCount
= _parent
->writeBytes( _start
+ offset
, bytes
,
2394 min(length
, _length
- offset
) );
2397 return( byteCount
);
2400 IOReturn
IOSubMemoryDescriptor::prepare(
2401 IODirection forDirection
)
2406 err
= _parent
->prepare( forDirection
);
2412 IOReturn
IOSubMemoryDescriptor::complete(
2413 IODirection forDirection
)
2418 err
= _parent
->complete( forDirection
);
2424 IOMemoryMap
* IOSubMemoryDescriptor::makeMapping(
2425 IOMemoryDescriptor
* owner
,
2427 IOVirtualAddress toAddress
,
2428 IOOptionBits options
,
2430 IOByteCount length
)
2432 IOMemoryMap
* mapping
;
2434 mapping
= (IOMemoryMap
*) _parent
->makeMapping(
2436 toAddress
- (_start
+ offset
),
2437 options
| kIOMapReference
,
2438 _start
+ offset
, length
);
2441 mapping
= (IOMemoryMap
*) _parent
->makeMapping(
2444 options
, _start
+ offset
, length
);
2447 mapping
= super::makeMapping( owner
, intoTask
, toAddress
, options
,
2456 IOSubMemoryDescriptor::initWithAddress(void * address
,
2458 IODirection direction
)
2464 IOSubMemoryDescriptor::initWithAddress(vm_address_t address
,
2466 IODirection direction
,
2473 IOSubMemoryDescriptor::initWithPhysicalAddress(
2474 IOPhysicalAddress address
,
2476 IODirection direction
)
2482 IOSubMemoryDescriptor::initWithRanges(
2483 IOVirtualRange
* ranges
,
2485 IODirection direction
,
2493 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange
* ranges
,
2495 IODirection direction
,
2501 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2503 bool IOGeneralMemoryDescriptor::serialize(OSSerialize
* s
) const
2505 OSSymbol
const *keys
[2];
2506 OSObject
*values
[2];
2507 IOVirtualRange
*vcopy
;
2508 unsigned int index
, nRanges
;
2511 if (s
== NULL
) return false;
2512 if (s
->previouslySerialized(this)) return true;
2514 // Pretend we are an array.
2515 if (!s
->addXMLStartTag(this, "array")) return false;
2517 nRanges
= _rangesCount
;
2518 vcopy
= (IOVirtualRange
*) IOMalloc(sizeof(IOVirtualRange
) * nRanges
);
2519 if (vcopy
== 0) return false;
2521 keys
[0] = OSSymbol::withCString("address");
2522 keys
[1] = OSSymbol::withCString("length");
2525 values
[0] = values
[1] = 0;
2527 // From this point on we can go to bail.
2529 // Copy the volatile data so we don't have to allocate memory
2530 // while the lock is held.
2532 if (nRanges
== _rangesCount
) {
2533 for (index
= 0; index
< nRanges
; index
++) {
2534 vcopy
[index
] = _ranges
.v
[index
];
2537 // The descriptor changed out from under us. Give up.
2544 for (index
= 0; index
< nRanges
; index
++)
2546 values
[0] = OSNumber::withNumber(_ranges
.v
[index
].address
, sizeof(_ranges
.v
[index
].address
) * 8);
2547 if (values
[0] == 0) {
2551 values
[1] = OSNumber::withNumber(_ranges
.v
[index
].length
, sizeof(_ranges
.v
[index
].length
) * 8);
2552 if (values
[1] == 0) {
2556 OSDictionary
*dict
= OSDictionary::withObjects((const OSObject
**)values
, (const OSSymbol
**)keys
, 2);
2561 values
[0]->release();
2562 values
[1]->release();
2563 values
[0] = values
[1] = 0;
2565 result
= dict
->serialize(s
);
2571 result
= s
->addXMLEndTag("array");
2575 values
[0]->release();
2577 values
[1]->release();
2583 IOFree(vcopy
, sizeof(IOVirtualRange
) * nRanges
);
2587 bool IOSubMemoryDescriptor::serialize(OSSerialize
* s
) const
2592 if (s
->previouslySerialized(this)) return true;
2594 // Pretend we are a dictionary.
2595 // We must duplicate the functionality of OSDictionary here
2596 // because otherwise object references will not work;
2597 // they are based on the value of the object passed to
2598 // previouslySerialized and addXMLStartTag.
2600 if (!s
->addXMLStartTag(this, "dict")) return false;
2602 char const *keys
[3] = {"offset", "length", "parent"};
2604 OSObject
*values
[3];
2605 values
[0] = OSNumber::withNumber(_start
, sizeof(_start
) * 8);
2608 values
[1] = OSNumber::withNumber(_length
, sizeof(_length
) * 8);
2609 if (values
[1] == 0) {
2610 values
[0]->release();
2613 values
[2] = _parent
;
2616 for (int i
=0; i
<3; i
++) {
2617 if (!s
->addString("<key>") ||
2618 !s
->addString(keys
[i
]) ||
2619 !s
->addXMLEndTag("key") ||
2620 !values
[i
]->serialize(s
)) {
2625 values
[0]->release();
2626 values
[1]->release();
2631 return s
->addXMLEndTag("dict");
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }