/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>

#include <mach/memory_object_types.h>
#include <device/device_port.h>
struct phys_entry      *pmap_find_physentry(ppnum_t pa);

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
        memory_object_t         pager,

device_pager_deallocate(

device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,

memory_object_iopl_request(
        memory_object_offset_t  offset,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
        boolean_t       change_wiring,
        vm_offset_t     caller_pmap_addr);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper;
static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
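// Sizing note (illustrative): with 4 KB pages atop_32(512*1024*1024) is 131072
// pages, so wireVirtual() below refuses any single descriptor that covers
// 131072 pages (512 MB) or more with kIOReturnNoResources.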
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
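// Worked example (illustrative): with 4 KB pages, next_page(0x1234) is 0x2000;
// trunc_page_32() rounds down to the page base (0x1000) and PAGE_SIZE advances
// to the start of the following page.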
kern_return_t device_data_action(
               ipc_port_t               device_pager,
               vm_prot_t                protection,
               vm_object_offset_t       offset,
{
    struct ExpansionData {
        unsigned int            pagerContig:1;
        unsigned int            unused:31;
        IOMemoryDescriptor *    memory;
    };
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    memDesc = ref->memory;
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
kern_return_t device_close(
{
    struct ExpansionData {
        unsigned int            pagerContig:1;
        unsigned int            unused:31;
        IOMemoryDescriptor *    memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}
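// Usage sketch (illustrative, not part of this file): creating a descriptor
// over a caller-owned kernel buffer.  `myBuffer` and `md` are hypothetical
// names.
//
//     char myBuffer[1024];
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
//                                   myBuffer, sizeof(myBuffer), kIODirectionOutIn);
//     // ... use md (see prepare()/complete(), readBytes()/writeBytes(), map()) ...
//     if (md)
//         md->release();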
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithAddress(address, length, direction, task))
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithPhysicalAddress(address, length, direction)) {
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithRanges(ranges, withCount, direction, task, asReference))
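// Usage sketch (illustrative): describing two discontiguous kernel buffers
// with one descriptor.  `bufA`, `bufB`, `lenA` and `lenB` are hypothetical.
//
//     IOVirtualRange ranges[2];
//     ranges[0].address = (IOVirtualAddress) bufA;  ranges[0].length = lenA;
//     ranges[1].address = (IOVirtualAddress) bufB;  ranges[1].length = lenB;
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
//                                   ranges, 2, kIODirectionIn, kernel_task,
//                                   false);   // false: copy the range array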
/*
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *          buffers,
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper))

// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void *         buffers,
                                         IOOptionBits   options,
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *  of,
                                 IOByteCount           offset,
                                 IOByteCount           length,
                                 IODirection           direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
/*
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount   withLength,
                                    IODirection   withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                    IOByteCount    withLength,
                                    IODirection    withDirection,
                                    task_t         withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                        IOPhysicalRange * ranges,
                                        UInt32            count,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (asReference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                        IOVirtualRange * ranges,
                                        UInt32           count,
                                        IODirection      direction,
                                        task_t           task,
                                        bool             asReference)
{
    IOOptionBits mdOpts = direction;

    if (asReference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    // @@@ gvdl: Need to remove this
    // Auto-prepare if this is a kernel memory descriptor as very few
    // clients bother to prepare() kernel memory.
    // But it has been enforced so what are you going to do?

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * IOMemoryDescriptor.  The buffer is made up of several virtual address ranges,
 * from a given task or several physical ranges or finally an UPL from the ubc
 * system.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t  fPageInfo;     // Pointer to page list or index into it
    ppnum_t      fMappedBase;   // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *      fMapper;
    unsigned int    fPageCnt;
    upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this
                                  // should be able to use upl directly
    ioPLBlock       fBlocks[0];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(d,len)       \
    ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u)   \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
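// Layout note (illustrative): the OSData built for a descriptor holds one
// ioGMDData header, then fPageCnt upl_page_info_t entries, then the ioPLBlock
// records, which is exactly what computeDataSize() accounts for.  For example,
// a 16-page descriptor with one virtual range reserves roughly
//     sizeof(ioGMDData) + 16 * sizeof(upl_page_info_t) + 2 * sizeof(ioPLBlock)
// bytes (the "upls" argument is _rangesCount * 2 in initWithOptions below).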
467 IOGeneralMemoryDescriptor::initWithOptions(void * buffers
,
471 IOOptionBits options
,
475 switch (options
& kIOMemoryTypeMask
) {
476 case kIOMemoryTypeVirtual
:
483 case kIOMemoryTypePhysical
: // Neither Physical nor UPL should have a task
484 mapper
= kIOMapperNone
;
485 case kIOMemoryTypeUPL
:
489 panic("IOGMD::iWO(): bad type"); // @@@ gvdl: for testing
490 return false; /* bad argument */
497 * We can check the _initialized instance variable before having ever set
498 * it to an initial value because I/O Kit guarantees that all our instance
499 * variables are zeroed on an object's allocation.
504 * An existing memory descriptor is being retargeted to point to
505 * somewhere else. Clean up our present state.
512 if (_ranges
.v
&& _rangesIsAllocated
)
513 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
521 // Grab the appropriate mapper
522 if (mapper
== kIOMapperNone
)
523 mapper
= 0; // No Mapper
525 IOMapper::checkForSystemMapper();
526 gIOSystemMapper
= mapper
= IOMapper::gSystem
;
532 // DEPRECATED variable initialisation
533 _direction
= (IODirection
) (_flags
& kIOMemoryDirectionMask
);
536 _cachedPhysicalAddress
= 0;
537 _cachedVirtualAddress
= 0;
539 if ( (options
& kIOMemoryTypeMask
) == kIOMemoryTypeUPL
) {
542 unsigned int dataSize
= computeDataSize(/* pages */ 0, /* upls */ 1);
544 if (!_memoryEntries
) {
545 _memoryEntries
= OSData::withCapacity(dataSize
);
549 else if (!_memoryEntries
->initWithCapacity(dataSize
))
552 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
553 dataP
= getDataP(_memoryEntries
);
554 dataP
->fMapper
= mapper
;
557 _wireCount
++; // UPLs start out life wired
560 _pages
+= atop_32(offset
+ count
+ PAGE_MASK
) - atop_32(offset
);
563 upl_page_info_t
*pageList
= UPL_GET_INTERNAL_PAGE_LIST((upl_t
) buffers
);
565 iopl
.fIOPL
= (upl_t
) buffers
;
        // Set the flag kIOPLOnDevice conveniently equal to 1
567 iopl
.fFlags
= pageList
->device
| kIOPLExternUPL
;
568 iopl
.fIOMDOffset
= 0;
569 if (!pageList
->device
) {
            // @@@ gvdl: Ask JoeS: are the pages contiguous with the list,
            // or is there a chance that we may be inserting 0 phys_addrs?
572 // Pre-compute the offset into the UPL's page list
573 pageList
= &pageList
[atop_32(offset
)];
576 iopl
.fMappedBase
= mapper
->iovmAlloc(_pages
);
577 mapper
->iovmInsert(iopl
.fMappedBase
, 0, pageList
, _pages
);
580 iopl
.fMappedBase
= 0;
583 iopl
.fMappedBase
= 0;
584 iopl
.fPageInfo
= (vm_address_t
) pageList
;
585 iopl
.fPageOffset
= offset
;
587 _memoryEntries
->appendBytes(&iopl
, sizeof(iopl
));
589 else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
590 IOVirtualRange
*ranges
= (IOVirtualRange
*) buffers
;
593 * Initialize the memory descriptor.
598 for (unsigned ind
= 0; ind
< count
; ind
++) {
599 IOVirtualRange cur
= ranges
[ind
];
601 _length
+= cur
.length
;
602 _pages
+= atop_32(cur
.address
+ cur
.length
+ PAGE_MASK
)
603 - atop_32(cur
.address
);
607 _rangesIsAllocated
= !(options
& kIOMemoryAsReference
);
608 _rangesCount
= count
;
610 if (options
& kIOMemoryAsReference
)
613 _ranges
.v
= IONew(IOVirtualRange
, count
);
616 bcopy(/* from */ ranges
, _ranges
.v
,
617 count
* sizeof(IOVirtualRange
));
    // Auto-prepare memory at creation time.
    // Implied completion when descriptor is freed.
622 if ( (options
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
)
        _wireCount++;   // Physical MDs start out wired
624 else { /* kIOMemoryTypeVirtual */
626 unsigned int dataSize
=
627 computeDataSize(_pages
, /* upls */ _rangesCount
* 2);
629 if (!_memoryEntries
) {
630 _memoryEntries
= OSData::withCapacity(dataSize
);
634 else if (!_memoryEntries
->initWithCapacity(dataSize
))
637 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
638 dataP
= getDataP(_memoryEntries
);
639 dataP
->fMapper
= mapper
;
640 dataP
->fPageCnt
= _pages
;
642 if (kIOMemoryPersistent
& _flags
)
645 ipc_port_t sharedMem
;
647 vm_size_t size
= _pages
<< PAGE_SHIFT
;
648 vm_address_t startPage
;
650 startPage
= trunc_page_32(_ranges
.v
[0].address
);
652 vm_map_t theMap
= ((_task
== kernel_task
) && (kIOMemoryBufferPageable
& _flags
))
653 ? IOPageableMapForAddress(startPage
)
654 : get_task_map(_task
);
656 vm_size_t actualSize
= size
;
657 error
= mach_make_memory_entry( theMap
,
658 &actualSize
, startPage
,
659 VM_PROT_READ
| VM_PROT_WRITE
, &sharedMem
,
662 if (KERN_SUCCESS
== error
) {
663 if (actualSize
== round_page_32(size
)) {
664 _memEntry
= (void *) sharedMem
;
667 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
668 startPage
, (UInt32
)actualSize
, size
);
670 ipc_port_release_send( sharedMem
);
675 if ((_flags
& kIOMemoryAutoPrepare
)
676 && prepare() != kIOReturnSuccess
)
689 void IOGeneralMemoryDescriptor::free()
693 reserved
->memory
= 0;
699 _memoryEntries
->release();
703 if (_ranges
.v
&& _rangesIsAllocated
)
704 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
706 if (reserved
&& reserved
->devicePager
)
707 device_pager_deallocate( (memory_object_t
) reserved
->devicePager
);
709 // memEntry holds a ref on the device pager which owns reserved
710 // (ExpansionData) so no reserved access after this point
712 ipc_port_release_send( (ipc_port_t
) _memEntry
);
717 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
719 panic("IOGMD::unmapFromKernel deprecated");
722 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex
)
724 panic("IOGMD::mapIntoKernel deprecated");
730 * Get the direction of the transfer.
732 IODirection
IOMemoryDescriptor::getDirection() const
740 * Get the length of the transfer (over all ranges).
742 IOByteCount
IOMemoryDescriptor::getLength() const
747 void IOMemoryDescriptor::setTag( IOOptionBits tag
)
752 IOOptionBits
IOMemoryDescriptor::getTag( void )
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
758 IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset
,
759 IOByteCount
* length
)
761 IOPhysicalAddress physAddr
= 0;
763 if( prepare() == kIOReturnSuccess
) {
764 physAddr
= getPhysicalSegment( offset
, length
);
771 IOByteCount
IOMemoryDescriptor::readBytes
772 (IOByteCount offset
, void *bytes
, IOByteCount length
)
774 addr64_t dstAddr
= (addr64_t
) (UInt32
) bytes
;
775 IOByteCount remaining
;
    // Assert that this entire I/O is within the available range
778 assert(offset
< _length
);
779 assert(offset
+ length
<= _length
);
780 if (offset
>= _length
) {
781 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset
, length
); // @@@ gvdl
785 remaining
= length
= min(length
, _length
- offset
);
786 while (remaining
) { // (process another target segment?)
790 srcAddr64
= getPhysicalSegment64(offset
, &srcLen
);
794 // Clip segment length to remaining
795 if (srcLen
> remaining
)
798 copypv(srcAddr64
, dstAddr
, srcLen
,
799 cppvPsrc
| cppvNoRefSrc
| cppvFsnk
| cppvKmap
);
808 return length
- remaining
;
811 IOByteCount
IOMemoryDescriptor::writeBytes
812 (IOByteCount offset
, const void *bytes
, IOByteCount length
)
814 addr64_t srcAddr
= (addr64_t
) (UInt32
) bytes
;
815 IOByteCount remaining
;
    // Assert that this entire I/O is within the available range
818 assert(offset
< _length
);
819 assert(offset
+ length
<= _length
);
821 assert( !(kIOMemoryPreparedReadOnly
& _flags
) );
823 if ( (kIOMemoryPreparedReadOnly
& _flags
) || offset
>= _length
) {
824 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset
, length
); // @@@ gvdl
828 remaining
= length
= min(length
, _length
- offset
);
829 while (remaining
) { // (process another target segment?)
833 dstAddr64
= getPhysicalSegment64(offset
, &dstLen
);
837 // Clip segment length to remaining
838 if (dstLen
> remaining
)
841 copypv(srcAddr
, (addr64_t
) dstAddr64
, dstLen
,
842 cppvPsnk
| cppvFsnk
| cppvNoRefSrc
| cppvNoModSnk
| cppvKmap
);
    return length - remaining;
}
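// Usage sketch (illustrative): bounce a small amount of data through a
// descriptor.  `md` and `local` are hypothetical; both calls return the
// number of bytes actually copied, clipped to the descriptor's length.
//
//     UInt8 local[64];
//     IOByteCount got = md->readBytes(0, local, sizeof(local));
//     if (got)
//         md->writeBytes(0, local, got);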
854 // osfmk/device/iokit_rpc.c
855 extern "C" unsigned int IODefaultCacheBits(addr64_t pa
);
857 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position
)
859 panic("IOGMD::setPosition deprecated");
862 IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment
863 (IOByteCount offset
, IOByteCount
*lengthOfSegment
)
865 IOPhysicalAddress address
= 0;
866 IOPhysicalLength length
= 0;
868 // assert(offset <= _length);
869 if (offset
< _length
) // (within bounds?)
871 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
874 // Physical address based memory descriptor
876 // Find offset within descriptor and make it relative
877 // to the current _range.
878 for (ind
= 0 ; offset
>= _ranges
.p
[ind
].length
; ind
++ )
879 offset
-= _ranges
.p
[ind
].length
;
881 IOPhysicalRange cur
= _ranges
.p
[ind
];
882 address
= cur
.address
+ offset
;
883 length
= cur
.length
- offset
;
885 // see how far we can coalesce ranges
886 for (++ind
; ind
< _rangesCount
; ind
++) {
887 cur
= _ranges
.p
[ind
];
889 if (address
+ length
!= cur
.address
)
892 length
+= cur
.length
;
895 // @@@ gvdl: should assert(address);
896 // but can't as NVidia GeForce creates a bogus physical mem
898 assert(address
|| /*nvidia*/(!_ranges
.p
[0].address
&& 1 == _rangesCount
));
903 // We need wiring & we are wired.
908 panic("IOGMD: not wired for getPhysicalSegment()");
912 assert(_memoryEntries
);
914 ioGMDData
* dataP
= getDataP(_memoryEntries
);
915 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
916 UInt ind
, numIOPLs
= getNumIOPL(dataP
, _memoryEntries
->getLength());
917 upl_page_info_t
*pageList
= getPageList(dataP
);
919 assert(numIOPLs
> 0);
921 // Scan through iopl info blocks looking for block containing offset
922 for (ind
= 1; ind
< numIOPLs
; ind
++) {
923 if (offset
< ioplList
[ind
].fIOMDOffset
)
927 // Go back to actual range as search goes past it
928 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
931 length
= ioplList
[ind
].fIOMDOffset
;
934 length
-= offset
; // Remainder within iopl
    // Make the offset relative to the start of this iopl in the overall list
937 offset
-= ioplInfo
.fIOMDOffset
;
939 // This is a mapped IOPL so we just need to compute an offset
940 // relative to the mapped base.
941 if (ioplInfo
.fMappedBase
) {
942 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
943 address
= ptoa_32(ioplInfo
.fMappedBase
) + offset
;
947 // Currently the offset is rebased into the current iopl.
948 // Now add the iopl 1st page offset.
949 offset
+= ioplInfo
.fPageOffset
;
951 // For external UPLs the fPageInfo field points directly to
952 // the upl's upl_page_info_t array.
953 if (ioplInfo
.fFlags
& kIOPLExternUPL
)
954 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
956 pageList
= &pageList
[ioplInfo
.fPageInfo
];
958 // Check for direct device non-paged memory
959 if ( ioplInfo
.fFlags
& kIOPLOnDevice
) {
960 address
= ptoa_32(pageList
->phys_addr
) + offset
;
    // Now we need to compute the index into the pageList
965 ind
= atop_32(offset
);
968 IOPhysicalAddress pageAddr
= pageList
[ind
].phys_addr
;
969 address
= ptoa_32(pageAddr
) + offset
;
971 // Check for the remaining data in this upl being longer than the
972 // remainder on the current page. This should be checked for
974 if (length
> PAGE_SIZE
- offset
) {
975 // See if the next page is contiguous. Stop looking when we hit
976 // the end of this upl, which is indicated by the
977 // contigLength >= length.
978 IOByteCount contigLength
= PAGE_SIZE
- offset
;
980 // Look for contiguous segment
981 while (contigLength
< length
982 && ++pageAddr
== pageList
[++ind
].phys_addr
) {
983 contigLength
+= PAGE_SIZE
;
985 if (length
> contigLength
)
986 length
= contigLength
;
    *lengthOfSegment = length;
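// Caller-side sketch (illustrative): after prepare(), walk the descriptor's
// physical segments, e.g. to build a DMA scatter/gather list.  `md` and
// `programSGElement` are hypothetical.
//
//     IOByteCount       offset = 0, segLen;
//     IOPhysicalAddress segPhys;
//     while (offset < md->getLength()) {
//         segPhys = md->getPhysicalSegment(offset, &segLen);
//         if (!segPhys)
//             break;
//         programSGElement(segPhys, segLen);  // one contiguous physical run
//         offset += segLen;
//     }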
1004 addr64_t
IOMemoryDescriptor::getPhysicalSegment64
1005 (IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1007 IOPhysicalAddress phys32
;
1011 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
1015 if (gIOSystemMapper
)
1017 IOByteCount origLen
;
1019 phys64
= gIOSystemMapper
->mapAddr(phys32
);
1020 origLen
= *lengthOfSegment
;
1021 length
= page_size
- (phys64
& (page_size
- 1));
1022 while ((length
< origLen
)
1023 && ((phys64
+ length
) == gIOSystemMapper
->mapAddr(phys32
+ length
)))
1024 length
+= page_size
;
1025 if (length
> origLen
)
1028 *lengthOfSegment
= length
;
1031 phys64
= (addr64_t
) phys32
;
1036 IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment
1037 (IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1039 IOPhysicalAddress address
= 0;
1040 IOPhysicalLength length
= 0;
1042 assert(offset
<= _length
);
1044 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypeUPL
)
1045 return super::getSourceSegment( offset
, lengthOfSegment
);
1047 if ( offset
< _length
) // (within bounds?)
1049 unsigned rangesIndex
= 0;
1051 for ( ; offset
>= _ranges
.v
[rangesIndex
].length
; rangesIndex
++ )
1053 offset
-= _ranges
.v
[rangesIndex
].length
; // (make offset relative)
1056 address
= _ranges
.v
[rangesIndex
].address
+ offset
;
1057 length
= _ranges
.v
[rangesIndex
].length
- offset
;
1059 for ( ++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++ )
1061 if ( address
+ length
!= _ranges
.v
[rangesIndex
].address
) break;
1063 length
+= _ranges
.v
[rangesIndex
].length
; // (coalesce ranges)
1067 if ( address
== 0 ) length
= 0;
1070 if ( lengthOfSegment
) *lengthOfSegment
= length
;
1075 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1076 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
1077 /* DEPRECATED */ IOByteCount
* lengthOfSegment
)
1079 if (_task
== kernel_task
)
1080 return (void *) getSourceSegment(offset
, lengthOfSegment
);
1082 panic("IOGMD::getVirtualSegment deprecated");
1086 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1089 extern vm_offset_t static_memory_end
;
1090 #define io_kernel_static_end static_memory_end
1092 extern vm_offset_t first_avail
;
1093 #define io_kernel_static_end first_avail
1096 static kern_return_t
1097 io_get_kernel_static_upl(
1099 vm_address_t offset
,
1100 vm_size_t
*upl_size
,
1102 upl_page_info_array_t page_list
,
1103 unsigned int *count
,
1105 int force_data_sync
)
1107 unsigned int pageCount
, page
;
1110 pageCount
= atop_32(*upl_size
);
1111 if (pageCount
> *count
)
1116 for (page
= 0; page
< pageCount
; page
++)
1118 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
1121 page_list
[page
].phys_addr
= phys
;
1122 page_list
[page
].pageout
= 0;
1123 page_list
[page
].absent
= 0;
1124 page_list
[page
].dirty
= 0;
1125 page_list
[page
].precious
= 0;
1126 page_list
[page
].device
= 0;
1129 return ((page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
);
1132 IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
1134 IOReturn error
= kIOReturnNoMemory
;
1136 ppnum_t mapBase
= 0;
1138 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1140 assert(!_wireCount
);
1142 if (_pages
>= gIOMaximumMappedIOPageCount
)
1143 return kIOReturnNoResources
;
1145 dataP
= getDataP(_memoryEntries
);
1146 mapper
= dataP
->fMapper
;
1147 if (mapper
&& _pages
)
1148 mapBase
= mapper
->iovmAlloc(_pages
);
    // Note that appendBytes(NULL) zeros the data up to the appended length.
1152 _memoryEntries
->appendBytes(0, dataP
->fPageCnt
* sizeof(upl_page_info_t
));
1153 dataP
= 0; // May no longer be valid so lets not get tempted.
1155 if (forDirection
== kIODirectionNone
)
1156 forDirection
= _direction
;
1158 int uplFlags
; // This Mem Desc's default flags for upl creation
1159 switch (forDirection
)
1161 case kIODirectionOut
:
1162 // Pages do not need to be marked as dirty on commit
1163 uplFlags
= UPL_COPYOUT_FROM
;
1164 _flags
|= kIOMemoryPreparedReadOnly
;
1167 case kIODirectionIn
:
1169 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
1172 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
1175 // Check user read/write access to the data buffer.
1177 unsigned int pageIndex
= 0;
1178 IOByteCount mdOffset
= 0;
1180 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1183 { curMap
= get_task_map(_task
); }
1185 for (UInt range
= 0; range
< _rangesCount
; range
++) {
1187 IOVirtualRange curRange
= _ranges
.v
[range
];
1188 vm_address_t startPage
;
1189 IOByteCount numBytes
;
1191 startPage
= trunc_page_32(curRange
.address
);
1192 iopl
.fPageOffset
= (short) curRange
.address
& PAGE_MASK
;
1194 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1196 iopl
.fMappedBase
= 0;
1197 numBytes
= iopl
.fPageOffset
+ curRange
.length
;
1200 dataP
= getDataP(_memoryEntries
);
1203 : IOPageableMapForAddress(startPage
);
1204 upl_page_info_array_t pageInfo
= getPageList(dataP
);
1205 int ioplFlags
= uplFlags
;
1206 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
1208 vm_size_t ioplSize
= round_page_32(numBytes
);
1209 unsigned int numPageInfo
= atop_32(ioplSize
);
1211 if ((theMap
== kernel_map
) && (startPage
< io_kernel_static_end
))
1213 error
= io_get_kernel_static_upl(theMap
,
1222 } else if (sharedMem
&& (kIOMemoryPersistent
& _flags
)) {
1224 error
= memory_object_iopl_request(sharedMem
,
1233 error
= vm_map_get_upl(theMap
,
1244 if (error
!= KERN_SUCCESS
)
1247 error
= kIOReturnNoMemory
;
1249 if (baseInfo
->device
) {
1251 iopl
.fFlags
= kIOPLOnDevice
;
1252 // Don't translate device memory at all
1253 if (mapper
&& mapBase
) {
1254 mapper
->iovmFree(mapBase
, _pages
);
1256 iopl
.fMappedBase
= 0;
1262 mapper
->iovmInsert(mapBase
, pageIndex
,
1263 baseInfo
, numPageInfo
);
1266 iopl
.fIOMDOffset
= mdOffset
;
1267 iopl
.fPageInfo
= pageIndex
;
1269 if ((_flags
& kIOMemoryAutoPrepare
) && iopl
.fIOPL
)
1271 kernel_upl_commit(iopl
.fIOPL
, 0, 0);
1275 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
            // Clean up partially created and unsaved iopl
1278 kernel_upl_abort(iopl
.fIOPL
, 0);
        // Check for multiple iopls in one virtual range
1283 pageIndex
+= numPageInfo
;
1284 mdOffset
-= iopl
.fPageOffset
;
1285 if (ioplSize
< numBytes
) {
1286 numBytes
-= ioplSize
;
1287 startPage
+= ioplSize
;
1288 mdOffset
+= ioplSize
;
1289 iopl
.fPageOffset
= 0;
1291 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1294 mdOffset
+= numBytes
;
1300 return kIOReturnSuccess
;
1304 dataP
= getDataP(_memoryEntries
);
1305 UInt done
= getNumIOPL(dataP
, _memoryEntries
->getLength());
1306 ioPLBlock
*ioplList
= getIOPLList(dataP
);
1308 for (UInt range
= 0; range
< done
; range
++)
1310 if (ioplList
[range
].fIOPL
)
1311 kernel_upl_abort(ioplList
[range
].fIOPL
, 0);
1314 if (mapper
&& mapBase
)
1315 mapper
->iovmFree(mapBase
, _pages
);
/*
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;

    if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
        error = wireVirtual(forDirection);

    return kIOReturnSuccess;
}
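// Usage sketch (illustrative): prepare() and complete() must be balanced
// around the I/O.  `md` is hypothetical.
//
//     if (kIOReturnSuccess == md->prepare(kIODirectionOut)) {
//         // ... run the transfer, e.g. walk getPhysicalSegment() ...
//         md->complete(kIODirectionOut);
//     }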
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
1354 IOReturn
IOGeneralMemoryDescriptor::complete(IODirection
/* forDirection */)
1359 return kIOReturnSuccess
;
1363 if ((_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
1364 /* kIOMemoryTypePhysical */
1368 ioGMDData
* dataP
= getDataP(_memoryEntries
);
1369 ioPLBlock
*ioplList
= getIOPLList(dataP
);
1370 UInt count
= getNumIOPL(dataP
, _memoryEntries
->getLength());
1372 if (dataP
->fMapper
&& _pages
&& ioplList
[0].fMappedBase
)
1373 dataP
->fMapper
->iovmFree(ioplList
[0].fMappedBase
, _pages
);
1375 // Only complete iopls that we created which are for TypeVirtual
1376 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypeVirtual
) {
1377 for (UInt ind
= 0; ind
< count
; ind
++)
1378 if (ioplList
[ind
].fIOPL
)
1379 kernel_upl_commit(ioplList
[ind
].fIOPL
, 0, 0);
1382 (void) _memoryEntries
->initWithBytes(dataP
, sizeof(ioGMDData
)); // == setLength()
1385 return kIOReturnSuccess
;
1388 IOReturn
IOGeneralMemoryDescriptor::doMap(
1389 vm_map_t addressMap
,
1390 IOVirtualAddress
* atAddress
,
1391 IOOptionBits options
,
1392 IOByteCount sourceOffset
,
1393 IOByteCount length
)
1396 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1398 // mapping source == dest? (could be much better)
1399 if( _task
&& (addressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
1400 && (1 == _rangesCount
) && (0 == sourceOffset
)
1401 && (length
<= _ranges
.v
[0].length
) ) {
1402 *atAddress
= _ranges
.v
[0].address
;
1403 return( kIOReturnSuccess
);
1406 if( 0 == sharedMem
) {
1408 vm_size_t size
= _pages
<< PAGE_SHIFT
;
1412 vm_size_t actualSize
= size
;
1413 kr
= mach_make_memory_entry( get_task_map(_task
),
1414 &actualSize
, _ranges
.v
[0].address
,
1415 VM_PROT_READ
| VM_PROT_WRITE
, &sharedMem
,
1418 if( (KERN_SUCCESS
== kr
) && (actualSize
!= round_page_32(size
))) {
1420 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
1421 _ranges
.v
[0].address
, (UInt32
)actualSize
, size
);
1423 kr
= kIOReturnVMError
;
1424 ipc_port_release_send( sharedMem
);
1427 if( KERN_SUCCESS
!= kr
)
1429 sharedMem
= MACH_PORT_NULL
;
1433 memory_object_t pager
;
1434 unsigned int flags
= 0;
1436 IOPhysicalLength segLen
;
1438 pa
= getPhysicalSegment64( sourceOffset
, &segLen
);
1441 reserved
= IONew( ExpansionData
, 1 );
1445 reserved
->pagerContig
= (1 == _rangesCount
);
1446 reserved
->memory
= this;
        /* What cache mode do we need? */
1449 switch(options
& kIOMapCacheMask
) {
1451 case kIOMapDefaultCache
:
1453 flags
= IODefaultCacheBits(pa
);
1456 case kIOMapInhibitCache
:
1457 flags
= DEVICE_PAGER_CACHE_INHIB
|
1458 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
1461 case kIOMapWriteThruCache
:
1462 flags
= DEVICE_PAGER_WRITE_THROUGH
|
1463 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
1466 case kIOMapCopybackCache
:
1467 flags
= DEVICE_PAGER_COHERENT
;
1470 case kIOMapWriteCombineCache
:
1471 flags
= DEVICE_PAGER_CACHE_INHIB
|
1472 DEVICE_PAGER_COHERENT
;
1476 flags
|= reserved
->pagerContig
? DEVICE_PAGER_CONTIGUOUS
: 0;
1478 pager
= device_pager_setup( (memory_object_t
) 0, (int) reserved
,
1483 kr
= mach_memory_object_memory_entry_64( (host_t
) 1, false /*internal*/,
1484 size
, VM_PROT_READ
| VM_PROT_WRITE
, pager
, &sharedMem
);
1486 assert( KERN_SUCCESS
== kr
);
1487 if( KERN_SUCCESS
!= kr
) {
1488 device_pager_deallocate( pager
);
1489 pager
= MACH_PORT_NULL
;
1490 sharedMem
= MACH_PORT_NULL
;
1493 if( pager
&& sharedMem
)
1494 reserved
->devicePager
= pager
;
1496 IODelete( reserved
, ExpansionData
, 1 );
1502 _memEntry
= (void *) sharedMem
;
1507 kr
= kIOReturnVMError
;
1510 kr
= super::doMap( addressMap
, atAddress
,
1511 options
, sourceOffset
, length
);
1516 IOReturn
IOGeneralMemoryDescriptor::doUnmap(
1517 vm_map_t addressMap
,
1518 IOVirtualAddress logical
,
1519 IOByteCount length
)
1521 // could be much better
1522 if( _task
&& (addressMap
== get_task_map(_task
)) && (1 == _rangesCount
)
1523 && (logical
== _ranges
.v
[0].address
)
1524 && (length
<= _ranges
.v
[0].length
) )
1525 return( kIOReturnSuccess
);
1527 return( super::doUnmap( addressMap
, logical
, length
));
1530 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1533 // osfmk/device/iokit_rpc.c
1534 extern kern_return_t
IOMapPages( vm_map_t map
, vm_offset_t va
, vm_offset_t pa
,
1535 vm_size_t length
, unsigned int mapFlags
);
1536 extern kern_return_t
IOUnmapPages(vm_map_t map
, vm_offset_t va
, vm_size_t length
);
1539 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1541 OSDefineMetaClassAndAbstractStructors( IOMemoryMap
, OSObject
)
1543 /* inline function implementation */
1544 IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
1545 { return( getPhysicalSegment( 0, 0 )); }
1547 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1549 class _IOMemoryMap
: public IOMemoryMap
1551 OSDeclareDefaultStructors(_IOMemoryMap
)
1553 IOMemoryDescriptor
* memory
;
1554 IOMemoryMap
* superMap
;
1557 IOVirtualAddress logical
;
1559 vm_map_t addressMap
;
1560 IOOptionBits options
;
1563 virtual void taggedRelease(const void *tag
= 0) const;
1564 virtual void free();
1568 // IOMemoryMap methods
1569 virtual IOVirtualAddress
getVirtualAddress();
1570 virtual IOByteCount
getLength();
1571 virtual task_t
getAddressTask();
1572 virtual IOMemoryDescriptor
* getMemoryDescriptor();
1573 virtual IOOptionBits
getMapOptions();
1575 virtual IOReturn
unmap();
1576 virtual void taskDied();
1578 virtual IOPhysicalAddress
getPhysicalSegment(IOByteCount offset
,
1579 IOByteCount
* length
);
1581 // for IOMemoryDescriptor use
1582 _IOMemoryMap
* copyCompatible(
1583 IOMemoryDescriptor
* owner
,
1585 IOVirtualAddress toAddress
,
1586 IOOptionBits options
,
1588 IOByteCount length
);
1590 bool initCompatible(
1591 IOMemoryDescriptor
* memory
,
1592 IOMemoryMap
* superMap
,
1594 IOByteCount length
);
1596 bool initWithDescriptor(
1597 IOMemoryDescriptor
* memory
,
1599 IOVirtualAddress toAddress
,
1600 IOOptionBits options
,
1602 IOByteCount length
);
1605 task_t intoTask
, bool redirect
);
1608 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1611 #define super IOMemoryMap
1613 OSDefineMetaClassAndStructors(_IOMemoryMap
, IOMemoryMap
)
1615 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1617 bool _IOMemoryMap::initCompatible(
1618 IOMemoryDescriptor
* _memory
,
1619 IOMemoryMap
* _superMap
,
1620 IOByteCount _offset
,
1621 IOByteCount _length
)
1627 if( (_offset
+ _length
) > _superMap
->getLength())
1632 _superMap
->retain();
1633 superMap
= _superMap
;
1639 length
= _memory
->getLength();
1641 options
= superMap
->getMapOptions();
1642 logical
= superMap
->getVirtualAddress() + offset
;
1647 bool _IOMemoryMap::initWithDescriptor(
1648 IOMemoryDescriptor
* _memory
,
1650 IOVirtualAddress toAddress
,
1651 IOOptionBits _options
,
1652 IOByteCount _offset
,
1653 IOByteCount _length
)
1657 if( (!_memory
) || (!intoTask
) || !super::init())
1660 if( (_offset
+ _length
) > _memory
->getLength())
1663 addressMap
= get_task_map(intoTask
);
1666 vm_map_reference(addressMap
);
1675 length
= _memory
->getLength();
1677 addressTask
= intoTask
;
1678 logical
= toAddress
;
1681 if( options
& kIOMapStatic
)
1684 ok
= (kIOReturnSuccess
== memory
->doMap( addressMap
, &logical
,
1685 options
, offset
, length
));
1690 vm_map_deallocate(addressMap
);
1696 struct IOMemoryDescriptorMapAllocRef
1698 ipc_port_t sharedMem
;
1701 IOByteCount sourceOffset
;
1702 IOOptionBits options
;
1705 static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map
, void * _ref
)
1707 IOMemoryDescriptorMapAllocRef
* ref
= (IOMemoryDescriptorMapAllocRef
*)_ref
;
1711 if( ref
->sharedMem
) {
1712 vm_prot_t prot
= VM_PROT_READ
1713 | ((ref
->options
& kIOMapReadOnly
) ? 0 : VM_PROT_WRITE
);
1715 // set memory entry cache
1716 vm_prot_t memEntryCacheMode
= prot
| MAP_MEM_ONLY
;
1717 switch (ref
->options
& kIOMapCacheMask
)
1719 case kIOMapInhibitCache
:
1720 SET_MAP_MEM(MAP_MEM_IO
, memEntryCacheMode
);
1723 case kIOMapWriteThruCache
:
1724 SET_MAP_MEM(MAP_MEM_WTHRU
, memEntryCacheMode
);
1727 case kIOMapWriteCombineCache
:
1728 SET_MAP_MEM(MAP_MEM_WCOMB
, memEntryCacheMode
);
1731 case kIOMapCopybackCache
:
1732 SET_MAP_MEM(MAP_MEM_COPYBACK
, memEntryCacheMode
);
1735 case kIOMapDefaultCache
:
1737 SET_MAP_MEM(MAP_MEM_NOOP
, memEntryCacheMode
);
1741 vm_size_t unused
= 0;
1743 err
= mach_make_memory_entry( NULL
/*unused*/, &unused
, 0 /*unused*/,
1744 memEntryCacheMode
, NULL
, ref
->sharedMem
);
1745 if (KERN_SUCCESS
!= err
)
1746 IOLog("MAP_MEM_ONLY failed %d\n", err
);
1750 ref
->size
, 0 /* mask */,
1751 (( ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
1752 | VM_MAKE_TAG(VM_MEMORY_IOKIT
),
1753 ref
->sharedMem
, ref
->sourceOffset
,
1759 if( KERN_SUCCESS
!= err
) {
1766 err
= vm_allocate( map
, &ref
->mapped
, ref
->size
,
1767 ((ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
1768 | VM_MAKE_TAG(VM_MEMORY_IOKIT
) );
1770 if( KERN_SUCCESS
!= err
) {
1775 // we have to make sure that these guys don't get copied if we fork.
1776 err
= vm_inherit( map
, ref
->mapped
, ref
->size
, VM_INHERIT_NONE
);
1777 assert( KERN_SUCCESS
== err
);
1786 IOReturn
IOMemoryDescriptor::doMap(
1787 vm_map_t addressMap
,
1788 IOVirtualAddress
* atAddress
,
1789 IOOptionBits options
,
1790 IOByteCount sourceOffset
,
1791 IOByteCount length
)
1793 IOReturn err
= kIOReturnSuccess
;
1794 memory_object_t pager
;
1795 vm_address_t logical
;
1796 IOByteCount pageOffset
;
1797 IOPhysicalAddress sourceAddr
;
1798 IOMemoryDescriptorMapAllocRef ref
;
1800 ref
.sharedMem
= (ipc_port_t
) _memEntry
;
1801 ref
.sourceOffset
= sourceOffset
;
1802 ref
.options
= options
;
1807 length
= getLength();
1809 sourceAddr
= getSourceSegment( sourceOffset
, NULL
);
1810 assert( sourceAddr
);
1811 pageOffset
= sourceAddr
- trunc_page_32( sourceAddr
);
1813 ref
.size
= round_page_32( length
+ pageOffset
);
1815 logical
= *atAddress
;
1816 if( options
& kIOMapAnywhere
)
1817 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1820 ref
.mapped
= trunc_page_32( logical
);
1821 if( (logical
- ref
.mapped
) != pageOffset
) {
1822 err
= kIOReturnVMError
;
1827 if( ref
.sharedMem
&& (addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
))
1828 err
= IOIteratePageableMaps( ref
.size
, &IOMemoryDescriptorMapAlloc
, &ref
);
1830 err
= IOMemoryDescriptorMapAlloc( addressMap
, &ref
);
1832 if( err
!= KERN_SUCCESS
)
1836 pager
= (memory_object_t
) reserved
->devicePager
;
1838 pager
= MACH_PORT_NULL
;
1840 if( !ref
.sharedMem
|| pager
)
1841 err
= handleFault( pager
, addressMap
, ref
.mapped
, sourceOffset
, length
, options
);
1845 if( err
!= KERN_SUCCESS
) {
1847 doUnmap( addressMap
, ref
.mapped
, ref
.size
);
1850 *atAddress
= ref
.mapped
+ pageOffset
;
1856 kIOMemoryRedirected
= 0x00010000
1859 IOReturn
IOMemoryDescriptor::handleFault(
1861 vm_map_t addressMap
,
1862 IOVirtualAddress address
,
1863 IOByteCount sourceOffset
,
1865 IOOptionBits options
)
1867 IOReturn err
= kIOReturnSuccess
;
1868 memory_object_t pager
= (memory_object_t
) _pager
;
1872 IOByteCount pageOffset
;
1873 IOByteCount pagerOffset
;
1874 IOPhysicalLength segLen
;
1879 if( kIOMemoryRedirected
& _flags
) {
1881 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset
);
1885 } while( kIOMemoryRedirected
& _flags
);
1888 return( kIOReturnSuccess
);
1891 physAddr
= getPhysicalSegment64( sourceOffset
, &segLen
);
1893 pageOffset
= physAddr
- trunc_page_64( physAddr
);
1894 pagerOffset
= sourceOffset
;
1896 size
= length
+ pageOffset
;
1897 physAddr
-= pageOffset
;
1899 segLen
+= pageOffset
;
1902 // in the middle of the loop only map whole pages
1903 if( segLen
>= bytes
)
1905 else if( segLen
!= trunc_page_32( segLen
))
1906 err
= kIOReturnVMError
;
1907 if( physAddr
!= trunc_page_64( physAddr
))
1908 err
= kIOReturnBadArgument
;
1911 if( kIOLogMapping
& gIOKitDebug
)
1912 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
1913 addressMap
, address
+ pageOffset
, physAddr
+ pageOffset
,
1914 segLen
- pageOffset
);
1922 /* i386 doesn't support faulting on device memory yet */
1923 if( addressMap
&& (kIOReturnSuccess
== err
))
1924 err
= IOMapPages( addressMap
, address
, (IOPhysicalAddress
) physAddr
, segLen
, options
);
1925 assert( KERN_SUCCESS
== err
);
1931 if( reserved
&& reserved
->pagerContig
) {
1932 IOPhysicalLength allLen
;
1935 allPhys
= getPhysicalSegment64( 0, &allLen
);
1937 err
= device_pager_populate_object( pager
, 0, allPhys
>> PAGE_SHIFT
, round_page_32(allLen
) );
1942 (page
< segLen
) && (KERN_SUCCESS
== err
);
1943 page
+= page_size
) {
1944 err
= device_pager_populate_object(pager
, pagerOffset
,
1945 (ppnum_t
)((physAddr
+ page
) >> PAGE_SHIFT
), page_size
);
1946 pagerOffset
+= page_size
;
1949 assert( KERN_SUCCESS
== err
);
1955 /* *** Temporary Workaround *** */
1957 /* This call to vm_fault causes an early pmap level resolution */
1958 /* of the mappings created above. Need for this is in absolute */
1959 /* violation of the basic tenet that the pmap layer is a cache. */
1960 /* Further, it implies a serious I/O architectural violation on */
1961 /* the part of some user of the mapping. As of this writing, */
1962 /* the call to vm_fault is needed because the NVIDIA driver */
1963 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
1964 /* fixed as soon as possible. The NVIDIA driver should not */
1965 /* need to query for this info as it should know from the doMap */
1966 /* call where the physical memory is mapped. When a query is */
1967 /* necessary to find a physical mapping, it should be done */
1968 /* through an iokit call which includes the mapped memory */
1969 /* handle. This is required for machine architecture independence.*/
1971 if(!(kIOMemoryRedirected
& _flags
)) {
1972 vm_fault(addressMap
, address
, 3, FALSE
, FALSE
, NULL
, 0);
1975 /* *** Temporary Workaround *** */
1978 sourceOffset
+= segLen
- pageOffset
;
1984 && (physAddr
= getPhysicalSegment64( sourceOffset
, &segLen
)));
1987 err
= kIOReturnBadArgument
;
1992 IOReturn
IOMemoryDescriptor::doUnmap(
1993 vm_map_t addressMap
,
1994 IOVirtualAddress logical
,
1995 IOByteCount length
)
2000 if( kIOLogMapping
& gIOKitDebug
)
2001 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2002 addressMap
, logical
, length
);
2005 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2007 if( _memEntry
&& (addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
))
2008 addressMap
= IOPageableMapForAddress( logical
);
2010 err
= vm_deallocate( addressMap
, logical
, length
);
2013 err
= kIOReturnSuccess
;
2018 IOReturn
IOMemoryDescriptor::redirect( task_t safeTask
, bool redirect
)
2021 _IOMemoryMap
* mapping
= 0;
2027 if( (iter
= OSCollectionIterator::withCollection( _mappings
))) {
2028 while( (mapping
= (_IOMemoryMap
*) iter
->getNextObject()))
2029 mapping
->redirect( safeTask
, redirect
);
2036 _flags
|= kIOMemoryRedirected
;
2038 _flags
&= ~kIOMemoryRedirected
;
2044 // temporary binary compatibility
2045 IOSubMemoryDescriptor
* subMem
;
2046 if( (subMem
= OSDynamicCast( IOSubMemoryDescriptor
, this)))
2047 err
= subMem
->redirect( safeTask
, redirect
);
2049 err
= kIOReturnSuccess
;
2054 IOReturn
IOSubMemoryDescriptor::redirect( task_t safeTask
, bool redirect
)
2056 return( _parent
->redirect( safeTask
, redirect
));
2059 IOReturn
_IOMemoryMap::redirect( task_t safeTask
, bool redirect
)
2061 IOReturn err
= kIOReturnSuccess
;
2064 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
2068 if( logical
&& addressMap
2069 && (get_task_map( safeTask
) != addressMap
)
2070 && (0 == (options
& kIOMapStatic
))) {
2072 IOUnmapPages( addressMap
, logical
, length
);
2074 err
= vm_deallocate( addressMap
, logical
, length
);
2075 err
= memory
->doMap( addressMap
, &logical
,
2076 (options
& ~kIOMapAnywhere
) /*| kIOMapReserve*/,
2079 err
= kIOReturnSuccess
;
2081 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect
, this, logical
, length
, addressMap
);
2090 IOReturn
_IOMemoryMap::unmap( void )
2096 if( logical
&& addressMap
&& (0 == superMap
)
2097 && (0 == (options
& kIOMapStatic
))) {
2099 err
= memory
->doUnmap( addressMap
, logical
, length
);
2100 vm_map_deallocate(addressMap
);
2104 err
= kIOReturnSuccess
;
2113 void _IOMemoryMap::taskDied( void )
2117 vm_map_deallocate(addressMap
);
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
2129 void _IOMemoryMap::taggedRelease(const void *tag
) const
2132 super::taggedRelease(tag
, 2);
2136 void _IOMemoryMap::free()
2142 memory
->removeMapping( this);
2148 superMap
->release();
2153 IOByteCount
_IOMemoryMap::getLength()
2158 IOVirtualAddress
_IOMemoryMap::getVirtualAddress()
2163 task_t
_IOMemoryMap::getAddressTask()
2166 return( superMap
->getAddressTask());
2168 return( addressTask
);
2171 IOOptionBits
_IOMemoryMap::getMapOptions()
2176 IOMemoryDescriptor
* _IOMemoryMap::getMemoryDescriptor()
2181 _IOMemoryMap
* _IOMemoryMap::copyCompatible(
2182 IOMemoryDescriptor
* owner
,
2184 IOVirtualAddress toAddress
,
2185 IOOptionBits _options
,
2186 IOByteCount _offset
,
2187 IOByteCount _length
)
2189 _IOMemoryMap
* mapping
;
2191 if( (!task
) || (!addressMap
) || (addressMap
!= get_task_map(task
)))
2193 if( (options
^ _options
) & kIOMapReadOnly
)
2195 if( (kIOMapDefaultCache
!= (_options
& kIOMapCacheMask
))
2196 && ((options
^ _options
) & kIOMapCacheMask
))
2199 if( (0 == (_options
& kIOMapAnywhere
)) && (logical
!= toAddress
))
2202 if( _offset
< offset
)
2207 if( (_offset
+ _length
) > length
)
2210 if( (length
== _length
) && (!_offset
)) {
2215 mapping
= new _IOMemoryMap
;
2217 && !mapping
->initCompatible( owner
, this, _offset
, _length
)) {
2226 IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset
,
2227 IOPhysicalLength
* length
)
2229 IOPhysicalAddress address
;
2232 address
= memory
->getPhysicalSegment( offset
+ _offset
, length
);
2238 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2241 #define super OSObject
2243 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2245 void IOMemoryDescriptor::initialize( void )
2247 if( 0 == gIOMemoryLock
)
2248 gIOMemoryLock
= IORecursiveLockAlloc();
2250 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey
,
2251 ptoa_64(gIOMaximumMappedIOPageCount
), 64);
2254 void IOMemoryDescriptor::free( void )
2257 _mappings
->release();
2262 IOMemoryMap
* IOMemoryDescriptor::setMapping(
2264 IOVirtualAddress mapAddress
,
2265 IOOptionBits options
)
2269 map
= new _IOMemoryMap
;
2274 && !map
->initWithDescriptor( this, intoTask
, mapAddress
,
2275 options
| kIOMapStatic
, 0, getLength() )) {
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return( makeMapping( this, kernel_task, 0,
                        options | kIOMapAnywhere,
                        0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
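// Usage sketch (illustrative): map the descriptor anywhere in the kernel task
// and access it through the returned IOMemoryMap.  `md` is hypothetical.
//
//     IOMemoryMap * map = md->map();
//     if (map) {
//         void * va = (void *) map->getVirtualAddress();
//         // ... access map->getLength() bytes at va ...
//         map->release();     // releasing the map undoes the mapping
//     }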
2309 IOMemoryMap
* IOMemoryDescriptor::makeMapping(
2310 IOMemoryDescriptor
* owner
,
2312 IOVirtualAddress toAddress
,
2313 IOOptionBits options
,
2315 IOByteCount length
)
2317 _IOMemoryMap
* mapping
= 0;
2323 // look for an existing mapping
2324 if( (iter
= OSCollectionIterator::withCollection( _mappings
))) {
2326 while( (mapping
= (_IOMemoryMap
*) iter
->getNextObject())) {
2328 if( (mapping
= mapping
->copyCompatible(
2329 owner
, intoTask
, toAddress
,
2330 options
| kIOMapReference
,
2340 if( mapping
|| (options
& kIOMapReference
))
2345 mapping
= new _IOMemoryMap
;
2347 && !mapping
->initWithDescriptor( owner
, intoTask
, toAddress
, options
,
2350 IOLog("Didn't make map %08lx : %08lx\n", offset
, length
);
2358 owner
->addMapping( mapping
);
2365 void IOMemoryDescriptor::addMapping(
2366 IOMemoryMap
* mapping
)
2370 _mappings
= OSSet::withCapacity(1);
2372 _mappings
->setObject( mapping
);
2376 void IOMemoryDescriptor::removeMapping(
2377 IOMemoryMap
* mapping
)
2380 _mappings
->removeObject( mapping
);
2383 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2386 #define super IOMemoryDescriptor
2388 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor
, IOMemoryDescriptor
)
2390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2392 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor
* parent
,
2393 IOByteCount offset
, IOByteCount length
,
2394 IODirection direction
)
2399 if( (offset
+ length
) > parent
->getLength())
2403 * We can check the _parent instance variable before having ever set it
2404 * to an initial value because I/O Kit guarantees that all our instance
2405 * variables are zeroed on an object's allocation.
2413 * An existing memory descriptor is being retargeted to
2414 * point to somewhere else. Clean up our present state.
2425 _direction
= direction
;
2426 _tag
= parent
->getTag();
2431 void IOSubMemoryDescriptor::free( void )
2440 IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset
,
2441 IOByteCount
* length
)
2443 IOPhysicalAddress address
;
2444 IOByteCount actualLength
;
2446 assert(offset
<= _length
);
2451 if( offset
>= _length
)
2454 address
= _parent
->getPhysicalSegment( offset
+ _start
, &actualLength
);
2456 if( address
&& length
)
2457 *length
= min( _length
- offset
, actualLength
);
2462 IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset
,
2463 IOByteCount
* length
)
2465 IOPhysicalAddress address
;
2466 IOByteCount actualLength
;
2468 assert(offset
<= _length
);
2473 if( offset
>= _length
)
2476 address
= _parent
->getSourceSegment( offset
+ _start
, &actualLength
);
2478 if( address
&& length
)
2479 *length
= min( _length
- offset
, actualLength
);
2484 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
2485 IOByteCount
* lengthOfSegment
)
2490 IOByteCount
IOSubMemoryDescriptor::readBytes(IOByteCount offset
,
2491 void * bytes
, IOByteCount length
)
2493 IOByteCount byteCount
;
2495 assert(offset
<= _length
);
2497 if( offset
>= _length
)
2501 byteCount
= _parent
->readBytes( _start
+ offset
, bytes
,
2502 min(length
, _length
- offset
) );
2505 return( byteCount
);
2508 IOByteCount
IOSubMemoryDescriptor::writeBytes(IOByteCount offset
,
2509 const void* bytes
, IOByteCount length
)
2511 IOByteCount byteCount
;
2513 assert(offset
<= _length
);
2515 if( offset
>= _length
)
2519 byteCount
= _parent
->writeBytes( _start
+ offset
, bytes
,
2520 min(length
, _length
- offset
) );
2523 return( byteCount
);
2526 IOReturn
IOSubMemoryDescriptor::prepare(
2527 IODirection forDirection
)
2532 err
= _parent
->prepare( forDirection
);
2538 IOReturn
IOSubMemoryDescriptor::complete(
2539 IODirection forDirection
)
2544 err
= _parent
->complete( forDirection
);
2550 IOMemoryMap
* IOSubMemoryDescriptor::makeMapping(
2551 IOMemoryDescriptor
* owner
,
2553 IOVirtualAddress toAddress
,
2554 IOOptionBits options
,
2556 IOByteCount length
)
2558 IOMemoryMap
* mapping
;
2560 mapping
= (IOMemoryMap
*) _parent
->makeMapping(
2562 toAddress
- (_start
+ offset
),
2563 options
| kIOMapReference
,
2564 _start
+ offset
, length
);
2567 mapping
= (IOMemoryMap
*) _parent
->makeMapping(
2570 options
, _start
+ offset
, length
);
2573 mapping
= super::makeMapping( owner
, intoTask
, toAddress
, options
,
2582 IOSubMemoryDescriptor::initWithAddress(void * address
,
2584 IODirection direction
)
2590 IOSubMemoryDescriptor::initWithAddress(vm_address_t address
,
2592 IODirection direction
,
2599 IOSubMemoryDescriptor::initWithPhysicalAddress(
2600 IOPhysicalAddress address
,
2602 IODirection direction
)
2608 IOSubMemoryDescriptor::initWithRanges(
2609 IOVirtualRange
* ranges
,
2611 IODirection direction
,
2619 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange
* ranges
,
2621 IODirection direction
,
2627 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2629 bool IOGeneralMemoryDescriptor::serialize(OSSerialize
* s
) const
2631 OSSymbol
const *keys
[2];
2632 OSObject
*values
[2];
2633 IOVirtualRange
*vcopy
;
2634 unsigned int index
, nRanges
;
2637 if (s
== NULL
) return false;
2638 if (s
->previouslySerialized(this)) return true;
2640 // Pretend we are an array.
2641 if (!s
->addXMLStartTag(this, "array")) return false;
2643 nRanges
= _rangesCount
;
2644 vcopy
= (IOVirtualRange
*) IOMalloc(sizeof(IOVirtualRange
) * nRanges
);
2645 if (vcopy
== 0) return false;
2647 keys
[0] = OSSymbol::withCString("address");
2648 keys
[1] = OSSymbol::withCString("length");
2651 values
[0] = values
[1] = 0;
2653 // From this point on we can go to bail.
2655 // Copy the volatile data so we don't have to allocate memory
2656 // while the lock is held.
2658 if (nRanges
== _rangesCount
) {
2659 for (index
= 0; index
< nRanges
; index
++) {
2660 vcopy
[index
] = _ranges
.v
[index
];
2663 // The descriptor changed out from under us. Give up.
2670 for (index
= 0; index
< nRanges
; index
++)
2672 values
[0] = OSNumber::withNumber(_ranges
.v
[index
].address
, sizeof(_ranges
.v
[index
].address
) * 8);
2673 if (values
[0] == 0) {
2677 values
[1] = OSNumber::withNumber(_ranges
.v
[index
].length
, sizeof(_ranges
.v
[index
].length
) * 8);
2678 if (values
[1] == 0) {
2682 OSDictionary
*dict
= OSDictionary::withObjects((const OSObject
**)values
, (const OSSymbol
**)keys
, 2);
2687 values
[0]->release();
2688 values
[1]->release();
2689 values
[0] = values
[1] = 0;
2691 result
= dict
->serialize(s
);
2697 result
= s
->addXMLEndTag("array");
2701 values
[0]->release();
2703 values
[1]->release();
2709 IOFree(vcopy
, sizeof(IOVirtualRange
) * nRanges
);
2713 bool IOSubMemoryDescriptor::serialize(OSSerialize
* s
) const
2718 if (s
->previouslySerialized(this)) return true;
2720 // Pretend we are a dictionary.
2721 // We must duplicate the functionality of OSDictionary here
2722 // because otherwise object references will not work;
2723 // they are based on the value of the object passed to
2724 // previouslySerialized and addXMLStartTag.
2726 if (!s
->addXMLStartTag(this, "dict")) return false;
2728 char const *keys
[3] = {"offset", "length", "parent"};
2730 OSObject
*values
[3];
2731 values
[0] = OSNumber::withNumber(_start
, sizeof(_start
) * 8);
2734 values
[1] = OSNumber::withNumber(_length
, sizeof(_length
) * 8);
2735 if (values
[1] == 0) {
2736 values
[0]->release();
2739 values
[2] = _parent
;
2742 for (int i
=0; i
<3; i
++) {
2743 if (!s
->addString("<key>") ||
2744 !s
->addString(keys
[i
]) ||
2745 !s
->addXMLEndTag("key") ||
2746 !values
[i
]->serialize(s
)) {
2751 values
[0]->release();
2752 values
[1]->release();
    return s->addXMLEndTag("dict");
}
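// Shape of the output (illustrative, inferred from the code above, not a
// verbatim sample): IOGeneralMemoryDescriptor serializes as an <array> of
// <dict> entries, one per range, each carrying "address" and "length"
// numbers; IOSubMemoryDescriptor serializes as a <dict> with "offset",
// "length" and a nested "parent" descriptor.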
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }