2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
44 #include <IOKit/IOKitDebug.h>
45 #include <libkern/OSDebug.h>
47 #include "IOKitKernelInternal.h"
48 #include "IOCopyMapper.h"
50 #include <libkern/c++/OSContainers.h>
51 #include <libkern/c++/OSDictionary.h>
52 #include <libkern/c++/OSArray.h>
53 #include <libkern/c++/OSSymbol.h>
54 #include <libkern/c++/OSNumber.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <vm/vm_fault.h>
67 #include <vm/vm_protos.h>
69 extern ppnum_t
pmap_find_phys(pmap_t pmap
, addr64_t va
);
70 void ipc_port_release_send(ipc_port_t port
);
72 /* Copy between a physical page and a virtual address in the given vm_map */
73 kern_return_t
copypv(addr64_t source
, addr64_t sink
, unsigned int size
, int which
);
77 memory_object_t pager
,
82 device_pager_deallocate(
85 device_pager_populate_object(
86 memory_object_t pager
,
87 vm_object_offset_t offset
,
91 memory_object_iopl_request(
93 memory_object_offset_t offset
,
96 upl_page_info_array_t user_page_list
,
97 unsigned int *page_list_count
,
100 unsigned int IOTranslateCacheBits(struct phys_entry
*pp
);
104 #define kIOMaximumMappedIOByteCount (512*1024*1024)
106 static IOMapper
* gIOSystemMapper
= NULL
;
108 IOCopyMapper
* gIOCopyMapper
= NULL
;
110 static ppnum_t gIOMaximumMappedIOPageCount
= atop_32(kIOMaximumMappedIOByteCount
);
114 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
116 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor
, OSObject
)
118 #define super IOMemoryDescriptor
120 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor
, IOMemoryDescriptor
)
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
124 static IORecursiveLock
* gIOMemoryLock
;
126 #define LOCK IORecursiveLockLock( gIOMemoryLock)
127 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
128 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
130 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
133 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
135 #define DEBG(fmt, args...) {}
138 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
140 class _IOMemoryMap
: public IOMemoryMap
142 OSDeclareDefaultStructors(_IOMemoryMap
)
144 IOMemoryDescriptor
* fMemory
;
145 IOMemoryMap
* fSuperMap
;
146 mach_vm_size_t fOffset
;
147 mach_vm_address_t fAddress
;
148 mach_vm_size_t fLength
;
150 vm_map_t fAddressMap
;
151 IOOptionBits fOptions
;
153 ipc_port_t fRedirEntry
;
154 IOMemoryDescriptor
* fOwner
;
157 virtual void taggedRelease(const void *tag
= 0) const;
162 // IOMemoryMap methods
163 virtual IOVirtualAddress
getVirtualAddress();
164 virtual IOByteCount
getLength();
165 virtual task_t
getAddressTask();
166 virtual mach_vm_address_t
getAddress();
167 virtual mach_vm_size_t
getSize();
168 virtual IOMemoryDescriptor
* getMemoryDescriptor();
169 virtual IOOptionBits
getMapOptions();
171 virtual IOReturn
unmap();
172 virtual void taskDied();
174 virtual IOReturn
redirect(IOMemoryDescriptor
* newBackingMemory
,
175 IOOptionBits options
,
176 IOByteCount offset
= 0);
178 virtual IOReturn
redirect(IOMemoryDescriptor
* newBackingMemory
,
179 IOOptionBits options
,
180 mach_vm_size_t offset
= 0);
182 virtual IOPhysicalAddress
getPhysicalSegment(IOByteCount offset
,
183 IOByteCount
* length
);
185 // for IOMemoryDescriptor use
186 _IOMemoryMap
* copyCompatible( _IOMemoryMap
* newMapping
);
190 mach_vm_address_t toAddress
,
191 IOOptionBits options
,
192 mach_vm_size_t offset
,
193 mach_vm_size_t length
);
195 bool setMemoryDescriptor(IOMemoryDescriptor
* _memory
, mach_vm_size_t _offset
);
198 task_t intoTask
, bool redirect
);
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
203 // Some data structures and accessor macros used by the initWithOptions
206 enum ioPLBlockFlags
{
207 kIOPLOnDevice
= 0x00000001,
208 kIOPLExternUPL
= 0x00000002,
211 struct typePersMDData
213 const IOGeneralMemoryDescriptor
*fMD
;
214 ipc_port_t fMemEntry
;
219 vm_address_t fIOMDOffset
; // The offset of this iopl in descriptor
220 vm_offset_t fPageInfo
; // Pointer to page list or index into it
221 ppnum_t fMappedBase
; // Page number of first page in this iopl
222 unsigned int fPageOffset
; // Offset within first page of iopl
223 unsigned int fFlags
; // Flags
228 unsigned int fPageCnt
;
229 upl_page_info_t fPageList
[];
233 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
234 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
235 #define getNumIOPL(osd, d) \
236 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
237 #define getPageList(d) (&(d->fPageList[0]))
238 #define computeDataSize(p, u) \
239 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
244 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
249 kern_return_t
device_data_action(
251 ipc_port_t device_pager
,
252 vm_prot_t protection
,
253 vm_object_offset_t offset
,
256 struct ExpansionData
{
258 unsigned int pagerContig
:1;
259 unsigned int unused
:31;
260 IOMemoryDescriptor
* memory
;
263 ExpansionData
* ref
= (ExpansionData
*) device_handle
;
264 IOMemoryDescriptor
* memDesc
;
267 memDesc
= ref
->memory
;
271 kr
= memDesc
->handleFault( device_pager
, 0, 0,
272 offset
, size
, kIOMapDefaultCache
/*?*/);
282 kern_return_t
device_close(
285 struct ExpansionData
{
287 unsigned int pagerContig
:1;
288 unsigned int unused
:31;
289 IOMemoryDescriptor
* memory
;
291 ExpansionData
* ref
= (ExpansionData
*) device_handle
;
293 IODelete( ref
, ExpansionData
, 1 );
295 return( kIOReturnSuccess
);
299 // Note this inline function uses C++ reference arguments to return values
300 // This means that pointers are not passed and NULLs don't have to be
301 // checked for as a NULL reference is illegal.
303 getAddrLenForInd(user_addr_t
&addr
, IOPhysicalLength
&len
, // Output variables
304 UInt32 type
, IOGeneralMemoryDescriptor::Ranges r
, UInt32 ind
)
306 assert(kIOMemoryTypeUIO
== type
307 || kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
308 || kIOMemoryTypePhysical
== type
|| kIOMemoryTypePhysical64
== type
);
309 if (kIOMemoryTypeUIO
== type
) {
311 uio_getiov((uio_t
) r
.uio
, ind
, &addr
, &us
); len
= us
;
313 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
)) {
314 IOAddressRange cur
= r
.v64
[ind
];
319 IOVirtualRange cur
= r
.v
[ind
];
325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
330 * Create a new IOMemoryDescriptor. The buffer is a virtual address
331 * relative to the specified task. If no task is supplied, the kernel
335 IOMemoryDescriptor::withAddress(void * address
,
337 IODirection direction
)
339 return IOMemoryDescriptor::
340 withAddress((vm_address_t
) address
, length
, direction
, kernel_task
);
344 IOMemoryDescriptor::withAddress(vm_address_t address
,
346 IODirection direction
,
352 IOOptionBits options
= (IOOptionBits
) direction
;
353 if (task
== kernel_task
)
354 options
|= kIOMemoryAutoPrepare
;
355 return (IOMemoryDescriptor::withAddressRange(address
, length
, options
, task
));
358 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
361 if (that
->initWithAddress(address
, length
, direction
, task
))
370 IOMemoryDescriptor::withPhysicalAddress(
371 IOPhysicalAddress address
,
373 IODirection direction
)
376 return (IOMemoryDescriptor::withAddressRange(address
, length
, (IOOptionBits
) direction
, NULL
));
378 IOGeneralMemoryDescriptor
*self
= new IOGeneralMemoryDescriptor
;
380 && !self
->initWithPhysicalAddress(address
, length
, direction
)) {
389 IOMemoryDescriptor::withRanges( IOVirtualRange
* ranges
,
391 IODirection direction
,
395 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
398 if (that
->initWithRanges(ranges
, withCount
, direction
, task
, asReference
))
407 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address
,
408 mach_vm_size_t length
,
409 IOOptionBits options
,
412 IOAddressRange range
= { address
, length
};
413 return (IOMemoryDescriptor::withAddressRanges(&range
, 1, options
, task
));
417 IOMemoryDescriptor::withAddressRanges(IOAddressRange
* ranges
,
419 IOOptionBits options
,
422 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
426 options
|= kIOMemoryTypeVirtual64
;
428 options
|= kIOMemoryTypePhysical64
;
430 if (that
->initWithOptions(ranges
, rangeCount
, 0, task
, options
, /* mapper */ 0))
443 * Create a new IOMemoryDescriptor. The buffer is made up of several
444 * virtual address ranges, from a given task.
446 * Passing the ranges as a reference will avoid an extra allocation.
449 IOMemoryDescriptor::withOptions(void * buffers
,
456 IOGeneralMemoryDescriptor
*self
= new IOGeneralMemoryDescriptor
;
459 && !self
->initWithOptions(buffers
, count
, offset
, task
, opts
, mapper
))
468 // Can't leave abstract but this should never be used directly,
469 bool IOMemoryDescriptor::initWithOptions(void * buffers
,
473 IOOptionBits options
,
476 // @@@ gvdl: Should I panic?
477 panic("IOMD::initWithOptions called\n");
482 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange
* ranges
,
484 IODirection direction
,
487 IOGeneralMemoryDescriptor
* that
= new IOGeneralMemoryDescriptor
;
490 if (that
->initWithPhysicalRanges(ranges
, withCount
, direction
, asReference
))
499 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor
* of
,
502 IODirection direction
)
504 IOSubMemoryDescriptor
*self
= new IOSubMemoryDescriptor
;
506 if (self
&& !self
->initSubRange(of
, offset
, length
, direction
)) {
514 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor
*originalMD
)
516 IOGeneralMemoryDescriptor
*origGenMD
=
517 OSDynamicCast(IOGeneralMemoryDescriptor
, originalMD
);
520 return IOGeneralMemoryDescriptor::
521 withPersistentMemoryDescriptor(origGenMD
);
527 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor
*originalMD
)
529 ipc_port_t sharedMem
= (ipc_port_t
) originalMD
->createNamedEntry();
534 if (sharedMem
== originalMD
->_memEntry
) {
535 originalMD
->retain(); // Add a new reference to ourselves
536 ipc_port_release_send(sharedMem
); // Remove extra send right
540 IOGeneralMemoryDescriptor
* self
= new IOGeneralMemoryDescriptor
;
541 typePersMDData initData
= { originalMD
, sharedMem
};
544 && !self
->initWithOptions(&initData
, 1, 0, 0, kIOMemoryTypePersistentMD
, 0)) {
551 void *IOGeneralMemoryDescriptor::createNamedEntry()
554 ipc_port_t sharedMem
;
556 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
558 user_addr_t range0Addr
;
559 IOByteCount range0Len
;
560 getAddrLenForInd(range0Addr
, range0Len
, type
, _ranges
, 0);
561 range0Addr
= trunc_page_64(range0Addr
);
563 vm_size_t size
= ptoa_32(_pages
);
564 vm_address_t kernelPage
= (vm_address_t
) range0Addr
;
566 vm_map_t theMap
= ((_task
== kernel_task
)
567 && (kIOMemoryBufferPageable
& _flags
))
568 ? IOPageableMapForAddress(kernelPage
)
569 : get_task_map(_task
);
571 memory_object_size_t actualSize
= size
;
572 vm_prot_t prot
= VM_PROT_READ
;
574 if (kIODirectionOut
!= (kIODirectionOutIn
& _flags
))
576 prot
|= VM_PROT_WRITE
;
579 prot
|= MAP_MEM_NAMED_REUSE
;
581 error
= mach_make_memory_entry_64(theMap
,
582 &actualSize
, range0Addr
, prot
, &sharedMem
, (ipc_port_t
) _memEntry
);
584 if (KERN_SUCCESS
== error
) {
585 if (actualSize
== size
) {
589 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
590 (UInt64
)range0Addr
, (UInt32
)actualSize
, size
);
592 ipc_port_release_send( sharedMem
);
596 return MACH_PORT_NULL
;
602 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
603 * relative to the specified task. If no task is supplied, the kernel
606 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
607 * initWithRanges again on an existing instance -- note this behavior
608 * is not commonly supported in other I/O Kit classes, although it is
612 IOGeneralMemoryDescriptor::initWithAddress(void * address
,
613 IOByteCount withLength
,
614 IODirection withDirection
)
616 _singleRange
.v
.address
= (vm_address_t
) address
;
617 _singleRange
.v
.length
= withLength
;
619 return initWithRanges(&_singleRange
.v
, 1, withDirection
, kernel_task
, true);
623 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address
,
624 IOByteCount withLength
,
625 IODirection withDirection
,
628 _singleRange
.v
.address
= address
;
629 _singleRange
.v
.length
= withLength
;
631 return initWithRanges(&_singleRange
.v
, 1, withDirection
, withTask
, true);
635 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
636 IOPhysicalAddress address
,
637 IOByteCount withLength
,
638 IODirection withDirection
)
640 _singleRange
.p
.address
= address
;
641 _singleRange
.p
.length
= withLength
;
643 return initWithPhysicalRanges( &_singleRange
.p
, 1, withDirection
, true);
647 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
648 IOPhysicalRange
* ranges
,
650 IODirection direction
,
653 IOOptionBits mdOpts
= direction
| kIOMemoryTypePhysical
;
656 mdOpts
|= kIOMemoryAsReference
;
658 return initWithOptions(ranges
, count
, 0, 0, mdOpts
, /* mapper */ 0);
662 IOGeneralMemoryDescriptor::initWithRanges(
663 IOVirtualRange
* ranges
,
665 IODirection direction
,
669 IOOptionBits mdOpts
= direction
;
672 mdOpts
|= kIOMemoryAsReference
;
675 mdOpts
|= kIOMemoryTypeVirtual
;
677 // Auto-prepare if this is a kernel memory descriptor as very few
678 // clients bother to prepare() kernel memory.
679 // But it was not enforced so what are you going to do?
680 if (task
== kernel_task
)
681 mdOpts
|= kIOMemoryAutoPrepare
;
684 mdOpts
|= kIOMemoryTypePhysical
;
686 return initWithOptions(ranges
, count
, 0, task
, mdOpts
, /* mapper */ 0);
692 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
693 * from a given task, several physical ranges, an UPL from the ubc
694 * system or a uio (may be 64bit) from the BSD subsystem.
696 * Passing the ranges as a reference will avoid an extra allocation.
698 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
699 * existing instance -- note this behavior is not commonly supported in other
700 * I/O Kit classes, although it is supported here.
704 IOGeneralMemoryDescriptor::initWithOptions(void * buffers
,
708 IOOptionBits options
,
711 IOOptionBits type
= options
& kIOMemoryTypeMask
;
713 // Grab the original MD's configuation data to initialse the
714 // arguments to this function.
715 if (kIOMemoryTypePersistentMD
== type
) {
717 typePersMDData
*initData
= (typePersMDData
*) buffers
;
718 const IOGeneralMemoryDescriptor
*orig
= initData
->fMD
;
719 ioGMDData
*dataP
= getDataP(orig
->_memoryEntries
);
721 // Only accept persistent memory descriptors with valid dataP data.
722 assert(orig
->_rangesCount
== 1);
723 if ( !(orig
->_flags
& kIOMemoryPersistent
) || !dataP
)
726 _memEntry
= initData
->fMemEntry
; // Grab the new named entry
727 options
= orig
->_flags
| kIOMemoryAsReference
;
728 _singleRange
= orig
->_singleRange
; // Initialise our range
729 buffers
= &_singleRange
;
732 // Now grab the original task and whatever mapper was previously used
734 mapper
= dataP
->fMapper
;
736 // We are ready to go through the original initialisation now
740 case kIOMemoryTypeUIO
:
741 case kIOMemoryTypeVirtual
:
742 case kIOMemoryTypeVirtual64
:
747 if (vm_map_is_64bit(get_task_map(task
))
748 && (kIOMemoryTypeVirtual
== type
)
749 && ((IOVirtualRange
*) buffers
)->address
)
751 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
756 case kIOMemoryTypePhysical
: // Neither Physical nor UPL should have a task
757 case kIOMemoryTypePhysical64
:
758 mapper
= kIOMapperNone
;
760 case kIOMemoryTypeUPL
:
764 return false; /* bad argument */
771 * We can check the _initialized instance variable before having ever set
772 * it to an initial value because I/O Kit guarantees that all our instance
773 * variables are zeroed on an object's allocation.
778 * An existing memory descriptor is being retargeted to point to
779 * somewhere else. Clean up our present state.
781 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
782 if ((kIOMemoryTypePhysical
!= type
) && (kIOMemoryTypePhysical64
!= type
))
787 if (_ranges
.v
&& _rangesIsAllocated
)
789 if (kIOMemoryTypeUIO
== type
)
790 uio_free((uio_t
) _ranges
.v
);
791 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
))
792 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
);
794 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
798 { ipc_port_release_send((ipc_port_t
) _memEntry
); _memEntry
= 0; }
800 _mappings
->flushCollection();
808 // Grab the appropriate mapper
809 if (mapper
== kIOMapperNone
)
810 mapper
= 0; // No Mapper
811 else if (mapper
== kIOMapperSystem
) {
812 IOMapper::checkForSystemMapper();
813 gIOSystemMapper
= mapper
= IOMapper::gSystem
;
816 // Temp binary compatibility for kIOMemoryThreadSafe
817 if (kIOMemoryReserved6156215
& options
)
819 options
&= ~kIOMemoryReserved6156215
;
820 options
|= kIOMemoryThreadSafe
;
822 // Remove the dynamic internal use flags from the initial setting
823 options
&= ~(kIOMemoryPreparedReadOnly
);
827 // DEPRECATED variable initialisation
828 _direction
= (IODirection
) (_flags
& kIOMemoryDirectionMask
);
830 __iomd_reservedA
= 0;
831 __iomd_reservedB
= 0;
834 if (kIOMemoryThreadSafe
& options
)
837 _prepareLock
= IOLockAlloc();
839 else if (_prepareLock
)
841 IOLockFree(_prepareLock
);
845 if (kIOMemoryTypeUPL
== type
) {
848 unsigned int dataSize
= computeDataSize(/* pages */ 0, /* upls */ 1);
850 if (!_memoryEntries
) {
851 _memoryEntries
= OSData::withCapacity(dataSize
);
855 else if (!_memoryEntries
->initWithCapacity(dataSize
))
858 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
859 dataP
= getDataP(_memoryEntries
);
860 dataP
->fMapper
= mapper
;
863 // _wireCount++; // UPLs start out life wired
866 _pages
+= atop_32(offset
+ count
+ PAGE_MASK
) - atop_32(offset
);
869 upl_page_info_t
*pageList
= UPL_GET_INTERNAL_PAGE_LIST((upl_t
) buffers
);
871 iopl
.fIOPL
= (upl_t
) buffers
;
872 // Set the flag kIOPLOnDevice convieniently equal to 1
873 iopl
.fFlags
= pageList
->device
| kIOPLExternUPL
;
874 iopl
.fIOMDOffset
= 0;
876 _highestPage
= upl_get_highest_page(iopl
.fIOPL
);
878 if (!pageList
->device
) {
879 // Pre-compute the offset into the UPL's page list
880 pageList
= &pageList
[atop_32(offset
)];
883 iopl
.fMappedBase
= mapper
->iovmAlloc(_pages
);
884 mapper
->iovmInsert(iopl
.fMappedBase
, 0, pageList
, _pages
);
887 iopl
.fMappedBase
= 0;
890 iopl
.fMappedBase
= 0;
891 iopl
.fPageInfo
= (vm_address_t
) pageList
;
892 iopl
.fPageOffset
= offset
;
894 _memoryEntries
->appendBytes(&iopl
, sizeof(iopl
));
897 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
898 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
900 // Initialize the memory descriptor
901 if (options
& kIOMemoryAsReference
) {
902 _rangesIsAllocated
= false;
904 // Hack assignment to get the buffer arg into _ranges.
905 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
907 // This also initialises the uio & physical ranges.
908 _ranges
.v
= (IOVirtualRange
*) buffers
;
911 _rangesIsAllocated
= true;
912 switch (_flags
& kIOMemoryTypeMask
)
914 case kIOMemoryTypeUIO
:
915 _ranges
.v
= (IOVirtualRange
*) uio_duplicate((uio_t
) buffers
);
918 case kIOMemoryTypeVirtual64
:
919 case kIOMemoryTypePhysical64
:
920 _ranges
.v64
= IONew(IOAddressRange
, count
);
923 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOAddressRange
));
925 case kIOMemoryTypeVirtual
:
926 case kIOMemoryTypePhysical
:
927 _ranges
.v
= IONew(IOVirtualRange
, count
);
930 bcopy(buffers
, _ranges
.v
, count
* sizeof(IOVirtualRange
));
935 // Find starting address within the vector of ranges
936 Ranges vec
= _ranges
;
939 for (unsigned ind
= 0; ind
< count
; ind
++) {
943 // addr & len are returned by this function
944 getAddrLenForInd(addr
, len
, type
, vec
, ind
);
945 pages
+= (atop_64(addr
+ len
+ PAGE_MASK
) - atop_64(addr
));
947 assert(len
>= length
); // Check for 32 bit wrap around
950 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
952 ppnum_t highPage
= atop_64(addr
+ len
- 1);
953 if (highPage
> _highestPage
)
954 _highestPage
= highPage
;
959 _rangesCount
= count
;
961 // Auto-prepare memory at creation time.
962 // Implied completion when descriptor is free-ed
963 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
964 _wireCount
++; // Physical MDs are, by definition, wired
965 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
967 unsigned dataSize
= computeDataSize(_pages
, /* upls */ count
* 2);
969 if (!_memoryEntries
) {
970 _memoryEntries
= OSData::withCapacity(dataSize
);
974 else if (!_memoryEntries
->initWithCapacity(dataSize
))
977 _memoryEntries
->appendBytes(0, sizeof(ioGMDData
));
978 dataP
= getDataP(_memoryEntries
);
979 dataP
->fMapper
= mapper
;
980 dataP
->fPageCnt
= _pages
;
982 if ( (kIOMemoryPersistent
& _flags
) && !_memEntry
)
983 _memEntry
= createNamedEntry();
985 if ((_flags
& kIOMemoryAutoPrepare
)
986 && prepare() != kIOReturnSuccess
)
999 void IOGeneralMemoryDescriptor::free()
1001 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1006 reserved
->memory
= 0;
1010 if ((kIOMemoryTypePhysical
!= type
) && (kIOMemoryTypePhysical64
!= type
))
1016 _memoryEntries
->release();
1018 if (_ranges
.v
&& _rangesIsAllocated
)
1020 if (kIOMemoryTypeUIO
== type
)
1021 uio_free((uio_t
) _ranges
.v
);
1022 else if ((kIOMemoryTypeVirtual64
== type
) || (kIOMemoryTypePhysical64
== type
))
1023 IODelete(_ranges
.v64
, IOAddressRange
, _rangesCount
);
1025 IODelete(_ranges
.v
, IOVirtualRange
, _rangesCount
);
1030 if (reserved
&& reserved
->devicePager
)
1031 device_pager_deallocate( (memory_object_t
) reserved
->devicePager
);
1033 // memEntry holds a ref on the device pager which owns reserved
1034 // (ExpansionData) so no reserved access after this point
1036 ipc_port_release_send( (ipc_port_t
) _memEntry
);
1039 IOLockFree(_prepareLock
);
1044 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
1046 panic("IOGMD::unmapFromKernel deprecated");
1049 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex
)
1051 panic("IOGMD::mapIntoKernel deprecated");
1057 * Get the direction of the transfer.
1059 IODirection
IOMemoryDescriptor::getDirection() const
1067 * Get the length of the transfer (over all ranges).
1069 IOByteCount
IOMemoryDescriptor::getLength() const
1074 void IOMemoryDescriptor::setTag( IOOptionBits tag
)
1079 IOOptionBits
IOMemoryDescriptor::getTag( void )
1084 // @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
1086 IOMemoryDescriptor::getSourceSegment( IOByteCount offset
, IOByteCount
* length
)
1088 addr64_t physAddr
= 0;
1090 if( prepare() == kIOReturnSuccess
) {
1091 physAddr
= getPhysicalSegment64( offset
, length
);
1095 return( (IOPhysicalAddress
) physAddr
); // truncated but only page offset is used
1098 IOByteCount
IOMemoryDescriptor::readBytes
1099 (IOByteCount offset
, void *bytes
, IOByteCount length
)
1101 addr64_t dstAddr
= (addr64_t
) (UInt32
) bytes
;
1102 IOByteCount remaining
;
1104 // Assert that this entire I/O is withing the available range
1105 assert(offset
< _length
);
1106 assert(offset
+ length
<= _length
);
1107 if (offset
>= _length
) {
1111 remaining
= length
= min(length
, _length
- offset
);
1112 while (remaining
) { // (process another target segment?)
1116 srcAddr64
= getPhysicalSegment64(offset
, &srcLen
);
1120 // Clip segment length to remaining
1121 if (srcLen
> remaining
)
1124 copypv(srcAddr64
, dstAddr
, srcLen
,
1125 cppvPsrc
| cppvNoRefSrc
| cppvFsnk
| cppvKmap
);
1129 remaining
-= srcLen
;
1134 return length
- remaining
;
1137 IOByteCount
IOMemoryDescriptor::writeBytes
1138 (IOByteCount offset
, const void *bytes
, IOByteCount length
)
1140 addr64_t srcAddr
= (addr64_t
) (UInt32
) bytes
;
1141 IOByteCount remaining
;
1143 // Assert that this entire I/O is withing the available range
1144 assert(offset
< _length
);
1145 assert(offset
+ length
<= _length
);
1147 assert( !(kIOMemoryPreparedReadOnly
& _flags
) );
1149 if ( (kIOMemoryPreparedReadOnly
& _flags
) || offset
>= _length
) {
1153 remaining
= length
= min(length
, _length
- offset
);
1154 while (remaining
) { // (process another target segment?)
1158 dstAddr64
= getPhysicalSegment64(offset
, &dstLen
);
1162 // Clip segment length to remaining
1163 if (dstLen
> remaining
)
1166 copypv(srcAddr
, (addr64_t
) dstAddr64
, dstLen
,
1167 cppvPsnk
| cppvFsnk
| cppvNoRefSrc
| cppvNoModSnk
| cppvKmap
);
1171 remaining
-= dstLen
;
1176 return length
- remaining
;
1179 // osfmk/device/iokit_rpc.c
1180 extern "C" unsigned int IODefaultCacheBits(addr64_t pa
);
1182 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position
)
1184 panic("IOGMD::setPosition deprecated");
1187 IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1189 if (kIOMDGetCharacteristics
== op
) {
1191 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1192 return kIOReturnUnderrun
;
1194 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1195 data
->fLength
= _length
;
1196 data
->fSGCount
= _rangesCount
;
1197 data
->fPages
= _pages
;
1198 data
->fDirection
= _direction
;
1200 data
->fIsPrepared
= false;
1202 data
->fIsPrepared
= true;
1203 data
->fHighestPage
= _highestPage
;
1204 if (_memoryEntries
) {
1205 ioGMDData
*gmdData
= getDataP(_memoryEntries
);
1206 ioPLBlock
*ioplList
= getIOPLList(gmdData
);
1207 UInt count
= getNumIOPL(_memoryEntries
, gmdData
);
1209 data
->fIsMapped
= (gmdData
->fMapper
&& _pages
&& (count
> 0)
1210 && ioplList
[0].fMappedBase
);
1212 data
->fPageAlign
= (ioplList
[0].fPageOffset
& PAGE_MASK
) | ~PAGE_MASK
;
1215 data
->fIsMapped
= false;
1218 return kIOReturnSuccess
;
1220 else if (!(kIOMDWalkSegments
& op
))
1221 return kIOReturnBadArgument
;
1223 // Get the next segment
1224 struct InternalState
{
1225 IOMDDMAWalkSegmentArgs fIO
;
1231 // Find the next segment
1232 if (dataSize
< sizeof(*isP
))
1233 return kIOReturnUnderrun
;
1235 isP
= (InternalState
*) vData
;
1236 UInt offset
= isP
->fIO
.fOffset
;
1237 bool mapped
= isP
->fIO
.fMapped
;
1239 if (offset
>= _length
)
1240 return (offset
== _length
)? kIOReturnOverrun
: kIOReturnInternalError
;
1242 // Validate the previous offset
1243 UInt ind
, off2Ind
= isP
->fOffset2Index
;
1244 if ((kIOMDFirstSegment
!= op
)
1246 && (offset
== isP
->fNextOffset
|| off2Ind
<= offset
))
1249 ind
= off2Ind
= 0; // Start from beginning
1253 if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical
) {
1255 // Physical address based memory descriptor
1256 const IOPhysicalRange
*physP
= (IOPhysicalRange
*) &_ranges
.p
[0];
1258 // Find the range after the one that contains the offset
1260 for (len
= 0; off2Ind
<= offset
; ind
++) {
1261 len
= physP
[ind
].length
;
1265 // Calculate length within range and starting address
1266 length
= off2Ind
- offset
;
1267 address
= physP
[ind
- 1].address
+ len
- length
;
1269 // see how far we can coalesce ranges
1270 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1271 len
= physP
[ind
].length
;
1277 // correct contiguous check overshoot
1281 else if ( (_flags
& kIOMemoryTypeMask
) == kIOMemoryTypePhysical64
) {
1283 // Physical address based memory descriptor
1284 const IOAddressRange
*physP
= (IOAddressRange
*) &_ranges
.v64
[0];
1286 // Find the range after the one that contains the offset
1288 for (len
= 0; off2Ind
<= offset
; ind
++) {
1289 len
= physP
[ind
].length
;
1293 // Calculate length within range and starting address
1294 length
= off2Ind
- offset
;
1295 address
= physP
[ind
- 1].address
+ len
- length
;
1297 // see how far we can coalesce ranges
1298 while (ind
< _rangesCount
&& address
+ length
== physP
[ind
].address
) {
1299 len
= physP
[ind
].length
;
1305 // correct contiguous check overshoot
1311 panic("IOGMD: not wired for the IODMACommand");
1313 assert(_memoryEntries
);
1315 ioGMDData
* dataP
= getDataP(_memoryEntries
);
1316 const ioPLBlock
*ioplList
= getIOPLList(dataP
);
1317 UInt numIOPLs
= getNumIOPL(_memoryEntries
, dataP
);
1318 upl_page_info_t
*pageList
= getPageList(dataP
);
1320 assert(numIOPLs
> 0);
1322 // Scan through iopl info blocks looking for block containing offset
1323 while (ind
< numIOPLs
&& offset
>= ioplList
[ind
].fIOMDOffset
)
1326 // Go back to actual range as search goes past it
1327 ioPLBlock ioplInfo
= ioplList
[ind
- 1];
1328 off2Ind
= ioplInfo
.fIOMDOffset
;
1331 length
= ioplList
[ind
].fIOMDOffset
;
1334 length
-= offset
; // Remainder within iopl
1336 // Subtract offset till this iopl in total list
1339 // If a mapped address is requested and this is a pre-mapped IOPL
1340 // then just need to compute an offset relative to the mapped base.
1341 if (mapped
&& ioplInfo
.fMappedBase
) {
1342 offset
+= (ioplInfo
.fPageOffset
& PAGE_MASK
);
1343 address
= ptoa_64(ioplInfo
.fMappedBase
) + offset
;
1344 continue; // Done leave do/while(false) now
1347 // The offset is rebased into the current iopl.
1348 // Now add the iopl 1st page offset.
1349 offset
+= ioplInfo
.fPageOffset
;
1351 // For external UPLs the fPageInfo field points directly to
1352 // the upl's upl_page_info_t array.
1353 if (ioplInfo
.fFlags
& kIOPLExternUPL
)
1354 pageList
= (upl_page_info_t
*) ioplInfo
.fPageInfo
;
1356 pageList
= &pageList
[ioplInfo
.fPageInfo
];
1358 // Check for direct device non-paged memory
1359 if ( ioplInfo
.fFlags
& kIOPLOnDevice
) {
1360 address
= ptoa_64(pageList
->phys_addr
) + offset
;
1361 continue; // Done leave do/while(false) now
1364 // Now we need compute the index into the pageList
1365 UInt pageInd
= atop_32(offset
);
1366 offset
&= PAGE_MASK
;
1368 // Compute the starting address of this segment
1369 IOPhysicalAddress pageAddr
= pageList
[pageInd
].phys_addr
;
1370 address
= ptoa_64(pageAddr
) + offset
;
1372 // length is currently set to the length of the remainider of the iopl.
1373 // We need to check that the remainder of the iopl is contiguous.
1374 // This is indicated by pageList[ind].phys_addr being sequential.
1375 IOByteCount contigLength
= PAGE_SIZE
- offset
;
1376 while (contigLength
< length
1377 && ++pageAddr
== pageList
[++pageInd
].phys_addr
)
1379 contigLength
+= PAGE_SIZE
;
1382 if (contigLength
< length
)
1383 length
= contigLength
;
1391 // Update return values and state
1392 isP
->fIO
.fIOVMAddr
= address
;
1393 isP
->fIO
.fLength
= length
;
1395 isP
->fOffset2Index
= off2Ind
;
1396 isP
->fNextOffset
= isP
->fIO
.fOffset
+ length
;
1398 return kIOReturnSuccess
;
1402 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1405 IOByteCount length
= 0;
1406 addr64_t address
= 0;
1408 if (gIOSystemMapper
&& (kIOMemoryTypePhysical
== (_flags
& kIOMemoryTypeMask
)))
1409 return (super::getPhysicalSegment64(offset
, lengthOfSegment
));
1411 if (offset
< _length
) // (within bounds?)
1413 IOMDDMAWalkSegmentState _state
;
1414 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) &_state
;
1416 state
->fOffset
= offset
;
1417 state
->fLength
= _length
- offset
;
1418 state
->fMapped
= false;
1420 ret
= dmaCommandOperation(kIOMDFirstSegment
, _state
, sizeof(_state
));
1422 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
))
1423 DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1424 ret
, this, state
->fOffset
,
1425 state
->fIOVMAddr
, state
->fLength
);
1426 if (kIOReturnSuccess
== ret
)
1428 address
= state
->fIOVMAddr
;
1429 length
= state
->fLength
;
1435 if (lengthOfSegment
)
1436 *lengthOfSegment
= length
;
1442 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1445 IOByteCount length
= 0;
1446 addr64_t address
= 0;
1448 // assert(offset <= _length);
1450 if (offset
< _length
) // (within bounds?)
1452 IOMDDMAWalkSegmentState _state
;
1453 IOMDDMAWalkSegmentArgs
* state
= (IOMDDMAWalkSegmentArgs
*) &_state
;
1455 state
->fOffset
= offset
;
1456 state
->fLength
= _length
- offset
;
1457 state
->fMapped
= true;
1459 ret
= dmaCommandOperation(
1460 kIOMDFirstSegment
, _state
, sizeof(_state
));
1462 if ((kIOReturnSuccess
!= ret
) && (kIOReturnOverrun
!= ret
))
1463 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1464 ret
, this, state
->fOffset
,
1465 state
->fIOVMAddr
, state
->fLength
);
1466 if (kIOReturnSuccess
== ret
)
1468 address
= state
->fIOVMAddr
;
1469 length
= state
->fLength
;
1476 if ((address
+ length
) > 0x100000000ULL
)
1478 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1479 address
, length
, (getMetaClass())->getClassName());
1482 if (lengthOfSegment
)
1483 *lengthOfSegment
= length
;
1485 return ((IOPhysicalAddress
) address
);
1489 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1491 IOPhysicalAddress phys32
;
1494 IOMapper
* mapper
= 0;
1496 phys32
= getPhysicalSegment(offset
, lengthOfSegment
);
1500 if (gIOSystemMapper
)
1501 mapper
= gIOSystemMapper
;
1505 IOByteCount origLen
;
1507 phys64
= mapper
->mapAddr(phys32
);
1508 origLen
= *lengthOfSegment
;
1509 length
= page_size
- (phys64
& (page_size
- 1));
1510 while ((length
< origLen
)
1511 && ((phys64
+ length
) == mapper
->mapAddr(phys32
+ length
)))
1512 length
+= page_size
;
1513 if (length
> origLen
)
1516 *lengthOfSegment
= length
;
1519 phys64
= (addr64_t
) phys32
;
1525 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset
, IOByteCount
*lengthOfSegment
)
1527 IOPhysicalAddress address
= 0;
1528 IOPhysicalLength length
= 0;
1529 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1531 assert(offset
<= _length
);
1533 if ( type
== kIOMemoryTypeUPL
)
1534 return super::getSourceSegment( offset
, lengthOfSegment
);
1535 else if ( offset
< _length
) // (within bounds?)
1537 unsigned rangesIndex
= 0;
1538 Ranges vec
= _ranges
;
1541 // Find starting address within the vector of ranges
1543 getAddrLenForInd(addr
, length
, type
, vec
, rangesIndex
);
1544 if (offset
< length
)
1546 offset
-= length
; // (make offset relative)
1550 // Now that we have the starting range,
1551 // lets find the last contiguous range
1555 for ( ++rangesIndex
; rangesIndex
< _rangesCount
; rangesIndex
++ ) {
1556 user_addr_t newAddr
;
1557 IOPhysicalLength newLen
;
1559 getAddrLenForInd(newAddr
, newLen
, type
, vec
, rangesIndex
);
1560 if (addr
+ length
!= newAddr
)
1565 address
= (IOPhysicalAddress
) addr
; // Truncate address to 32bit
1570 if ( lengthOfSegment
) *lengthOfSegment
= length
;
1575 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1576 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset
,
1577 /* DEPRECATED */ IOByteCount
* lengthOfSegment
)
1579 if (_task
== kernel_task
)
1580 return (void *) getSourceSegment(offset
, lengthOfSegment
);
1582 panic("IOGMD::getVirtualSegment deprecated");
1586 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1591 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op
, void *vData
, UInt dataSize
) const
1593 if (kIOMDGetCharacteristics
== op
) {
1594 if (dataSize
< sizeof(IOMDDMACharacteristics
))
1595 return kIOReturnUnderrun
;
1597 IOMDDMACharacteristics
*data
= (IOMDDMACharacteristics
*) vData
;
1598 data
->fLength
= getLength();
1600 data
->fDirection
= _direction
;
1601 if (IOMapper::gSystem
)
1602 data
->fIsMapped
= true;
1603 data
->fIsPrepared
= true; // Assume prepared - fails safe
1605 else if (kIOMDWalkSegments
& op
) {
1606 if (dataSize
< sizeof(IOMDDMAWalkSegmentArgs
))
1607 return kIOReturnUnderrun
;
1609 IOMDDMAWalkSegmentArgs
*data
= (IOMDDMAWalkSegmentArgs
*) vData
;
1610 IOByteCount offset
= (IOByteCount
) data
->fOffset
;
1612 IOPhysicalLength length
;
1613 IOMemoryDescriptor
*ncmd
= const_cast<IOMemoryDescriptor
*>(this);
1614 if (data
->fMapped
&& IOMapper::gSystem
)
1615 data
->fIOVMAddr
= ncmd
->getPhysicalSegment(offset
, &length
);
1617 data
->fIOVMAddr
= ncmd
->getPhysicalSegment64(offset
, &length
);
1618 data
->fLength
= length
;
1621 return kIOReturnBadArgument
;
1623 return kIOReturnSuccess
;
1626 IOReturn
IOMemoryDescriptor::setPurgeable( IOOptionBits newState
,
1627 IOOptionBits
* oldState
)
1629 IOReturn err
= kIOReturnSuccess
;
1630 vm_purgable_t control
;
1637 err
= kIOReturnNotReady
;
1641 control
= VM_PURGABLE_SET_STATE
;
1644 case kIOMemoryPurgeableKeepCurrent
:
1645 control
= VM_PURGABLE_GET_STATE
;
1648 case kIOMemoryPurgeableNonVolatile
:
1649 state
= VM_PURGABLE_NONVOLATILE
;
1651 case kIOMemoryPurgeableVolatile
:
1652 state
= VM_PURGABLE_VOLATILE
;
1654 case kIOMemoryPurgeableEmpty
:
1655 state
= VM_PURGABLE_EMPTY
;
1658 err
= kIOReturnBadArgument
;
1662 if (kIOReturnSuccess
!= err
)
1665 err
= mach_memory_entry_purgable_control((ipc_port_t
) _memEntry
, control
, &state
);
1669 if (kIOReturnSuccess
== err
)
1673 case VM_PURGABLE_NONVOLATILE
:
1674 state
= kIOMemoryPurgeableNonVolatile
;
1676 case VM_PURGABLE_VOLATILE
:
1677 state
= kIOMemoryPurgeableVolatile
;
1679 case VM_PURGABLE_EMPTY
:
1680 state
= kIOMemoryPurgeableEmpty
;
1683 state
= kIOMemoryPurgeableNonVolatile
;
1684 err
= kIOReturnNotReady
;
1696 extern "C" void dcache_incoherent_io_flush64(addr64_t pa
, unsigned int count
);
1697 extern "C" void dcache_incoherent_io_store64(addr64_t pa
, unsigned int count
);
1699 IOReturn
IOMemoryDescriptor::performOperation( IOOptionBits options
,
1700 IOByteCount offset
, IOByteCount length
)
1702 IOByteCount remaining
;
1703 void (*func
)(addr64_t pa
, unsigned int count
) = 0;
1707 case kIOMemoryIncoherentIOFlush
:
1708 func
= &dcache_incoherent_io_flush64
;
1710 case kIOMemoryIncoherentIOStore
:
1711 func
= &dcache_incoherent_io_store64
;
1716 return (kIOReturnUnsupported
);
1718 remaining
= length
= min(length
, getLength() - offset
);
1720 // (process another target segment?)
1725 dstAddr64
= getPhysicalSegment64(offset
, &dstLen
);
1729 // Clip segment length to remaining
1730 if (dstLen
> remaining
)
1733 (*func
)(dstAddr64
, dstLen
);
1736 remaining
-= dstLen
;
1739 return (remaining
? kIOReturnUnderrun
: kIOReturnSuccess
);
1742 #if defined(__ppc__) || defined(__arm__)
1743 extern vm_offset_t static_memory_end
;
1744 #define io_kernel_static_end static_memory_end
1746 extern vm_offset_t first_avail
;
1747 #define io_kernel_static_end first_avail
1750 static kern_return_t
1751 io_get_kernel_static_upl(
1753 vm_address_t offset
,
1754 vm_size_t
*upl_size
,
1756 upl_page_info_array_t page_list
,
1757 unsigned int *count
,
1758 ppnum_t
*highest_page
)
1760 unsigned int pageCount
, page
;
1762 ppnum_t highestPage
= 0;
1764 pageCount
= atop_32(*upl_size
);
1765 if (pageCount
> *count
)
1770 for (page
= 0; page
< pageCount
; page
++)
1772 phys
= pmap_find_phys(kernel_pmap
, ((addr64_t
)offset
) + ptoa_64(page
));
1775 page_list
[page
].phys_addr
= phys
;
1776 page_list
[page
].pageout
= 0;
1777 page_list
[page
].absent
= 0;
1778 page_list
[page
].dirty
= 0;
1779 page_list
[page
].precious
= 0;
1780 page_list
[page
].device
= 0;
1781 if (phys
> highestPage
)
1785 *highest_page
= highestPage
;
1787 return ((page
>= pageCount
) ? kIOReturnSuccess
: kIOReturnVMError
);
1790 IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection
)
1792 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
1793 IOReturn error
= kIOReturnCannotWire
;
1795 ppnum_t mapBase
= 0;
1797 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
1799 assert(!_wireCount
);
1800 assert(kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
);
1802 if (_pages
>= gIOMaximumMappedIOPageCount
)
1803 return kIOReturnNoResources
;
1805 dataP
= getDataP(_memoryEntries
);
1806 mapper
= dataP
->fMapper
;
1807 if (mapper
&& _pages
)
1808 mapBase
= mapper
->iovmAlloc(_pages
);
1810 // Note that appendBytes(NULL) zeros the data up to the
1812 _memoryEntries
->appendBytes(0, dataP
->fPageCnt
* sizeof(upl_page_info_t
));
1813 dataP
= 0; // May no longer be valid so lets not get tempted.
1815 if (forDirection
== kIODirectionNone
)
1816 forDirection
= _direction
;
1818 int uplFlags
; // This Mem Desc's default flags for upl creation
1819 switch (kIODirectionOutIn
& forDirection
)
1821 case kIODirectionOut
:
1822 // Pages do not need to be marked as dirty on commit
1823 uplFlags
= UPL_COPYOUT_FROM
;
1824 _flags
|= kIOMemoryPreparedReadOnly
;
1827 case kIODirectionIn
:
1829 uplFlags
= 0; // i.e. ~UPL_COPYOUT_FROM
1832 uplFlags
|= UPL_SET_IO_WIRE
| UPL_SET_LITE
;
1834 #ifdef UPL_NEED_32BIT_ADDR
1835 if (kIODirectionPrepareToPhys32
& forDirection
)
1836 uplFlags
|= UPL_NEED_32BIT_ADDR
;
1839 // Find the appropriate vm_map for the given task
1841 if (_task
== kernel_task
&& (kIOMemoryBufferPageable
& _flags
))
1844 { curMap
= get_task_map(_task
); }
1846 // Iterate over the vector of virtual ranges
1847 Ranges vec
= _ranges
;
1848 unsigned int pageIndex
= 0;
1849 IOByteCount mdOffset
= 0;
1850 ppnum_t highestPage
= 0;
1851 for (UInt range
= 0; range
< _rangesCount
; range
++) {
1853 user_addr_t startPage
;
1854 IOByteCount numBytes
;
1855 ppnum_t highPage
= 0;
1857 // Get the startPage address and length of vec[range]
1858 getAddrLenForInd(startPage
, numBytes
, type
, vec
, range
);
1859 iopl
.fPageOffset
= (short) startPage
& PAGE_MASK
;
1860 numBytes
+= iopl
.fPageOffset
;
1861 startPage
= trunc_page_64(startPage
);
1864 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1866 iopl
.fMappedBase
= 0;
1868 // Iterate over the current range, creating UPLs
1870 dataP
= getDataP(_memoryEntries
);
1871 vm_address_t kernelStart
= (vm_address_t
) startPage
;
1875 else if (!sharedMem
) {
1876 assert(_task
== kernel_task
);
1877 theMap
= IOPageableMapForAddress(kernelStart
);
1882 upl_page_info_array_t pageInfo
= getPageList(dataP
);
1883 int ioplFlags
= uplFlags
;
1884 upl_page_list_ptr_t baseInfo
= &pageInfo
[pageIndex
];
1886 vm_size_t ioplSize
= round_page_32(numBytes
);
1887 unsigned int numPageInfo
= atop_32(ioplSize
);
1889 if (theMap
== kernel_map
&& kernelStart
< io_kernel_static_end
) {
1890 error
= io_get_kernel_static_upl(theMap
,
1898 else if (sharedMem
) {
1899 error
= memory_object_iopl_request(sharedMem
,
1909 error
= vm_map_create_upl(theMap
,
1919 if (error
!= KERN_SUCCESS
)
1923 highPage
= upl_get_highest_page(iopl
.fIOPL
);
1924 if (highPage
> highestPage
)
1925 highestPage
= highPage
;
1927 error
= kIOReturnCannotWire
;
1929 if (baseInfo
->device
) {
1931 iopl
.fFlags
= kIOPLOnDevice
;
1932 // Don't translate device memory at all
1933 if (mapper
&& mapBase
) {
1934 mapper
->iovmFree(mapBase
, _pages
);
1936 iopl
.fMappedBase
= 0;
1942 mapper
->iovmInsert(mapBase
, pageIndex
,
1943 baseInfo
, numPageInfo
);
1946 iopl
.fIOMDOffset
= mdOffset
;
1947 iopl
.fPageInfo
= pageIndex
;
1949 if ((_flags
& kIOMemoryAutoPrepare
) && iopl
.fIOPL
)
1951 upl_commit(iopl
.fIOPL
, 0, 0);
1952 upl_deallocate(iopl
.fIOPL
);
1956 if (!_memoryEntries
->appendBytes(&iopl
, sizeof(iopl
))) {
1957 // Clean up partial created and unsaved iopl
1959 upl_abort(iopl
.fIOPL
, 0);
1960 upl_deallocate(iopl
.fIOPL
);
1965 // Check for a multiple iopl's in one virtual range
1966 pageIndex
+= numPageInfo
;
1967 mdOffset
-= iopl
.fPageOffset
;
1968 if (ioplSize
< numBytes
) {
1969 numBytes
-= ioplSize
;
1970 startPage
+= ioplSize
;
1971 mdOffset
+= ioplSize
;
1972 iopl
.fPageOffset
= 0;
1974 iopl
.fMappedBase
= mapBase
+ pageIndex
;
1977 mdOffset
+= numBytes
;
1983 _highestPage
= highestPage
;
1985 return kIOReturnSuccess
;
1989 dataP
= getDataP(_memoryEntries
);
1990 UInt done
= getNumIOPL(_memoryEntries
, dataP
);
1991 ioPLBlock
*ioplList
= getIOPLList(dataP
);
1993 for (UInt range
= 0; range
< done
; range
++)
1995 if (ioplList
[range
].fIOPL
) {
1996 upl_abort(ioplList
[range
].fIOPL
, 0);
1997 upl_deallocate(ioplList
[range
].fIOPL
);
2000 (void) _memoryEntries
->initWithBytes(dataP
, sizeof(ioGMDData
)); // == setLength()
2002 if (mapper
&& mapBase
)
2003 mapper
->iovmFree(mapBase
, _pages
);
2006 if (error
== KERN_FAILURE
)
2007 error
= kIOReturnCannotWire
;
2015 * Prepare the memory for an I/O transfer. This involves paging in
2016 * the memory, if necessary, and wiring it down for the duration of
2017 * the transfer. The complete() method completes the processing of
2018 * the memory after the I/O transfer finishes. This method needn't
2019 * called for non-pageable memory.
2021 IOReturn
IOGeneralMemoryDescriptor::prepare(IODirection forDirection
)
2023 IOReturn error
= kIOReturnSuccess
;
2024 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2026 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
2027 return kIOReturnSuccess
;
2030 IOLockLock(_prepareLock
);
2033 && (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) ) {
2034 error
= wireVirtual(forDirection
);
2037 if (kIOReturnSuccess
== error
)
2041 IOLockUnlock(_prepareLock
);
2049 * Complete processing of the memory after an I/O transfer finishes.
2050 * This method should not be called unless a prepare was previously
2051 * issued; the prepare() and complete() must occur in pairs, before
2052 * before and after an I/O transfer involving pageable memory.
2055 IOReturn
IOGeneralMemoryDescriptor::complete(IODirection
/* forDirection */)
2057 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2059 if ((kIOMemoryTypePhysical
== type
) || (kIOMemoryTypePhysical64
== type
))
2060 return kIOReturnSuccess
;
2063 IOLockLock(_prepareLock
);
2072 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2073 ioGMDData
* dataP
= getDataP(_memoryEntries
);
2074 ioPLBlock
*ioplList
= getIOPLList(dataP
);
2075 UInt count
= getNumIOPL(_memoryEntries
, dataP
);
2077 if (dataP
->fMapper
&& _pages
&& ioplList
[0].fMappedBase
)
2078 dataP
->fMapper
->iovmFree(ioplList
[0].fMappedBase
, _pages
);
2080 // Only complete iopls that we created which are for TypeVirtual
2081 if (kIOMemoryTypeVirtual
== type
|| kIOMemoryTypeVirtual64
== type
|| kIOMemoryTypeUIO
== type
) {
2082 for (UInt ind
= 0; ind
< count
; ind
++)
2083 if (ioplList
[ind
].fIOPL
) {
2084 upl_commit(ioplList
[ind
].fIOPL
, 0, 0);
2085 upl_deallocate(ioplList
[ind
].fIOPL
);
2088 (void) _memoryEntries
->initWithBytes(dataP
, sizeof(ioGMDData
)); // == setLength()
2093 IOLockUnlock(_prepareLock
);
2095 return kIOReturnSuccess
;
2098 IOReturn
IOGeneralMemoryDescriptor::doMap(
2099 vm_map_t __addressMap
,
2100 IOVirtualAddress
* __address
,
2101 IOOptionBits options
,
2102 IOByteCount __offset
,
2103 IOByteCount __length
)
2106 if (!(kIOMap64Bit
& options
)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2108 _IOMemoryMap
* mapping
= (_IOMemoryMap
*) *__address
;
2109 mach_vm_size_t offset
= mapping
->fOffset
+ __offset
;
2110 mach_vm_size_t length
= mapping
->fLength
;
2113 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
2115 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2116 Ranges vec
= _ranges
;
2118 user_addr_t range0Addr
= 0;
2119 IOByteCount range0Len
= 0;
2122 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
2124 // mapping source == dest? (could be much better)
2126 && (mapping
->fAddressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
2127 && (1 == _rangesCount
) && (0 == offset
)
2128 && range0Addr
&& (length
<= range0Len
) )
2130 mapping
->fAddress
= range0Addr
;
2131 mapping
->fOptions
|= kIOMapStatic
;
2133 return( kIOReturnSuccess
);
2136 if( 0 == sharedMem
) {
2138 vm_size_t size
= ptoa_32(_pages
);
2142 memory_object_size_t actualSize
= size
;
2143 vm_prot_t prot
= VM_PROT_READ
;
2144 if (!(kIOMapReadOnly
& options
))
2145 prot
|= VM_PROT_WRITE
;
2146 else if (kIOMapDefaultCache
!= (options
& kIOMapCacheMask
))
2147 prot
|= VM_PROT_WRITE
;
2149 kr
= mach_make_memory_entry_64(get_task_map(_task
),
2150 &actualSize
, range0Addr
,
2154 if( (KERN_SUCCESS
== kr
) && (actualSize
!= round_page_32(size
))) {
2156 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2157 range0Addr
, (UInt32
) actualSize
, size
);
2159 kr
= kIOReturnVMError
;
2160 ipc_port_release_send( sharedMem
);
2163 if( KERN_SUCCESS
!= kr
)
2164 sharedMem
= MACH_PORT_NULL
;
2166 } else do { // _task == 0, must be physical
2168 memory_object_t pager
;
2169 unsigned int flags
= 0;
2171 IOPhysicalLength segLen
;
2173 pa
= getPhysicalSegment64( offset
, &segLen
);
2176 reserved
= IONew( ExpansionData
, 1 );
2180 reserved
->pagerContig
= (1 == _rangesCount
);
2181 reserved
->memory
= this;
2183 /*What cache mode do we need*/
2184 switch(options
& kIOMapCacheMask
) {
2186 case kIOMapDefaultCache
:
2188 flags
= IODefaultCacheBits(pa
);
2189 if (DEVICE_PAGER_CACHE_INHIB
& flags
)
2191 if (DEVICE_PAGER_GUARDED
& flags
)
2192 mapping
->fOptions
|= kIOMapInhibitCache
;
2194 mapping
->fOptions
|= kIOMapWriteCombineCache
;
2196 else if (DEVICE_PAGER_WRITE_THROUGH
& flags
)
2197 mapping
->fOptions
|= kIOMapWriteThruCache
;
2199 mapping
->fOptions
|= kIOMapCopybackCache
;
2202 case kIOMapInhibitCache
:
2203 flags
= DEVICE_PAGER_CACHE_INHIB
|
2204 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2207 case kIOMapWriteThruCache
:
2208 flags
= DEVICE_PAGER_WRITE_THROUGH
|
2209 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2212 case kIOMapCopybackCache
:
2213 flags
= DEVICE_PAGER_COHERENT
;
2216 case kIOMapWriteCombineCache
:
2217 flags
= DEVICE_PAGER_CACHE_INHIB
|
2218 DEVICE_PAGER_COHERENT
;
2222 flags
|= reserved
->pagerContig
? DEVICE_PAGER_CONTIGUOUS
: 0;
2224 pager
= device_pager_setup( (memory_object_t
) 0, (int) reserved
,
2229 kr
= mach_memory_object_memory_entry_64( (host_t
) 1, false /*internal*/,
2230 size
, VM_PROT_READ
| VM_PROT_WRITE
, pager
, &sharedMem
);
2232 assert( KERN_SUCCESS
== kr
);
2233 if( KERN_SUCCESS
!= kr
)
2235 device_pager_deallocate( pager
);
2236 pager
= MACH_PORT_NULL
;
2237 sharedMem
= MACH_PORT_NULL
;
2240 if( pager
&& sharedMem
)
2241 reserved
->devicePager
= pager
;
2243 IODelete( reserved
, ExpansionData
, 1 );
2249 _memEntry
= (void *) sharedMem
;
2254 result
= kIOReturnVMError
;
2256 result
= super::doMap( __addressMap
, __address
,
2257 options
, __offset
, __length
);
2262 IOReturn
IOGeneralMemoryDescriptor::doUnmap(
2263 vm_map_t addressMap
,
2264 IOVirtualAddress __address
,
2265 IOByteCount __length
)
2267 return (super::doUnmap(addressMap
, __address
, __length
));
2270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2272 OSDefineMetaClassAndAbstractStructors( IOMemoryMap
, OSObject
)
2274 /* inline function implementation */
2275 IOPhysicalAddress
IOMemoryMap::getPhysicalAddress()
2276 { return( getPhysicalSegment( 0, 0 )); }
2280 #define super IOMemoryMap
2282 OSDefineMetaClassAndStructors(_IOMemoryMap
, IOMemoryMap
)
2284 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2286 bool _IOMemoryMap::init(
2288 mach_vm_address_t toAddress
,
2289 IOOptionBits _options
,
2290 mach_vm_size_t _offset
,
2291 mach_vm_size_t _length
)
2299 fAddressMap
= get_task_map(intoTask
);
2302 vm_map_reference(fAddressMap
);
2304 fAddressTask
= intoTask
;
2305 fOptions
= _options
;
2308 fAddress
= toAddress
;
2313 bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor
* _memory
, mach_vm_size_t _offset
)
2320 if( (_offset
+ fLength
) > _memory
->getLength())
2328 if (fMemory
!= _memory
)
2329 fMemory
->removeMapping(this);
2337 struct IOMemoryDescriptorMapAllocRef
2339 ipc_port_t sharedMem
;
2340 mach_vm_address_t mapped
;
2341 mach_vm_size_t size
;
2342 mach_vm_size_t sourceOffset
;
2343 IOOptionBits options
;
2346 static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map
, void * _ref
)
2348 IOMemoryDescriptorMapAllocRef
* ref
= (IOMemoryDescriptorMapAllocRef
*)_ref
;
2354 vm_prot_t prot
= VM_PROT_READ
2355 | ((ref
->options
& kIOMapReadOnly
) ? 0 : VM_PROT_WRITE
);
2357 // VM system requires write access to change cache mode
2358 if (kIOMapDefaultCache
!= (ref
->options
& kIOMapCacheMask
))
2359 prot
|= VM_PROT_WRITE
;
2361 // set memory entry cache
2362 vm_prot_t memEntryCacheMode
= prot
| MAP_MEM_ONLY
;
2363 switch (ref
->options
& kIOMapCacheMask
)
2365 case kIOMapInhibitCache
:
2366 SET_MAP_MEM(MAP_MEM_IO
, memEntryCacheMode
);
2369 case kIOMapWriteThruCache
:
2370 SET_MAP_MEM(MAP_MEM_WTHRU
, memEntryCacheMode
);
2373 case kIOMapWriteCombineCache
:
2374 SET_MAP_MEM(MAP_MEM_WCOMB
, memEntryCacheMode
);
2377 case kIOMapCopybackCache
:
2378 SET_MAP_MEM(MAP_MEM_COPYBACK
, memEntryCacheMode
);
2381 case kIOMapDefaultCache
:
2383 SET_MAP_MEM(MAP_MEM_NOOP
, memEntryCacheMode
);
2387 vm_size_t unused
= 0;
2389 err
= mach_make_memory_entry( NULL
/*unused*/, &unused
, 0 /*unused*/,
2390 memEntryCacheMode
, NULL
, ref
->sharedMem
);
2391 if (KERN_SUCCESS
!= err
)
2392 IOLog("MAP_MEM_ONLY failed %d\n", err
);
2394 err
= mach_vm_map( map
,
2396 ref
->size
, 0 /* mask */,
2397 (( ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
2398 | VM_MAKE_TAG(VM_MEMORY_IOKIT
),
2399 ref
->sharedMem
, ref
->sourceOffset
,
2405 if( KERN_SUCCESS
!= err
) {
2413 err
= mach_vm_allocate( map
, &ref
->mapped
, ref
->size
,
2414 ((ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
2415 | VM_MAKE_TAG(VM_MEMORY_IOKIT
) );
2416 if( KERN_SUCCESS
!= err
) {
2420 // we have to make sure that these guys don't get copied if we fork.
2421 err
= vm_inherit( map
, ref
->mapped
, ref
->size
, VM_INHERIT_NONE
);
2422 assert( KERN_SUCCESS
== err
);
2431 IOMemoryDescriptorMapMemEntry(vm_map_t map
, ipc_port_t entry
, IOOptionBits options
, bool pageable
,
2432 mach_vm_size_t offset
,
2433 mach_vm_address_t
* address
, mach_vm_size_t length
)
2436 IOMemoryDescriptorMapAllocRef ref
;
2438 ref
.sharedMem
= entry
;
2439 ref
.sourceOffset
= trunc_page_64(offset
);
2440 ref
.options
= options
;
2444 if (options
& kIOMapAnywhere
)
2445 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2448 ref
.mapped
= *address
;
2450 if( ref
.sharedMem
&& (map
== kernel_map
) && pageable
)
2451 err
= IOIteratePageableMaps( ref
.size
, &IOMemoryDescriptorMapAlloc
, &ref
);
2453 err
= IOMemoryDescriptorMapAlloc( map
, &ref
);
2455 *address
= ref
.mapped
;
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                __addressMap,
        IOVirtualAddress *      __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");

    _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;

    do
    {
        sourceAddr = getSourceSegment( offset, NULL );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size  = mapping->fLength + pageOffset;
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                    | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
            }

            // swap the memEntries since they now refer to different vm_objects
            void * me = _memEntry;
            _memEntry = mapping->fMemory->_memEntry;
            mapping->fMemory->_memEntry = me;

            if (pager)
                err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
                      err, this, sourceAddr, mapping, address, offset, length);

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}
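/*
 * handleFault() walks the descriptor's physical segments and, when a
 * device pager is present, populates the pager page by page (or in one
 * call for physically contiguous memory). For kernel mappings it also
 * touches the range with vm_fault() so the pmap entries exist before any
 * access from interrupt level.
 */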
IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        mach_vm_address_t       address,
        mach_vm_size_t          sourceOffset,
        mach_vm_size_t          length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    mach_vm_size_t      size;
    mach_vm_size_t      bytes;
    mach_vm_size_t      page;
    mach_vm_size_t      pageOffset;
    mach_vm_size_t      pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength        allLen;
                addr64_t                allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) );
            }
            else
            {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
        }

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        /*  *** Temporary Workaround *** */

        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /*  *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;
    }
    while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}
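/*
 * doUnmap() accepts either an explicit (address, length) pair or, when
 * __length is zero, an _IOMemoryMap object passed through __address from
 * which the target map, address and length are recovered.
 */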
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        __address,
        IOByteCount             __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((_IOMemoryMap *) __address)->fAddressMap;
        address    = ((_IOMemoryMap *) __address)->fAddress;
        length     = ((_IOMemoryMap *) __address)->fLength;
    }

    if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
        addressMap = IOPageableMapForAddress( address );

    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length);

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn            err = kIOReturnSuccess;
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    if (doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, doRedirect );
        iter->release();
    }

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}
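/*
 * Per-mapping redirection: when redirecting, the pages backing this
 * mapping are unmapped (and, for physical descriptors being handed back
 * to a "safe" task, remapped in place via doMap with kIOMapUnique).
 * Write-combined mappings are instead flipped to cache-inhibited while
 * redirected and restored afterwards.
 */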
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
          && (0 == (fOptions & kIOMapStatic)))
        {
            IOUnmapPages( fAddressMap, fAddress, fLength );
            if(!doRedirect && safeTask
             && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
            {
                IOVirtualAddress iova = (IOVirtualAddress) this;
                err = mach_vm_deallocate( fAddressMap, fAddress, fLength );
                err = fMemory->doMap( fAddressMap, &iova,
                                      (fOptions & ~kIOMapAnywhere) | kIOMap64Bit /*| kIOMapReserve*/,
                                      0, 0 );
            }
            else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect,
                  this, fAddress, fLength, fAddressMap);
        }
        else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
        {
            IOOptionBits newMode;
            newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
            IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
        }
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
        || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddress = 0;

    return( err );
}
void _IOMemoryMap::taskDied( void )
{
    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
}
// Overload the release mechanism. All mappings must be a member
// of a memory descriptor's _mappings set. This means that we
// always have 2 references on a mapping. When either of these references
// is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    super::taggedRelease(tag, 2);
}
void _IOMemoryMap::free()
{
    if (fMemory)
        fMemory->removeMapping(this);

    if (fOwner && (fOwner != fMemory))
        fOwner->removeMapping(this);

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}
IOByteCount _IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    if( fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap && vm_map_is_64bit(fAddressMap))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }

    return( fAddress );
}

mach_vm_address_t _IOMemoryMap::getAddress()
{
    return( fAddress );
}

mach_vm_size_t _IOMemoryMap::getSize()
{
    return( fLength );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask );
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( fOptions );
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}
_IOMemoryMap * _IOMemoryMap::copyCompatible(
                _IOMemoryMap * newMapping )
{
    task_t              task      = newMapping->getAddressTask();
    mach_vm_address_t   toAddress = newMapping->fAddress;
    IOOptionBits        _options  = newMapping->fOptions;
    mach_vm_size_t      _offset   = newMapping->fOffset;
    mach_vm_size_t      _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping->release();
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}
IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper * mapper;
        mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}
void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        atAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}
IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
        task_t                  intoTask,
        mach_vm_address_t       atAddress,
        IOOptionBits            options,
        mach_vm_size_t          offset,
        mach_vm_size_t          length)
{
    IOMemoryMap  * result;
    _IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new _IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
              this, atAddress, options, offset, length);

    return (result);
}
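/*
 * Typical driver usage (illustrative sketch only; 'md' and the option
 * bits shown are caller-supplied, not defined here):
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                               kIOMapAnywhere | kIOMapReadOnly);
 *   if (map) {
 *       mach_vm_address_t va = map->getAddress();
 *       // ... use the mapping ...
 *       map->release();        // drops the mapping created above
 *   }
 */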
IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}
IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = fLength;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                                           NULL, NULL,
                                                           &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    if (physMem)
        physMem->release();

    return (err);
}
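/*
 * makeMapping() consolidates mapping creation: kIOMapStatic mappings are
 * simply recorded, kIOMapUnique requests may be redirected onto a
 * temporary physical descriptor covering the segment, and ordinary
 * requests are first checked against existing mappings via
 * copyCompatible() before doMap() is invoked to build a new one.
 */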
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        __address,
        IOOptionBits            options,
        IOByteCount             __offset,
        IOByteCount             __length )
{
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");

    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       result  = 0;
    OSIterator *         iter;

    _IOMemoryMap * mapping = (_IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
             || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                _IOMemoryMap * lookMapping;
                while ((lookMapping = (_IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
                continue;
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    if (mapDesc)
        mapDesc->release();

    return (result);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false );

    if( (offset + length) > parent->getLength())
        return( false );

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else. Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = direction;
    _tag        = parent->getTag();

    return( true );
}
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
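/*
 * DMA command operations are forwarded to the parent descriptor with the
 * offset rebased by _start; segment lengths coming back are clipped so
 * they never extend past the end of this subrange.
 */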
IOReturn
IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn rtn;

    if (kIOMDGetCharacteristics == op) {

        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (kIOReturnSuccess == rtn) {
            IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
            data->fLength = _length;
            data->fSGCount = 0; // XXX gvdl: need to compute and pages
            data->fPageAlign = 0;
        }

        return rtn;
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data =
            reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
        UInt offset = data->fOffset;
        UInt remain = _length - offset;
        if ((int) remain <= 0)
            return (!remain)? kIOReturnOverrun : kIOReturnInternalError;

        data->fOffset = offset + _start;
        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (data->fLength > remain)
            data->fLength = remain;
        data->fOffset = offset;

        return rtn;
    }
    else
        return kIOReturnBadArgument;
}
addr64_t
IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
{
    addr64_t    address;
    IOByteCount actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment64( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOReturn IOSubMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    panic("IOSubMemoryDescriptor::doMap");
    return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length));
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                              IOOptionBits * oldState )
{
    IOReturn err;

    err = _parent->setPurgeable( newState, oldState );

    return( err );
}
IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                                  IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );

    return( err );
}
IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection)
{
    IOReturn err;

    err = _parent->prepare( forDirection );

    return( err );
}
IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection)
{
    IOReturn err;

    err = _parent->complete( forDirection );

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        address,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMap64Bit & options))
    {
        panic("IOSubMemoryDescriptor::makeMapping !64bit");
    }

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        address,
                                        options, _start + offset, length );

    return( mapping );
}
// These IOMemoryDescriptor initializers are not supported on a subrange descriptor.

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount length,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            length,
                                 IODirection            direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
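/*
 * Serialization emits the descriptor's ranges as an XML array of
 * {address, length} dictionaries. The ranges are snapshotted into a
 * temporary copy first so no allocation happens while the descriptor
 * could change underneath us.
 */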
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result = false;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us. Give up.
        result = false;
        goto bail;
    }

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result)
            goto bail;
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s)
        return (false);

    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i=0; i<3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }