/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <vm/vm_fault.h>
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);

void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
        memory_object_t         pager,
        int                     device_handle,
        vm_size_t               size,
        int                     flags);
void
device_pager_deallocate(
        memory_object_t);
kern_return_t
device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,
        ppnum_t                 phys_addr,
        vm_size_t               size);
kern_return_t
memory_object_iopl_request(
        ipc_port_t              port,
        memory_object_offset_t  offset,
        vm_size_t               *upl_size,
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
        int                     *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);
#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

IOCopyMapper *    gIOCopyMapper = NULL;

static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)
public:
    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;
    upl_t                redirUPL;
    ipc_port_t           redirEntry;
    IOMemoryDescriptor * owner;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:
    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits         options,
                                          IOByteCount          offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool initCompatible(
                IOMemoryDescriptor *    memory,
                IOMemoryMap *           superMap,
                IOByteCount             offset,
                IOByteCount             length );

    bool initWithDescriptor(
                IOMemoryDescriptor *    memory,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    IOReturn redirect(
                task_t                  intoTask, bool redirect );
};
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t fPageInfo;      // Pointer to page list or index into it
    ppnum_t fMappedBase;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
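/*
 * Illustrative sketch (not part of the original file): how the ioGMDData blob
 * stored in _memoryEntries is sized and walked with the macros above.  The
 * values "pages" and "upls" below are hypothetical examples.
 *
 *   unsigned int pages = 16, upls = 1;
 *   unsigned int dataSize = computeDataSize(pages, upls);
 *   OSData *md = OSData::withCapacity(dataSize);   // becomes _memoryEntries
 *   md->appendBytes(0, sizeof(ioGMDData));         // zero-filled header
 *   ioGMDData *dataP = getDataP(md);
 *   dataP->fPageCnt = pages;                       // fixes where fPageList ends
 *   // upl_page_info_t entries live at getPageList(dataP)[0 .. pages-1];
 *   // ioPLBlock records appended later start at getIOPLList(dataP), and
 *   // getNumIOPL(md, dataP) recovers how many have been appended so far.
 */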
kern_return_t device_data_action(
               int                  device_handle,
               ipc_port_t           device_pager,
               vm_prot_t            protection,
               vm_object_offset_t   offset,
               vm_size_t            size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(addr64_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is used.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOOptionBits options = (IOOptionBits) direction;
    if (task == kernel_task)
        options |= kIOMemoryAutoPrepare;
    return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
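    /*
     * Illustrative sketch (not from the original file): typical driver-side
     * use of these factory methods.  "myBuffer" and "myLength" are
     * hypothetical, and error handling is abbreviated; this only shows the
     * create/prepare/complete life cycle the routines below implement.
     *
     *   IOMemoryDescriptor *md =
     *       IOMemoryDescriptor::withAddressRange((mach_vm_address_t) myBuffer,
     *                                            myLength,
     *                                            kIODirectionOutIn, kernel_task);
     *   if (md && (kIOReturnSuccess == md->prepare())) {
     *       // ... program DMA using md->getPhysicalSegment(...) ...
     *       md->complete();
     *   }
     *   if (md)
     *       md->release();
     */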
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithAddress(address, length, direction, task))
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress       address,
                                IOByteCount             length,
                                IODirection             direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));

    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
        && !self->initWithPhysicalAddress(address, length, direction)) {
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithRanges(ranges, withCount, direction, task, asReference))
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (task)
        options |= kIOMemoryTypeVirtual64;
    else
        options |= kIOMemoryTypePhysical64;

    if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
/*
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
// Can't leave abstract but this should never be used directly,
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;

    if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    return IOGeneralMemoryDescriptor::
        withPersistentMemoryDescriptor(origGenMD);
}
IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
        && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ | VM_PROT_WRITE;
    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                  (UInt64)range0Addr, (UInt32)actualSize, size);
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
/*
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is used.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * IOMemoryDescriptor.  The buffer is made up of several virtual address ranges,
 * from a given task, several physical ranges, an UPL from the ubc
 * system or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;
        count = 1;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
    case kIOMemoryTypeVirtual64:
        break;

    case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
    case kIOMemoryTypePhysical64:
        mapper = kIOMapperNone;

    case kIOMemoryTypeUPL:
        break;

    default:
        return false;   /* bad argument */
    }
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        if (_ranges.v && _rangesIsAllocated)
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        if (_memEntry)
            { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
    }
    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);

    // DEPRECATED variable initialisation
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    __iomd_reservedC = 0;
    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        ioPLBlock iopl;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;

        //       _wireCount++;  // UPLs start out life wired

        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];

            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
            _rangesIsAllocated = true;
            switch (_flags & kIOMemoryTypeMask)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                _ranges.v64 = IONew(IOAddressRange, count);
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;

              case kIOMemoryTypeVirtual:
                _ranges.v = IONew(IOVirtualRange, count);
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            UInt32 len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);      // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
void IOGeneralMemoryDescriptor::free()
{
    if (reserved)
        reserved->memory = 0;

    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
    {
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
/*
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag );
}
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    return length - remaining;
}
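/*
 * Illustrative sketch (not from the original file): copying data out of a
 * descriptor with readBytes().  "md" is assumed to be a prepared
 * IOMemoryDescriptor and "localBuf" a hypothetical kernel buffer.
 *
 *   char        localBuf[256];
 *   IOByteCount want = min((IOByteCount) sizeof(localBuf), md->getLength());
 *   IOByteCount got  = md->readBytes(0, localBuf, want);
 *   if (got != want)
 *       IOLog("short read: %lu of %lu bytes\n",
 *             (unsigned long) got, (unsigned long) want);
 *
 * writeBytes() is symmetric, but returns 0 if the descriptor was prepared
 * for kIODirectionOut only, since that sets kIOMemoryPreparedReadOnly.
 */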
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
IOReturn
IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = _direction;
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries) {
                ioGMDData *gmdData = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(gmdData);
                UInt count = getNumIOPL(_memoryEntries, gmdData);

                data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
                               && ioplList[0].fMappedBase);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
            else
                data->fIsMapped = false;
        }

        return kIOReturnSuccess;
    }
    else if (!(kIOMDWalkSegments & op))
        return kIOReturnBadArgument;
    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if ((kIOMDFirstSegment != op)
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt length;
    UInt64 address;
    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        UInt len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length  += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        UInt len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length  += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        ioGMDData * dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;                       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && ioplInfo.fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = ptoa_64(ioplInfo.fMappedBase) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;
    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
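/*
 * Illustrative sketch (not from the original file): walking a prepared
 * descriptor segment by segment, the way IODMACommand drives the
 * kIOMDFirstSegment / kIOMDWalkSegments operations above.  "md" is a
 * hypothetical prepared IOGeneralMemoryDescriptor.
 *
 *   IOByteCount offset = 0;
 *   while (offset < md->getLength()) {
 *       IOByteCount seglen  = 0;
 *       addr64_t    segaddr = md->getPhysicalSegment64(offset, &seglen);
 *       if (!segaddr)
 *           break;                    // hole or error
 *       // program one DMA element covering [segaddr, segaddr + seglen)
 *       offset += seglen;
 *   }
 */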
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = false;

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

//  assert(offset <= _length);

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = true;

        ret = dmaCommandOperation(
                kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }
    }

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%x, class %s",
              address, length, (getMetaClass())->getClassName());
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return ((IOPhysicalAddress) address);
}
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}
IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    assert(offset <= _length);

    if ( type == kIOMemoryTypeUPL)
        return super::getSourceSegment( offset, lengthOfSegment );
    else if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        user_addr_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }

        address = (IOPhysicalAddress) addr;     // Truncate address to 32bit
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                        IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);

    panic("IOGMD::getVirtualSegment deprecated");
    return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fDirection = _direction;
        if (IOMapper::gSystem)
            data->fIsMapped = true;
        data->fIsPrepared = true;       // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
        data->fLength = length;
    }
    else
        return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }

        control = VM_PURGABLE_SET_STATE;
        switch (newState)
        {
            case kIOMemoryPurgeableKeepCurrent:
                control = VM_PURGABLE_GET_STATE;
                break;

            case kIOMemoryPurgeableNonVolatile:
                state = VM_PURGABLE_NONVOLATILE;
                break;
            case kIOMemoryPurgeableVolatile:
                state = VM_PURGABLE_VOLATILE;
                break;
            case kIOMemoryPurgeableEmpty:
                state = VM_PURGABLE_EMPTY;
                break;
            default:
                err = kIOReturnBadArgument;
                break;
        }

        if (kIOReturnSuccess != err)
            break;

        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

        if (oldState && (kIOReturnSuccess == err))
        {
            switch (state)
            {
                case VM_PURGABLE_NONVOLATILE:
                    state = kIOMemoryPurgeableNonVolatile;
                    break;
                case VM_PURGABLE_VOLATILE:
                    state = kIOMemoryPurgeableVolatile;
                    break;
                case VM_PURGABLE_EMPTY:
                    state = kIOMemoryPurgeableEmpty;
                    break;
                default:
                    state = kIOMemoryPurgeableNonVolatile;
                    err = kIOReturnNotReady;
                    break;
            }
            *oldState = state;
        }
    }
    while (false);

    return (err);
}
extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    {
        // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
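/*
 * Illustrative sketch (not from the original file): flushing a descriptor's
 * backing pages before handing them to a device that is not cache coherent.
 * "md" is a hypothetical prepared IOMemoryDescriptor.
 *
 *   IOReturn rc = md->performOperation(kIOMemoryIncoherentIOFlush,
 *                                      0, md->getLength());
 *   if (kIOReturnUnsupported == rc) {
 *       // no incoherent-cache maintenance available/needed on this platform
 *   }
 */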
#ifdef __ppc__
extern vm_offset_t              static_memory_end;
#define io_kernel_static_end    static_memory_end
#else
extern vm_offset_t              first_avail;
#define io_kernel_static_end    first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        vm_address_t            offset,
        vm_size_t               *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
IOReturn
IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;  // May no longer be valid so lets not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

#ifdef UPL_NEED_32BIT_ADDR
    if (kIODirectionPrepareToPhys32 & forDirection)
        uplFlags |= UPL_NEED_32BIT_ADDR;
#endif

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }
    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    ppnum_t highestPage = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        user_addr_t startPage;
        IOByteCount numBytes;
        ppnum_t highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = (short) startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
            if (curMap)
                theMap = curMap;
            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }
            else
                theMap = NULL;

            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart, &ioplSize, &iopl.fIOPL,
                                                 baseInfo, &numPageInfo, &highPage);
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex), &ioplSize, &iopl.fIOPL,
                                                   baseInfo, &numPageInfo, &ioplFlags);
            }
            else {
                error = vm_map_create_upl(theMap,
                                          startPage, (upl_size_t *) &ioplSize, &iopl.fIOPL,
                                          baseInfo, &numPageInfo, &ioplFlags);
            }

            if (error != KERN_SUCCESS)
                goto abortExit;

            highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnNoMemory;

            if (baseInfo->device) {
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else if (mapper)
                mapper->iovmInsert(mapBase, pageIndex,
                                   baseInfo, numPageInfo);

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partially created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    return kIOReturnSuccess;
abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    return error;
}
/*
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (!_wireCount
    &&  (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}
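/*
 * Illustrative sketch (not from the original file): the prepare()/complete()
 * pairing described above, as a driver would typically use it.  "md" is a
 * hypothetical descriptor for pageable user memory.
 *
 *   if (kIOReturnSuccess == md->prepare(kIODirectionIn)) {
 *       // pages are now wired; physical segments are stable for DMA
 *       // ... run the transfer ...
 *       md->complete(kIODirectionIn);   // every prepare() needs a complete()
 *   }
 */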
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        IOOptionBits type = _flags & kIOMemoryTypeMask;

        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
            /* kIOMemoryTypePhysical */
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }
            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
2039 IOReturn
IOGeneralMemoryDescriptor::doMap(
2040 vm_map_t addressMap
,
2041 IOVirtualAddress
* atAddress
,
2042 IOOptionBits options
,
2043 IOByteCount sourceOffset
,
2044 IOByteCount length
)
2047 ipc_port_t sharedMem
= (ipc_port_t
) _memEntry
;
2049 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2050 Ranges vec
= _ranges
;
2052 user_addr_t range0Addr
= 0;
2053 IOByteCount range0Len
= 0;
2056 getAddrLenForInd(range0Addr
, range0Len
, type
, vec
, 0);
2058 // mapping source == dest? (could be much better)
2060 && (addressMap
== get_task_map(_task
)) && (options
& kIOMapAnywhere
)
2061 && (1 == _rangesCount
) && (0 == sourceOffset
)
2062 && range0Addr
&& (length
<= range0Len
) ) {
2063 if (sizeof(user_addr_t
) > 4 && ((UInt64
) range0Addr
) >> 32)
2064 return kIOReturnOverrun
; // Doesn't fit in 32bit return field
2066 *atAddress
= range0Addr
;
2067 return( kIOReturnSuccess
);
2071 if( 0 == sharedMem
) {
2073 vm_size_t size
= ptoa_32(_pages
);
2077 memory_object_size_t actualSize
= size
;
2078 kr
= mach_make_memory_entry_64(get_task_map(_task
),
2079 &actualSize
, range0Addr
,
2080 VM_PROT_READ
| VM_PROT_WRITE
, &sharedMem
,
2083 if( (KERN_SUCCESS
== kr
) && (actualSize
!= round_page_32(size
))) {
2085 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2086 range0Addr
, (UInt32
) actualSize
, size
);
2088 kr
= kIOReturnVMError
;
2089 ipc_port_release_send( sharedMem
);
2092 if( KERN_SUCCESS
!= kr
)
2093 sharedMem
= MACH_PORT_NULL
;
2095 } else do { // _task == 0, must be physical
2097 memory_object_t pager
;
2098 unsigned int flags
= 0;
2100 IOPhysicalLength segLen
;
2102 pa
= getPhysicalSegment64( sourceOffset
, &segLen
);
2105 reserved
= IONew( ExpansionData
, 1 );
2109 reserved
->pagerContig
= (1 == _rangesCount
);
2110 reserved
->memory
= this;
2112 /*What cache mode do we need*/
2113 switch(options
& kIOMapCacheMask
) {
2115 case kIOMapDefaultCache
:
2117 flags
= IODefaultCacheBits(pa
);
2120 case kIOMapInhibitCache
:
2121 flags
= DEVICE_PAGER_CACHE_INHIB
|
2122 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2125 case kIOMapWriteThruCache
:
2126 flags
= DEVICE_PAGER_WRITE_THROUGH
|
2127 DEVICE_PAGER_COHERENT
| DEVICE_PAGER_GUARDED
;
2130 case kIOMapCopybackCache
:
2131 flags
= DEVICE_PAGER_COHERENT
;
2134 case kIOMapWriteCombineCache
:
2135 flags
= DEVICE_PAGER_CACHE_INHIB
|
2136 DEVICE_PAGER_COHERENT
;
2140 flags
|= reserved
->pagerContig
? DEVICE_PAGER_CONTIGUOUS
: 0;
2142 pager
= device_pager_setup( (memory_object_t
) 0, (int) reserved
,
2147 kr
= mach_memory_object_memory_entry_64( (host_t
) 1, false /*internal*/,
2148 size
, VM_PROT_READ
| VM_PROT_WRITE
, pager
, &sharedMem
);
2150 assert( KERN_SUCCESS
== kr
);
2151 if( KERN_SUCCESS
!= kr
) {
2152 device_pager_deallocate( pager
);
2153 pager
= MACH_PORT_NULL
;
2154 sharedMem
= MACH_PORT_NULL
;
2157 if( pager
&& sharedMem
)
2158 reserved
->devicePager
= pager
;
2160 IODelete( reserved
, ExpansionData
, 1 );
2166 _memEntry
= (void *) sharedMem
;
2171 kr
= kIOReturnVMError
;
2173 kr
= super::doMap( addressMap
, atAddress
,
2174 options
, sourceOffset
, length
);
2179 IOReturn
IOGeneralMemoryDescriptor::doUnmap(
2180 vm_map_t addressMap
,
2181 IOVirtualAddress logical
,
2182 IOByteCount length
)
2184 // could be much better
2185 if( _task
&& (addressMap
== get_task_map(_task
)) && (1 == _rangesCount
)) {
2187 IOOptionBits type
= _flags
& kIOMemoryTypeMask
;
2188 user_addr_t range0Addr
;
2189 IOByteCount range0Len
;
2191 getAddrLenForInd(range0Addr
, range0Len
, type
, _ranges
, 0);
2192 if (logical
== range0Addr
&& length
<= range0Len
)
2193 return( kIOReturnSuccess
);
2196 return( super::doUnmap( addressMap
, logical
, length
));
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2215 bool _IOMemoryMap::initCompatible(
2216 IOMemoryDescriptor
* _memory
,
2217 IOMemoryMap
* _superMap
,
2218 IOByteCount _offset
,
2219 IOByteCount _length
)
2225 if( (_offset
+ _length
) > _superMap
->getLength())
2230 _superMap
->retain();
2231 superMap
= _superMap
;
2237 length
= _memory
->getLength();
2239 options
= superMap
->getMapOptions();
2240 logical
= superMap
->getVirtualAddress() + offset
;
2245 bool _IOMemoryMap::initWithDescriptor(
2246 IOMemoryDescriptor
* _memory
,
2248 IOVirtualAddress toAddress
,
2249 IOOptionBits _options
,
2250 IOByteCount _offset
,
2251 IOByteCount _length
)
2254 bool redir
= ((kIOMapUnique
|kIOMapReference
) == ((kIOMapUnique
|kIOMapReference
) & _options
));
2256 if ((!_memory
) || (!intoTask
))
2259 if( (_offset
+ _length
) > _memory
->getLength())
2266 addressMap
= get_task_map(intoTask
);
2269 vm_map_reference(addressMap
);
2270 addressTask
= intoTask
;
2271 logical
= toAddress
;
2281 length
= _memory
->getLength();
2283 if( options
& kIOMapStatic
)
2286 ok
= (kIOReturnSuccess
== _memory
->doMap( addressMap
, &toAddress
,
2287 _options
, offset
, length
));
2293 logical
= toAddress
;
2302 vm_map_deallocate(addressMap
);
2310 /* LP64todo - these need to expand */
2311 struct IOMemoryDescriptorMapAllocRef
2313 ipc_port_t sharedMem
;
2316 IOByteCount sourceOffset
;
2317 IOOptionBits options
;
2320 static kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map
, void * _ref
)
2322 IOMemoryDescriptorMapAllocRef
* ref
= (IOMemoryDescriptorMapAllocRef
*)_ref
;
2326 if( ref
->sharedMem
) {
2327 vm_prot_t prot
= VM_PROT_READ
2328 | ((ref
->options
& kIOMapReadOnly
) ? 0 : VM_PROT_WRITE
);
2330 // set memory entry cache
2331 vm_prot_t memEntryCacheMode
= prot
| MAP_MEM_ONLY
;
2332 switch (ref
->options
& kIOMapCacheMask
)
2334 case kIOMapInhibitCache
:
2335 SET_MAP_MEM(MAP_MEM_IO
, memEntryCacheMode
);
2338 case kIOMapWriteThruCache
:
2339 SET_MAP_MEM(MAP_MEM_WTHRU
, memEntryCacheMode
);
2342 case kIOMapWriteCombineCache
:
2343 SET_MAP_MEM(MAP_MEM_WCOMB
, memEntryCacheMode
);
2346 case kIOMapCopybackCache
:
2347 SET_MAP_MEM(MAP_MEM_COPYBACK
, memEntryCacheMode
);
2350 case kIOMapDefaultCache
:
2352 SET_MAP_MEM(MAP_MEM_NOOP
, memEntryCacheMode
);
2356 vm_size_t unused
= 0;
2358 err
= mach_make_memory_entry( NULL
/*unused*/, &unused
, 0 /*unused*/,
2359 memEntryCacheMode
, NULL
, ref
->sharedMem
);
2360 if (KERN_SUCCESS
!= err
)
2361 IOLog("MAP_MEM_ONLY failed %d\n", err
);
2365 ref
->size
, 0 /* mask */,
2366 (( ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
2367 | VM_MAKE_TAG(VM_MEMORY_IOKIT
),
2368 ref
->sharedMem
, ref
->sourceOffset
,
2374 if( KERN_SUCCESS
!= err
) {
2381 err
= vm_allocate( map
, &ref
->mapped
, ref
->size
,
2382 ((ref
->options
& kIOMapAnywhere
) ? VM_FLAGS_ANYWHERE
: VM_FLAGS_FIXED
)
2383 | VM_MAKE_TAG(VM_MEMORY_IOKIT
) );
2385 if( KERN_SUCCESS
!= err
) {
2390 // we have to make sure that these guys don't get copied if we fork.
2391 err
= vm_inherit( map
, ref
->mapped
, ref
->size
, VM_INHERIT_NONE
);
2392 assert( KERN_SUCCESS
== err
);
2401 IOReturn
IOMemoryDescriptor::doMap(
2402 vm_map_t addressMap
,
2403 IOVirtualAddress
* atAddress
,
2404 IOOptionBits options
,
2405 IOByteCount sourceOffset
,
2406 IOByteCount length
)
2408 IOReturn err
= kIOReturnSuccess
;
2409 memory_object_t pager
;
2410 vm_address_t logical
;
2411 IOByteCount pageOffset
;
2412 IOPhysicalAddress sourceAddr
;
2413 IOMemoryDescriptorMapAllocRef ref
;
2415 ref
.sharedMem
= (ipc_port_t
) _memEntry
;
2416 ref
.sourceOffset
= sourceOffset
;
2417 ref
.options
= options
;
2422 length
= getLength();
2424 sourceAddr
= getSourceSegment( sourceOffset
, NULL
);
2425 pageOffset
= sourceAddr
- trunc_page_32( sourceAddr
);
2427 ref
.size
= round_page_32( length
+ pageOffset
);
2429 if ((kIOMapReference
|kIOMapUnique
) == ((kIOMapReference
|kIOMapUnique
) & options
))
2435 _IOMemoryMap
* mapping
= (_IOMemoryMap
*) *atAddress
;
2436 ref
.mapped
= mapping
->getVirtualAddress();
2440 err
= kIOReturnNotReadable
;
2445 flags
= UPL_COPYOUT_FROM
| UPL_SET_INTERNAL
2446 | UPL_SET_LITE
| UPL_SET_IO_WIRE
| UPL_BLOCK_ACCESS
;
2448 if (KERN_SUCCESS
!= memory_object_iopl_request((ipc_port_t
) _memEntry
, 0, &size
, &redirUPL2
,
2453 err
= upl_transpose(redirUPL2
, mapping
->redirUPL
);
2454 if (kIOReturnSuccess
!= err
)
2456 IOLog("upl_transpose(%x)\n", err
);
2457 err
= kIOReturnSuccess
;
2462 upl_commit(redirUPL2
, NULL
, 0);
2463 upl_deallocate(redirUPL2
);
2467 // swap the memEntries since they now refer to different vm_objects
2468 void * me
= _memEntry
;
2469 _memEntry
= mapping
->memory
->_memEntry
;
2470 mapping
->memory
->_memEntry
= me
;
2476 logical
= *atAddress
;
2477 if( options
& kIOMapAnywhere
)
2478 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2481 ref
.mapped
= trunc_page_32( logical
);
2482 if( (logical
- ref
.mapped
) != pageOffset
) {
2483 err
= kIOReturnVMError
;
2488 if( ref
.sharedMem
&& (addressMap
== kernel_map
) && (kIOMemoryBufferPageable
& _flags
))
2489 err
= IOIteratePageableMaps( ref
.size
, &IOMemoryDescriptorMapAlloc
, &ref
);
2491 err
= IOMemoryDescriptorMapAlloc( addressMap
, &ref
);
2494 if( err
!= KERN_SUCCESS
)
2498 pager
= (memory_object_t
) reserved
->devicePager
;
2500 pager
= MACH_PORT_NULL
;
2502 if( !ref
.sharedMem
|| pager
)
2503 err
= handleFault( pager
, addressMap
, ref
.mapped
, sourceOffset
, length
, options
);
2507 if( err
!= KERN_SUCCESS
) {
2509 doUnmap( addressMap
, ref
.mapped
, ref
.size
);
2512 *atAddress
= ref
.mapped
+ pageOffset
;
2518 kIOMemoryRedirected
= 0x00010000
IOReturn IOMemoryDescriptor::handleFault(
    void *              _pager,
    vm_map_t            addressMap,
    IOVirtualAddress    address,
    IOByteCount         sourceOffset,
    IOByteCount         length,
    IOOptionBits        options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOByteCount         pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength    allLen;
                addr64_t            allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                                (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        /*  *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution      */
        /* of the mappings created above.  Need for this is in absolute     */
        /* violation of the basic tenet that the pmap layer is a cache.     */
        /* Further, it implies a serious I/O architectural violation on     */
        /* the part of some user of the mapping.  As of this writing,       */
        /* the call to vm_fault is needed because the NVIDIA driver         */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be     */
        /* fixed as soon as possible.  The NVIDIA driver should not         */
        /* need to query for this info as it should know from the doMap     */
        /* call where the physical memory is mapped.  When a query is       */
        /* necessary to find a physical mapping, it should be done          */
        /* through an iokit call which includes the mapped memory           */
        /* handle.  This is required for machine architecture independence. */

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /*  *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
          && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}
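
/*
 * doUnmap releases the virtual range created by doMap.  Pageable kernel
 * buffers are deallocated from the pageable map that actually holds the
 * address rather than from kernel_map.
 */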
IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t            addressMap,
    IOVirtualAddress    logical,
    IOByteCount         length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
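
/*
 * redirect sets or clears kIOMemoryRedirected on this descriptor and forwards
 * the request to every _IOMemoryMap in _mappings.  While redirected, new
 * faults against the memory (see handleFault) block until redirection is
 * cleared and the waiters are woken.
 */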
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn        err = kIOReturnSuccess;
    _IOMemoryMap *  mapping = 0;
    OSIterator *    iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, doRedirect );
        iter->release();
    }

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        if ((!safeTask || (get_task_map(safeTask) != addressMap))
          && (0 == (options & kIOMapStatic)))
        {
            IOUnmapPages( addressMap, logical, length );
            if(!doRedirect && safeTask
             && (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
               || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
            {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect,
                  this, logical, length, addressMap);
        }
        else if (kIOMapWriteCombineCache == (options & kIOMapCacheMask))
        {
            IOOptionBits newMode;
            newMode = (options & ~kIOMapCacheMask)
                    | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
            IOProtectCacheMode(addressMap, logical, length, newMode);
        }

        UNLOCK;
    }

    if ((((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
      || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
        memory->redirect(safeTask, doRedirect);

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);

    } else
        err = kIOReturnSuccess;

    addressMap = 0;
    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical = 0;
    UNLOCK;
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if (owner && (owner != memory))
    {
        LOCK;
        owner->removeMapping(this);
        UNLOCK;
    }

    if( superMap)
        superMap->release();

    if (redirUPL) {
        upl_commit(redirUPL, NULL, 0);
        upl_deallocate(redirUPL);
    }

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask );
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options );
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
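
/*
 * copyCompatible returns an existing mapping that can satisfy a new map
 * request: the task, protection, cache mode and (for fixed mappings) the
 * address must all agree, and the requested range must fall inside this
 * mapping.  An exact match is returned retained; a sub-range gets a new
 * _IOMemoryMap initialized against this one.
 */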
_IOMemoryMap * _IOMemoryMap::copyCompatible(
        IOMemoryDescriptor *    owner,
        task_t                  task,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( options & kIOMapUnique)
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length )
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, _length );
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper * mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
    task_t              intoTask,
    IOVirtualAddress    mapAddress,
    IOOptionBits        options )
{
    _IOMemoryMap * newMap;

    newMap = new _IOMemoryMap;

    LOCK;

    if( newMap
     && !newMap->initWithDescriptor( this, intoTask, mapAddress,
                        options | kIOMapStatic, 0, getLength() )) {
        newMap->release();
        newMap = 0;
    }

    addMapping( newMap);

    UNLOCK;

    return( newMap );
}
IOMemoryMap * IOMemoryDescriptor::map(
    IOOptionBits        options )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    task_t              intoTask,
    IOVirtualAddress    toAddress,
    IOOptionBits        options,
    IOByteCount         offset,
    IOByteCount         length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
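
/*
 * Typical client usage of the two map() entry points above (illustrative
 * sketch only, not part of this translation unit): the returned IOMemoryMap
 * holds the mapping until it is released.
 *
 *     IOMemoryMap * map = desc->map();                 // kernel_task, kIOMapAnywhere
 *     if (map) {
 *         IOVirtualAddress va  = map->getVirtualAddress();
 *         IOByteCount      len = map->getLength();
 *         // ... access [va, va + len) ...
 *         map->release();
 *     }
 */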
IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (logical && addressMap) do
    {
        if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
          || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = memory;
        }

        if (!redirUPL)
        {
            vm_size_t size = length;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                      | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( addressMap, logical, length );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != memory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference,
                                                          offset, length))
                    err = kIOReturnError;
            }
            if (redirUPL)
            {
                upl_commit(redirUPL, NULL, 0);
                upl_deallocate(redirUPL);
                redirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    return (err);
}
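
/*
 * makeMapping is the common back end for map() and setMapping().  It either
 * reuses a compatible existing mapping, builds a fresh one, or (for
 * kIOMapUnique redirects) rebinds an existing _IOMemoryMap to new backing
 * memory.  The created mapping is recorded in the owner's _mappings set.
 */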
IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor *    owner,
    task_t                  intoTask,
    IOVirtualAddress        toAddress,
    IOOptionBits            options,
    IOByteCount             offset,
    IOByteCount             length )
{
    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       mapping = 0;
    OSIterator *         iter;

    LOCK;

    do
    {
        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
              || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    break;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
                if (!mapDesc)
                    break;
                offset = 0;
            }
            else
                mapDesc = this;

            if (kIOMapReference & options)
            {
                mapping = (_IOMemoryMap *) toAddress;

                uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
                pageOffset1 -= trunc_page_32( pageOffset1 );

                uint32_t pageOffset2 = mapping->getVirtualAddress();
                pageOffset2 -= trunc_page_32( pageOffset2 );

                if (pageOffset1 != pageOffset2)
                    IOLog("::redirect can't map offset %x to addr %x\n",
                          pageOffset1, mapping->getVirtualAddress());

                if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                                  offset, length ))
                {
                    IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
                    mapping = 0;
                }

                if (mapping && mapping->owner)
                    mapping->owner->removeMapping(mapping);
                break;
            }
        }
        else
        {
            // look for an existing mapping
            if( (iter = OSCollectionIterator::withCollection( _mappings))) {

                while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                    if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                        break;
                }
                iter->release();
            }
        }

        if( mapping || (options & kIOMapReference))
            break;

        if (!mapDesc)
            mapDesc = this;

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                          offset, length )) {
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }
    }
    while( false );

    if (mapping)
    {
        mapping->owner = owner;
        owner->addMapping( mapping );
    }

    if (mapDesc && (mapDesc != this))
        mapDesc->release();

    UNLOCK;

    return( mapping );
}
void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
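
/*
 * IOSubMemoryDescriptor presents a byte sub-range [_start, _start + _length)
 * of its parent descriptor; the methods below rebase offsets by _start and
 * clip lengths to the sub-range before forwarding to _parent.
 */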
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false );

    if( (offset + length) > parent->getLength())
        return( false );

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */
        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = direction;
    _tag        = parent->getTag();

    return( true );
}

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
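
/*
 * dmaCommandOperation forwards IODMACommand requests to the parent,
 * rebasing the walk offset by _start on the way in and clipping the returned
 * segment length to what remains of the sub-range on the way out.
 */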
IOReturn
IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn rtn;

    if (kIOMDGetCharacteristics == op) {

        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (kIOReturnSuccess == rtn) {
            IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
            data->fLength = _length;
            data->fSGCount = 0;     // XXX gvdl: need to compute and pages
            data->fPageAlign = 0;
        }

        return rtn;
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data =
            reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
        UInt offset = data->fOffset;
        UInt remain = _length - offset;
        if ((int) remain <= 0)
            return (!remain)? kIOReturnOverrun : kIOReturnInternalError;

        data->fOffset = offset + _start;
        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (data->fLength > remain)
            data->fLength = remain;
        data->fOffset = offset;

        return rtn;
    }

    return kIOReturnBadArgument;
}
addr64_t
IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
{
    addr64_t    address;
    IOByteCount actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment64( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOReturn IOSubMemoryDescriptor::doMap(
    vm_map_t            addressMap,
    IOVirtualAddress *  atAddress,
    IOOptionBits        options,
    IOByteCount         sourceOffset,
    IOByteCount         length )
{
    if( sourceOffset >= _length)
        return( kIOReturnOverrun );

    return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
}
IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                              IOOptionBits * oldState )
{
    IOReturn err;

    LOCK;
    err = _parent->setPurgeable( newState, oldState );
    UNLOCK;

    return( err );
}
IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                                  IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    LOCK;
    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );
    UNLOCK;

    return( err );
}
IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection );
    UNLOCK;

    return( err );
}
IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection );
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
    IOMemoryDescriptor *    owner,
    task_t                  intoTask,
    IOVirtualAddress        toAddress,
    IOOptionBits            options,
    IOByteCount             offset,
    IOByteCount             length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMapUnique & options))
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
// None of the standard initializers apply to a subrange descriptor;
// it can only be constructed over an existing parent via initSubRange().

bool IOSubMemoryDescriptor::initWithAddress(void * address,
                IOByteCount withLength, IODirection direction)
{
    return( false );
}

bool IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                IOByteCount withLength, IODirection direction, task_t withTask)
{
    return( false );
}

bool IOSubMemoryDescriptor::initWithPhysicalAddress(
                IOPhysicalAddress address,
                IOByteCount withLength, IODirection direction )
{
    return( false );
}

bool IOSubMemoryDescriptor::initWithRanges(
                IOVirtualRange * ranges, UInt32 withCount,
                IODirection direction, task_t withTask, bool asReference)
{
    return( false );
}

bool IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                UInt32 withCount, IODirection direction, bool asReference)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
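
/*
 * serialize emits the descriptor as an XML array of { address, length }
 * dictionaries, one per range.  The range data is copied out under the lock
 * so no allocations happen while gIOMemoryLock is held.
 */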
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const  *keys[2];
    OSObject        *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int    index, nRanges;
    bool            result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len  = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (result == false)
            goto bail;
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);   // must match the IOMalloc size above
    return result;
}
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s)
        return (false);
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }