/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>

#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>

#include <sys/cdefs.h>
__BEGIN_DECLS
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, boolean_t wired);
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
__END_DECLS
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryDescriptor, OSObject )
OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is assumed.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       withLength,
                                IODirection       withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                             withDirection, (task_t) 0 ));
}
/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}
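/*
 * Illustrative usage sketch (not part of the original source): how a driver
 * might obtain and release one of the descriptors created by the factory
 * methods above.  The buffer and length names below are invented for the
 * example.
 */
#if 0   /* example only */
void exampleDescribeKernelBuffer(void * buffer, IOByteCount bufferLength)
{
    IOMemoryDescriptor * md;

    // Describe an existing kernel-virtual buffer for an outbound transfer.
    md = IOMemoryDescriptor::withAddress(buffer, bufferLength, kIODirectionOut);
    if (md) {
        // ... hand md to a controller, pair prepare()/complete() around the I/O ...
        md->release();
    }
}
#endif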
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is assumed.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
/*
 * initWithRanges:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        assert(_wireCount == 0);

        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */

    _ranges.v              = 0;
    _rangesCount           = withCount;
    _rangesIsAllocated     = asReference ? false : true;
    _direction             = withDirection;
    _length                = 0;
    _task                  = withTask;
    _position              = 0;
    _positionAtIndex       = 0;
    _positionAtOffset      = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;
    _flags                 = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
    {
        _length += _ranges.v[index].length;
    }

    return true;
}
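/*
 * Illustrative sketch (not part of the original source): constructing a
 * two-range descriptor with withRanges().  The addresses, lengths and the
 * clientTask / clientVirtualAddress names are invented for the example.
 */
#if 0   /* example only */
    IOVirtualRange exampleRanges[2];

    exampleRanges[0].address = clientVirtualAddress;
    exampleRanges[0].length  = 4096;
    exampleRanges[1].address = clientVirtualAddress + 8192;
    exampleRanges[1].length  = 2048;

    // asReference == false, so the descriptor copies the range array.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
                exampleRanges, 2, kIODirectionIn, clientTask, false);
#endif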
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                                   UInt32            withCount,
                                                   IODirection       withDirection,
                                                   bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                            withCount, withDirection, (task_t) 0, asReference ));
}
void IOGeneralMemoryDescriptor::free()
{
    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    kern_return_t krtn;
    vm_offset_t   off;

    // Pull the shared pages out of the task map
    // Do we need to unwire it first?
    for ( off = 0; off < _kernSize; off += page_size )
    {
        pmap_change_wiring(
                        kernel_pmap,
                        _kernPtrAligned + off,
                        FALSE);

        pmap_remove(
                        kernel_pmap,
                        _kernPtrAligned + off,
                        _kernPtrAligned + off + page_size);
    }
    // Free the former shmem area in the task
    krtn = vm_deallocate(kernel_map,
                        _kernPtrAligned,
                        _kernSize );
    assert(krtn == KERN_SUCCESS);
    _kernPtrAligned = 0;
}
void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    kern_return_t krtn;
    vm_offset_t   off;

    if (_kernPtrAtIndex == rangeIndex)  return;

    assert(_kernPtrAligned == 0);

    vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);

    _kernSize = trunc_page(_ranges.v[rangeIndex].address +
                           _ranges.v[rangeIndex].length  +
                           page_size - 1) - srcAlign;

    /* Find some memory of the same size in kernel task.  We use vm_allocate()
       to do this. vm_allocate inserts the found memory object in the
       target task's map as a side effect. */
    krtn = vm_allocate( kernel_map,
            &_kernPtrAligned,
            _kernSize,
            VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
    assert(krtn == KERN_SUCCESS);
    if(krtn)  return;

    /* For each page in the area allocated from the kernel map,
       find the physical address of the page.
       Enter the page in the target task's pmap, at the
       appropriate target task virtual address. */
    for ( off = 0; off < _kernSize; off += page_size )
    {
        vm_offset_t kern_phys_addr, phys_addr;
        if( _task)
            phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
        else
            phys_addr = srcAlign + off;
        if(phys_addr == 0)  return;

        // Check original state.
        kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
        // Set virtual page to point to the right physical one
        pmap_enter(
            kernel_pmap,
            _kernPtrAligned + off,
            phys_addr,
            VM_PROT_READ|VM_PROT_WRITE,
            TRUE);
    }
    _kernPtrAtIndex = rangeIndex;
}
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag(
        IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag );
}
/*
 * setPosition
 *
 * Set the logical start position inside the client buffer.
 *
 * It is convention that the position reflect the actual byte count that
 * is successfully transferred into or out of the buffer, before the I/O
 * request is "completed" (ie. sent back to its originator).
 */
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    assert(position <= _length);

    if (position >= _length)
    {
        _position         = _length;
        _positionAtIndex  = _rangesCount; /* careful: out-of-bounds */
        _positionAtOffset = 0;
        return;
    }

    if (position < _position)
    {
        _positionAtOffset = position;
        _positionAtIndex  = 0;
    }
    else
    {
        _positionAtOffset += (position - _position);
    }
    _position = position;

    while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
    {
        _positionAtOffset -= _ranges.v[_positionAtIndex].length;
        _positionAtIndex++;
    }
}
/*
 * readBytes:
 *
 * Copy data from the memory descriptor's buffer into the specified buffer,
 * relative to the current position.  The memory descriptor's position is
 * advanced based on the number of bytes copied.
 */
IOByteCount
IOGeneralMemoryDescriptor::readBytes(IOByteCount offset,
                                     void * bytes, IOByteCount withLength)
{
    IOByteCount bytesLeft;
    void *      segment;
    IOByteCount segmentLength;

    if( offset != _position)
        setPosition( offset );

    withLength = min(withLength, _length - _position);
    bytesLeft  = withLength;

#if 0
    while (bytesLeft && (_position < _length))
    {
        /* Compute the relative length to the end of this virtual segment. */
        segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);

        /* Compute the relative address of this virtual segment. */
        segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);

        if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment),
                /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes,
                /* size */ segmentLength))
        {
            bytesLeft = withLength;
            break;
        }
        bytesLeft -= segmentLength;
        offset += segmentLength;
        setPosition(_position + segmentLength);
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#else
    while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
    {
        segmentLength = min(segmentLength, bytesLeft);
        bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength);
        bytesLeft -= segmentLength;
        offset += segmentLength;
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#endif

    return withLength - bytesLeft;
}
/*
 * writeBytes:
 *
 * Copy data to the memory descriptor's buffer from the specified buffer,
 * relative to the current position.  The memory descriptor's position is
 * advanced based on the number of bytes copied.
 */
IOByteCount
IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset,
                                      const void * bytes, IOByteCount withLength)
{
    IOByteCount bytesLeft;
    void *      segment;
    IOByteCount segmentLength;

    if( offset != _position)
        setPosition( offset );

    withLength = min(withLength, _length - _position);
    bytesLeft  = withLength;

#if 0
    while (bytesLeft && (_position < _length))
    {
        assert(_position <= _length);

        /* Compute the relative length to the end of this virtual segment. */
        segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);

        /* Compute the relative address of this virtual segment. */
        segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);

        if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment),
                /* from */ (vm_offset_t) bytes,
                /* to */ (vm_offset_t) segment,
                /* size */ segmentLength))
        {
            bytesLeft = withLength;
            break;
        }
        bytesLeft -= segmentLength;
        offset += segmentLength;
        setPosition(_position + segmentLength);
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#else
    while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
    {
        segmentLength = min(segmentLength, bytesLeft);
        bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength);
        // Flush cache in case we're copying code around, eg. handling a code page fault
        IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength);

        bytesLeft -= segmentLength;
        offset += segmentLength;
        bytes = (void *) (((UInt32) bytes) + segmentLength);
    }
#endif

    return withLength - bytesLeft;
}
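/*
 * Illustrative sketch (not part of the original source): copying a small
 * header out of a descriptor with readBytes().  The structure and the md
 * variable are invented / assumed for the example.
 */
#if 0   /* example only */
    struct ExampleHeader { UInt32 magic; UInt32 payloadLength; };
    ExampleHeader header;

    // Copy sizeof(header) bytes starting at offset 0 of the descriptor.
    if (md->readBytes(0, &header, sizeof(header)) == sizeof(header)) {
        // ... header fields are now valid ...
    }
#endif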
/*
 * getPhysicalSegment:
 *
 * Get the physical address of the buffer, relative to the current position.
 * If the current position is at the end of the buffer, a zero is returned.
 */
IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
                                              IOByteCount * lengthOfSegment)
{
    vm_address_t      virtualAddress;
    IOByteCount       virtualLength;
    pmap_t            virtualPMap;
    IOPhysicalAddress physicalAddress;
    IOPhysicalLength  physicalLength;

    if( kIOMemoryRequiresWire & _flags)
        assert( _wireCount );

    if ((0 == _task) && (1 == _rangesCount))
    {
        assert(offset <= _length);
        if (offset >= _length)
        {
            *lengthOfSegment = 0;
            return 0;
        }

        physicalLength  = _length - offset;
        physicalAddress = offset + _ranges.v[0].address;

        *lengthOfSegment = physicalLength;
        return physicalAddress;
    }

    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length)
    {
        *lengthOfSegment = 0;
        return 0;
    }

    /* Prepare to compute the largest contiguous physical length possible. */
    virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset;
    virtualLength  = _ranges.v[_positionAtIndex].length  - _positionAtOffset;
    vm_address_t virtualPage = trunc_page(virtualAddress);
    if( _task)
        virtualPMap = get_task_pmap(_task);
    else
        virtualPMap = 0;

    physicalAddress = (virtualAddress == _cachedVirtualAddress) ?
                        _cachedPhysicalAddress :             /* optimization */
                        virtualPMap ?
                            pmap_extract(virtualPMap, virtualAddress) :
                            0;
    physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress;

    if (!physicalAddress && _task)
    {
        physicalAddress =
            vm_map_get_phys_page(get_task_map(_task), virtualPage);
        physicalAddress += virtualAddress - virtualPage;
    }

    if (physicalAddress == 0)   /* memory must be wired in order to proceed */
    {
        assert(physicalAddress);
        *lengthOfSegment = 0;
        return 0;
    }

    /* Compute the largest contiguous physical length possible, within range. */
    IOPhysicalAddress physicalPage = trunc_page(physicalAddress);

    while (physicalLength < virtualLength)
    {
        physicalPage          += page_size;
        virtualPage           += page_size;
        _cachedVirtualAddress  = virtualPage;
        _cachedPhysicalAddress = virtualPMap ?
                                    pmap_extract(virtualPMap, virtualPage) :
                                    0;
        if (!_cachedPhysicalAddress && _task)
            _cachedPhysicalAddress =
                vm_map_get_phys_page(get_task_map(_task), virtualPage);

        if (_cachedPhysicalAddress != physicalPage)  break;

        physicalLength += page_size;
    }

    /* Clip contiguous physical length at the end of this range. */
    if (physicalLength > virtualLength)
        physicalLength = virtualLength;

    *lengthOfSegment = physicalLength;

    return physicalAddress;
}
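/*
 * Illustrative sketch (not part of the original source): walking a descriptor
 * with getPhysicalSegment() to build a scatter/gather list, as a DMA driver
 * typically would.  The md variable is assumed, and the descriptor is assumed
 * to have been prepare()d so every page has a physical translation.
 */
#if 0   /* example only */
    IOByteCount       offset = 0;
    IOByteCount       segLen;
    IOPhysicalAddress segPhys;

    while ((segPhys = md->getPhysicalSegment(offset, &segLen))) {
        // program one DMA element covering [segPhys, segPhys + segLen)
        offset += segLen;
    }
#endif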
/*
 * getVirtualSegment:
 *
 * Get the virtual address of the buffer, relative to the current position.
 * If the memory wasn't mapped into the caller's address space, it will be
 * mapped in now.  If the current position is at the end of the buffer, a
 * null is returned.
 */
void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if( offset != _position)
        setPosition( offset );

    assert(_position <= _length);

    /* Fail gracefully if the position is at (or past) the end-of-buffer. */
    if (_position >= _length)
    {
        *lengthOfSegment = 0;
        return 0;
    }

    /* Compute the relative length to the end of this virtual segment. */
    *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;

    /* Compute the relative address of this virtual segment. */
    if (_task == kernel_task)
        return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
    else
    {
        vm_offset_t off;

        mapIntoKernel(_positionAtIndex);

        off  = _ranges.v[_kernPtrAtIndex].address;
        off -= trunc_page(off);

        return (void *) (_kernPtrAligned + off + _positionAtOffset);
    }
}
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    unsigned rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        vm_prot_t access = VM_PROT_DEFAULT;	// Could be cleverer using direction

        //
        // Check user read/write access to the data buffer.
        //

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length  +
                                page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm_shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                                 (taskVMMap, &actualSize, entryStart,
                                  forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }
        }
        _wireCount++;
    }
    return kIOReturnSuccess;

abortExit:
    {
        unsigned doneIndex;

        for (doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
            vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                                _ranges.v[doneIndex].length  +
                                page_size - 1);

            vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                          srcAlignEnd, FALSE);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);
            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }
    return kIOReturnVMError;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if(0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;
        unsigned      rangeIndex;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length  +
                                page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }

        _cachedVirtualAddress = 0;
    }
    return kIOReturnSuccess;
}
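/*
 * Illustrative sketch (not part of the original source): the prepare()/
 * complete() pairing around a transfer on pageable (user task) memory.
 * The md variable is assumed.
 */
#if 0   /* example only */
    if (kIOReturnSuccess == md->prepare(kIODirectionIn)) {
        // ... start the hardware transfer and wait for it to finish ...
        md->complete(kIODirectionIn);
    }
#endif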
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset = 0,
        IOByteCount        length = 0 )
{
    kern_return_t kr;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
        *atAddress = _ranges.v[0].address;
        return( kIOReturnSuccess );
    }

    if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) {

        do {

            if( (1 != _rangesCount)
             || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) {
                kr = kIOReturnUnsupported;
                continue;
            }

            if( 0 == length)
                length = getLength();
            if( (sourceOffset + length) > _ranges.v[0].length) {
                kr = kIOReturnBadArgument;
                continue;
            }

            ipc_port_t sharedMem = (ipc_port_t) _memEntry;
            vm_prot_t prot = VM_PROT_READ
                           | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            if( options & kIOMapAnywhere)
                *atAddress = 0;

            if( 0 == sharedMem)
                kr = kIOReturnVMError;
            else
                kr = KERN_SUCCESS;

            if( KERN_SUCCESS == kr)
                kr = vm_map( addressMap,
                             atAddress,
                             length, 0 /* mask */,
                             (( options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                             | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                             sharedMem, sourceOffset,
                             false /* copy */,
                             prot /* cur */,
                             prot /* max */,
                             VM_INHERIT_NONE );

        } while( false );

    } else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
        && (logical == _ranges.v[0].address)
        && (length <= _ranges.v[0].length) )
        return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryMap, OSObject )
OSDefineAbstractStructors( IOMemoryMap, OSObject )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

public:
    virtual void free();

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * isCompatible(
                IOMemoryDescriptor * owner,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );

    bool init(
                IOMemoryDescriptor * memory,
                IOMemoryMap *        superMap,
                IOByteCount          offset,
                IOByteCount          length );

    bool init(
                IOMemoryDescriptor * memory,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::init(
        IOMemoryDescriptor * _memory,
        IOMemoryMap *        _superMap,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    if( !_memory || !_superMap)
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory      = _memory;

    _superMap->retain();
    superMap    = _superMap;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    options     = superMap->getMapOptions();
    logical     = superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::init(
        IOMemoryDescriptor * _memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    kernel_vm_map_reference(addressMap);

    _memory->retain();
    memory      = _memory;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                options, offset, length ));
    if( !ok) {
        logical = 0;
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset = 0,
        IOByteCount        length = 0 )
{
    IOReturn          err = kIOReturnSuccess;
    vm_size_t         ourSize;
    vm_size_t         bytes;
    vm_offset_t       mapped;
    vm_address_t      logical;
    IOByteCount       pageOffset;
    IOPhysicalLength  segLen;
    IOPhysicalAddress physAddr;

    if( 0 == length)
        length = getLength();

    physAddr = getPhysicalSegment( sourceOffset, &segLen );
    assert( physAddr );

    pageOffset = physAddr - trunc_page( physAddr );
    ourSize = length + pageOffset;
    physAddr -= pageOffset;

    logical = *atAddress;
    if( 0 == (options & kIOMapAnywhere)) {
        mapped = trunc_page( logical );
        if( (logical - mapped) != pageOffset)
            err = kIOReturnVMError;
    }
    if( kIOReturnSuccess == err)
        err = vm_allocate( addressMap, &mapped, ourSize,
                        ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                        | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

    if( KERN_SUCCESS != err) {
        kprintf("IOMemoryDescriptor::doMap: vm_allocate() "
                "returned %08x\n", err);
        return( err);
    }

    // we have to make sure that these guys don't get copied if we fork.
    err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE);
    if( err != KERN_SUCCESS) {
        doUnmap( addressMap, mapped, ourSize);	// back out
        return( err);
    }

    logical = mapped;
    *atAddress = mapped + pageOffset;

    segLen += pageOffset;
    bytes = ourSize;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n",
                    addressMap, mapped + pageOffset, physAddr + pageOffset,
                    segLen - pageOffset);

        if( kIOReturnSuccess == err)
            err = IOMapPages( addressMap, mapped, physAddr, segLen, options );
        if( err)
            break;

        sourceOffset += segLen - pageOffset;
        mapped += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;
    if( kIOReturnSuccess != err)
        doUnmap( addressMap, logical, ourSize );

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task())))
        err = vm_deallocate( addressMap, logical, length );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}
void _IOMemoryMap::taskDied( void )
{
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
}
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}
IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::isCompatible(
        IOMemoryDescriptor * owner,
        task_t               task,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->init( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}
void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t           intoTask,
        IOVirtualAddress mapAddress,
        IOOptionBits     options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
     && !map->init( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    UNLOCK;

    return( map);
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t           intoTask,
        IOVirtualAddress toAddress,
        IOOptionBits     options,
        IOByteCount      offset = 0,
        IOByteCount      length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
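/*
 * Illustrative sketch (not part of the original source): mapping a
 * descriptor's memory into the kernel task and reading through the
 * resulting pointer.  The md variable is assumed.
 */
#if 0   /* example only */
    IOMemoryMap * mmap = md->map();     // kIOMapAnywhere into kernel_task
    if (mmap) {
        void * p = (void *) mmap->getVirtualAddress();
        // ... use up to mmap->getLength() bytes at p ...
        mmap->release();                // dropping the last reference undoes the mapping
    }
#endif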
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor * owner,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length )
{
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->isCompatible(
                        owner, intoTask, toAddress,
                        options | kIOMapReference,
                        offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->init( owner, intoTask, toAddress, options,
                            offset, length )) {

            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    owner->addMapping( mapping);

    UNLOCK;

    return( mapping);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings && _mappings->setObject( mapping))
            mapping->release();         /* really */
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection withDirection )
{
    if( !super::init())
        return( false );

    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = withDirection;
    _tag        = parent->getTag();

    return( true );
}
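/*
 * Illustrative sketch (not part of the original source): carving a sub-range
 * out of an existing descriptor with withSubRange().  The parentMD variable
 * and the offsets are invented for the example.
 */
#if 0   /* example only */
    // Describe bytes [512, 512 + 1024) of parentMD as an independent descriptor.
    IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
                parentMD, 512, 1024, parentMD->getDirection());
#endif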
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor * owner,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  withDirection,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       withDirection,
                                               bool              asReference = false)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);