/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <sys/cdefs.h>

#include <device/device_port.h>
void bcopy_phys(char *from, char *to, int size);
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, boolean_t wired);
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
memory_object_t device_pager_setup(
        memory_object_t         pager,
        int                     device_handle,
        vm_size_t               size,
        int                     flags);

kern_return_t device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,
        vm_offset_t             phys_addr,
        vm_size_t               size);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryDescriptor, OSObject )
OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}
inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
{
    vm_offset_t pa = pmap_extract(get_task_pmap(task), va);

    if ( pa == 0 )
    {
        pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
        if ( pa )  pa += va - trunc_page(va);
    }

    return pa;
}
inline void bcopy_phys_safe(char * from, char * to, int size)
{
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    bcopy_phys(from, to, size);

    ml_set_interrupts_enabled(enabled);
}

#define next_page(a) ( trunc_page(a) + page_size )
kern_return_t device_data_action(
               int device_handle, ipc_port_t device_pager,
               vm_prot_t protection, vm_object_offset_t offset,
               vm_size_t size)
{
    IOMemoryDescriptor * memDesc = (IOMemoryDescriptor *) device_handle;

    assert( OSDynamicCast( IOMemoryDescriptor, memDesc ));

    return( memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/));
}
kern_return_t device_close(
               int device_handle)
{
    IOMemoryDescriptor * memDesc = (IOMemoryDescriptor *) device_handle;

    assert( OSDynamicCast( IOMemoryDescriptor, memDesc ));

    memDesc->release();

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       withLength,
                                IODirection       withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                             withDirection, (task_t) 0 ));
}
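/*
 * Usage sketch for the factory methods above (illustrative only; the buffer,
 * its length and the transfer direction are assumptions, not part of this
 * file). A driver that owns a kernel buffer typically wraps it in a
 * descriptor like this:
 *
 *   static IOReturn describeKernelBuffer(void * buffer, IOByteCount size)
 *   {
 *       // kernel-task virtual address, data flowing out to the device
 *       IOMemoryDescriptor * md =
 *           IOMemoryDescriptor::withAddress(buffer, size, kIODirectionOut);
 *       if (!md)
 *           return kIOReturnNoMemory;
 *
 *       // ... hand md to the I/O path, then drop the reference ...
 *       md->release();
 *       return kIOReturnSuccess;
 *   }
 */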
/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}
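/*
 * Sketch of withSubRange() use (assumption: "parentMD" is an existing
 * descriptor at least page_size bytes long). A sub-range is a view onto part
 * of another descriptor; the underlying memory is not copied or re-described.
 *
 *   IOMemoryDescriptor * firstPage =
 *       IOMemoryDescriptor::withSubRange(parentMD,
 *                                        0,           // offset into parent
 *                                        page_size,   // length of the view
 *                                        parentMD->getDirection());
 *   if (firstPage) {
 *       // use the sub-range like any other descriptor, then release it
 *       firstPage->release();
 *   }
 */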
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
/*
 * initWithRanges:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    assert(ranges);
    assert(withCount);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        assert(_wireCount == 0);

        while (_wireCount)
            complete();
        if (_kernPtrAligned)
            unmapFromKernel();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */
    _ranges.v              = 0;
    _rangesCount           = withCount;
    _rangesIsAllocated     = asReference ? false : true;
    _direction             = withDirection;
    _length                = 0;
    _task                  = withTask;
    _position              = 0;
    _positionAtIndex       = 0;
    _positionAtOffset      = 0;
    _kernPtrAligned        = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;
    _flags                 = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
    {
        _length += _ranges.v[index].length;
    }

    return true;
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                                   UInt32            withCount,
                                                   IODirection       withDirection,
                                                   bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                            withCount, withDirection, (task_t) 0, asReference ));
}
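/*
 * Sketch of a multi-range descriptor built with withRanges() (the two
 * buffers and their lengths below are assumptions used only for
 * illustration). Because asReference defaults to false, the range list is
 * copied, so the local array need not outlive the call.
 *
 *   IOVirtualRange ranges[2];
 *   ranges[0].address = (IOVirtualAddress) headerBuffer;
 *   ranges[0].length  = headerLength;
 *   ranges[1].address = (IOVirtualAddress) payloadBuffer;
 *   ranges[1].length  = payloadLength;
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
 *                                 ranges, 2, kIODirectionOut, kernel_task);
 *   // md->getLength() is headerLength + payloadLength
 */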
void IOGeneralMemoryDescriptor::free()
{
    while (_wireCount)
        complete();
    if (_kernPtrAligned)
        unmapFromKernel();
    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );
    super::free();
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */     // Pull the shared pages out of the task map
/* DEPRECATED */     // Do we need to unwire it first?
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         pmap_change_wiring(
/* DEPRECATED */                 kernel_pmap,
/* DEPRECATED */                 _kernPtrAligned + off,
/* DEPRECATED */                 FALSE);
/* DEPRECATED */
/* DEPRECATED */         pmap_remove(
/* DEPRECATED */                 kernel_pmap,
/* DEPRECATED */                 _kernPtrAligned + off,
/* DEPRECATED */                 _kernPtrAligned + off + page_size);
/* DEPRECATED */     }
/* DEPRECATED */     // Free the former shmem area in the task
/* DEPRECATED */     krtn = vm_deallocate(kernel_map,
/* DEPRECATED */                          _kernPtrAligned,
/* DEPRECATED */                          _kernSize );
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     _kernPtrAligned = 0;
/* DEPRECATED */ }
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */     if (_kernPtrAligned)
/* DEPRECATED */     {
/* DEPRECATED */         if (_kernPtrAtIndex == rangeIndex)  return;
/* DEPRECATED */         unmapFromKernel();
/* DEPRECATED */         assert(_kernPtrAligned == 0);
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
/* DEPRECATED */
/* DEPRECATED */     _kernSize = trunc_page(_ranges.v[rangeIndex].address +
/* DEPRECATED */                            _ranges.v[rangeIndex].length  +
/* DEPRECATED */                            page_size - 1) - srcAlign;
/* DEPRECATED */
/* DEPRECATED */     /* Find some memory of the same size in kernel task.  We use vm_allocate() */
/* DEPRECATED */     /* to do this. vm_allocate inserts the found memory object in the */
/* DEPRECATED */     /* target task's map as a side effect. */
/* DEPRECATED */     krtn = vm_allocate( kernel_map,
/* DEPRECATED */             &_kernPtrAligned,
/* DEPRECATED */             _kernSize,
/* DEPRECATED */             VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     if(krtn)  return;
/* DEPRECATED */
/* DEPRECATED */     /* For each page in the area allocated from the kernel map, */
/* DEPRECATED */     /* find the physical address of the page. */
/* DEPRECATED */     /* Enter the page in the target task's pmap, at the */
/* DEPRECATED */     /* appropriate target task virtual address. */
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t kern_phys_addr, phys_addr;
/* DEPRECATED */         if( _task)
/* DEPRECATED */             phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
/* DEPRECATED */         else
/* DEPRECATED */             phys_addr = srcAlign + off;
/* DEPRECATED */         assert(phys_addr);
/* DEPRECATED */         if(phys_addr == 0)  return;
/* DEPRECATED */
/* DEPRECATED */         // Check original state.
/* DEPRECATED */         kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
/* DEPRECATED */         // Set virtual page to point to the right physical one
/* DEPRECATED */         pmap_enter(
/* DEPRECATED */             kernel_pmap,
/* DEPRECATED */             _kernPtrAligned + off,
/* DEPRECATED */             phys_addr,
/* DEPRECATED */             VM_PROT_READ|VM_PROT_WRITE,
/* DEPRECATED */             TRUE);
/* DEPRECATED */     }
/* DEPRECATED */     _kernPtrAtIndex = rangeIndex;
/* DEPRECATED */ }
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag(
        IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag );
}

IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr;

    prepare();
    physAddr = getPhysicalSegment( offset, length );
    complete();

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
                                           void *      bytes,
                                           IOByteCount withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another source segment?)
        {
            IOPhysicalAddress sourceSegment;
            IOByteCount       sourceSegmentLength;

            sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
            if ( sourceSegment == 0 )  goto readBytesErr;

            sourceSegmentLength = min(sourceSegmentLength, withLength);

            while ( sourceSegmentLength ) // (process another target segment?)
            {
                IOPhysicalAddress targetSegment;
                IOByteCount       targetSegmentLength;

                targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( targetSegment == 0 )  goto readBytesErr;

                targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);

                if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(sourceSegment) - sourceSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) targetSegmentLength );

                ((UInt8 *) bytes)    += targetSegmentLength;
                bytesCopied          += targetSegmentLength;
                offset               += targetSegmentLength;
                sourceSegment        += targetSegmentLength;
                sourceSegmentLength  -= targetSegmentLength;
                withLength           -= targetSegmentLength;
            }
        }
    }

readBytesErr:

    if ( bytesCopied )
    {
        // We mark the destination pages as modified, just
        // in case they are made pageable later on in life.

        pmap_modify_pages( /* pmap  */ kernel_pmap,
                           /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
                           /* end   */ round_page(((vm_offset_t) bytes)) );
    }

    return bytesCopied;
}
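/*
 * Sketch of readBytes() use (descriptor "md" and the local buffer are
 * assumptions). readBytes() copies out of the memory the descriptor
 * describes, segment by segment, into an ordinary kernel buffer and returns
 * how many bytes were actually copied.
 *
 *   UInt8       header[64];
 *   IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *   if (copied != sizeof(header)) {
 *       // the descriptor was shorter than expected, or a segment
 *       // could not be resolved
 *   }
 */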
IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount  offset,
                                            const void * bytes,
                                            IOByteCount  withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another target segment?)
        {
            IOPhysicalAddress targetSegment;
            IOByteCount       targetSegmentLength;

            targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
            if ( targetSegment == 0 )  goto writeBytesErr;

            targetSegmentLength = min(targetSegmentLength, withLength);

            while ( targetSegmentLength ) // (process another source segment?)
            {
                IOPhysicalAddress sourceSegment;
                IOByteCount       sourceSegmentLength;

                sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( sourceSegment == 0 )  goto writeBytesErr;

                sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);

                if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(targetSegment) - targetSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    // We flush the data cache in case it is code we've copied,
                    // such that the instruction cache is in the know about it.

                    flush_dcache(targetSegment, pageLength, true);

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) sourceSegmentLength );

                // We flush the data cache in case it is code we've copied,
                // such that the instruction cache is in the know about it.

                flush_dcache(targetSegment, sourceSegmentLength, true);

                ((UInt8 *) bytes)    += sourceSegmentLength;
                bytesCopied          += sourceSegmentLength;
                offset               += sourceSegmentLength;
                targetSegment        += sourceSegmentLength;
                targetSegmentLength  -= sourceSegmentLength;
                withLength           -= sourceSegmentLength;
            }
        }
    }

writeBytesErr:

    return bytesCopied;
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
/* DEPRECATED */     assert(position <= _length);
/* DEPRECATED */
/* DEPRECATED */     if (position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         _position         = _length;
/* DEPRECATED */         _positionAtIndex  = _rangesCount; /* careful: out-of-bounds */
/* DEPRECATED */         _positionAtOffset = 0;
/* DEPRECATED */         return;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     if (position < _position)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset = position;
/* DEPRECATED */         _positionAtIndex  = 0;
/* DEPRECATED */     }
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset += (position - _position);
/* DEPRECATED */     }
/* DEPRECATED */     _position = position;
/* DEPRECATED */
/* DEPRECATED */     while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset -= _ranges.v[_positionAtIndex].length;
/* DEPRECATED */         _positionAtIndex++;
/* DEPRECATED */     }
/* DEPRECATED */ }
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                                 IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        if ( _task == 0 ) // (physical memory?)
        {
            address = _ranges.v[rangesIndex].address + offset;
            length  = _ranges.v[rangesIndex].length  - offset;

            for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
            {
                if ( address + length != _ranges.v[rangesIndex].address )  break;

                length += _ranges.v[rangesIndex].length; // (coalesce ranges)
            }
        }
        else // (virtual memory?)
        {
            vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;

            assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);

            address = pmap_extract_safe(_task, addressVirtual);
            length  = next_page(addressVirtual) - addressVirtual;
            length  = min(_ranges.v[rangesIndex].length - offset, length);
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return( address );
}
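/*
 * Sketch of walking a prepared descriptor one physical segment at a time, as
 * a scatter/gather list would be built for a DMA engine (descriptor "md" is
 * an assumption). getPhysicalSegment() returns the physical address at the
 * given offset and, through its second argument, how many contiguous bytes
 * follow it.
 *
 *   IOByteCount offset = 0;
 *   IOByteCount total  = md->getLength();
 *   while (offset < total) {
 *       IOByteCount       segLen;
 *       IOPhysicalAddress segPhys = md->getPhysicalSegment(offset, &segLen);
 *       if (segPhys == 0)
 *           break;                      // unmapped or out of range
 *       // program one scatter/gather element: (segPhys, segLen)
 *       offset += segLen;
 *   }
 */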
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                               IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
        {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return( address );
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
/* DEPRECATED */     if( offset != _position)
/* DEPRECATED */         setPosition( offset );
/* DEPRECATED */
/* DEPRECATED */     assert(_position <= _length);
/* DEPRECATED */
/* DEPRECATED */     /* Fail gracefully if the position is at (or past) the end-of-buffer. */
/* DEPRECATED */     if (_position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         *lengthOfSegment = 0;
/* DEPRECATED */         return 0;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative length to the end of this virtual segment. */
/* DEPRECATED */     *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative address of this virtual segment. */
/* DEPRECATED */     if (_task == kernel_task)
/* DEPRECATED */         return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */         mapIntoKernel(_positionAtIndex);
/* DEPRECATED */
/* DEPRECATED */         off  = _ranges.v[_kernPtrAtIndex].address;
/* DEPRECATED */         off -= trunc_page(off);
/* DEPRECATED */
/* DEPRECATED */         return (void *) (_kernPtrAligned + off + _positionAtOffset);
/* DEPRECATED */     }
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't be
 * called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    UInt rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;
        vm_prot_t access;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        switch (forDirection)
        {
            case kIODirectionIn:
                access = VM_PROT_WRITE;
                break;

            case kIODirectionOut:
                access = VM_PROT_READ;
                break;

            default:
                access = VM_PROT_READ | VM_PROT_WRITE;
                break;
        }

        //
        // Check user read/write access to the data buffer.
        //
        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length  +
                                                 page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm_shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                                 (taskVMMap, &actualSize, entryStart,
                                  forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }
        }
    }
    _wireCount++;

    return kIOReturnSuccess;

abortExit:
    {
        UInt doneIndex;

        for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
            vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                                                 _ranges.v[doneIndex].length  +
                                                 page_size - 1);

            vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                          srcAlignEnd, FALSE);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);
            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }

    return kIOReturnVMError;
}
/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if(0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        UInt rangeIndex;
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length  +
                                                 page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                         ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }
    return kIOReturnSuccess;
}
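/*
 * Sketch of the prepare()/complete() pairing (descriptor "md" describing
 * pageable user memory is an assumption). The calls must bracket the I/O so
 * the pages stay wired for exactly the lifetime of the transfer.
 *
 *   IOReturn rc = md->prepare(kIODirectionIn);
 *   if (rc == kIOReturnSuccess) {
 *       // ... start the transfer and wait for it to finish ...
 *       md->complete(kIODirectionIn);
 *   }
 */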
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
        *atAddress = _ranges.v[0].address;
        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = 0;

        for (unsigned index = 0; index < _rangesCount; index++)
            size += round_page(_ranges.v[index].address + _ranges.v[index].length)
                  - trunc_page(_ranges.v[index].address);

        if( _task) {

            vm_object_offset_t actualSize = size;
            kr = mach_make_memory_entry_64( get_task_map(_task),
                        &actualSize, _ranges.v[0].address,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != size)) {
                IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
                            _ranges.v[0].address, (UInt32)actualSize, size);
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {

            memory_object_t pager;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);

            pager = device_pager_setup( (memory_object_t) 0, (int) this, size,
                        reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0 );

            if( pager) {
                retain();       // pager has a ref
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    // ipc_port_release_send( (ipc_port_t) pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            reserved->devicePager = pager;

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    kr = super::doMap( addressMap, atAddress,
                       options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
        && (logical == _ranges.v[0].address)
        && (length <= _ranges.v[0].length) )
        return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
OSDefineMetaClass( IOMemoryMap, OSObject )
OSDefineAbstractStructors( IOMemoryMap, OSObject )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

public:
    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

public:
    virtual void free();

    // IOMemoryMap methods
    virtual IOVirtualAddress    getVirtualAddress();
    virtual IOByteCount         getLength();
    virtual task_t              getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits        getMapOptions();

    virtual IOReturn            unmap();
    virtual void                taskDied();

    virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
                                                   IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * isCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool init(
                IOMemoryDescriptor *    memory,
                IOMemoryMap *           superMap,
                IOByteCount             offset,
                IOByteCount             length );

    bool init(
                IOMemoryDescriptor *    memory,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    IOReturn redirect(
                task_t                  intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::init(
        IOMemoryDescriptor *    _memory,
        IOMemoryMap *           _superMap,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    if( !_memory || !_superMap || !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory   = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::init(
        IOMemoryDescriptor *    _memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map( intoTask );
    if( !addressMap)
        return( false);
    kernel_vm_map_reference(addressMap);

    _memory->retain();
    memory = _memory;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        logical = 0;
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t          sharedMem;
    vm_size_t           size;
    vm_offset_t         mapped;
    IOByteCount         sourceOffset;
    IOOptionBits        options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                           | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot, // cur
                          prot, // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalAddress   sourceAddr;
    IOMemoryDescriptorMapAllocRef       ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        ref.size = round_page( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}
enum {
    kIOMemoryRedirected = 0x00010000
};
IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        IOVirtualAddress        address,
        IOByteCount             sourceOffset,
        IOByteCount             length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOPhysicalLength    segLen;
    IOPhysicalAddress   physAddr;

    if( !addressMap) {

        LOCK;

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %x, %lx\n", address, sourceOffset);
            do {
                assert_wait( (event_t) this, THREAD_UNINT );
                UNLOCK;
                thread_block((void (*)(void)) 0);
                LOCK;
            } while( kIOMemoryRedirected & _flags );
        }

        UNLOCK;
        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page( physAddr );

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
        if( err)
            break;

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength        allLen;
                IOPhysicalAddress       allPhys;

                allPhys = getPhysicalSegment( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allPhys + allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object( pager, sourceOffset + page,
                                                        physAddr + page, page_size );
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
             && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn            err = kIOReturnSuccess;
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    LOCK;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, redirect );

            iter->release();
        }
    } while( false );

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        thread_wakeup( (event_t) this);
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        LOCK;
        if( logical && addressMap
            && (get_task_map( safeTask) != addressMap)
            && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %x) %x from %p\n", redirect, err, logical, addressMap);
        }
        UNLOCK;
    }

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical = 0;
    UNLOCK;
}
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask );
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options );
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::isCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  task,
                IOVirtualAddress        toAddress,
                IOOptionBits            _options,
                IOByteCount             _offset,
                IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->init( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    if( reserved)
        IODelete( reserved, ExpansionData, 1 );

    super::free();
}
IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
     && !map->init( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    UNLOCK;

    return( map);
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset = 0,
        IOByteCount             length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
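/*
 * Sketch of map() use (descriptor "md" is an assumption): mapping a
 * descriptor into the kernel task and using the mapping's virtual address.
 * The returned IOMemoryMap holds the mapping; releasing it undoes it.
 *
 *   IOMemoryMap * mdMap = md->map();
 *   if (mdMap) {
 *       void * p = (void *) mdMap->getVirtualAddress();
 *       // ... touch the memory through p ...
 *       mdMap->release();
 *   }
 */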
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->isCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->init( owner, intoTask, toAddress, options,
                            offset, length )) {

            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    owner->addMapping( mapping);

    UNLOCK;

    return( mapping);
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings && _mappings->setObject( mapping ))
            mapping->release();         /* really */
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection withDirection )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = withDirection;
    _tag        = parent->getTag();

    return( true );
}

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  withDirection,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                 IOVirtualRange * ranges,
                                 UInt32           withCount,
                                 IODirection      withDirection,
                                 task_t           withTask,
                                 bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       withDirection,
                                               bool              asReference = false)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);