/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc.  All rights reserved.
 */
#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>

#include <device/device_port.h>
void bcopy_phys(char *from, char *to, int size);
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, unsigned int flags, boolean_t wired);

struct phys_entry *pmap_find_physentry(vm_offset_t pa);

void ipc_port_release_send(ipc_port_t port);

vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
memory_object_t device_pager_setup(
        memory_object_t         pager,
        int                     device_handle,
        vm_size_t               size,
        int                     flags);

void device_pager_deallocate(
        memory_object_t         pager);

kern_return_t device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,
        vm_offset_t             phys_addr,
        vm_size_t               size);

/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
        vm_map_t                map,
        vm_offset_t             vaddr,
        vm_prot_t               fault_type,
        boolean_t               change_wiring,
        int                     interruptible,
        pmap_t                  caller_pmap,
        vm_offset_t             caller_pmap_addr);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}

inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
{
    vm_offset_t pa = pmap_extract(get_task_pmap(task), va);

    if ( pa == 0 )
    {
        pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
        if ( pa )  pa += va - trunc_page(va);
    }

    return pa;
}

inline void bcopy_phys_safe(char * from, char * to, int size)
{
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    bcopy_phys(from, to, size);

    ml_set_interrupts_enabled(enabled);
}

#define next_page(a) ( trunc_page(a) + page_size )
kern_return_t device_data_action(
               int                      device_handle,
               ipc_port_t               device_pager,
               vm_prot_t                protection,
               vm_object_offset_t       offset,
               vm_size_t                size)
{
    struct ExpansionData {
        unsigned int    pagerContig:1;
        unsigned int    unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    memDesc = ref->memory;
    kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);

    return( kr );
}

kern_return_t device_close(
               int      device_handle)
{
    struct ExpansionData {
        unsigned int    pagerContig:1;
        unsigned int    unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       withLength,
                                IODirection       withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                             withDirection, (task_t) 0 ));
}
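/*
 * Usage sketch (illustrative only; not part of the class implementation).
 * A minimal example of the single-buffer factory above, assuming the caller
 * owns a kernel-allocated buffer; the buffer name and size are hypothetical.
 *
 *     void * buf = IOMalloc(4096);
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
 *     if (md) {
 *         if (md->prepare() == kIOReturnSuccess) {
 *             // ... program the hardware from md's physical segments ...
 *             md->complete();
 *         }
 *         md->release();
 *     }
 *     IOFree(buf, 4096);
 */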
/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}
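/*
 * Usage sketch (illustrative only, assuming the caller owns two kernel
 * buffers; bufA/bufB and lenA/lenB are hypothetical names).  Build one
 * descriptor over several virtual ranges, then expose a window onto it
 * with withSubRange.  The ranges array can live on the stack because
 * asReference defaults to false, so the descriptor copies it.
 *
 *     IOVirtualRange ranges[2];
 *     ranges[0].address = (IOVirtualAddress) bufA;  ranges[0].length = lenA;
 *     ranges[1].address = (IOVirtualAddress) bufB;  ranges[1].length = lenB;
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
 *                 ranges, 2, kIODirectionInOut, kernel_task);
 *     IOMemoryDescriptor * window = 0;
 *     if (md)
 *         window = IOMemoryDescriptor::withSubRange(md, 0, lenA, kIODirectionInOut);
 *     // ... use window, then release both objects ...
 */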
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
/*
 * initWithRanges:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        assert(_wireCount == 0);

        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */
    _length            = 0;
    _rangesCount       = withCount;
    _rangesIsAllocated = asReference ? false : true;
    _direction         = withDirection;
    _task              = withTask;
    _positionAtIndex   = 0;
    _positionAtOffset  = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
        _length += _ranges.v[index].length;

    return true;
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                                   UInt32            withCount,
                                                   IODirection       withDirection,
                                                   bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                            withCount, withDirection, (task_t) 0, asReference ));
}
void IOGeneralMemoryDescriptor::free()
{
    if( reserved)
        reserved->memory = 0;

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if( reserved && reserved->devicePager)
        device_pager_deallocate( reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved (ExpansionData)
    // so no reserved access after this point
    if( _memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */     // Pull the shared pages out of the task map
/* DEPRECATED */     // Do we need to unwire it first?
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         pmap_change_wiring(kernel_pmap, _kernPtrAligned + off, FALSE);
/* DEPRECATED */         pmap_remove(kernel_pmap,
/* DEPRECATED */                     _kernPtrAligned + off,
/* DEPRECATED */                     _kernPtrAligned + off + page_size);
/* DEPRECATED */     }
/* DEPRECATED */     // Free the former shmem area in the task
/* DEPRECATED */     krtn = vm_deallocate(kernel_map, _kernPtrAligned, _kernSize);
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     _kernPtrAligned = 0;
/* DEPRECATED */ }
/* DEPRECATED */
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */     if (_kernPtrAligned)
/* DEPRECATED */     {
/* DEPRECATED */         if (_kernPtrAtIndex == rangeIndex)  return;
/* DEPRECATED */         unmapFromKernel();
/* DEPRECATED */         assert(_kernPtrAligned == 0);
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
/* DEPRECATED */
/* DEPRECATED */     _kernSize = trunc_page(_ranges.v[rangeIndex].address +
/* DEPRECATED */                            _ranges.v[rangeIndex].length +
/* DEPRECATED */                            page_size - 1) - srcAlign;
/* DEPRECATED */
/* DEPRECATED */     /* Find some memory of the same size in kernel task.  We use vm_allocate() */
/* DEPRECATED */     /* to do this. vm_allocate inserts the found memory object in the */
/* DEPRECATED */     /* target task's map as a side effect. */
/* DEPRECATED */     krtn = vm_allocate( kernel_map,
/* DEPRECATED */                         &_kernPtrAligned,
/* DEPRECATED */                         _kernSize,
/* DEPRECATED */                         VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     if(krtn)  return;
/* DEPRECATED */
/* DEPRECATED */     /* For each page in the area allocated from the kernel map, */
/* DEPRECATED */     /* find the physical address of the page. */
/* DEPRECATED */     /* Enter the page in the target task's pmap, at the */
/* DEPRECATED */     /* appropriate target task virtual address. */
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t kern_phys_addr, phys_addr;
/* DEPRECATED */         if( _task)
/* DEPRECATED */             phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
/* DEPRECATED */         else
/* DEPRECATED */             phys_addr = srcAlign + off;
/* DEPRECATED */         assert(phys_addr);
/* DEPRECATED */         if(phys_addr == 0)  return;
/* DEPRECATED */
/* DEPRECATED */         // Check original state.
/* DEPRECATED */         kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
/* DEPRECATED */         // Set virtual page to point to the right physical one
/* DEPRECATED */         pmap_enter(
/* DEPRECATED */             kernel_pmap,
/* DEPRECATED */             _kernPtrAligned + off,
/* DEPRECATED */             phys_addr,
/* DEPRECATED */             VM_PROT_READ|VM_PROT_WRITE,
/* DEPRECATED */             VM_WIMG_USE_DEFAULT,
/* DEPRECATED */             TRUE);
/* DEPRECATED */     }
/* DEPRECATED */     _kernPtrAtIndex = rangeIndex;
/* DEPRECATED */ }
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
        complete();
    }

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
                                           void *      bytes,
                                           IOByteCount withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength )  // (process another source segment?)
        {
            IOPhysicalAddress sourceSegment;
            IOByteCount       sourceSegmentLength;

            sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
            if ( sourceSegment == 0 )  goto readBytesErr;

            sourceSegmentLength = min(sourceSegmentLength, withLength);

            while ( sourceSegmentLength )  // (process another target segment?)
            {
                IOPhysicalAddress targetSegment;
                IOByteCount       targetSegmentLength;

                targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( targetSegment == 0 )  goto readBytesErr;

                targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);

                if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(sourceSegment) - sourceSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) targetSegmentLength );

                ((UInt8 *) bytes)    += targetSegmentLength;
                bytesCopied          += targetSegmentLength;
                offset               += targetSegmentLength;
                sourceSegment        += targetSegmentLength;
                sourceSegmentLength  -= targetSegmentLength;
                withLength           -= targetSegmentLength;
            }
        }
    }

readBytesErr:

    if ( bytesCopied )
    {
        // We mark the destination pages as modified, just
        // in case they are made pageable later on in life.

        pmap_modify_pages( /* pmap  */ kernel_pmap,
                           /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
                           /* end   */ round_page(((vm_offset_t) bytes)) );
    }

    return bytesCopied;
}
IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount  offset,
                                            const void * bytes,
                                            IOByteCount  withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength )  // (process another target segment?)
        {
            IOPhysicalAddress targetSegment;
            IOByteCount       targetSegmentLength;

            targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
            if ( targetSegment == 0 )  goto writeBytesErr;

            targetSegmentLength = min(targetSegmentLength, withLength);

            while ( targetSegmentLength )  // (process another source segment?)
            {
                IOPhysicalAddress sourceSegment;
                IOByteCount       sourceSegmentLength;

                sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( sourceSegment == 0 )  goto writeBytesErr;

                sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);

                if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(targetSegment) - targetSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    // We flush the data cache in case it is code we've copied,
                    // such that the instruction cache is in the know about it.

                    flush_dcache(targetSegment, pageLength, true);

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) sourceSegmentLength );

                // We flush the data cache in case it is code we've copied,
                // such that the instruction cache is in the know about it.

                flush_dcache(targetSegment, sourceSegmentLength, true);

                ((UInt8 *) bytes)    += sourceSegmentLength;
                bytesCopied          += sourceSegmentLength;
                offset               += sourceSegmentLength;
                targetSegment        += sourceSegmentLength;
                targetSegmentLength  -= sourceSegmentLength;
                withLength           -= sourceSegmentLength;
            }
        }
    }

writeBytesErr:

    return bytesCopied;
}
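/*
 * Usage sketch (illustrative only): readBytes()/writeBytes() copy between a
 * caller-supplied kernel buffer and the descriptor's memory by physical
 * address, so they work even when the memory has no kernel virtual mapping.
 * 'md' is a hypothetical descriptor; offsets and lengths are made up.
 *
 *     UInt8 header[64];
 *     IOByteCount got = md->readBytes(0, header, sizeof(header));
 *     if (got == sizeof(header)) {
 *         // ... inspect the header, then patch the first 4 bytes ...
 *         md->writeBytes(0, header, 4);
 *     }
 */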
// osfmk/device/iokit_rpc.c
extern unsigned int IOTranslateCacheBits(struct phys_entry *pp);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
/* DEPRECATED */     assert(position <= _length);
/* DEPRECATED */
/* DEPRECATED */     if (position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         _position         = _length;
/* DEPRECATED */         _positionAtIndex  = _rangesCount; /* careful: out-of-bounds */
/* DEPRECATED */         _positionAtOffset = 0;
/* DEPRECATED */         return;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     if (position < _position)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset = position;
/* DEPRECATED */         _positionAtIndex  = 0;
/* DEPRECATED */     }
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset += (position - _position);
/* DEPRECATED */     }
/* DEPRECATED */     _position = position;
/* DEPRECATED */
/* DEPRECATED */     while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset -= _ranges.v[_positionAtIndex].length;
/* DEPRECATED */         _positionAtIndex++;
/* DEPRECATED */     }
/* DEPRECATED */ }
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                                 IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        if ( _task == 0 ) // (physical memory?)
        {
            address = _ranges.v[rangesIndex].address + offset;
            length  = _ranges.v[rangesIndex].length  - offset;

            for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
            {
                if ( address + length != _ranges.v[rangesIndex].address )  break;

                length += _ranges.v[rangesIndex].length; // (coalesce ranges)
            }
        }
        else // (virtual memory?)
        {
            vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;

            assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);

            address = pmap_extract_safe(_task, addressVirtual);
            length  = next_page(addressVirtual) - addressVirtual;
            length  = min(_ranges.v[rangesIndex].length - offset, length);
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
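/*
 * Usage sketch (illustrative only): walking the descriptor's physical
 * segments is the usual way a driver builds a scatter/gather list.  The
 * descriptor should already be prepare()d when it covers pageable memory
 * (see prepare() below).  'md' is a hypothetical, already-prepared
 * descriptor.
 *
 *     IOByteCount       offset = 0;
 *     IOByteCount       segLen;
 *     IOPhysicalAddress segPhys;
 *     while ((segPhys = md->getPhysicalSegment(offset, &segLen))) {
 *         // ... append {segPhys, segLen} to the DMA program ...
 *         offset += segLen;
 *     }
 */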
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                               IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
        {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
/* DEPRECATED */     if( offset != _position)
/* DEPRECATED */         setPosition( offset );
/* DEPRECATED */
/* DEPRECATED */     assert(_position <= _length);
/* DEPRECATED */
/* DEPRECATED */     /* Fail gracefully if the position is at (or past) the end-of-buffer. */
/* DEPRECATED */     if (_position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         *lengthOfSegment = 0;
/* DEPRECATED */         return 0;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative length to the end of this virtual segment. */
/* DEPRECATED */     *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative address of this virtual segment. */
/* DEPRECATED */     if (_task == kernel_task)
/* DEPRECATED */         return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */         mapIntoKernel(_positionAtIndex);
/* DEPRECATED */
/* DEPRECATED */         off  = _ranges.v[_kernPtrAtIndex].address;
/* DEPRECATED */         off -= trunc_page(off);
/* DEPRECATED */
/* DEPRECATED */         return (void *) (_kernPtrAligned + off + _positionAtOffset);
/* DEPRECATED */     }
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/*
 * prepare:
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
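/*
 * Usage sketch (illustrative only): prepare() and complete() bracket an
 * I/O transfer.  'md' stands for any descriptor built over pageable
 * (typically user-task) memory; error handling is reduced to the essentials.
 *
 *     if (md->prepare(kIODirectionIn) == kIOReturnSuccess) {
 *         // memory is now wired; its physical addresses stay valid
 *         // ... run the transfer ...
 *         md->complete(kIODirectionIn);
 *     }
 */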
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    unsigned rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;
        vm_prot_t     access;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        switch (forDirection)
        {
            case kIODirectionIn:
                access = VM_PROT_WRITE;
                break;

            case kIODirectionOut:
                access = VM_PROT_READ;
                break;

            default:
                access = VM_PROT_READ | VM_PROT_WRITE;
                break;
        }

        //
        // Check user read/write access to the data buffer.
        //
        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length +
                                                 page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm_shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                            (taskVMMap, &actualSize, entryStart,
                             forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->
                            appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }
        }
    }

    _wireCount++;

    return kIOReturnSuccess;

abortExit:
    unsigned doneIndex;

    for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
        vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
        IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                                             _ranges.v[doneIndex].length +
                                             page_size - 1);

        vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                      srcAlignEnd, FALSE);
    }

    if (_memoryEntries) {
        ipc_port_t *handles, *handlesEnd;

        handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
        handlesEnd = (ipc_port_t *)
            ((vm_address_t) handles + _memoryEntries->getLength());
        while (handles < handlesEnd)
            ipc_port_release_send(*handles++);
        _memoryEntries->release();
        _memoryEntries = 0;
    }

    return kIOReturnVMError;
}
/*
 * complete:
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if(0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        unsigned      rangeIndex;
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                                 _ranges.v[rangeIndex].length +
                                                 page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }

    return kIOReturnSuccess;
}
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
        *atAddress = _ranges.v[0].address;
        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = 0;

        for (unsigned index = 0; index < _rangesCount; index++)
            size += round_page(_ranges.v[index].address + _ranges.v[index].length)
                  - trunc_page(_ranges.v[index].address);

        if( _task) {

            vm_size_t actualSize = size;
            kr = mach_make_memory_entry( get_task_map(_task),
                        &actualSize, _ranges.v[0].address,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) {
                IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
                      _ranges.v[0].address, (UInt32)actualSize, size);
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else {

            memory_object_t     pager = MACH_PORT_NULL;
            unsigned int        flags = 0;
            struct phys_entry * pp;
            IOPhysicalAddress   pa;
            IOPhysicalLength    segLen;

            pa = getPhysicalSegment( sourceOffset, &segLen );

            if( !reserved)
                reserved = IONew( ExpansionData, 1 );

            if( reserved) {
                reserved->pagerContig = (1 == _rangesCount);
                reserved->memory = this;

                switch(options & kIOMapCacheMask ) {    /*What cache mode do we need*/

                    case kIOMapDefaultCache:
                    default:
                        if((pp = pmap_find_physentry(pa))) {/* Find physical address */
                            /* Use physical attributes as default */
                            flags = IOTranslateCacheBits(pp);
                        }
                        else {  /* If no physical, just hard code attributes */
                            flags = DEVICE_PAGER_CACHE_INHIB |
                                    DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                        }
                        break;

                    case kIOMapInhibitCache:
                        flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                        break;

                    case kIOMapWriteThruCache:
                        flags = DEVICE_PAGER_WRITE_THROUGH |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                        break;

                    case kIOMapCopybackCache:
                        flags = DEVICE_PAGER_COHERENT;
                        break;
                }

#if __ppc__
                flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
#else
                flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
#endif

                pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                            size, flags );

                if( pager) {
                    kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                                size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                    assert( KERN_SUCCESS == kr );
                    if( KERN_SUCCESS != kr) {
                        device_pager_deallocate( pager );
                        pager = MACH_PORT_NULL;
                        sharedMem = MACH_PORT_NULL;
                    }
                }

                if( pager && sharedMem)
                    reserved->devicePager = pager;
                else {
                    IODelete( reserved, ExpansionData, 1 );
                    reserved = 0;
                }
            }
        }

        _memEntry = (void *) sharedMem;
    }

    if( 0 == sharedMem)
        kr = kIOReturnVMError;
    else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
        && (logical == _ranges.v[0].address)
        && (length <= _ranges.v[0].length) )
        return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:
    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool initCompatible(
                IOMemoryDescriptor *    memory,
                IOMemoryMap *           superMap,
                IOByteCount             offset,
                IOByteCount             length );

    bool initWithDescriptor(
                IOMemoryDescriptor *    memory,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    IOReturn redirect(
                task_t                  intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor *    _memory,
        IOMemoryMap *           _superMap,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory      = _memory;

    _superMap->retain();
    superMap    = _superMap;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    options     = superMap->getMapOptions();
    logical     = superMap->getVirtualAddress() + offset;

    return( true );
}

bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor *    _memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    vm_map_reference(addressMap);

    _memory->retain();
    memory      = _memory;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }

    return( ok );
}
struct IOMemoryDescriptorMapAllocRef {
    ipc_port_t          sharedMem;
    vm_size_t           size;
    vm_offset_t         mapped;
    IOByteCount         sourceOffset;
    IOOptionBits        options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    if( ref->sharedMem) {

        vm_prot_t prot = VM_PROT_READ
                        | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

        err = vm_map( map,
                      &ref->mapped,
                      ref->size, 0 /* mask */,
                      (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                        | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                      ref->sharedMem, ref->sourceOffset,
                      false, // copy
                      prot, // cur
                      prot, // max
                      VM_INHERIT_NONE);

        if( KERN_SUCCESS != err)
            ref->mapped = 0;

    } else {

        err = vm_allocate( map, &ref->mapped, ref->size,
                           ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                             | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

        if( KERN_SUCCESS != err)
            ref->mapped = 0;
        else {
            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    }

    return( err );
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalAddress   sourceAddr;
    IOMemoryDescriptorMapAllocRef   ref;

    ref.sharedMem       = (ipc_port_t) _memEntry;
    ref.sourceOffset    = sourceOffset;
    ref.options         = options;

    if( 0 == length)
        length = getLength();

    sourceAddr = getSourceSegment( sourceOffset, NULL );
    assert( sourceAddr );
    pageOffset = sourceAddr - trunc_page( sourceAddr );

    ref.size = round_page( length + pageOffset );

    logical = *atAddress;
    if( options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else {
        ref.mapped = trunc_page( logical );
        if( (logical - ref.mapped) != pageOffset) {
            err = kIOReturnVMError;
            return( err );
        }
    }

    if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

    if( err != KERN_SUCCESS)
        return( err );

    if( reserved)
        pager = (memory_object_t) reserved->devicePager;
    else
        pager = MACH_PORT_NULL;

    if( !ref.sharedMem || pager )
        err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    if( err != KERN_SUCCESS) {
        doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = 0;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}

enum {
    kIOMemoryRedirected = 0x00010000
};
IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        IOVirtualAddress        address,
        IOByteCount             sourceOffset,
        IOByteCount             length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOPhysicalLength    segLen;
    IOPhysicalAddress   physAddr;

    if( !addressMap) {

        LOCK;
        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }
        UNLOCK;

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page( physAddr );

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);

#ifdef i386
        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength        allLen;
                IOPhysicalAddress       allPhys;

                allPhys = getPhysicalSegment( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object( pager, sourceOffset + page,
                                                        physAddr + page, page_size );
                }
            }
            assert( KERN_SUCCESS == err );
        }

        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution     */
        /* of the mappings created above.  Need for this is in absolute    */
        /* violation of the basic tenet that the pmap layer is a cache.    */
        /* Further, it implies a serious I/O architectural violation on    */
        /* the part of some user of the mapping.  As of this writing,      */
        /* the call to vm_fault is needed because the NVIDIA driver        */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be    */
        /* fixed as soon as possible.  The NVIDIA driver should not        */
        /* need to query for this info as it should know from the doMap    */
        /* call where the physical memory is mapped.  When a query is      */
        /* necessary to find a physical mapping, it should be done         */
        /* through an iokit call which includes the mapped memory          */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
        }

        /* *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( (bytes > 0)
        && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes > 0)
        err = kIOReturnBadArgument;

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn            err = kIOReturnSuccess;
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    LOCK;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, redirect );

        iter->release();
    }

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

//  err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );

    if( logical && addressMap
        && (get_task_map( safeTask) != addressMap)
        && (0 == (options & kIOMapStatic))) {

        IOUnmapPages( addressMap, logical, length );
        if( !redirect) {
            err = vm_deallocate( addressMap, logical, length );
            err = memory->doMap( addressMap, &logical,
                                 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                 offset, length );
        } else
            err = kIOReturnSuccess;

        IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
    }

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn    err;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag = 0) const
{
    super::taggedRelease(tag, 2);
}
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        memory->removeMapping( this);
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  task,
                IOVirtualAddress        toAddress,
                IOOptionBits            _options,
                IOByteCount             _offset,
                IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount        _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    address = memory->getPhysicalSegment( offset + _offset, length );

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    if( map
     && !map->initWithDescriptor( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    return( map);
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset = 0,
        IOByteCount             length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
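/*
 * Usage sketch (illustrative only): map() with no arguments maps the
 * descriptor anywhere in the kernel task and returns an IOMemoryMap that
 * the caller owns; the mapping goes away when that object is released.
 * 'md' is a hypothetical descriptor.
 *
 *     IOMemoryMap * map = md->map();
 *     if (map) {
 *         void * p = (void *) map->getVirtualAddress();
 *         // ... touch the memory through p ...
 *         map->release();
 *     }
 */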
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    // look for an existing mapping
    if( (iter = OSCollectionIterator::withCollection( _mappings))) {

        while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

            if( (mapping = mapping->copyCompatible(
                                    owner, intoTask, toAddress,
                                    options | kIOMapReference,
                                    offset, length )))
                break;
        }
        iter->release();
    }

    if( !mapping && !(options & kIOMapReference)) {

        mapping = new _IOMemoryMap;
        if( mapping
         && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
                                          offset, length )) {
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }
    }

    if( mapping)
        owner->addMapping( mapping);

    return( mapping);
}

void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection withDirection )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = withDirection;
    _tag        = parent->getTag();

    return( true );
}
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}
IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(withLength, _length - offset) );

    return( byteCount );
}

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(withLength, _length - offset) );

    return( byteCount );
}

IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn    err;

    err = _parent->prepare( forDirection);

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn    err;

    err = _parent->complete( forDirection);

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  withDirection,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       withDirection,
                                               bool              asReference = false)
{
    return( false );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const  *keys[2];
    OSObject        *values[2];
    IOVirtualRange  *vcopy;
    unsigned int     index, nRanges;
    bool             result = false;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    if (nRanges == _rangesCount) {
        for (index = 0; index < nRanges; index++) {
            vcopy[index] = _ranges.v[index];
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        goto bail;
    }

    for (index = 0; index < nRanges; index++)
    {
        values[0] = OSNumber::withNumber(_ranges.v[index].address, sizeof(_ranges.v[index].address) * 8);
        if (values[0] == 0) {
            goto bail;
        }
        values[1] = OSNumber::withNumber(_ranges.v[index].length, sizeof(_ranges.v[index].length) * 8);
        if (values[1] == 0) {
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
    return result;
}
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i=0; i<3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }