/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 */

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>

#include <device/device_port.h>
void bcopy_phys(char *from, char *to, int size);
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, unsigned int flags, boolean_t wired);
struct phys_entry *pmap_find_physentry(vm_offset_t pa);
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
memory_object_t device_pager_setup(
        memory_object_t         pager,
        int                     device_handle,
        vm_size_t               size,
        int                     flags);
void device_pager_deallocate(
        memory_object_t);
kern_return_t device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,
        vm_offset_t             phys_addr,
        vm_size_t               size);
/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
                vm_map_t        map,
                vm_offset_t     vaddr,
                vm_prot_t       fault_type,
                boolean_t       change_wiring,
                int             interruptible,
                pmap_t          caller_pmap,
                vm_offset_t     caller_pmap_addr);
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}

inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
{
    vm_offset_t pa = pmap_extract(get_task_pmap(task), va);

    if ( pa == 0 )
    {
        pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
        if ( pa )  pa += va - trunc_page(va);
    }

    return pa;
}

inline void bcopy_phys_safe(char * from, char * to, int size)
{
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    bcopy_phys(from, to, size);

    ml_set_interrupts_enabled(enabled);
}

#define next_page(a) ( trunc_page(a) + page_size )
kern_return_t device_data_action(
               int                   device_handle,
               ipc_port_t            device_pager,
               vm_prot_t             protection,
               vm_object_offset_t    offset,
               vm_size_t             size)
{
    struct ExpansionData {
        void *                          devicePager;
        unsigned int                    pagerContig:1;
        unsigned int                    unused:31;
        IOMemoryDescriptor *            memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}
kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *                          devicePager;
        unsigned int                    pagerContig:1;
        unsigned int                    unused:31;
        IOMemoryDescriptor *            memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress       address,
                                IOByteCount             withLength,
                                IODirection             withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                        withDirection, (task_t) 0 ));
}
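/*
 * Illustrative sketch (not part of the original file): a minimal example of
 * how a driver might use the factory methods above to describe a kernel
 * buffer.  The buffer name and length are hypothetical placeholders.
 */
#if 0   /* example only */
static IOMemoryDescriptor * exampleDescribeBuffer(void * kernelBuffer, IOByteCount len)
{
    // Kernel-virtual buffer; kernel_task is implied by the two-argument form.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(kernelBuffer, len,
                                                              kIODirectionOutIn);
    // Caller owns one reference; release it when the I/O is finished.
    return md;
}
#endif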
/*
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}
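/*
 * Illustrative sketch (not part of the original file): building a small
 * IOVirtualRange list and describing it with withRanges().  Passing
 * asReference == true keeps a pointer to the caller's array instead of
 * copying it, so the array must then outlive the descriptor.  The range
 * values and task shown here are hypothetical.
 */
#if 0   /* example only */
static IOMemoryDescriptor * exampleDescribeRanges(task_t clientTask)
{
    static IOVirtualRange ranges[2] = {
        { 0x10000, 0x1000 },    // address, length (placeholder values)
        { 0x30000, 0x2000 }
    };

    // asReference == false: the descriptor copies the two ranges,
    // so the caller's array may be temporary.
    return IOMemoryDescriptor::withRanges(ranges, 2, kIODirectionIn,
                                          clientTask, false);
}
#endif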
IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor *   of,
                                IOByteCount             offset,
                                IOByteCount             length,
                                IODirection             withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}
/*
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount withLength,
                                    IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}
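/*
 * Illustrative sketch (not part of the original file): the re-use behaviour
 * described above.  A general memory descriptor may be retargeted with a
 * second initWithAddress() call instead of allocating a new object.  The
 * buffer names are hypothetical.
 */
#if 0   /* example only */
static void exampleReuse(IOGeneralMemoryDescriptor * md,
                         void * firstBuf, void * secondBuf, IOByteCount len)
{
    if (md->initWithAddress(firstBuf, len, kIODirectionOut)) {
        // ... perform first transfer ...
    }
    // Retarget the same instance at a different buffer.
    if (md->initWithAddress(secondBuf, len, kIODirectionIn)) {
        // ... perform second transfer ...
    }
}
#endif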
bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                    IOByteCount    withLength,
                                    IODirection    withDirection,
                                    task_t         withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}
/*
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */
    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        assert(_wireCount == 0);

        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */
    _ranges.v              = 0;
    _rangesCount           = withCount;
    _rangesIsAllocated     = asReference ? false : true;
    _direction             = withDirection;
    _length                = 0;
    _task                  = withTask;
    _positionAtIndex       = 0;
    _positionAtOffset      = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
    {
        _length += _ranges.v[index].length;
    }

    return true;
}
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(      IOPhysicalRange * ranges,
                                                UInt32            withCount,
                                                IODirection       withDirection,
                                                bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                        withCount, withDirection, (task_t) 0, asReference ));
}
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
        reserved->memory = 0;
    UNLOCK;

    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if( reserved && reserved->devicePager)
        device_pager_deallocate( reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved (ExpansionData)
    // so no reserved access after this point
    if( _memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */     // Pull the shared pages out of the task map
/* DEPRECATED */     // Do we need to unwire it first?
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         pmap_change_wiring(
/* DEPRECATED */                         kernel_pmap,
/* DEPRECATED */                         _kernPtrAligned + off,
/* DEPRECATED */                         FALSE);
/* DEPRECATED */
/* DEPRECATED */         pmap_remove(
/* DEPRECATED */                         kernel_pmap,
/* DEPRECATED */                         _kernPtrAligned + off,
/* DEPRECATED */                         _kernPtrAligned + off + page_size);
/* DEPRECATED */     }
/* DEPRECATED */     // Free the former shmem area in the task
/* DEPRECATED */     krtn = vm_deallocate(kernel_map,
/* DEPRECATED */                         _kernPtrAligned,
/* DEPRECATED */                         _kernSize );
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     _kernPtrAligned = 0;
/* DEPRECATED */ }
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */     if (_kernPtrAligned)
/* DEPRECATED */     {
/* DEPRECATED */         if (_kernPtrAtIndex == rangeIndex)  return;
/* DEPRECATED */         unmapFromKernel();
/* DEPRECATED */         assert(_kernPtrAligned == 0);
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
/* DEPRECATED */
/* DEPRECATED */     _kernSize = trunc_page(_ranges.v[rangeIndex].address +
/* DEPRECATED */                            _ranges.v[rangeIndex].length  +
/* DEPRECATED */                            page_size - 1) - srcAlign;
/* DEPRECATED */
/* DEPRECATED */     /* Find some memory of the same size in kernel task.  We use vm_allocate() */
/* DEPRECATED */     /* to do this. vm_allocate inserts the found memory object in the */
/* DEPRECATED */     /* target task's map as a side effect. */
/* DEPRECATED */     krtn = vm_allocate( kernel_map,
/* DEPRECATED */             &_kernPtrAligned,
/* DEPRECATED */             _kernSize,
/* DEPRECATED */             VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     if(krtn)  return;
/* DEPRECATED */
/* DEPRECATED */     /* For each page in the area allocated from the kernel map, */
/* DEPRECATED */     /* find the physical address of the page. */
/* DEPRECATED */     /* Enter the page in the target task's pmap, at the */
/* DEPRECATED */     /* appropriate target task virtual address. */
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t kern_phys_addr, phys_addr;
/* DEPRECATED */         if( _task)
/* DEPRECATED */             phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
/* DEPRECATED */         else
/* DEPRECATED */             phys_addr = srcAlign + off;
/* DEPRECATED */         assert(phys_addr);
/* DEPRECATED */         if(phys_addr == 0)  return;
/* DEPRECATED */
/* DEPRECATED */         // Check original state.
/* DEPRECATED */         kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
/* DEPRECATED */         // Set virtual page to point to the right physical one
/* DEPRECATED */         pmap_enter(
/* DEPRECATED */             kernel_pmap,
/* DEPRECATED */             _kernPtrAligned + off,
/* DEPRECATED */             phys_addr,
/* DEPRECATED */             VM_PROT_READ|VM_PROT_WRITE,
/* DEPRECATED */             VM_WIMG_USE_DEFAULT,
/* DEPRECATED */             TRUE);
/* DEPRECATED */     }
/* DEPRECATED */     _kernPtrAtIndex = rangeIndex;
/* DEPRECATED */ }
/*
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag(
        IOOptionBits            tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag );
}

IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment( offset, length );
        complete();
    }

    return( physAddr );
}
IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
                                           void *      bytes,
                                           IOByteCount withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another source segment?)
        {
            IOPhysicalAddress sourceSegment;
            IOByteCount       sourceSegmentLength;

            sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
            if ( sourceSegment == 0 )  goto readBytesErr;

            sourceSegmentLength = min(sourceSegmentLength, withLength);

            while ( sourceSegmentLength ) // (process another target segment?)
            {
                IOPhysicalAddress targetSegment;
                IOByteCount       targetSegmentLength;

                targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( targetSegment == 0 )  goto readBytesErr;

                targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);

                if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(sourceSegment) - sourceSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) targetSegmentLength );

                ((UInt8 *) bytes)    += targetSegmentLength;
                bytesCopied          += targetSegmentLength;
                offset               += targetSegmentLength;
                sourceSegment        += targetSegmentLength;
                sourceSegmentLength  -= targetSegmentLength;
                withLength           -= targetSegmentLength;
            }
        }
    }

readBytesErr:

    if ( bytesCopied )
    {
        // We mark the destination pages as modified, just
        // in case they are made pageable later on in life.

        pmap_modify_pages( /* pmap  */ kernel_pmap,
                           /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
                           /* end   */ round_page(((vm_offset_t) bytes)) );
    }

    return bytesCopied;
}
IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount  offset,
                                            const void * bytes,
                                            IOByteCount  withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another target segment?)
        {
            IOPhysicalAddress targetSegment;
            IOByteCount       targetSegmentLength;

            targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
            if ( targetSegment == 0 )  goto writeBytesErr;

            targetSegmentLength = min(targetSegmentLength, withLength);

            while ( targetSegmentLength ) // (process another source segment?)
            {
                IOPhysicalAddress sourceSegment;
                IOByteCount       sourceSegmentLength;

                sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( sourceSegment == 0 )  goto writeBytesErr;

                sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);

                if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(targetSegment) - targetSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    // We flush the data cache in case it is code we've copied,
                    // such that the instruction cache is in the know about it.
                    flush_dcache(targetSegment, pageLength, true);

                    ((UInt8 *) bytes)    += pageLength;
                    bytesCopied          += pageLength;
                    offset               += pageLength;
                    sourceSegment        += pageLength;
                    sourceSegmentLength  -= pageLength;
                    targetSegment        += pageLength;
                    targetSegmentLength  -= pageLength;
                    withLength           -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) sourceSegmentLength );

                // We flush the data cache in case it is code we've copied,
                // such that the instruction cache is in the know about it.
                flush_dcache(targetSegment, sourceSegmentLength, true);

                ((UInt8 *) bytes)    += sourceSegmentLength;
                bytesCopied          += sourceSegmentLength;
                offset               += sourceSegmentLength;
                targetSegment        += sourceSegmentLength;
                targetSegmentLength  -= sourceSegmentLength;
                withLength           -= sourceSegmentLength;
            }
        }
    }

writeBytesErr:

    return bytesCopied;
}
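/*
 * Illustrative sketch (not part of the original file): copying data in and
 * out of a descriptor with readBytes()/writeBytes().  The descriptor name is
 * hypothetical; error handling is reduced to a length check.
 */
#if 0   /* example only */
static bool exampleCopy(IOMemoryDescriptor * md)
{
    UInt8       local[64];
    IOByteCount done;

    // Copy the first 64 bytes described by md into a kernel buffer.
    done = md->readBytes(0, local, sizeof(local));
    if (done != sizeof(local))
        return false;

    // Write the (possibly modified) bytes back at the same offset.
    done = md->writeBytes(0, local, sizeof(local));
    return (done == sizeof(local));
}
#endif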
// osfmk/device/iokit_rpc.c
extern unsigned int IOTranslateCacheBits(struct phys_entry *pp);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
/* DEPRECATED */     assert(position <= _length);
/* DEPRECATED */
/* DEPRECATED */     if (position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         _position         = _length;
/* DEPRECATED */         _positionAtIndex  = _rangesCount; /* careful: out-of-bounds */
/* DEPRECATED */         _positionAtOffset = 0;
/* DEPRECATED */         return;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     if (position < _position)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset = position;
/* DEPRECATED */         _positionAtIndex  = 0;
/* DEPRECATED */     }
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset += (position - _position);
/* DEPRECATED */     }
/* DEPRECATED */     _position = position;
/* DEPRECATED */
/* DEPRECATED */     while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset -= _ranges.v[_positionAtIndex].length;
/* DEPRECATED */         _positionAtIndex++;
/* DEPRECATED */     }
/* DEPRECATED */ }
IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                                 IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        if ( _task == 0 ) // (physical memory?)
        {
            address = _ranges.v[rangesIndex].address + offset;
            length  = _ranges.v[rangesIndex].length  - offset;

            for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
            {
                if ( address + length != _ranges.v[rangesIndex].address )  break;

                length += _ranges.v[rangesIndex].length; // (coalesce ranges)
            }
        }
        else // (virtual memory?)
        {
            vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;

            assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);

            address = pmap_extract_safe(_task, addressVirtual);
            length  = next_page(addressVirtual) - addressVirtual;
            length  = min(_ranges.v[rangesIndex].length - offset, length);
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
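/*
 * Illustrative sketch (not part of the original file): walking a prepared
 * descriptor segment by segment with getPhysicalSegment(), the usual way a
 * driver builds a scatter/gather list for DMA.  The emit callback is a
 * hypothetical stand-in for programming one hardware descriptor entry.
 */
#if 0   /* example only */
static void exampleWalkSegments(IOMemoryDescriptor * md,
                                void (*emit)(IOPhysicalAddress, IOByteCount))
{
    IOByteCount offset = 0;
    IOByteCount total  = md->getLength();

    while (offset < total) {
        IOByteCount       segLen;
        IOPhysicalAddress segPhys = md->getPhysicalSegment(offset, &segLen);

        if (segPhys == 0)
            break;                      // unmapped or unwired page
        emit(segPhys, segLen);          // hand one contiguous run to the hardware
        offset += segLen;
    }
}
#endif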
IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                               IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
        {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                         IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
/* DEPRECATED */     if( offset != _position)
/* DEPRECATED */         setPosition( offset );
/* DEPRECATED */
/* DEPRECATED */     assert(_position <= _length);
/* DEPRECATED */
/* DEPRECATED */     /* Fail gracefully if the position is at (or past) the end-of-buffer. */
/* DEPRECATED */     if (_position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         *lengthOfSegment = 0;
/* DEPRECATED */         return 0;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative length to the end of this virtual segment. */
/* DEPRECATED */     *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative address of this virtual segment. */
/* DEPRECATED */     if (_task == kernel_task)
/* DEPRECATED */         return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */         mapIntoKernel(_positionAtIndex);
/* DEPRECATED */
/* DEPRECATED */         off  = _ranges.v[_kernPtrAtIndex].address;
/* DEPRECATED */         off -= trunc_page(off);
/* DEPRECATED */
/* DEPRECATED */         return (void *) (_kernPtrAligned + off + _positionAtOffset);
/* DEPRECATED */     }
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/*
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    UInt rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;
        vm_prot_t access;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        switch (forDirection)
        {
            case kIODirectionIn:
                access = VM_PROT_WRITE;
                break;

            case kIODirectionOut:
                access = VM_PROT_READ;
                break;

            default:
                access = VM_PROT_READ | VM_PROT_WRITE;
                break;
        }

        //
        // Check user read/write access to the data buffer.
        //
        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)
        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length  +
                                page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                            (taskVMMap, &actualSize, entryStart,
                            forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }
        }
    }
    _wireCount++;

    return kIOReturnSuccess;
abortExit:
    {
        UInt doneIndex;

        for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
            vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                                _ranges.v[doneIndex].length  +
                                page_size - 1);

            vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                                srcAlignEnd, FALSE);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);
            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }

    return kIOReturnVMError;
}
/*
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if (0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        UInt rangeIndex;
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length  +
                                page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                        ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }
    return kIOReturnSuccess;
}
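/*
 * Illustrative sketch (not part of the original file): the prepare()/
 * complete() pairing described above, wrapped around a transfer.  The
 * startHardwareIO() call is a hypothetical stand-in for a driver's own
 * DMA or PIO path.
 */
#if 0   /* example only */
static IOReturn exampleTransfer(IOMemoryDescriptor * md)
{
    IOReturn ret = md->prepare();       // page in and wire the buffer
    if (ret != kIOReturnSuccess)
        return ret;

    // ... startHardwareIO(md); wait for it to finish ...

    md->complete();                     // unwire; must balance the prepare()
    return kIOReturnSuccess;
}
#endif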
IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
            *atAddress = _ranges.v[0].address;
            return( kIOReturnSuccess );
    }
    if( 0 == sharedMem) {

        vm_size_t size = 0;

        for (unsigned index = 0; index < _rangesCount; index++)
            size += round_page(_ranges.v[index].address + _ranges.v[index].length)
                  - trunc_page(_ranges.v[index].address);

        if( _task) {

            vm_size_t actualSize = size;
            kr = mach_make_memory_entry( get_task_map(_task),
                        &actualSize, _ranges.v[0].address,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) {
                IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
                            _ranges.v[0].address, (UInt32) actualSize, size);
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {
            memory_object_t     pager;
            unsigned int        flags = 0;
            struct phys_entry   *pp;
            IOPhysicalAddress   pa;
            IOPhysicalLength    segLen;

            pa = getPhysicalSegment( sourceOffset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            switch(options & kIOMapCacheMask ) { /*What cache mode do we need*/

                case kIOMapDefaultCache:
                default:
                    if((pp = pmap_find_physentry(pa))) {/* Find physical address */
                        /* Use physical attributes as default */
                        flags = IOTranslateCacheBits(pp);
                    }
                    else {      /* If no physical, just hard code attributes */
                        flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    }
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
#if 0
            flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
#endif

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }

            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );
= (void *) sharedMem
;
1277 kr
= kIOReturnVMError
;
1280 kr
= super::doMap( addressMap
, atAddress
,
1281 options
, sourceOffset
, length
);
IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
        && (logical == _ranges.v[0].address)
        && (length <= _ranges.v[0].length) )
            return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor *        memory;
    IOMemoryMap *               superMap;
    IOByteCount                 offset;
    IOByteCount                 length;
    IOVirtualAddress            logical;
    task_t                      addressTask;
    vm_map_t                    addressMap;
    IOOptionBits                options;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:
    // IOMemoryMap methods
    virtual IOVirtualAddress    getVirtualAddress();
    virtual IOByteCount         getLength();
    virtual task_t              getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits        getMapOptions();

    virtual IOReturn            unmap();
    virtual void                taskDied();

    virtual IOPhysicalAddress   getPhysicalSegment(IOByteCount offset,
                                                   IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool initCompatible(
        IOMemoryDescriptor *    memory,
        IOMemoryMap *           superMap,
        IOByteCount             offset,
        IOByteCount             length );

    bool initWithDescriptor(
        IOMemoryDescriptor *    memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length );

    IOReturn redirect(
        task_t                  intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor *    _memory,
        IOMemoryMap *           _superMap,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    if( !super::init())
        return( false );

    if( (_offset + _length) > _superMap->getLength())
        return( false );

    _memory->retain();
    memory      = _memory;
    _superMap->retain();
    superMap    = _superMap;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    options     = superMap->getMapOptions();
    logical     = superMap->getVirtualAddress() + offset;

    return( true );
}
bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor *    _memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false );

    if( (_offset + _length) > _memory->getLength())
        return( false );

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false );
    vm_map_reference(addressMap);

    _memory->retain();
    memory      = _memory;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}
struct IOMemoryDescriptorMapAllocRef {
    ipc_port_t          sharedMem;
    vm_size_t           size;
    vm_offset_t         mapped;
    IOByteCount         sourceOffset;
    IOOptionBits        options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            err = vm_map( map,
                            &ref->mapped,
                            ref->size, 0 /* mask */,
                            (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                            | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                            ref->sharedMem, ref->sourceOffset,
                            false, // copy
                            prot, // cur
                            prot, // max
                            VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                            ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                            | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset = 0,
        IOByteCount             length = 0 )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalAddress   sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        ref.size = round_page( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}
enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        void *                  _pager,
        vm_map_t                addressMap,
        IOVirtualAddress        address,
        IOByteCount             sourceOffset,
        IOByteCount             length,
        IOOptionBits            options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOPhysicalLength    segLen;
    IOPhysicalAddress   physAddr;

    if( !addressMap) {

        LOCK;

        if( kIOMemoryRedirected & _flags) {
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        UNLOCK;
        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page( physAddr );

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page( physAddr))
            err = kIOReturnBadArgument;

        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
                addressMap, address + pageOffset, physAddr + pageOffset,
                segLen - pageOffset);
#ifdef i386
        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
        if( err)
            break;
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength        allLen;
                IOPhysicalAddress       allPhys;

                allPhys = getPhysicalSegment( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                        err = device_pager_populate_object( pager, sourceOffset + page,
                                physAddr + page, page_size );
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }
        /*  *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution  */
        /* of the mappings created above.  Need for this is in absolute */
        /* violation of the basic tenet that the pmap layer is a cache. */
        /* Further, it implies a serious I/O architectural violation on */
        /* the part of some user of the mapping.  As of this writing,   */
        /* the call to vm_fault is needed because the NVIDIA driver     */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be */
        /* fixed as soon as possible.  The NVIDIA driver should not     */
        /* need to query for this info as it should know from the doMap */
        /* call where the physical memory is mapped.  When a query is   */
        /* necessary to find a physical mapping, it should be done      */
        /* through an iokit call which includes the mapped memory       */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
        }

        /*  *** Temporary Workaround *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( (bytes > 0)
        && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    IOReturn err;

    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );

    if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn            err = kIOReturnSuccess;
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    LOCK;

    if( (iter = OSCollectionIterator::withCollection( _mappings))) {
        while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
            mapping->redirect( safeTask, redirect );

        iter->release();
    }

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}
IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        LOCK;
        if( logical && addressMap
         && (get_task_map( safeTask) != addressMap)
         && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;

            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
        }
        UNLOCK;
    }

    return( err );
}
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
    UNLOCK;
}
// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have two references on a mapping.  When either reference
// is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag = 0) const
{
    super::taggedRelease(tag, 2);
}

void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}
IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical );
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask );
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options );
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}
_IOMemoryMap * _IOMemoryMap::copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  task,
                IOVirtualAddress        toAddress,
                IOOptionBits            _options,
                IOByteCount             _offset,
                IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount        _offset,
                                                    IOPhysicalLength * length )
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
     && !map->initWithDescriptor( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map );

    UNLOCK;

    return( map );
}
IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits            options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                        options | kIOMapAnywhere,
                        0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset = 0,
        IOByteCount             length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
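/*
 * Illustrative sketch (not part of the original file): mapping a descriptor
 * into the kernel task with the convenience overload above and touching it
 * through the returned IOMemoryMap.  Names are hypothetical.
 */
#if 0   /* example only */
static IOReturn exampleMapIntoKernel(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->map();      // kernel_task, kIOMapAnywhere
    if (map == 0)
        return kIOReturnVMError;

    volatile UInt8 * p = (volatile UInt8 *) map->getVirtualAddress();
    (void) p[0];                        // the mapping is usable here

    map->release();                     // drop our reference when finished
    return kIOReturnSuccess;
}
#endif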
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    _IOMemoryMap *      mapping = 0;
    OSIterator *        iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
                           offset, length )) {

            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    if( mapping)
        owner->addMapping( mapping );

    UNLOCK;

    return( mapping );
}
void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings)
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                        IOByteCount offset, IOByteCount length,
                                        IODirection withDirection )
{
    if( !super::init())
        return( false );

    if( !parent)
        return( false );

    if( (offset + length) > parent->getLength())
        return( false );

    parent->retain();
    _parent     = parent;
    _start      = offset;
    _length     = length;
    _direction  = withDirection;
    _tag        = parent->getTag();

    return( true );
}

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}
IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                        IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                        void * bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                        const void* bytes, IOByteCount withLength)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}
IOReturn IOSubMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection );
    UNLOCK;

    return( err );
}
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                        offset, length );

    return( mapping );
}
bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount withLength,
                                    IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                    IOByteCount    withLength,
                                    IODirection    withDirection,
                                    task_t         withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const  *keys[2];
    OSObject        *values[2];
    IOVirtualRange  *vcopy;
    unsigned int    index, nRanges;
    bool            result = false;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        for (index = 0; index < nRanges; index++) {
            vcopy[index] = _ranges.v[index];
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        // Serialize the stable copy of the ranges made above.
        values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result)
            goto bail;
    }
    result = s->addXMLEndTag("array");

bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
    return result;
}
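/*
 * Illustrative sketch (not part of the original file): driving the
 * serialize() method above from kernel code.  The descriptor is assumed to
 * already exist; the output is the XML "array" of address/length
 * dictionaries built by IOGeneralMemoryDescriptor::serialize().
 */
#if 0   /* example only */
static void exampleSerialize(IOMemoryDescriptor * md)
{
    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (s && md->serialize(s)) {
        IOLog("descriptor: %s\n", s->text());   // NUL-terminated XML text
    }
    if (s)
        s->release();
}
#endif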
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result)
        return false;

    return s->addXMLEndTag("dict");
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }