1/*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
24 *
25 * HISTORY
26 *
27 */
28
29#include <IOKit/assert.h>
30#include <IOKit/system.h>
31#include <IOKit/IOLib.h>
32#include <IOKit/IOMemoryDescriptor.h>
33
34#include <IOKit/IOKitDebug.h>
35
36#include <libkern/c++/OSContainers.h>
37#include <sys/cdefs.h>
38
39__BEGIN_DECLS
40#include <vm/pmap.h>
41void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
42 vm_prot_t prot, boolean_t wired);
43void ipc_port_release_send(ipc_port_t port);
44vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
45__END_DECLS
46
47/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
48
49OSDefineMetaClass( IOMemoryDescriptor, OSObject )
50OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )
51
52#define super IOMemoryDescriptor
53
54OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
55
56extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );
57
58/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59
60inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
61{
62 if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
63 return( IOPageableMapForAddress( address ) );
64 else
65 return( get_task_map( task ));
66}
67
68/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
69
70/*
71 * withAddress:
72 *
73 * Create a new IOMemoryDescriptor. The buffer is a virtual address
74 * relative to the specified task. If no task is supplied, the kernel
75 * task is implied.
76 */
77IOMemoryDescriptor *
78IOMemoryDescriptor::withAddress(void * address,
79 IOByteCount withLength,
80 IODirection withDirection)
81{
82 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
83 if (that)
84 {
85 if (that->initWithAddress(address, withLength, withDirection))
86 return that;
87
88 that->release();
89 }
90 return 0;
91}
92
93IOMemoryDescriptor *
94IOMemoryDescriptor::withAddress(vm_address_t address,
95 IOByteCount withLength,
96 IODirection withDirection,
97 task_t withTask)
98{
99 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
100 if (that)
101 {
102 if (that->initWithAddress(address, withLength, withDirection, withTask))
103 return that;
104
105 that->release();
106 }
107 return 0;
108}
109
110IOMemoryDescriptor *
111IOMemoryDescriptor::withPhysicalAddress(
112 IOPhysicalAddress address,
113 IOByteCount withLength,
114 IODirection withDirection )
115{
116 return( IOMemoryDescriptor::withAddress( address, withLength,
117 withDirection, (task_t) 0 ));
118}
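/*
 * Typical usage, as an illustrative sketch only: the buffer, length, task
 * and variable names below are hypothetical, not part of this file.
 *
 *     // Describe a kernel-virtual buffer for an outbound transfer.
 *     void * buf = IOMalloc(4096);
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
 *
 *     // Describe a buffer that lives in a client task's address space.
 *     IOMemoryDescriptor * userMD =
 *         IOMemoryDescriptor::withAddress(userVA, userLen,
 *                                         kIODirectionIn, clientTask);
 *
 *     // ... perform I/O against the descriptors ...
 *     if (userMD) userMD->release();
 *     if (md)     md->release();
 */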
119
120
121/*
122 * withRanges:
123 *
124 * Create a new IOMemoryDescriptor. The buffer is made up of several
125 * virtual address ranges, from a given task.
126 *
127 * Passing the ranges as a reference will avoid an extra allocation.
128 */
129IOMemoryDescriptor *
130IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
131 UInt32 withCount,
132 IODirection withDirection,
133 task_t withTask,
134 bool asReference = false)
135{
136 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
137 if (that)
138 {
139 if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
140 return that;
141
142 that->release();
143 }
144 return 0;
145}
146
147IOMemoryDescriptor *
148IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
149 UInt32 withCount,
150 IODirection withDirection,
151 bool asReference = false)
152{
153 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
154 if (that)
155 {
156 if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
157 return that;
158
159 that->release();
160 }
161 return 0;
162}
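/*
 * Illustrative sketch of a scatter/gather descriptor built from two
 * discontiguous ranges (addresses, lengths and the task are hypothetical).
 * When asReference is true the ranges array is not copied, so it must stay
 * valid for the lifetime of the descriptor; passing false copies it.
 *
 *     IOVirtualRange ranges[2];
 *     ranges[0].address = bufferA;  ranges[0].length = 0x1000;
 *     ranges[1].address = bufferB;  ranges[1].length = 0x0800;
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withRanges(ranges, 2, kIODirectionOut,
 *                                        clientTask, false);  // copy ranges
 */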
163
164IOMemoryDescriptor *
165IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
166 IOByteCount offset,
167 IOByteCount length,
168 IODirection withDirection)
169{
170 IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;
171
172 if (that && !that->initSubRange(of, offset, length, withDirection)) {
173 that->release();
174 that = 0;
175 }
176 return that;
177}
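/*
 * Illustrative sketch: exposing a window onto an existing descriptor
 * (the parent descriptor, offset and length are hypothetical). The
 * sub-range retains its parent and forwards prepare/complete and the
 * byte accessors to it.
 *
 *     IOMemoryDescriptor * window =
 *         IOMemoryDescriptor::withSubRange(parentMD,
 *                                          0x200,           // offset
 *                                          0x400,           // length
 *                                          kIODirectionOut);
 */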
178
179/*
180 * initWithAddress:
181 *
182 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
183 * relative to the specified task. If no task is supplied, the kernel
184 * task is implied.
185 *
186 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
187 * initWithRanges again on an existing instance -- note this behavior
188 * is not commonly supported in other I/O Kit classes, although it is
189 * supported here.
190 */
191bool
192IOGeneralMemoryDescriptor::initWithAddress(void * address,
193 IOByteCount withLength,
194 IODirection withDirection)
195{
196 _singleRange.v.address = (vm_address_t) address;
197 _singleRange.v.length = withLength;
198
199 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
200}
201
202bool
203IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
204 IOByteCount withLength,
205 IODirection withDirection,
206 task_t withTask)
207{
208 _singleRange.v.address = address;
209 _singleRange.v.length = withLength;
210
211 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
212}
213
214bool
215IOGeneralMemoryDescriptor::initWithPhysicalAddress(
216 IOPhysicalAddress address,
217 IOByteCount withLength,
218 IODirection withDirection )
219{
220 _singleRange.p.address = address;
221 _singleRange.p.length = withLength;
222
223 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
224}
225
226/*
227 * initWithRanges:
228 *
229 * Initialize an IOMemoryDescriptor. The buffer is made up of several
230 * virtual address ranges, from a given task.
231 *
232 * Passing the ranges as a reference will avoid an extra allocation.
233 *
234 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
235 * initWithRanges again on an existing instance -- note this behavior
236 * is not commonly supported in other I/O Kit classes, although it is
237 * supported here.
238 */
239bool
240IOGeneralMemoryDescriptor::initWithRanges(
241 IOVirtualRange * ranges,
242 UInt32 withCount,
243 IODirection withDirection,
244 task_t withTask,
245 bool asReference = false)
246{
247 assert(ranges);
248 assert(withCount);
249
250 /*
251 * We can check the _initialized instance variable before having ever set
252 * it to an initial value because I/O Kit guarantees that all our instance
253 * variables are zeroed on an object's allocation.
254 */
255
256 if (_initialized == false)
257 {
258 if (super::init() == false) return false;
259 _initialized = true;
260 }
261 else
262 {
263 /*
264 * An existing memory descriptor is being retargeted to point to
265 * somewhere else. Clean up our present state.
266 */
267
268 assert(_wireCount == 0);
269
270 while (_wireCount)
271 complete();
272 if (_kernPtrAligned)
273 unmapFromKernel();
274 if (_ranges.v && _rangesIsAllocated)
275 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
276 }
277
278 /*
279 * Initialize the memory descriptor.
280 */
281
282 _ranges.v = 0;
283 _rangesCount = withCount;
284 _rangesIsAllocated = asReference ? false : true;
285 _direction = withDirection;
286 _length = 0;
287 _task = withTask;
288 _position = 0;
289 _positionAtIndex = 0;
290 _positionAtOffset = 0;
291 _kernPtrAligned = 0;
292 _cachedPhysicalAddress = 0;
293 _cachedVirtualAddress = 0;
294 _flags = 0;
295
296 if (withTask && (withTask != kernel_task))
297 _flags |= kIOMemoryRequiresWire;
298
299 if (asReference)
300 _ranges.v = ranges;
301 else
302 {
303 _ranges.v = IONew(IOVirtualRange, withCount);
304 if (_ranges.v == 0) return false;
305 bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
306 }
307
308 for (unsigned index = 0; index < _rangesCount; index++)
309 {
310 _length += _ranges.v[index].length;
311 }
312
313 return true;
314}
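/*
 * Re-use sketch (buffers and lengths hypothetical): an existing
 * IOGeneralMemoryDescriptor may be retargeted by calling one of the init
 * methods again; any outstanding prepare() must have been balanced by
 * complete() first, since re-initialization tears down the old wiring,
 * kernel mapping and range allocation.
 *
 *     IOGeneralMemoryDescriptor * gmd = new IOGeneralMemoryDescriptor;
 *     gmd->initWithAddress(bufA, lenA, kIODirectionOut);
 *     // ... first transfer ...
 *     gmd->initWithAddress(bufB, lenB, kIODirectionIn);   // retarget
 *     // ... second transfer ...
 *     gmd->release();
 */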
315
316bool
317IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
318 UInt32 withCount,
319 IODirection withDirection,
320 bool asReference = false)
321{
322#warning assuming virtual, physical addresses same size
323 return( initWithRanges( (IOVirtualRange *) ranges,
324 withCount, withDirection, (task_t) 0, asReference ));
325}
326
327/*
328 * free
329 *
330 * Free resources.
331 */
332void IOGeneralMemoryDescriptor::free()
333{
334 while (_wireCount)
335 complete();
336 if (_kernPtrAligned)
337 unmapFromKernel();
338 if (_ranges.v && _rangesIsAllocated)
339 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
340 if( _memEntry)
341 ipc_port_release_send( (ipc_port_t) _memEntry );
342 super::free();
343}
344
345void IOGeneralMemoryDescriptor::unmapFromKernel()
346{
347 kern_return_t krtn;
348 vm_offset_t off;
349 // Pull the shared pages out of the task map
350 // Do we need to unwire it first?
351 for ( off = 0; off < _kernSize; off += page_size )
352 {
353 pmap_change_wiring(
354 kernel_pmap,
355 _kernPtrAligned + off,
356 FALSE);
357
358 pmap_remove(
359 kernel_pmap,
360 _kernPtrAligned + off,
361 _kernPtrAligned + off + page_size);
362 }
363 // Free the former shmem area in the task
364 krtn = vm_deallocate(kernel_map,
365 _kernPtrAligned,
366 _kernSize );
367 assert(krtn == KERN_SUCCESS);
368 _kernPtrAligned = 0;
369}
370
371void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
372{
373 kern_return_t krtn;
374 vm_offset_t off;
375
376 if (_kernPtrAligned)
377 {
378 if (_kernPtrAtIndex == rangeIndex) return;
379 unmapFromKernel();
380 assert(_kernPtrAligned == 0);
381 }
382
383 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
384
385 _kernSize = trunc_page(_ranges.v[rangeIndex].address +
386 _ranges.v[rangeIndex].length +
387 page_size - 1) - srcAlign;
388
389 /* Find some memory of the same size in the kernel task. We use vm_allocate()
390 to do this. vm_allocate inserts the found memory object into the
391 target task's map as a side effect. */
392 krtn = vm_allocate( kernel_map,
393 &_kernPtrAligned,
394 _kernSize,
395 VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
396 assert(krtn == KERN_SUCCESS);
397 if(krtn) return;
398
399 /* For each page in the area allocated from the kernel map,
400 find the physical address of the page.
401 Enter the page in the target task's pmap, at the
402 appropriate target task virtual address. */
403 for ( off = 0; off < _kernSize; off += page_size )
404 {
405 vm_offset_t kern_phys_addr, phys_addr;
406 if( _task)
407 phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
408 else
409 phys_addr = srcAlign + off;
410 assert(phys_addr);
411 if(phys_addr == 0) return;
412
413 // Check original state.
414 kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
415 // Set virtual page to point to the right physical one
416 pmap_enter(
417 kernel_pmap,
418 _kernPtrAligned + off,
419 phys_addr,
420 VM_PROT_READ|VM_PROT_WRITE,
421 TRUE);
422 }
423 _kernPtrAtIndex = rangeIndex;
424}
425
426/*
427 * getDirection:
428 *
429 * Get the direction of the transfer.
430 */
431IODirection IOMemoryDescriptor::getDirection() const
432{
433 return _direction;
434}
435
436/*
437 * getLength:
438 *
439 * Get the length of the transfer (over all ranges).
440 */
441IOByteCount IOMemoryDescriptor::getLength() const
442{
443 return _length;
444}
445
446void IOMemoryDescriptor::setTag(
447 IOOptionBits tag )
448{
449 _tag = tag;
450}
451
452IOOptionBits IOMemoryDescriptor::getTag( void )
453{
454 return( _tag);
455}
456
457/*
458 * setPosition
459 *
460 * Set the logical start position inside the client buffer.
461 *
462 * By convention, the position reflects the actual byte count that has
463 * been successfully transferred into or out of the buffer before the I/O
464 * request is "completed" (i.e. sent back to its originator).
465 */
466
467void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
468{
469 assert(position <= _length);
470
471 if (position >= _length)
472 {
473 _position = _length;
474 _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
475 _positionAtOffset = 0;
476 return;
477 }
478
479 if (position < _position)
480 {
481 _positionAtOffset = position;
482 _positionAtIndex = 0;
483 }
484 else
485 {
486 _positionAtOffset += (position - _position);
487 }
488 _position = position;
489
490 while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
491 {
492 _positionAtOffset -= _ranges.v[_positionAtIndex].length;
493 _positionAtIndex++;
494 }
495}
496
497/*
498 * readBytes:
499 *
500 * Copy data from the memory descriptor's buffer into the specified buffer,
501 * relative to the current position. The memory descriptor's position is
502 * advanced based on the number of bytes copied.
503 */
504
505IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset,
506 void * bytes, IOByteCount withLength)
507{
508 IOByteCount bytesLeft;
509 void * segment;
510 IOByteCount segmentLength;
511
512 if( offset != _position)
513 setPosition( offset );
514
515 withLength = min(withLength, _length - _position);
516 bytesLeft = withLength;
517
518#if 0
519 while (bytesLeft && (_position < _length))
520 {
521 /* Compute the relative length to the end of this virtual segment. */
522 segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
523
524 /* Compute the relative address of this virtual segment. */
525 segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
526
527 if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment),
528 /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes,
529 /* size */ segmentLength))
530 {
531 assert( false );
532 bytesLeft = withLength;
533 break;
534 }
535 bytesLeft -= segmentLength;
536 offset += segmentLength;
537 setPosition(offset);
538 }
539#else
540 while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
541 {
542 segmentLength = min(segmentLength, bytesLeft);
543 bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength);
544 bytesLeft -= segmentLength;
545 offset += segmentLength;
546 bytes = (void *) (((UInt32) bytes) + segmentLength);
547 }
548#endif
549
550 return withLength - bytesLeft;
551}
552
553/*
554 * writeBytes:
555 *
556 * Copy data to the memory descriptor's buffer from the specified buffer,
557 * relative to the current position. The memory descriptor's position is
558 * advanced based on the number of bytes copied.
559 */
560IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset,
561 const void* bytes,IOByteCount withLength)
562{
563 IOByteCount bytesLeft;
564 void * segment;
565 IOByteCount segmentLength;
566
567 if( offset != _position)
568 setPosition( offset );
569
570 withLength = min(withLength, _length - _position);
571 bytesLeft = withLength;
572
573#if 0
574 while (bytesLeft && (_position < _length))
575 {
576 assert(_position <= _length);
577
578 /* Compute the relative length to the end of this virtual segment. */
579 segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
580
581 /* Compute the relative address of this virtual segment. */
582 segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
583
584 if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment),
585 /* from */ (vm_offset_t) bytes,
586 /* to */ (vm_offset_t) segment,
587 /* size */ segmentLength))
588 {
589 assert( false );
590 bytesLeft = withLength;
591 break;
592 }
593 bytesLeft -= segmentLength;
594 offset += segmentLength;
595 setPosition(offset);
596 }
597#else
598 while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
599 {
600 segmentLength = min(segmentLength, bytesLeft);
601 bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength);
602 // Flush cache in case we're copying code around, e.g. handling a code page fault
603 IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength );
604
605 bytesLeft -= segmentLength;
606 offset += segmentLength;
607 bytes = (void *) (((UInt32) bytes) + segmentLength);
608 }
609#endif
610
611 return withLength - bytesLeft;
612}
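/*
 * Illustrative sketch (descriptor and scratch buffer hypothetical): copying
 * between a driver buffer and the memory the descriptor represents. The
 * return value is the number of bytes actually copied, which may be short
 * if the request runs past the end of the descriptor.
 *
 *     UInt8 scratch[512];
 *     IOByteCount got = md->readBytes(0, scratch, sizeof(scratch));
 *     IOByteCount put = md->writeBytes(0, scratch, got);
 */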
613
614/*
615 * getPhysicalSegment:
616 *
617 * Get the physical address of the buffer, relative to the current position.
618 * If the current position is at the end of the buffer, a zero is returned.
619 */
620IOPhysicalAddress
621IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
622 IOByteCount * lengthOfSegment)
623{
624 vm_address_t virtualAddress;
625 IOByteCount virtualLength;
626 pmap_t virtualPMap;
627 IOPhysicalAddress physicalAddress;
628 IOPhysicalLength physicalLength;
629
630 if( kIOMemoryRequiresWire & _flags)
631 assert( _wireCount );
632
633 if ((0 == _task) && (1 == _rangesCount))
634 {
635 assert(offset <= _length);
636 if (offset >= _length)
637 {
638 physicalAddress = 0;
639 physicalLength = 0;
640 }
641 else
642 {
643 physicalLength = _length - offset;
644 physicalAddress = offset + _ranges.v[0].address;
645 }
646
647 if (lengthOfSegment)
648 *lengthOfSegment = physicalLength;
649 return physicalAddress;
650 }
651
652 if( offset != _position)
653 setPosition( offset );
654
655 assert(_position <= _length);
656
657 /* Fail gracefully if the position is at (or past) the end-of-buffer. */
658 if (_position >= _length)
659 {
660 *lengthOfSegment = 0;
661 return 0;
662 }
663
664 /* Prepare to compute the largest contiguous physical length possible. */
665
666 virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset;
667 virtualLength = _ranges.v[_positionAtIndex].length - _positionAtOffset;
668 vm_address_t virtualPage = trunc_page(virtualAddress);
669 if( _task)
670 virtualPMap = get_task_pmap(_task);
671 else
672 virtualPMap = 0;
673
674 physicalAddress = (virtualAddress == _cachedVirtualAddress) ?
675 _cachedPhysicalAddress : /* optimization */
676 virtualPMap ?
677 pmap_extract(virtualPMap, virtualAddress) :
678 virtualAddress;
679 physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress;
680
681 if (!physicalAddress && _task)
682 {
683 physicalAddress =
684 vm_map_get_phys_page(get_task_map(_task), virtualPage);
685 physicalAddress += virtualAddress - virtualPage;
686 }
687
688 if (physicalAddress == 0) /* memory must be wired in order to proceed */
689 {
690 assert(physicalAddress);
691 *lengthOfSegment = 0;
692 return 0;
693 }
694
695 /* Compute the largest contiguous physical length possible, within range. */
696 IOPhysicalAddress physicalPage = trunc_page(physicalAddress);
697
698 while (physicalLength < virtualLength)
699 {
700 physicalPage += page_size;
701 virtualPage += page_size;
702 _cachedVirtualAddress = virtualPage;
703 _cachedPhysicalAddress = virtualPMap ?
704 pmap_extract(virtualPMap, virtualPage) :
705 virtualPage;
706 if (!_cachedPhysicalAddress && _task)
707 {
708 _cachedPhysicalAddress =
709 vm_map_get_phys_page(get_task_map(_task), virtualPage);
710 }
711
712 if (_cachedPhysicalAddress != physicalPage) break;
713
714 physicalLength += page_size;
715 }
716
717 /* Clip contiguous physical length at the end of this range. */
718 if (physicalLength > virtualLength)
719 physicalLength = virtualLength;
720
721 if( lengthOfSegment)
722 *lengthOfSegment = physicalLength;
723
724 return physicalAddress;
725}
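/*
 * Illustrative sketch of the usual physical-segment walk for programming
 * DMA (the descriptor and the programDMASegment() helper are hypothetical).
 * The descriptor should be prepare()d first so that pageable memory is
 * wired and the translations remain valid.
 *
 *     IOByteCount       offset = 0;
 *     IOPhysicalLength  segLen;
 *     IOPhysicalAddress segPhys;
 *
 *     while( (segPhys = md->getPhysicalSegment( offset, &segLen )) ) {
 *         programDMASegment( segPhys, segLen );   // hypothetical helper
 *         offset += segLen;
 *     }
 */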
726
727
728/*
729 * getVirtualSegment:
730 *
731 * Get the virtual address of the buffer, relative to the current position.
732 * If the memory wasn't mapped into the caller's address space, it will be
733 * mapped in now. If the current position is at the end of the buffer, a
734 * null is returned.
735 */
736void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
737 IOByteCount * lengthOfSegment)
738{
739 if( offset != _position)
740 setPosition( offset );
741
742 assert(_position <= _length);
743
744 /* Fail gracefully if the position is at (or past) the end-of-buffer. */
745 if (_position >= _length)
746 {
747 *lengthOfSegment = 0;
748 return 0;
749 }
750
751 /* Compute the relative length to the end of this virtual segment. */
752 *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
753
754 /* Compute the relative address of this virtual segment. */
755 if (_task == kernel_task)
756 return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
757 else
758 {
759 vm_offset_t off;
760
761 mapIntoKernel(_positionAtIndex);
762
763 off = _ranges.v[_kernPtrAtIndex].address;
764 off -= trunc_page(off);
765
766 return (void *) (_kernPtrAligned + off + _positionAtOffset);
767 }
768}
769
770/*
771 * prepare
772 *
773 * Prepare the memory for an I/O transfer. This involves paging in
774 * the memory, if necessary, and wiring it down for the duration of
775 * the transfer. The complete() method completes the processing of
776 * the memory after the I/O transfer finishes. This method needn't
777 * be called for non-pageable memory.
778 */
779IOReturn IOGeneralMemoryDescriptor::prepare(
780 IODirection forDirection = kIODirectionNone)
781{
782 UInt rangeIndex = 0;
783
784 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
785 kern_return_t rc;
786
787 if(forDirection == kIODirectionNone)
788 forDirection = _direction;
789
790 vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction
791
792 //
793 // Check user read/write access to the data buffer.
794 //
795
796 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
797 {
798 vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
799 vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length );
800
801 while (checkSize)
802 {
803 vm_region_basic_info_data_t regionInfo;
804 mach_msg_type_number_t regionInfoSize = sizeof(regionInfo);
805 vm_size_t regionSize;
806
807 if ( (vm_region(
808 /* map */ getMapForTask(_task, checkBase),
809 /* address */ &checkBase,
810 /* size */ &regionSize,
811 /* flavor */ VM_REGION_BASIC_INFO,
812 /* info */ (vm_region_info_t) &regionInfo,
813 /* info size */ &regionInfoSize,
814 /* object name */ 0 ) != KERN_SUCCESS ) ||
815 ( (forDirection & kIODirectionIn ) &&
816 !(regionInfo.protection & VM_PROT_WRITE) ) ||
817 ( (forDirection & kIODirectionOut) &&
818 !(regionInfo.protection & VM_PROT_READ ) ) )
819 {
820 return kIOReturnVMError;
821 }
822
823 assert((regionSize & PAGE_MASK) == 0);
824
825 regionSize = min(regionSize, checkSize);
826 checkSize -= regionSize;
827 checkBase += regionSize;
828 } // (for each vm region)
829 } // (for each io range)
830
831 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
832
833 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
834 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
835 _ranges.v[rangeIndex].length +
836 page_size - 1);
837
838 vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
839
840 rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
841 if (KERN_SUCCESS != rc) {
842 IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
843 goto abortExit;
844 }
845
846 // If this I/O is for a user land task then protect ourselves
847 // against COW and other vm_shenanigans
848 if (_task && _task != kernel_task) {
849 // setup a data object to hold the 'named' memory regions
850 // @@@ gvdl: If we fail to allocate an OSData we will just
851 // hope for the best for the time being. Let's not fail a
852 // prepare at this late stage in product release.
853 if (!_memoryEntries)
854 _memoryEntries = OSData::withCapacity(16);
855 if (_memoryEntries) {
856 vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
857 vm_object_offset_t entryStart = srcAlign;
858 ipc_port_t memHandle;
859
860 do {
861 vm_object_offset_t actualSize = desiredSize;
862
863 rc = mach_make_memory_entry_64
864 (taskVMMap, &actualSize, entryStart,
865 forDirection, &memHandle, NULL);
866 if (KERN_SUCCESS != rc) {
867 IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
868 goto abortExit;
869 }
870
871 _memoryEntries->
872 appendBytes(&memHandle, sizeof(memHandle));
873 desiredSize -= actualSize;
874 entryStart += actualSize;
875 } while (desiredSize);
876 }
877 }
878 }
879 }
880 _wireCount++;
881 return kIOReturnSuccess;
882
883abortExit:
884 UInt doneIndex;
885
886
887 for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
888 vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
889 IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
890 _ranges.v[doneIndex].length +
891 page_size - 1);
892
893 vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
894 srcAlignEnd, FALSE);
895 }
896
897 if (_memoryEntries) {
898 ipc_port_t *handles, *handlesEnd;
899
900 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
901 handlesEnd = (ipc_port_t *)
902 ((vm_address_t) handles + _memoryEntries->getLength());
903 while (handles < handlesEnd)
904 ipc_port_release_send(*handles++);
905 _memoryEntries->release();
906 _memoryEntries = 0;
907 }
908
909 return kIOReturnVMError;
910}
911
912/*
913 * complete
914 *
915 * Complete processing of the memory after an I/O transfer finishes.
916 * This method should not be called unless a prepare was previously
917 * issued; the prepare() and complete() calls must occur in pairs,
918 * before and after an I/O transfer involving pageable memory.
919 */
920
921IOReturn IOGeneralMemoryDescriptor::complete(
922 IODirection forDirection = kIODirectionNone)
923{
924 assert(_wireCount);
925
926 if(0 == _wireCount)
927 return kIOReturnSuccess;
928
929 _wireCount--;
930 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
931 UInt rangeIndex;
932 kern_return_t rc;
933
934 if(forDirection == kIODirectionNone)
935 forDirection = _direction;
936
937 for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
938
939 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
940 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
941 _ranges.v[rangeIndex].length +
942 page_size - 1);
943
944 if(forDirection == kIODirectionIn)
945 pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);
946
947 rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
948 srcAlignEnd, FALSE);
949 if(rc != KERN_SUCCESS)
950 IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
951 }
952
953 if (_memoryEntries) {
954 ipc_port_t *handles, *handlesEnd;
955
956 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
957 handlesEnd = (ipc_port_t *)
958 ((vm_address_t) handles + _memoryEntries->getLength());
959 while (handles < handlesEnd)
960 ipc_port_release_send(*handles++);
961
962 _memoryEntries->release();
963 _memoryEntries = 0;
964 }
965
966 _cachedVirtualAddress = 0;
967 }
968 return kIOReturnSuccess;
969}
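/*
 * Illustrative pairing sketch (error handling trimmed, descriptor
 * hypothetical): prepare() wires the pages for the duration of the
 * transfer and complete() undoes it; the two calls must balance.
 *
 *     if( kIOReturnSuccess == md->prepare()) {
 *         // ... perform the I/O against the wired memory ...
 *         md->complete();
 *     }
 */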
970
971IOReturn IOGeneralMemoryDescriptor::doMap(
972 vm_map_t addressMap,
973 IOVirtualAddress * atAddress,
974 IOOptionBits options,
975 IOByteCount sourceOffset = 0,
976 IOByteCount length = 0 )
977{
978 kern_return_t kr;
979
980 // mapping source == dest? (could be much better)
981 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
982 && (1 == _rangesCount) && (0 == sourceOffset)
983 && (length <= _ranges.v[0].length) ) {
984 *atAddress = _ranges.v[0].address;
985 return( kIOReturnSuccess );
986 }
987
988 if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) {
989
990 do {
991
992 if( (1 != _rangesCount)
993 || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) {
994 kr = kIOReturnUnsupported;
995 continue;
996 }
997
998 if( 0 == length)
999 length = getLength();
1000 if( (sourceOffset + length) > _ranges.v[0].length) {
1001 kr = kIOReturnBadArgument;
1002 continue;
1003 }
1004
1005 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1006 vm_prot_t prot = VM_PROT_READ
1007 | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
1008
1009 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1010 if( options & kIOMapAnywhere)
1011 *atAddress = 0;
1012
1013 if( 0 == sharedMem)
1014 kr = kIOReturnVMError;
1015 else
1016 kr = KERN_SUCCESS;
1017
1018 if( KERN_SUCCESS == kr)
1019 kr = vm_map( addressMap,
1020 atAddress,
1021 length, 0 /* mask */,
1022 (( options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1023 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1024 sharedMem, sourceOffset,
1025 false, // copy
1026 prot, // cur
1027 prot, // max
1028 VM_INHERIT_NONE);
1029
1030 } while( false );
1031
1032 } else
1033 kr = super::doMap( addressMap, atAddress,
1034 options, sourceOffset, length );
1035 return( kr );
1036}
1037
1038IOReturn IOGeneralMemoryDescriptor::doUnmap(
1039 vm_map_t addressMap,
1040 IOVirtualAddress logical,
1041 IOByteCount length )
1042{
1043 // could be much better
1044 if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
1045 && (logical == _ranges.v[0].address)
1046 && (length <= _ranges.v[0].length) )
1047 return( kIOReturnSuccess );
1048
1049 return( super::doUnmap( addressMap, logical, length ));
1050}
1051
1052/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1053
1054extern "C" {
1055// osfmk/device/iokit_rpc.c
1056extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1057 vm_size_t length, unsigned int mapFlags);
1058extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
1059};
1060
1061/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1062
1063static IORecursiveLock * gIOMemoryLock;
1064
1065#define LOCK IORecursiveLockLock( gIOMemoryLock)
1066#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
1067
1068/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1069
1070OSDefineMetaClass( IOMemoryMap, OSObject )
1071OSDefineAbstractStructors( IOMemoryMap, OSObject )
1072
1073/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1074
1075class _IOMemoryMap : public IOMemoryMap
1076{
1077 OSDeclareDefaultStructors(_IOMemoryMap)
1078
1079 IOMemoryDescriptor * memory;
1080 IOMemoryMap * superMap;
1081 IOByteCount offset;
1082 IOByteCount length;
1083 IOVirtualAddress logical;
1084 task_t addressTask;
1085 vm_map_t addressMap;
1086 IOOptionBits options;
1087
1088public:
1089 virtual void free();
1090
1091 // IOMemoryMap methods
1092 virtual IOVirtualAddress getVirtualAddress();
1093 virtual IOByteCount getLength();
1094 virtual task_t getAddressTask();
1095 virtual IOMemoryDescriptor * getMemoryDescriptor();
1096 virtual IOOptionBits getMapOptions();
1097
1098 virtual IOReturn unmap();
1099 virtual void taskDied();
1100
1101 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1102 IOByteCount * length);
1103
1104 // for IOMemoryDescriptor use
1105 _IOMemoryMap * isCompatible(
1106 IOMemoryDescriptor * owner,
1107 task_t intoTask,
1108 IOVirtualAddress toAddress,
1109 IOOptionBits options,
1110 IOByteCount offset,
1111 IOByteCount length );
1112
1113 bool init(
1114 IOMemoryDescriptor * memory,
1115 IOMemoryMap * superMap,
1116 IOByteCount offset,
1117 IOByteCount length );
1118
1119 bool init(
1120 IOMemoryDescriptor * memory,
1121 task_t intoTask,
1122 IOVirtualAddress toAddress,
1123 IOOptionBits options,
1124 IOByteCount offset,
1125 IOByteCount length );
1126
1127 IOReturn redirect(
1128 task_t intoTask, bool redirect );
1129};
1130
1131/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1132
1133#undef super
1134#define super IOMemoryMap
1135
1136OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1137
1138/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1139
1140bool _IOMemoryMap::init(
1141 IOMemoryDescriptor * _memory,
1142 IOMemoryMap * _superMap,
1143 IOByteCount _offset,
1144 IOByteCount _length )
1145{
1146
1147 if( !super::init())
1148 return( false);
1149
1150 if( (_offset + _length) > _superMap->getLength())
1151 return( false);
1152
1153 _memory->retain();
1154 memory = _memory;
1155 _superMap->retain();
1156 superMap = _superMap;
1157
1158 offset = _offset;
1159 if( _length)
1160 length = _length;
1161 else
1162 length = _memory->getLength();
1163
1164 options = superMap->getMapOptions();
1165 logical = superMap->getVirtualAddress() + offset;
1166
1167 return( true );
1168}
1169
1170bool _IOMemoryMap::init(
1171 IOMemoryDescriptor * _memory,
1172 task_t intoTask,
1173 IOVirtualAddress toAddress,
1174 IOOptionBits _options,
1175 IOByteCount _offset,
1176 IOByteCount _length )
1177{
1178 bool ok;
1179
1180 if( (!_memory) || (!intoTask) || !super::init())
1181 return( false);
1182
1183 if( (_offset + _length) > _memory->getLength())
1184 return( false);
1185
1186 addressMap = get_task_map(intoTask);
1187 if( !addressMap)
1188 return( false);
1189 kernel_vm_map_reference(addressMap);
1190
1191 _memory->retain();
1192 memory = _memory;
1193
1194 offset = _offset;
1195 if( _length)
1196 length = _length;
1197 else
1198 length = _memory->getLength();
1199
1200 addressTask = intoTask;
1201 logical = toAddress;
1202 options = _options;
1203
1204 if( options & kIOMapStatic)
1205 ok = true;
1206 else
1207 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1208 options, offset, length ));
1209 if( !ok) {
1210 logical = 0;
1211 memory->release();
1212 memory = 0;
1213 vm_map_deallocate(addressMap);
1214 addressMap = 0;
1215 }
1216 return( ok );
1217}
1218
1219IOReturn IOMemoryDescriptor::doMap(
1220 vm_map_t addressMap,
1221 IOVirtualAddress * atAddress,
1222 IOOptionBits options,
1223 IOByteCount sourceOffset = 0,
1224 IOByteCount length = 0 )
1225{
1226 IOReturn err = kIOReturnSuccess;
1227 vm_size_t ourSize;
1228 vm_size_t bytes;
1229 vm_offset_t mapped;
1230 vm_address_t logical;
1231 IOByteCount pageOffset;
1232 IOPhysicalLength segLen;
1233 IOPhysicalAddress physAddr;
1234
1235 if( 0 == length)
1236 length = getLength();
1237
1238 physAddr = getPhysicalSegment( sourceOffset, &segLen );
1239 assert( physAddr );
1240
1241 pageOffset = physAddr - trunc_page( physAddr );
1242 ourSize = length + pageOffset;
1243 physAddr -= pageOffset;
1244
1245 logical = *atAddress;
1246 if( 0 == (options & kIOMapAnywhere)) {
1247 mapped = trunc_page( logical );
1248 if( (logical - mapped) != pageOffset)
1249 err = kIOReturnVMError;
1250 }
1251 if( kIOReturnSuccess == err)
1252 err = vm_allocate( addressMap, &mapped, ourSize,
1253 ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1254 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1255
1256 if( err) {
1257#ifdef DEBUG
1258 kprintf("IOMemoryDescriptor::doMap: vm_allocate() "
1259 "returned %08x\n", err);
1260#endif
1261 return( err);
1262 }
1263
1264 // we have to make sure that these guys don't get copied if we fork.
1265 err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE);
1266 if( err != KERN_SUCCESS) {
1267 doUnmap( addressMap, mapped, ourSize); // back out
1268 return( err);
1269 }
1270
1271 logical = mapped;
1272 *atAddress = mapped + pageOffset;
1273
1274 segLen += pageOffset;
1275 bytes = ourSize;
1276 do {
1277 // in the middle of the loop only map whole pages
1278 if( segLen >= bytes)
1279 segLen = bytes;
1280 else if( segLen != trunc_page( segLen))
1281 err = kIOReturnVMError;
1282 if( physAddr != trunc_page( physAddr))
1283 err = kIOReturnBadArgument;
1284
1285#ifdef DEBUG
1286 if( kIOLogMapping & gIOKitDebug)
1287 kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n",
1288 addressMap, mapped + pageOffset, physAddr + pageOffset,
1289 segLen - pageOffset);
1290#endif
1291
1292 if( kIOReturnSuccess == err)
1293 err = IOMapPages( addressMap, mapped, physAddr, segLen, options );
1294 if( err)
1295 break;
1296
1297 sourceOffset += segLen - pageOffset;
1298 mapped += segLen;
1299 bytes -= segLen;
1300 pageOffset = 0;
1301
1302 } while( bytes
1303 && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));
1304
1305 if( bytes)
1306 err = kIOReturnBadArgument;
1307 if( err)
1308 doUnmap( addressMap, logical, ourSize );
1309 else
1310 mapped = true;
1311
1312 return( err );
1313}
1314
1315IOReturn IOMemoryDescriptor::doUnmap(
1316 vm_map_t addressMap,
1317 IOVirtualAddress logical,
1318 IOByteCount length )
1319{
1320 IOReturn err;
1321
1322#ifdef DEBUG
1323 if( kIOLogMapping & gIOKitDebug)
1324 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1325 addressMap, logical, length );
1326#endif
1327
1328 if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task())))
1329 err = vm_deallocate( addressMap, logical, length );
1330 else
1331 err = kIOReturnSuccess;
1332
1333 return( err );
1334}
1335
1336IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1337{
1338 IOReturn err;
1339 _IOMemoryMap * mapping = 0;
1340 OSIterator * iter;
1341
1342 LOCK;
1343
1344 do {
1345 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1346 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
1347 mapping->redirect( safeTask, redirect );
1348
1349 iter->release();
1350 }
1351 } while( false );
1352
1353 UNLOCK;
1354
1355 // temporary binary compatibility
1356 IOSubMemoryDescriptor * subMem;
1357 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
1358 err = subMem->redirect( safeTask, redirect );
1359 else
1360 err = kIOReturnSuccess;
1361
1362 return( err );
1363}
1364
1365IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1366{
1367// temporary binary compatibility IOMemoryDescriptor::redirect( safeTask, redirect );
1368 return( _parent->redirect( safeTask, redirect ));
1369}
1370
1371IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
1372{
1373 IOReturn err = kIOReturnSuccess;
1374
1375 if( superMap) {
1376// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
1377 } else {
1378
1379 LOCK;
1380 if( logical && addressMap
1381 && (get_task_map( safeTask) != addressMap)
1382 && (0 == (options & kIOMapStatic))) {
1383
1384 IOUnmapPages( addressMap, logical, length );
1385 if( !redirect) {
1386 err = vm_deallocate( addressMap, logical, length );
1387 err = memory->doMap( addressMap, &logical,
1388 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/ );
1389 } else
1390 err = kIOReturnSuccess;
1391#ifdef DEBUG
1392 IOLog("IOMemoryMap::redirect(%d, %x) %x from %lx\n", redirect, err, logical, addressMap);
1393#endif
1394 }
1395 UNLOCK;
1396 }
1397
1398 return( err );
1399}
1400
1401IOReturn _IOMemoryMap::unmap( void )
1402{
1403 IOReturn err;
1404
1405 LOCK;
1406
1407 if( logical && addressMap && (0 == superMap)
1408 && (0 == (options & kIOMapStatic))) {
1409
1410 err = memory->doUnmap( addressMap, logical, length );
1411 vm_map_deallocate(addressMap);
1412 addressMap = 0;
1413
1414 } else
1415 err = kIOReturnSuccess;
1416
1417 logical = 0;
1418
1419 UNLOCK;
1420
1421 return( err );
1422}
1423
1424void _IOMemoryMap::taskDied( void )
1425{
1426 LOCK;
1427 if( addressMap) {
1428 vm_map_deallocate(addressMap);
1429 addressMap = 0;
1430 }
1431 addressTask = 0;
1432 logical = 0;
1433 UNLOCK;
1434}
1435
1436void _IOMemoryMap::free()
1437{
1438 unmap();
1439
1440 if( memory) {
1441 LOCK;
1442 memory->removeMapping( this);
1443 UNLOCK;
1444 memory->release();
1445 }
1446
1447 if( superMap)
1448 superMap->release();
1449
1450 super::free();
1451}
1452
1453IOByteCount _IOMemoryMap::getLength()
1454{
1455 return( length );
1456}
1457
1458IOVirtualAddress _IOMemoryMap::getVirtualAddress()
1459{
1460 return( logical);
1461}
1462
1463task_t _IOMemoryMap::getAddressTask()
1464{
1465 if( superMap)
1466 return( superMap->getAddressTask());
1467 else
1468 return( addressTask);
1469}
1470
1471IOOptionBits _IOMemoryMap::getMapOptions()
1472{
1473 return( options);
1474}
1475
1476IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
1477{
1478 return( memory );
1479}
1480
1481_IOMemoryMap * _IOMemoryMap::isCompatible(
1482 IOMemoryDescriptor * owner,
1483 task_t task,
1484 IOVirtualAddress toAddress,
1485 IOOptionBits _options,
1486 IOByteCount _offset,
1487 IOByteCount _length )
1488{
1489 _IOMemoryMap * mapping;
1490
1491 if( (!task) || (task != getAddressTask()))
1492 return( 0 );
1493 if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
1494 return( 0 );
1495
1496 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
1497 return( 0 );
1498
1499 if( _offset < offset)
1500 return( 0 );
1501
1502 _offset -= offset;
1503
1504 if( (_offset + _length) > length)
1505 return( 0 );
1506
1507 if( (length == _length) && (!_offset)) {
1508 retain();
1509 mapping = this;
1510
1511 } else {
1512 mapping = new _IOMemoryMap;
1513 if( mapping
1514 && !mapping->init( owner, this, _offset, _length )) {
1515 mapping->release();
1516 mapping = 0;
1517 }
1518 }
1519
1520 return( mapping );
1521}
1522
1523IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
1524 IOPhysicalLength * length)
1525{
1526 IOPhysicalAddress address;
1527
1528 LOCK;
1529 address = memory->getPhysicalSegment( offset + _offset, length );
1530 UNLOCK;
1531
1532 return( address );
1533}
1534
1535/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1536
1537#undef super
1538#define super OSObject
1539
1540/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1541
1542void IOMemoryDescriptor::initialize( void )
1543{
1544 if( 0 == gIOMemoryLock)
1545 gIOMemoryLock = IORecursiveLockAlloc();
1546}
1547
1548void IOMemoryDescriptor::free( void )
1549{
1550 if( _mappings)
1551 _mappings->release();
1552
1553 super::free();
1554}
1555
1556IOMemoryMap * IOMemoryDescriptor::setMapping(
1557 task_t intoTask,
1558 IOVirtualAddress mapAddress,
1559 IOOptionBits options = 0 )
1560{
1561 _IOMemoryMap * map;
1562
1563 map = new _IOMemoryMap;
1564
1565 LOCK;
1566
1567 if( map
1568 && !map->init( this, intoTask, mapAddress,
1569 options | kIOMapStatic, 0, getLength() )) {
1570 map->release();
1571 map = 0;
1572 }
1573
1574 addMapping( map);
1575
1576 UNLOCK;
1577
1578 return( map);
1579}
1580
1581IOMemoryMap * IOMemoryDescriptor::map(
1582 IOOptionBits options = 0 )
1583{
1584
1585 return( makeMapping( this, kernel_task, 0,
1586 options | kIOMapAnywhere,
1587 0, getLength() ));
1588}
1589
1590IOMemoryMap * IOMemoryDescriptor::map(
1591 task_t intoTask,
1592 IOVirtualAddress toAddress,
1593 IOOptionBits options,
1594 IOByteCount offset = 0,
1595 IOByteCount length = 0 )
1596{
1597 if( 0 == length)
1598 length = getLength();
1599
1600 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
1601}
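/*
 * Illustrative mapping sketch (descriptor hypothetical): map the memory
 * anywhere in the kernel task, use it through the returned virtual
 * address, then release the IOMemoryMap to tear the mapping down.
 *
 *     IOMemoryMap * memMap = md->map();
 *     if( memMap) {
 *         IOVirtualAddress va = memMap->getVirtualAddress();
 *         // ... access the memory through va ...
 *         memMap->release();
 *     }
 */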
1602
1603IOMemoryMap * IOMemoryDescriptor::makeMapping(
1604 IOMemoryDescriptor * owner,
1605 task_t intoTask,
1606 IOVirtualAddress toAddress,
1607 IOOptionBits options,
1608 IOByteCount offset,
1609 IOByteCount length )
1610{
1611 _IOMemoryMap * mapping = 0;
1612 OSIterator * iter;
1613
1614 LOCK;
1615
1616 do {
1617 // look for an existing mapping
1618 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1619
1620 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
1621
1622 if( (mapping = mapping->isCompatible(
1623 owner, intoTask, toAddress,
1624 options | kIOMapReference,
1625 offset, length )))
1626 break;
1627 }
1628 iter->release();
1629 if( mapping)
1630 continue;
1631 }
1632
1633
1634 if( mapping || (options & kIOMapReference))
1635 continue;
1636
1637 owner = this;
1638
1639 mapping = new _IOMemoryMap;
1640 if( mapping
1641 && !mapping->init( owner, intoTask, toAddress, options,
1642 offset, length )) {
1643
1644 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
1645 mapping->release();
1646 mapping = 0;
1647 }
1648
1649 } while( false );
1650
1651 owner->addMapping( mapping);
1652
1653 UNLOCK;
1654
1655 return( mapping);
1656}
1657
1658void IOMemoryDescriptor::addMapping(
1659 IOMemoryMap * mapping )
1660{
1661 if( mapping) {
1662 if( 0 == _mappings)
1663 _mappings = OSSet::withCapacity(1);
1664 if( _mappings && _mappings->setObject( mapping ))
1665 mapping->release(); /* really */
1666 }
1667}
1668
1669void IOMemoryDescriptor::removeMapping(
1670 IOMemoryMap * mapping )
1671{
1672 if( _mappings) {
1673 mapping->retain();
1674 mapping->retain();
1675 _mappings->removeObject( mapping);
1676 }
1677}
1678
1679/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1680
1681#undef super
1682#define super IOMemoryDescriptor
1683
1684OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
1685
1686/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1687
1688bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
1689 IOByteCount offset, IOByteCount length,
1690 IODirection withDirection )
1691{
1692 if( !super::init())
1693 return( false );
1694
1695 if( !parent)
1696 return( false);
1697
1698 if( (offset + length) > parent->getLength())
1699 return( false);
1700
1701 parent->retain();
1702 _parent = parent;
1703 _start = offset;
1704 _length = length;
1705 _direction = withDirection;
1706 _tag = parent->getTag();
1707
1708 return( true );
1709}
1710
1711void IOSubMemoryDescriptor::free( void )
1712{
1713 if( _parent)
1714 _parent->release();
1715
1716 super::free();
1717}
1718
1719
1720IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
1721 IOByteCount * length )
1722{
1723 IOPhysicalAddress address;
1724 IOByteCount actualLength;
1725
1726 assert(offset <= _length);
1727
1728 if( length)
1729 *length = 0;
1730
1731 if( offset >= _length)
1732 return( 0 );
1733
1734 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
1735
1736 if( address && length)
1737 *length = min( _length - offset, actualLength );
1738
1739 return( address );
1740}
1741
1742void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1743 IOByteCount * lengthOfSegment)
1744{
1745 return( 0 );
1746}
1747
1748IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
1749 void * bytes, IOByteCount withLength)
1750{
1751 IOByteCount byteCount;
1752
1753 assert(offset <= _length);
1754
1755 if( offset >= _length)
1756 return( 0 );
1757
1758 LOCK;
1759 byteCount = _parent->readBytes( _start + offset, bytes,
1760 min(withLength, _length - offset) );
1761 UNLOCK;
1762
1763 return( byteCount );
1764}
1765
1766IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
1767 const void* bytes, IOByteCount withLength)
1768{
1769 IOByteCount byteCount;
1770
1771 assert(offset <= _length);
1772
1773 if( offset >= _length)
1774 return( 0 );
1775
1776 LOCK;
1777 byteCount = _parent->writeBytes( _start + offset, bytes,
1778 min(withLength, _length - offset) );
1779 UNLOCK;
1780
1781 return( byteCount );
1782}
1783
1784IOReturn IOSubMemoryDescriptor::prepare(
1785 IODirection forDirection = kIODirectionNone)
1786{
1787 IOReturn err;
1788
1789 LOCK;
1790 err = _parent->prepare( forDirection);
1791 UNLOCK;
1792
1793 return( err );
1794}
1795
1796IOReturn IOSubMemoryDescriptor::complete(
1797 IODirection forDirection = kIODirectionNone)
1798{
1799 IOReturn err;
1800
1801 LOCK;
1802 err = _parent->complete( forDirection);
1803 UNLOCK;
1804
1805 return( err );
1806}
1807
1808IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
1809 IOMemoryDescriptor * owner,
1810 task_t intoTask,
1811 IOVirtualAddress toAddress,
1812 IOOptionBits options,
1813 IOByteCount offset,
1814 IOByteCount length )
1815{
1816 IOMemoryMap * mapping;
1817
1818 mapping = (IOMemoryMap *) _parent->makeMapping(
1819 _parent, intoTask,
1820 toAddress - (_start + offset),
1821 options | kIOMapReference,
1822 _start + offset, length );
1823
1824 if( !mapping)
1825 mapping = super::makeMapping( owner, intoTask, toAddress, options,
1826 offset, length );
1827
1828 return( mapping );
1829}
1830
1831/* ick */
1832
1833bool
1834IOSubMemoryDescriptor::initWithAddress(void * address,
1835 IOByteCount withLength,
1836 IODirection withDirection)
1837{
1838 return( false );
1839}
1840
1841bool
1842IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
1843 IOByteCount withLength,
1844 IODirection withDirection,
1845 task_t withTask)
1846{
1847 return( false );
1848}
1849
1850bool
1851IOSubMemoryDescriptor::initWithPhysicalAddress(
1852 IOPhysicalAddress address,
1853 IOByteCount withLength,
1854 IODirection withDirection )
1855{
1856 return( false );
1857}
1858
1859bool
1860IOSubMemoryDescriptor::initWithRanges(
1861 IOVirtualRange * ranges,
1862 UInt32 withCount,
1863 IODirection withDirection,
1864 task_t withTask,
1865 bool asReference = false)
1866{
1867 return( false );
1868}
1869
1870bool
1871IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1872 UInt32 withCount,
1873 IODirection withDirection,
1874 bool asReference = false)
1875{
1876 return( false );
1877}
1878
1879/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1880
1881OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0);
1882OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
1883OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
1884OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
1885OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
1886OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
1887OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
1888OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
1889OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
1890OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
1891OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
1892OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
1893OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
1894OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
1895OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
1896OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);