1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
24 *
25 * HISTORY
26 *
27 */
28
29 #include <IOKit/assert.h>
30 #include <IOKit/system.h>
31 #include <IOKit/IOLib.h>
32 #include <IOKit/IOMemoryDescriptor.h>
33
34 #include <IOKit/IOKitDebug.h>
35
36 #include <libkern/c++/OSContainers.h>
37 #include <sys/cdefs.h>
38
39 __BEGIN_DECLS
40 #include <vm/pmap.h>
41 void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
42 vm_prot_t prot, boolean_t wired);
43 void ipc_port_release_send(ipc_port_t port);
44 vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
45 __END_DECLS
46
47 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
48
49 OSDefineMetaClass( IOMemoryDescriptor, OSObject )
50 OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )
51
52 #define super IOMemoryDescriptor
53
54 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
55
56 extern "C" vm_map_t IOPageableMapForAddress( vm_address_t address );
57
58 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
59
60 inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
61 {
62 if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
63 return( IOPageableMapForAddress( address ) );
64 else
65 return( get_task_map( task ));
66 }
67
68 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
69
70 /*
71 * withAddress:
72 *
73 * Create a new IOMemoryDescriptor. The buffer is a virtual address
74 * relative to the specified task. If no task is supplied, the kernel
75 * task is implied.
76 */
77 IOMemoryDescriptor *
78 IOMemoryDescriptor::withAddress(void * address,
79 IOByteCount withLength,
80 IODirection withDirection)
81 {
82 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
83 if (that)
84 {
85 if (that->initWithAddress(address, withLength, withDirection))
86 return that;
87
88 that->release();
89 }
90 return 0;
91 }
92
93 IOMemoryDescriptor *
94 IOMemoryDescriptor::withAddress(vm_address_t address,
95 IOByteCount withLength,
96 IODirection withDirection,
97 task_t withTask)
98 {
99 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
100 if (that)
101 {
102 if (that->initWithAddress(address, withLength, withDirection, withTask))
103 return that;
104
105 that->release();
106 }
107 return 0;
108 }
109
110 IOMemoryDescriptor *
111 IOMemoryDescriptor::withPhysicalAddress(
112 IOPhysicalAddress address,
113 IOByteCount withLength,
114 IODirection withDirection )
115 {
116 return( IOMemoryDescriptor::withAddress( address, withLength,
117 withDirection, (task_t) 0 ));
118 }
119
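/*
 * Usage sketch (illustrative only, compiled out): describing an existing
 * buffer with the withAddress factories above.  The identifiers in this
 * example (exampleWithAddress, userTask, userVA) are hypothetical.
 */
#if 0
static void exampleWithAddress( task_t userTask, vm_address_t userVA )
{
    static char buffer[ 256 ];

    /* Kernel virtual buffer; the kernel task is implied. */
    IOMemoryDescriptor * kmd = IOMemoryDescriptor::withAddress(
                                    buffer, sizeof( buffer ), kIODirectionOut );

    /* Buffer living in another task's address space. */
    IOMemoryDescriptor * umd = IOMemoryDescriptor::withAddress(
                                    userVA, 4096, kIODirectionIn, userTask );

    if( kmd) kmd->release();
    if( umd) umd->release();
}
#endif
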
120
121 /*
122 * withRanges:
123 *
124 * Create a new IOMemoryDescriptor. The buffer is made up of several
125 * virtual address ranges, from a given task.
126 *
127 * Passing the ranges as a reference will avoid an extra allocation.
128 */
129 IOMemoryDescriptor *
130 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
131 UInt32 withCount,
132 IODirection withDirection,
133 task_t withTask,
134 bool asReference = false)
135 {
136 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
137 if (that)
138 {
139 if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
140 return that;
141
142 that->release();
143 }
144 return 0;
145 }
146
147 IOMemoryDescriptor *
148 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
149 UInt32 withCount,
150 IODirection withDirection,
151 bool asReference = false)
152 {
153 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
154 if (that)
155 {
156 if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
157 return that;
158
159 that->release();
160 }
161 return 0;
162 }
163
164 IOMemoryDescriptor *
165 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
166 IOByteCount offset,
167 IOByteCount length,
168 IODirection withDirection)
169 {
170 IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;
171
172 if (that && !that->initSubRange(of, offset, length, withDirection)) {
173 that->release();
174 that = 0;
175 }
176 return that;
177 }
178
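/*
 * Usage sketch (illustrative only, compiled out): a two-range scatter/gather
 * descriptor plus a sub-range view of an existing descriptor.  The addresses
 * and identifiers (exampleWithRanges, parent) are hypothetical.
 */
#if 0
static void exampleWithRanges( task_t withTask, IOMemoryDescriptor * parent )
{
    IOVirtualRange ranges[ 2 ];

    ranges[0].address = 0x00001000; ranges[0].length = 0x0800;
    ranges[1].address = 0x00009000; ranges[1].length = 0x1000;

    /* asReference == false, so the descriptor copies the range array. */
    IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
                                    ranges, 2, kIODirectionOut, withTask, false );

    /* A window onto the first page_size bytes of an existing descriptor. */
    IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
                                    parent, 0, page_size, kIODirectionOut );

    if( md)  md->release();
    if( sub) sub->release();
}
#endif
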
179 /*
180 * initWithAddress:
181 *
182 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
183 * relative to the specified task. If no task is supplied, the kernel
184 * task is implied.
185 *
186 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
187 * initWithRanges again on an existing instance -- note this behavior
188 * is not commonly supported in other I/O Kit classes, although it is
189 * supported here.
190 */
191 bool
192 IOGeneralMemoryDescriptor::initWithAddress(void * address,
193 IOByteCount withLength,
194 IODirection withDirection)
195 {
196 _singleRange.v.address = (vm_address_t) address;
197 _singleRange.v.length = withLength;
198
199 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
200 }
201
202 bool
203 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
204 IOByteCount withLength,
205 IODirection withDirection,
206 task_t withTask)
207 {
208 _singleRange.v.address = address;
209 _singleRange.v.length = withLength;
210
211 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
212 }
213
214 bool
215 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
216 IOPhysicalAddress address,
217 IOByteCount withLength,
218 IODirection withDirection )
219 {
220 _singleRange.p.address = address;
221 _singleRange.p.length = withLength;
222
223 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
224 }
225
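/*
 * Usage sketch (illustrative only, compiled out): re-targeting an existing
 * instance, as the comment above describes.  The identifiers
 * (exampleReuse, first, second) are hypothetical.
 */
#if 0
static void exampleReuse( IOGeneralMemoryDescriptor * md,
                          void * first, void * second, IOByteCount len )
{
    if( md->initWithAddress( first, len, kIODirectionOut )) {
        /* ... use the descriptor ... */
    }
    /* The same instance may now be pointed somewhere else. */
    if( md->initWithAddress( second, len, kIODirectionIn )) {
        /* ... use the descriptor again ... */
    }
}
#endif
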
226 /*
227 * initWithRanges:
228 *
229 * Initialize an IOMemoryDescriptor. The buffer is made up of several
230 * virtual address ranges, from a given task.
231 *
232 * Passing the ranges as a reference will avoid an extra allocation.
233 *
234 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
235 * initWithRanges again on an existing instance -- note this behavior
236 * is not commonly supported in other I/O Kit classes, although it is
237 * supported here.
238 */
239 bool
240 IOGeneralMemoryDescriptor::initWithRanges(
241 IOVirtualRange * ranges,
242 UInt32 withCount,
243 IODirection withDirection,
244 task_t withTask,
245 bool asReference = false)
246 {
247 assert(ranges);
248 assert(withCount);
249
250 /*
251 * We can check the _initialized instance variable before having ever set
252 * it to an initial value because I/O Kit guarantees that all our instance
253 * variables are zeroed on an object's allocation.
254 */
255
256 if (_initialized == false)
257 {
258 if (super::init() == false) return false;
259 _initialized = true;
260 }
261 else
262 {
263 /*
264 * An existing memory descriptor is being retargeted to point to
265 * somewhere else. Clean up our present state.
266 */
267
268 assert(_wireCount == 0);
269
270 while (_wireCount)
271 complete();
272 if (_kernPtrAligned)
273 unmapFromKernel();
274 if (_ranges.v && _rangesIsAllocated)
275 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
276 }
277
278 /*
279 * Initialize the memory descriptor.
280 */
281
282 _ranges.v = 0;
283 _rangesCount = withCount;
284 _rangesIsAllocated = asReference ? false : true;
285 _direction = withDirection;
286 _length = 0;
287 _task = withTask;
288 _position = 0;
289 _positionAtIndex = 0;
290 _positionAtOffset = 0;
291 _kernPtrAligned = 0;
292 _cachedPhysicalAddress = 0;
293 _cachedVirtualAddress = 0;
294 _flags = 0;
295
296 if (withTask && (withTask != kernel_task))
297 _flags |= kIOMemoryRequiresWire;
298
299 if (asReference)
300 _ranges.v = ranges;
301 else
302 {
303 _ranges.v = IONew(IOVirtualRange, withCount);
304 if (_ranges.v == 0) return false;
305 bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
306 }
307
308 for (unsigned index = 0; index < _rangesCount; index++)
309 {
310 _length += _ranges.v[index].length;
311 }
312
313 return true;
314 }
315
316 bool
317 IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
318 UInt32 withCount,
319 IODirection withDirection,
320 bool asReference = false)
321 {
322 #warning assuming virtual, physical addresses same size
323 return( initWithRanges( (IOVirtualRange *) ranges,
324 withCount, withDirection, (task_t) 0, asReference ));
325 }
326
327 /*
328 * free
329 *
330 * Free resources.
331 */
332 void IOGeneralMemoryDescriptor::free()
333 {
334 while (_wireCount)
335 complete();
336 if (_kernPtrAligned)
337 unmapFromKernel();
338 if (_ranges.v && _rangesIsAllocated)
339 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
340 if( _memEntry)
341 ipc_port_release_send( (ipc_port_t) _memEntry );
342 super::free();
343 }
344
345 void IOGeneralMemoryDescriptor::unmapFromKernel()
346 {
347 kern_return_t krtn;
348 vm_offset_t off;
349 // Pull the shared pages out of the task map
350 // Do we need to unwire it first?
351 for ( off = 0; off < _kernSize; off += page_size )
352 {
353 pmap_change_wiring(
354 kernel_pmap,
355 _kernPtrAligned + off,
356 FALSE);
357
358 pmap_remove(
359 kernel_pmap,
360 _kernPtrAligned + off,
361 _kernPtrAligned + off + page_size);
362 }
363 // Free the former shmem area in the task
364 krtn = vm_deallocate(kernel_map,
365 _kernPtrAligned,
366 _kernSize );
367 assert(krtn == KERN_SUCCESS);
368 _kernPtrAligned = 0;
369 }
370
371 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
372 {
373 kern_return_t krtn;
374 vm_offset_t off;
375
376 if (_kernPtrAligned)
377 {
378 if (_kernPtrAtIndex == rangeIndex) return;
379 unmapFromKernel();
380 assert(_kernPtrAligned == 0);
381 }
382
383 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
384
385 _kernSize = trunc_page(_ranges.v[rangeIndex].address +
386 _ranges.v[rangeIndex].length +
387 page_size - 1) - srcAlign;
388
389 /* Find some memory of the same size in kernel task. We use vm_allocate()
390 to do this. vm_allocate inserts the found memory object in the
391 target task's map as a side effect. */
392 krtn = vm_allocate( kernel_map,
393 &_kernPtrAligned,
394 _kernSize,
395 VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
396 assert(krtn == KERN_SUCCESS);
397 if(krtn) return;
398
399 /* For each page in the area allocated from the kernel map,
400 find the physical address of the page.
401 Enter the page in the target task's pmap, at the
402 appropriate target task virtual address. */
403 for ( off = 0; off < _kernSize; off += page_size )
404 {
405 vm_offset_t kern_phys_addr, phys_addr;
406 if( _task)
407 phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
408 else
409 phys_addr = srcAlign + off;
410 assert(phys_addr);
411 if(phys_addr == 0) return;
412
413 // Check original state.
414 kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
415 // Set virtual page to point to the right physical one
416 pmap_enter(
417 kernel_pmap,
418 _kernPtrAligned + off,
419 phys_addr,
420 VM_PROT_READ|VM_PROT_WRITE,
421 TRUE);
422 }
423 _kernPtrAtIndex = rangeIndex;
424 }
425
426 /*
427 * getDirection:
428 *
429 * Get the direction of the transfer.
430 */
431 IODirection IOMemoryDescriptor::getDirection() const
432 {
433 return _direction;
434 }
435
436 /*
437 * getLength:
438 *
439 * Get the length of the transfer (over all ranges).
440 */
441 IOByteCount IOMemoryDescriptor::getLength() const
442 {
443 return _length;
444 }
445
446 void IOMemoryDescriptor::setTag(
447 IOOptionBits tag )
448 {
449 _tag = tag;
450 }
451
452 IOOptionBits IOMemoryDescriptor::getTag( void )
453 {
454 return( _tag);
455 }
456
457 /*
458 * setPosition
459 *
460 * Set the logical start position inside the client buffer.
461 *
462 * By convention, the position reflects the actual byte count that has
463 * been successfully transferred into or out of the buffer before the I/O
464 * request is "completed" (i.e. sent back to its originator).
465 */
466
467 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
468 {
469 assert(position <= _length);
470
471 if (position >= _length)
472 {
473 _position = _length;
474 _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
475 _positionAtOffset = 0;
476 return;
477 }
478
479 if (position < _position)
480 {
481 _positionAtOffset = position;
482 _positionAtIndex = 0;
483 }
484 else
485 {
486 _positionAtOffset += (position - _position);
487 }
488 _position = position;
489
490 while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
491 {
492 _positionAtOffset -= _ranges.v[_positionAtIndex].length;
493 _positionAtIndex++;
494 }
495 }
496
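/*
 * Illustrative sketch (compiled out): the walk performed by setPosition()
 * above, restated as a standalone helper that maps a linear position to a
 * (range index, offset-in-range) pair.  All names are hypothetical.
 */
#if 0
static void examplePositionToRange( const IOVirtualRange * ranges, UInt32 count,
                                    IOByteCount position,
                                    UInt32 * outIndex, IOByteCount * outOffset )
{
    UInt32      index  = 0;
    IOByteCount offset = position;

    /* Consume whole ranges until the remainder falls inside one. */
    while( (index < count) && (offset >= ranges[index].length)) {
        offset -= ranges[index].length;
        index++;
    }
    *outIndex  = index;         /* == count when position is at end-of-buffer */
    *outOffset = offset;
}
#endif
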
497 /*
498 * readBytes:
499 *
500 * Copy data from the memory descriptor's buffer into the specified buffer,
501 * relative to the current position. The memory descriptor's position is
502 * advanced based on the number of bytes copied.
503 */
504
505 IOByteCount IOGeneralMemoryDescriptor::readBytes(IOByteCount offset,
506 void * bytes, IOByteCount withLength)
507 {
508 IOByteCount bytesLeft;
509 void * segment;
510 IOByteCount segmentLength;
511
512 if( offset != _position)
513 setPosition( offset );
514
515 withLength = min(withLength, _length - _position);
516 bytesLeft = withLength;
517
518 #if 0
519 while (bytesLeft && (_position < _length))
520 {
521 /* Compute the relative length to the end of this virtual segment. */
522 segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
523
524 /* Compute the relative address of this virtual segment. */
525 segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
526
527 if (KERN_SUCCESS != vm_map_read_user(getMapForTask(_task, segment),
528 /* from */ (vm_offset_t) segment, /* to */ (vm_offset_t) bytes,
529 /* size */ segmentLength))
530 {
531 assert( false );
532 bytesLeft = withLength;
533 break;
534 }
535 bytesLeft -= segmentLength;
536 offset += segmentLength;
537 setPosition(offset);
538 }
539 #else
540 while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
541 {
542 segmentLength = min(segmentLength, bytesLeft);
543 bcopy(/* from */ segment, /* to */ bytes, /* size */ segmentLength);
544 bytesLeft -= segmentLength;
545 offset += segmentLength;
546 bytes = (void *) (((UInt32) bytes) + segmentLength);
547 }
548 #endif
549
550 return withLength - bytesLeft;
551 }
552
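/*
 * Usage sketch (illustrative only, compiled out): copying the first bytes of
 * a descriptor into a local kernel buffer.  A descriptor that requires wiring
 * should be prepare()d first.  exampleReadBytes is hypothetical.
 */
#if 0
static IOByteCount exampleReadBytes( IOMemoryDescriptor * md )
{
    char        local[ 64 ];
    IOByteCount copied;

    copied = md->readBytes( /* offset */ 0, local, sizeof( local ));
    return( copied );           /* may be short if the descriptor is smaller */
}
#endif
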
553 /*
554 * writeBytes:
555 *
556 * Copy data to the memory descriptor's buffer from the specified buffer,
557 * relative to the current position. The memory descriptor's position is
558 * advanced based on the number of bytes copied.
559 */
560 IOByteCount IOGeneralMemoryDescriptor::writeBytes(IOByteCount offset,
561 const void* bytes,IOByteCount withLength)
562 {
563 IOByteCount bytesLeft;
564 void * segment;
565 IOByteCount segmentLength;
566
567 if( offset != _position)
568 setPosition( offset );
569
570 withLength = min(withLength, _length - _position);
571 bytesLeft = withLength;
572
573 #if 0
574 while (bytesLeft && (_position < _length))
575 {
576 assert(_position <= _length);
577
578 /* Compute the relative length to the end of this virtual segment. */
579 segmentLength = min(_ranges.v[_positionAtIndex].length - _positionAtOffset, bytesLeft);
580
581 /* Compute the relative address of this virtual segment. */
582 segment = (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
583
584 if (KERN_SUCCESS != vm_map_write_user(getMapForTask(_task, segment),
585 /* from */ (vm_offset_t) bytes,
586 /* to */ (vm_offset_t) segment,
587 /* size */ segmentLength))
588 {
589 assert( false );
590 bytesLeft = withLength;
591 break;
592 }
593 bytesLeft -= segmentLength;
594 offset += segmentLength;
595 setPosition(offset);
596 }
597 #else
598 while (bytesLeft && (segment = getVirtualSegment(offset, &segmentLength)))
599 {
600 segmentLength = min(segmentLength, bytesLeft);
601 bcopy(/* from */ bytes, /* to */ segment, /* size */ segmentLength);
602 // Flush cache in case we're copying code around, e.g. handling a code page fault
603 IOFlushProcessorCache(kernel_task, (vm_offset_t) segment, segmentLength );
604
605 bytesLeft -= segmentLength;
606 offset += segmentLength;
607 bytes = (void *) (((UInt32) bytes) + segmentLength);
608 }
609 #endif
610
611 return withLength - bytesLeft;
612 }
613
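/*
 * Usage sketch (illustrative only, compiled out): filling the start of a
 * descriptor's memory; writeBytes() also flushes the processor cache for the
 * touched segments, as noted above.  exampleWriteBytes is hypothetical.
 */
#if 0
static IOByteCount exampleWriteBytes( IOMemoryDescriptor * md )
{
    static const char pattern[] = "IOMD";

    return( md->writeBytes( /* offset */ 0, pattern, sizeof( pattern )));
}
#endif
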
614 /*
615 * getPhysicalSegment:
616 *
617 * Get the physical address of the buffer, relative to the current position.
618 * If the current position is at the end of the buffer, a zero is returned.
619 */
620 IOPhysicalAddress
621 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
622 IOByteCount * lengthOfSegment)
623 {
624 vm_address_t virtualAddress;
625 IOByteCount virtualLength;
626 pmap_t virtualPMap;
627 IOPhysicalAddress physicalAddress;
628 IOPhysicalLength physicalLength;
629
630 if( kIOMemoryRequiresWire & _flags)
631 assert( _wireCount );
632
633 if ((0 == _task) && (1 == _rangesCount))
634 {
635 assert(offset <= _length);
636 if (offset >= _length)
637 {
638 physicalAddress = 0;
639 physicalLength = 0;
640 }
641 else
642 {
643 physicalLength = _length - offset;
644 physicalAddress = offset + _ranges.v[0].address;
645 }
646
647 if (lengthOfSegment)
648 *lengthOfSegment = physicalLength;
649 return physicalAddress;
650 }
651
652 if( offset != _position)
653 setPosition( offset );
654
655 assert(_position <= _length);
656
657 /* Fail gracefully if the position is at (or past) the end-of-buffer. */
658 if (_position >= _length)
659 {
660 *lengthOfSegment = 0;
661 return 0;
662 }
663
664 /* Prepare to compute the largest contiguous physical length possible. */
665
666 virtualAddress = _ranges.v[_positionAtIndex].address + _positionAtOffset;
667 virtualLength = _ranges.v[_positionAtIndex].length - _positionAtOffset;
668 vm_address_t virtualPage = trunc_page(virtualAddress);
669 if( _task)
670 virtualPMap = get_task_pmap(_task);
671 else
672 virtualPMap = 0;
673
674 physicalAddress = (virtualAddress == _cachedVirtualAddress) ?
675 _cachedPhysicalAddress : /* optimization */
676 virtualPMap ?
677 pmap_extract(virtualPMap, virtualAddress) :
678 virtualAddress;
679 physicalLength = trunc_page(physicalAddress) + page_size - physicalAddress;
680
681 if (!physicalAddress && _task)
682 {
683 physicalAddress =
684 vm_map_get_phys_page(get_task_map(_task), virtualPage);
685 physicalAddress += virtualAddress - virtualPage;
686 }
687
688 if (physicalAddress == 0) /* memory must be wired in order to proceed */
689 {
690 assert(physicalAddress);
691 *lengthOfSegment = 0;
692 return 0;
693 }
694
695 /* Compute the largest contiguous physical length possible, within range. */
696 IOPhysicalAddress physicalPage = trunc_page(physicalAddress);
697
698 while (physicalLength < virtualLength)
699 {
700 physicalPage += page_size;
701 virtualPage += page_size;
702 _cachedVirtualAddress = virtualPage;
703 _cachedPhysicalAddress = virtualPMap ?
704 pmap_extract(virtualPMap, virtualPage) :
705 virtualPage;
706 if (!_cachedPhysicalAddress && _task)
707 {
708 _cachedPhysicalAddress =
709 vm_map_get_phys_page(get_task_map(_task), virtualPage);
710 }
711
712 if (_cachedPhysicalAddress != physicalPage) break;
713
714 physicalLength += page_size;
715 }
716
717 /* Clip contiguous physical length at the end of this range. */
718 if (physicalLength > virtualLength)
719 physicalLength = virtualLength;
720
721 if( lengthOfSegment)
722 *lengthOfSegment = physicalLength;
723
724 return physicalAddress;
725 }
726
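/*
 * Usage sketch (illustrative only, compiled out): walking a prepared
 * descriptor's physically contiguous segments, e.g. to build a DMA
 * scatter/gather list.  exampleWalkPhysicalSegments is hypothetical.
 */
#if 0
static void exampleWalkPhysicalSegments( IOMemoryDescriptor * md )
{
    IOByteCount offset = 0;
    IOByteCount length = md->getLength();

    while( offset < length) {
        IOByteCount       segLen;
        IOPhysicalAddress segPhys = md->getPhysicalSegment( offset, &segLen );

        if( 0 == segPhys)
            break;              /* unwired or unmapped memory */

        IOLog("segment %08lx, length %08lx\n",
                (unsigned long) segPhys, (unsigned long) segLen);
        offset += segLen;
    }
}
#endif
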
727
728 /*
729 * getVirtualSegment:
730 *
731 * Get the virtual address of the buffer, relative to the current position.
732 * If the memory wasn't mapped into the caller's address space, it will be
733 * mapped in now. If the current position is at the end of the buffer, a
734 * null is returned.
735 */
736 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
737 IOByteCount * lengthOfSegment)
738 {
739 if( offset != _position)
740 setPosition( offset );
741
742 assert(_position <= _length);
743
744 /* Fail gracefully if the position is at (or past) the end-of-buffer. */
745 if (_position >= _length)
746 {
747 *lengthOfSegment = 0;
748 return 0;
749 }
750
751 /* Compute the relative length to the end of this virtual segment. */
752 *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
753
754 /* Compute the relative address of this virtual segment. */
755 if (_task == kernel_task)
756 return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
757 else
758 {
759 vm_offset_t off;
760
761 mapIntoKernel(_positionAtIndex);
762
763 off = _ranges.v[_kernPtrAtIndex].address;
764 off -= trunc_page(off);
765
766 return (void *) (_kernPtrAligned + off + _positionAtOffset);
767 }
768 }
769
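/*
 * Usage sketch (illustrative only, compiled out): touching the first virtual
 * segment of a descriptor from the kernel.  exampleTouchFirstSegment is
 * hypothetical.
 */
#if 0
static void exampleTouchFirstSegment( IOMemoryDescriptor * md )
{
    IOByteCount segLen;
    void *      p = md->getVirtualSegment( 0, &segLen );

    if( p && segLen)
        IOLog("first byte is %02x\n", *((unsigned char *) p));
}
#endif
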
770 /*
771 * prepare
772 *
773 * Prepare the memory for an I/O transfer. This involves paging in
774 * the memory, if necessary, and wiring it down for the duration of
775 * the transfer. The complete() method completes the processing of
776 * the memory after the I/O transfer finishes. This method needn't
777 * be called for non-pageable memory.
778 */
779 IOReturn IOGeneralMemoryDescriptor::prepare(
780 IODirection forDirection = kIODirectionNone)
781 {
782 UInt rangeIndex = 0;
783
784 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
785 kern_return_t rc;
786
787 if(forDirection == kIODirectionNone)
788 forDirection = _direction;
789
790 vm_prot_t access = VM_PROT_DEFAULT; // Could be cleverer using direction
791
792 //
793 // Check user read/write access to the data buffer.
794 //
795
796 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
797 {
798 vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
799 vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length );
800
801 while (checkSize)
802 {
803 vm_region_basic_info_data_t regionInfo;
804 mach_msg_type_number_t regionInfoSize = sizeof(regionInfo);
805 vm_size_t regionSize;
806
807 if ( (vm_region(
808 /* map */ getMapForTask(_task, checkBase),
809 /* address */ &checkBase,
810 /* size */ &regionSize,
811 /* flavor */ VM_REGION_BASIC_INFO,
812 /* info */ (vm_region_info_t) &regionInfo,
813 /* info size */ &regionInfoSize,
814 /* object name */ 0 ) != KERN_SUCCESS ) ||
815 ( (forDirection & kIODirectionIn ) &&
816 !(regionInfo.protection & VM_PROT_WRITE) ) ||
817 ( (forDirection & kIODirectionOut) &&
818 !(regionInfo.protection & VM_PROT_READ ) ) )
819 {
820 return kIOReturnVMError;
821 }
822
823 assert((regionSize & PAGE_MASK) == 0);
824
825 regionSize = min(regionSize, checkSize);
826 checkSize -= regionSize;
827 checkBase += regionSize;
828 } // (for each vm region)
829 } // (for each io range)
830
831 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
832
833 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
834 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
835 _ranges.v[rangeIndex].length +
836 page_size - 1);
837
838 vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
839
840 rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
841 if (KERN_SUCCESS != rc) {
842 IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
843 goto abortExit;
844 }
845
846 // If this I/O is for a user land task then protect ourselves
847 // against COW and other vm_shenanigans
848 if (_task && _task != kernel_task) {
849 // setup a data object to hold the 'named' memory regions
850 // @@@ gvdl: If we fail to allocate an OSData we will just
851 // hope for the best for the time being. Lets not fail a
852 // prepare at this late stage in product release.
853 if (!_memoryEntries)
854 _memoryEntries = OSData::withCapacity(16);
855 if (_memoryEntries) {
856 vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
857 vm_object_offset_t entryStart = srcAlign;
858 ipc_port_t memHandle;
859
860 do {
861 vm_object_offset_t actualSize = desiredSize;
862
863 rc = mach_make_memory_entry_64
864 (taskVMMap, &actualSize, entryStart,
865 forDirection, &memHandle, NULL);
866 if (KERN_SUCCESS != rc) {
867 IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
868 goto abortExit;
869 }
870
871 _memoryEntries->
872 appendBytes(&memHandle, sizeof(memHandle));
873 desiredSize -= actualSize;
874 entryStart += actualSize;
875 } while (desiredSize);
876 }
877 }
878 }
879 }
880 _wireCount++;
881 return kIOReturnSuccess;
882
883 abortExit:
884 UInt doneIndex;
885
886
887 for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
888 vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
889 IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
890 _ranges.v[doneIndex].length +
891 page_size - 1);
892
893 vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
894 srcAlignEnd, FALSE);
895 }
896
897 if (_memoryEntries) {
898 ipc_port_t *handles, *handlesEnd;
899
900 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
901 handlesEnd = (ipc_port_t *)
902 ((vm_address_t) handles + _memoryEntries->getLength());
903 while (handles < handlesEnd)
904 ipc_port_release_send(*handles++);
905 _memoryEntries->release();
906 _memoryEntries = 0;
907 }
908
909 return kIOReturnVMError;
910 }
911
912 /*
913 * complete
914 *
915 * Complete processing of the memory after an I/O transfer finishes.
916 * This method should not be called unless a prepare was previously
917 * issued; prepare() and complete() must occur in pairs, before and
918 * after an I/O transfer involving pageable memory.
919 */
920
921 IOReturn IOGeneralMemoryDescriptor::complete(
922 IODirection forDirection = kIODirectionNone)
923 {
924 assert(_wireCount);
925
926 if(0 == _wireCount)
927 return kIOReturnSuccess;
928
929 _wireCount--;
930 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
931 UInt rangeIndex;
932 kern_return_t rc;
933
934 if(forDirection == kIODirectionNone)
935 forDirection = _direction;
936
937 for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
938
939 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
940 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
941 _ranges.v[rangeIndex].length +
942 page_size - 1);
943
944 if(forDirection == kIODirectionIn)
945 pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);
946
947 rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
948 srcAlignEnd, FALSE);
949 if(rc != KERN_SUCCESS)
950 IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
951 }
952
953 if (_memoryEntries) {
954 ipc_port_t *handles, *handlesEnd;
955
956 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
957 handlesEnd = (ipc_port_t *)
958 ((vm_address_t) handles + _memoryEntries->getLength());
959 while (handles < handlesEnd)
960 ipc_port_release_send(*handles++);
961
962 _memoryEntries->release();
963 _memoryEntries = 0;
964 }
965
966 _cachedVirtualAddress = 0;
967 }
968 return kIOReturnSuccess;
969 }
970
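/*
 * Usage sketch (illustrative only, compiled out): the prepare()/complete()
 * pairing around an I/O on pageable memory.  exampleWiredTransfer is
 * hypothetical.
 */
#if 0
static IOReturn exampleWiredTransfer( IOMemoryDescriptor * md )
{
    IOReturn rc = md->prepare( kIODirectionIn );    /* page in and wire down */
    if( kIOReturnSuccess != rc)
        return( rc );

    /* ... program the hardware and perform the transfer here ... */

    return( md->complete( kIODirectionIn ));        /* unwire when done */
}
#endif
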
971 IOReturn IOGeneralMemoryDescriptor::doMap(
972 vm_map_t addressMap,
973 IOVirtualAddress * atAddress,
974 IOOptionBits options,
975 IOByteCount sourceOffset = 0,
976 IOByteCount length = 0 )
977 {
978 kern_return_t kr;
979
980 // mapping source == dest? (could be much better)
981 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
982 && (1 == _rangesCount) && (0 == sourceOffset)
983 && (length <= _ranges.v[0].length) ) {
984 *atAddress = _ranges.v[0].address;
985 return( kIOReturnSuccess );
986 }
987
988 if( _task && _memEntry && (_flags & kIOMemoryRequiresWire)) {
989
990 do {
991
992 if( (1 != _rangesCount)
993 || (kIOMapDefaultCache != (options & kIOMapCacheMask)) ) {
994 kr = kIOReturnUnsupported;
995 continue;
996 }
997
998 if( 0 == length)
999 length = getLength();
1000 if( (sourceOffset + length) > _ranges.v[0].length) {
1001 kr = kIOReturnBadArgument;
1002 continue;
1003 }
1004
1005 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1006 vm_prot_t prot = VM_PROT_READ
1007 | ((options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
1008
1009 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1010 if( options & kIOMapAnywhere)
1011 *atAddress = 0;
1012
1013 if( 0 == sharedMem)
1014 kr = kIOReturnVMError;
1015 else
1016 kr = KERN_SUCCESS;
1017
1018 if( KERN_SUCCESS == kr)
1019 kr = vm_map( addressMap,
1020 atAddress,
1021 length, 0 /* mask */,
1022 (( options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1023 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1024 sharedMem, sourceOffset,
1025 false, // copy
1026 prot, // cur
1027 prot, // max
1028 VM_INHERIT_NONE);
1029
1030 } while( false );
1031
1032 } else
1033 kr = super::doMap( addressMap, atAddress,
1034 options, sourceOffset, length );
1035 return( kr );
1036 }
1037
1038 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1039 vm_map_t addressMap,
1040 IOVirtualAddress logical,
1041 IOByteCount length )
1042 {
1043 // could be much better
1044 if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
1045 && (logical == _ranges.v[0].address)
1046 && (length <= _ranges.v[0].length) )
1047 return( kIOReturnSuccess );
1048
1049 return( super::doUnmap( addressMap, logical, length ));
1050 }
1051
1052 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1053
1054 extern "C" {
1055 // osfmk/device/iokit_rpc.c
1056 extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1057 vm_size_t length, unsigned int mapFlags);
1058 };
1059
1060 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1061
1062 static IORecursiveLock * gIOMemoryLock;
1063
1064 #define LOCK IORecursiveLockLock( gIOMemoryLock)
1065 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
1066
1067 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1068
1069 OSDefineMetaClass( IOMemoryMap, OSObject )
1070 OSDefineAbstractStructors( IOMemoryMap, OSObject )
1071
1072 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1073
1074 class _IOMemoryMap : public IOMemoryMap
1075 {
1076 OSDeclareDefaultStructors(_IOMemoryMap)
1077
1078 IOMemoryDescriptor * memory;
1079 IOMemoryMap * superMap;
1080 IOByteCount offset;
1081 IOByteCount length;
1082 IOVirtualAddress logical;
1083 task_t addressTask;
1084 vm_map_t addressMap;
1085 IOOptionBits options;
1086
1087 public:
1088 virtual void free();
1089
1090 // IOMemoryMap methods
1091 virtual IOVirtualAddress getVirtualAddress();
1092 virtual IOByteCount getLength();
1093 virtual task_t getAddressTask();
1094 virtual IOMemoryDescriptor * getMemoryDescriptor();
1095 virtual IOOptionBits getMapOptions();
1096
1097 virtual IOReturn unmap();
1098 virtual void taskDied();
1099
1100 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1101 IOByteCount * length);
1102
1103 // for IOMemoryDescriptor use
1104 _IOMemoryMap * isCompatible(
1105 IOMemoryDescriptor * owner,
1106 task_t intoTask,
1107 IOVirtualAddress toAddress,
1108 IOOptionBits options,
1109 IOByteCount offset,
1110 IOByteCount length );
1111
1112 bool init(
1113 IOMemoryDescriptor * memory,
1114 IOMemoryMap * superMap,
1115 IOByteCount offset,
1116 IOByteCount length );
1117
1118 bool init(
1119 IOMemoryDescriptor * memory,
1120 task_t intoTask,
1121 IOVirtualAddress toAddress,
1122 IOOptionBits options,
1123 IOByteCount offset,
1124 IOByteCount length );
1125 };
1126
1127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1128
1129 #undef super
1130 #define super IOMemoryMap
1131
1132 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1133
1134 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1135
1136 bool _IOMemoryMap::init(
1137 IOMemoryDescriptor * _memory,
1138 IOMemoryMap * _superMap,
1139 IOByteCount _offset,
1140 IOByteCount _length )
1141 {
1142
1143 if( !super::init())
1144 return( false);
1145
1146 if( (_offset + _length) > _superMap->getLength())
1147 return( false);
1148
1149 _memory->retain();
1150 memory = _memory;
1151 _superMap->retain();
1152 superMap = _superMap;
1153
1154 offset = _offset;
1155 if( _length)
1156 length = _length;
1157 else
1158 length = _memory->getLength();
1159
1160 options = superMap->getMapOptions();
1161 logical = superMap->getVirtualAddress() + offset;
1162
1163 return( true );
1164 }
1165
1166 bool _IOMemoryMap::init(
1167 IOMemoryDescriptor * _memory,
1168 task_t intoTask,
1169 IOVirtualAddress toAddress,
1170 IOOptionBits _options,
1171 IOByteCount _offset,
1172 IOByteCount _length )
1173 {
1174 bool ok;
1175
1176 if( (!_memory) || (!intoTask) || !super::init())
1177 return( false);
1178
1179 if( (_offset + _length) > _memory->getLength())
1180 return( false);
1181
1182 addressMap = get_task_map(intoTask);
1183 if( !addressMap)
1184 return( false);
1185 kernel_vm_map_reference(addressMap);
1186
1187 _memory->retain();
1188 memory = _memory;
1189
1190 offset = _offset;
1191 if( _length)
1192 length = _length;
1193 else
1194 length = _memory->getLength();
1195
1196 addressTask = intoTask;
1197 logical = toAddress;
1198 options = _options;
1199
1200 if( options & kIOMapStatic)
1201 ok = true;
1202 else
1203 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1204 options, offset, length ));
1205 if( !ok) {
1206 logical = 0;
1207 _memory->release();
1208 vm_map_deallocate(addressMap);
1209 addressMap = 0;
1210 }
1211 return( ok );
1212 }
1213
1214 IOReturn IOMemoryDescriptor::doMap(
1215 vm_map_t addressMap,
1216 IOVirtualAddress * atAddress,
1217 IOOptionBits options,
1218 IOByteCount sourceOffset = 0,
1219 IOByteCount length = 0 )
1220 {
1221 IOReturn err = kIOReturnSuccess;
1222 vm_size_t ourSize;
1223 vm_size_t bytes;
1224 vm_offset_t mapped;
1225 vm_address_t logical;
1226 IOByteCount pageOffset;
1227 IOPhysicalLength segLen;
1228 IOPhysicalAddress physAddr;
1229
1230 if( 0 == length)
1231 length = getLength();
1232
1233 physAddr = getPhysicalSegment( sourceOffset, &segLen );
1234 assert( physAddr );
1235
1236 pageOffset = physAddr - trunc_page( physAddr );
1237 ourSize = length + pageOffset;
1238 physAddr -= pageOffset;
1239
1240 logical = *atAddress;
1241 if( 0 == (options & kIOMapAnywhere)) {
1242 mapped = trunc_page( logical );
1243 if( (logical - mapped) != pageOffset)
1244 err = kIOReturnVMError;
1245 }
1246 if( kIOReturnSuccess == err)
1247 err = vm_allocate( addressMap, &mapped, ourSize,
1248 ((options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1249 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1250
1251 if( err) {
1252 #ifdef DEBUG
1253 kprintf("IOMemoryDescriptor::doMap: vm_allocate() "
1254 "returned %08x\n", err);
1255 #endif
1256 return( err);
1257 }
1258
1259 // we have to make sure that these guys don't get copied if we fork.
1260 err = vm_inherit( addressMap, mapped, ourSize, VM_INHERIT_NONE);
1261 if( err != KERN_SUCCESS) {
1262 doUnmap( addressMap, mapped, ourSize); // back out
1263 return( err);
1264 }
1265
1266 logical = mapped;
1267 *atAddress = mapped + pageOffset;
1268
1269 segLen += pageOffset;
1270 bytes = ourSize;
1271 do {
1272 // in the middle of the loop only map whole pages
1273 if( segLen >= bytes)
1274 segLen = bytes;
1275 else if( segLen != trunc_page( segLen))
1276 err = kIOReturnVMError;
1277 if( physAddr != trunc_page( physAddr))
1278 err = kIOReturnBadArgument;
1279
1280 #ifdef DEBUG
1281 if( kIOLogMapping & gIOKitDebug)
1282 kprintf("_IOMemoryMap::map(%x) %08x->%08x:%08x\n",
1283 addressMap, mapped + pageOffset, physAddr + pageOffset,
1284 segLen - pageOffset);
1285 #endif
1286
1287 if( kIOReturnSuccess == err)
1288 err = IOMapPages( addressMap, mapped, physAddr, segLen, options );
1289 if( err)
1290 break;
1291
1292 sourceOffset += segLen - pageOffset;
1293 mapped += segLen;
1294 bytes -= segLen;
1295 pageOffset = 0;
1296
1297 } while( bytes
1298 && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));
1299
1300 if( bytes)
1301 err = kIOReturnBadArgument;
1302 if( err)
1303 doUnmap( addressMap, logical, ourSize );
1304 else
1305 mapped = true;
1306
1307 return( err );
1308 }
1309
1310 IOReturn IOMemoryDescriptor::doUnmap(
1311 vm_map_t addressMap,
1312 IOVirtualAddress logical,
1313 IOByteCount length )
1314 {
1315 IOReturn err;
1316
1317 #ifdef DEBUG
1318 if( kIOLogMapping & gIOKitDebug)
1319 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1320 addressMap, logical, length );
1321 #endif
1322
1323 if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task())))
1324 err = vm_deallocate( addressMap, logical, length );
1325 else
1326 err = kIOReturnSuccess;
1327
1328 return( err );
1329 }
1330
1331 IOReturn _IOMemoryMap::unmap( void )
1332 {
1333 IOReturn err;
1334
1335 LOCK;
1336
1337 if( logical && addressMap && (0 == superMap)
1338 && (0 == (options & kIOMapStatic))) {
1339
1340 err = memory->doUnmap( addressMap, logical, length );
1341 vm_map_deallocate(addressMap);
1342 addressMap = 0;
1343
1344 } else
1345 err = kIOReturnSuccess;
1346
1347 logical = 0;
1348
1349 UNLOCK;
1350
1351 return( err );
1352 }
1353
1354 void _IOMemoryMap::taskDied( void )
1355 {
1356 LOCK;
1357 if( addressMap) {
1358 vm_map_deallocate(addressMap);
1359 addressMap = 0;
1360 }
1361 addressTask = 0;
1362 logical = 0;
1363 UNLOCK;
1364 }
1365
1366 void _IOMemoryMap::free()
1367 {
1368 unmap();
1369
1370 if( memory) {
1371 LOCK;
1372 memory->removeMapping( this);
1373 UNLOCK;
1374 memory->release();
1375 }
1376
1377 if( superMap)
1378 superMap->release();
1379
1380 super::free();
1381 }
1382
1383 IOByteCount _IOMemoryMap::getLength()
1384 {
1385 return( length );
1386 }
1387
1388 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
1389 {
1390 return( logical);
1391 }
1392
1393 task_t _IOMemoryMap::getAddressTask()
1394 {
1395 if( superMap)
1396 return( superMap->getAddressTask());
1397 else
1398 return( addressTask);
1399 }
1400
1401 IOOptionBits _IOMemoryMap::getMapOptions()
1402 {
1403 return( options);
1404 }
1405
1406 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
1407 {
1408 return( memory );
1409 }
1410
1411 _IOMemoryMap * _IOMemoryMap::isCompatible(
1412 IOMemoryDescriptor * owner,
1413 task_t task,
1414 IOVirtualAddress toAddress,
1415 IOOptionBits _options,
1416 IOByteCount _offset,
1417 IOByteCount _length )
1418 {
1419 _IOMemoryMap * mapping;
1420
1421 if( (!task) || (task != getAddressTask()))
1422 return( 0 );
1423 if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
1424 return( 0 );
1425
1426 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
1427 return( 0 );
1428
1429 if( _offset < offset)
1430 return( 0 );
1431
1432 _offset -= offset;
1433
1434 if( (_offset + _length) > length)
1435 return( 0 );
1436
1437 if( (length == _length) && (!_offset)) {
1438 retain();
1439 mapping = this;
1440
1441 } else {
1442 mapping = new _IOMemoryMap;
1443 if( mapping
1444 && !mapping->init( owner, this, _offset, _length )) {
1445 mapping->release();
1446 mapping = 0;
1447 }
1448 }
1449
1450 return( mapping );
1451 }
1452
1453 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
1454 IOPhysicalLength * length)
1455 {
1456 IOPhysicalAddress address;
1457
1458 LOCK;
1459 address = memory->getPhysicalSegment( offset + _offset, length );
1460 UNLOCK;
1461
1462 return( address );
1463 }
1464
1465 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1466
1467 #undef super
1468 #define super OSObject
1469
1470 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1471
1472 void IOMemoryDescriptor::initialize( void )
1473 {
1474 if( 0 == gIOMemoryLock)
1475 gIOMemoryLock = IORecursiveLockAlloc();
1476 }
1477
1478 void IOMemoryDescriptor::free( void )
1479 {
1480 if( _mappings)
1481 _mappings->release();
1482
1483 super::free();
1484 }
1485
1486 IOMemoryMap * IOMemoryDescriptor::setMapping(
1487 task_t intoTask,
1488 IOVirtualAddress mapAddress,
1489 IOOptionBits options = 0 )
1490 {
1491 _IOMemoryMap * map;
1492
1493 map = new _IOMemoryMap;
1494
1495 LOCK;
1496
1497 if( map
1498 && !map->init( this, intoTask, mapAddress,
1499 options | kIOMapStatic, 0, getLength() )) {
1500 map->release();
1501 map = 0;
1502 }
1503
1504 addMapping( map);
1505
1506 UNLOCK;
1507
1508 return( map);
1509 }
1510
1511 IOMemoryMap * IOMemoryDescriptor::map(
1512 IOOptionBits options = 0 )
1513 {
1514
1515 return( makeMapping( this, kernel_task, 0,
1516 options | kIOMapAnywhere,
1517 0, getLength() ));
1518 }
1519
1520 IOMemoryMap * IOMemoryDescriptor::map(
1521 task_t intoTask,
1522 IOVirtualAddress toAddress,
1523 IOOptionBits options,
1524 IOByteCount offset = 0,
1525 IOByteCount length = 0 )
1526 {
1527 if( 0 == length)
1528 length = getLength();
1529
1530 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
1531 }
1532
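/*
 * Usage sketch (illustrative only, compiled out): mapping a descriptor into
 * the kernel task and using the resulting IOMemoryMap.  exampleMap is
 * hypothetical.
 */
#if 0
static void exampleMap( IOMemoryDescriptor * md )
{
    IOMemoryMap * map = md->map( kIOMapAnywhere );  /* into the kernel task */

    if( map) {
        IOLog("mapped at %08lx for %08lx bytes\n",
                (unsigned long) map->getVirtualAddress(),
                (unsigned long) map->getLength());
        map->release();                 /* releasing the map unmaps it */
    }
}
#endif
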
1533 IOMemoryMap * IOMemoryDescriptor::makeMapping(
1534 IOMemoryDescriptor * owner,
1535 task_t intoTask,
1536 IOVirtualAddress toAddress,
1537 IOOptionBits options,
1538 IOByteCount offset,
1539 IOByteCount length )
1540 {
1541 _IOMemoryMap * mapping = 0;
1542 OSIterator * iter;
1543
1544 LOCK;
1545
1546 do {
1547 // look for an existing mapping
1548 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1549
1550 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
1551
1552 if( (mapping = mapping->isCompatible(
1553 owner, intoTask, toAddress,
1554 options | kIOMapReference,
1555 offset, length )))
1556 break;
1557 }
1558 iter->release();
1559 if( mapping)
1560 continue;
1561 }
1562
1563
1564 if( mapping || (options & kIOMapReference))
1565 continue;
1566
1567 owner = this;
1568
1569 mapping = new _IOMemoryMap;
1570 if( mapping
1571 && !mapping->init( owner, intoTask, toAddress, options,
1572 offset, length )) {
1573
1574 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
1575 mapping->release();
1576 mapping = 0;
1577 }
1578
1579 } while( false );
1580
1581 owner->addMapping( mapping);
1582
1583 UNLOCK;
1584
1585 return( mapping);
1586 }
1587
1588 void IOMemoryDescriptor::addMapping(
1589 IOMemoryMap * mapping )
1590 {
1591 if( mapping) {
1592 if( 0 == _mappings)
1593 _mappings = OSSet::withCapacity(1);
1594 if( _mappings && _mappings->setObject( mapping ))
1595 mapping->release(); /* really */
1596 }
1597 }
1598
1599 void IOMemoryDescriptor::removeMapping(
1600 IOMemoryMap * mapping )
1601 {
1602 if( _mappings) {
1603 mapping->retain();
1604 mapping->retain();
1605 _mappings->removeObject( mapping);
1606 }
1607 }
1608
1609 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1610
1611 #undef super
1612 #define super IOMemoryDescriptor
1613
1614 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
1615
1616 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1617
1618 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
1619 IOByteCount offset, IOByteCount length,
1620 IODirection withDirection )
1621 {
1622 if( !super::init())
1623 return( false );
1624
1625 if( !parent)
1626 return( false);
1627
1628 if( (offset + length) > parent->getLength())
1629 return( false);
1630
1631 parent->retain();
1632 _parent = parent;
1633 _start = offset;
1634 _length = length;
1635 _direction = withDirection;
1636 _tag = parent->getTag();
1637
1638 return( true );
1639 }
1640
1641 void IOSubMemoryDescriptor::free( void )
1642 {
1643 if( _parent)
1644 _parent->release();
1645
1646 super::free();
1647 }
1648
1649
1650 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
1651 IOByteCount * length )
1652 {
1653 IOPhysicalAddress address;
1654 IOByteCount actualLength;
1655
1656 assert(offset <= _length);
1657
1658 if( length)
1659 *length = 0;
1660
1661 if( offset >= _length)
1662 return( 0 );
1663
1664 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
1665
1666 if( address && length)
1667 *length = min( _length - offset, actualLength );
1668
1669 return( address );
1670 }
1671
1672 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1673 IOByteCount * lengthOfSegment)
1674 {
1675 return( 0 );
1676 }
1677
1678 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
1679 void * bytes, IOByteCount withLength)
1680 {
1681 IOByteCount byteCount;
1682
1683 assert(offset <= _length);
1684
1685 if( offset >= _length)
1686 return( 0 );
1687
1688 LOCK;
1689 byteCount = _parent->readBytes( _start + offset, bytes,
1690 min(withLength, _length - offset) );
1691 UNLOCK;
1692
1693 return( byteCount );
1694 }
1695
1696 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
1697 const void* bytes, IOByteCount withLength)
1698 {
1699 IOByteCount byteCount;
1700
1701 assert(offset <= _length);
1702
1703 if( offset >= _length)
1704 return( 0 );
1705
1706 LOCK;
1707 byteCount = _parent->writeBytes( _start + offset, bytes,
1708 min(withLength, _length - offset) );
1709 UNLOCK;
1710
1711 return( byteCount );
1712 }
1713
1714 IOReturn IOSubMemoryDescriptor::prepare(
1715 IODirection forDirection = kIODirectionNone)
1716 {
1717 IOReturn err;
1718
1719 LOCK;
1720 err = _parent->prepare( forDirection);
1721 UNLOCK;
1722
1723 return( err );
1724 }
1725
1726 IOReturn IOSubMemoryDescriptor::complete(
1727 IODirection forDirection = kIODirectionNone)
1728 {
1729 IOReturn err;
1730
1731 LOCK;
1732 err = _parent->complete( forDirection);
1733 UNLOCK;
1734
1735 return( err );
1736 }
1737
1738 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
1739 IOMemoryDescriptor * owner,
1740 task_t intoTask,
1741 IOVirtualAddress toAddress,
1742 IOOptionBits options,
1743 IOByteCount offset,
1744 IOByteCount length )
1745 {
1746 IOMemoryMap * mapping;
1747
1748 mapping = (IOMemoryMap *) _parent->makeMapping(
1749 _parent, intoTask,
1750 toAddress - (_start + offset),
1751 options | kIOMapReference,
1752 _start + offset, length );
1753
1754 if( !mapping)
1755 mapping = super::makeMapping( owner, intoTask, toAddress, options,
1756 offset, length );
1757
1758 return( mapping );
1759 }
1760
1761 /* ick */
1762
1763 bool
1764 IOSubMemoryDescriptor::initWithAddress(void * address,
1765 IOByteCount withLength,
1766 IODirection withDirection)
1767 {
1768 return( false );
1769 }
1770
1771 bool
1772 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
1773 IOByteCount withLength,
1774 IODirection withDirection,
1775 task_t withTask)
1776 {
1777 return( false );
1778 }
1779
1780 bool
1781 IOSubMemoryDescriptor::initWithPhysicalAddress(
1782 IOPhysicalAddress address,
1783 IOByteCount withLength,
1784 IODirection withDirection )
1785 {
1786 return( false );
1787 }
1788
1789 bool
1790 IOSubMemoryDescriptor::initWithRanges(
1791 IOVirtualRange * ranges,
1792 UInt32 withCount,
1793 IODirection withDirection,
1794 task_t withTask,
1795 bool asReference = false)
1796 {
1797 return( false );
1798 }
1799
1800 bool
1801 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1802 UInt32 withCount,
1803 IODirection withDirection,
1804 bool asReference = false)
1805 {
1806 return( false );
1807 }
1808
1809 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1810
1811 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 0);
1812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
1813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
1814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
1815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
1816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
1817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
1818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
1819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
1820 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
1821 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
1822 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
1823 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
1824 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
1825 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
1826 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);