1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
27 *
28 * HISTORY
29 *
30 */
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36
37 #include <IOKit/IOKitDebug.h>
38
39 #include <libkern/c++/OSContainers.h>
40 #include <libkern/c++/OSDictionary.h>
41 #include <libkern/c++/OSArray.h>
42 #include <libkern/c++/OSSymbol.h>
43 #include <libkern/c++/OSNumber.h>
44 #include <sys/cdefs.h>
45
46 __BEGIN_DECLS
47 #include <vm/pmap.h>
48 #include <device/device_port.h>
49 void bcopy_phys(char *from, char *to, int size);
50 void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
51 vm_prot_t prot, unsigned int flags, boolean_t wired);
52 #ifndef i386
53 struct phys_entry *pmap_find_physentry(vm_offset_t pa);
54 #endif
55 void ipc_port_release_send(ipc_port_t port);
56 vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
57
58 memory_object_t
59 device_pager_setup(
60 memory_object_t pager,
61 int device_handle,
62 vm_size_t size,
63 int flags);
64 void
65 device_pager_deallocate(
66 memory_object_t);
67 kern_return_t
68 device_pager_populate_object(
69 memory_object_t pager,
70 vm_object_offset_t offset,
71 vm_offset_t phys_addr,
72 vm_size_t size);
73
74 /*
75 * Page fault handling based on vm_map (or entries therein)
76 */
77 extern kern_return_t vm_fault(
78 vm_map_t map,
79 vm_offset_t vaddr,
80 vm_prot_t fault_type,
81 boolean_t change_wiring,
82 int interruptible,
83 pmap_t caller_pmap,
84 vm_offset_t caller_pmap_addr);
85
86 __END_DECLS
87
88 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
89
90 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
91
92 #define super IOMemoryDescriptor
93
94 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
95
96 extern "C" {
97
98 vm_map_t IOPageableMapForAddress( vm_address_t address );
99
100 typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
101
102 kern_return_t IOIteratePageableMaps(vm_size_t size,
103 IOIteratePageableMapsCallback callback, void * ref);
104
105 }
106
107 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108
109 static IORecursiveLock * gIOMemoryLock;
110
111 #define LOCK IORecursiveLockLock( gIOMemoryLock)
112 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
113 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
114 #define WAKEUP \
115 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
116
117 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
118
119 inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
120 {
121 if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
122 return( IOPageableMapForAddress( address ) );
123 else
124 return( get_task_map( task ));
125 }
126
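/*
 * pmap_extract_safe: translate a task virtual address to a physical
 * address, falling back to vm_map_get_phys_page() when the task's pmap
 * has no valid mapping for the page.
 */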
127 inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
128 {
129 vm_offset_t pa = pmap_extract(get_task_pmap(task), va);
130
131 if ( pa == 0 )
132 {
133 pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
134 if ( pa ) pa += va - trunc_page(va);
135 }
136
137 return pa;
138 }
139
140 inline void bcopy_phys_safe(char * from, char * to, int size)
141 {
142 boolean_t enabled = ml_set_interrupts_enabled(FALSE);
143
144 bcopy_phys(from, to, size);
145
146 ml_set_interrupts_enabled(enabled);
147 }
148
149 #define next_page(a) ( trunc_page(a) + page_size )
150
151
152 extern "C" {
153
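/*
 * device_data_action / device_close: callbacks invoked by the device
 * pager. device_data_action resolves a fault on device memory by handing
 * it to the owning IOMemoryDescriptor's handleFault(); device_close frees
 * the ExpansionData that was passed as the device handle.
 */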
154 kern_return_t device_data_action(
155 int device_handle,
156 ipc_port_t device_pager,
157 vm_prot_t protection,
158 vm_object_offset_t offset,
159 vm_size_t size)
160 {
161 struct ExpansionData {
162 void * devicePager;
163 unsigned int pagerContig:1;
164 unsigned int unused:31;
165 IOMemoryDescriptor * memory;
166 };
167 kern_return_t kr;
168 ExpansionData * ref = (ExpansionData *) device_handle;
169 IOMemoryDescriptor * memDesc;
170
171 LOCK;
172 memDesc = ref->memory;
173 if( memDesc)
174 kr = memDesc->handleFault( device_pager, 0, 0,
175 offset, size, kIOMapDefaultCache /*?*/);
176 else
177 kr = KERN_ABORTED;
178 UNLOCK;
179
180 return( kr );
181 }
182
183 kern_return_t device_close(
184 int device_handle)
185 {
186 struct ExpansionData {
187 void * devicePager;
188 unsigned int pagerContig:1;
189 unsigned int unused:31;
190 IOMemoryDescriptor * memory;
191 };
192 ExpansionData * ref = (ExpansionData *) device_handle;
193
194 IODelete( ref, ExpansionData, 1 );
195
196 return( kIOReturnSuccess );
197 }
198
199 }
200
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
202
203 /*
204 * withAddress:
205 *
206 * Create a new IOMemoryDescriptor. The buffer is a virtual address
207 * relative to the specified task. If no task is supplied, the kernel
208 * task is implied.
209 */
210 IOMemoryDescriptor *
211 IOMemoryDescriptor::withAddress(void * address,
212 IOByteCount withLength,
213 IODirection withDirection)
214 {
215 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
216 if (that)
217 {
218 if (that->initWithAddress(address, withLength, withDirection))
219 return that;
220
221 that->release();
222 }
223 return 0;
224 }
225
226 IOMemoryDescriptor *
227 IOMemoryDescriptor::withAddress(vm_address_t address,
228 IOByteCount withLength,
229 IODirection withDirection,
230 task_t withTask)
231 {
232 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
233 if (that)
234 {
235 if (that->initWithAddress(address, withLength, withDirection, withTask))
236 return that;
237
238 that->release();
239 }
240 return 0;
241 }
242
243 IOMemoryDescriptor *
244 IOMemoryDescriptor::withPhysicalAddress(
245 IOPhysicalAddress address,
246 IOByteCount withLength,
247 IODirection withDirection )
248 {
249 return( IOMemoryDescriptor::withAddress( address, withLength,
250 withDirection, (task_t) 0 ));
251 }
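
/*
 * Illustrative sketch: how a driver might use the factories above to
 * describe a client buffer for output to a device. The names
 * exampleDescribeClientBuffer, userTask, userVA and userLen are
 * hypothetical.
 */
#if 0
static IOMemoryDescriptor *
exampleDescribeClientBuffer( task_t userTask, vm_address_t userVA,
                             IOByteCount userLen )
{
    // kIODirectionOut: the device will read from this memory.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
                                  userVA, userLen, kIODirectionOut, userTask );

    return( md );   // caller releases the descriptor when finished
}
#endif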
252
253
254 /*
255 * withRanges:
256 *
257 * Create a new IOMemoryDescriptor. The buffer is made up of several
258 * virtual address ranges, from a given task.
259 *
260 * Passing the ranges as a reference will avoid an extra allocation.
261 */
262 IOMemoryDescriptor *
263 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
264 UInt32 withCount,
265 IODirection withDirection,
266 task_t withTask,
267 bool asReference = false)
268 {
269 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
270 if (that)
271 {
272 if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
273 return that;
274
275 that->release();
276 }
277 return 0;
278 }
279
280 IOMemoryDescriptor *
281 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
282 UInt32 withCount,
283 IODirection withDirection,
284 bool asReference = false)
285 {
286 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
287 if (that)
288 {
289 if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
290 return that;
291
292 that->release();
293 }
294 return 0;
295 }
296
297 IOMemoryDescriptor *
298 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
299 IOByteCount offset,
300 IOByteCount length,
301 IODirection withDirection)
302 {
303 IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;
304
305 if (that && !that->initSubRange(of, offset, length, withDirection)) {
306 that->release();
307 that = 0;
308 }
309 return that;
310 }
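
/*
 * Illustrative sketch: building a descriptor over two discontiguous
 * kernel ranges and then a sub-range view of it. The names
 * exampleRangesAndSubRange, bufA, lenA, bufB and lenB are hypothetical.
 */
#if 0
static void exampleRangesAndSubRange( vm_address_t bufA, IOByteCount lenA,
                                      vm_address_t bufB, IOByteCount lenB )
{
    IOVirtualRange ranges[2];
    ranges[0].address = bufA;   ranges[0].length = lenA;
    ranges[1].address = bufB;   ranges[1].length = lenB;

    // asReference defaults to false, so the ranges array is copied and
    // need not outlive this function.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
                                  ranges, 2, kIODirectionIn, kernel_task );
    if( md) {
        // A sub-descriptor covering the second range only.
        IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
                                       md, lenA, lenB, kIODirectionIn );
        if( sub)
            sub->release();
        md->release();
    }
}
#endif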
311
312 /*
313 * initWithAddress:
314 *
315 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
316 * relative to the specified task. If no task is supplied, the kernel
317 * task is implied.
318 *
319 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
320 * initWithRanges again on an existing instance -- note this behavior
321 * is not commonly supported in other I/O Kit classes, although it is
322 * supported here.
323 */
324 bool
325 IOGeneralMemoryDescriptor::initWithAddress(void * address,
326 IOByteCount withLength,
327 IODirection withDirection)
328 {
329 _singleRange.v.address = (vm_address_t) address;
330 _singleRange.v.length = withLength;
331
332 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
333 }
334
335 bool
336 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
337 IOByteCount withLength,
338 IODirection withDirection,
339 task_t withTask)
340 {
341 _singleRange.v.address = address;
342 _singleRange.v.length = withLength;
343
344 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
345 }
346
347 bool
348 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
349 IOPhysicalAddress address,
350 IOByteCount withLength,
351 IODirection withDirection )
352 {
353 _singleRange.p.address = address;
354 _singleRange.p.length = withLength;
355
356 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
357 }
358
359 /*
360 * initWithRanges:
361 *
362 * Initialize an IOMemoryDescriptor. The buffer is made up of several
363 * virtual address ranges, from a given task.
364 *
365 * Passing the ranges as a reference will avoid an extra allocation.
366 *
367 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
368 * initWithRanges again on an existing instance -- note this behavior
369 * is not commonly supported in other I/O Kit classes, although it is
370 * supported here.
371 */
372 bool
373 IOGeneralMemoryDescriptor::initWithRanges(
374 IOVirtualRange * ranges,
375 UInt32 withCount,
376 IODirection withDirection,
377 task_t withTask,
378 bool asReference = false)
379 {
380 assert(ranges);
381 assert(withCount);
382
383 /*
384 * We can check the _initialized instance variable before having ever set
385 * it to an initial value because I/O Kit guarantees that all our instance
386 * variables are zeroed on an object's allocation.
387 */
388
389 if (_initialized == false)
390 {
391 if (super::init() == false) return false;
392 _initialized = true;
393 }
394 else
395 {
396 /*
397 * An existing memory descriptor is being retargeted to point to
398 * somewhere else. Clean up our present state.
399 */
400
401 assert(_wireCount == 0);
402
403 while (_wireCount)
404 complete();
405 if (_kernPtrAligned)
406 unmapFromKernel();
407 if (_ranges.v && _rangesIsAllocated)
408 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
409 }
410
411 /*
412 * Initialize the memory descriptor.
413 */
414
415 _ranges.v = 0;
416 _rangesCount = withCount;
417 _rangesIsAllocated = asReference ? false : true;
418 _direction = withDirection;
419 _length = 0;
420 _task = withTask;
421 _position = 0;
422 _positionAtIndex = 0;
423 _positionAtOffset = 0;
424 _kernPtrAligned = 0;
425 _cachedPhysicalAddress = 0;
426 _cachedVirtualAddress = 0;
427 _flags = 0;
428
429 if (withTask && (withTask != kernel_task))
430 _flags |= kIOMemoryRequiresWire;
431
432 if (asReference)
433 _ranges.v = ranges;
434 else
435 {
436 _ranges.v = IONew(IOVirtualRange, withCount);
437 if (_ranges.v == 0) return false;
438 bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
439 }
440
441 for (unsigned index = 0; index < _rangesCount; index++)
442 {
443 _length += _ranges.v[index].length;
444 }
445
446 return true;
447 }
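
/*
 * Illustrative sketch of the re-use behaviour described above: the same
 * IOGeneralMemoryDescriptor instance is retargeted at a second buffer by
 * calling initWithAddress() again. The names exampleReuseDescriptor,
 * buf1, len1, buf2 and len2 are hypothetical.
 */
#if 0
static void exampleReuseDescriptor( void * buf1, IOByteCount len1,
                                    void * buf2, IOByteCount len2 )
{
    IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;

    if( md && !md->initWithAddress( buf1, len1, kIODirectionOut)) {
        md->release();
        md = 0;
    }
    if( md) {
        // ... use the descriptor for a transfer ...

        // Retarget the existing instance; the old ranges are cleaned up
        // inside initWithRanges().
        md->initWithAddress( buf2, len2, kIODirectionIn);
        md->release();
    }
}
#endif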
448
449 bool
450 IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
451 UInt32 withCount,
452 IODirection withDirection,
453 bool asReference = false)
454 {
455 #warning assuming virtual, physical addresses same size
456 return( initWithRanges( (IOVirtualRange *) ranges,
457 withCount, withDirection, (task_t) 0, asReference ));
458 }
459
460 /*
461 * free
462 *
463 * Free resources.
464 */
465 void IOGeneralMemoryDescriptor::free()
466 {
467 LOCK;
468 if( reserved)
469 reserved->memory = 0;
470 UNLOCK;
471
472 while (_wireCount)
473 complete();
474 if (_kernPtrAligned)
475 unmapFromKernel();
476 if (_ranges.v && _rangesIsAllocated)
477 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
478
479 if( reserved && reserved->devicePager)
480 device_pager_deallocate( reserved->devicePager );
481
482 // memEntry holds a ref on the device pager, which owns reserved (ExpansionData),
483 // so reserved must not be accessed after this point
484 if( _memEntry)
485 ipc_port_release_send( (ipc_port_t) _memEntry );
486 super::free();
487 }
488
489 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
490 /* DEPRECATED */ {
491 /* DEPRECATED */ kern_return_t krtn;
492 /* DEPRECATED */ vm_offset_t off;
493 /* DEPRECATED */ // Pull the shared pages out of the task map
494 /* DEPRECATED */ // Do we need to unwire it first?
495 /* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
496 /* DEPRECATED */ {
497 /* DEPRECATED */ pmap_change_wiring(
498 /* DEPRECATED */ kernel_pmap,
499 /* DEPRECATED */ _kernPtrAligned + off,
500 /* DEPRECATED */ FALSE);
501 /* DEPRECATED */
502 /* DEPRECATED */ pmap_remove(
503 /* DEPRECATED */ kernel_pmap,
504 /* DEPRECATED */ _kernPtrAligned + off,
505 /* DEPRECATED */ _kernPtrAligned + off + page_size);
506 /* DEPRECATED */ }
507 /* DEPRECATED */ // Free the former shmem area in the task
508 /* DEPRECATED */ krtn = vm_deallocate(kernel_map,
509 /* DEPRECATED */ _kernPtrAligned,
510 /* DEPRECATED */ _kernSize );
511 /* DEPRECATED */ assert(krtn == KERN_SUCCESS);
512 /* DEPRECATED */ _kernPtrAligned = 0;
513 /* DEPRECATED */ }
514 /* DEPRECATED */
515 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
516 /* DEPRECATED */ {
517 /* DEPRECATED */ kern_return_t krtn;
518 /* DEPRECATED */ vm_offset_t off;
519 /* DEPRECATED */
520 /* DEPRECATED */ if (_kernPtrAligned)
521 /* DEPRECATED */ {
522 /* DEPRECATED */ if (_kernPtrAtIndex == rangeIndex) return;
523 /* DEPRECATED */ unmapFromKernel();
524 /* DEPRECATED */ assert(_kernPtrAligned == 0);
525 /* DEPRECATED */ }
526 /* DEPRECATED */
527 /* DEPRECATED */ vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
528 /* DEPRECATED */
529 /* DEPRECATED */ _kernSize = trunc_page(_ranges.v[rangeIndex].address +
530 /* DEPRECATED */ _ranges.v[rangeIndex].length +
531 /* DEPRECATED */ page_size - 1) - srcAlign;
532 /* DEPRECATED */
533 /* DEPRECATED */ /* Find some memory of the same size in kernel task. We use vm_allocate() */
534 /* DEPRECATED */ /* to do this. vm_allocate inserts the found memory object in the */
535 /* DEPRECATED */ /* target task's map as a side effect. */
536 /* DEPRECATED */ krtn = vm_allocate( kernel_map,
537 /* DEPRECATED */ &_kernPtrAligned,
538 /* DEPRECATED */ _kernSize,
539 /* DEPRECATED */ VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
540 /* DEPRECATED */ assert(krtn == KERN_SUCCESS);
541 /* DEPRECATED */ if(krtn) return;
542 /* DEPRECATED */
543 /* DEPRECATED */ /* For each page in the area allocated from the kernel map, */
544 /* DEPRECATED */ /* find the physical address of the page. */
545 /* DEPRECATED */ /* Enter the page in the target task's pmap, at the */
546 /* DEPRECATED */ /* appropriate target task virtual address. */
547 /* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
548 /* DEPRECATED */ {
549 /* DEPRECATED */ vm_offset_t kern_phys_addr, phys_addr;
550 /* DEPRECATED */ if( _task)
551 /* DEPRECATED */ phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
552 /* DEPRECATED */ else
553 /* DEPRECATED */ phys_addr = srcAlign + off;
554 /* DEPRECATED */ assert(phys_addr);
555 /* DEPRECATED */ if(phys_addr == 0) return;
556 /* DEPRECATED */
557 /* DEPRECATED */ // Check original state.
558 /* DEPRECATED */ kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
559 /* DEPRECATED */ // Set virtual page to point to the right physical one
560 /* DEPRECATED */ pmap_enter(
561 /* DEPRECATED */ kernel_pmap,
562 /* DEPRECATED */ _kernPtrAligned + off,
563 /* DEPRECATED */ phys_addr,
564 /* DEPRECATED */ VM_PROT_READ|VM_PROT_WRITE,
565 /* DEPRECATED */ VM_WIMG_USE_DEFAULT,
566 /* DEPRECATED */ TRUE);
567 /* DEPRECATED */ }
568 /* DEPRECATED */ _kernPtrAtIndex = rangeIndex;
569 /* DEPRECATED */ }
570
571 /*
572 * getDirection:
573 *
574 * Get the direction of the transfer.
575 */
576 IODirection IOMemoryDescriptor::getDirection() const
577 {
578 return _direction;
579 }
580
581 /*
582 * getLength:
583 *
584 * Get the length of the transfer (over all ranges).
585 */
586 IOByteCount IOMemoryDescriptor::getLength() const
587 {
588 return _length;
589 }
590
591 void IOMemoryDescriptor::setTag(
592 IOOptionBits tag )
593 {
594 _tag = tag;
595 }
596
597 IOOptionBits IOMemoryDescriptor::getTag( void )
598 {
599 return( _tag);
600 }
601
602 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
603 IOByteCount * length )
604 {
605 IOPhysicalAddress physAddr = 0;
606
607 if( prepare() == kIOReturnSuccess) {
608 physAddr = getPhysicalSegment( offset, length );
609 complete();
610 }
611
612 return( physAddr );
613 }
614
615 IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
616 void * bytes,
617 IOByteCount withLength )
618 {
619 IOByteCount bytesCopied = 0;
620
621 assert(offset <= _length);
622 assert(offset <= _length - withLength);
623
624 if ( offset < _length )
625 {
626 withLength = min(withLength, _length - offset);
627
628 while ( withLength ) // (process another source segment?)
629 {
630 IOPhysicalAddress sourceSegment;
631 IOByteCount sourceSegmentLength;
632
633 sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
634 if ( sourceSegment == 0 ) goto readBytesErr;
635
636 sourceSegmentLength = min(sourceSegmentLength, withLength);
637
638 while ( sourceSegmentLength ) // (process another target segment?)
639 {
640 IOPhysicalAddress targetSegment;
641 IOByteCount targetSegmentLength;
642
643 targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
644 if ( targetSegment == 0 ) goto readBytesErr;
645
646 targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);
647
648 if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
649 {
650 IOByteCount pageLength;
651
652 pageLength = next_page(sourceSegment) - sourceSegment;
653
654 bcopy_phys_safe( /* from */ (char *) sourceSegment,
655 /* to */ (char *) targetSegment,
656 /* size */ (int ) pageLength );
657
658 ((UInt8 *) bytes) += pageLength;
659 bytesCopied += pageLength;
660 offset += pageLength;
661 sourceSegment += pageLength;
662 sourceSegmentLength -= pageLength;
663 targetSegment += pageLength;
664 targetSegmentLength -= pageLength;
665 withLength -= pageLength;
666 }
667
668 bcopy_phys_safe( /* from */ (char *) sourceSegment,
669 /* to */ (char *) targetSegment,
670 /* size */ (int ) targetSegmentLength );
671
672 ((UInt8 *) bytes) += targetSegmentLength;
673 bytesCopied += targetSegmentLength;
674 offset += targetSegmentLength;
675 sourceSegment += targetSegmentLength;
676 sourceSegmentLength -= targetSegmentLength;
677 withLength -= targetSegmentLength;
678 }
679 }
680 }
681
682 readBytesErr:
683
684 if ( bytesCopied )
685 {
686 // We mark the destination pages as modified, just
687 // in case they are made pageable later on in life.
688
689 pmap_modify_pages( /* pmap */ kernel_pmap,
690 /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
691 /* end */ round_page(((vm_offset_t) bytes)) );
692 }
693
694 return bytesCopied;
695 }
696
697 IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount offset,
698 const void * bytes,
699 IOByteCount withLength )
700 {
701 IOByteCount bytesCopied = 0;
702
703 assert(offset <= _length);
704 assert(offset <= _length - withLength);
705
706 if ( offset < _length )
707 {
708 withLength = min(withLength, _length - offset);
709
710 while ( withLength ) // (process another target segment?)
711 {
712 IOPhysicalAddress targetSegment;
713 IOByteCount targetSegmentLength;
714
715 targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
716 if ( targetSegment == 0 ) goto writeBytesErr;
717
718 targetSegmentLength = min(targetSegmentLength, withLength);
719
720 while ( targetSegmentLength ) // (process another source segment?)
721 {
722 IOPhysicalAddress sourceSegment;
723 IOByteCount sourceSegmentLength;
724
725 sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
726 if ( sourceSegment == 0 ) goto writeBytesErr;
727
728 sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);
729
730 if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
731 {
732 IOByteCount pageLength;
733
734 pageLength = next_page(targetSegment) - targetSegment;
735
736 bcopy_phys_safe( /* from */ (char *) sourceSegment,
737 /* to */ (char *) targetSegment,
738 /* size */ (int ) pageLength );
739
740 // We flush the data cache in case it is code we've copied,
741 // so that the instruction cache stays coherent with it.
742
743 flush_dcache(targetSegment, pageLength, true);
744
745 ((UInt8 *) bytes) += pageLength;
746 bytesCopied += pageLength;
747 offset += pageLength;
748 sourceSegment += pageLength;
749 sourceSegmentLength -= pageLength;
750 targetSegment += pageLength;
751 targetSegmentLength -= pageLength;
752 withLength -= pageLength;
753 }
754
755 bcopy_phys_safe( /* from */ (char *) sourceSegment,
756 /* to */ (char *) targetSegment,
757 /* size */ (int ) sourceSegmentLength );
758
759 // We flush the data cache in case it is code we've copied,
760 // so that the instruction cache stays coherent with it.
761
762 flush_dcache(targetSegment, sourceSegmentLength, true);
763
764 ((UInt8 *) bytes) += sourceSegmentLength;
765 bytesCopied += sourceSegmentLength;
766 offset += sourceSegmentLength;
767 targetSegment += sourceSegmentLength;
768 targetSegmentLength -= sourceSegmentLength;
769 withLength -= sourceSegmentLength;
770 }
771 }
772 }
773
774 writeBytesErr:
775
776 return bytesCopied;
777 }
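
/*
 * Illustrative sketch: using writeBytes()/readBytes() to move data between
 * a kernel buffer and the memory behind a descriptor. The names
 * exampleCopyInOut, md, scratch and scratchLen are hypothetical, and the
 * descriptor is assumed to already be prepared if it is pageable.
 */
#if 0
static IOReturn exampleCopyInOut( IOMemoryDescriptor * md,
                                  void * scratch, IOByteCount scratchLen )
{
    // Fill the start of the descriptor from the kernel buffer ...
    IOByteCount done = md->writeBytes( 0, scratch, scratchLen );
    if( done != scratchLen)
        return( kIOReturnUnderrun );

    // ... and read it back for verification.
    done = md->readBytes( 0, scratch, scratchLen );
    return( (done == scratchLen) ? kIOReturnSuccess : kIOReturnUnderrun );
}
#endif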
778
779 extern "C" {
780 // osfmk/device/iokit_rpc.c
781 extern unsigned int IOTranslateCacheBits(struct phys_entry *pp);
782 };
783
784 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
785 /* DEPRECATED */ {
786 /* DEPRECATED */ assert(position <= _length);
787 /* DEPRECATED */
788 /* DEPRECATED */ if (position >= _length)
789 /* DEPRECATED */ {
790 /* DEPRECATED */ _position = _length;
791 /* DEPRECATED */ _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
792 /* DEPRECATED */ _positionAtOffset = 0;
793 /* DEPRECATED */ return;
794 /* DEPRECATED */ }
795 /* DEPRECATED */
796 /* DEPRECATED */ if (position < _position)
797 /* DEPRECATED */ {
798 /* DEPRECATED */ _positionAtOffset = position;
799 /* DEPRECATED */ _positionAtIndex = 0;
800 /* DEPRECATED */ }
801 /* DEPRECATED */ else
802 /* DEPRECATED */ {
803 /* DEPRECATED */ _positionAtOffset += (position - _position);
804 /* DEPRECATED */ }
805 /* DEPRECATED */ _position = position;
806 /* DEPRECATED */
807 /* DEPRECATED */ while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
808 /* DEPRECATED */ {
809 /* DEPRECATED */ _positionAtOffset -= _ranges.v[_positionAtIndex].length;
810 /* DEPRECATED */ _positionAtIndex++;
811 /* DEPRECATED */ }
812 /* DEPRECATED */ }
813
814 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
815 IOByteCount * lengthOfSegment )
816 {
817 IOPhysicalAddress address = 0;
818 IOPhysicalLength length = 0;
819
820
821 // assert(offset <= _length);
822
823 if ( offset < _length ) // (within bounds?)
824 {
825 unsigned rangesIndex = 0;
826
827 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
828 {
829 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
830 }
831
832 if ( _task == 0 ) // (physical memory?)
833 {
834 address = _ranges.v[rangesIndex].address + offset;
835 length = _ranges.v[rangesIndex].length - offset;
836
837 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
838 {
839 if ( address + length != _ranges.v[rangesIndex].address ) break;
840
841 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
842 }
843 }
844 else // (virtual memory?)
845 {
846 vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;
847
848 assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);
849
850 address = pmap_extract_safe(_task, addressVirtual);
851 length = next_page(addressVirtual) - addressVirtual;
852 length = min(_ranges.v[rangesIndex].length - offset, length);
853 }
854
855 assert(address);
856 if ( address == 0 ) length = 0;
857 }
858
859 if ( lengthOfSegment ) *lengthOfSegment = length;
860
861 return address;
862 }
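
/*
 * Illustrative sketch: the typical getPhysicalSegment() loop a driver uses
 * to build a scatter/gather list for hardware. The names
 * exampleWalkSegments, md and exampleAddSGEntry are hypothetical; md is
 * assumed to be prepared.
 */
#if 0
static void exampleWalkSegments( IOMemoryDescriptor * md )
{
    IOByteCount offset = 0;

    while( offset < md->getLength()) {
        IOByteCount       segLen;
        IOPhysicalAddress segPhys = md->getPhysicalSegment( offset, &segLen );

        if( 0 == segPhys)
            break;                          // unmapped page or error

        exampleAddSGEntry( segPhys, segLen );
        offset += segLen;
    }
}
#endif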
863
864 IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount offset,
865 IOByteCount * lengthOfSegment )
866 {
867 IOPhysicalAddress address = 0;
868 IOPhysicalLength length = 0;
869
870 assert(offset <= _length);
871
872 if ( offset < _length ) // (within bounds?)
873 {
874 unsigned rangesIndex = 0;
875
876 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
877 {
878 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
879 }
880
881 address = _ranges.v[rangesIndex].address + offset;
882 length = _ranges.v[rangesIndex].length - offset;
883
884 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
885 {
886 if ( address + length != _ranges.v[rangesIndex].address ) break;
887
888 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
889 }
890
891 assert(address);
892 if ( address == 0 ) length = 0;
893 }
894
895 if ( lengthOfSegment ) *lengthOfSegment = length;
896
897 return address;
898 }
899
900 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
901 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
902 /* DEPRECATED */ IOByteCount * lengthOfSegment)
903 /* DEPRECATED */ {
904 /* DEPRECATED */ if( offset != _position)
905 /* DEPRECATED */ setPosition( offset );
906 /* DEPRECATED */
907 /* DEPRECATED */ assert(_position <= _length);
908 /* DEPRECATED */
909 /* DEPRECATED */ /* Fail gracefully if the position is at (or past) the end-of-buffer. */
910 /* DEPRECATED */ if (_position >= _length)
911 /* DEPRECATED */ {
912 /* DEPRECATED */ *lengthOfSegment = 0;
913 /* DEPRECATED */ return 0;
914 /* DEPRECATED */ }
915 /* DEPRECATED */
916 /* DEPRECATED */ /* Compute the relative length to the end of this virtual segment. */
917 /* DEPRECATED */ *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
918 /* DEPRECATED */
919 /* DEPRECATED */ /* Compute the relative address of this virtual segment. */
920 /* DEPRECATED */ if (_task == kernel_task)
921 /* DEPRECATED */ return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
922 /* DEPRECATED */ else
923 /* DEPRECATED */ {
924 /* DEPRECATED */ vm_offset_t off;
925 /* DEPRECATED */
926 /* DEPRECATED */ mapIntoKernel(_positionAtIndex);
927 /* DEPRECATED */
928 /* DEPRECATED */ off = _ranges.v[_kernPtrAtIndex].address;
929 /* DEPRECATED */ off -= trunc_page(off);
930 /* DEPRECATED */
931 /* DEPRECATED */ return (void *) (_kernPtrAligned + off + _positionAtOffset);
932 /* DEPRECATED */ }
933 /* DEPRECATED */ }
934 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
935
936 /*
937 * prepare
938 *
939 * Prepare the memory for an I/O transfer. This involves paging in
940 * the memory, if necessary, and wiring it down for the duration of
941 * the transfer. The complete() method completes the processing of
942 * the memory after the I/O transfer finishes. This method needn't
943 * be called for non-pageable memory.
944 */
945 IOReturn IOGeneralMemoryDescriptor::prepare(
946 IODirection forDirection = kIODirectionNone)
947 {
948 UInt rangeIndex = 0;
949
950 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
951 kern_return_t rc;
952
953 if(forDirection == kIODirectionNone)
954 forDirection = _direction;
955
956 vm_prot_t access;
957
958 switch (forDirection)
959 {
960 case kIODirectionIn:
961 access = VM_PROT_WRITE;
962 break;
963
964 case kIODirectionOut:
965 access = VM_PROT_READ;
966 break;
967
968 default:
969 access = VM_PROT_READ | VM_PROT_WRITE;
970 break;
971 }
972
973 //
974 // Check user read/write access to the data buffer.
975 //
976
977 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
978 {
979 vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
980 vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length );
981
982 while (checkSize)
983 {
984 vm_region_basic_info_data_t regionInfo;
985 mach_msg_type_number_t regionInfoSize = sizeof(regionInfo);
986 vm_size_t regionSize;
987
988 if ( (vm_region(
989 /* map */ getMapForTask(_task, checkBase),
990 /* address */ &checkBase,
991 /* size */ &regionSize,
992 /* flavor */ VM_REGION_BASIC_INFO,
993 /* info */ (vm_region_info_t) &regionInfo,
994 /* info size */ &regionInfoSize,
995 /* object name */ 0 ) != KERN_SUCCESS ) ||
996 ( (forDirection & kIODirectionIn ) &&
997 !(regionInfo.protection & VM_PROT_WRITE) ) ||
998 ( (forDirection & kIODirectionOut) &&
999 !(regionInfo.protection & VM_PROT_READ ) ) )
1000 {
1001 return kIOReturnVMError;
1002 }
1003
1004 assert((regionSize & PAGE_MASK) == 0);
1005
1006 regionSize = min(regionSize, checkSize);
1007 checkSize -= regionSize;
1008 checkBase += regionSize;
1009 } // (for each vm region)
1010 } // (for each io range)
1011
1012 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
1013
1014 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
1015 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
1016 _ranges.v[rangeIndex].length +
1017 page_size - 1);
1018
1019 vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
1020
1021 // If this I/O is for a user land task then protect ourselves
1022 // against COW and other vm_shenanigans
1023 if (_task && _task != kernel_task) {
1024 // setup a data object to hold the 'named' memory regions
1025 // @@@ gvdl: If we fail to allocate an OSData we will just
1026 // hope for the best for the time being. Let's not fail a
1027 // prepare at this late stage in product release.
1028 if (!_memoryEntries)
1029 _memoryEntries = OSData::withCapacity(16);
1030 if (_memoryEntries) {
1031 vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
1032 vm_object_offset_t entryStart = srcAlign;
1033 ipc_port_t memHandle;
1034
1035 do {
1036 vm_object_offset_t actualSize = desiredSize;
1037
1038 rc = mach_make_memory_entry_64
1039 (taskVMMap, &actualSize, entryStart,
1040 forDirection, &memHandle, NULL);
1041 if (KERN_SUCCESS != rc) {
1042 IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
1043 goto abortExit;
1044 }
1045
1046 _memoryEntries->
1047 appendBytes(&memHandle, sizeof(memHandle));
1048 desiredSize -= actualSize;
1049 entryStart += actualSize;
1050 } while (desiredSize);
1051 }
1052 }
1053
1054 rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
1055 if (KERN_SUCCESS != rc) {
1056 IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
1057 goto abortExit;
1058 }
1059 }
1060 }
1061 _wireCount++;
1062 return kIOReturnSuccess;
1063
1064 abortExit:
1065 UInt doneIndex;
1066
1067
1068 for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
1069 vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
1070 IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
1071 _ranges.v[doneIndex].length +
1072 page_size - 1);
1073
1074 vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
1075 srcAlignEnd, FALSE);
1076 }
1077
1078 if (_memoryEntries) {
1079 ipc_port_t *handles, *handlesEnd;
1080
1081 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
1082 handlesEnd = (ipc_port_t *)
1083 ((vm_address_t) handles + _memoryEntries->getLength());
1084 while (handles < handlesEnd)
1085 ipc_port_release_send(*handles++);
1086 _memoryEntries->release();
1087 _memoryEntries = 0;
1088 }
1089
1090 return kIOReturnVMError;
1091 }
1092
1093 /*
1094 * complete
1095 *
1096 * Complete processing of the memory after an I/O transfer finishes.
1097 * This method should not be called unless a prepare was previously
1098 * issued; the prepare() and complete() must occur in pairs, before
1099 * and after an I/O transfer involving pageable memory.
1100 */
1101
1102 IOReturn IOGeneralMemoryDescriptor::complete(
1103 IODirection forDirection = kIODirectionNone)
1104 {
1105 assert(_wireCount);
1106
1107 if(0 == _wireCount)
1108 return kIOReturnSuccess;
1109
1110 _wireCount--;
1111 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
1112 UInt rangeIndex;
1113 kern_return_t rc;
1114
1115 if(forDirection == kIODirectionNone)
1116 forDirection = _direction;
1117
1118 for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
1119
1120 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
1121 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
1122 _ranges.v[rangeIndex].length +
1123 page_size - 1);
1124
1125 if(forDirection == kIODirectionIn)
1126 pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);
1127
1128 rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
1129 srcAlignEnd, FALSE);
1130 if(rc != KERN_SUCCESS)
1131 IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
1132 }
1133
1134 if (_memoryEntries) {
1135 ipc_port_t *handles, *handlesEnd;
1136
1137 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
1138 handlesEnd = (ipc_port_t *)
1139 ((vm_address_t) handles + _memoryEntries->getLength());
1140 while (handles < handlesEnd)
1141 ipc_port_release_send(*handles++);
1142
1143 _memoryEntries->release();
1144 _memoryEntries = 0;
1145 }
1146 }
1147 return kIOReturnSuccess;
1148 }
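
/*
 * Illustrative sketch: the prepare()/complete() pairing described above,
 * wrapped around a transfer from a pageable buffer. The names
 * exampleTransfer, md and exampleStartAndWaitForDMA are hypothetical.
 */
#if 0
static IOReturn exampleTransfer( IOMemoryDescriptor * md )
{
    IOReturn err = md->prepare();           // page in and wire the memory
    if( kIOReturnSuccess != err)
        return( err );

    err = exampleStartAndWaitForDMA( md );  // hardware I/O on wired pages

    md->complete();                         // unwire; must balance prepare()
    return( err );
}
#endif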
1149
1150 IOReturn IOGeneralMemoryDescriptor::doMap(
1151 vm_map_t addressMap,
1152 IOVirtualAddress * atAddress,
1153 IOOptionBits options,
1154 IOByteCount sourceOffset = 0,
1155 IOByteCount length = 0 )
1156 {
1157 kern_return_t kr;
1158 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1159
1160 // mapping source == dest? (could be much better)
1161 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1162 && (1 == _rangesCount) && (0 == sourceOffset)
1163 && (length <= _ranges.v[0].length) ) {
1164 *atAddress = _ranges.v[0].address;
1165 return( kIOReturnSuccess );
1166 }
1167
1168 if( 0 == sharedMem) {
1169
1170 vm_size_t size = 0;
1171
1172 for (unsigned index = 0; index < _rangesCount; index++)
1173 size += round_page(_ranges.v[index].address + _ranges.v[index].length)
1174 - trunc_page(_ranges.v[index].address);
1175
1176 if( _task) {
1177 #ifndef i386
1178 vm_size_t actualSize = size;
1179 kr = mach_make_memory_entry( get_task_map(_task),
1180 &actualSize, _ranges.v[0].address,
1181 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1182 NULL );
1183
1184 if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) {
1185 #if IOASSERT
1186 IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
1187 _ranges.v[0].address, (UInt32)actualSize, size);
1188 #endif
1189 kr = kIOReturnVMError;
1190 ipc_port_release_send( sharedMem );
1191 }
1192
1193 if( KERN_SUCCESS != kr)
1194 #endif /* i386 */
1195 sharedMem = MACH_PORT_NULL;
1196
1197 } else do {
1198
1199 memory_object_t pager;
1200 unsigned int flags=0;
1201 struct phys_entry *pp;
1202 IOPhysicalAddress pa;
1203 IOPhysicalLength segLen;
1204
1205 pa = getPhysicalSegment( sourceOffset, &segLen );
1206
1207 if( !reserved) {
1208 reserved = IONew( ExpansionData, 1 );
1209 if( !reserved)
1210 continue;
1211 }
1212 reserved->pagerContig = (1 == _rangesCount);
1213 reserved->memory = this;
1214
1215 #ifndef i386
1216 switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
1217
1218 case kIOMapDefaultCache:
1219 default:
1220 if((pp = pmap_find_physentry(pa))) {/* Find physical address */
1221 /* Use physical attributes as default */
1222 flags = IOTranslateCacheBits(pp);
1223
1224 }
1225 else { /* If no physical, just hard code attributes */
1226 flags = DEVICE_PAGER_CACHE_INHIB |
1227 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1228 }
1229 break;
1230
1231 case kIOMapInhibitCache:
1232 flags = DEVICE_PAGER_CACHE_INHIB |
1233 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1234 break;
1235
1236 case kIOMapWriteThruCache:
1237 flags = DEVICE_PAGER_WRITE_THROUGH |
1238 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1239 break;
1240
1241 case kIOMapCopybackCache:
1242 flags = DEVICE_PAGER_COHERENT;
1243 break;
1244 }
1245
1246 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1247 #else
1248 flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1249 #endif
1250
1251 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1252 size, flags);
1253 assert( pager );
1254
1255 if( pager) {
1256 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1257 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1258
1259 assert( KERN_SUCCESS == kr );
1260 if( KERN_SUCCESS != kr) {
1261 device_pager_deallocate( pager );
1262 pager = MACH_PORT_NULL;
1263 sharedMem = MACH_PORT_NULL;
1264 }
1265 }
1266 if( pager && sharedMem)
1267 reserved->devicePager = pager;
1268 else {
1269 IODelete( reserved, ExpansionData, 1 );
1270 reserved = 0;
1271 }
1272
1273 } while( false );
1274
1275 _memEntry = (void *) sharedMem;
1276 }
1277
1278 #ifndef i386
1279 if( 0 == sharedMem)
1280 kr = kIOReturnVMError;
1281 else
1282 #endif
1283 kr = super::doMap( addressMap, atAddress,
1284 options, sourceOffset, length );
1285
1286 return( kr );
1287 }
1288
1289 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1290 vm_map_t addressMap,
1291 IOVirtualAddress logical,
1292 IOByteCount length )
1293 {
1294 // could be much better
1295 if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
1296 && (logical == _ranges.v[0].address)
1297 && (length <= _ranges.v[0].length) )
1298 return( kIOReturnSuccess );
1299
1300 return( super::doUnmap( addressMap, logical, length ));
1301 }
1302
1303 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1304
1305 extern "C" {
1306 // osfmk/device/iokit_rpc.c
1307 extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1308 vm_size_t length, unsigned int mapFlags);
1309 extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
1310 };
1311
1312 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1313
1314 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1315
1316 /* inline function implementation */
1317 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1318 { return( getPhysicalSegment( 0, 0 )); }
1319
1320 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1321
1322 class _IOMemoryMap : public IOMemoryMap
1323 {
1324 OSDeclareDefaultStructors(_IOMemoryMap)
1325
1326 IOMemoryDescriptor * memory;
1327 IOMemoryMap * superMap;
1328 IOByteCount offset;
1329 IOByteCount length;
1330 IOVirtualAddress logical;
1331 task_t addressTask;
1332 vm_map_t addressMap;
1333 IOOptionBits options;
1334
1335 protected:
1336 virtual void taggedRelease(const void *tag = 0) const;
1337 virtual void free();
1338
1339 public:
1340
1341 // IOMemoryMap methods
1342 virtual IOVirtualAddress getVirtualAddress();
1343 virtual IOByteCount getLength();
1344 virtual task_t getAddressTask();
1345 virtual IOMemoryDescriptor * getMemoryDescriptor();
1346 virtual IOOptionBits getMapOptions();
1347
1348 virtual IOReturn unmap();
1349 virtual void taskDied();
1350
1351 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1352 IOByteCount * length);
1353
1354 // for IOMemoryDescriptor use
1355 _IOMemoryMap * copyCompatible(
1356 IOMemoryDescriptor * owner,
1357 task_t intoTask,
1358 IOVirtualAddress toAddress,
1359 IOOptionBits options,
1360 IOByteCount offset,
1361 IOByteCount length );
1362
1363 bool initCompatible(
1364 IOMemoryDescriptor * memory,
1365 IOMemoryMap * superMap,
1366 IOByteCount offset,
1367 IOByteCount length );
1368
1369 bool initWithDescriptor(
1370 IOMemoryDescriptor * memory,
1371 task_t intoTask,
1372 IOVirtualAddress toAddress,
1373 IOOptionBits options,
1374 IOByteCount offset,
1375 IOByteCount length );
1376
1377 IOReturn redirect(
1378 task_t intoTask, bool redirect );
1379 };
1380
1381 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1382
1383 #undef super
1384 #define super IOMemoryMap
1385
1386 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1387
1388 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1389
1390 bool _IOMemoryMap::initCompatible(
1391 IOMemoryDescriptor * _memory,
1392 IOMemoryMap * _superMap,
1393 IOByteCount _offset,
1394 IOByteCount _length )
1395 {
1396
1397 if( !super::init())
1398 return( false);
1399
1400 if( (_offset + _length) > _superMap->getLength())
1401 return( false);
1402
1403 _memory->retain();
1404 memory = _memory;
1405 _superMap->retain();
1406 superMap = _superMap;
1407
1408 offset = _offset;
1409 if( _length)
1410 length = _length;
1411 else
1412 length = _memory->getLength();
1413
1414 options = superMap->getMapOptions();
1415 logical = superMap->getVirtualAddress() + offset;
1416
1417 return( true );
1418 }
1419
1420 bool _IOMemoryMap::initWithDescriptor(
1421 IOMemoryDescriptor * _memory,
1422 task_t intoTask,
1423 IOVirtualAddress toAddress,
1424 IOOptionBits _options,
1425 IOByteCount _offset,
1426 IOByteCount _length )
1427 {
1428 bool ok;
1429
1430 if( (!_memory) || (!intoTask) || !super::init())
1431 return( false);
1432
1433 if( (_offset + _length) > _memory->getLength())
1434 return( false);
1435
1436 addressMap = get_task_map(intoTask);
1437 if( !addressMap)
1438 return( false);
1439 vm_map_reference(addressMap);
1440
1441 _memory->retain();
1442 memory = _memory;
1443
1444 offset = _offset;
1445 if( _length)
1446 length = _length;
1447 else
1448 length = _memory->getLength();
1449
1450 addressTask = intoTask;
1451 logical = toAddress;
1452 options = _options;
1453
1454 if( options & kIOMapStatic)
1455 ok = true;
1456 else
1457 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1458 options, offset, length ));
1459 if( !ok) {
1460 logical = 0;
1461 memory->release();
1462 memory = 0;
1463 vm_map_deallocate(addressMap);
1464 addressMap = 0;
1465 }
1466 return( ok );
1467 }
1468
1469 struct IOMemoryDescriptorMapAllocRef
1470 {
1471 ipc_port_t sharedMem;
1472 vm_size_t size;
1473 vm_offset_t mapped;
1474 IOByteCount sourceOffset;
1475 IOOptionBits options;
1476 };
1477
1478 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
1479 {
1480 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
1481 IOReturn err;
1482
1483 do {
1484 if( ref->sharedMem) {
1485 vm_prot_t prot = VM_PROT_READ
1486 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
1487
1488 err = vm_map( map,
1489 &ref->mapped,
1490 ref->size, 0 /* mask */,
1491 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1492 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1493 ref->sharedMem, ref->sourceOffset,
1494 false, // copy
1495 prot, // cur
1496 prot, // max
1497 VM_INHERIT_NONE);
1498
1499 if( KERN_SUCCESS != err) {
1500 ref->mapped = 0;
1501 continue;
1502 }
1503
1504 } else {
1505
1506 err = vm_allocate( map, &ref->mapped, ref->size,
1507 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1508 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1509
1510 if( KERN_SUCCESS != err) {
1511 ref->mapped = 0;
1512 continue;
1513 }
1514
1515 // we have to make sure that these guys don't get copied if we fork.
1516 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
1517 assert( KERN_SUCCESS == err );
1518 }
1519
1520 } while( false );
1521
1522 return( err );
1523 }
1524
1525
1526 IOReturn IOMemoryDescriptor::doMap(
1527 vm_map_t addressMap,
1528 IOVirtualAddress * atAddress,
1529 IOOptionBits options,
1530 IOByteCount sourceOffset = 0,
1531 IOByteCount length = 0 )
1532 {
1533 IOReturn err = kIOReturnSuccess;
1534 memory_object_t pager;
1535 vm_address_t logical;
1536 IOByteCount pageOffset;
1537 IOPhysicalAddress sourceAddr;
1538 IOMemoryDescriptorMapAllocRef ref;
1539
1540 ref.sharedMem = (ipc_port_t) _memEntry;
1541 ref.sourceOffset = sourceOffset;
1542 ref.options = options;
1543
1544 do {
1545
1546 if( 0 == length)
1547 length = getLength();
1548
1549 sourceAddr = getSourceSegment( sourceOffset, NULL );
1550 assert( sourceAddr );
1551 pageOffset = sourceAddr - trunc_page( sourceAddr );
1552
1553 ref.size = round_page( length + pageOffset );
1554
1555 logical = *atAddress;
1556 if( options & kIOMapAnywhere)
1557 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1558 ref.mapped = 0;
1559 else {
1560 ref.mapped = trunc_page( logical );
1561 if( (logical - ref.mapped) != pageOffset) {
1562 err = kIOReturnVMError;
1563 continue;
1564 }
1565 }
1566
1567 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
1568 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1569 else
1570 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
1571
1572 if( err != KERN_SUCCESS)
1573 continue;
1574
1575 if( reserved)
1576 pager = (memory_object_t) reserved->devicePager;
1577 else
1578 pager = MACH_PORT_NULL;
1579
1580 if( !ref.sharedMem || pager )
1581 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
1582
1583 } while( false );
1584
1585 if( err != KERN_SUCCESS) {
1586 if( ref.mapped)
1587 doUnmap( addressMap, ref.mapped, ref.size );
1588 *atAddress = NULL;
1589 } else
1590 *atAddress = ref.mapped + pageOffset;
1591
1592 return( err );
1593 }
1594
1595 enum {
1596 kIOMemoryRedirected = 0x00010000
1597 };
1598
1599 IOReturn IOMemoryDescriptor::handleFault(
1600 void * _pager,
1601 vm_map_t addressMap,
1602 IOVirtualAddress address,
1603 IOByteCount sourceOffset,
1604 IOByteCount length,
1605 IOOptionBits options )
1606 {
1607 IOReturn err = kIOReturnSuccess;
1608 memory_object_t pager = (memory_object_t) _pager;
1609 vm_size_t size;
1610 vm_size_t bytes;
1611 vm_size_t page;
1612 IOByteCount pageOffset;
1613 IOPhysicalLength segLen;
1614 IOPhysicalAddress physAddr;
1615
1616 if( !addressMap) {
1617
1618 if( kIOMemoryRedirected & _flags) {
1619 #ifdef DEBUG
1620 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
1621 #endif
1622 do {
1623 SLEEP;
1624 } while( kIOMemoryRedirected & _flags );
1625 }
1626
1627 return( kIOReturnSuccess );
1628 }
1629
1630 physAddr = getPhysicalSegment( sourceOffset, &segLen );
1631 assert( physAddr );
1632 pageOffset = physAddr - trunc_page( physAddr );
1633
1634 size = length + pageOffset;
1635 physAddr -= pageOffset;
1636
1637 segLen += pageOffset;
1638 bytes = size;
1639 do {
1640 // in the middle of the loop only map whole pages
1641 if( segLen >= bytes)
1642 segLen = bytes;
1643 else if( segLen != trunc_page( segLen))
1644 err = kIOReturnVMError;
1645 if( physAddr != trunc_page( physAddr))
1646 err = kIOReturnBadArgument;
1647
1648 #ifdef DEBUG
1649 if( kIOLogMapping & gIOKitDebug)
1650 IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
1651 addressMap, address + pageOffset, physAddr + pageOffset,
1652 segLen - pageOffset);
1653 #endif
1654
1655
1656
1657
1658
1659 #ifdef i386
1660 /* i386 doesn't support faulting on device memory yet */
1661 if( addressMap && (kIOReturnSuccess == err))
1662 err = IOMapPages( addressMap, address, physAddr, segLen, options );
1663 assert( KERN_SUCCESS == err );
1664 if( err)
1665 break;
1666 #endif
1667
1668 if( pager) {
1669 if( reserved && reserved->pagerContig) {
1670 IOPhysicalLength allLen;
1671 IOPhysicalAddress allPhys;
1672
1673 allPhys = getPhysicalSegment( 0, &allLen );
1674 assert( allPhys );
1675 err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) );
1676
1677 } else {
1678
1679 for( page = 0;
1680 (page < segLen) && (KERN_SUCCESS == err);
1681 page += page_size) {
1682 err = device_pager_populate_object( pager, sourceOffset + page,
1683 physAddr + page, page_size );
1684 }
1685 }
1686 assert( KERN_SUCCESS == err );
1687 if( err)
1688 break;
1689 }
1690 #ifndef i386
1691 /* *** ALERT *** */
1692 /* *** Temporary Workaround *** */
1693
1694 /* This call to vm_fault causes an early pmap level resolution */
1695 /* of the mappings created above. The need for this is in absolute */
1696 /* violation of the basic tenet that the pmap layer is a cache. */
1697 /* Further, it implies a serious I/O architectural violation on */
1698 /* the part of some user of the mapping. As of this writing, */
1699 /* the call to vm_fault is needed because the NVIDIA driver */
1700 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
1701 /* fixed as soon as possible. The NVIDIA driver should not */
1702 /* need to query for this info as it should know from the doMap */
1703 /* call where the physical memory is mapped. When a query is */
1704 /* necessary to find a physical mapping, it should be done */
1705 /* through an iokit call which includes the mapped memory */
1706 /* handle. This is required for machine architecture independence.*/
1707
1708 if(!(kIOMemoryRedirected & _flags)) {
1709 vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
1710 }
1711
1712 /* *** Temporary Workaround *** */
1713 /* *** ALERT *** */
1714 #endif
1715 sourceOffset += segLen - pageOffset;
1716 address += segLen;
1717 bytes -= segLen;
1718 pageOffset = 0;
1719
1720 } while( bytes
1721 && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));
1722
1723 if( bytes)
1724 err = kIOReturnBadArgument;
1725
1726 return( err );
1727 }
1728
1729 IOReturn IOMemoryDescriptor::doUnmap(
1730 vm_map_t addressMap,
1731 IOVirtualAddress logical,
1732 IOByteCount length )
1733 {
1734 IOReturn err;
1735
1736 #ifdef DEBUG
1737 if( kIOLogMapping & gIOKitDebug)
1738 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1739 addressMap, logical, length );
1740 #endif
1741
1742 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
1743
1744 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
1745 addressMap = IOPageableMapForAddress( logical );
1746
1747 err = vm_deallocate( addressMap, logical, length );
1748
1749 } else
1750 err = kIOReturnSuccess;
1751
1752 return( err );
1753 }
1754
1755 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1756 {
1757 IOReturn err;
1758 _IOMemoryMap * mapping = 0;
1759 OSIterator * iter;
1760
1761 LOCK;
1762
1763 do {
1764 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1765 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
1766 mapping->redirect( safeTask, redirect );
1767
1768 iter->release();
1769 }
1770 } while( false );
1771
1772 if( redirect)
1773 _flags |= kIOMemoryRedirected;
1774 else {
1775 _flags &= ~kIOMemoryRedirected;
1776 WAKEUP;
1777 }
1778
1779 UNLOCK;
1780
1781 // temporary binary compatibility
1782 IOSubMemoryDescriptor * subMem;
1783 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
1784 err = subMem->redirect( safeTask, redirect );
1785 else
1786 err = kIOReturnSuccess;
1787
1788 return( err );
1789 }
1790
1791 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1792 {
1793 return( _parent->redirect( safeTask, redirect ));
1794 }
1795
1796 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
1797 {
1798 IOReturn err = kIOReturnSuccess;
1799
1800 if( superMap) {
1801 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
1802 } else {
1803
1804 LOCK;
1805 if( logical && addressMap
1806 && (get_task_map( safeTask) != addressMap)
1807 && (0 == (options & kIOMapStatic))) {
1808
1809 IOUnmapPages( addressMap, logical, length );
1810 if( !redirect) {
1811 err = vm_deallocate( addressMap, logical, length );
1812 err = memory->doMap( addressMap, &logical,
1813 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
1814 offset, length );
1815 } else
1816 err = kIOReturnSuccess;
1817 #ifdef DEBUG
1818 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
1819 #endif
1820 }
1821 UNLOCK;
1822 }
1823
1824 return( err );
1825 }
1826
1827 IOReturn _IOMemoryMap::unmap( void )
1828 {
1829 IOReturn err;
1830
1831 LOCK;
1832
1833 if( logical && addressMap && (0 == superMap)
1834 && (0 == (options & kIOMapStatic))) {
1835
1836 err = memory->doUnmap( addressMap, logical, length );
1837 vm_map_deallocate(addressMap);
1838 addressMap = 0;
1839
1840 } else
1841 err = kIOReturnSuccess;
1842
1843 logical = 0;
1844
1845 UNLOCK;
1846
1847 return( err );
1848 }
1849
1850 void _IOMemoryMap::taskDied( void )
1851 {
1852 LOCK;
1853 if( addressMap) {
1854 vm_map_deallocate(addressMap);
1855 addressMap = 0;
1856 }
1857 addressTask = 0;
1858 logical = 0;
1859 UNLOCK;
1860 }
1861
1862 // Overload the release mechanism. Every mapping must be a member of
1863 // its memory descriptor's _mappings set, so there are always two
1864 // references on a mapping. When either of these references is
1865 // released we need to free ourselves.
1866 void _IOMemoryMap::taggedRelease(const void *tag = 0) const
1867 {
1868 super::taggedRelease(tag, 2);
1869 }
1870
1871 void _IOMemoryMap::free()
1872 {
1873 unmap();
1874
1875 if( memory) {
1876 LOCK;
1877 memory->removeMapping( this);
1878 UNLOCK;
1879 memory->release();
1880 }
1881
1882 if( superMap)
1883 superMap->release();
1884
1885 super::free();
1886 }
1887
1888 IOByteCount _IOMemoryMap::getLength()
1889 {
1890 return( length );
1891 }
1892
1893 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
1894 {
1895 return( logical);
1896 }
1897
1898 task_t _IOMemoryMap::getAddressTask()
1899 {
1900 if( superMap)
1901 return( superMap->getAddressTask());
1902 else
1903 return( addressTask);
1904 }
1905
1906 IOOptionBits _IOMemoryMap::getMapOptions()
1907 {
1908 return( options);
1909 }
1910
1911 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
1912 {
1913 return( memory );
1914 }
1915
1916 _IOMemoryMap * _IOMemoryMap::copyCompatible(
1917 IOMemoryDescriptor * owner,
1918 task_t task,
1919 IOVirtualAddress toAddress,
1920 IOOptionBits _options,
1921 IOByteCount _offset,
1922 IOByteCount _length )
1923 {
1924 _IOMemoryMap * mapping;
1925
1926 if( (!task) || (task != getAddressTask()))
1927 return( 0 );
1928 if( (options ^ _options) & kIOMapReadOnly)
1929 return( 0 );
1930 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
1931 && ((options ^ _options) & kIOMapCacheMask))
1932 return( 0 );
1933
1934 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
1935 return( 0 );
1936
1937 if( _offset < offset)
1938 return( 0 );
1939
1940 _offset -= offset;
1941
1942 if( (_offset + _length) > length)
1943 return( 0 );
1944
1945 if( (length == _length) && (!_offset)) {
1946 retain();
1947 mapping = this;
1948
1949 } else {
1950 mapping = new _IOMemoryMap;
1951 if( mapping
1952 && !mapping->initCompatible( owner, this, _offset, _length )) {
1953 mapping->release();
1954 mapping = 0;
1955 }
1956 }
1957
1958 return( mapping );
1959 }
1960
1961 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
1962 IOPhysicalLength * length)
1963 {
1964 IOPhysicalAddress address;
1965
1966 LOCK;
1967 address = memory->getPhysicalSegment( offset + _offset, length );
1968 UNLOCK;
1969
1970 return( address );
1971 }
1972
1973 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1974
1975 #undef super
1976 #define super OSObject
1977
1978 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1979
1980 void IOMemoryDescriptor::initialize( void )
1981 {
1982 if( 0 == gIOMemoryLock)
1983 gIOMemoryLock = IORecursiveLockAlloc();
1984 }
1985
1986 void IOMemoryDescriptor::free( void )
1987 {
1988 if( _mappings)
1989 _mappings->release();
1990
1991 super::free();
1992 }
1993
1994 IOMemoryMap * IOMemoryDescriptor::setMapping(
1995 task_t intoTask,
1996 IOVirtualAddress mapAddress,
1997 IOOptionBits options = 0 )
1998 {
1999 _IOMemoryMap * map;
2000
2001 map = new _IOMemoryMap;
2002
2003 LOCK;
2004
2005 if( map
2006 && !map->initWithDescriptor( this, intoTask, mapAddress,
2007 options | kIOMapStatic, 0, getLength() )) {
2008 map->release();
2009 map = 0;
2010 }
2011
2012 addMapping( map);
2013
2014 UNLOCK;
2015
2016 return( map);
2017 }
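// Sketch of intended use (illustrative only; 'md', 'task' and 'knownAddress'
// are hypothetical): setMapping() records a mapping that already exists at a
// fixed address, e.g. memory a driver mapped by other means, so the map is
// created kIOMapStatic and covers the whole descriptor.
//
//     IOMemoryMap * map = md->setMapping( task, knownAddress );
//     // releasing 'map' later does not undo the underlying mapping,
//     // because unmap() skips kIOMapStatic maps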
2018
2019 IOMemoryMap * IOMemoryDescriptor::map(
2020 IOOptionBits options = 0 )
2021 {
2022
2023 return( makeMapping( this, kernel_task, 0,
2024 options | kIOMapAnywhere,
2025 0, getLength() ));
2026 }
2027
2028 IOMemoryMap * IOMemoryDescriptor::map(
2029 task_t intoTask,
2030 IOVirtualAddress toAddress,
2031 IOOptionBits options,
2032 IOByteCount offset = 0,
2033 IOByteCount length = 0 )
2034 {
2035 if( 0 == length)
2036 length = getLength();
2037
2038 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2039 }
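// Sketch of the common case (illustrative only; 'md' is a hypothetical
// descriptor and error handling is trimmed): map the whole descriptor
// anywhere in the kernel task and look up the resulting address.
//
//     IOMemoryMap * kmap = md->map();                 // kernel_task, kIOMapAnywhere
//     if( kmap) {
//         IOVirtualAddress va = kmap->getVirtualAddress();
//         IOByteCount      sz = kmap->getLength();
//         /* ... use [va, va + sz) ... */
//         kmap->release();                            // tears the mapping down
//     }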
2040
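// makeMapping: look for an existing compatible mapping in the _mappings set
// and reuse it; otherwise, unless the caller asked for kIOMapReference
// ("reuse only, never create"), build a new _IOMemoryMap. The result, if
// any, is added to the owner's _mappings set before it is returned.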
2041 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2042 IOMemoryDescriptor * owner,
2043 task_t intoTask,
2044 IOVirtualAddress toAddress,
2045 IOOptionBits options,
2046 IOByteCount offset,
2047 IOByteCount length )
2048 {
2049 _IOMemoryMap * mapping = 0;
2050 OSIterator * iter;
2051
2052 LOCK;
2053
2054 do {
2055 // look for an existing mapping
2056 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2057
2058 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2059
2060 if( (mapping = mapping->copyCompatible(
2061 owner, intoTask, toAddress,
2062 options | kIOMapReference,
2063 offset, length )))
2064 break;
2065 }
2066 iter->release();
2067 if( mapping)
2068 continue;
2069 }
2070
2071
2072 if( mapping || (options & kIOMapReference))
2073 continue;
2074
2075 owner = this;
2076
2077 mapping = new _IOMemoryMap;
2078 if( mapping
2079 && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
2080 offset, length )) {
2081 #ifdef DEBUG
2082 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2083 #endif
2084 mapping->release();
2085 mapping = 0;
2086 }
2087
2088 } while( false );
2089
2090 owner->addMapping( mapping);
2091
2092 UNLOCK;
2093
2094 return( mapping);
2095 }
2096
2097 void IOMemoryDescriptor::addMapping(
2098 IOMemoryMap * mapping )
2099 {
2100 if( mapping) {
2101 if( 0 == _mappings)
2102 _mappings = OSSet::withCapacity(1);
2103 if( _mappings )
2104 _mappings->setObject( mapping );
2105 }
2106 }
2107
2108 void IOMemoryDescriptor::removeMapping(
2109 IOMemoryMap * mapping )
2110 {
2111 if( _mappings)
2112 _mappings->removeObject( mapping);
2113 }
2114
2115 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2116
2117 #undef super
2118 #define super IOMemoryDescriptor
2119
2120 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2121
2122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2123
2124 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2125 IOByteCount offset, IOByteCount length,
2126 IODirection withDirection )
2127 {
2128 if( !super::init())
2129 return( false );
2130
2131 if( !parent)
2132 return( false);
2133
2134 if( (offset + length) > parent->getLength())
2135 return( false);
2136
2137 parent->retain();
2138 _parent = parent;
2139 _start = offset;
2140 _length = length;
2141 _direction = withDirection;
2142 _tag = parent->getTag();
2143
2144 return( true );
2145 }
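// Illustrative sketch (not in the original source): building a sub-range view
// of an existing descriptor. 'parentMD' is a hypothetical descriptor owned by
// the caller; the sub-descriptor retains it for its own lifetime.
//
//     IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
//     if( sub && !sub->initSubRange( parentMD, /* offset */ 4096,
//                                    /* length */ 8192, kIODirectionOutIn )) {
//         sub->release();
//         sub = 0;
//     }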
2146
2147 void IOSubMemoryDescriptor::free( void )
2148 {
2149 if( _parent)
2150 _parent->release();
2151
2152 super::free();
2153 }
2154
2155
2156 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2157 IOByteCount * length )
2158 {
2159 IOPhysicalAddress address;
2160 IOByteCount actualLength;
2161
2162 assert(offset <= _length);
2163
2164 if( length)
2165 *length = 0;
2166
2167 if( offset >= _length)
2168 return( 0 );
2169
2170 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2171
2172 if( address && length)
2173 *length = min( _length - offset, actualLength );
2174
2175 return( address );
2176 }
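// For example (illustrative numbers): with _start == 0x1000 and _length ==
// 0x800, a request at offset 0x20 asks the parent for offset 0x1020; if the
// parent reports a 0x1000-byte contiguous run, the returned *length is
// clipped to 0x7E0 so it never extends past the end of the sub-range.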
2177
2178 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2179 IOByteCount * length )
2180 {
2181 IOPhysicalAddress address;
2182 IOByteCount actualLength;
2183
2184 assert(offset <= _length);
2185
2186 if( length)
2187 *length = 0;
2188
2189 if( offset >= _length)
2190 return( 0 );
2191
2192 address = _parent->getSourceSegment( offset + _start, &actualLength );
2193
2194 if( address && length)
2195 *length = min( _length - offset, actualLength );
2196
2197 return( address );
2198 }
2199
2200 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2201 IOByteCount * lengthOfSegment)
2202 {
2203 return( 0 );
2204 }
2205
2206 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
2207 void * bytes, IOByteCount withLength)
2208 {
2209 IOByteCount byteCount;
2210
2211 assert(offset <= _length);
2212
2213 if( offset >= _length)
2214 return( 0 );
2215
2216 LOCK;
2217 byteCount = _parent->readBytes( _start + offset, bytes,
2218 min(withLength, _length - offset) );
2219 UNLOCK;
2220
2221 return( byteCount );
2222 }
2223
2224 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
2225 const void* bytes, IOByteCount withLength)
2226 {
2227 IOByteCount byteCount;
2228
2229 assert(offset <= _length);
2230
2231 if( offset >= _length)
2232 return( 0 );
2233
2234 LOCK;
2235 byteCount = _parent->writeBytes( _start + offset, bytes,
2236 min(withLength, _length - offset) );
2237 UNLOCK;
2238
2239 return( byteCount );
2240 }
2241
2242 IOReturn IOSubMemoryDescriptor::prepare(
2243 IODirection forDirection = kIODirectionNone)
2244 {
2245 IOReturn err;
2246
2247 LOCK;
2248 err = _parent->prepare( forDirection);
2249 UNLOCK;
2250
2251 return( err );
2252 }
2253
2254 IOReturn IOSubMemoryDescriptor::complete(
2255 IODirection forDirection = kIODirectionNone)
2256 {
2257 IOReturn err;
2258
2259 LOCK;
2260 err = _parent->complete( forDirection);
2261 UNLOCK;
2262
2263 return( err );
2264 }
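// Typical pairing (illustrative only; 'sub' is a hypothetical sub-descriptor):
// prepare() readies the parent's memory for I/O and complete() undoes it,
// both simply forwarding to the parent.
//
//     if( kIOReturnSuccess == sub->prepare()) {
//         /* ... program I/O using sub->getPhysicalSegment(...) ... */
//         sub->complete();
//     }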
2265
2266 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
2267 IOMemoryDescriptor * owner,
2268 task_t intoTask,
2269 IOVirtualAddress toAddress,
2270 IOOptionBits options,
2271 IOByteCount offset,
2272 IOByteCount length )
2273 {
2274 IOMemoryMap * mapping;
2275
2276 mapping = (IOMemoryMap *) _parent->makeMapping(
2277 _parent, intoTask,
2278 toAddress - (_start + offset),
2279 options | kIOMapReference,
2280 _start + offset, length );
2281
2282 if( !mapping)
2283 mapping = (IOMemoryMap *) _parent->makeMapping(
2284 _parent, intoTask,
2285 toAddress,
2286 options, _start + offset, length );
2287
2288 if( !mapping)
2289 mapping = super::makeMapping( owner, intoTask, toAddress, options,
2290 offset, length );
2291
2292 return( mapping );
2293 }
2294
2295 /* ick - a subrange can only be built via initSubRange, so refuse all the direct initializers */
2296
2297 bool
2298 IOSubMemoryDescriptor::initWithAddress(void * address,
2299 IOByteCount withLength,
2300 IODirection withDirection)
2301 {
2302 return( false );
2303 }
2304
2305 bool
2306 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
2307 IOByteCount withLength,
2308 IODirection withDirection,
2309 task_t withTask)
2310 {
2311 return( false );
2312 }
2313
2314 bool
2315 IOSubMemoryDescriptor::initWithPhysicalAddress(
2316 IOPhysicalAddress address,
2317 IOByteCount withLength,
2318 IODirection withDirection )
2319 {
2320 return( false );
2321 }
2322
2323 bool
2324 IOSubMemoryDescriptor::initWithRanges(
2325 IOVirtualRange * ranges,
2326 UInt32 withCount,
2327 IODirection withDirection,
2328 task_t withTask,
2329 bool asReference = false)
2330 {
2331 return( false );
2332 }
2333
2334 bool
2335 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
2336 UInt32 withCount,
2337 IODirection withDirection,
2338 bool asReference = false)
2339 {
2340 return( false );
2341 }
2342
2343 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2344
2345 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
2346 {
2347 OSSymbol const *keys[2];
2348 OSObject *values[2];
2349 OSDictionary *dict;
2350 IOVirtualRange *vcopy;
2351 unsigned int index, nRanges;
2352 bool result;
2353
2354 if (s == NULL) return false;
2355 if (s->previouslySerialized(this)) return true;
2356
2357 // Pretend we are an array.
2358 if (!s->addXMLStartTag(this, "array")) return false;
2359
2360 nRanges = _rangesCount;
2361 vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
2362 if (vcopy == 0) return false;
2363
2364 keys[0] = OSSymbol::withCString("address");
2365 keys[1] = OSSymbol::withCString("length");
2366
2367 result = false;
2368 values[0] = values[1] = 0;
2369
2370 // From this point on, any failure exits through the 'bail' label below.
2371
2372 // Copy the volatile data so we don't have to allocate memory
2373 // while the lock is held.
2374 LOCK;
2375 if (nRanges == _rangesCount) {
2376 for (index = 0; index < nRanges; index++) {
2377 vcopy[index] = _ranges.v[index];
2378 }
2379 } else {
2380 // The descriptor changed out from under us. Give up.
2381 UNLOCK;
2382 result = false;
2383 goto bail;
2384 }
2385 UNLOCK;
2386
2387 for (index = 0; index < nRanges; index++)
2388 {
2389 values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
2390 if (values[0] == 0) {
2391 result = false;
2392 goto bail;
2393 }
2394 values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
2395 if (values[1] == 0) {
2396 result = false;
2397 goto bail;
2398 }
2399 dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
2400 if (dict == 0) {
2401 result = false;
2402 goto bail;
2403 }
2404 values[0]->release();
2405 values[1]->release();
2406 values[0] = values[1] = 0;
2407
2408 result = dict->serialize(s);
2409 dict->release();
2410 if (!result) {
2411 goto bail;
2412 }
2413 }
2414 result = s->addXMLEndTag("array");
2415
2416 bail:
2417 if (values[0])
2418 values[0]->release();
2419 if (values[1])
2420 values[1]->release();
2421 if (keys[0])
2422 keys[0]->release();
2423 if (keys[1])
2424 keys[1]->release();
2425 if (vcopy)
2426 IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
2427 return result;
2428 }
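// The serialized form is therefore an XML "array" holding one dictionary per
// range, each carrying "address" and "length" numbers - roughly (illustrative;
// exact OSSerialize tags and attributes omitted):
//
//     <array>
//         <dict>
//             <key>address</key> <integer>...</integer>
//             <key>length</key>  <integer>...</integer>
//         </dict>
//         ...
//     </array>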
2429
2430 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
2431 {
2432 if (!s) {
2433 return (false);
2434 }
2435 if (s->previouslySerialized(this)) return true;
2436
2437 // Pretend we are a dictionary.
2438 // We must duplicate the functionality of OSDictionary here
2439 // because otherwise object references will not work;
2440 // they are based on the value of the object passed to
2441 // previouslySerialized and addXMLStartTag.
2442
2443 if (!s->addXMLStartTag(this, "dict")) return false;
2444
2445 char const *keys[3] = {"offset", "length", "parent"};
2446
2447 OSObject *values[3];
2448 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
2449 if (values[0] == 0)
2450 return false;
2451 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
2452 if (values[1] == 0) {
2453 values[0]->release();
2454 return false;
2455 }
2456 values[2] = _parent;
2457
2458 bool result = true;
2459 for (int i=0; i<3; i++) {
2460 if (!s->addString("<key>") ||
2461 !s->addString(keys[i]) ||
2462 !s->addXMLEndTag("key") ||
2463 !values[i]->serialize(s)) {
2464 result = false;
2465 break;
2466 }
2467 }
2468 values[0]->release();
2469 values[1]->release();
2470 if (!result) {
2471 return false;
2472 }
2473
2474 return s->addXMLEndTag("dict");
2475 }
2476
2477 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2478
2479 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
2480 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
2481 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
2482 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
2483 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
2484 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
2485 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
2486 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
2487 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
2488 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
2489 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
2490 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
2491 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
2492 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
2493 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
2494 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
2495
2496 /* inline function implementation */
2497 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
2498 { return( getPhysicalSegment( 0, 0 )); }