apple/xnu.git: iokit/Kernel/IOMemoryDescriptor.cpp (blob 12ace713b3c89a412ccffac8e0b7bd6c8e80ba71)
1 /*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
27 *
28 * HISTORY
29 *
30 */
31 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
32 #include <sys/cdefs.h>
33
34 #include <IOKit/assert.h>
35 #include <IOKit/system.h>
36 #include <IOKit/IOLib.h>
37 #include <IOKit/IOMemoryDescriptor.h>
38 #include <IOKit/IOMapper.h>
39
40 #include <IOKit/IOKitDebug.h>
41
42 #include <libkern/c++/OSContainers.h>
43 #include <libkern/c++/OSDictionary.h>
44 #include <libkern/c++/OSArray.h>
45 #include <libkern/c++/OSSymbol.h>
46 #include <libkern/c++/OSNumber.h>
47 #include <sys/cdefs.h>
48
49 __BEGIN_DECLS
50 #include <vm/pmap.h>
51 #include <mach/memory_object_types.h>
52 #include <device/device_port.h>
53
54 #ifndef i386
55 struct phys_entry *pmap_find_physentry(ppnum_t pa);
56 #endif
57 void ipc_port_release_send(ipc_port_t port);
58
59 /* Copy between a physical page and a virtual address in the given vm_map */
60 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
61
62 memory_object_t
63 device_pager_setup(
64 memory_object_t pager,
65 int device_handle,
66 vm_size_t size,
67 int flags);
68 void
69 device_pager_deallocate(
70 memory_object_t);
71 kern_return_t
72 device_pager_populate_object(
73 memory_object_t pager,
74 vm_object_offset_t offset,
75 ppnum_t phys_addr,
76 vm_size_t size);
77
78 /*
79 * Page fault handling based on vm_map (or entries therein)
80 */
81 extern kern_return_t vm_fault(
82 vm_map_t map,
83 vm_offset_t vaddr,
84 vm_prot_t fault_type,
85 boolean_t change_wiring,
86 int interruptible,
87 pmap_t caller_pmap,
88 vm_offset_t caller_pmap_addr);
89
90 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
91
92 vm_map_t IOPageableMapForAddress( vm_address_t address );
93
94 typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
95
96 kern_return_t IOIteratePageableMaps(vm_size_t size,
97 IOIteratePageableMapsCallback callback, void * ref);
98 __END_DECLS
99
100 static IOMapper * gIOSystemMapper;
101
102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
103
104 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
105
106 #define super IOMemoryDescriptor
107
108 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
109
110 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
111
112 static IORecursiveLock * gIOMemoryLock;
113
114 #define LOCK IORecursiveLockLock( gIOMemoryLock)
115 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
116 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
117 #define WAKEUP \
118 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
119
120 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
121
122 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
123
124
125 extern "C" {
126
127 kern_return_t device_data_action(
128 int device_handle,
129 ipc_port_t device_pager,
130 vm_prot_t protection,
131 vm_object_offset_t offset,
132 vm_size_t size)
133 {
134 struct ExpansionData {
135 void * devicePager;
136 unsigned int pagerContig:1;
137 unsigned int unused:31;
138 IOMemoryDescriptor * memory;
139 };
140 kern_return_t kr;
141 ExpansionData * ref = (ExpansionData *) device_handle;
142 IOMemoryDescriptor * memDesc;
143
144 LOCK;
145 memDesc = ref->memory;
146 if( memDesc)
147 kr = memDesc->handleFault( device_pager, 0, 0,
148 offset, size, kIOMapDefaultCache /*?*/);
149 else
150 kr = KERN_ABORTED;
151 UNLOCK;
152
153 return( kr );
154 }
155
156 kern_return_t device_close(
157 int device_handle)
158 {
159 struct ExpansionData {
160 void * devicePager;
161 unsigned int pagerContig:1;
162 unsigned int unused:31;
163 IOMemoryDescriptor * memory;
164 };
165 ExpansionData * ref = (ExpansionData *) device_handle;
166
167 IODelete( ref, ExpansionData, 1 );
168
169 return( kIOReturnSuccess );
170 }
171
172 }
173
174 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
175
176 /*
177 * withAddress:
178 *
179 * Create a new IOMemoryDescriptor. The buffer is a virtual address
180 * relative to the specified task. If no task is supplied, the kernel
181 * task is implied.
182 */
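/*
 * Illustrative usage (a minimal sketch, not part of this file; "buf" and
 * "len" are assumed to describe a caller-supplied kernel buffer that is
 * the source of a write to a device):
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, len, kIODirectionOut);
 *     if (md) {
 *         if (kIOReturnSuccess == md->prepare(kIODirectionOut)) {
 *             // ... run the transfer, e.g. via getPhysicalSegment() ...
 *             md->complete(kIODirectionOut);
 *         }
 *         md->release();
 *     }
 */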
183 IOMemoryDescriptor *
184 IOMemoryDescriptor::withAddress(void * address,
185 IOByteCount length,
186 IODirection direction)
187 {
188 return IOMemoryDescriptor::
189 withAddress((vm_address_t) address, length, direction, kernel_task);
190 }
191
192 IOMemoryDescriptor *
193 IOMemoryDescriptor::withAddress(vm_address_t address,
194 IOByteCount length,
195 IODirection direction,
196 task_t task)
197 {
198 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
199 if (that)
200 {
201 if (that->initWithAddress(address, length, direction, task))
202 return that;
203
204 that->release();
205 }
206 return 0;
207 }
208
209 IOMemoryDescriptor *
210 IOMemoryDescriptor::withPhysicalAddress(
211 IOPhysicalAddress address,
212 IOByteCount length,
213 IODirection direction )
214 {
215 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
216 if (self
217 && !self->initWithPhysicalAddress(address, length, direction)) {
218 self->release();
219 return 0;
220 }
221
222 return self;
223 }
224
225 IOMemoryDescriptor *
226 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
227 UInt32 withCount,
228 IODirection direction,
229 task_t task,
230 bool asReference)
231 {
232 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
233 if (that)
234 {
235 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
236 return that;
237
238 that->release();
239 }
240 return 0;
241 }
242
243
244 /*
245  * withOptions:
246 *
247 * Create a new IOMemoryDescriptor. The buffer is made up of several
248 * virtual address ranges, from a given task.
249 *
250 * Passing the ranges as a reference will avoid an extra allocation.
251 */
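/*
 * Illustrative usage (a minimal sketch; "ranges", "rangeCount" and
 * "userTask" are assumed to be caller-supplied and are not part of this
 * file):
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *         ranges, rangeCount, 0, userTask,
 *         kIOMemoryTypeVirtual | kIODirectionIn | kIOMemoryAsReference,
 *         0);    // 0 mapper selects the system mapper
 *
 * With kIOMemoryAsReference the ranges array is not copied, so it must
 * outlive the descriptor.
 */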
252 IOMemoryDescriptor *
253 IOMemoryDescriptor::withOptions(void * buffers,
254 UInt32 count,
255 UInt32 offset,
256 task_t task,
257 IOOptionBits opts,
258 IOMapper * mapper)
259 {
260 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
261
262 if (self
263 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
264 {
265 self->release();
266 return 0;
267 }
268
269 return self;
270 }
271
272 // Can't leave this method abstract, but it should never be called directly.
273 bool IOMemoryDescriptor::initWithOptions(void * buffers,
274 UInt32 count,
275 UInt32 offset,
276 task_t task,
277 IOOptionBits options,
278 IOMapper * mapper)
279 {
280 // @@@ gvdl: Should I panic?
281 panic("IOMD::initWithOptions called\n");
282 return 0;
283 }
284
285 IOMemoryDescriptor *
286 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
287 UInt32 withCount,
288 IODirection direction,
289 bool asReference)
290 {
291 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
292 if (that)
293 {
294 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
295 return that;
296
297 that->release();
298 }
299 return 0;
300 }
301
302 IOMemoryDescriptor *
303 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
304 IOByteCount offset,
305 IOByteCount length,
306 IODirection direction)
307 {
308 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
309
310 if (self && !self->initSubRange(of, offset, length, direction)) {
311 self->release();
312 self = 0;
313 }
314 return self;
315 }
316
317 /*
318 * initWithAddress:
319 *
320 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
321 * relative to the specified task. If no task is supplied, the kernel
322 * task is implied.
323 *
324 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
325 * initWithRanges again on an existing instance -- note this behavior
326 * is not commonly supported in other I/O Kit classes, although it is
327 * supported here.
328 */
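/*
 * Illustrative re-use (a minimal sketch; "md" is an existing
 * IOGeneralMemoryDescriptor and "newBuf"/"newLen" are assumed
 * caller-supplied):
 *
 *     md->initWithAddress((void *) newBuf, newLen, kIODirectionIn);
 */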
329 bool
330 IOGeneralMemoryDescriptor::initWithAddress(void * address,
331 IOByteCount withLength,
332 IODirection withDirection)
333 {
334 _singleRange.v.address = (vm_address_t) address;
335 _singleRange.v.length = withLength;
336
337 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
338 }
339
340 bool
341 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
342 IOByteCount withLength,
343 IODirection withDirection,
344 task_t withTask)
345 {
346 _singleRange.v.address = address;
347 _singleRange.v.length = withLength;
348
349 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
350 }
351
352 bool
353 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
354 IOPhysicalAddress address,
355 IOByteCount withLength,
356 IODirection withDirection )
357 {
358 _singleRange.p.address = address;
359 _singleRange.p.length = withLength;
360
361 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
362 }
363
364 bool
365 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
366 IOPhysicalRange * ranges,
367 UInt32 count,
368 IODirection direction,
369 bool reference)
370 {
371 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
372
373 if (reference)
374 mdOpts |= kIOMemoryAsReference;
375
376 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
377 }
378
379 bool
380 IOGeneralMemoryDescriptor::initWithRanges(
381 IOVirtualRange * ranges,
382 UInt32 count,
383 IODirection direction,
384 task_t task,
385 bool reference)
386 {
387 IOOptionBits mdOpts = direction;
388
389 if (reference)
390 mdOpts |= kIOMemoryAsReference;
391
392 if (task) {
393 mdOpts |= kIOMemoryTypeVirtual;
394 if (task == kernel_task)
395 mdOpts |= kIOMemoryAutoPrepare;
396 }
397 else
398 mdOpts |= kIOMemoryTypePhysical;
399
400 // @@@ gvdl: Need to remove this
401 // Auto-prepare if this is a kernel memory descriptor as very few
402 // clients bother to prepare() kernel memory.
403 // But it has been enforced so what are you going to do?
404
405 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
406 }
407
408 /*
409 * initWithOptions:
410 *
411  * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
412  * address ranges from a given task, several physical ranges, or finally a UPL
413  * from the ubc system.
414 *
415 * Passing the ranges as a reference will avoid an extra allocation.
416 *
417 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
418 * existing instance -- note this behavior is not commonly supported in other
419 * I/O Kit classes, although it is supported here.
420 */
421
422 enum ioPLBlockFlags {
423 kIOPLOnDevice = 0x00000001,
424 kIOPLExternUPL = 0x00000002,
425 };
426
427 struct ioPLBlock {
428 upl_t fIOPL;
429 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
430 vm_offset_t fPageInfo; // Pointer to page list or index into it
431 ppnum_t fMappedBase; // Page number of first page in this iopl
432 unsigned int fPageOffset; // Offset within first page of iopl
433 unsigned int fFlags; // Flags
434 };
435
436 struct ioGMDData {
437 IOMapper *fMapper;
438 unsigned int fPageCnt;
439 upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this
440 // should be able to use upl directly
441 ioPLBlock fBlocks[0];
442 };
443
444 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
445 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
446 #define getNumIOPL(d,len) \
447 ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
448 #define getPageList(d) (&(d->fPageList[0]))
449 #define computeDataSize(p, u) \
450 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
451
452 bool
453 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
454 UInt32 count,
455 UInt32 offset,
456 task_t task,
457 IOOptionBits options,
458 IOMapper * mapper)
459 {
460
461 switch (options & kIOMemoryTypeMask) {
462 case kIOMemoryTypeVirtual:
463 assert(task);
464 if (!task)
465 return false;
466 else
467 break;
468
469 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
470 mapper = kIOMapperNone;
471 case kIOMemoryTypeUPL:
472 assert(!task);
473 break;
474 default:
475 panic("IOGMD::iWO(): bad type"); // @@@ gvdl: for testing
476 return false; /* bad argument */
477 }
478
479 assert(buffers);
480 assert(count);
481
482 /*
483 * We can check the _initialized instance variable before having ever set
484 * it to an initial value because I/O Kit guarantees that all our instance
485 * variables are zeroed on an object's allocation.
486 */
487
488 if (_initialized) {
489 /*
490 * An existing memory descriptor is being retargeted to point to
491 * somewhere else. Clean up our present state.
492 */
493
494 while (_wireCount)
495 complete();
496 if (_kernPtrAligned)
497 unmapFromKernel();
498 if (_ranges.v && _rangesIsAllocated)
499 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
500 }
501 else {
502 if (!super::init())
503 return false;
504 _initialized = true;
505 }
506
507 // Grab the appropriate mapper
508 if (mapper == kIOMapperNone)
509 mapper = 0; // No Mapper
510 else if (!mapper) {
511 IOMapper::checkForSystemMapper();
512 gIOSystemMapper = mapper = IOMapper::gSystem;
513 }
514
515 _flags = options;
516 _task = task;
517
518 // DEPRECATED variable initialisation
519 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
520 _position = 0;
521 _kernPtrAligned = 0;
522 _cachedPhysicalAddress = 0;
523 _cachedVirtualAddress = 0;
524
525 if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) {
526
527 ioGMDData *dataP;
528 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
529
530 if (!_memoryEntries) {
531 _memoryEntries = OSData::withCapacity(dataSize);
532 if (!_memoryEntries)
533 return false;
534 }
535 else if (!_memoryEntries->initWithCapacity(dataSize))
536 return false;
537
538 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
539 dataP = getDataP(_memoryEntries);
540 dataP->fMapper = mapper;
541 dataP->fPageCnt = 0;
542
543 _wireCount++; // UPLs start out life wired
544
545 _length = count;
546 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
547
548 ioPLBlock iopl;
549 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
550
551 iopl.fIOPL = (upl_t) buffers;
552         // Set the flag kIOPLOnDevice conveniently equal to 1
553 iopl.fFlags = pageList->device | kIOPLExternUPL;
554 iopl.fIOMDOffset = 0;
555 if (!pageList->device) {
556             // @@@ gvdl: Ask JoeS: are the pages contiguous with the list,
557             // or is there a chance that we may be inserting 0 phys_addrs?
558 // Pre-compute the offset into the UPL's page list
559 pageList = &pageList[atop_32(offset)];
560 offset &= PAGE_MASK;
561 if (mapper) {
562 iopl.fMappedBase = mapper->iovmAlloc(_pages);
563 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
564 }
565 else
566 iopl.fMappedBase = 0;
567 }
568 else
569 iopl.fMappedBase = 0;
570 iopl.fPageInfo = (vm_address_t) pageList;
571 iopl.fPageOffset = offset;
572
573 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
574 }
575 else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
576 IOVirtualRange *ranges = (IOVirtualRange *) buffers;
577
578 /*
579 * Initialize the memory descriptor.
580 */
581
582 _length = 0;
583 _pages = 0;
584 for (unsigned ind = 0; ind < count; ind++) {
585 IOVirtualRange cur = ranges[ind];
586
587 _length += cur.length;
588 _pages += atop_32(cur.address + cur.length + PAGE_MASK)
589 - atop_32(cur.address);
590 }
591
592 _ranges.v = 0;
593 _rangesIsAllocated = !(options & kIOMemoryAsReference);
594 _rangesCount = count;
595
596 if (options & kIOMemoryAsReference)
597 _ranges.v = ranges;
598 else {
599 _ranges.v = IONew(IOVirtualRange, count);
600 if (!_ranges.v)
601 return false;
602 bcopy(/* from */ ranges, _ranges.v,
603 count * sizeof(IOVirtualRange));
604 }
605
606 // Auto-prepare memory at creation time.
607         // Implied completion when descriptor is freed
608 if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
609             _wireCount++;       // Physical MDs start out wired
610 else { /* kIOMemoryTypeVirtual */
611 ioGMDData *dataP;
612 unsigned int dataSize =
613 computeDataSize(_pages, /* upls */ _rangesCount * 2);
614
615 if (!_memoryEntries) {
616 _memoryEntries = OSData::withCapacity(dataSize);
617 if (!_memoryEntries)
618 return false;
619 }
620 else if (!_memoryEntries->initWithCapacity(dataSize))
621 return false;
622
623 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
624 dataP = getDataP(_memoryEntries);
625 dataP->fMapper = mapper;
626 dataP->fPageCnt = _pages;
627
628 if ((_flags & kIOMemoryAutoPrepare)
629 && prepare() != kIOReturnSuccess)
630 return false;
631 }
632 }
633
634 return true;
635 }
636
637 /*
638 * free
639 *
640 * Free resources.
641 */
642 void IOGeneralMemoryDescriptor::free()
643 {
644 LOCK;
645 if( reserved)
646 reserved->memory = 0;
647 UNLOCK;
648
649 while (_wireCount)
650 complete();
651 if (_memoryEntries)
652 _memoryEntries->release();
653
654 if (_kernPtrAligned)
655 unmapFromKernel();
656 if (_ranges.v && _rangesIsAllocated)
657 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
658
659 if (reserved && reserved->devicePager)
660 device_pager_deallocate( (memory_object_t) reserved->devicePager );
661
662 // memEntry holds a ref on the device pager which owns reserved
663 // (ExpansionData) so no reserved access after this point
664 if (_memEntry)
665 ipc_port_release_send( (ipc_port_t) _memEntry );
666
667 super::free();
668 }
669
670 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
671 /* DEPRECATED */ {
672 panic("IOGMD::unmapFromKernel deprecated");
673 /* DEPRECATED */ }
674 /* DEPRECATED */
675 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
676 /* DEPRECATED */ {
677 panic("IOGMD::mapIntoKernel deprecated");
678 /* DEPRECATED */ }
679
680 /*
681 * getDirection:
682 *
683 * Get the direction of the transfer.
684 */
685 IODirection IOMemoryDescriptor::getDirection() const
686 {
687 return _direction;
688 }
689
690 /*
691 * getLength:
692 *
693 * Get the length of the transfer (over all ranges).
694 */
695 IOByteCount IOMemoryDescriptor::getLength() const
696 {
697 return _length;
698 }
699
700 void IOMemoryDescriptor::setTag( IOOptionBits tag )
701 {
702 _tag = tag;
703 }
704
705 IOOptionBits IOMemoryDescriptor::getTag( void )
706 {
707 return( _tag);
708 }
709
710 // @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
711 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
712 IOByteCount * length )
713 {
714 IOPhysicalAddress physAddr = 0;
715
716 if( prepare() == kIOReturnSuccess) {
717 physAddr = getPhysicalSegment( offset, length );
718 complete();
719 }
720
721 return( physAddr );
722 }
723
724 IOByteCount IOMemoryDescriptor::readBytes
725 (IOByteCount offset, void *bytes, IOByteCount length)
726 {
727 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
728 IOByteCount remaining;
729
730     // Assert that this entire I/O is within the available range
731 assert(offset < _length);
732 assert(offset + length <= _length);
733 if (offset >= _length) {
734 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
735 return 0;
736 }
737
738 remaining = length = min(length, _length - offset);
739 while (remaining) { // (process another target segment?)
740 addr64_t srcAddr64;
741 IOByteCount srcLen;
742
743 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
744 if (!srcAddr64)
745 break;
746
747 // Clip segment length to remaining
748 if (srcLen > remaining)
749 srcLen = remaining;
750
751 copypv(srcAddr64, dstAddr, srcLen,
752 cppvPsrc | cppvFsnk | cppvKmap);
753
754 dstAddr += srcLen;
755 offset += srcLen;
756 remaining -= srcLen;
757 }
758
759 assert(!remaining);
760
761 return length - remaining;
762 }
763
764 IOByteCount IOMemoryDescriptor::writeBytes
765 (IOByteCount offset, const void *bytes, IOByteCount length)
766 {
767 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
768 IOByteCount remaining;
769
770     // Assert that this entire I/O is within the available range
771 assert(offset < _length);
772 assert(offset + length <= _length);
773
774 assert( !(kIOMemoryPreparedReadOnly & _flags) );
775
776 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
777 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
778 return 0;
779 }
780
781 remaining = length = min(length, _length - offset);
782 while (remaining) { // (process another target segment?)
783 addr64_t dstAddr64;
784 IOByteCount dstLen;
785
786 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
787 if (!dstAddr64)
788 break;
789
790 // Clip segment length to remaining
791 if (dstLen > remaining)
792 dstLen = remaining;
793
794 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
795 cppvPsnk | cppvFsnk | cppvNoModSnk | cppvKmap);
796
797 srcAddr += dstLen;
798 offset += dstLen;
799 remaining -= dstLen;
800 }
801
802 assert(!remaining);
803
804 return length - remaining;
805 }
806
807 // osfmk/device/iokit_rpc.c
808 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
809
810 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
811 /* DEPRECATED */ {
812 panic("IOGMD::setPosition deprecated");
813 /* DEPRECATED */ }
814
815 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
816 (IOByteCount offset, IOByteCount *lengthOfSegment)
817 {
818 IOPhysicalAddress address = 0;
819 IOPhysicalLength length = 0;
820
821 // assert(offset <= _length);
822 if (offset < _length) // (within bounds?)
823 {
824 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
825 unsigned int ind;
826
827 // Physical address based memory descriptor
828
829 // Find offset within descriptor and make it relative
830 // to the current _range.
831 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
832 offset -= _ranges.p[ind].length;
833
834 IOPhysicalRange cur = _ranges.p[ind];
835 address = cur.address + offset;
836 length = cur.length - offset;
837
838 // see how far we can coalesce ranges
839 for (++ind; ind < _rangesCount; ind++) {
840 cur = _ranges.p[ind];
841
842 if (address + length != cur.address)
843 break;
844
845 length += cur.length;
846 }
847
848 // @@@ gvdl: should assert(address);
849 // but can't as NVidia GeForce creates a bogus physical mem
850 {
851 assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount));
852 }
853 assert(length);
854 }
855 else do {
856 // We need wiring & we are wired.
857 assert(_wireCount);
858
859 if (!_wireCount)
860 {
861 panic("IOGMD: not wired for getPhysicalSegment()");
862 continue;
863 }
864
865 assert(_memoryEntries);
866
867 ioGMDData * dataP = getDataP(_memoryEntries);
868 const ioPLBlock *ioplList = getIOPLList(dataP);
869 UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength());
870 upl_page_info_t *pageList = getPageList(dataP);
871
872 assert(numIOPLs > 0);
873
874 // Scan through iopl info blocks looking for block containing offset
875 for (ind = 1; ind < numIOPLs; ind++) {
876 if (offset < ioplList[ind].fIOMDOffset)
877 break;
878 }
879
880 // Go back to actual range as search goes past it
881 ioPLBlock ioplInfo = ioplList[ind - 1];
882
883 if (ind < numIOPLs)
884 length = ioplList[ind].fIOMDOffset;
885 else
886 length = _length;
887 length -= offset; // Remainder within iopl
888
889         // Subtract the offset of this iopl within the total list
890 offset -= ioplInfo.fIOMDOffset;
891
892 // This is a mapped IOPL so we just need to compute an offset
893 // relative to the mapped base.
894 if (ioplInfo.fMappedBase) {
895 offset += (ioplInfo.fPageOffset & PAGE_MASK);
896 address = ptoa_32(ioplInfo.fMappedBase) + offset;
897 continue;
898 }
899
900 // Currently the offset is rebased into the current iopl.
901 // Now add the iopl 1st page offset.
902 offset += ioplInfo.fPageOffset;
903
904 // For external UPLs the fPageInfo field points directly to
905 // the upl's upl_page_info_t array.
906 if (ioplInfo.fFlags & kIOPLExternUPL)
907 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
908 else
909 pageList = &pageList[ioplInfo.fPageInfo];
910
911 // Check for direct device non-paged memory
912 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
913 address = ptoa_32(pageList->phys_addr) + offset;
914 continue;
915 }
916
917         // Now we need to compute the index into the pageList
918 ind = atop_32(offset);
919 offset &= PAGE_MASK;
920
921 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
922 address = ptoa_32(pageAddr) + offset;
923
924         // Check whether the remaining data in this upl is longer than the
925         // remainder of the current page; if so, scan ahead for
926         // contiguous pages
927 if (length > PAGE_SIZE - offset) {
928 // See if the next page is contiguous. Stop looking when we hit
929 // the end of this upl, which is indicated by the
930 // contigLength >= length.
931 IOByteCount contigLength = PAGE_SIZE - offset;
932
933 // Look for contiguous segment
934 while (contigLength < length
935 && ++pageAddr == pageList[++ind].phys_addr) {
936 contigLength += PAGE_SIZE;
937 }
938 if (length > contigLength)
939 length = contigLength;
940 }
941
942 assert(address);
943 assert(length);
944
945 } while (0);
946
947 if (!address)
948 length = 0;
949 }
950
951 if (lengthOfSegment)
952 *lengthOfSegment = length;
953
954 return address;
955 }
956
957 addr64_t IOMemoryDescriptor::getPhysicalSegment64
958 (IOByteCount offset, IOByteCount *lengthOfSegment)
959 {
960 IOPhysicalAddress phys32;
961 IOByteCount length;
962 addr64_t phys64;
963
964 phys32 = getPhysicalSegment(offset, lengthOfSegment);
965 if (!phys32)
966 return 0;
967
968 if (gIOSystemMapper)
969 {
970 IOByteCount origLen;
971
972 phys64 = gIOSystemMapper->mapAddr(phys32);
973 origLen = *lengthOfSegment;
974 length = page_size - (phys64 & (page_size - 1));
975 while ((length < origLen)
976 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
977 length += page_size;
978 if (length > origLen)
979 length = origLen;
980
981 *lengthOfSegment = length;
982 }
983 else
984 phys64 = (addr64_t) phys32;
985
986 return phys64;
987 }
988
989 IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment
990 (IOByteCount offset, IOByteCount *lengthOfSegment)
991 {
992 IOPhysicalAddress address = 0;
993 IOPhysicalLength length = 0;
994
995 assert(offset <= _length);
996
997 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL)
998 return super::getSourceSegment( offset, lengthOfSegment );
999
1000 if ( offset < _length ) // (within bounds?)
1001 {
1002 unsigned rangesIndex = 0;
1003
1004 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
1005 {
1006 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
1007 }
1008
1009 address = _ranges.v[rangesIndex].address + offset;
1010 length = _ranges.v[rangesIndex].length - offset;
1011
1012 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
1013 {
1014 if ( address + length != _ranges.v[rangesIndex].address ) break;
1015
1016 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
1017 }
1018
1019 assert(address);
1020 if ( address == 0 ) length = 0;
1021 }
1022
1023 if ( lengthOfSegment ) *lengthOfSegment = length;
1024
1025 return address;
1026 }
1027
1028 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1029 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1030 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1031 /* DEPRECATED */ {
1032 if (_task == kernel_task)
1033 return (void *) getSourceSegment(offset, lengthOfSegment);
1034 else
1035 panic("IOGMD::getVirtualSegment deprecated");
1036
1037 return 0;
1038 /* DEPRECATED */ }
1039 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
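/*
 * Illustrative replacements (a minimal sketch; "md", "offset", "buf" and
 * "len" are assumed caller-supplied and not part of this file):
 *
 *     // Copy data out of the descriptor rather than touching its pages:
 *     IOByteCount copied = md->readBytes(offset, buf, len);
 *
 *     // Or establish a mapping and use its virtual address:
 *     IOMemoryMap * map = md->map(0);
 *     if (map) {
 *         void * p = (void *) map->getVirtualAddress();
 *         // ... use p, then drop the mapping ...
 *         map->release();
 *     }
 */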
1040
1041 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1042 {
1043 IOReturn error = kIOReturnNoMemory;
1044 ioGMDData *dataP;
1045 ppnum_t mapBase = 0;
1046 IOMapper *mapper;
1047
1048 assert(!_wireCount);
1049
1050 dataP = getDataP(_memoryEntries);
1051 mapper = dataP->fMapper;
1052 if (mapper && _pages)
1053 mapBase = mapper->iovmAlloc(_pages);
1054
1055 // Note that appendBytes(NULL) zeros the data up to the
1056 // desired length.
1057 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1058     dataP = 0;  // May no longer be valid so let's not get tempted.
1059
1060 if (forDirection == kIODirectionNone)
1061 forDirection = _direction;
1062
1063 int uplFlags; // This Mem Desc's default flags for upl creation
1064 switch (forDirection)
1065 {
1066 case kIODirectionOut:
1067 // Pages do not need to be marked as dirty on commit
1068 uplFlags = UPL_COPYOUT_FROM;
1069 _flags |= kIOMemoryPreparedReadOnly;
1070 break;
1071
1072 case kIODirectionIn:
1073 default:
1074 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1075 break;
1076 }
1077 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1078
1079 //
1080 // Check user read/write access to the data buffer.
1081 //
1082 unsigned int pageIndex = 0;
1083 IOByteCount mdOffset = 0;
1084 vm_map_t curMap;
1085 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1086 curMap = 0;
1087 else
1088 { curMap = get_task_map(_task); }
1089
1090 for (UInt range = 0; range < _rangesCount; range++) {
1091 ioPLBlock iopl;
1092 IOVirtualRange curRange = _ranges.v[range];
1093 vm_address_t startPage;
1094 IOByteCount numBytes;
1095
1096 startPage = trunc_page_32(curRange.address);
1097 iopl.fPageOffset = (short) curRange.address & PAGE_MASK;
1098 if (mapper)
1099 iopl.fMappedBase = mapBase + pageIndex;
1100 else
1101 iopl.fMappedBase = 0;
1102 numBytes = iopl.fPageOffset + curRange.length;
1103
1104 while (numBytes) {
1105 dataP = getDataP(_memoryEntries);
1106 vm_map_t theMap =
1107 (curMap)? curMap
1108 : IOPageableMapForAddress(startPage);
1109 upl_page_info_array_t pageInfo = getPageList(dataP);
1110 int ioplFlags = uplFlags;
1111 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1112
1113 vm_size_t ioplSize = round_page_32(numBytes);
1114 unsigned int numPageInfo = atop_32(ioplSize);
1115 error = vm_map_get_upl(theMap,
1116 startPage,
1117 &ioplSize,
1118 &iopl.fIOPL,
1119 baseInfo,
1120 &numPageInfo,
1121 &ioplFlags,
1122 false);
1123 assert(ioplSize);
1124 if (error != KERN_SUCCESS)
1125 goto abortExit;
1126
1127 error = kIOReturnNoMemory;
1128
1129 if (baseInfo->device) {
1130 numPageInfo = 1;
1131 iopl.fFlags = kIOPLOnDevice;
1132 // Don't translate device memory at all
1133 if (mapper && mapBase) {
1134 mapper->iovmFree(mapBase, _pages);
1135 mapBase = 0;
1136 iopl.fMappedBase = 0;
1137 }
1138 }
1139 else {
1140 iopl.fFlags = 0;
1141 if (mapper)
1142 mapper->iovmInsert(mapBase, pageIndex,
1143 baseInfo, numPageInfo);
1144 }
1145
1146 iopl.fIOMDOffset = mdOffset;
1147 iopl.fPageInfo = pageIndex;
1148
1149 if (_flags & kIOMemoryAutoPrepare)
1150 {
1151 kernel_upl_commit(iopl.fIOPL, 0, 0);
1152 iopl.fIOPL = 0;
1153 }
1154
1155 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1156                 // Clean up partially created and unsaved iopl
1157 if (iopl.fIOPL)
1158 kernel_upl_abort(iopl.fIOPL, 0);
1159 goto abortExit;
1160 }
1161
1162             // Check for multiple iopls in one virtual range
1163 pageIndex += numPageInfo;
1164 mdOffset -= iopl.fPageOffset;
1165 if (ioplSize < numBytes) {
1166 numBytes -= ioplSize;
1167 startPage += ioplSize;
1168 mdOffset += ioplSize;
1169 iopl.fPageOffset = 0;
1170 if (mapper)
1171 iopl.fMappedBase = mapBase + pageIndex;
1172 }
1173 else {
1174 mdOffset += numBytes;
1175 break;
1176 }
1177 }
1178 }
1179
1180 return kIOReturnSuccess;
1181
1182 abortExit:
1183 {
1184 dataP = getDataP(_memoryEntries);
1185 UInt done = getNumIOPL(dataP, _memoryEntries->getLength());
1186 ioPLBlock *ioplList = getIOPLList(dataP);
1187
1188 for (UInt range = 0; range < done; range++)
1189 {
1190 if (ioplList[range].fIOPL)
1191 kernel_upl_abort(ioplList[range].fIOPL, 0);
1192 }
1193
1194 if (mapper && mapBase)
1195 mapper->iovmFree(mapBase, _pages);
1196 }
1197
1198 return error;
1199 }
1200
1201 /*
1202 * prepare
1203 *
1204 * Prepare the memory for an I/O transfer. This involves paging in
1205 * the memory, if necessary, and wiring it down for the duration of
1206 * the transfer. The complete() method completes the processing of
1207  * the memory after the I/O transfer finishes.  This method needn't be
1208  * called for non-pageable memory.
1209 */
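/*
 * Illustrative pairing (a minimal sketch; "md" is assumed to be a
 * caller-supplied descriptor over pageable memory):
 *
 *     if (kIOReturnSuccess == md->prepare(kIODirectionIn)) {
 *         // ... run the transfer, e.g. via getPhysicalSegment() ...
 *         md->complete(kIODirectionIn);
 *     }
 */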
1210 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1211 {
1212 IOReturn error = kIOReturnSuccess;
1213
1214 if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
1215 error = wireVirtual(forDirection);
1216 if (error)
1217 return error;
1218 }
1219
1220 _wireCount++;
1221
1222 return kIOReturnSuccess;
1223 }
1224
1225 /*
1226 * complete
1227 *
1228 * Complete processing of the memory after an I/O transfer finishes.
1229 * This method should not be called unless a prepare was previously
1230  * issued; prepare() and complete() must occur in pairs, before
1231  * and after an I/O transfer involving pageable memory.
1232 */
1233
1234 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1235 {
1236 assert(_wireCount);
1237
1238 if (!_wireCount)
1239 return kIOReturnSuccess;
1240
1241 _wireCount--;
1242 if (!_wireCount) {
1243 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1244 /* kIOMemoryTypePhysical */
1245 // DO NOTHING
1246 }
1247 else {
1248 ioGMDData * dataP = getDataP(_memoryEntries);
1249 ioPLBlock *ioplList = getIOPLList(dataP);
1250 UInt count = getNumIOPL(dataP, _memoryEntries->getLength());
1251
1252 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1253 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1254
1255 // Only complete iopls that we created which are for TypeVirtual
1256 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
1257 for (UInt ind = 0; ind < count; ind++)
1258 if (ioplList[ind].fIOPL)
1259 kernel_upl_commit(ioplList[ind].fIOPL, 0, 0);
1260 }
1261
1262 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1263 }
1264 }
1265 return kIOReturnSuccess;
1266 }
1267
1268 IOReturn IOGeneralMemoryDescriptor::doMap(
1269 vm_map_t addressMap,
1270 IOVirtualAddress * atAddress,
1271 IOOptionBits options,
1272 IOByteCount sourceOffset,
1273 IOByteCount length )
1274 {
1275 kern_return_t kr;
1276 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1277
1278 // mapping source == dest? (could be much better)
1279 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1280 && (1 == _rangesCount) && (0 == sourceOffset)
1281 && (length <= _ranges.v[0].length) ) {
1282 *atAddress = _ranges.v[0].address;
1283 return( kIOReturnSuccess );
1284 }
1285
1286 if( 0 == sharedMem) {
1287
1288 vm_size_t size = _pages << PAGE_SHIFT;
1289
1290 if( _task) {
1291 #ifndef i386
1292 vm_size_t actualSize = size;
1293 kr = mach_make_memory_entry( get_task_map(_task),
1294 &actualSize, _ranges.v[0].address,
1295 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1296 NULL );
1297
1298 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
1299 #if IOASSERT
1300 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
1301 _ranges.v[0].address, (UInt32)actualSize, size);
1302 #endif
1303 kr = kIOReturnVMError;
1304 ipc_port_release_send( sharedMem );
1305 }
1306
1307 if( KERN_SUCCESS != kr)
1308 #endif /* i386 */
1309 sharedMem = MACH_PORT_NULL;
1310
1311 } else do {
1312
1313 memory_object_t pager;
1314 unsigned int flags = 0;
1315 addr64_t pa;
1316 IOPhysicalLength segLen;
1317
1318 pa = getPhysicalSegment64( sourceOffset, &segLen );
1319
1320 if( !reserved) {
1321 reserved = IONew( ExpansionData, 1 );
1322 if( !reserved)
1323 continue;
1324 }
1325 reserved->pagerContig = (1 == _rangesCount);
1326 reserved->memory = this;
1327
1328             /* What cache mode do we need? */
1329 switch(options & kIOMapCacheMask ) {
1330
1331 case kIOMapDefaultCache:
1332 default:
1333 flags = IODefaultCacheBits(pa);
1334 break;
1335
1336 case kIOMapInhibitCache:
1337 flags = DEVICE_PAGER_CACHE_INHIB |
1338 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1339 break;
1340
1341 case kIOMapWriteThruCache:
1342 flags = DEVICE_PAGER_WRITE_THROUGH |
1343 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1344 break;
1345
1346 case kIOMapCopybackCache:
1347 flags = DEVICE_PAGER_COHERENT;
1348 break;
1349
1350 case kIOMapWriteCombineCache:
1351 flags = DEVICE_PAGER_CACHE_INHIB |
1352 DEVICE_PAGER_COHERENT;
1353 break;
1354 }
1355
1356 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1357
1358 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1359 size, flags);
1360 assert( pager );
1361
1362 if( pager) {
1363 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1364 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1365
1366 assert( KERN_SUCCESS == kr );
1367 if( KERN_SUCCESS != kr) {
1368 device_pager_deallocate( pager );
1369 pager = MACH_PORT_NULL;
1370 sharedMem = MACH_PORT_NULL;
1371 }
1372 }
1373 if( pager && sharedMem)
1374 reserved->devicePager = pager;
1375 else {
1376 IODelete( reserved, ExpansionData, 1 );
1377 reserved = 0;
1378 }
1379
1380 } while( false );
1381
1382 _memEntry = (void *) sharedMem;
1383 }
1384
1385 #ifndef i386
1386 if( 0 == sharedMem)
1387 kr = kIOReturnVMError;
1388 else
1389 #endif
1390 kr = super::doMap( addressMap, atAddress,
1391 options, sourceOffset, length );
1392
1393 return( kr );
1394 }
1395
1396 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1397 vm_map_t addressMap,
1398 IOVirtualAddress logical,
1399 IOByteCount length )
1400 {
1401 // could be much better
1402 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)
1403 && (logical == _ranges.v[0].address)
1404 && (length <= _ranges.v[0].length) )
1405 return( kIOReturnSuccess );
1406
1407 return( super::doUnmap( addressMap, logical, length ));
1408 }
1409
1410 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1411
1412 extern "C" {
1413 // osfmk/device/iokit_rpc.c
1414 extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1415 vm_size_t length, unsigned int mapFlags);
1416 extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
1417 };
1418
1419 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1420
1421 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1422
1423 /* inline function implementation */
1424 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1425 { return( getPhysicalSegment( 0, 0 )); }
1426
1427 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1428
1429 class _IOMemoryMap : public IOMemoryMap
1430 {
1431 OSDeclareDefaultStructors(_IOMemoryMap)
1432
1433 IOMemoryDescriptor * memory;
1434 IOMemoryMap * superMap;
1435 IOByteCount offset;
1436 IOByteCount length;
1437 IOVirtualAddress logical;
1438 task_t addressTask;
1439 vm_map_t addressMap;
1440 IOOptionBits options;
1441
1442 protected:
1443 virtual void taggedRelease(const void *tag = 0) const;
1444 virtual void free();
1445
1446 public:
1447
1448 // IOMemoryMap methods
1449 virtual IOVirtualAddress getVirtualAddress();
1450 virtual IOByteCount getLength();
1451 virtual task_t getAddressTask();
1452 virtual IOMemoryDescriptor * getMemoryDescriptor();
1453 virtual IOOptionBits getMapOptions();
1454
1455 virtual IOReturn unmap();
1456 virtual void taskDied();
1457
1458 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1459 IOByteCount * length);
1460
1461 // for IOMemoryDescriptor use
1462 _IOMemoryMap * copyCompatible(
1463 IOMemoryDescriptor * owner,
1464 task_t intoTask,
1465 IOVirtualAddress toAddress,
1466 IOOptionBits options,
1467 IOByteCount offset,
1468 IOByteCount length );
1469
1470 bool initCompatible(
1471 IOMemoryDescriptor * memory,
1472 IOMemoryMap * superMap,
1473 IOByteCount offset,
1474 IOByteCount length );
1475
1476 bool initWithDescriptor(
1477 IOMemoryDescriptor * memory,
1478 task_t intoTask,
1479 IOVirtualAddress toAddress,
1480 IOOptionBits options,
1481 IOByteCount offset,
1482 IOByteCount length );
1483
1484 IOReturn redirect(
1485 task_t intoTask, bool redirect );
1486 };
1487
1488 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1489
1490 #undef super
1491 #define super IOMemoryMap
1492
1493 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1494
1495 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1496
1497 bool _IOMemoryMap::initCompatible(
1498 IOMemoryDescriptor * _memory,
1499 IOMemoryMap * _superMap,
1500 IOByteCount _offset,
1501 IOByteCount _length )
1502 {
1503
1504 if( !super::init())
1505 return( false);
1506
1507 if( (_offset + _length) > _superMap->getLength())
1508 return( false);
1509
1510 _memory->retain();
1511 memory = _memory;
1512 _superMap->retain();
1513 superMap = _superMap;
1514
1515 offset = _offset;
1516 if( _length)
1517 length = _length;
1518 else
1519 length = _memory->getLength();
1520
1521 options = superMap->getMapOptions();
1522 logical = superMap->getVirtualAddress() + offset;
1523
1524 return( true );
1525 }
1526
1527 bool _IOMemoryMap::initWithDescriptor(
1528 IOMemoryDescriptor * _memory,
1529 task_t intoTask,
1530 IOVirtualAddress toAddress,
1531 IOOptionBits _options,
1532 IOByteCount _offset,
1533 IOByteCount _length )
1534 {
1535 bool ok;
1536
1537 if( (!_memory) || (!intoTask) || !super::init())
1538 return( false);
1539
1540 if( (_offset + _length) > _memory->getLength())
1541 return( false);
1542
1543 addressMap = get_task_map(intoTask);
1544 if( !addressMap)
1545 return( false);
1546 vm_map_reference(addressMap);
1547
1548 _memory->retain();
1549 memory = _memory;
1550
1551 offset = _offset;
1552 if( _length)
1553 length = _length;
1554 else
1555 length = _memory->getLength();
1556
1557 addressTask = intoTask;
1558 logical = toAddress;
1559 options = _options;
1560
1561 if( options & kIOMapStatic)
1562 ok = true;
1563 else
1564 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1565 options, offset, length ));
1566 if( !ok) {
1567 logical = 0;
1568 memory->release();
1569 memory = 0;
1570 vm_map_deallocate(addressMap);
1571 addressMap = 0;
1572 }
1573 return( ok );
1574 }
1575
1576 struct IOMemoryDescriptorMapAllocRef
1577 {
1578 ipc_port_t sharedMem;
1579 vm_size_t size;
1580 vm_offset_t mapped;
1581 IOByteCount sourceOffset;
1582 IOOptionBits options;
1583 };
1584
1585 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
1586 {
1587 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
1588 IOReturn err;
1589
1590 do {
1591 if( ref->sharedMem) {
1592 vm_prot_t prot = VM_PROT_READ
1593 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
1594
1595 // set memory entry cache
1596 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
1597 switch (ref->options & kIOMapCacheMask)
1598 {
1599 case kIOMapInhibitCache:
1600 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
1601 break;
1602
1603 case kIOMapWriteThruCache:
1604 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
1605 break;
1606
1607 case kIOMapWriteCombineCache:
1608 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
1609 break;
1610
1611 case kIOMapCopybackCache:
1612 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
1613 break;
1614
1615 case kIOMapDefaultCache:
1616 default:
1617 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
1618 break;
1619 }
1620
1621 vm_size_t unused = 0;
1622
1623 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
1624 memEntryCacheMode, NULL, ref->sharedMem );
1625 if (KERN_SUCCESS != err)
1626 IOLog("MAP_MEM_ONLY failed %d\n", err);
1627
1628 err = vm_map( map,
1629 &ref->mapped,
1630 ref->size, 0 /* mask */,
1631 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1632 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1633 ref->sharedMem, ref->sourceOffset,
1634 false, // copy
1635 prot, // cur
1636 prot, // max
1637 VM_INHERIT_NONE);
1638
1639 if( KERN_SUCCESS != err) {
1640 ref->mapped = 0;
1641 continue;
1642 }
1643
1644 } else {
1645
1646 err = vm_allocate( map, &ref->mapped, ref->size,
1647 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1648 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1649
1650 if( KERN_SUCCESS != err) {
1651 ref->mapped = 0;
1652 continue;
1653 }
1654
1655 // we have to make sure that these guys don't get copied if we fork.
1656 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
1657 assert( KERN_SUCCESS == err );
1658 }
1659
1660 } while( false );
1661
1662 return( err );
1663 }
1664
1665
1666 IOReturn IOMemoryDescriptor::doMap(
1667 vm_map_t addressMap,
1668 IOVirtualAddress * atAddress,
1669 IOOptionBits options,
1670 IOByteCount sourceOffset,
1671 IOByteCount length )
1672 {
1673 IOReturn err = kIOReturnSuccess;
1674 memory_object_t pager;
1675 vm_address_t logical;
1676 IOByteCount pageOffset;
1677 IOPhysicalAddress sourceAddr;
1678 IOMemoryDescriptorMapAllocRef ref;
1679
1680 ref.sharedMem = (ipc_port_t) _memEntry;
1681 ref.sourceOffset = sourceOffset;
1682 ref.options = options;
1683
1684 do {
1685
1686 if( 0 == length)
1687 length = getLength();
1688
1689 sourceAddr = getSourceSegment( sourceOffset, NULL );
1690 assert( sourceAddr );
1691 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
1692
1693 ref.size = round_page_32( length + pageOffset );
1694
1695 logical = *atAddress;
1696 if( options & kIOMapAnywhere)
1697 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1698 ref.mapped = 0;
1699 else {
1700 ref.mapped = trunc_page_32( logical );
1701 if( (logical - ref.mapped) != pageOffset) {
1702 err = kIOReturnVMError;
1703 continue;
1704 }
1705 }
1706
1707 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
1708 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1709 else
1710 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
1711
1712 if( err != KERN_SUCCESS)
1713 continue;
1714
1715 if( reserved)
1716 pager = (memory_object_t) reserved->devicePager;
1717 else
1718 pager = MACH_PORT_NULL;
1719
1720 if( !ref.sharedMem || pager )
1721 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
1722
1723 } while( false );
1724
1725 if( err != KERN_SUCCESS) {
1726 if( ref.mapped)
1727 doUnmap( addressMap, ref.mapped, ref.size );
1728 *atAddress = NULL;
1729 } else
1730 *atAddress = ref.mapped + pageOffset;
1731
1732 return( err );
1733 }
1734
1735 enum {
1736 kIOMemoryRedirected = 0x00010000
1737 };
1738
1739 IOReturn IOMemoryDescriptor::handleFault(
1740 void * _pager,
1741 vm_map_t addressMap,
1742 IOVirtualAddress address,
1743 IOByteCount sourceOffset,
1744 IOByteCount length,
1745 IOOptionBits options )
1746 {
1747 IOReturn err = kIOReturnSuccess;
1748 memory_object_t pager = (memory_object_t) _pager;
1749 vm_size_t size;
1750 vm_size_t bytes;
1751 vm_size_t page;
1752 IOByteCount pageOffset;
1753 IOPhysicalLength segLen;
1754 addr64_t physAddr;
1755
1756 if( !addressMap) {
1757
1758 if( kIOMemoryRedirected & _flags) {
1759 #ifdef DEBUG
1760 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
1761 #endif
1762 do {
1763 SLEEP;
1764 } while( kIOMemoryRedirected & _flags );
1765 }
1766
1767 return( kIOReturnSuccess );
1768 }
1769
1770 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
1771 assert( physAddr );
1772 pageOffset = physAddr - trunc_page_64( physAddr );
1773
1774 size = length + pageOffset;
1775 physAddr -= pageOffset;
1776
1777 segLen += pageOffset;
1778 bytes = size;
1779 do {
1780 // in the middle of the loop only map whole pages
1781 if( segLen >= bytes)
1782 segLen = bytes;
1783 else if( segLen != trunc_page_32( segLen))
1784 err = kIOReturnVMError;
1785 if( physAddr != trunc_page_64( physAddr))
1786 err = kIOReturnBadArgument;
1787
1788 #ifdef DEBUG
1789 if( kIOLogMapping & gIOKitDebug)
1790 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
1791 addressMap, address + pageOffset, physAddr + pageOffset,
1792 segLen - pageOffset);
1793 #endif
1794
1795
1796
1797
1798
1799 #ifdef i386
1800 /* i386 doesn't support faulting on device memory yet */
1801 if( addressMap && (kIOReturnSuccess == err))
1802 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
1803 assert( KERN_SUCCESS == err );
1804 if( err)
1805 break;
1806 #endif
1807
1808 if( pager) {
1809 if( reserved && reserved->pagerContig) {
1810 IOPhysicalLength allLen;
1811 addr64_t allPhys;
1812
1813 allPhys = getPhysicalSegment64( 0, &allLen );
1814 assert( allPhys );
1815 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
1816
1817 } else {
1818
1819 for( page = 0;
1820 (page < segLen) && (KERN_SUCCESS == err);
1821 page += page_size) {
1822 err = device_pager_populate_object(pager, sourceOffset + page,
1823 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
1824 }
1825 }
1826 assert( KERN_SUCCESS == err );
1827 if( err)
1828 break;
1829 }
1830 #ifndef i386
1831 /* *** ALERT *** */
1832 /* *** Temporary Workaround *** */
1833
1834 /* This call to vm_fault causes an early pmap level resolution */
1835 /* of the mappings created above. Need for this is in absolute */
1836 /* violation of the basic tenet that the pmap layer is a cache. */
1837 /* Further, it implies a serious I/O architectural violation on */
1838 /* the part of some user of the mapping. As of this writing, */
1839 /* the call to vm_fault is needed because the NVIDIA driver */
1840 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
1841 /* fixed as soon as possible. The NVIDIA driver should not */
1842 /* need to query for this info as it should know from the doMap */
1843 /* call where the physical memory is mapped. When a query is */
1844 /* necessary to find a physical mapping, it should be done */
1845 /* through an iokit call which includes the mapped memory */
1846 /* handle. This is required for machine architecture independence.*/
1847
1848 if(!(kIOMemoryRedirected & _flags)) {
1849 vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
1850 }
1851
1852 /* *** Temporary Workaround *** */
1853 /* *** ALERT *** */
1854 #endif
1855 sourceOffset += segLen - pageOffset;
1856 address += segLen;
1857 bytes -= segLen;
1858 pageOffset = 0;
1859
1860 } while( bytes
1861 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
1862
1863 if( bytes)
1864 err = kIOReturnBadArgument;
1865
1866 return( err );
1867 }
1868
1869 IOReturn IOMemoryDescriptor::doUnmap(
1870 vm_map_t addressMap,
1871 IOVirtualAddress logical,
1872 IOByteCount length )
1873 {
1874 IOReturn err;
1875
1876 #ifdef DEBUG
1877 if( kIOLogMapping & gIOKitDebug)
1878 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1879 addressMap, logical, length );
1880 #endif
1881
1882 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
1883
1884 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
1885 addressMap = IOPageableMapForAddress( logical );
1886
1887 err = vm_deallocate( addressMap, logical, length );
1888
1889 } else
1890 err = kIOReturnSuccess;
1891
1892 return( err );
1893 }
1894
1895 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1896 {
1897 IOReturn err;
1898 _IOMemoryMap * mapping = 0;
1899 OSIterator * iter;
1900
1901 LOCK;
1902
1903 do {
1904 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1905 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
1906 mapping->redirect( safeTask, redirect );
1907
1908 iter->release();
1909 }
1910 } while( false );
1911
1912 if( redirect)
1913 _flags |= kIOMemoryRedirected;
1914 else {
1915 _flags &= ~kIOMemoryRedirected;
1916 WAKEUP;
1917 }
1918
1919 UNLOCK;
1920
1921 // temporary binary compatibility
1922 IOSubMemoryDescriptor * subMem;
1923 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
1924 err = subMem->redirect( safeTask, redirect );
1925 else
1926 err = kIOReturnSuccess;
1927
1928 return( err );
1929 }
1930
1931 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1932 {
1933 return( _parent->redirect( safeTask, redirect ));
1934 }
1935
1936 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
1937 {
1938 IOReturn err = kIOReturnSuccess;
1939
1940 if( superMap) {
1941 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
1942 } else {
1943
1944 LOCK;
1945 if( logical && addressMap
1946 && (get_task_map( safeTask) != addressMap)
1947 && (0 == (options & kIOMapStatic))) {
1948
1949 IOUnmapPages( addressMap, logical, length );
1950 if( !redirect) {
1951 err = vm_deallocate( addressMap, logical, length );
1952 err = memory->doMap( addressMap, &logical,
1953 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
1954 offset, length );
1955 } else
1956 err = kIOReturnSuccess;
1957 #ifdef DEBUG
1958 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
1959 #endif
1960 }
1961 UNLOCK;
1962 }
1963
1964 return( err );
1965 }
1966
1967 IOReturn _IOMemoryMap::unmap( void )
1968 {
1969 IOReturn err;
1970
1971 LOCK;
1972
1973 if( logical && addressMap && (0 == superMap)
1974 && (0 == (options & kIOMapStatic))) {
1975
1976 err = memory->doUnmap( addressMap, logical, length );
1977 vm_map_deallocate(addressMap);
1978 addressMap = 0;
1979
1980 } else
1981 err = kIOReturnSuccess;
1982
1983 logical = 0;
1984
1985 UNLOCK;
1986
1987 return( err );
1988 }
1989
1990 void _IOMemoryMap::taskDied( void )
1991 {
1992 LOCK;
1993 if( addressMap) {
1994 vm_map_deallocate(addressMap);
1995 addressMap = 0;
1996 }
1997 addressTask = 0;
1998 logical = 0;
1999 UNLOCK;
2000 }
2001
2002 // Overload the release mechanism. All mappings must be a member
2003 // of a memory descriptor's _mappings set.  This means that we
2004 // always have 2 references on a mapping.  When either of these references
2005 // is released we need to free ourselves.
2006 void _IOMemoryMap::taggedRelease(const void *tag) const
2007 {
2008 LOCK;
2009 super::taggedRelease(tag, 2);
2010 UNLOCK;
2011 }
2012
2013 void _IOMemoryMap::free()
2014 {
2015 unmap();
2016
2017 if( memory) {
2018 LOCK;
2019 memory->removeMapping( this);
2020 UNLOCK;
2021 memory->release();
2022 }
2023
2024 if( superMap)
2025 superMap->release();
2026
2027 super::free();
2028 }
2029
2030 IOByteCount _IOMemoryMap::getLength()
2031 {
2032 return( length );
2033 }
2034
2035 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2036 {
2037 return( logical);
2038 }
2039
2040 task_t _IOMemoryMap::getAddressTask()
2041 {
2042 if( superMap)
2043 return( superMap->getAddressTask());
2044 else
2045 return( addressTask);
2046 }
2047
2048 IOOptionBits _IOMemoryMap::getMapOptions()
2049 {
2050 return( options);
2051 }
2052
2053 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2054 {
2055 return( memory );
2056 }
2057
2058 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2059 IOMemoryDescriptor * owner,
2060 task_t task,
2061 IOVirtualAddress toAddress,
2062 IOOptionBits _options,
2063 IOByteCount _offset,
2064 IOByteCount _length )
2065 {
2066 _IOMemoryMap * mapping;
2067
2068 if( (!task) || (task != getAddressTask()))
2069 return( 0 );
2070 if( (options ^ _options) & kIOMapReadOnly)
2071 return( 0 );
2072 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2073 && ((options ^ _options) & kIOMapCacheMask))
2074 return( 0 );
2075
2076 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2077 return( 0 );
2078
2079 if( _offset < offset)
2080 return( 0 );
2081
2082 _offset -= offset;
2083
2084 if( (_offset + _length) > length)
2085 return( 0 );
2086
2087 if( (length == _length) && (!_offset)) {
2088 retain();
2089 mapping = this;
2090
2091 } else {
2092 mapping = new _IOMemoryMap;
2093 if( mapping
2094 && !mapping->initCompatible( owner, this, _offset, _length )) {
2095 mapping->release();
2096 mapping = 0;
2097 }
2098 }
2099
2100 return( mapping );
2101 }
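/*
 * copyCompatible() summary: an existing mapping can satisfy a new request
 * only if the request targets the same task, agrees on kIOMapReadOnly and on
 * any explicitly requested cache mode, either allows kIOMapAnywhere or names
 * this mapping's address, and lies entirely within [offset, offset + length).
 * An exact match is returned retained; a proper sub-range is wrapped in a new
 * _IOMemoryMap via initCompatible().
 */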
2102
2103 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2104 IOPhysicalLength * length)
2105 {
2106 IOPhysicalAddress address;
2107
2108 LOCK;
2109 address = memory->getPhysicalSegment( offset + _offset, length );
2110 UNLOCK;
2111
2112 return( address );
2113 }
2114
2115 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2116
2117 #undef super
2118 #define super OSObject
2119
2120 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2121
2122 void IOMemoryDescriptor::initialize( void )
2123 {
2124 if( 0 == gIOMemoryLock)
2125 gIOMemoryLock = IORecursiveLockAlloc();
2126 }
2127
2128 void IOMemoryDescriptor::free( void )
2129 {
2130 if( _mappings)
2131 _mappings->release();
2132
2133 super::free();
2134 }
2135
2136 IOMemoryMap * IOMemoryDescriptor::setMapping(
2137 task_t intoTask,
2138 IOVirtualAddress mapAddress,
2139 IOOptionBits options )
2140 {
2141 _IOMemoryMap * map;
2142
2143 map = new _IOMemoryMap;
2144
2145 LOCK;
2146
2147 if( map
2148 && !map->initWithDescriptor( this, intoTask, mapAddress,
2149 options | kIOMapStatic, 0, getLength() )) {
2150 map->release();
2151 map = 0;
2152 }
2153
2154 addMapping( map);
2155
2156 UNLOCK;
2157
2158 return( map);
2159 }
2160
2161 IOMemoryMap * IOMemoryDescriptor::map(
2162 IOOptionBits options )
2163 {
2164
2165 return( makeMapping( this, kernel_task, 0,
2166 options | kIOMapAnywhere,
2167 0, getLength() ));
2168 }
2169
2170 IOMemoryMap * IOMemoryDescriptor::map(
2171 task_t intoTask,
2172 IOVirtualAddress toAddress,
2173 IOOptionBits options,
2174 IOByteCount offset,
2175 IOByteCount length )
2176 {
2177 if( 0 == length)
2178 length = getLength();
2179
2180 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2181 }
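/*
 * Illustrative usage (a sketch only; error handling and option choices will
 * vary by driver):
 *
 *   IOMemoryMap * map = desc->map();               // kernel_task, kIOMapAnywhere
 *   if( map) {
 *       IOVirtualAddress va  = map->getVirtualAddress();
 *       IOByteCount      len = map->getLength();
 *       // ... access [va, va + len) ...
 *       map->release();                            // undoes the mapping (see _IOMemoryMap::free)
 *   }
 */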
2182
2183 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2184 IOMemoryDescriptor * owner,
2185 task_t intoTask,
2186 IOVirtualAddress toAddress,
2187 IOOptionBits options,
2188 IOByteCount offset,
2189 IOByteCount length )
2190 {
2191 _IOMemoryMap * mapping = 0;
2192 OSIterator * iter;
2193
2194 LOCK;
2195
2196 do {
2197 // look for an existing mapping
2198 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2199
2200 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2201
2202 if( (mapping = mapping->copyCompatible(
2203 owner, intoTask, toAddress,
2204 options | kIOMapReference,
2205 offset, length )))
2206 break;
2207 }
2208 iter->release();
2209 if( mapping)
2210 continue;
2211 }
2212
2213
2214 if( mapping || (options & kIOMapReference))
2215 continue;
2216
2217 owner = this;
2218
2219 mapping = new _IOMemoryMap;
2220 if( mapping
2221 && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
2222 offset, length )) {
2223 #ifdef DEBUG
2224 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2225 #endif
2226 mapping->release();
2227 mapping = 0;
2228 }
2229
2230 } while( false );
2231
2232 owner->addMapping( mapping);
2233
2234 UNLOCK;
2235
2236 return( mapping);
2237 }
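/*
 * makeMapping() flow: first offer the request to each existing mapping via
 * copyCompatible() (with kIOMapReference set); if none matches and the caller
 * did not insist on kIOMapReference, construct a fresh _IOMemoryMap with
 * initWithDescriptor().  Whatever results is added to the owner's _mappings
 * set before the lock is dropped.
 */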
2238
2239 void IOMemoryDescriptor::addMapping(
2240 IOMemoryMap * mapping )
2241 {
2242 if( mapping) {
2243 if( 0 == _mappings)
2244 _mappings = OSSet::withCapacity(1);
2245 if( _mappings )
2246 _mappings->setObject( mapping );
2247 }
2248 }
2249
2250 void IOMemoryDescriptor::removeMapping(
2251 IOMemoryMap * mapping )
2252 {
2253 if( _mappings)
2254 _mappings->removeObject( mapping);
2255 }
2256
2257 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2258
2259 #undef super
2260 #define super IOMemoryDescriptor
2261
2262 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2263
2264 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2265
2266 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2267 IOByteCount offset, IOByteCount length,
2268 IODirection direction )
2269 {
2270 if( !parent)
2271 return( false);
2272
2273 if( (offset + length) > parent->getLength())
2274 return( false);
2275
2276 /*
2277 * We can check the _parent instance variable before having ever set it
2278 * to an initial value because I/O Kit guarantees that all our instance
2279 * variables are zeroed on an object's allocation.
2280 */
2281
2282 if( !_parent) {
2283 if( !super::init())
2284 return( false );
2285 } else {
2286 /*
2287 * An existing memory descriptor is being retargeted to
2288 * point to somewhere else. Clean up our present state.
2289 */
2290
2291 _parent->release();
2292 _parent = 0;
2293 }
2294
2295 parent->retain();
2296 _parent = parent;
2297 _start = offset;
2298 _length = length;
2299 _direction = direction;
2300 _tag = parent->getTag();
2301
2302 return( true );
2303 }
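/*
 * Illustrative construction (a sketch; the usual entry point is the
 * IOMemoryDescriptor::withSubRange() factory declared in IOMemoryDescriptor.h,
 * and parentMD here stands for any existing descriptor):
 *
 *   IOMemoryDescriptor * sub =
 *       IOMemoryDescriptor::withSubRange( parentMD, 0x1000, 0x800, kIODirectionOutIn );
 *
 * The sub-descriptor retains its parent and forwards prepare/complete,
 * readBytes/writeBytes and getPhysicalSegment with _start added to each offset.
 */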
2304
2305 void IOSubMemoryDescriptor::free( void )
2306 {
2307 if( _parent)
2308 _parent->release();
2309
2310 super::free();
2311 }
2312
2313
2314 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2315 IOByteCount * length )
2316 {
2317 IOPhysicalAddress address;
2318 IOByteCount actualLength;
2319
2320 assert(offset <= _length);
2321
2322 if( length)
2323 *length = 0;
2324
2325 if( offset >= _length)
2326 return( 0 );
2327
2328 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2329
2330 if( address && length)
2331 *length = min( _length - offset, actualLength );
2332
2333 return( address );
2334 }
2335
2336 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2337 IOByteCount * length )
2338 {
2339 IOPhysicalAddress address;
2340 IOByteCount actualLength;
2341
2342 assert(offset <= _length);
2343
2344 if( length)
2345 *length = 0;
2346
2347 if( offset >= _length)
2348 return( 0 );
2349
2350 address = _parent->getSourceSegment( offset + _start, &actualLength );
2351
2352 if( address && length)
2353 *length = min( _length - offset, actualLength );
2354
2355 return( address );
2356 }
2357
2358 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2359 IOByteCount * lengthOfSegment)
2360 {
2361 return( 0 );
2362 }
2363
2364 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
2365 void * bytes, IOByteCount length)
2366 {
2367 IOByteCount byteCount;
2368
2369 assert(offset <= _length);
2370
2371 if( offset >= _length)
2372 return( 0 );
2373
2374 LOCK;
2375 byteCount = _parent->readBytes( _start + offset, bytes,
2376 min(length, _length - offset) );
2377 UNLOCK;
2378
2379 return( byteCount );
2380 }
2381
2382 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
2383 const void* bytes, IOByteCount length)
2384 {
2385 IOByteCount byteCount;
2386
2387 assert(offset <= _length);
2388
2389 if( offset >= _length)
2390 return( 0 );
2391
2392 LOCK;
2393 byteCount = _parent->writeBytes( _start + offset, bytes,
2394 min(length, _length - offset) );
2395 UNLOCK;
2396
2397 return( byteCount );
2398 }
2399
2400 IOReturn IOSubMemoryDescriptor::prepare(
2401 IODirection forDirection)
2402 {
2403 IOReturn err;
2404
2405 LOCK;
2406 err = _parent->prepare( forDirection);
2407 UNLOCK;
2408
2409 return( err );
2410 }
2411
2412 IOReturn IOSubMemoryDescriptor::complete(
2413 IODirection forDirection)
2414 {
2415 IOReturn err;
2416
2417 LOCK;
2418 err = _parent->complete( forDirection);
2419 UNLOCK;
2420
2421 return( err );
2422 }
2423
2424 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
2425 IOMemoryDescriptor * owner,
2426 task_t intoTask,
2427 IOVirtualAddress toAddress,
2428 IOOptionBits options,
2429 IOByteCount offset,
2430 IOByteCount length )
2431 {
2432 IOMemoryMap * mapping;
2433
2434 mapping = (IOMemoryMap *) _parent->makeMapping(
2435 _parent, intoTask,
2436 toAddress - (_start + offset),
2437 options | kIOMapReference,
2438 _start + offset, length );
2439
2440 if( !mapping)
2441 mapping = (IOMemoryMap *) _parent->makeMapping(
2442 _parent, intoTask,
2443 toAddress,
2444 options, _start + offset, length );
2445
2446 if( !mapping)
2447 mapping = super::makeMapping( owner, intoTask, toAddress, options,
2448 offset, length );
2449
2450 return( mapping );
2451 }
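/*
 * Mapping a sub-range proceeds in three attempts: reuse a compatible parent
 * mapping at the address implied by the requested sub-range (kIOMapReference),
 * then ask the parent for a fresh mapping of [_start + offset, length), and
 * only then fall back to the generic IOMemoryDescriptor::makeMapping path.
 */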
2452
2453 /* ick - the flat-address initializers below make no sense for a sub-range and always fail */
2454
2455 bool
2456 IOSubMemoryDescriptor::initWithAddress(void * address,
2457 IOByteCount length,
2458 IODirection direction)
2459 {
2460 return( false );
2461 }
2462
2463 bool
2464 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
2465 IOByteCount length,
2466 IODirection direction,
2467 task_t task)
2468 {
2469 return( false );
2470 }
2471
2472 bool
2473 IOSubMemoryDescriptor::initWithPhysicalAddress(
2474 IOPhysicalAddress address,
2475 IOByteCount length,
2476 IODirection direction )
2477 {
2478 return( false );
2479 }
2480
2481 bool
2482 IOSubMemoryDescriptor::initWithRanges(
2483 IOVirtualRange * ranges,
2484 UInt32 withCount,
2485 IODirection direction,
2486 task_t task,
2487 bool asReference)
2488 {
2489 return( false );
2490 }
2491
2492 bool
2493 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
2494 UInt32 withCount,
2495 IODirection direction,
2496 bool asReference)
2497 {
2498 return( false );
2499 }
2500
2501 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2502
2503 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
2504 {
2505 OSSymbol const *keys[2];
2506 OSObject *values[2];
2507 IOVirtualRange *vcopy;
2508 unsigned int index, nRanges;
2509 bool result;
2510
2511 if (s == NULL) return false;
2512 if (s->previouslySerialized(this)) return true;
2513
2514 // Pretend we are an array.
2515 if (!s->addXMLStartTag(this, "array")) return false;
2516
2517 nRanges = _rangesCount;
2518 vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
2519 if (vcopy == 0) return false;
2520
2521 keys[0] = OSSymbol::withCString("address");
2522 keys[1] = OSSymbol::withCString("length");
2523
2524 result = false;
2525 values[0] = values[1] = 0;
2526
2527 // From this point on, any failure path can 'goto bail' for cleanup.
2528
2529 // Copy the volatile data so we don't have to allocate memory
2530 // while the lock is held.
2531 LOCK;
2532 if (nRanges == _rangesCount) {
2533 for (index = 0; index < nRanges; index++) {
2534 vcopy[index] = _ranges.v[index];
2535 }
2536 } else {
2537 // The descriptor changed out from under us. Give up.
2538 UNLOCK;
2539 result = false;
2540 goto bail;
2541 }
2542 UNLOCK;
2543
2544 for (index = 0; index < nRanges; index++)
2545 {
2546 values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
2547 if (values[0] == 0) {
2548 result = false;
2549 goto bail;
2550 }
2551 values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
2552 if (values[1] == 0) {
2553 result = false;
2554 goto bail;
2555 }
2556 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
2557 if (dict == 0) {
2558 result = false;
2559 goto bail;
2560 }
2561 values[0]->release();
2562 values[1]->release();
2563 values[0] = values[1] = 0;
2564
2565 result = dict->serialize(s);
2566 dict->release();
2567 if (!result) {
2568 goto bail;
2569 }
2570 }
2571 result = s->addXMLEndTag("array");
2572
2573 bail:
2574 if (values[0])
2575 values[0]->release();
2576 if (values[1])
2577 values[1]->release();
2578 if (keys[0])
2579 keys[0]->release();
2580 if (keys[1])
2581 keys[1]->release();
2582 if (vcopy)
2583 IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
2584 return result;
2585 }
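/*
 * Sketch of the serialized form produced above (one dictionary per range;
 * exact attributes depend on OSNumber/OSSerialize):
 *
 *   <array>
 *     <dict>
 *       <key>address</key> <integer>...</integer>
 *       <key>length</key>  <integer>...</integer>
 *     </dict>
 *     ...
 *   </array>
 */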
2586
2587 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
2588 {
2589 if (!s) {
2590 return (false);
2591 }
2592 if (s->previouslySerialized(this)) return true;
2593
2594 // Pretend we are a dictionary.
2595 // We must duplicate the functionality of OSDictionary here
2596 // because otherwise object references will not work;
2597 // they are based on the value of the object passed to
2598 // previouslySerialized and addXMLStartTag.
2599
2600 if (!s->addXMLStartTag(this, "dict")) return false;
2601
2602 char const *keys[3] = {"offset", "length", "parent"};
2603
2604 OSObject *values[3];
2605 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
2606 if (values[0] == 0)
2607 return false;
2608 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
2609 if (values[1] == 0) {
2610 values[0]->release();
2611 return false;
2612 }
2613 values[2] = _parent;
2614
2615 bool result = true;
2616 for (int i=0; i<3; i++) {
2617 if (!s->addString("<key>") ||
2618 !s->addString(keys[i]) ||
2619 !s->addXMLEndTag("key") ||
2620 !values[i]->serialize(s)) {
2621 result = false;
2622 break;
2623 }
2624 }
2625 values[0]->release();
2626 values[1]->release();
2627 if (!result) {
2628 return false;
2629 }
2630
2631 return s->addXMLEndTag("dict");
2632 }
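/*
 * Sketch of the serialized form: a single dictionary with "offset", "length"
 * and "parent" keys, where the parent entry is the parent descriptor's own
 * serialization (for an IOGeneralMemoryDescriptor, the array of ranges above).
 */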
2633
2634 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2635
2636 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
2637 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
2638 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
2639 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
2640 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
2641 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
2642 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
2643 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
2644 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
2645 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
2646 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
2647 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
2648 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
2649 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
2650 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
2651 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
2652
2653 /* ex-inline function implementation */
2654 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
2655 { return( getPhysicalSegment( 0, 0 )); }