apple/xnu: iokit/Kernel/IOMemoryDescriptor.cpp (commit f60f612dc45f796afb3f17c85aea6429258fb598)
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53 #include "IOCopyMapper.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 void ipc_port_release_send(ipc_port_t port);
76
77 /* Copy between a physical page and a virtual address in the given vm_map */
78 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
79
80 memory_object_t
81 device_pager_setup(
82 memory_object_t pager,
83 uintptr_t device_handle,
84 vm_size_t size,
85 int flags);
86 void
87 device_pager_deallocate(
88 memory_object_t);
89 kern_return_t
90 device_pager_populate_object(
91 memory_object_t pager,
92 vm_object_offset_t offset,
93 ppnum_t phys_addr,
94 vm_size_t size);
95 kern_return_t
96 memory_object_iopl_request(
97 ipc_port_t port,
98 memory_object_offset_t offset,
99 vm_size_t *upl_size,
100 upl_t *upl_ptr,
101 upl_page_info_array_t user_page_list,
102 unsigned int *page_list_count,
103 int *flags);
104
105 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
106
107 __END_DECLS
108
109 #define kIOMaximumMappedIOByteCount (512*1024*1024)
110
111 static IOMapper * gIOSystemMapper = NULL;
112
113 IOCopyMapper * gIOCopyMapper = NULL;
114
115 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
116
117 ppnum_t gIOLastPage;
118
119 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
120
121 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
122
123 #define super IOMemoryDescriptor
124
125 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
126
127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
129 static IORecursiveLock * gIOMemoryLock;
130
131 #define LOCK IORecursiveLockLock( gIOMemoryLock)
132 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
133 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
134 #define WAKEUP \
135 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
136
137 #if 0
138 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
139 #else
140 #define DEBG(fmt, args...) {}
141 #endif
142
143 #define IOMD_DEBUG_DMAACTIVE 1
144
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146
147 // Some data structures and accessor macros used by the initWithOptions
148 // function
149
150 enum ioPLBlockFlags {
151 kIOPLOnDevice = 0x00000001,
152 kIOPLExternUPL = 0x00000002,
153 };
154
155 struct typePersMDData
156 {
157 const IOGeneralMemoryDescriptor *fMD;
158 ipc_port_t fMemEntry;
159 };
160
161 struct ioPLBlock {
162 upl_t fIOPL;
163 vm_address_t fPageInfo; // Pointer to page list or index into it
164 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
165 ppnum_t fMappedBase; // Page number of first page in this iopl
166 unsigned int fPageOffset; // Offset within first page of iopl
167 unsigned int fFlags; // Flags
168 };
169
170 struct ioGMDData {
171 IOMapper *fMapper;
172 uint64_t fPreparationID;
173 unsigned int fPageCnt;
174 #if __LP64__
175 // align arrays to 8 bytes so following macros work
176 unsigned int fPad;
177 #endif
178 upl_page_info_t fPageList[];
179 ioPLBlock fBlocks[];
180 };
181
182 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
183 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
184 #define getNumIOPL(osd, d) \
185 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
186 #define getPageList(d) (&(d->fPageList[0]))
187 #define computeDataSize(p, u) \
188 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
189
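// Illustrative note (assumption, not from the original source): the ioGMDData
// stored in _memoryEntries is laid out as the fixed header, then fPageCnt
// upl_page_info_t entries, then the ioPLBlock array. For example, a
// hypothetical descriptor covering 4 pages with a single iopl reserves
//     computeDataSize(4, 1)
//         == sizeof(ioGMDData) + 4 * sizeof(upl_page_info_t) + 1 * sizeof(ioPLBlock)
// bytes, with getIOPLList() pointing just past the page list and getNumIOPL()
// derived from the OSData length.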
190
191 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
192
193 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
194
195
196 extern "C" {
197
198 kern_return_t device_data_action(
199 uintptr_t device_handle,
200 ipc_port_t device_pager,
201 vm_prot_t protection,
202 vm_object_offset_t offset,
203 vm_size_t size)
204 {
205 struct ExpansionData {
206 void * devicePager;
207 unsigned int pagerContig:1;
208 unsigned int unused:31;
209 IOMemoryDescriptor * memory;
210 };
211 kern_return_t kr;
212 ExpansionData * ref = (ExpansionData *) device_handle;
213 IOMemoryDescriptor * memDesc;
214
215 LOCK;
216 memDesc = ref->memory;
217 if( memDesc)
218 {
219 memDesc->retain();
220 kr = memDesc->handleFault( device_pager, 0, 0,
221 offset, size, kIOMapDefaultCache /*?*/);
222 memDesc->release();
223 }
224 else
225 kr = KERN_ABORTED;
226 UNLOCK;
227
228 return( kr );
229 }
230
231 kern_return_t device_close(
232 uintptr_t device_handle)
233 {
234 struct ExpansionData {
235 void * devicePager;
236 unsigned int pagerContig:1;
237 unsigned int unused:31;
238 IOMemoryDescriptor * memory;
239 };
240 ExpansionData * ref = (ExpansionData *) device_handle;
241
242 IODelete( ref, ExpansionData, 1 );
243
244 return( kIOReturnSuccess );
245 }
246 }; // end extern "C"
247
248 // Note this inline function uses C++ reference arguments to return values.
249 // This means that pointers are not passed and NULLs don't have to be
250 // checked for, as a NULL reference is illegal.
251 static inline void
252 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
253 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
254 {
255 assert(kIOMemoryTypeUIO == type
256 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
257 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
258 if (kIOMemoryTypeUIO == type) {
259 user_size_t us;
260 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
261 }
262 #ifndef __LP64__
263 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
264 IOAddressRange cur = r.v64[ind];
265 addr = cur.address;
266 len = cur.length;
267 }
268 #endif /* !__LP64__ */
269 else {
270 IOVirtualRange cur = r.v[ind];
271 addr = cur.address;
272 len = cur.length;
273 }
274 }
275
276 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
277
278 IOMemoryDescriptor *
279 IOMemoryDescriptor::withAddress(void * address,
280 IOByteCount length,
281 IODirection direction)
282 {
283 return IOMemoryDescriptor::
284 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
285 }
286
287 #ifndef __LP64__
288 IOMemoryDescriptor *
289 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
290 IOByteCount length,
291 IODirection direction,
292 task_t task)
293 {
294 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
295 if (that)
296 {
297 if (that->initWithAddress(address, length, direction, task))
298 return that;
299
300 that->release();
301 }
302 return 0;
303 }
304 #endif /* !__LP64__ */
305
306 IOMemoryDescriptor *
307 IOMemoryDescriptor::withPhysicalAddress(
308 IOPhysicalAddress address,
309 IOByteCount length,
310 IODirection direction )
311 {
312 #ifdef __LP64__
313 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
314 #else /* !__LP64__ */
315 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
316 if (self
317 && !self->initWithPhysicalAddress(address, length, direction)) {
318 self->release();
319 return 0;
320 }
321
322 return self;
323 #endif /* !__LP64__ */
324 }
325
326 #ifndef __LP64__
327 IOMemoryDescriptor *
328 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
329 UInt32 withCount,
330 IODirection direction,
331 task_t task,
332 bool asReference)
333 {
334 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
335 if (that)
336 {
337 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
338 return that;
339
340 that->release();
341 }
342 return 0;
343 }
344 #endif /* !__LP64__ */
345
346 IOMemoryDescriptor *
347 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
348 mach_vm_size_t length,
349 IOOptionBits options,
350 task_t task)
351 {
352 IOAddressRange range = { address, length };
353 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
354 }
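// Illustrative usage sketch (not part of the original source): a driver
// wrapping a caller-supplied buffer might do something like the following,
// where task, userAddr and userLen are assumed to come from the client:
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         userAddr, userLen, kIODirectionOutIn, task);
//     if (md && (kIOReturnSuccess == md->prepare())) {
//         // ... perform DMA, or use readBytes()/writeBytes() against md ...
//         md->complete();
//     }
//     if (md) md->release();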
355
356 IOMemoryDescriptor *
357 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
358 UInt32 rangeCount,
359 IOOptionBits options,
360 task_t task)
361 {
362 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
363 if (that)
364 {
365 if (task)
366 options |= kIOMemoryTypeVirtual64;
367 else
368 options |= kIOMemoryTypePhysical64;
369
370 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
371 return that;
372
373 that->release();
374 }
375
376 return 0;
377 }
378
379
380 /*
381 * withOptions:
382 *
383 * Create a new IOMemoryDescriptor. The buffer is made up of several
384 * virtual address ranges, from a given task.
385 *
386 * Passing the ranges as a reference will avoid an extra allocation.
387 */
388 IOMemoryDescriptor *
389 IOMemoryDescriptor::withOptions(void * buffers,
390 UInt32 count,
391 UInt32 offset,
392 task_t task,
393 IOOptionBits opts,
394 IOMapper * mapper)
395 {
396 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
397
398 if (self
399 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
400 {
401 self->release();
402 return 0;
403 }
404
405 return self;
406 }
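// Illustrative sketch (assumption, not from the original source): describing
// several discontiguous ranges of a user task. withAddressRanges() above is
// the usual front end; it ors in the virtual/physical type bits and ends up
// in initWithOptions() below:
//
//     IOAddressRange ranges[2] = {
//         { addr0, len0 },    // hypothetical addresses and lengths
//         { addr1, len1 },
//     };
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRanges(
//         ranges, 2, kIODirectionIn, task);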
407
408 bool IOMemoryDescriptor::initWithOptions(void * buffers,
409 UInt32 count,
410 UInt32 offset,
411 task_t task,
412 IOOptionBits options,
413 IOMapper * mapper)
414 {
415 return( false );
416 }
417
418 #ifndef __LP64__
419 IOMemoryDescriptor *
420 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
421 UInt32 withCount,
422 IODirection direction,
423 bool asReference)
424 {
425 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
426 if (that)
427 {
428 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
429 return that;
430
431 that->release();
432 }
433 return 0;
434 }
435
436 IOMemoryDescriptor *
437 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
438 IOByteCount offset,
439 IOByteCount length,
440 IODirection direction)
441 {
442 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
443 }
444 #endif /* !__LP64__ */
445
446 IOMemoryDescriptor *
447 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
448 {
449 IOGeneralMemoryDescriptor *origGenMD =
450 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
451
452 if (origGenMD)
453 return IOGeneralMemoryDescriptor::
454 withPersistentMemoryDescriptor(origGenMD);
455 else
456 return 0;
457 }
458
459 IOMemoryDescriptor *
460 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
461 {
462 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
463
464 if (!sharedMem)
465 return 0;
466
467 if (sharedMem == originalMD->_memEntry) {
468 originalMD->retain(); // Add a new reference to ourselves
469 ipc_port_release_send(sharedMem); // Remove extra send right
470 return originalMD;
471 }
472
473 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
474 typePersMDData initData = { originalMD, sharedMem };
475
476 if (self
477 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
478 self->release();
479 self = 0;
480 }
481 return self;
482 }
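// Illustrative sketch (not part of the original source): a client holding a
// descriptor created with kIOMemoryPersistent can ask for one that tracks the
// same backing pages even if the original task mapping later changes:
//
//     IOMemoryDescriptor * persistent =
//         IOMemoryDescriptor::withPersistentMemoryDescriptor(originalMD);
//     // Returns originalMD (retained) when the named entry is unchanged,
//     // otherwise a new descriptor sharing the same named entry.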
483
484 void *IOGeneralMemoryDescriptor::createNamedEntry()
485 {
486 kern_return_t error;
487 ipc_port_t sharedMem;
488
489 IOOptionBits type = _flags & kIOMemoryTypeMask;
490
491 user_addr_t range0Addr;
492 IOByteCount range0Len;
493 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
494 range0Addr = trunc_page_64(range0Addr);
495
496 vm_size_t size = ptoa_32(_pages);
497 vm_address_t kernelPage = (vm_address_t) range0Addr;
498
499 vm_map_t theMap = ((_task == kernel_task)
500 && (kIOMemoryBufferPageable & _flags))
501 ? IOPageableMapForAddress(kernelPage)
502 : get_task_map(_task);
503
504 memory_object_size_t actualSize = size;
505 vm_prot_t prot = VM_PROT_READ;
506 #if CONFIG_EMBEDDED
507 if (kIODirectionOut != (kIODirectionOutIn & _flags))
508 #endif
509 prot |= VM_PROT_WRITE;
510
511 if (_memEntry)
512 prot |= MAP_MEM_NAMED_REUSE;
513
514 error = mach_make_memory_entry_64(theMap,
515 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
516
517 if (KERN_SUCCESS == error) {
518 if (actualSize == size) {
519 return sharedMem;
520 } else {
521 #if IOASSERT
522 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
523 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
524 #endif
525 ipc_port_release_send( sharedMem );
526 }
527 }
528
529 return MACH_PORT_NULL;
530 }
531
532 #ifndef __LP64__
533 bool
534 IOGeneralMemoryDescriptor::initWithAddress(void * address,
535 IOByteCount withLength,
536 IODirection withDirection)
537 {
538 _singleRange.v.address = (vm_offset_t) address;
539 _singleRange.v.length = withLength;
540
541 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
542 }
543
544 bool
545 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
546 IOByteCount withLength,
547 IODirection withDirection,
548 task_t withTask)
549 {
550 _singleRange.v.address = address;
551 _singleRange.v.length = withLength;
552
553 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
554 }
555
556 bool
557 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
558 IOPhysicalAddress address,
559 IOByteCount withLength,
560 IODirection withDirection )
561 {
562 _singleRange.p.address = address;
563 _singleRange.p.length = withLength;
564
565 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
566 }
567
568 bool
569 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
570 IOPhysicalRange * ranges,
571 UInt32 count,
572 IODirection direction,
573 bool reference)
574 {
575 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
576
577 if (reference)
578 mdOpts |= kIOMemoryAsReference;
579
580 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
581 }
582
583 bool
584 IOGeneralMemoryDescriptor::initWithRanges(
585 IOVirtualRange * ranges,
586 UInt32 count,
587 IODirection direction,
588 task_t task,
589 bool reference)
590 {
591 IOOptionBits mdOpts = direction;
592
593 if (reference)
594 mdOpts |= kIOMemoryAsReference;
595
596 if (task) {
597 mdOpts |= kIOMemoryTypeVirtual;
598
599 // Auto-prepare if this is a kernel memory descriptor as very few
600 // clients bother to prepare() kernel memory.
601 // But it was not enforced, so what are you going to do?
602 if (task == kernel_task)
603 mdOpts |= kIOMemoryAutoPrepare;
604 }
605 else
606 mdOpts |= kIOMemoryTypePhysical;
607
608 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
609 }
610 #endif /* !__LP64__ */
611
612 /*
613 * initWithOptions:
614 *
615 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
616 * from a given task, several physical ranges, a UPL from the ubc
617 * system, or a uio (which may be 64-bit) from the BSD subsystem.
618 *
619 * Passing the ranges as a reference will avoid an extra allocation.
620 *
621 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
622 * existing instance -- note this behavior is not commonly supported in other
623 * I/O Kit classes, although it is supported here.
624 */
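// Illustrative sketch of the re-use behaviour described above (assumption,
// not from the original source): retargeting an existing instance rather than
// allocating a new one, where range is a hypothetical IOAddressRange:
//
//     bool ok = md->initWithOptions(&range, 1, 0, task,
//                                   kIOMemoryTypeVirtual64 | kIODirectionOut,
//                                   /* mapper */ 0);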
625
626 bool
627 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
628 UInt32 count,
629 UInt32 offset,
630 task_t task,
631 IOOptionBits options,
632 IOMapper * mapper)
633 {
634 IOOptionBits type = options & kIOMemoryTypeMask;
635
636 // Grab the original MD's configuration data to initialise the
637 // arguments to this function.
638 if (kIOMemoryTypePersistentMD == type) {
639
640 typePersMDData *initData = (typePersMDData *) buffers;
641 const IOGeneralMemoryDescriptor *orig = initData->fMD;
642 ioGMDData *dataP = getDataP(orig->_memoryEntries);
643
644 // Only accept persistent memory descriptors with valid dataP data.
645 assert(orig->_rangesCount == 1);
646 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
647 return false;
648
649 _memEntry = initData->fMemEntry; // Grab the new named entry
650 options = orig->_flags | kIOMemoryAsReference;
651 _singleRange = orig->_singleRange; // Initialise our range
652 buffers = &_singleRange;
653 count = 1;
654
655 // Now grab the original task and whatever mapper was previously used
656 task = orig->_task;
657 mapper = dataP->fMapper;
658
659 // We are ready to go through the original initialisation now
660 }
661
662 switch (type) {
663 case kIOMemoryTypeUIO:
664 case kIOMemoryTypeVirtual:
665 #ifndef __LP64__
666 case kIOMemoryTypeVirtual64:
667 #endif /* !__LP64__ */
668 assert(task);
669 if (!task)
670 return false;
671
672 #ifndef __LP64__
673 if (vm_map_is_64bit(get_task_map(task))
674 && (kIOMemoryTypeVirtual == type)
675 && ((IOVirtualRange *) buffers)->address)
676 {
677 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
678 return false;
679 }
680 #endif /* !__LP64__ */
681 break;
682
683 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
684 #ifndef __LP64__
685 case kIOMemoryTypePhysical64:
686 #endif /* !__LP64__ */
687 case kIOMemoryTypeUPL:
688 assert(!task);
689 break;
690 default:
691 return false; /* bad argument */
692 }
693
694 assert(buffers);
695 assert(count);
696
697 /*
698 * We can check the _initialized instance variable before having ever set
699 * it to an initial value because I/O Kit guarantees that all our instance
700 * variables are zeroed on an object's allocation.
701 */
702
703 if (_initialized) {
704 /*
705 * An existing memory descriptor is being retargeted to point to
706 * somewhere else. Clean up our present state.
707 */
708 IOOptionBits type = _flags & kIOMemoryTypeMask;
709 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
710 {
711 while (_wireCount)
712 complete();
713 }
714 if (_ranges.v && !(kIOMemoryAsReference & _flags))
715 {
716 if (kIOMemoryTypeUIO == type)
717 uio_free((uio_t) _ranges.v);
718 #ifndef __LP64__
719 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
720 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
721 #endif /* !__LP64__ */
722 else
723 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
724 }
725
726 if (_memEntry)
727 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
728 if (_mappings)
729 _mappings->flushCollection();
730 }
731 else {
732 if (!super::init())
733 return false;
734 _initialized = true;
735 }
736
737 // Grab the appropriate mapper
738 if (kIOMemoryMapperNone & options)
739 mapper = 0; // No Mapper
740 else if (mapper == kIOMapperSystem) {
741 IOMapper::checkForSystemMapper();
742 gIOSystemMapper = mapper = IOMapper::gSystem;
743 }
744
745 // Temp binary compatibility for kIOMemoryThreadSafe
746 if (kIOMemoryReserved6156215 & options)
747 {
748 options &= ~kIOMemoryReserved6156215;
749 options |= kIOMemoryThreadSafe;
750 }
751 // Remove the dynamic internal use flags from the initial setting
752 options &= ~(kIOMemoryPreparedReadOnly);
753 _flags = options;
754 _task = task;
755
756 #ifndef __LP64__
757 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
758 #endif /* !__LP64__ */
759
760 __iomd_reservedA = 0;
761 __iomd_reservedB = 0;
762 _highestPage = 0;
763
764 if (kIOMemoryThreadSafe & options)
765 {
766 if (!_prepareLock)
767 _prepareLock = IOLockAlloc();
768 }
769 else if (_prepareLock)
770 {
771 IOLockFree(_prepareLock);
772 _prepareLock = NULL;
773 }
774
775 if (kIOMemoryTypeUPL == type) {
776
777 ioGMDData *dataP;
778 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
779
780 if (!_memoryEntries) {
781 _memoryEntries = OSData::withCapacity(dataSize);
782 if (!_memoryEntries)
783 return false;
784 }
785 else if (!_memoryEntries->initWithCapacity(dataSize))
786 return false;
787
788 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
789 dataP = getDataP(_memoryEntries);
790 dataP->fMapper = mapper;
791 dataP->fPageCnt = 0;
792
793 // _wireCount++; // UPLs start out life wired
794
795 _length = count;
796 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
797
798 ioPLBlock iopl;
799 iopl.fIOPL = (upl_t) buffers;
800 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
801
802 if (upl_get_size(iopl.fIOPL) < (count + offset))
803 panic("short external upl");
804
805 // Set the flag kIOPLOnDevice conveniently equal to 1
806 iopl.fFlags = pageList->device | kIOPLExternUPL;
807 iopl.fIOMDOffset = 0;
808
809 _highestPage = upl_get_highest_page(iopl.fIOPL);
810
811 if (!pageList->device) {
812 // Pre-compute the offset into the UPL's page list
813 pageList = &pageList[atop_32(offset)];
814 offset &= PAGE_MASK;
815 if (mapper) {
816 iopl.fMappedBase = mapper->iovmAlloc(_pages);
817 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
818 }
819 else
820 iopl.fMappedBase = 0;
821 }
822 else
823 iopl.fMappedBase = 0;
824 iopl.fPageInfo = (vm_address_t) pageList;
825 iopl.fPageOffset = offset;
826
827 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
828 }
829 else {
830 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
831 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
832
833 // Initialize the memory descriptor
834 if (options & kIOMemoryAsReference) {
835 #ifndef __LP64__
836 _rangesIsAllocated = false;
837 #endif /* !__LP64__ */
838
839 // Hack assignment to get the buffer arg into _ranges.
840 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
841 // work, C++ sigh.
842 // This also initialises the uio & physical ranges.
843 _ranges.v = (IOVirtualRange *) buffers;
844 }
845 else {
846 #ifndef __LP64__
847 _rangesIsAllocated = true;
848 #endif /* !__LP64__ */
849 switch (type)
850 {
851 case kIOMemoryTypeUIO:
852 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
853 break;
854
855 #ifndef __LP64__
856 case kIOMemoryTypeVirtual64:
857 case kIOMemoryTypePhysical64:
858 if (count == 1
859 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL) {
860 if (kIOMemoryTypeVirtual64 == type)
861 type = kIOMemoryTypeVirtual;
862 else
863 type = kIOMemoryTypePhysical;
864 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
865 _rangesIsAllocated = false;
866 _ranges.v = &_singleRange.v;
867 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
868 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
869 break;
870 }
871 _ranges.v64 = IONew(IOAddressRange, count);
872 if (!_ranges.v64)
873 return false;
874 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
875 break;
876 #endif /* !__LP64__ */
877 case kIOMemoryTypeVirtual:
878 case kIOMemoryTypePhysical:
879 if (count == 1) {
880 _flags |= kIOMemoryAsReference;
881 #ifndef __LP64__
882 _rangesIsAllocated = false;
883 #endif /* !__LP64__ */
884 _ranges.v = &_singleRange.v;
885 } else {
886 _ranges.v = IONew(IOVirtualRange, count);
887 if (!_ranges.v)
888 return false;
889 }
890 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
891 break;
892 }
893 }
894
895 // Compute the total length, page count and highest physical page over the vector of ranges
896 Ranges vec = _ranges;
897 UInt32 length = 0;
898 UInt32 pages = 0;
899 for (unsigned ind = 0; ind < count; ind++) {
900 user_addr_t addr;
901 IOPhysicalLength len;
902
903 // addr & len are returned by this function
904 getAddrLenForInd(addr, len, type, vec, ind);
905 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
906 len += length;
907 assert(len >= length); // Check for 32 bit wrap around
908 length = len;
909
910 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
911 {
912 ppnum_t highPage = atop_64(addr + len - 1);
913 if (highPage > _highestPage)
914 _highestPage = highPage;
915 }
916 }
917 _length = length;
918 _pages = pages;
919 _rangesCount = count;
920
921 // Auto-prepare memory at creation time.
922 // Implied completion when the descriptor is freed
923 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
924 _wireCount++; // Physical MDs are, by definition, wired
925 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
926 ioGMDData *dataP;
927 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
928
929 if (!_memoryEntries) {
930 _memoryEntries = OSData::withCapacity(dataSize);
931 if (!_memoryEntries)
932 return false;
933 }
934 else if (!_memoryEntries->initWithCapacity(dataSize))
935 return false;
936
937 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
938 dataP = getDataP(_memoryEntries);
939 dataP->fMapper = mapper;
940 dataP->fPageCnt = _pages;
941
942 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
943 _memEntry = createNamedEntry();
944
945 if ((_flags & kIOMemoryAutoPrepare)
946 && prepare() != kIOReturnSuccess)
947 return false;
948 }
949 }
950
951 return true;
952 }
953
954 /*
955 * free
956 *
957 * Free resources.
958 */
959 void IOGeneralMemoryDescriptor::free()
960 {
961 IOOptionBits type = _flags & kIOMemoryTypeMask;
962
963 if( reserved)
964 {
965 LOCK;
966 reserved->memory = 0;
967 UNLOCK;
968 }
969
970 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
971 {
972 while (_wireCount)
973 complete();
974 }
975 if (_memoryEntries)
976 _memoryEntries->release();
977
978 if (_ranges.v && !(kIOMemoryAsReference & _flags))
979 {
980 if (kIOMemoryTypeUIO == type)
981 uio_free((uio_t) _ranges.v);
982 #ifndef __LP64__
983 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
984 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
985 #endif /* !__LP64__ */
986 else
987 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
988
989 _ranges.v = NULL;
990 }
991
992 if (reserved && reserved->devicePager)
993 device_pager_deallocate( (memory_object_t) reserved->devicePager );
994
995 // memEntry holds a ref on the device pager which owns reserved
996 // (ExpansionData) so no reserved access after this point
997 if (_memEntry)
998 ipc_port_release_send( (ipc_port_t) _memEntry );
999
1000 if (_prepareLock)
1001 IOLockFree(_prepareLock);
1002
1003 super::free();
1004 }
1005
1006 #ifndef __LP64__
1007 void IOGeneralMemoryDescriptor::unmapFromKernel()
1008 {
1009 panic("IOGMD::unmapFromKernel deprecated");
1010 }
1011
1012 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1013 {
1014 panic("IOGMD::mapIntoKernel deprecated");
1015 }
1016 #endif /* !__LP64__ */
1017
1018 /*
1019 * getDirection:
1020 *
1021 * Get the direction of the transfer.
1022 */
1023 IODirection IOMemoryDescriptor::getDirection() const
1024 {
1025 #ifndef __LP64__
1026 if (_direction)
1027 return _direction;
1028 #endif /* !__LP64__ */
1029 return (IODirection) (_flags & kIOMemoryDirectionMask);
1030 }
1031
1032 /*
1033 * getLength:
1034 *
1035 * Get the length of the transfer (over all ranges).
1036 */
1037 IOByteCount IOMemoryDescriptor::getLength() const
1038 {
1039 return _length;
1040 }
1041
1042 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1043 {
1044 _tag = tag;
1045 }
1046
1047 IOOptionBits IOMemoryDescriptor::getTag( void )
1048 {
1049 return( _tag);
1050 }
1051
1052 #ifndef __LP64__
1053 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1054 IOPhysicalAddress
1055 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1056 {
1057 addr64_t physAddr = 0;
1058
1059 if( prepare() == kIOReturnSuccess) {
1060 physAddr = getPhysicalSegment64( offset, length );
1061 complete();
1062 }
1063
1064 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1065 }
1066 #endif /* !__LP64__ */
1067
1068 IOByteCount IOMemoryDescriptor::readBytes
1069 (IOByteCount offset, void *bytes, IOByteCount length)
1070 {
1071 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1072 IOByteCount remaining;
1073
1074 // Assert that this entire I/O is within the available range
1075 assert(offset < _length);
1076 assert(offset + length <= _length);
1077 if (offset >= _length) {
1078 return 0;
1079 }
1080
1081 if (kIOMemoryThreadSafe & _flags)
1082 LOCK;
1083
1084 remaining = length = min(length, _length - offset);
1085 while (remaining) { // (process another target segment?)
1086 addr64_t srcAddr64;
1087 IOByteCount srcLen;
1088
1089 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1090 if (!srcAddr64)
1091 break;
1092
1093 // Clip segment length to remaining
1094 if (srcLen > remaining)
1095 srcLen = remaining;
1096
1097 copypv(srcAddr64, dstAddr, srcLen,
1098 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1099
1100 dstAddr += srcLen;
1101 offset += srcLen;
1102 remaining -= srcLen;
1103 }
1104
1105 if (kIOMemoryThreadSafe & _flags)
1106 UNLOCK;
1107
1108 assert(!remaining);
1109
1110 return length - remaining;
1111 }
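// Illustrative usage sketch (not part of the original source): copying the
// first bytes of a prepared descriptor into a hypothetical kernel buffer:
//
//     uint8_t buf[64];
//     IOByteCount got = md->readBytes(0, buf, sizeof(buf));
//     // got is the number of bytes actually copied (clipped to getLength()).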
1112
1113 IOByteCount IOMemoryDescriptor::writeBytes
1114 (IOByteCount offset, const void *bytes, IOByteCount length)
1115 {
1116 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1117 IOByteCount remaining;
1118
1119 // Assert that this entire I/O is within the available range
1120 assert(offset < _length);
1121 assert(offset + length <= _length);
1122
1123 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1124
1125 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1126 return 0;
1127 }
1128
1129 if (kIOMemoryThreadSafe & _flags)
1130 LOCK;
1131
1132 remaining = length = min(length, _length - offset);
1133 while (remaining) { // (process another target segment?)
1134 addr64_t dstAddr64;
1135 IOByteCount dstLen;
1136
1137 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1138 if (!dstAddr64)
1139 break;
1140
1141 // Clip segment length to remaining
1142 if (dstLen > remaining)
1143 dstLen = remaining;
1144
1145 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1146 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1147
1148 srcAddr += dstLen;
1149 offset += dstLen;
1150 remaining -= dstLen;
1151 }
1152
1153 if (kIOMemoryThreadSafe & _flags)
1154 UNLOCK;
1155
1156 assert(!remaining);
1157
1158 return length - remaining;
1159 }
1160
1161 // osfmk/device/iokit_rpc.c
1162 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1163
1164 #ifndef __LP64__
1165 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1166 {
1167 panic("IOGMD::setPosition deprecated");
1168 }
1169 #endif /* !__LP64__ */
1170
1171 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1172
1173 uint64_t
1174 IOGeneralMemoryDescriptor::getPreparationID( void )
1175 {
1176 ioGMDData *dataP;
1177
1178 if (!_wireCount)
1179 return (kIOPreparationIDUnprepared);
1180
1181 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
1182 return (kIOPreparationIDAlwaysPrepared);
1183
1184 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1185 return (kIOPreparationIDUnprepared);
1186
1187 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1188 {
1189 #if defined(__ppc__ )
1190 dataP->fPreparationID = gIOMDPreparationID++;
1191 #else
1192 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1193 #endif
1194 }
1195 return (dataP->fPreparationID);
1196 }
1197
1198 uint64_t
1199 IOMemoryDescriptor::getPreparationID( void )
1200 {
1201 return (kIOPreparationIDUnsupported);
1202 }
1203
1204 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1205 {
1206 if (kIOMDGetCharacteristics == op) {
1207
1208 if (dataSize < sizeof(IOMDDMACharacteristics))
1209 return kIOReturnUnderrun;
1210
1211 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1212 data->fLength = _length;
1213 data->fSGCount = _rangesCount;
1214 data->fPages = _pages;
1215 data->fDirection = getDirection();
1216 if (!_wireCount)
1217 data->fIsPrepared = false;
1218 else {
1219 data->fIsPrepared = true;
1220 data->fHighestPage = _highestPage;
1221 if (_memoryEntries) {
1222 ioGMDData *gmdData = getDataP(_memoryEntries);
1223 ioPLBlock *ioplList = getIOPLList(gmdData);
1224 UInt count = getNumIOPL(_memoryEntries, gmdData);
1225
1226 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1227 && ioplList[0].fMappedBase);
1228 if (count == 1)
1229 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1230 }
1231 else
1232 data->fIsMapped = false;
1233 }
1234
1235 return kIOReturnSuccess;
1236
1237 #if IOMD_DEBUG_DMAACTIVE
1238 } else if (kIOMDSetDMAActive == op) {
1239 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1240 OSIncrementAtomic(&md->__iomd_reservedA);
1241 } else if (kIOMDSetDMAInactive == op) {
1242 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1243 if (md->__iomd_reservedA)
1244 OSDecrementAtomic(&md->__iomd_reservedA);
1245 else
1246 panic("kIOMDSetDMAInactive");
1247 #endif /* IOMD_DEBUG_DMAACTIVE */
1248
1249 } else if (!(kIOMDWalkSegments & op))
1250 return kIOReturnBadArgument;
1251
1252 // Get the next segment
1253 struct InternalState {
1254 IOMDDMAWalkSegmentArgs fIO;
1255 UInt fOffset2Index;
1256 UInt fIndex;
1257 UInt fNextOffset;
1258 } *isP;
1259
1260 // Find the next segment
1261 if (dataSize < sizeof(*isP))
1262 return kIOReturnUnderrun;
1263
1264 isP = (InternalState *) vData;
1265 UInt offset = isP->fIO.fOffset;
1266 bool mapped = isP->fIO.fMapped;
1267
1268 if (offset >= _length)
1269 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1270
1271 // Validate the previous offset
1272 UInt ind, off2Ind = isP->fOffset2Index;
1273 if ((kIOMDFirstSegment != op)
1274 && offset
1275 && (offset == isP->fNextOffset || off2Ind <= offset))
1276 ind = isP->fIndex;
1277 else
1278 ind = off2Ind = 0; // Start from beginning
1279
1280 UInt length;
1281 UInt64 address;
1282 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1283
1284 // Physical address based memory descriptor
1285 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1286
1287 // Find the range after the one that contains the offset
1288 mach_vm_size_t len;
1289 for (len = 0; off2Ind <= offset; ind++) {
1290 len = physP[ind].length;
1291 off2Ind += len;
1292 }
1293
1294 // Calculate length within range and starting address
1295 length = off2Ind - offset;
1296 address = physP[ind - 1].address + len - length;
1297
1298 // see how far we can coalesce ranges
1299 while (ind < _rangesCount && address + length == physP[ind].address) {
1300 len = physP[ind].length;
1301 length += len;
1302 off2Ind += len;
1303 ind++;
1304 }
1305
1306 // correct contiguous check overshoot
1307 ind--;
1308 off2Ind -= len;
1309 }
1310 #ifndef __LP64__
1311 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1312
1313 // Physical address based memory descriptor
1314 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1315
1316 // Find the range after the one that contains the offset
1317 mach_vm_size_t len;
1318 for (len = 0; off2Ind <= offset; ind++) {
1319 len = physP[ind].length;
1320 off2Ind += len;
1321 }
1322
1323 // Calculate length within range and starting address
1324 length = off2Ind - offset;
1325 address = physP[ind - 1].address + len - length;
1326
1327 // see how far we can coalesce ranges
1328 while (ind < _rangesCount && address + length == physP[ind].address) {
1329 len = physP[ind].length;
1330 length += len;
1331 off2Ind += len;
1332 ind++;
1333 }
1334
1335 // correct contiguous check overshoot
1336 ind--;
1337 off2Ind -= len;
1338 }
1339 #endif /* !__LP64__ */
1340 else do {
1341 if (!_wireCount)
1342 panic("IOGMD: not wired for the IODMACommand");
1343
1344 assert(_memoryEntries);
1345
1346 ioGMDData * dataP = getDataP(_memoryEntries);
1347 const ioPLBlock *ioplList = getIOPLList(dataP);
1348 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1349 upl_page_info_t *pageList = getPageList(dataP);
1350
1351 assert(numIOPLs > 0);
1352
1353 // Scan through iopl info blocks looking for block containing offset
1354 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1355 ind++;
1356
1357 // Go back to actual range as search goes past it
1358 ioPLBlock ioplInfo = ioplList[ind - 1];
1359 off2Ind = ioplInfo.fIOMDOffset;
1360
1361 if (ind < numIOPLs)
1362 length = ioplList[ind].fIOMDOffset;
1363 else
1364 length = _length;
1365 length -= offset; // Remainder within iopl
1366
1367 // Rebase the offset to be relative to the start of this iopl
1368 offset -= off2Ind;
1369
1370 // If a mapped address is requested and this is a pre-mapped IOPL
1371 // then just need to compute an offset relative to the mapped base.
1372 if (mapped && ioplInfo.fMappedBase) {
1373 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1374 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1375 continue; // Done; leave the do { } while(false) now
1376 }
1377
1378 // The offset is rebased into the current iopl.
1379 // Now add the iopl 1st page offset.
1380 offset += ioplInfo.fPageOffset;
1381
1382 // For external UPLs the fPageInfo field points directly to
1383 // the upl's upl_page_info_t array.
1384 if (ioplInfo.fFlags & kIOPLExternUPL)
1385 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1386 else
1387 pageList = &pageList[ioplInfo.fPageInfo];
1388
1389 // Check for direct device non-paged memory
1390 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1391 address = ptoa_64(pageList->phys_addr) + offset;
1392 continue; // Done; leave the do { } while(false) now
1393 }
1394
1395 // Now we need to compute the index into the pageList
1396 UInt pageInd = atop_32(offset);
1397 offset &= PAGE_MASK;
1398
1399 // Compute the starting address of this segment
1400 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1401 if (!pageAddr) {
1402 panic("!pageList phys_addr");
1403 }
1404
1405 address = ptoa_64(pageAddr) + offset;
1406
1407 // length is currently set to the length of the remainder of the iopl.
1408 // We need to check that the remainder of the iopl is contiguous.
1409 // This is indicated by pageList[pageInd].phys_addr being sequential.
1410 IOByteCount contigLength = PAGE_SIZE - offset;
1411 while (contigLength < length
1412 && ++pageAddr == pageList[++pageInd].phys_addr)
1413 {
1414 contigLength += PAGE_SIZE;
1415 }
1416
1417 if (contigLength < length)
1418 length = contigLength;
1419
1420
1421 assert(address);
1422 assert(length);
1423
1424 } while (false);
1425
1426 // Update return values and state
1427 isP->fIO.fIOVMAddr = address;
1428 isP->fIO.fLength = length;
1429 isP->fIndex = ind;
1430 isP->fOffset2Index = off2Ind;
1431 isP->fNextOffset = isP->fIO.fOffset + length;
1432
1433 return kIOReturnSuccess;
1434 }
1435
1436 addr64_t
1437 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1438 {
1439 IOReturn ret;
1440 addr64_t address = 0;
1441 IOByteCount length = 0;
1442 IOMapper * mapper = gIOSystemMapper;
1443 IOOptionBits type = _flags & kIOMemoryTypeMask;
1444
1445 if (lengthOfSegment)
1446 *lengthOfSegment = 0;
1447
1448 if (offset >= _length)
1449 return 0;
1450
1451 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1452 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1453 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1454 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1455
1456 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1457 {
1458 unsigned rangesIndex = 0;
1459 Ranges vec = _ranges;
1460 user_addr_t addr;
1461
1462 // Find starting address within the vector of ranges
1463 for (;;) {
1464 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1465 if (offset < length)
1466 break;
1467 offset -= length; // (make offset relative)
1468 rangesIndex++;
1469 }
1470
1471 // Now that we have the starting range,
1472 // lets find the last contiguous range
1473 addr += offset;
1474 length -= offset;
1475
1476 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1477 user_addr_t newAddr;
1478 IOPhysicalLength newLen;
1479
1480 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1481 if (addr + length != newAddr)
1482 break;
1483 length += newLen;
1484 }
1485 if (addr)
1486 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1487 }
1488 else
1489 {
1490 IOMDDMAWalkSegmentState _state;
1491 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1492
1493 state->fOffset = offset;
1494 state->fLength = _length - offset;
1495 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1496
1497 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1498
1499 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1500 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1501 ret, this, state->fOffset,
1502 state->fIOVMAddr, state->fLength);
1503 if (kIOReturnSuccess == ret)
1504 {
1505 address = state->fIOVMAddr;
1506 length = state->fLength;
1507 }
1508
1509 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1510 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1511
1512 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1513 {
1514 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1515 {
1516 addr64_t origAddr = address;
1517 IOByteCount origLen = length;
1518
1519 address = mapper->mapAddr(origAddr);
1520 length = page_size - (address & (page_size - 1));
1521 while ((length < origLen)
1522 && ((address + length) == mapper->mapAddr(origAddr + length)))
1523 length += page_size;
1524 if (length > origLen)
1525 length = origLen;
1526 }
1527 #ifdef __LP64__
1528 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1529 {
1530 panic("getPhysicalSegment not mapped for I/O");
1531 }
1532 #endif /* __LP64__ */
1533 }
1534 }
1535
1536 if (!address)
1537 length = 0;
1538
1539 if (lengthOfSegment)
1540 *lengthOfSegment = length;
1541
1542 return (address);
1543 }
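// Illustrative sketch (not part of the original source): walking the physical
// segments of a prepared descriptor, e.g. to build a scatter/gather list.
// kIOMemoryMapperNone requests CPU physical addresses rather than
// system-mapper (DART) addresses:
//
//     IOByteCount offset = 0, segLen;
//     while (offset < md->getLength()) {
//         addr64_t segAddr = md->getPhysicalSegment(offset, &segLen,
//                                                   kIOMemoryMapperNone);
//         if (!segAddr) break;
//         // ... program one hardware descriptor with segAddr / segLen ...
//         offset += segLen;
//     }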
1544
1545 #ifndef __LP64__
1546 addr64_t
1547 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1548 {
1549 addr64_t address = 0;
1550
1551 if (options & _kIOMemorySourceSegment)
1552 {
1553 address = getSourceSegment(offset, lengthOfSegment);
1554 }
1555 else if (options & kIOMemoryMapperNone)
1556 {
1557 address = getPhysicalSegment64(offset, lengthOfSegment);
1558 }
1559 else
1560 {
1561 address = getPhysicalSegment(offset, lengthOfSegment);
1562 }
1563
1564 return (address);
1565 }
1566
1567 addr64_t
1568 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1569 {
1570 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1571 }
1572
1573 IOPhysicalAddress
1574 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1575 {
1576 addr64_t address = 0;
1577 IOByteCount length = 0;
1578
1579 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1580
1581 if (lengthOfSegment)
1582 length = *lengthOfSegment;
1583
1584 if ((address + length) > 0x100000000ULL)
1585 {
1586 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1587 address, (long) length, (getMetaClass())->getClassName());
1588 }
1589
1590 return ((IOPhysicalAddress) address);
1591 }
1592
1593 addr64_t
1594 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1595 {
1596 IOPhysicalAddress phys32;
1597 IOByteCount length;
1598 addr64_t phys64;
1599 IOMapper * mapper = 0;
1600
1601 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1602 if (!phys32)
1603 return 0;
1604
1605 if (gIOSystemMapper)
1606 mapper = gIOSystemMapper;
1607
1608 if (mapper)
1609 {
1610 IOByteCount origLen;
1611
1612 phys64 = mapper->mapAddr(phys32);
1613 origLen = *lengthOfSegment;
1614 length = page_size - (phys64 & (page_size - 1));
1615 while ((length < origLen)
1616 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1617 length += page_size;
1618 if (length > origLen)
1619 length = origLen;
1620
1621 *lengthOfSegment = length;
1622 }
1623 else
1624 phys64 = (addr64_t) phys32;
1625
1626 return phys64;
1627 }
1628
1629 IOPhysicalAddress
1630 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1631 {
1632 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1633 }
1634
1635 IOPhysicalAddress
1636 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1637 {
1638 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1639 }
1640
1641 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1642 IOByteCount * lengthOfSegment)
1643 {
1644 if (_task == kernel_task)
1645 return (void *) getSourceSegment(offset, lengthOfSegment);
1646 else
1647 panic("IOGMD::getVirtualSegment deprecated");
1648
1649 return 0;
1650 }
1651 #endif /* !__LP64__ */
1652
1653 IOReturn
1654 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1655 {
1656 if (kIOMDGetCharacteristics == op) {
1657 if (dataSize < sizeof(IOMDDMACharacteristics))
1658 return kIOReturnUnderrun;
1659
1660 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1661 data->fLength = getLength();
1662 data->fSGCount = 0;
1663 data->fDirection = getDirection();
1664 if (IOMapper::gSystem)
1665 data->fIsMapped = true;
1666 data->fIsPrepared = true; // Assume prepared - fails safe
1667 }
1668 else if (kIOMDWalkSegments & op) {
1669 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1670 return kIOReturnUnderrun;
1671
1672 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1673 IOByteCount offset = (IOByteCount) data->fOffset;
1674
1675 IOPhysicalLength length;
1676 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1677 if (data->fMapped && IOMapper::gSystem)
1678 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1679 else
1680 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1681 data->fLength = length;
1682 }
1683 else
1684 return kIOReturnBadArgument;
1685
1686 return kIOReturnSuccess;
1687 }
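// Illustrative sketch (assumption, not from the original source): this mirrors
// how getPhysicalSegment() above drives the walk-segments operation; a caller
// such as IODMACommand fills in the same argument structure:
//
//     IOMDDMAWalkSegmentState walkState;
//     IOMDDMAWalkSegmentArgs * args = (IOMDDMAWalkSegmentArgs *) &walkState;
//     args->fOffset = 0;
//     args->fLength = md->getLength();
//     args->fMapped = false;
//     if (kIOReturnSuccess == md->dmaCommandOperation(kIOMDFirstSegment,
//                                                     walkState, sizeof(walkState)))
//     {
//         // args->fIOVMAddr and args->fLength now describe the first segment
//     }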
1688
1689 static IOReturn
1690 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1691 {
1692 IOReturn err = kIOReturnSuccess;
1693
1694 *control = VM_PURGABLE_SET_STATE;
1695 switch (newState)
1696 {
1697 case kIOMemoryPurgeableKeepCurrent:
1698 *control = VM_PURGABLE_GET_STATE;
1699 break;
1700
1701 case kIOMemoryPurgeableNonVolatile:
1702 *state = VM_PURGABLE_NONVOLATILE;
1703 break;
1704 case kIOMemoryPurgeableVolatile:
1705 *state = VM_PURGABLE_VOLATILE;
1706 break;
1707 case kIOMemoryPurgeableEmpty:
1708 *state = VM_PURGABLE_EMPTY;
1709 break;
1710 default:
1711 err = kIOReturnBadArgument;
1712 break;
1713 }
1714 return (err);
1715 }
1716
1717 static IOReturn
1718 purgeableStateBits(int * state)
1719 {
1720 IOReturn err = kIOReturnSuccess;
1721
1722 switch (*state)
1723 {
1724 case VM_PURGABLE_NONVOLATILE:
1725 *state = kIOMemoryPurgeableNonVolatile;
1726 break;
1727 case VM_PURGABLE_VOLATILE:
1728 *state = kIOMemoryPurgeableVolatile;
1729 break;
1730 case VM_PURGABLE_EMPTY:
1731 *state = kIOMemoryPurgeableEmpty;
1732 break;
1733 default:
1734 *state = kIOMemoryPurgeableNonVolatile;
1735 err = kIOReturnNotReady;
1736 break;
1737 }
1738 return (err);
1739 }
1740
1741 IOReturn
1742 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1743 IOOptionBits * oldState )
1744 {
1745 IOReturn err = kIOReturnSuccess;
1746 vm_purgable_t control;
1747 int state;
1748
1749 if (_memEntry)
1750 {
1751 err = super::setPurgeable(newState, oldState);
1752 }
1753 else
1754 {
1755 if (kIOMemoryThreadSafe & _flags)
1756 LOCK;
1757 do
1758 {
1759 // Find the appropriate vm_map for the given task
1760 vm_map_t curMap;
1761 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1762 {
1763 err = kIOReturnNotReady;
1764 break;
1765 }
1766 else
1767 curMap = get_task_map(_task);
1768
1769 // can only do one range
1770 Ranges vec = _ranges;
1771 IOOptionBits type = _flags & kIOMemoryTypeMask;
1772 user_addr_t addr;
1773 IOByteCount len;
1774 getAddrLenForInd(addr, len, type, vec, 0);
1775
1776 err = purgeableControlBits(newState, &control, &state);
1777 if (kIOReturnSuccess != err)
1778 break;
1779 err = mach_vm_purgable_control(curMap, addr, control, &state);
1780 if (oldState)
1781 {
1782 if (kIOReturnSuccess == err)
1783 {
1784 err = purgeableStateBits(&state);
1785 *oldState = state;
1786 }
1787 }
1788 }
1789 while (false);
1790 if (kIOMemoryThreadSafe & _flags)
1791 UNLOCK;
1792 }
1793 return (err);
1794 }
1795
1796 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1797 IOOptionBits * oldState )
1798 {
1799 IOReturn err = kIOReturnSuccess;
1800 vm_purgable_t control;
1801 int state;
1802
1803 if (kIOMemoryThreadSafe & _flags)
1804 LOCK;
1805
1806 do
1807 {
1808 if (!_memEntry)
1809 {
1810 err = kIOReturnNotReady;
1811 break;
1812 }
1813 err = purgeableControlBits(newState, &control, &state);
1814 if (kIOReturnSuccess != err)
1815 break;
1816 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1817 if (oldState)
1818 {
1819 if (kIOReturnSuccess == err)
1820 {
1821 err = purgeableStateBits(&state);
1822 *oldState = state;
1823 }
1824 }
1825 }
1826 while (false);
1827
1828 if (kIOMemoryThreadSafe & _flags)
1829 UNLOCK;
1830
1831 return (err);
1832 }
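// Illustrative usage sketch (not part of the original source): marking a
// descriptor's backing memory volatile so the VM may reclaim it under
// pressure, then reclaiming it for use and checking whether the contents
// survived:
//
//     IOOptionBits oldState;
//     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//     // ... later ...
//     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//     if (kIOMemoryPurgeableEmpty == oldState) {
//         // pages were reclaimed; the caller must regenerate the contents
//     }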
1833
1834 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1835 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1836
1837 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1838 IOByteCount offset, IOByteCount length )
1839 {
1840 IOByteCount remaining;
1841 void (*func)(addr64_t pa, unsigned int count) = 0;
1842
1843 switch (options)
1844 {
1845 case kIOMemoryIncoherentIOFlush:
1846 func = &dcache_incoherent_io_flush64;
1847 break;
1848 case kIOMemoryIncoherentIOStore:
1849 func = &dcache_incoherent_io_store64;
1850 break;
1851 }
1852
1853 if (!func)
1854 return (kIOReturnUnsupported);
1855
1856 if (kIOMemoryThreadSafe & _flags)
1857 LOCK;
1858
1859 remaining = length = min(length, getLength() - offset);
1860 while (remaining)
1861 // (process another target segment?)
1862 {
1863 addr64_t dstAddr64;
1864 IOByteCount dstLen;
1865
1866 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1867 if (!dstAddr64)
1868 break;
1869
1870 // Clip segment length to remaining
1871 if (dstLen > remaining)
1872 dstLen = remaining;
1873
1874 (*func)(dstAddr64, dstLen);
1875
1876 offset += dstLen;
1877 remaining -= dstLen;
1878 }
1879
1880 if (kIOMemoryThreadSafe & _flags)
1881 UNLOCK;
1882
1883 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1884 }
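// Illustrative usage sketch (not part of the original source): on hardware
// without coherent DMA, writing dirty CPU cache lines back to memory for the
// whole descriptor before a device reads the buffer:
//
//     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());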
1885
1886 #if defined(__ppc__) || defined(__arm__)
1887 extern vm_offset_t static_memory_end;
1888 #define io_kernel_static_end static_memory_end
1889 #else
1890 extern vm_offset_t first_avail;
1891 #define io_kernel_static_end first_avail
1892 #endif
1893
1894 static kern_return_t
1895 io_get_kernel_static_upl(
1896 vm_map_t /* map */,
1897 uintptr_t offset,
1898 vm_size_t *upl_size,
1899 upl_t *upl,
1900 upl_page_info_array_t page_list,
1901 unsigned int *count,
1902 ppnum_t *highest_page)
1903 {
1904 unsigned int pageCount, page;
1905 ppnum_t phys;
1906 ppnum_t highestPage = 0;
1907
1908 pageCount = atop_32(*upl_size);
1909 if (pageCount > *count)
1910 pageCount = *count;
1911
1912 *upl = NULL;
1913
1914 for (page = 0; page < pageCount; page++)
1915 {
1916 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1917 if (!phys)
1918 break;
1919 page_list[page].phys_addr = phys;
1920 page_list[page].pageout = 0;
1921 page_list[page].absent = 0;
1922 page_list[page].dirty = 0;
1923 page_list[page].precious = 0;
1924 page_list[page].device = 0;
1925 if (phys > highestPage)
1926 highestPage = phys;
1927 }
1928
1929 *highest_page = highestPage;
1930
1931 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1932 }
1933
1934 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1935 {
1936 IOOptionBits type = _flags & kIOMemoryTypeMask;
1937 IOReturn error = kIOReturnCannotWire;
1938 ioGMDData *dataP;
1939 ppnum_t mapBase = 0;
1940 IOMapper *mapper;
1941 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1942
1943 assert(!_wireCount);
1944 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1945
1946 if (_pages >= gIOMaximumMappedIOPageCount)
1947 return kIOReturnNoResources;
1948
1949 dataP = getDataP(_memoryEntries);
1950 mapper = dataP->fMapper;
1951 if (mapper && _pages)
1952 mapBase = mapper->iovmAlloc(_pages);
1953
1954 // Note that appendBytes(NULL) zeros the data up to the
1955 // desired length.
1956 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1957 dataP = 0; // May no longer be valid so let's not get tempted.
1958
1959 if (forDirection == kIODirectionNone)
1960 forDirection = getDirection();
1961
1962 int uplFlags; // This Mem Desc's default flags for upl creation
1963 switch (kIODirectionOutIn & forDirection)
1964 {
1965 case kIODirectionOut:
1966 // Pages do not need to be marked as dirty on commit
1967 uplFlags = UPL_COPYOUT_FROM;
1968 _flags |= kIOMemoryPreparedReadOnly;
1969 break;
1970
1971 case kIODirectionIn:
1972 default:
1973 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1974 break;
1975 }
1976 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1977
1978 #ifdef UPL_NEED_32BIT_ADDR
1979 if (kIODirectionPrepareToPhys32 & forDirection)
1980 uplFlags |= UPL_NEED_32BIT_ADDR;
1981 #endif
1982
1983 // Find the appropriate vm_map for the given task
1984 vm_map_t curMap;
1985 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1986 curMap = 0;
1987 else
1988 { curMap = get_task_map(_task); }
1989
1990 // Iterate over the vector of virtual ranges
1991 Ranges vec = _ranges;
1992 unsigned int pageIndex = 0;
1993 IOByteCount mdOffset = 0;
1994 ppnum_t highestPage = 0;
1995 for (UInt range = 0; range < _rangesCount; range++) {
1996 ioPLBlock iopl;
1997 user_addr_t startPage;
1998 IOByteCount numBytes;
1999 ppnum_t highPage = 0;
2000
2001 // Get the startPage address and length of vec[range]
2002 getAddrLenForInd(startPage, numBytes, type, vec, range);
2003 iopl.fPageOffset = startPage & PAGE_MASK;
2004 numBytes += iopl.fPageOffset;
2005 startPage = trunc_page_64(startPage);
2006
2007 if (mapper)
2008 iopl.fMappedBase = mapBase + pageIndex;
2009 else
2010 iopl.fMappedBase = 0;
2011
2012 // Iterate over the current range, creating UPLs
2013 while (numBytes) {
2014 dataP = getDataP(_memoryEntries);
2015 vm_address_t kernelStart = (vm_address_t) startPage;
2016 vm_map_t theMap;
2017 if (curMap)
2018 theMap = curMap;
2019 else if (!sharedMem) {
2020 assert(_task == kernel_task);
2021 theMap = IOPageableMapForAddress(kernelStart);
2022 }
2023 else
2024 theMap = NULL;
2025
2026 upl_page_info_array_t pageInfo = getPageList(dataP);
2027 int ioplFlags = uplFlags;
2028 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2029
2030 vm_size_t ioplSize = round_page(numBytes);
2031 unsigned int numPageInfo = atop_32(ioplSize);
2032
2033 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2034 error = io_get_kernel_static_upl(theMap,
2035 kernelStart,
2036 &ioplSize,
2037 &iopl.fIOPL,
2038 baseInfo,
2039 &numPageInfo,
2040 &highPage);
2041 }
2042 else if (sharedMem) {
2043 error = memory_object_iopl_request(sharedMem,
2044 ptoa_32(pageIndex),
2045 &ioplSize,
2046 &iopl.fIOPL,
2047 baseInfo,
2048 &numPageInfo,
2049 &ioplFlags);
2050 }
2051 else {
2052 assert(theMap);
2053 error = vm_map_create_upl(theMap,
2054 startPage,
2055 (upl_size_t*)&ioplSize,
2056 &iopl.fIOPL,
2057 baseInfo,
2058 &numPageInfo,
2059 &ioplFlags);
2060 }
2061
2062 assert(ioplSize);
2063 if (error != KERN_SUCCESS)
2064 goto abortExit;
2065
2066 if (iopl.fIOPL)
2067 highPage = upl_get_highest_page(iopl.fIOPL);
2068 if (highPage > highestPage)
2069 highestPage = highPage;
2070
2071 error = kIOReturnCannotWire;
2072
2073 if (baseInfo->device) {
2074 numPageInfo = 1;
2075 iopl.fFlags = kIOPLOnDevice;
2076 // Don't translate device memory at all
2077 if (mapper && mapBase) {
2078 mapper->iovmFree(mapBase, _pages);
2079 mapBase = 0;
2080 iopl.fMappedBase = 0;
2081 }
2082 }
2083 else {
2084 iopl.fFlags = 0;
2085 if (mapper)
2086 mapper->iovmInsert(mapBase, pageIndex,
2087 baseInfo, numPageInfo);
2088 }
2089
2090 iopl.fIOMDOffset = mdOffset;
2091 iopl.fPageInfo = pageIndex;
2092
2093 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2094 {
2095 upl_commit(iopl.fIOPL, 0, 0);
2096 upl_deallocate(iopl.fIOPL);
2097 iopl.fIOPL = 0;
2098 }
2099
2100 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2101 // Clean up partially created and unsaved iopl
2102 if (iopl.fIOPL) {
2103 upl_abort(iopl.fIOPL, 0);
2104 upl_deallocate(iopl.fIOPL);
2105 }
2106 goto abortExit;
2107 }
2108
2109 // Check for multiple iopls in one virtual range
2110 pageIndex += numPageInfo;
2111 mdOffset -= iopl.fPageOffset;
2112 if (ioplSize < numBytes) {
2113 numBytes -= ioplSize;
2114 startPage += ioplSize;
2115 mdOffset += ioplSize;
2116 iopl.fPageOffset = 0;
2117 if (mapper)
2118 iopl.fMappedBase = mapBase + pageIndex;
2119 }
2120 else {
2121 mdOffset += numBytes;
2122 break;
2123 }
2124 }
2125 }
2126
2127 _highestPage = highestPage;
2128
2129 return kIOReturnSuccess;
2130
2131 abortExit:
2132 {
2133 dataP = getDataP(_memoryEntries);
2134 UInt done = getNumIOPL(_memoryEntries, dataP);
2135 ioPLBlock *ioplList = getIOPLList(dataP);
2136
2137 for (UInt range = 0; range < done; range++)
2138 {
2139 if (ioplList[range].fIOPL) {
2140 upl_abort(ioplList[range].fIOPL, 0);
2141 upl_deallocate(ioplList[range].fIOPL);
2142 }
2143 }
2144 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2145
2146 if (mapper && mapBase)
2147 mapper->iovmFree(mapBase, _pages);
2148 }
2149
2150 if (error == KERN_FAILURE)
2151 error = kIOReturnCannotWire;
2152
2153 return error;
2154 }
2155
2156 /*
2157 * prepare
2158 *
2159 * Prepare the memory for an I/O transfer. This involves paging in
2160 * the memory, if necessary, and wiring it down for the duration of
2161 * the transfer. The complete() method completes the processing of
2162 * the memory after the I/O transfer finishes. This method need not be
2163 * called for non-pageable memory (see the usage sketch after complete()).
2164 */
2165 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2166 {
2167 IOReturn error = kIOReturnSuccess;
2168 IOOptionBits type = _flags & kIOMemoryTypeMask;
2169
2170 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2171 return kIOReturnSuccess;
2172
2173 if (_prepareLock)
2174 IOLockLock(_prepareLock);
2175
2176 if (!_wireCount
2177 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2178 error = wireVirtual(forDirection);
2179 }
2180
2181 if (kIOReturnSuccess == error)
2182 _wireCount++;
2183
2184 if (_prepareLock)
2185 IOLockUnlock(_prepareLock);
2186
2187 return error;
2188 }
2189
2190 /*
2191 * complete
2192 *
2193 * Complete processing of the memory after an I/O transfer finishes.
2194 * This method should not be called unless a prepare was previously
2195 * issued; the prepare() and complete() calls must occur in pairs,
2196 * before and after an I/O transfer involving pageable memory.
2197 */
2198
2199 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2200 {
2201 IOOptionBits type = _flags & kIOMemoryTypeMask;
2202
2203 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2204 return kIOReturnSuccess;
2205
2206 if (_prepareLock)
2207 IOLockLock(_prepareLock);
2208
2209 assert(_wireCount);
2210
2211 if (_wireCount)
2212 {
2213 _wireCount--;
2214 if (!_wireCount)
2215 {
2216 IOOptionBits type = _flags & kIOMemoryTypeMask;
2217 ioGMDData * dataP = getDataP(_memoryEntries);
2218 ioPLBlock *ioplList = getIOPLList(dataP);
2219 UInt count = getNumIOPL(_memoryEntries, dataP);
2220
2221 #if IOMD_DEBUG_DMAACTIVE
2222 if (__iomd_reservedA) panic("complete() while dma active");
2223 #endif /* IOMD_DEBUG_DMAACTIVE */
2224
2225 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2226 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2227
2228 // Only complete iopls that we created which are for TypeVirtual
2229 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2230 for (UInt ind = 0; ind < count; ind++)
2231 if (ioplList[ind].fIOPL) {
2232 upl_commit(ioplList[ind].fIOPL, 0, 0);
2233 upl_deallocate(ioplList[ind].fIOPL);
2234 }
2235 }
2236 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2237
2238 dataP->fPreparationID = kIOPreparationIDUnprepared;
2239 }
2240 }
2241
2242 if (_prepareLock)
2243 IOLockUnlock(_prepareLock);
2244
2245 return kIOReturnSuccess;
2246 }
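
// A minimal usage sketch (not part of this file's build): prepare() and
// complete() bracket an I/O transfer on pageable memory.  ProgramHardwareDMA()
// is a hypothetical stand-in for whatever actually drives the transfer.
#if 0
static IOReturn
ExampleWiredTransfer( IOMemoryDescriptor * md )
{
    // Page in and wire the memory for an outbound transfer
    IOReturn ret = md->prepare( kIODirectionOut );
    if (kIOReturnSuccess != ret)
        return (ret);

    // ... program the hardware, e.g. with segments from getPhysicalSegment(),
    // and wait for the transfer to finish ...
    ProgramHardwareDMA( md );

    // Every successful prepare() must be balanced by a complete()
    return (md->complete( kIODirectionOut ));
}
#endif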
2247
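//
// Map this descriptor.  If the target map is the originating task's own map
// the existing mapping may be reused (kIOMapStatic).  Otherwise, when no
// memory entry exists yet, one is created for task-backed memory, or a
// device pager is set up for physical ranges, before deferring to
// IOMemoryDescriptor::doMap().
//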
2248 IOReturn IOGeneralMemoryDescriptor::doMap(
2249 vm_map_t __addressMap,
2250 IOVirtualAddress * __address,
2251 IOOptionBits options,
2252 IOByteCount __offset,
2253 IOByteCount __length )
2254
2255 {
2256 #ifndef __LP64__
2257 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2258 #endif /* !__LP64__ */
2259
2260 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2261 mach_vm_size_t offset = mapping->fOffset + __offset;
2262 mach_vm_size_t length = mapping->fLength;
2263
2264 kern_return_t kr = kIOReturnVMError;
2265 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2266
2267 IOOptionBits type = _flags & kIOMemoryTypeMask;
2268 Ranges vec = _ranges;
2269
2270 user_addr_t range0Addr = 0;
2271 IOByteCount range0Len = 0;
2272
2273 if (vec.v)
2274 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2275
2276 // mapping source == dest? (could be much better)
2277 if( _task
2278 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2279 && (1 == _rangesCount) && (0 == offset)
2280 && range0Addr && (length <= range0Len) )
2281 {
2282 mapping->fAddress = range0Addr;
2283 mapping->fOptions |= kIOMapStatic;
2284
2285 return( kIOReturnSuccess );
2286 }
2287
2288 if( 0 == sharedMem) {
2289
2290 vm_size_t size = ptoa_32(_pages);
2291
2292 if( _task) {
2293
2294 memory_object_size_t actualSize = size;
2295 vm_prot_t prot = VM_PROT_READ;
2296 if (!(kIOMapReadOnly & options))
2297 prot |= VM_PROT_WRITE;
2298 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2299 prot |= VM_PROT_WRITE;
2300
2301 kr = mach_make_memory_entry_64(get_task_map(_task),
2302 &actualSize, range0Addr,
2303 prot, &sharedMem,
2304 NULL );
2305
2306 if( (KERN_SUCCESS == kr) && (actualSize != round_page(size)))
2307 {
2308 // map will cross vm objects
2309 #if IOASSERT
2310 IOLog("mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
2311 range0Addr, (UInt64)actualSize, (UInt64)size);
2312 #endif
2313 kr = kIOReturnVMError;
2314 ipc_port_release_send( sharedMem );
2315 sharedMem = MACH_PORT_NULL;
2316
2317 mach_vm_address_t address;
2318 mach_vm_size_t pageOffset = (range0Addr & PAGE_MASK);
2319
2320 address = trunc_page_64(mapping->fAddress);
2321 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2322 {
2323 kr = IOMemoryDescriptorMapCopy(mapping->fAddressMap,
2324 get_task_map(_task), range0Addr,
2325 options,
2326 offset, &address, round_page_64(length + pageOffset));
2327 if (kr == KERN_SUCCESS)
2328 mapping->fAddress = address + pageOffset;
2329 else
2330 mapping->fAddress = NULL;
2331 }
2332 }
2333 }
2334 else do
2335 { // _task == 0, must be physical
2336
2337 memory_object_t pager;
2338 unsigned int flags = 0;
2339 addr64_t pa;
2340 IOPhysicalLength segLen;
2341
2342 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2343
2344 if( !reserved) {
2345 reserved = IONew( ExpansionData, 1 );
2346 if( !reserved)
2347 continue;
2348 }
2349 reserved->pagerContig = (1 == _rangesCount);
2350 reserved->memory = this;
2351
2352 /* What cache mode do we need? */
2353 switch(options & kIOMapCacheMask ) {
2354
2355 case kIOMapDefaultCache:
2356 default:
2357 flags = IODefaultCacheBits(pa);
2358 if (DEVICE_PAGER_CACHE_INHIB & flags)
2359 {
2360 if (DEVICE_PAGER_GUARDED & flags)
2361 mapping->fOptions |= kIOMapInhibitCache;
2362 else
2363 mapping->fOptions |= kIOMapWriteCombineCache;
2364 }
2365 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2366 mapping->fOptions |= kIOMapWriteThruCache;
2367 else
2368 mapping->fOptions |= kIOMapCopybackCache;
2369 break;
2370
2371 case kIOMapInhibitCache:
2372 flags = DEVICE_PAGER_CACHE_INHIB |
2373 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2374 break;
2375
2376 case kIOMapWriteThruCache:
2377 flags = DEVICE_PAGER_WRITE_THROUGH |
2378 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2379 break;
2380
2381 case kIOMapCopybackCache:
2382 flags = DEVICE_PAGER_COHERENT;
2383 break;
2384
2385 case kIOMapWriteCombineCache:
2386 flags = DEVICE_PAGER_CACHE_INHIB |
2387 DEVICE_PAGER_COHERENT;
2388 break;
2389 }
2390
2391 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2392
2393 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2394 size, flags);
2395 assert( pager );
2396
2397 if( pager) {
2398 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2399 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2400
2401 assert( KERN_SUCCESS == kr );
2402 if( KERN_SUCCESS != kr)
2403 {
2404 device_pager_deallocate( pager );
2405 pager = MACH_PORT_NULL;
2406 sharedMem = MACH_PORT_NULL;
2407 }
2408 }
2409 if( pager && sharedMem)
2410 reserved->devicePager = pager;
2411 else {
2412 IODelete( reserved, ExpansionData, 1 );
2413 reserved = 0;
2414 }
2415
2416 } while( false );
2417
2418 _memEntry = (void *) sharedMem;
2419 }
2420
2421 IOReturn result;
2422 if (0 == sharedMem)
2423 result = kr;
2424 else
2425 result = super::doMap( __addressMap, __address,
2426 options, __offset, __length );
2427
2428 return( result );
2429 }
2430
2431 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2432 vm_map_t addressMap,
2433 IOVirtualAddress __address,
2434 IOByteCount __length )
2435 {
2436 return (super::doUnmap(addressMap, __address, __length));
2437 }
2438
2439 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2440
2441 #undef super
2442 #define super OSObject
2443
2444 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2445
2446 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2447 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2448 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2449 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2450 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2451 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2452 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2453 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2454
2455 /* ex-inline function implementation */
2456 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2457 { return( getPhysicalSegment( 0, 0 )); }
2458
2459 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2460
2461 bool IOMemoryMap::init(
2462 task_t intoTask,
2463 mach_vm_address_t toAddress,
2464 IOOptionBits _options,
2465 mach_vm_size_t _offset,
2466 mach_vm_size_t _length )
2467 {
2468 if (!intoTask)
2469 return( false);
2470
2471 if (!super::init())
2472 return(false);
2473
2474 fAddressMap = get_task_map(intoTask);
2475 if (!fAddressMap)
2476 return(false);
2477 vm_map_reference(fAddressMap);
2478
2479 fAddressTask = intoTask;
2480 fOptions = _options;
2481 fLength = _length;
2482 fOffset = _offset;
2483 fAddress = toAddress;
2484
2485 return (true);
2486 }
2487
2488 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2489 {
2490 if (!_memory)
2491 return(false);
2492
2493 if (!fSuperMap)
2494 {
2495 if( (_offset + fLength) > _memory->getLength())
2496 return( false);
2497 fOffset = _offset;
2498 }
2499
2500 _memory->retain();
2501 if (fMemory)
2502 {
2503 if (fMemory != _memory)
2504 fMemory->removeMapping(this);
2505 fMemory->release();
2506 }
2507 fMemory = _memory;
2508
2509 return( true );
2510 }
2511
2512 struct IOMemoryDescriptorMapAllocRef
2513 {
2514 ipc_port_t sharedMem;
2515 vm_map_t src_map;
2516 mach_vm_offset_t src_address;
2517 mach_vm_address_t mapped;
2518 mach_vm_size_t size;
2519 mach_vm_size_t sourceOffset;
2520 IOOptionBits options;
2521 };
2522
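//
// Allocate or map ref->size bytes in the given map:
//  - with a sharedMem entry, mach_vm_map() the memory entry (setting its
//    cache mode first with mach_make_memory_entry/MAP_MEM_ONLY);
//  - with a src_map, mach_vm_remap() from that map;
//  - otherwise simply mach_vm_allocate() fresh, non-inherited memory.
//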
2523 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2524 {
2525 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2526 IOReturn err;
2527
2528 do {
2529 if( ref->sharedMem)
2530 {
2531 vm_prot_t prot = VM_PROT_READ
2532 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2533
2534 // VM system requires write access to change cache mode
2535 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2536 prot |= VM_PROT_WRITE;
2537
2538 // set memory entry cache
2539 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2540 switch (ref->options & kIOMapCacheMask)
2541 {
2542 case kIOMapInhibitCache:
2543 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2544 break;
2545
2546 case kIOMapWriteThruCache:
2547 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2548 break;
2549
2550 case kIOMapWriteCombineCache:
2551 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2552 break;
2553
2554 case kIOMapCopybackCache:
2555 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2556 break;
2557
2558 case kIOMapDefaultCache:
2559 default:
2560 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2561 break;
2562 }
2563
2564 vm_size_t unused = 0;
2565
2566 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2567 memEntryCacheMode, NULL, ref->sharedMem );
2568 if (KERN_SUCCESS != err)
2569 IOLog("MAP_MEM_ONLY failed %d\n", err);
2570
2571 err = mach_vm_map( map,
2572 &ref->mapped,
2573 ref->size, 0 /* mask */,
2574 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2575 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2576 ref->sharedMem, ref->sourceOffset,
2577 false, // copy
2578 prot, // cur
2579 prot, // max
2580 VM_INHERIT_NONE);
2581
2582 if( KERN_SUCCESS != err) {
2583 ref->mapped = 0;
2584 continue;
2585 }
2586 }
2587 else if (ref->src_map)
2588 {
2589 vm_prot_t cur_prot, max_prot;
2590 err = mach_vm_remap(map, &ref->mapped, ref->size, PAGE_MASK,
2591 (ref->options & kIOMapAnywhere) ? TRUE : FALSE,
2592 ref->src_map, ref->src_address,
2593 FALSE /* copy */,
2594 &cur_prot,
2595 &max_prot,
2596 VM_INHERIT_NONE);
2597 if (KERN_SUCCESS == err)
2598 {
2599 if ((!(VM_PROT_READ & cur_prot))
2600 || (!(kIOMapReadOnly & ref->options) && !(VM_PROT_WRITE & cur_prot)))
2601 {
2602 mach_vm_deallocate(map, ref->mapped, ref->size);
2603 err = KERN_PROTECTION_FAILURE;
2604 }
2605 }
2606 if (KERN_SUCCESS != err)
2607 ref->mapped = 0;
2608 }
2609 else
2610 {
2611 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2612 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2613 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2614 if( KERN_SUCCESS != err) {
2615 ref->mapped = 0;
2616 continue;
2617 }
2618 // we have to make sure that these guys don't get copied if we fork.
2619 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2620 assert( KERN_SUCCESS == err );
2621 }
2622 }
2623 while( false );
2624
2625 return( err );
2626 }
2627
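//
// Map a mach memory entry into 'map' at *address (or anywhere, for
// kIOMapAnywhere).  Pageable kernel allocations are placed via
// IOIteratePageableMaps(); everything else goes straight to
// IOMemoryDescriptorMapAlloc().
//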
2628 kern_return_t
2629 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2630 mach_vm_size_t offset,
2631 mach_vm_address_t * address, mach_vm_size_t length)
2632 {
2633 IOReturn err;
2634 IOMemoryDescriptorMapAllocRef ref;
2635
2636 ref.sharedMem = entry;
2637 ref.src_map = NULL;
2639 ref.sourceOffset = trunc_page_64(offset);
2640 ref.options = options;
2641 ref.size = length;
2642
2643 if (options & kIOMapAnywhere)
2644 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2645 ref.mapped = 0;
2646 else
2647 ref.mapped = *address;
2648
2649 if( ref.sharedMem && (map == kernel_map) && pageable)
2650 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2651 else
2652 err = IOMemoryDescriptorMapAlloc( map, &ref );
2653
2654 *address = ref.mapped;
2655 return (err);
2656 }
2657
2658 kern_return_t
2659 IOMemoryDescriptorMapCopy(vm_map_t map,
2660 vm_map_t src_map,
2661 mach_vm_offset_t src_address,
2662 IOOptionBits options,
2663 mach_vm_size_t offset,
2664 mach_vm_address_t * address, mach_vm_size_t length)
2665 {
2666 IOReturn err;
2667 IOMemoryDescriptorMapAllocRef ref;
2668
2669 ref.sharedMem = NULL;
2670 ref.src_map = src_map;
2671 ref.src_address = src_address;
2672 ref.sourceOffset = trunc_page_64(offset);
2673 ref.options = options;
2674 ref.size = length;
2675
2676 if (options & kIOMapAnywhere)
2677 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2678 ref.mapped = 0;
2679 else
2680 ref.mapped = *address;
2681
2682 if (map == kernel_map)
2683 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2684 else
2685 err = IOMemoryDescriptorMapAlloc(map, &ref);
2686
2687 *address = ref.mapped;
2688 return (err);
2689 }
2690
2691 IOReturn IOMemoryDescriptor::doMap(
2692 vm_map_t __addressMap,
2693 IOVirtualAddress * __address,
2694 IOOptionBits options,
2695 IOByteCount __offset,
2696 IOByteCount __length )
2697 {
2698 #ifndef __LP64__
2699 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2700 #endif /* !__LP64__ */
2701
2702 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2703 mach_vm_size_t offset = mapping->fOffset + __offset;
2704 mach_vm_size_t length = mapping->fLength;
2705
2706 IOReturn err = kIOReturnSuccess;
2707 memory_object_t pager;
2708 mach_vm_size_t pageOffset;
2709 IOPhysicalAddress sourceAddr;
2710 unsigned int lock_count;
2711
2712 do
2713 {
2714 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2715 pageOffset = sourceAddr - trunc_page( sourceAddr );
2716
2717 if( reserved)
2718 pager = (memory_object_t) reserved->devicePager;
2719 else
2720 pager = MACH_PORT_NULL;
2721
2722 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2723 {
2724 upl_t redirUPL2;
2725 vm_size_t size;
2726 int flags;
2727
2728 if (!_memEntry)
2729 {
2730 err = kIOReturnNotReadable;
2731 continue;
2732 }
2733
2734 size = round_page(mapping->fLength + pageOffset);
2735 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2736 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2737
2738 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2739 NULL, NULL,
2740 &flags))
2741 redirUPL2 = NULL;
2742
2743 for (lock_count = 0;
2744 IORecursiveLockHaveLock(gIOMemoryLock);
2745 lock_count++) {
2746 UNLOCK;
2747 }
2748 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2749 for (;
2750 lock_count;
2751 lock_count--) {
2752 LOCK;
2753 }
2754
2755 if (kIOReturnSuccess != err)
2756 {
2757 IOLog("upl_transpose(%x)\n", err);
2758 err = kIOReturnSuccess;
2759 }
2760
2761 if (redirUPL2)
2762 {
2763 upl_commit(redirUPL2, NULL, 0);
2764 upl_deallocate(redirUPL2);
2765 redirUPL2 = 0;
2766 }
2767 {
2768 // swap the memEntries since they now refer to different vm_objects
2769 void * me = _memEntry;
2770 _memEntry = mapping->fMemory->_memEntry;
2771 mapping->fMemory->_memEntry = me;
2772 }
2773 if (pager)
2774 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2775 }
2776 else
2777 {
2778 mach_vm_address_t address;
2779
2780 if (!(options & kIOMapAnywhere))
2781 {
2782 address = trunc_page_64(mapping->fAddress);
2783 if( (mapping->fAddress - address) != pageOffset)
2784 {
2785 err = kIOReturnVMError;
2786 continue;
2787 }
2788 }
2789
2790 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2791 options, (kIOMemoryBufferPageable & _flags),
2792 offset, &address, round_page_64(length + pageOffset));
2793 if( err != KERN_SUCCESS)
2794 continue;
2795
2796 if (!_memEntry || pager)
2797 {
2798 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2799 if (err != KERN_SUCCESS)
2800 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2801 }
2802
2803 #if DEBUG
2804 if (kIOLogMapping & gIOKitDebug)
2805 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2806 err, this, sourceAddr, mapping, address, offset, length);
2807 #endif
2808
2809 if (err == KERN_SUCCESS)
2810 mapping->fAddress = address + pageOffset;
2811 else
2812 mapping->fAddress = NULL;
2813 }
2814 }
2815 while( false );
2816
2817 return (err);
2818 }
2819
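//
// Populate a new mapping.  When a device pager is present each physical
// segment is handed to device_pager_populate_object(); kernel mappings are
// additionally touched with vm_fault() so they resolve early, since faulting
// them in later cannot happen from interrupt level.  While the descriptor is
// redirected and no addressMap is supplied, the caller sleeps until
// redirection ends.
//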
2820 IOReturn IOMemoryDescriptor::handleFault(
2821 void * _pager,
2822 vm_map_t addressMap,
2823 mach_vm_address_t address,
2824 mach_vm_size_t sourceOffset,
2825 mach_vm_size_t length,
2826 IOOptionBits options )
2827 {
2828 IOReturn err = kIOReturnSuccess;
2829 memory_object_t pager = (memory_object_t) _pager;
2830 mach_vm_size_t size;
2831 mach_vm_size_t bytes;
2832 mach_vm_size_t page;
2833 mach_vm_size_t pageOffset;
2834 mach_vm_size_t pagerOffset;
2835 IOPhysicalLength segLen;
2836 addr64_t physAddr;
2837
2838 if( !addressMap)
2839 {
2840 if( kIOMemoryRedirected & _flags)
2841 {
2842 #if DEBUG
2843 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2844 #endif
2845 do {
2846 SLEEP;
2847 } while( kIOMemoryRedirected & _flags );
2848 }
2849
2850 return( kIOReturnSuccess );
2851 }
2852
2853 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2854 assert( physAddr );
2855 pageOffset = physAddr - trunc_page_64( physAddr );
2856 pagerOffset = sourceOffset;
2857
2858 size = length + pageOffset;
2859 physAddr -= pageOffset;
2860
2861 segLen += pageOffset;
2862 bytes = size;
2863 do
2864 {
2865 // in the middle of the loop only map whole pages
2866 if( segLen >= bytes)
2867 segLen = bytes;
2868 else if( segLen != trunc_page( segLen))
2869 err = kIOReturnVMError;
2870 if( physAddr != trunc_page_64( physAddr))
2871 err = kIOReturnBadArgument;
2872 if (kIOReturnSuccess != err)
2873 break;
2874
2875 #if DEBUG
2876 if( kIOLogMapping & gIOKitDebug)
2877 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2878 addressMap, address + pageOffset, physAddr + pageOffset,
2879 segLen - pageOffset);
2880 #endif
2881
2882
2883 if( pager) {
2884 if( reserved && reserved->pagerContig) {
2885 IOPhysicalLength allLen;
2886 addr64_t allPhys;
2887
2888 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2889 assert( allPhys );
2890 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2891 }
2892 else
2893 {
2894
2895 for( page = 0;
2896 (page < segLen) && (KERN_SUCCESS == err);
2897 page += page_size)
2898 {
2899 err = device_pager_populate_object(pager, pagerOffset,
2900 (ppnum_t)(atop_64(physAddr + page)), page_size);
2901 pagerOffset += page_size;
2902 }
2903 }
2904 assert( KERN_SUCCESS == err );
2905 if( err)
2906 break;
2907 }
2908
2909 // This call to vm_fault causes an early pmap level resolution
2910 // of the mappings created above for kernel mappings, since
2911 // faulting in later can't take place from interrupt level.
2912 /* *** ALERT *** */
2913 /* *** Temporary Workaround *** */
2914
2915 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2916 {
2917 vm_fault(addressMap,
2918 (vm_map_offset_t)address,
2919 VM_PROT_READ|VM_PROT_WRITE,
2920 FALSE, THREAD_UNINT, NULL,
2921 (vm_map_offset_t)0);
2922 }
2923
2924 /* *** Temporary Workaround *** */
2925 /* *** ALERT *** */
2926
2927 sourceOffset += segLen - pageOffset;
2928 address += segLen;
2929 bytes -= segLen;
2930 pageOffset = 0;
2931
2932 }
2933 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
2934
2935 if (bytes)
2936 err = kIOReturnBadArgument;
2937
2938 return (err);
2939 }
2940
2941 IOReturn IOMemoryDescriptor::doUnmap(
2942 vm_map_t addressMap,
2943 IOVirtualAddress __address,
2944 IOByteCount __length )
2945 {
2946 IOReturn err;
2947 mach_vm_address_t address;
2948 mach_vm_size_t length;
2949
2950 if (__length)
2951 {
2952 address = __address;
2953 length = __length;
2954 }
2955 else
2956 {
2957 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
2958 address = ((IOMemoryMap *) __address)->fAddress;
2959 length = ((IOMemoryMap *) __address)->fLength;
2960 }
2961
2962 if ((addressMap == kernel_map)
2963 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
2964 addressMap = IOPageableMapForAddress( address );
2965
2966 #if DEBUG
2967 if( kIOLogMapping & gIOKitDebug)
2968 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
2969 addressMap, address, length );
2970 #endif
2971
2972 err = mach_vm_deallocate( addressMap, address, length );
2973
2974 return (err);
2975 }
2976
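//
// Redirect (or restore) all client mappings of this descriptor, e.g. around
// device sleep.  The kIOMemoryRedirected flag also parks threads that fault
// on the memory in handleFault() until redirection is cleared, at which
// point they are woken via WAKEUP.
//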
2977 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2978 {
2979 IOReturn err = kIOReturnSuccess;
2980 IOMemoryMap * mapping = 0;
2981 OSIterator * iter;
2982
2983 LOCK;
2984
2985 if( doRedirect)
2986 _flags |= kIOMemoryRedirected;
2987 else
2988 _flags &= ~kIOMemoryRedirected;
2989
2990 do {
2991 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2992 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
2993 mapping->redirect( safeTask, doRedirect );
2994
2995 iter->release();
2996 }
2997 } while( false );
2998
2999 if (!doRedirect)
3000 {
3001 WAKEUP;
3002 }
3003
3004 UNLOCK;
3005
3006 #ifndef __LP64__
3007 // temporary binary compatibility
3008 IOSubMemoryDescriptor * subMem;
3009 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3010 err = subMem->redirect( safeTask, doRedirect );
3011 else
3012 err = kIOReturnSuccess;
3013 #endif /* !__LP64__ */
3014
3015 return( err );
3016 }
3017
3018 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3019 {
3020 IOReturn err = kIOReturnSuccess;
3021
3022 if( fSuperMap) {
3023 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3024 } else {
3025
3026 LOCK;
3027
3028 do
3029 {
3030 if (!fAddress)
3031 break;
3032 if (!fAddressMap)
3033 break;
3034
3035 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3036 && (0 == (fOptions & kIOMapStatic)))
3037 {
3038 IOUnmapPages( fAddressMap, fAddress, fLength );
3039 err = kIOReturnSuccess;
3040 #if DEBUG
3041 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3042 #endif
3043 }
3044 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3045 {
3046 IOOptionBits newMode;
3047 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3048 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3049 }
3050 }
3051 while (false);
3052 UNLOCK;
3053 }
3054
3055 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3056 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3057 && safeTask
3058 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3059 fMemory->redirect(safeTask, doRedirect);
3060
3061 return( err );
3062 }
3063
3064 IOReturn IOMemoryMap::unmap( void )
3065 {
3066 IOReturn err;
3067
3068 LOCK;
3069
3070 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3071 && (0 == (fOptions & kIOMapStatic))) {
3072
3073 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3074
3075 } else
3076 err = kIOReturnSuccess;
3077
3078 if (fAddressMap)
3079 {
3080 vm_map_deallocate(fAddressMap);
3081 fAddressMap = 0;
3082 }
3083
3084 fAddress = 0;
3085
3086 UNLOCK;
3087
3088 return( err );
3089 }
3090
3091 void IOMemoryMap::taskDied( void )
3092 {
3093 LOCK;
3094 if (fUserClientUnmap)
3095 unmap();
3096 if( fAddressMap) {
3097 vm_map_deallocate(fAddressMap);
3098 fAddressMap = 0;
3099 }
3100 fAddressTask = 0;
3101 fAddress = 0;
3102 UNLOCK;
3103 }
3104
3105 IOReturn IOMemoryMap::userClientUnmap( void )
3106 {
3107 fUserClientUnmap = true;
3108 return (kIOReturnSuccess);
3109 }
3110
3111 // Overload the release mechanism. All mappings must be a member
3112 // of a memory descriptor's _mappings set. This means that we
3113 // always have 2 references on a mapping. When either of these
3114 // references is released we need to free ourselves.
3115 void IOMemoryMap::taggedRelease(const void *tag) const
3116 {
3117 LOCK;
3118 super::taggedRelease(tag, 2);
3119 UNLOCK;
3120 }
3121
3122 void IOMemoryMap::free()
3123 {
3124 unmap();
3125
3126 if (fMemory)
3127 {
3128 LOCK;
3129 fMemory->removeMapping(this);
3130 UNLOCK;
3131 fMemory->release();
3132 }
3133
3134 if (fOwner && (fOwner != fMemory))
3135 {
3136 LOCK;
3137 fOwner->removeMapping(this);
3138 UNLOCK;
3139 }
3140
3141 if (fSuperMap)
3142 fSuperMap->release();
3143
3144 if (fRedirUPL) {
3145 upl_commit(fRedirUPL, NULL, 0);
3146 upl_deallocate(fRedirUPL);
3147 }
3148
3149 super::free();
3150 }
3151
3152 IOByteCount IOMemoryMap::getLength()
3153 {
3154 return( fLength );
3155 }
3156
3157 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3158 {
3159 #ifndef __LP64__
3160 if (fSuperMap)
3161 fSuperMap->getVirtualAddress();
3162 else if (fAddressMap
3163 && vm_map_is_64bit(fAddressMap)
3164 && (sizeof(IOVirtualAddress) < 8))
3165 {
3166 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3167 }
3168 #endif /* !__LP64__ */
3169
3170 return (fAddress);
3171 }
3172
3173 #ifndef __LP64__
3174 mach_vm_address_t IOMemoryMap::getAddress()
3175 {
3176 return( fAddress);
3177 }
3178
3179 mach_vm_size_t IOMemoryMap::getSize()
3180 {
3181 return( fLength );
3182 }
3183 #endif /* !__LP64__ */
3184
3185
3186 task_t IOMemoryMap::getAddressTask()
3187 {
3188 if( fSuperMap)
3189 return( fSuperMap->getAddressTask());
3190 else
3191 return( fAddressTask);
3192 }
3193
3194 IOOptionBits IOMemoryMap::getMapOptions()
3195 {
3196 return( fOptions);
3197 }
3198
3199 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3200 {
3201 return( fMemory );
3202 }
3203
3204 IOMemoryMap * IOMemoryMap::copyCompatible(
3205 IOMemoryMap * newMapping )
3206 {
3207 task_t task = newMapping->getAddressTask();
3208 mach_vm_address_t toAddress = newMapping->fAddress;
3209 IOOptionBits _options = newMapping->fOptions;
3210 mach_vm_size_t _offset = newMapping->fOffset;
3211 mach_vm_size_t _length = newMapping->fLength;
3212
3213 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3214 return( 0 );
3215 if( (fOptions ^ _options) & kIOMapReadOnly)
3216 return( 0 );
3217 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3218 && ((fOptions ^ _options) & kIOMapCacheMask))
3219 return( 0 );
3220
3221 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3222 return( 0 );
3223
3224 if( _offset < fOffset)
3225 return( 0 );
3226
3227 _offset -= fOffset;
3228
3229 if( (_offset + _length) > fLength)
3230 return( 0 );
3231
3232 retain();
3233 if( (fLength == _length) && (!_offset))
3234 {
3235 newMapping->release();
3236 newMapping = this;
3237 }
3238 else
3239 {
3240 newMapping->fSuperMap = this;
3241 newMapping->fOffset = _offset;
3242 newMapping->fAddress = fAddress + _offset;
3243 }
3244
3245 return( newMapping );
3246 }
3247
3248 IOPhysicalAddress
3249 #ifdef __LP64__
3250 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3251 #else /* !__LP64__ */
3252 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3253 #endif /* !__LP64__ */
3254 {
3255 IOPhysicalAddress address;
3256
3257 LOCK;
3258 #ifdef __LP64__
3259 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3260 #else /* !__LP64__ */
3261 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3262 #endif /* !__LP64__ */
3263 UNLOCK;
3264
3265 return( address );
3266 }
3267
3268 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3269
3270 #undef super
3271 #define super OSObject
3272
3273 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3274
3275 void IOMemoryDescriptor::initialize( void )
3276 {
3277 if( 0 == gIOMemoryLock)
3278 gIOMemoryLock = IORecursiveLockAlloc();
3279
3280 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3281 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3282 if (!gIOCopyMapper)
3283 {
3284 IOMapper *
3285 mapper = new IOCopyMapper;
3286 if (mapper)
3287 {
3288 if (mapper->init() && mapper->start(NULL))
3289 gIOCopyMapper = (IOCopyMapper *) mapper;
3290 else
3291 mapper->release();
3292 }
3293 }
3294
3295 gIOLastPage = IOGetLastPageNumber();
3296 }
3297
3298 void IOMemoryDescriptor::free( void )
3299 {
3300 if( _mappings)
3301 _mappings->release();
3302
3303 super::free();
3304 }
3305
3306 IOMemoryMap * IOMemoryDescriptor::setMapping(
3307 task_t intoTask,
3308 IOVirtualAddress mapAddress,
3309 IOOptionBits options )
3310 {
3311 return (createMappingInTask( intoTask, mapAddress,
3312 options | kIOMapStatic,
3313 0, getLength() ));
3314 }
3315
3316 IOMemoryMap * IOMemoryDescriptor::map(
3317 IOOptionBits options )
3318 {
3319 return (createMappingInTask( kernel_task, 0,
3320 options | kIOMapAnywhere,
3321 0, getLength() ));
3322 }
3323
3324 #ifndef __LP64__
3325 IOMemoryMap * IOMemoryDescriptor::map(
3326 task_t intoTask,
3327 IOVirtualAddress atAddress,
3328 IOOptionBits options,
3329 IOByteCount offset,
3330 IOByteCount length )
3331 {
3332 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3333 {
3334 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3335 return (0);
3336 }
3337
3338 return (createMappingInTask(intoTask, atAddress,
3339 options, offset, length));
3340 }
3341 #endif /* !__LP64__ */
3342
3343 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3344 task_t intoTask,
3345 mach_vm_address_t atAddress,
3346 IOOptionBits options,
3347 mach_vm_size_t offset,
3348 mach_vm_size_t length)
3349 {
3350 IOMemoryMap * result;
3351 IOMemoryMap * mapping;
3352
3353 if (0 == length)
3354 length = getLength();
3355
3356 mapping = new IOMemoryMap;
3357
3358 if( mapping
3359 && !mapping->init( intoTask, atAddress,
3360 options, offset, length )) {
3361 mapping->release();
3362 mapping = 0;
3363 }
3364
3365 if (mapping)
3366 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3367 else
3368 result = 0;
3369
3370 #if DEBUG
3371 if (!result)
3372 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3373 this, atAddress, options, offset, length);
3374 #endif
3375
3376 return (result);
3377 }
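
// A minimal usage sketch (not part of this file's build): mapping a descriptor
// into a user task and into the kernel.  'userTask' is a hypothetical task_t
// obtained elsewhere, e.g. from an IOUserClient.
#if 0
static void
ExampleMappings( IOMemoryDescriptor * md, task_t userTask )
{
    // Map anywhere in the user task, read/write, default cache mode
    IOMemoryMap * userMap = md->createMappingInTask( userTask, 0,
                                                     kIOMapAnywhere, 0, 0 );
    if (userMap)
    {
        mach_vm_address_t userVA = userMap->getAddress();
        // ... hand userVA to the client ...
        userMap->release();     // drop the caller's reference; the map tears itself down
    }

    // map(0) is shorthand for a kernel_task mapping placed anywhere
    IOMemoryMap * kernelMap = md->map(0);
    if (kernelMap)
        kernelMap->release();
}
#endif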
3378
3379 #ifndef __LP64__ // there is only a 64 bit version for LP64
3380 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3381 IOOptionBits options,
3382 IOByteCount offset)
3383 {
3384 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3385 }
3386 #endif
3387
3388 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3389 IOOptionBits options,
3390 mach_vm_size_t offset)
3391 {
3392 IOReturn err = kIOReturnSuccess;
3393 IOMemoryDescriptor * physMem = 0;
3394
3395 LOCK;
3396
3397 if (fAddress && fAddressMap) do
3398 {
3399 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3400 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3401 {
3402 physMem = fMemory;
3403 physMem->retain();
3404 }
3405
3406 if (!fRedirUPL)
3407 {
3408 vm_size_t size = round_page(fLength);
3409 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3410 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3411 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3412 NULL, NULL,
3413 &flags))
3414 fRedirUPL = 0;
3415
3416 if (physMem)
3417 {
3418 IOUnmapPages( fAddressMap, fAddress, fLength );
3419 if (false)
3420 physMem->redirect(0, true);
3421 }
3422 }
3423
3424 if (newBackingMemory)
3425 {
3426 if (newBackingMemory != fMemory)
3427 {
3428 fOffset = 0;
3429 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3430 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3431 offset, fLength))
3432 err = kIOReturnError;
3433 }
3434 if (fRedirUPL)
3435 {
3436 upl_commit(fRedirUPL, NULL, 0);
3437 upl_deallocate(fRedirUPL);
3438 fRedirUPL = 0;
3439 }
3440 if (false && physMem)
3441 physMem->redirect(0, false);
3442 }
3443 }
3444 while (false);
3445
3446 UNLOCK;
3447
3448 if (physMem)
3449 physMem->release();
3450
3451 return (err);
3452 }
3453
3454 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3455 IOMemoryDescriptor * owner,
3456 task_t __intoTask,
3457 IOVirtualAddress __address,
3458 IOOptionBits options,
3459 IOByteCount __offset,
3460 IOByteCount __length )
3461 {
3462 #ifndef __LP64__
3463 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3464 #endif /* !__LP64__ */
3465
3466 IOMemoryDescriptor * mapDesc = 0;
3467 IOMemoryMap * result = 0;
3468 OSIterator * iter;
3469
3470 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3471 mach_vm_size_t offset = mapping->fOffset + __offset;
3472 mach_vm_size_t length = mapping->fLength;
3473
3474 mapping->fOffset = offset;
3475
3476 LOCK;
3477
3478 do
3479 {
3480 if (kIOMapStatic & options)
3481 {
3482 result = mapping;
3483 addMapping(mapping);
3484 mapping->setMemoryDescriptor(this, 0);
3485 continue;
3486 }
3487
3488 if (kIOMapUnique & options)
3489 {
3490 IOPhysicalAddress phys;
3491 IOByteCount physLen;
3492
3493 // if (owner != this) continue;
3494
3495 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3496 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3497 {
3498 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3499 if (!phys || (physLen < length))
3500 continue;
3501
3502 mapDesc = IOMemoryDescriptor::withAddressRange(
3503 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3504 if (!mapDesc)
3505 continue;
3506 offset = 0;
3507 mapping->fOffset = offset;
3508 }
3509 }
3510 else
3511 {
3512 // look for a compatible existing mapping
3513 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3514 {
3515 IOMemoryMap * lookMapping;
3516 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3517 {
3518 if ((result = lookMapping->copyCompatible(mapping)))
3519 {
3520 addMapping(result);
3521 result->setMemoryDescriptor(this, offset);
3522 break;
3523 }
3524 }
3525 iter->release();
3526 }
3527 if (result || (options & kIOMapReference))
3528 continue;
3529 }
3530
3531 if (!mapDesc)
3532 {
3533 mapDesc = this;
3534 mapDesc->retain();
3535 }
3536 IOReturn
3537 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3538 if (kIOReturnSuccess == kr)
3539 {
3540 result = mapping;
3541 mapDesc->addMapping(result);
3542 result->setMemoryDescriptor(mapDesc, offset);
3543 }
3544 else
3545 {
3546 mapping->release();
3547 mapping = NULL;
3548 }
3549 }
3550 while( false );
3551
3552 UNLOCK;
3553
3554 if (mapDesc)
3555 mapDesc->release();
3556
3557 return (result);
3558 }
3559
3560 void IOMemoryDescriptor::addMapping(
3561 IOMemoryMap * mapping )
3562 {
3563 if( mapping)
3564 {
3565 if( 0 == _mappings)
3566 _mappings = OSSet::withCapacity(1);
3567 if( _mappings )
3568 _mappings->setObject( mapping );
3569 }
3570 }
3571
3572 void IOMemoryDescriptor::removeMapping(
3573 IOMemoryMap * mapping )
3574 {
3575 if( _mappings)
3576 _mappings->removeObject( mapping);
3577 }
3578
3579 #ifndef __LP64__
3580 // obsolete initializers
3581 // - initWithOptions is the designated initializer
3582 bool
3583 IOMemoryDescriptor::initWithAddress(void * address,
3584 IOByteCount length,
3585 IODirection direction)
3586 {
3587 return( false );
3588 }
3589
3590 bool
3591 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3592 IOByteCount length,
3593 IODirection direction,
3594 task_t task)
3595 {
3596 return( false );
3597 }
3598
3599 bool
3600 IOMemoryDescriptor::initWithPhysicalAddress(
3601 IOPhysicalAddress address,
3602 IOByteCount length,
3603 IODirection direction )
3604 {
3605 return( false );
3606 }
3607
3608 bool
3609 IOMemoryDescriptor::initWithRanges(
3610 IOVirtualRange * ranges,
3611 UInt32 withCount,
3612 IODirection direction,
3613 task_t task,
3614 bool asReference)
3615 {
3616 return( false );
3617 }
3618
3619 bool
3620 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3621 UInt32 withCount,
3622 IODirection direction,
3623 bool asReference)
3624 {
3625 return( false );
3626 }
3627
3628 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3629 IOByteCount * lengthOfSegment)
3630 {
3631 return( 0 );
3632 }
3633 #endif /* !__LP64__ */
3634
3635 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3636
3637 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3638 {
3639 OSSymbol const *keys[2];
3640 OSObject *values[2];
3641 struct SerData {
3642 user_addr_t address;
3643 user_size_t length;
3644 } *vcopy;
3645 unsigned int index, nRanges;
3646 bool result;
3647
3648 IOOptionBits type = _flags & kIOMemoryTypeMask;
3649
3650 if (s == NULL) return false;
3651 if (s->previouslySerialized(this)) return true;
3652
3653 // Pretend we are an array.
3654 if (!s->addXMLStartTag(this, "array")) return false;
3655
3656 nRanges = _rangesCount;
3657 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3658 if (vcopy == 0) return false;
3659
3660 keys[0] = OSSymbol::withCString("address");
3661 keys[1] = OSSymbol::withCString("length");
3662
3663 result = false;
3664 values[0] = values[1] = 0;
3665
3666 // From this point on we can go to bail.
3667
3668 // Copy the volatile data so we don't have to allocate memory
3669 // while the lock is held.
3670 LOCK;
3671 if (nRanges == _rangesCount) {
3672 Ranges vec = _ranges;
3673 for (index = 0; index < nRanges; index++) {
3674 user_addr_t addr; IOByteCount len;
3675 getAddrLenForInd(addr, len, type, vec, index);
3676 vcopy[index].address = addr;
3677 vcopy[index].length = len;
3678 }
3679 } else {
3680 // The descriptor changed out from under us. Give up.
3681 UNLOCK;
3682 result = false;
3683 goto bail;
3684 }
3685 UNLOCK;
3686
3687 for (index = 0; index < nRanges; index++)
3688 {
3689 user_addr_t addr = vcopy[index].address;
3690 IOByteCount len = (IOByteCount) vcopy[index].length;
3691 values[0] =
3692 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3693 if (values[0] == 0) {
3694 result = false;
3695 goto bail;
3696 }
3697 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3698 if (values[1] == 0) {
3699 result = false;
3700 goto bail;
3701 }
3702 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3703 if (dict == 0) {
3704 result = false;
3705 goto bail;
3706 }
3707 values[0]->release();
3708 values[1]->release();
3709 values[0] = values[1] = 0;
3710
3711 result = dict->serialize(s);
3712 dict->release();
3713 if (!result) {
3714 goto bail;
3715 }
3716 }
3717 result = s->addXMLEndTag("array");
3718
3719 bail:
3720 if (values[0])
3721 values[0]->release();
3722 if (values[1])
3723 values[1]->release();
3724 if (keys[0])
3725 keys[0]->release();
3726 if (keys[1])
3727 keys[1]->release();
3728 if (vcopy)
3729 IOFree(vcopy, sizeof(SerData) * nRanges);
3730 return result;
3731 }
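
// For reference, the serialized form produced above is an XML plist fragment,
// roughly of the shape below (exact tags and attributes depend on OSSerialize
// and OSNumber):
//
//   <array>
//     <dict>
//       <key>address</key> <integer size="64">0x...</integer>
//       <key>length</key>  <integer size="32">0x...</integer>
//     </dict>
//     ...one dict per range...
//   </array>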
3732
3733 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3734
3735 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3736 #ifdef __LP64__
3737 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3738 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3739 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3740 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3741 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3742 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3743 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3744 #else /* !__LP64__ */
3745 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3746 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3747 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3748 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3749 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3750 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3751 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3752 #endif /* !__LP64__ */
3753 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3754 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3755 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3756 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3757 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3758 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3759 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3760 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3761
3762 /* ex-inline function implementation */
3763 IOPhysicalAddress
3764 IOMemoryDescriptor::getPhysicalAddress()
3765 { return( getPhysicalSegment( 0, 0 )); }
3766
3767
3768