[apple/xnu.git] iokit/Kernel/IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53 #include "IOCopyMapper.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 void ipc_port_release_send(ipc_port_t port);
76
77 /* Copy between a physical page and a virtual address in the given vm_map */
78 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
79
80 memory_object_t
81 device_pager_setup(
82 memory_object_t pager,
83 uintptr_t device_handle,
84 vm_size_t size,
85 int flags);
86 void
87 device_pager_deallocate(
88 memory_object_t);
89 kern_return_t
90 device_pager_populate_object(
91 memory_object_t pager,
92 vm_object_offset_t offset,
93 ppnum_t phys_addr,
94 vm_size_t size);
95 kern_return_t
96 memory_object_iopl_request(
97 ipc_port_t port,
98 memory_object_offset_t offset,
99 vm_size_t *upl_size,
100 upl_t *upl_ptr,
101 upl_page_info_array_t user_page_list,
102 unsigned int *page_list_count,
103 int *flags);
104
105 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
106
107 __END_DECLS
108
109 #define kIOMaximumMappedIOByteCount (512*1024*1024)
110
111 static IOMapper * gIOSystemMapper = NULL;
112
113 IOCopyMapper * gIOCopyMapper = NULL;
114
115 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
116
117 ppnum_t gIOLastPage;
118
119 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
120
121 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
122
123 #define super IOMemoryDescriptor
124
125 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
126
127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
129 static IORecursiveLock * gIOMemoryLock;
130
131 #define LOCK IORecursiveLockLock( gIOMemoryLock)
132 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
133 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
134 #define WAKEUP \
135 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
136
137 #if 0
138 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
139 #else
140 #define DEBG(fmt, args...) {}
141 #endif
142
143 #define IOMD_DEBUG_DMAACTIVE 1
144
145 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
146
147 // Some data structures and accessor macros used by the initWithOptions
148 // function.
149
150 enum ioPLBlockFlags {
151 kIOPLOnDevice = 0x00000001,
152 kIOPLExternUPL = 0x00000002,
153 };
154
155 struct typePersMDData
156 {
157 const IOGeneralMemoryDescriptor *fMD;
158 ipc_port_t fMemEntry;
159 };
160
161 struct ioPLBlock {
162 upl_t fIOPL;
163 vm_address_t fPageInfo; // Pointer to page list or index into it
164 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
165 ppnum_t fMappedBase; // Page number of first page in this iopl
166 unsigned int fPageOffset; // Offset within first page of iopl
167 unsigned int fFlags; // Flags
168 };
169
170 struct ioGMDData {
171 IOMapper *fMapper;
172 uint64_t fPreparationID;
173 unsigned int fPageCnt;
174 #if __LP64__
175 // align arrays to 8 bytes so following macros work
176 unsigned int fPad;
177 #endif
178 upl_page_info_t fPageList[];
179 ioPLBlock fBlocks[];
180 };
181
182 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
183 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
184 #define getNumIOPL(osd, d) \
185 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
186 #define getPageList(d) (&(d->fPageList[0]))
187 #define computeDataSize(p, u) \
188 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
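
/*
 * Illustrative sketch (not part of the original source): the layout these
 * macros assume for the OSData (_memoryEntries) backing an
 * IOGeneralMemoryDescriptor.  An ioGMDData header is followed by fPageCnt
 * upl_page_info_t entries, which are in turn followed by a variable number
 * of ioPLBlock records:
 *
 *   +-----------+---------------------------+----------------------+
 *   | ioGMDData | fPageList[fPageCnt]       | ioPLBlock records    |
 *   | header    | (upl_page_info_t array)   | (one per IOPL)       |
 *   +-----------+---------------------------+----------------------+
 *
 * getIOPLList() skips over the page list to find the first ioPLBlock,
 * getNumIOPL() divides the remaining OSData length by sizeof(ioPLBlock),
 * and computeDataSize(p, u) sizes the whole buffer for p pages and u IOPLs.
 * Typical access pattern (as used by dmaCommandOperation() below):
 *
 *   ioGMDData * dataP    = getDataP(_memoryEntries);
 *   ioPLBlock * ioplList = getIOPLList(dataP);
 *   UInt        numIOPLs = getNumIOPL(_memoryEntries, dataP);
 */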
189
190
191 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
192
193 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
194
195
196 extern "C" {
197
198 kern_return_t device_data_action(
199 uintptr_t device_handle,
200 ipc_port_t device_pager,
201 vm_prot_t protection,
202 vm_object_offset_t offset,
203 vm_size_t size)
204 {
205 struct ExpansionData {
206 void * devicePager;
207 unsigned int pagerContig:1;
208 unsigned int unused:31;
209 IOMemoryDescriptor * memory;
210 };
211 kern_return_t kr;
212 ExpansionData * ref = (ExpansionData *) device_handle;
213 IOMemoryDescriptor * memDesc;
214
215 LOCK;
216 memDesc = ref->memory;
217 if( memDesc)
218 {
219 memDesc->retain();
220 kr = memDesc->handleFault( device_pager, 0, 0,
221 offset, size, kIOMapDefaultCache /*?*/);
222 memDesc->release();
223 }
224 else
225 kr = KERN_ABORTED;
226 UNLOCK;
227
228 return( kr );
229 }
230
231 kern_return_t device_close(
232 uintptr_t device_handle)
233 {
234 struct ExpansionData {
235 void * devicePager;
236 unsigned int pagerContig:1;
237 unsigned int unused:31;
238 IOMemoryDescriptor * memory;
239 };
240 ExpansionData * ref = (ExpansionData *) device_handle;
241
242 IODelete( ref, ExpansionData, 1 );
243
244 return( kIOReturnSuccess );
245 }
246 }; // end extern "C"
247
248 // Note this inline function uses C++ reference arguments to return values.
249 // This means that pointers are not passed, and NULL does not have to be
250 // checked for, since a NULL reference is illegal.
251 static inline void
252 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
253 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
254 {
255 assert(kIOMemoryTypeUIO == type
256 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
257 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
258 if (kIOMemoryTypeUIO == type) {
259 user_size_t us;
260 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
261 }
262 #ifndef __LP64__
263 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
264 IOAddressRange cur = r.v64[ind];
265 addr = cur.address;
266 len = cur.length;
267 }
268 #endif /* !__LP64__ */
269 else {
270 IOVirtualRange cur = r.v[ind];
271 addr = cur.address;
272 len = cur.length;
273 }
274 }
275
276 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
277
278 IOMemoryDescriptor *
279 IOMemoryDescriptor::withAddress(void * address,
280 IOByteCount length,
281 IODirection direction)
282 {
283 return IOMemoryDescriptor::
284 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
285 }
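
/*
 * Illustrative sketch (not part of the original source): describing an
 * existing kernel buffer.  Because withAddress() ORs in kIOMemoryAutoPrepare,
 * the memory is wired for the lifetime of the descriptor; buffer and
 * bufferSize are hypothetical.
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(buffer, bufferSize, kIODirectionOut);
 *   if (md) {
 *       // ... hand md to a DMA engine or IODMACommand ...
 *       md->release();
 *   }
 */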
286
287 #ifndef __LP64__
288 IOMemoryDescriptor *
289 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
290 IOByteCount length,
291 IODirection direction,
292 task_t task)
293 {
294 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
295 if (that)
296 {
297 if (that->initWithAddress(address, length, direction, task))
298 return that;
299
300 that->release();
301 }
302 return 0;
303 }
304 #endif /* !__LP64__ */
305
306 IOMemoryDescriptor *
307 IOMemoryDescriptor::withPhysicalAddress(
308 IOPhysicalAddress address,
309 IOByteCount length,
310 IODirection direction )
311 {
312 #ifdef __LP64__
313 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
314 #else /* !__LP64__ */
315 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
316 if (self
317 && !self->initWithPhysicalAddress(address, length, direction)) {
318 self->release();
319 return 0;
320 }
321
322 return self;
323 #endif /* !__LP64__ */
324 }
325
326 #ifndef __LP64__
327 IOMemoryDescriptor *
328 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
329 UInt32 withCount,
330 IODirection direction,
331 task_t task,
332 bool asReference)
333 {
334 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
335 if (that)
336 {
337 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
338 return that;
339
340 that->release();
341 }
342 return 0;
343 }
344 #endif /* !__LP64__ */
345
346 IOMemoryDescriptor *
347 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
348 mach_vm_size_t length,
349 IOOptionBits options,
350 task_t task)
351 {
352 IOAddressRange range = { address, length };
353 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
354 }
355
356 IOMemoryDescriptor *
357 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
358 UInt32 rangeCount,
359 IOOptionBits options,
360 task_t task)
361 {
362 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
363 if (that)
364 {
365 if (task)
366 options |= kIOMemoryTypeVirtual64;
367 else
368 options |= kIOMemoryTypePhysical64;
369
370 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
371 return that;
372
373 that->release();
374 }
375
376 return 0;
377 }
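
/*
 * Illustrative sketch (not part of the original source): describing a buffer
 * that lives in a user task.  Passing a non-NULL task selects
 * kIOMemoryTypeVirtual64 above; TASK_NULL treats the range as physical.
 * userAddr, userLen and userTask are hypothetical.
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddressRange(userAddr, userLen,
 *                                            kIODirectionIn, userTask);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... DMA into the wired user pages ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */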
378
379
380 /*
381 * withOptions:
382 *
383 * Create a new IOMemoryDescriptor. The buffer is made up of several
384 * virtual address ranges, from a given task.
385 *
386 * Passing the ranges as a reference will avoid an extra allocation.
387 */
388 IOMemoryDescriptor *
389 IOMemoryDescriptor::withOptions(void * buffers,
390 UInt32 count,
391 UInt32 offset,
392 task_t task,
393 IOOptionBits opts,
394 IOMapper * mapper)
395 {
396 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
397
398 if (self
399 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
400 {
401 self->release();
402 return 0;
403 }
404
405 return self;
406 }
407
408 bool IOMemoryDescriptor::initWithOptions(void * buffers,
409 UInt32 count,
410 UInt32 offset,
411 task_t task,
412 IOOptionBits options,
413 IOMapper * mapper)
414 {
415 return( false );
416 }
417
418 #ifndef __LP64__
419 IOMemoryDescriptor *
420 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
421 UInt32 withCount,
422 IODirection direction,
423 bool asReference)
424 {
425 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
426 if (that)
427 {
428 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
429 return that;
430
431 that->release();
432 }
433 return 0;
434 }
435
436 IOMemoryDescriptor *
437 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
438 IOByteCount offset,
439 IOByteCount length,
440 IODirection direction)
441 {
442 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
443 }
444 #endif /* !__LP64__ */
445
446 IOMemoryDescriptor *
447 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
448 {
449 IOGeneralMemoryDescriptor *origGenMD =
450 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
451
452 if (origGenMD)
453 return IOGeneralMemoryDescriptor::
454 withPersistentMemoryDescriptor(origGenMD);
455 else
456 return 0;
457 }
458
459 IOMemoryDescriptor *
460 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
461 {
462 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
463
464 if (!sharedMem)
465 return 0;
466
467 if (sharedMem == originalMD->_memEntry) {
468 originalMD->retain(); // Add a new reference to ourselves
469 ipc_port_release_send(sharedMem); // Remove extra send right
470 return originalMD;
471 }
472
473 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
474 typePersMDData initData = { originalMD, sharedMem };
475
476 if (self
477 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
478 self->release();
479 self = 0;
480 }
481 return self;
482 }
483
484 void *IOGeneralMemoryDescriptor::createNamedEntry()
485 {
486 kern_return_t error;
487 ipc_port_t sharedMem;
488
489 IOOptionBits type = _flags & kIOMemoryTypeMask;
490
491 user_addr_t range0Addr;
492 IOByteCount range0Len;
493 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
494 range0Addr = trunc_page_64(range0Addr);
495
496 vm_size_t size = ptoa_32(_pages);
497 vm_address_t kernelPage = (vm_address_t) range0Addr;
498
499 vm_map_t theMap = ((_task == kernel_task)
500 && (kIOMemoryBufferPageable & _flags))
501 ? IOPageableMapForAddress(kernelPage)
502 : get_task_map(_task);
503
504 memory_object_size_t actualSize = size;
505 vm_prot_t prot = VM_PROT_READ;
506 #if CONFIG_EMBEDDED
507 if (kIODirectionOut != (kIODirectionOutIn & _flags))
508 #endif
509 prot |= VM_PROT_WRITE;
510
511 if (_memEntry)
512 prot |= MAP_MEM_NAMED_REUSE;
513
514 error = mach_make_memory_entry_64(theMap,
515 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
516
517 if (KERN_SUCCESS == error) {
518 if (actualSize == size) {
519 return sharedMem;
520 } else {
521 #if IOASSERT
522 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
523 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
524 #endif
525 ipc_port_release_send( sharedMem );
526 }
527 }
528
529 return MACH_PORT_NULL;
530 }
531
532 #ifndef __LP64__
533 bool
534 IOGeneralMemoryDescriptor::initWithAddress(void * address,
535 IOByteCount withLength,
536 IODirection withDirection)
537 {
538 _singleRange.v.address = (vm_offset_t) address;
539 _singleRange.v.length = withLength;
540
541 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
542 }
543
544 bool
545 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
546 IOByteCount withLength,
547 IODirection withDirection,
548 task_t withTask)
549 {
550 _singleRange.v.address = address;
551 _singleRange.v.length = withLength;
552
553 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
554 }
555
556 bool
557 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
558 IOPhysicalAddress address,
559 IOByteCount withLength,
560 IODirection withDirection )
561 {
562 _singleRange.p.address = address;
563 _singleRange.p.length = withLength;
564
565 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
566 }
567
568 bool
569 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
570 IOPhysicalRange * ranges,
571 UInt32 count,
572 IODirection direction,
573 bool reference)
574 {
575 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
576
577 if (reference)
578 mdOpts |= kIOMemoryAsReference;
579
580 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
581 }
582
583 bool
584 IOGeneralMemoryDescriptor::initWithRanges(
585 IOVirtualRange * ranges,
586 UInt32 count,
587 IODirection direction,
588 task_t task,
589 bool reference)
590 {
591 IOOptionBits mdOpts = direction;
592
593 if (reference)
594 mdOpts |= kIOMemoryAsReference;
595
596 if (task) {
597 mdOpts |= kIOMemoryTypeVirtual;
598
599 // Auto-prepare if this is a kernel memory descriptor as very few
600 // clients bother to prepare() kernel memory.
601 // But it was not enforced so what are you going to do?
602 if (task == kernel_task)
603 mdOpts |= kIOMemoryAutoPrepare;
604 }
605 else
606 mdOpts |= kIOMemoryTypePhysical;
607
608 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
609 }
610 #endif /* !__LP64__ */
611
612 /*
613 * initWithOptions:
614 *
615 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
616 * address ranges from a given task, several physical ranges, a UPL from the
617 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
618 *
619 * Passing the ranges as a reference will avoid an extra allocation.
620 *
621 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
622 * existing instance -- note this behavior is not commonly supported in other
623 * I/O Kit classes, although it is supported here.
624 */
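
/*
 * Illustrative sketch (not part of the original source): re-targeting an
 * already-constructed IOGeneralMemoryDescriptor as described above, instead
 * of allocating a fresh object.  gmd, newAddr, newLen and newTask are
 * hypothetical.
 *
 *   IOAddressRange newRange = { newAddr, newLen };
 *   bool ok = gmd->initWithOptions(&newRange, 1, 0, newTask,
 *                                  kIODirectionInOut | kIOMemoryTypeVirtual64,
 *                                  0);   // no explicit mapper
 */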
625
626 bool
627 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
628 UInt32 count,
629 UInt32 offset,
630 task_t task,
631 IOOptionBits options,
632 IOMapper * mapper)
633 {
634 IOOptionBits type = options & kIOMemoryTypeMask;
635
636 // Grab the original MD's configuration data to initialise the
637 // arguments to this function.
638 if (kIOMemoryTypePersistentMD == type) {
639
640 typePersMDData *initData = (typePersMDData *) buffers;
641 const IOGeneralMemoryDescriptor *orig = initData->fMD;
642 ioGMDData *dataP = getDataP(orig->_memoryEntries);
643
644 // Only accept persistent memory descriptors with valid dataP data.
645 assert(orig->_rangesCount == 1);
646 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
647 return false;
648
649 _memEntry = initData->fMemEntry; // Grab the new named entry
650 options = orig->_flags | kIOMemoryAsReference;
651 _singleRange = orig->_singleRange; // Initialise our range
652 buffers = &_singleRange;
653 count = 1;
654
655 // Now grab the original task and whatever mapper was previously used
656 task = orig->_task;
657 mapper = dataP->fMapper;
658
659 // We are ready to go through the original initialisation now
660 }
661
662 switch (type) {
663 case kIOMemoryTypeUIO:
664 case kIOMemoryTypeVirtual:
665 #ifndef __LP64__
666 case kIOMemoryTypeVirtual64:
667 #endif /* !__LP64__ */
668 assert(task);
669 if (!task)
670 return false;
671
672 #ifndef __LP64__
673 if (vm_map_is_64bit(get_task_map(task))
674 && (kIOMemoryTypeVirtual == type)
675 && ((IOVirtualRange *) buffers)->address)
676 {
677 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
678 return false;
679 }
680 #endif /* !__LP64__ */
681 break;
682
683 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
684 #ifndef __LP64__
685 case kIOMemoryTypePhysical64:
686 #endif /* !__LP64__ */
687 case kIOMemoryTypeUPL:
688 assert(!task);
689 break;
690 default:
691 return false; /* bad argument */
692 }
693
694 assert(buffers);
695 assert(count);
696
697 /*
698 * We can check the _initialized instance variable before having ever set
699 * it to an initial value because I/O Kit guarantees that all our instance
700 * variables are zeroed on an object's allocation.
701 */
702
703 if (_initialized) {
704 /*
705 * An existing memory descriptor is being retargeted to point to
706 * somewhere else. Clean up our present state.
707 */
708 IOOptionBits type = _flags & kIOMemoryTypeMask;
709 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
710 {
711 while (_wireCount)
712 complete();
713 }
714 if (_ranges.v && !(kIOMemoryAsReference & _flags))
715 {
716 if (kIOMemoryTypeUIO == type)
717 uio_free((uio_t) _ranges.v);
718 #ifndef __LP64__
719 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
720 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
721 #endif /* !__LP64__ */
722 else
723 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
724 }
725
726 if (_memEntry)
727 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
728 if (_mappings)
729 _mappings->flushCollection();
730 }
731 else {
732 if (!super::init())
733 return false;
734 _initialized = true;
735 }
736
737 // Grab the appropriate mapper
738 if (kIOMemoryMapperNone & options)
739 mapper = 0; // No Mapper
740 else if (mapper == kIOMapperSystem) {
741 IOMapper::checkForSystemMapper();
742 gIOSystemMapper = mapper = IOMapper::gSystem;
743 }
744
745 // Temp binary compatibility for kIOMemoryThreadSafe
746 if (kIOMemoryReserved6156215 & options)
747 {
748 options &= ~kIOMemoryReserved6156215;
749 options |= kIOMemoryThreadSafe;
750 }
751 // Remove the dynamic internal use flags from the initial setting
752 options &= ~(kIOMemoryPreparedReadOnly);
753 _flags = options;
754 _task = task;
755
756 #ifndef __LP64__
757 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
758 #endif /* !__LP64__ */
759
760 __iomd_reservedA = 0;
761 __iomd_reservedB = 0;
762 _highestPage = 0;
763
764 if (kIOMemoryThreadSafe & options)
765 {
766 if (!_prepareLock)
767 _prepareLock = IOLockAlloc();
768 }
769 else if (_prepareLock)
770 {
771 IOLockFree(_prepareLock);
772 _prepareLock = NULL;
773 }
774
775 if (kIOMemoryTypeUPL == type) {
776
777 ioGMDData *dataP;
778 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
779
780 if (!_memoryEntries) {
781 _memoryEntries = OSData::withCapacity(dataSize);
782 if (!_memoryEntries)
783 return false;
784 }
785 else if (!_memoryEntries->initWithCapacity(dataSize))
786 return false;
787
788 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
789 dataP = getDataP(_memoryEntries);
790 dataP->fMapper = mapper;
791 dataP->fPageCnt = 0;
792
793 // _wireCount++; // UPLs start out life wired
794
795 _length = count;
796 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
797
798 ioPLBlock iopl;
799 iopl.fIOPL = (upl_t) buffers;
800 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
801
802 if (upl_get_size(iopl.fIOPL) < (count + offset))
803 panic("short external upl");
804
805 // Set the kIOPLOnDevice flag, which is conveniently equal to 1
806 iopl.fFlags = pageList->device | kIOPLExternUPL;
807 iopl.fIOMDOffset = 0;
808
809 _highestPage = upl_get_highest_page(iopl.fIOPL);
810
811 if (!pageList->device) {
812 // Pre-compute the offset into the UPL's page list
813 pageList = &pageList[atop_32(offset)];
814 offset &= PAGE_MASK;
815 if (mapper) {
816 iopl.fMappedBase = mapper->iovmAlloc(_pages);
817 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
818 }
819 else
820 iopl.fMappedBase = 0;
821 }
822 else
823 iopl.fMappedBase = 0;
824 iopl.fPageInfo = (vm_address_t) pageList;
825 iopl.fPageOffset = offset;
826
827 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
828 }
829 else {
830 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
831 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
832
833 // Initialize the memory descriptor
834 if (options & kIOMemoryAsReference) {
835 #ifndef __LP64__
836 _rangesIsAllocated = false;
837 #endif /* !__LP64__ */
838
839 // Hack assignment to get the buffer arg into _ranges.
840 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
841 // work, C++ sigh.
842 // This also initialises the uio & physical ranges.
843 _ranges.v = (IOVirtualRange *) buffers;
844 }
845 else {
846 #ifndef __LP64__
847 _rangesIsAllocated = true;
848 #endif /* !__LP64__ */
849 switch (type)
850 {
851 case kIOMemoryTypeUIO:
852 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
853 break;
854
855 #ifndef __LP64__
856 case kIOMemoryTypeVirtual64:
857 case kIOMemoryTypePhysical64:
858 if (count == 1
859 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL) {
860 if (kIOMemoryTypeVirtual64 == type)
861 type = kIOMemoryTypeVirtual;
862 else
863 type = kIOMemoryTypePhysical;
864 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
865 _rangesIsAllocated = false;
866 _ranges.v = &_singleRange.v;
867 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
868 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
869 break;
870 }
871 _ranges.v64 = IONew(IOAddressRange, count);
872 if (!_ranges.v64)
873 return false;
874 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
875 break;
876 #endif /* !__LP64__ */
877 case kIOMemoryTypeVirtual:
878 case kIOMemoryTypePhysical:
879 if (count == 1) {
880 _flags |= kIOMemoryAsReference;
881 #ifndef __LP64__
882 _rangesIsAllocated = false;
883 #endif /* !__LP64__ */
884 _ranges.v = &_singleRange.v;
885 } else {
886 _ranges.v = IONew(IOVirtualRange, count);
887 if (!_ranges.v)
888 return false;
889 }
890 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
891 break;
892 }
893 }
894
895 // Find starting address within the vector of ranges
896 Ranges vec = _ranges;
897 UInt32 length = 0;
898 UInt32 pages = 0;
899 for (unsigned ind = 0; ind < count; ind++) {
900 user_addr_t addr;
901 IOPhysicalLength len;
902
903 // addr & len are returned by this function
904 getAddrLenForInd(addr, len, type, vec, ind);
905 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
906 len += length;
907 assert(len >= length); // Check for 32 bit wrap around
908 length = len;
909
910 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
911 {
912 ppnum_t highPage = atop_64(addr + len - 1);
913 if (highPage > _highestPage)
914 _highestPage = highPage;
915 }
916 }
917 _length = length;
918 _pages = pages;
919 _rangesCount = count;
920
921 // Auto-prepare memory at creation time.
922 // Implied completion when descriptor is freed
923 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
924 _wireCount++; // Physical MDs are, by definition, wired
925 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
926 ioGMDData *dataP;
927 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
928
929 if (!_memoryEntries) {
930 _memoryEntries = OSData::withCapacity(dataSize);
931 if (!_memoryEntries)
932 return false;
933 }
934 else if (!_memoryEntries->initWithCapacity(dataSize))
935 return false;
936
937 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
938 dataP = getDataP(_memoryEntries);
939 dataP->fMapper = mapper;
940 dataP->fPageCnt = _pages;
941
942 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
943 _memEntry = createNamedEntry();
944
945 if ((_flags & kIOMemoryAutoPrepare)
946 && prepare() != kIOReturnSuccess)
947 return false;
948 }
949 }
950
951 return true;
952 }
953
954 /*
955 * free
956 *
957 * Free resources.
958 */
959 void IOGeneralMemoryDescriptor::free()
960 {
961 IOOptionBits type = _flags & kIOMemoryTypeMask;
962
963 if( reserved)
964 {
965 LOCK;
966 reserved->memory = 0;
967 UNLOCK;
968 }
969
970 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
971 {
972 while (_wireCount)
973 complete();
974 }
975 if (_memoryEntries)
976 _memoryEntries->release();
977
978 if (_ranges.v && !(kIOMemoryAsReference & _flags))
979 {
980 if (kIOMemoryTypeUIO == type)
981 uio_free((uio_t) _ranges.v);
982 #ifndef __LP64__
983 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
984 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
985 #endif /* !__LP64__ */
986 else
987 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
988
989 _ranges.v = NULL;
990 }
991
992 if (reserved && reserved->devicePager)
993 device_pager_deallocate( (memory_object_t) reserved->devicePager );
994
995 // memEntry holds a ref on the device pager which owns reserved
996 // (ExpansionData) so no reserved access after this point
997 if (_memEntry)
998 ipc_port_release_send( (ipc_port_t) _memEntry );
999
1000 if (_prepareLock)
1001 IOLockFree(_prepareLock);
1002
1003 super::free();
1004 }
1005
1006 #ifndef __LP64__
1007 void IOGeneralMemoryDescriptor::unmapFromKernel()
1008 {
1009 panic("IOGMD::unmapFromKernel deprecated");
1010 }
1011
1012 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1013 {
1014 panic("IOGMD::mapIntoKernel deprecated");
1015 }
1016 #endif /* !__LP64__ */
1017
1018 /*
1019 * getDirection:
1020 *
1021 * Get the direction of the transfer.
1022 */
1023 IODirection IOMemoryDescriptor::getDirection() const
1024 {
1025 #ifndef __LP64__
1026 if (_direction)
1027 return _direction;
1028 #endif /* !__LP64__ */
1029 return (IODirection) (_flags & kIOMemoryDirectionMask);
1030 }
1031
1032 /*
1033 * getLength:
1034 *
1035 * Get the length of the transfer (over all ranges).
1036 */
1037 IOByteCount IOMemoryDescriptor::getLength() const
1038 {
1039 return _length;
1040 }
1041
1042 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1043 {
1044 _tag = tag;
1045 }
1046
1047 IOOptionBits IOMemoryDescriptor::getTag( void )
1048 {
1049 return( _tag);
1050 }
1051
1052 #ifndef __LP64__
1053 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1054 IOPhysicalAddress
1055 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1056 {
1057 addr64_t physAddr = 0;
1058
1059 if( prepare() == kIOReturnSuccess) {
1060 physAddr = getPhysicalSegment64( offset, length );
1061 complete();
1062 }
1063
1064 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1065 }
1066 #endif /* !__LP64__ */
1067
1068 IOByteCount IOMemoryDescriptor::readBytes
1069 (IOByteCount offset, void *bytes, IOByteCount length)
1070 {
1071 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1072 IOByteCount remaining;
1073
1074 // Assert that this entire I/O is within the available range
1075 assert(offset < _length);
1076 assert(offset + length <= _length);
1077 if (offset >= _length) {
1078 return 0;
1079 }
1080
1081 if (kIOMemoryThreadSafe & _flags)
1082 LOCK;
1083
1084 remaining = length = min(length, _length - offset);
1085 while (remaining) { // (process another target segment?)
1086 addr64_t srcAddr64;
1087 IOByteCount srcLen;
1088
1089 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1090 if (!srcAddr64)
1091 break;
1092
1093 // Clip segment length to remaining
1094 if (srcLen > remaining)
1095 srcLen = remaining;
1096
1097 copypv(srcAddr64, dstAddr, srcLen,
1098 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1099
1100 dstAddr += srcLen;
1101 offset += srcLen;
1102 remaining -= srcLen;
1103 }
1104
1105 if (kIOMemoryThreadSafe & _flags)
1106 UNLOCK;
1107
1108 assert(!remaining);
1109
1110 return length - remaining;
1111 }
1112
1113 IOByteCount IOMemoryDescriptor::writeBytes
1114 (IOByteCount offset, const void *bytes, IOByteCount length)
1115 {
1116 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1117 IOByteCount remaining;
1118
1119 // Assert that this entire I/O is within the available range
1120 assert(offset < _length);
1121 assert(offset + length <= _length);
1122
1123 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1124
1125 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1126 return 0;
1127 }
1128
1129 if (kIOMemoryThreadSafe & _flags)
1130 LOCK;
1131
1132 remaining = length = min(length, _length - offset);
1133 while (remaining) { // (process another target segment?)
1134 addr64_t dstAddr64;
1135 IOByteCount dstLen;
1136
1137 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1138 if (!dstAddr64)
1139 break;
1140
1141 // Clip segment length to remaining
1142 if (dstLen > remaining)
1143 dstLen = remaining;
1144
1145 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1146 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1147
1148 srcAddr += dstLen;
1149 offset += dstLen;
1150 remaining -= dstLen;
1151 }
1152
1153 if (kIOMemoryThreadSafe & _flags)
1154 UNLOCK;
1155
1156 assert(!remaining);
1157
1158 return length - remaining;
1159 }
1160
1161 // osfmk/device/iokit_rpc.c
1162 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1163
1164 #ifndef __LP64__
1165 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1166 {
1167 panic("IOGMD::setPosition deprecated");
1168 }
1169 #endif /* !__LP64__ */
1170
1171 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1172
1173 uint64_t
1174 IOGeneralMemoryDescriptor::getPreparationID( void )
1175 {
1176 ioGMDData *dataP;
1177 if (!_wireCount || !(dataP = getDataP(_memoryEntries)))
1178 return (kIOPreparationIDUnprepared);
1179 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1180 {
1181 #if defined(__ppc__ )
1182 dataP->fPreparationID = gIOMDPreparationID++;
1183 #else
1184 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1185 #endif
1186 }
1187 return (dataP->fPreparationID);
1188 }
1189
1190 uint64_t
1191 IOMemoryDescriptor::getPreparationID( void )
1192 {
1193 return (kIOPreparationIDUnsupported);
1194 }
1195
1196 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1197 {
1198 if (kIOMDGetCharacteristics == op) {
1199
1200 if (dataSize < sizeof(IOMDDMACharacteristics))
1201 return kIOReturnUnderrun;
1202
1203 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1204 data->fLength = _length;
1205 data->fSGCount = _rangesCount;
1206 data->fPages = _pages;
1207 data->fDirection = getDirection();
1208 if (!_wireCount)
1209 data->fIsPrepared = false;
1210 else {
1211 data->fIsPrepared = true;
1212 data->fHighestPage = _highestPage;
1213 if (_memoryEntries) {
1214 ioGMDData *gmdData = getDataP(_memoryEntries);
1215 ioPLBlock *ioplList = getIOPLList(gmdData);
1216 UInt count = getNumIOPL(_memoryEntries, gmdData);
1217
1218 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1219 && ioplList[0].fMappedBase);
1220 if (count == 1)
1221 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1222 }
1223 else
1224 data->fIsMapped = false;
1225 }
1226
1227 return kIOReturnSuccess;
1228
1229 #if IOMD_DEBUG_DMAACTIVE
1230 } else if (kIOMDSetDMAActive == op) {
1231 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1232 md->__iomd_reservedA++;
1233 } else if (kIOMDSetDMAInactive == op) {
1234 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1235 if (md->__iomd_reservedA)
1236 md->__iomd_reservedA--;
1237 else
1238 panic("kIOMDSetDMAInactive");
1239 #endif /* IOMD_DEBUG_DMAACTIVE */
1240
1241 } else if (!(kIOMDWalkSegments & op))
1242 return kIOReturnBadArgument;
1243
1244 // Get the next segment
1245 struct InternalState {
1246 IOMDDMAWalkSegmentArgs fIO;
1247 UInt fOffset2Index;
1248 UInt fIndex;
1249 UInt fNextOffset;
1250 } *isP;
1251
1252 // Find the next segment
1253 if (dataSize < sizeof(*isP))
1254 return kIOReturnUnderrun;
1255
1256 isP = (InternalState *) vData;
1257 UInt offset = isP->fIO.fOffset;
1258 bool mapped = isP->fIO.fMapped;
1259
1260 if (offset >= _length)
1261 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1262
1263 // Validate the previous offset
1264 UInt ind, off2Ind = isP->fOffset2Index;
1265 if ((kIOMDFirstSegment != op)
1266 && offset
1267 && (offset == isP->fNextOffset || off2Ind <= offset))
1268 ind = isP->fIndex;
1269 else
1270 ind = off2Ind = 0; // Start from beginning
1271
1272 UInt length;
1273 UInt64 address;
1274 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1275
1276 // Physical address based memory descriptor
1277 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1278
1279 // Find the range after the one that contains the offset
1280 mach_vm_size_t len;
1281 for (len = 0; off2Ind <= offset; ind++) {
1282 len = physP[ind].length;
1283 off2Ind += len;
1284 }
1285
1286 // Calculate length within range and starting address
1287 length = off2Ind - offset;
1288 address = physP[ind - 1].address + len - length;
1289
1290 // see how far we can coalesce ranges
1291 while (ind < _rangesCount && address + length == physP[ind].address) {
1292 len = physP[ind].length;
1293 length += len;
1294 off2Ind += len;
1295 ind++;
1296 }
1297
1298 // correct contiguous check overshoot
1299 ind--;
1300 off2Ind -= len;
1301 }
1302 #ifndef __LP64__
1303 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1304
1305 // Physical address based memory descriptor
1306 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1307
1308 // Find the range after the one that contains the offset
1309 mach_vm_size_t len;
1310 for (len = 0; off2Ind <= offset; ind++) {
1311 len = physP[ind].length;
1312 off2Ind += len;
1313 }
1314
1315 // Calculate length within range and starting address
1316 length = off2Ind - offset;
1317 address = physP[ind - 1].address + len - length;
1318
1319 // see how far we can coalesce ranges
1320 while (ind < _rangesCount && address + length == physP[ind].address) {
1321 len = physP[ind].length;
1322 length += len;
1323 off2Ind += len;
1324 ind++;
1325 }
1326
1327 // correct contiguous check overshoot
1328 ind--;
1329 off2Ind -= len;
1330 }
1331 #endif /* !__LP64__ */
1332 else do {
1333 if (!_wireCount)
1334 panic("IOGMD: not wired for the IODMACommand");
1335
1336 assert(_memoryEntries);
1337
1338 ioGMDData * dataP = getDataP(_memoryEntries);
1339 const ioPLBlock *ioplList = getIOPLList(dataP);
1340 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1341 upl_page_info_t *pageList = getPageList(dataP);
1342
1343 assert(numIOPLs > 0);
1344
1345 // Scan through iopl info blocks looking for block containing offset
1346 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1347 ind++;
1348
1349 // Go back to actual range as search goes past it
1350 ioPLBlock ioplInfo = ioplList[ind - 1];
1351 off2Ind = ioplInfo.fIOMDOffset;
1352
1353 if (ind < numIOPLs)
1354 length = ioplList[ind].fIOMDOffset;
1355 else
1356 length = _length;
1357 length -= offset; // Remainder within iopl
1358
1359 // Make the offset relative to the start of this iopl in the total list
1360 offset -= off2Ind;
1361
1362 // If a mapped address is requested and this is a pre-mapped IOPL
1363 // then just need to compute an offset relative to the mapped base.
1364 if (mapped && ioplInfo.fMappedBase) {
1365 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1366 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1367 continue; // Done; leave the do/while(false) now
1368 }
1369
1370 // The offset is rebased into the current iopl.
1371 // Now add the iopl 1st page offset.
1372 offset += ioplInfo.fPageOffset;
1373
1374 // For external UPLs the fPageInfo field points directly to
1375 // the upl's upl_page_info_t array.
1376 if (ioplInfo.fFlags & kIOPLExternUPL)
1377 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1378 else
1379 pageList = &pageList[ioplInfo.fPageInfo];
1380
1381 // Check for direct device non-paged memory
1382 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1383 address = ptoa_64(pageList->phys_addr) + offset;
1384 continue; // Done; leave the do/while(false) now
1385 }
1386
1387 // Now we need to compute the index into the pageList
1388 UInt pageInd = atop_32(offset);
1389 offset &= PAGE_MASK;
1390
1391 // Compute the starting address of this segment
1392 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1393 if (!pageAddr) {
1394 panic("!pageList phys_addr");
1395 }
1396
1397 address = ptoa_64(pageAddr) + offset;
1398
1399 // length is currently set to the length of the remainder of the iopl.
1400 // We need to check that the remainder of the iopl is contiguous.
1401 // This is indicated by pageList[pageInd].phys_addr being sequential.
1402 IOByteCount contigLength = PAGE_SIZE - offset;
1403 while (contigLength < length
1404 && ++pageAddr == pageList[++pageInd].phys_addr)
1405 {
1406 contigLength += PAGE_SIZE;
1407 }
1408
1409 if (contigLength < length)
1410 length = contigLength;
1411
1412
1413 assert(address);
1414 assert(length);
1415
1416 } while (false);
1417
1418 // Update return values and state
1419 isP->fIO.fIOVMAddr = address;
1420 isP->fIO.fLength = length;
1421 isP->fIndex = ind;
1422 isP->fOffset2Index = off2Ind;
1423 isP->fNextOffset = isP->fIO.fOffset + length;
1424
1425 return kIOReturnSuccess;
1426 }
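
/*
 * Illustrative sketch (not part of the original source): how a caller such
 * as IODMACommand can drive the segment walker above once the descriptor
 * has been prepare()d; getPhysicalSegment() below uses the same pattern for
 * a single segment.  md is hypothetical.
 *
 *   IOMDDMAWalkSegmentState  walkState;
 *   IOMDDMAWalkSegmentArgs * walk = (IOMDDMAWalkSegmentArgs *) &walkState;
 *
 *   walk->fOffset = 0;
 *   walk->fMapped = true;    // ask for IOVM (mapper) addresses if available
 *   IOReturn ret = md->dmaCommandOperation(kIOMDFirstSegment,
 *                                          walkState, sizeof(walkState));
 *   while (kIOReturnSuccess == ret) {
 *       // walk->fIOVMAddr and walk->fLength describe one contiguous segment
 *       walk->fOffset += walk->fLength;
 *       ret = md->dmaCommandOperation(kIOMDWalkSegments,
 *                                     walkState, sizeof(walkState));
 *   }
 */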
1427
1428 addr64_t
1429 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1430 {
1431 IOReturn ret;
1432 addr64_t address = 0;
1433 IOByteCount length = 0;
1434 IOMapper * mapper = gIOSystemMapper;
1435 IOOptionBits type = _flags & kIOMemoryTypeMask;
1436
1437 if (lengthOfSegment)
1438 *lengthOfSegment = 0;
1439
1440 if (offset >= _length)
1441 return 0;
1442
1443 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1444 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1445 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1446 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1447
1448 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1449 {
1450 unsigned rangesIndex = 0;
1451 Ranges vec = _ranges;
1452 user_addr_t addr;
1453
1454 // Find starting address within the vector of ranges
1455 for (;;) {
1456 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1457 if (offset < length)
1458 break;
1459 offset -= length; // (make offset relative)
1460 rangesIndex++;
1461 }
1462
1463 // Now that we have the starting range,
1464 // let's find the last contiguous range
1465 addr += offset;
1466 length -= offset;
1467
1468 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1469 user_addr_t newAddr;
1470 IOPhysicalLength newLen;
1471
1472 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1473 if (addr + length != newAddr)
1474 break;
1475 length += newLen;
1476 }
1477 if (addr)
1478 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1479 }
1480 else
1481 {
1482 IOMDDMAWalkSegmentState _state;
1483 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1484
1485 state->fOffset = offset;
1486 state->fLength = _length - offset;
1487 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1488
1489 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1490
1491 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1492 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1493 ret, this, state->fOffset,
1494 state->fIOVMAddr, state->fLength);
1495 if (kIOReturnSuccess == ret)
1496 {
1497 address = state->fIOVMAddr;
1498 length = state->fLength;
1499 }
1500
1501 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1502 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1503
1504 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1505 {
1506 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1507 {
1508 addr64_t origAddr = address;
1509 IOByteCount origLen = length;
1510
1511 address = mapper->mapAddr(origAddr);
1512 length = page_size - (address & (page_size - 1));
1513 while ((length < origLen)
1514 && ((address + length) == mapper->mapAddr(origAddr + length)))
1515 length += page_size;
1516 if (length > origLen)
1517 length = origLen;
1518 }
1519 #ifdef __LP64__
1520 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1521 {
1522 panic("getPhysicalSegment not mapped for I/O");
1523 }
1524 #endif /* __LP64__ */
1525 }
1526 }
1527
1528 if (!address)
1529 length = 0;
1530
1531 if (lengthOfSegment)
1532 *lengthOfSegment = length;
1533
1534 return (address);
1535 }
1536
1537 #ifndef __LP64__
1538 addr64_t
1539 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1540 {
1541 addr64_t address = 0;
1542
1543 if (options & _kIOMemorySourceSegment)
1544 {
1545 address = getSourceSegment(offset, lengthOfSegment);
1546 }
1547 else if (options & kIOMemoryMapperNone)
1548 {
1549 address = getPhysicalSegment64(offset, lengthOfSegment);
1550 }
1551 else
1552 {
1553 address = getPhysicalSegment(offset, lengthOfSegment);
1554 }
1555
1556 return (address);
1557 }
1558
1559 addr64_t
1560 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1561 {
1562 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1563 }
1564
1565 IOPhysicalAddress
1566 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1567 {
1568 addr64_t address = 0;
1569 IOByteCount length = 0;
1570
1571 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1572
1573 if (lengthOfSegment)
1574 length = *lengthOfSegment;
1575
1576 if ((address + length) > 0x100000000ULL)
1577 {
1578 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1579 address, (long) length, (getMetaClass())->getClassName());
1580 }
1581
1582 return ((IOPhysicalAddress) address);
1583 }
1584
1585 addr64_t
1586 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1587 {
1588 IOPhysicalAddress phys32;
1589 IOByteCount length;
1590 addr64_t phys64;
1591 IOMapper * mapper = 0;
1592
1593 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1594 if (!phys32)
1595 return 0;
1596
1597 if (gIOSystemMapper)
1598 mapper = gIOSystemMapper;
1599
1600 if (mapper)
1601 {
1602 IOByteCount origLen;
1603
1604 phys64 = mapper->mapAddr(phys32);
1605 origLen = *lengthOfSegment;
1606 length = page_size - (phys64 & (page_size - 1));
1607 while ((length < origLen)
1608 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1609 length += page_size;
1610 if (length > origLen)
1611 length = origLen;
1612
1613 *lengthOfSegment = length;
1614 }
1615 else
1616 phys64 = (addr64_t) phys32;
1617
1618 return phys64;
1619 }
1620
1621 IOPhysicalAddress
1622 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1623 {
1624 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1625 }
1626
1627 IOPhysicalAddress
1628 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1629 {
1630 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1631 }
1632
1633 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1634 IOByteCount * lengthOfSegment)
1635 {
1636 if (_task == kernel_task)
1637 return (void *) getSourceSegment(offset, lengthOfSegment);
1638 else
1639 panic("IOGMD::getVirtualSegment deprecated");
1640
1641 return 0;
1642 }
1643 #endif /* !__LP64__ */
1644
1645 IOReturn
1646 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1647 {
1648 if (kIOMDGetCharacteristics == op) {
1649 if (dataSize < sizeof(IOMDDMACharacteristics))
1650 return kIOReturnUnderrun;
1651
1652 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1653 data->fLength = getLength();
1654 data->fSGCount = 0;
1655 data->fDirection = getDirection();
1656 if (IOMapper::gSystem)
1657 data->fIsMapped = true;
1658 data->fIsPrepared = true; // Assume prepared - fails safe
1659 }
1660 else if (kIOMDWalkSegments & op) {
1661 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1662 return kIOReturnUnderrun;
1663
1664 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1665 IOByteCount offset = (IOByteCount) data->fOffset;
1666
1667 IOPhysicalLength length;
1668 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1669 if (data->fMapped && IOMapper::gSystem)
1670 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1671 else
1672 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1673 data->fLength = length;
1674 }
1675 else
1676 return kIOReturnBadArgument;
1677
1678 return kIOReturnSuccess;
1679 }
1680
1681 static IOReturn
1682 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1683 {
1684 IOReturn err = kIOReturnSuccess;
1685
1686 *control = VM_PURGABLE_SET_STATE;
1687 switch (newState)
1688 {
1689 case kIOMemoryPurgeableKeepCurrent:
1690 *control = VM_PURGABLE_GET_STATE;
1691 break;
1692
1693 case kIOMemoryPurgeableNonVolatile:
1694 *state = VM_PURGABLE_NONVOLATILE;
1695 break;
1696 case kIOMemoryPurgeableVolatile:
1697 *state = VM_PURGABLE_VOLATILE;
1698 break;
1699 case kIOMemoryPurgeableEmpty:
1700 *state = VM_PURGABLE_EMPTY;
1701 break;
1702 default:
1703 err = kIOReturnBadArgument;
1704 break;
1705 }
1706 return (err);
1707 }
1708
1709 static IOReturn
1710 purgeableStateBits(int * state)
1711 {
1712 IOReturn err = kIOReturnSuccess;
1713
1714 switch (*state)
1715 {
1716 case VM_PURGABLE_NONVOLATILE:
1717 *state = kIOMemoryPurgeableNonVolatile;
1718 break;
1719 case VM_PURGABLE_VOLATILE:
1720 *state = kIOMemoryPurgeableVolatile;
1721 break;
1722 case VM_PURGABLE_EMPTY:
1723 *state = kIOMemoryPurgeableEmpty;
1724 break;
1725 default:
1726 *state = kIOMemoryPurgeableNonVolatile;
1727 err = kIOReturnNotReady;
1728 break;
1729 }
1730 return (err);
1731 }
1732
1733 IOReturn
1734 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1735 IOOptionBits * oldState )
1736 {
1737 IOReturn err = kIOReturnSuccess;
1738 vm_purgable_t control;
1739 int state;
1740
1741 if (_memEntry)
1742 {
1743 err = super::setPurgeable(newState, oldState);
1744 }
1745 else
1746 {
1747 if (kIOMemoryThreadSafe & _flags)
1748 LOCK;
1749 do
1750 {
1751 // Find the appropriate vm_map for the given task
1752 vm_map_t curMap;
1753 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1754 {
1755 err = kIOReturnNotReady;
1756 break;
1757 }
1758 else
1759 curMap = get_task_map(_task);
1760
1761 // can only do one range
1762 Ranges vec = _ranges;
1763 IOOptionBits type = _flags & kIOMemoryTypeMask;
1764 user_addr_t addr;
1765 IOByteCount len;
1766 getAddrLenForInd(addr, len, type, vec, 0);
1767
1768 err = purgeableControlBits(newState, &control, &state);
1769 if (kIOReturnSuccess != err)
1770 break;
1771 err = mach_vm_purgable_control(curMap, addr, control, &state);
1772 if (oldState)
1773 {
1774 if (kIOReturnSuccess == err)
1775 {
1776 err = purgeableStateBits(&state);
1777 *oldState = state;
1778 }
1779 }
1780 }
1781 while (false);
1782 if (kIOMemoryThreadSafe & _flags)
1783 UNLOCK;
1784 }
1785 return (err);
1786 }
1787
1788 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1789 IOOptionBits * oldState )
1790 {
1791 IOReturn err = kIOReturnSuccess;
1792 vm_purgable_t control;
1793 int state;
1794
1795 if (kIOMemoryThreadSafe & _flags)
1796 LOCK;
1797
1798 do
1799 {
1800 if (!_memEntry)
1801 {
1802 err = kIOReturnNotReady;
1803 break;
1804 }
1805 err = purgeableControlBits(newState, &control, &state);
1806 if (kIOReturnSuccess != err)
1807 break;
1808 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1809 if (oldState)
1810 {
1811 if (kIOReturnSuccess == err)
1812 {
1813 err = purgeableStateBits(&state);
1814 *oldState = state;
1815 }
1816 }
1817 }
1818 while (false);
1819
1820 if (kIOMemoryThreadSafe & _flags)
1821 UNLOCK;
1822
1823 return (err);
1824 }
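
/*
 * Illustrative sketch (not part of the original source): marking a
 * descriptor's backing memory volatile and later checking whether the VM
 * system emptied it; md is hypothetical.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *   // ... memory may now be reclaimed under pressure ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if (kIOMemoryPurgeableEmpty == oldState) {
 *       // contents were discarded while volatile; regenerate the data
 *   }
 */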
1825
1826 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1827 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1828
1829 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1830 IOByteCount offset, IOByteCount length )
1831 {
1832 IOByteCount remaining;
1833 void (*func)(addr64_t pa, unsigned int count) = 0;
1834
1835 switch (options)
1836 {
1837 case kIOMemoryIncoherentIOFlush:
1838 func = &dcache_incoherent_io_flush64;
1839 break;
1840 case kIOMemoryIncoherentIOStore:
1841 func = &dcache_incoherent_io_store64;
1842 break;
1843 }
1844
1845 if (!func)
1846 return (kIOReturnUnsupported);
1847
1848 if (kIOMemoryThreadSafe & _flags)
1849 LOCK;
1850
1851 remaining = length = min(length, getLength() - offset);
1852 while (remaining)
1853 // (process another target segment?)
1854 {
1855 addr64_t dstAddr64;
1856 IOByteCount dstLen;
1857
1858 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1859 if (!dstAddr64)
1860 break;
1861
1862 // Clip segment length to remaining
1863 if (dstLen > remaining)
1864 dstLen = remaining;
1865
1866 (*func)(dstAddr64, dstLen);
1867
1868 offset += dstLen;
1869 remaining -= dstLen;
1870 }
1871
1872 if (kIOMemoryThreadSafe & _flags)
1873 UNLOCK;
1874
1875 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1876 }
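
/*
 * Illustrative sketch (not part of the original source): flushing stale CPU
 * cache lines over the first page of a descriptor after a device has
 * written to it via non-coherent DMA; md is hypothetical.
 *
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, page_size);
 */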
1877
1878 #if defined(__ppc__) || defined(__arm__)
1879 extern vm_offset_t static_memory_end;
1880 #define io_kernel_static_end static_memory_end
1881 #else
1882 extern vm_offset_t first_avail;
1883 #define io_kernel_static_end first_avail
1884 #endif
1885
1886 static kern_return_t
1887 io_get_kernel_static_upl(
1888 vm_map_t /* map */,
1889 uintptr_t offset,
1890 vm_size_t *upl_size,
1891 upl_t *upl,
1892 upl_page_info_array_t page_list,
1893 unsigned int *count,
1894 ppnum_t *highest_page)
1895 {
1896 unsigned int pageCount, page;
1897 ppnum_t phys;
1898 ppnum_t highestPage = 0;
1899
1900 pageCount = atop_32(*upl_size);
1901 if (pageCount > *count)
1902 pageCount = *count;
1903
1904 *upl = NULL;
1905
1906 for (page = 0; page < pageCount; page++)
1907 {
1908 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1909 if (!phys)
1910 break;
1911 page_list[page].phys_addr = phys;
1912 page_list[page].pageout = 0;
1913 page_list[page].absent = 0;
1914 page_list[page].dirty = 0;
1915 page_list[page].precious = 0;
1916 page_list[page].device = 0;
1917 if (phys > highestPage)
1918 highestPage = phys;
1919 }
1920
1921 *highest_page = highestPage;
1922
1923 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1924 }
1925
1926 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1927 {
1928 IOOptionBits type = _flags & kIOMemoryTypeMask;
1929 IOReturn error = kIOReturnCannotWire;
1930 ioGMDData *dataP;
1931 ppnum_t mapBase = 0;
1932 IOMapper *mapper;
1933 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1934
1935 assert(!_wireCount);
1936 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1937
1938 if (_pages >= gIOMaximumMappedIOPageCount)
1939 return kIOReturnNoResources;
1940
1941 dataP = getDataP(_memoryEntries);
1942 mapper = dataP->fMapper;
1943 if (mapper && _pages)
1944 mapBase = mapper->iovmAlloc(_pages);
1945
1946 // Note that appendBytes(NULL) zeros the data up to the
1947 // desired length.
1948 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1949 dataP = 0; // May no longer be valid so let's not get tempted.
1950
1951 if (forDirection == kIODirectionNone)
1952 forDirection = getDirection();
1953
1954 int uplFlags; // This Mem Desc's default flags for upl creation
1955 switch (kIODirectionOutIn & forDirection)
1956 {
1957 case kIODirectionOut:
1958 // Pages do not need to be marked as dirty on commit
1959 uplFlags = UPL_COPYOUT_FROM;
1960 _flags |= kIOMemoryPreparedReadOnly;
1961 break;
1962
1963 case kIODirectionIn:
1964 default:
1965 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1966 break;
1967 }
1968 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1969
1970 #ifdef UPL_NEED_32BIT_ADDR
1971 if (kIODirectionPrepareToPhys32 & forDirection)
1972 uplFlags |= UPL_NEED_32BIT_ADDR;
1973 #endif
1974
1975 // Find the appropriate vm_map for the given task
1976 vm_map_t curMap;
1977 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1978 curMap = 0;
1979 else
1980 { curMap = get_task_map(_task); }
1981
1982 // Iterate over the vector of virtual ranges
1983 Ranges vec = _ranges;
1984 unsigned int pageIndex = 0;
1985 IOByteCount mdOffset = 0;
1986 ppnum_t highestPage = 0;
1987 for (UInt range = 0; range < _rangesCount; range++) {
1988 ioPLBlock iopl;
1989 user_addr_t startPage;
1990 IOByteCount numBytes;
1991 ppnum_t highPage = 0;
1992
1993 // Get the startPage address and length of vec[range]
1994 getAddrLenForInd(startPage, numBytes, type, vec, range);
1995 iopl.fPageOffset = startPage & PAGE_MASK;
1996 numBytes += iopl.fPageOffset;
1997 startPage = trunc_page_64(startPage);
1998
1999 if (mapper)
2000 iopl.fMappedBase = mapBase + pageIndex;
2001 else
2002 iopl.fMappedBase = 0;
2003
2004 // Iterate over the current range, creating UPLs
2005 while (numBytes) {
2006 dataP = getDataP(_memoryEntries);
2007 vm_address_t kernelStart = (vm_address_t) startPage;
2008 vm_map_t theMap;
2009 if (curMap)
2010 theMap = curMap;
2011 else if (!sharedMem) {
2012 assert(_task == kernel_task);
2013 theMap = IOPageableMapForAddress(kernelStart);
2014 }
2015 else
2016 theMap = NULL;
2017
2018 upl_page_info_array_t pageInfo = getPageList(dataP);
2019 int ioplFlags = uplFlags;
2020 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2021
2022 vm_size_t ioplSize = round_page(numBytes);
2023 unsigned int numPageInfo = atop_32(ioplSize);
2024
2025 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2026 error = io_get_kernel_static_upl(theMap,
2027 kernelStart,
2028 &ioplSize,
2029 &iopl.fIOPL,
2030 baseInfo,
2031 &numPageInfo,
2032 &highPage);
2033 }
2034 else if (sharedMem) {
2035 error = memory_object_iopl_request(sharedMem,
2036 ptoa_32(pageIndex),
2037 &ioplSize,
2038 &iopl.fIOPL,
2039 baseInfo,
2040 &numPageInfo,
2041 &ioplFlags);
2042 }
2043 else {
2044 assert(theMap);
2045 error = vm_map_create_upl(theMap,
2046 startPage,
2047 (upl_size_t*)&ioplSize,
2048 &iopl.fIOPL,
2049 baseInfo,
2050 &numPageInfo,
2051 &ioplFlags);
2052 }
2053
2054 assert(ioplSize);
2055 if (error != KERN_SUCCESS)
2056 goto abortExit;
2057
2058 if (iopl.fIOPL)
2059 highPage = upl_get_highest_page(iopl.fIOPL);
2060 if (highPage > highestPage)
2061 highestPage = highPage;
2062
2063 error = kIOReturnCannotWire;
2064
2065 if (baseInfo->device) {
2066 numPageInfo = 1;
2067 iopl.fFlags = kIOPLOnDevice;
2068 // Don't translate device memory at all
2069 if (mapper && mapBase) {
2070 mapper->iovmFree(mapBase, _pages);
2071 mapBase = 0;
2072 iopl.fMappedBase = 0;
2073 }
2074 }
2075 else {
2076 iopl.fFlags = 0;
2077 if (mapper)
2078 mapper->iovmInsert(mapBase, pageIndex,
2079 baseInfo, numPageInfo);
2080 }
2081
2082 iopl.fIOMDOffset = mdOffset;
2083 iopl.fPageInfo = pageIndex;
2084
2085 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2086 {
2087 upl_commit(iopl.fIOPL, 0, 0);
2088 upl_deallocate(iopl.fIOPL);
2089 iopl.fIOPL = 0;
2090 }
2091
2092 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2093 // Clean up the partially created and unsaved iopl
2094 if (iopl.fIOPL) {
2095 upl_abort(iopl.fIOPL, 0);
2096 upl_deallocate(iopl.fIOPL);
2097 }
2098 goto abortExit;
2099 }
2100
2101 // Check for multiple iopls in one virtual range
2102 pageIndex += numPageInfo;
2103 mdOffset -= iopl.fPageOffset;
2104 if (ioplSize < numBytes) {
2105 numBytes -= ioplSize;
2106 startPage += ioplSize;
2107 mdOffset += ioplSize;
2108 iopl.fPageOffset = 0;
2109 if (mapper)
2110 iopl.fMappedBase = mapBase + pageIndex;
2111 }
2112 else {
2113 mdOffset += numBytes;
2114 break;
2115 }
2116 }
2117 }
2118
2119 _highestPage = highestPage;
2120
2121 return kIOReturnSuccess;
2122
2123 abortExit:
2124 {
2125 dataP = getDataP(_memoryEntries);
2126 UInt done = getNumIOPL(_memoryEntries, dataP);
2127 ioPLBlock *ioplList = getIOPLList(dataP);
2128
2129 for (UInt range = 0; range < done; range++)
2130 {
2131 if (ioplList[range].fIOPL) {
2132 upl_abort(ioplList[range].fIOPL, 0);
2133 upl_deallocate(ioplList[range].fIOPL);
2134 }
2135 }
2136 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2137
2138 if (mapper && mapBase)
2139 mapper->iovmFree(mapBase, _pages);
2140 }
2141
2142 if (error == KERN_FAILURE)
2143 error = kIOReturnCannotWire;
2144
2145 return error;
2146 }
2147
2148 /*
2149 * prepare
2150 *
2151 * Prepare the memory for an I/O transfer. This involves paging in
2152 * the memory, if necessary, and wiring it down for the duration of
2153 * the transfer. The complete() method completes the processing of
2154 * the memory after the I/O transfer finishes. This method need not
2155 * be called for non-pageable memory.
2156 */
2157 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2158 {
2159 IOReturn error = kIOReturnSuccess;
2160 IOOptionBits type = _flags & kIOMemoryTypeMask;
2161
2162 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2163 return kIOReturnSuccess;
2164
2165 if (_prepareLock)
2166 IOLockLock(_prepareLock);
2167
2168 if (!_wireCount
2169 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2170 error = wireVirtual(forDirection);
2171 }
2172
2173 if (kIOReturnSuccess == error)
2174 _wireCount++;
2175
2176 if (_prepareLock)
2177 IOLockUnlock(_prepareLock);
2178
2179 return error;
2180 }
2181
2182 /*
2183 * complete
2184 *
2185 * Complete processing of the memory after an I/O transfer finishes.
2186 * This method should not be called unless a prepare was previously
2187 * issued; the prepare() and complete() calls must occur in pairs,
2188 * before and after an I/O transfer involving pageable memory.
2189 */
2190
2191 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2192 {
2193 IOOptionBits type = _flags & kIOMemoryTypeMask;
2194
2195 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2196 return kIOReturnSuccess;
2197
2198 if (_prepareLock)
2199 IOLockLock(_prepareLock);
2200
2201 assert(_wireCount);
2202
2203 if (_wireCount)
2204 {
2205 _wireCount--;
2206 if (!_wireCount)
2207 {
2208 IOOptionBits type = _flags & kIOMemoryTypeMask;
2209 ioGMDData * dataP = getDataP(_memoryEntries);
2210 ioPLBlock *ioplList = getIOPLList(dataP);
2211 UInt count = getNumIOPL(_memoryEntries, dataP);
2212
2213 #if IOMD_DEBUG_DMAACTIVE
2214 if (__iomd_reservedA) panic("complete() while dma active");
2215 #endif /* IOMD_DEBUG_DMAACTIVE */
2216
2217 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2218 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2219
2220 // Only complete the iopls we created, which are those for TypeVirtual
2221 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2222 for (UInt ind = 0; ind < count; ind++)
2223 if (ioplList[ind].fIOPL) {
2224 upl_commit(ioplList[ind].fIOPL, 0, 0);
2225 upl_deallocate(ioplList[ind].fIOPL);
2226 }
2227 }
2228 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2229
2230 dataP->fPreparationID = kIOPreparationIDUnprepared;
2231 }
2232 }
2233
2234 if (_prepareLock)
2235 IOLockUnlock(_prepareLock);
2236
2237 return kIOReturnSuccess;
2238 }
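/*
 * Usage sketch (illustrative only, not part of this translation unit):
 * how a driver typically brackets a DMA transfer with prepare() and
 * complete(). The buffer arguments and the doTransfer() routine are
 * hypothetical placeholders.
 *
 *	IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *				      bufferAddr, bufferLen, kIODirectionOut, task);
 *	if (md && (kIOReturnSuccess == md->prepare()))	// page in and wire down
 *	{
 *	    doTransfer(md);				// hardware performs the I/O
 *	    md->complete();				// unwire when the transfer is done
 *	}
 *	if (md)
 *	    md->release();
 */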
2239
2240 IOReturn IOGeneralMemoryDescriptor::doMap(
2241 vm_map_t __addressMap,
2242 IOVirtualAddress * __address,
2243 IOOptionBits options,
2244 IOByteCount __offset,
2245 IOByteCount __length )
2246
2247 {
2248 #ifndef __LP64__
2249 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2250 #endif /* !__LP64__ */
2251
2252 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2253 mach_vm_size_t offset = mapping->fOffset + __offset;
2254 mach_vm_size_t length = mapping->fLength;
2255
2256 kern_return_t kr = kIOReturnVMError;
2257 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2258
2259 IOOptionBits type = _flags & kIOMemoryTypeMask;
2260 Ranges vec = _ranges;
2261
2262 user_addr_t range0Addr = 0;
2263 IOByteCount range0Len = 0;
2264
2265 if (vec.v)
2266 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2267
2268 // mapping source == dest? (could be much better)
2269 if( _task
2270 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2271 && (1 == _rangesCount) && (0 == offset)
2272 && range0Addr && (length <= range0Len) )
2273 {
2274 mapping->fAddress = range0Addr;
2275 mapping->fOptions |= kIOMapStatic;
2276
2277 return( kIOReturnSuccess );
2278 }
2279
2280 if( 0 == sharedMem) {
2281
2282 vm_size_t size = ptoa_32(_pages);
2283
2284 if( _task) {
2285
2286 memory_object_size_t actualSize = size;
2287 vm_prot_t prot = VM_PROT_READ;
2288 if (!(kIOMapReadOnly & options))
2289 prot |= VM_PROT_WRITE;
2290 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2291 prot |= VM_PROT_WRITE;
2292
2293 kr = mach_make_memory_entry_64(get_task_map(_task),
2294 &actualSize, range0Addr,
2295 prot, &sharedMem,
2296 NULL );
2297
2298 if( (KERN_SUCCESS == kr) && (actualSize != round_page(size)))
2299 {
2300 // map will cross vm objects
2301 #if IOASSERT
2302 IOLog("mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
2303 range0Addr, (UInt64)actualSize, (UInt64)size);
2304 #endif
2305 kr = kIOReturnVMError;
2306 ipc_port_release_send( sharedMem );
2307 sharedMem = MACH_PORT_NULL;
2308
2309 mach_vm_address_t address;
2310 mach_vm_size_t pageOffset = (range0Addr & PAGE_MASK);
2311
2312 address = trunc_page_64(mapping->fAddress);
2313 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2314 {
2315 kr = IOMemoryDescriptorMapCopy(mapping->fAddressMap,
2316 get_task_map(_task), range0Addr,
2317 options,
2318 offset, &address, round_page_64(length + pageOffset));
2319 if (kr == KERN_SUCCESS)
2320 mapping->fAddress = address + pageOffset;
2321 else
2322 mapping->fAddress = NULL;
2323 }
2324 }
2325 }
2326 else do
2327 { // _task == 0, must be physical
2328
2329 memory_object_t pager;
2330 unsigned int flags = 0;
2331 addr64_t pa;
2332 IOPhysicalLength segLen;
2333
2334 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2335
2336 if( !reserved) {
2337 reserved = IONew( ExpansionData, 1 );
2338 if( !reserved)
2339 continue;
2340 }
2341 reserved->pagerContig = (1 == _rangesCount);
2342 reserved->memory = this;
2343
2344 /* What cache mode do we need? */
2345 switch(options & kIOMapCacheMask ) {
2346
2347 case kIOMapDefaultCache:
2348 default:
2349 flags = IODefaultCacheBits(pa);
2350 if (DEVICE_PAGER_CACHE_INHIB & flags)
2351 {
2352 if (DEVICE_PAGER_GUARDED & flags)
2353 mapping->fOptions |= kIOMapInhibitCache;
2354 else
2355 mapping->fOptions |= kIOMapWriteCombineCache;
2356 }
2357 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2358 mapping->fOptions |= kIOMapWriteThruCache;
2359 else
2360 mapping->fOptions |= kIOMapCopybackCache;
2361 break;
2362
2363 case kIOMapInhibitCache:
2364 flags = DEVICE_PAGER_CACHE_INHIB |
2365 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2366 break;
2367
2368 case kIOMapWriteThruCache:
2369 flags = DEVICE_PAGER_WRITE_THROUGH |
2370 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2371 break;
2372
2373 case kIOMapCopybackCache:
2374 flags = DEVICE_PAGER_COHERENT;
2375 break;
2376
2377 case kIOMapWriteCombineCache:
2378 flags = DEVICE_PAGER_CACHE_INHIB |
2379 DEVICE_PAGER_COHERENT;
2380 break;
2381 }
2382
2383 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2384
2385 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2386 size, flags);
2387 assert( pager );
2388
2389 if( pager) {
2390 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2391 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2392
2393 assert( KERN_SUCCESS == kr );
2394 if( KERN_SUCCESS != kr)
2395 {
2396 device_pager_deallocate( pager );
2397 pager = MACH_PORT_NULL;
2398 sharedMem = MACH_PORT_NULL;
2399 }
2400 }
2401 if( pager && sharedMem)
2402 reserved->devicePager = pager;
2403 else {
2404 IODelete( reserved, ExpansionData, 1 );
2405 reserved = 0;
2406 }
2407
2408 } while( false );
2409
2410 _memEntry = (void *) sharedMem;
2411 }
2412
2413 IOReturn result;
2414 if (0 == sharedMem)
2415 result = kr;
2416 else
2417 result = super::doMap( __addressMap, __address,
2418 options, __offset, __length );
2419
2420 return( result );
2421 }
2422
2423 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2424 vm_map_t addressMap,
2425 IOVirtualAddress __address,
2426 IOByteCount __length )
2427 {
2428 return (super::doUnmap(addressMap, __address, __length));
2429 }
2430
2431 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2432
2433 #undef super
2434 #define super OSObject
2435
2436 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2437
2438 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2439 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2440 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2441 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2442 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2443 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2444 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2445 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2446
2447 /* ex-inline function implementation */
2448 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2449 { return( getPhysicalSegment( 0, 0 )); }
2450
2451 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2452
2453 bool IOMemoryMap::init(
2454 task_t intoTask,
2455 mach_vm_address_t toAddress,
2456 IOOptionBits _options,
2457 mach_vm_size_t _offset,
2458 mach_vm_size_t _length )
2459 {
2460 if (!intoTask)
2461 return( false);
2462
2463 if (!super::init())
2464 return(false);
2465
2466 fAddressMap = get_task_map(intoTask);
2467 if (!fAddressMap)
2468 return(false);
2469 vm_map_reference(fAddressMap);
2470
2471 fAddressTask = intoTask;
2472 fOptions = _options;
2473 fLength = _length;
2474 fOffset = _offset;
2475 fAddress = toAddress;
2476
2477 return (true);
2478 }
2479
2480 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2481 {
2482 if (!_memory)
2483 return(false);
2484
2485 if (!fSuperMap)
2486 {
2487 if( (_offset + fLength) > _memory->getLength())
2488 return( false);
2489 fOffset = _offset;
2490 }
2491
2492 _memory->retain();
2493 if (fMemory)
2494 {
2495 if (fMemory != _memory)
2496 fMemory->removeMapping(this);
2497 fMemory->release();
2498 }
2499 fMemory = _memory;
2500
2501 return( true );
2502 }
2503
2504 struct IOMemoryDescriptorMapAllocRef
2505 {
2506 ipc_port_t sharedMem;
2507 vm_map_t src_map;
2508 mach_vm_offset_t src_address;
2509 mach_vm_address_t mapped;
2510 mach_vm_size_t size;
2511 mach_vm_size_t sourceOffset;
2512 IOOptionBits options;
2513 };
2514
2515 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2516 {
2517 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2518 IOReturn err;
2519
2520 do {
2521 if( ref->sharedMem)
2522 {
2523 vm_prot_t prot = VM_PROT_READ
2524 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2525
2526 // VM system requires write access to change cache mode
2527 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2528 prot |= VM_PROT_WRITE;
2529
2530 // set memory entry cache
2531 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2532 switch (ref->options & kIOMapCacheMask)
2533 {
2534 case kIOMapInhibitCache:
2535 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2536 break;
2537
2538 case kIOMapWriteThruCache:
2539 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2540 break;
2541
2542 case kIOMapWriteCombineCache:
2543 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2544 break;
2545
2546 case kIOMapCopybackCache:
2547 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2548 break;
2549
2550 case kIOMapDefaultCache:
2551 default:
2552 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2553 break;
2554 }
2555
2556 vm_size_t unused = 0;
2557
2558 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2559 memEntryCacheMode, NULL, ref->sharedMem );
2560 if (KERN_SUCCESS != err)
2561 IOLog("MAP_MEM_ONLY failed %d\n", err);
2562
2563 err = mach_vm_map( map,
2564 &ref->mapped,
2565 ref->size, 0 /* mask */,
2566 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2567 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2568 ref->sharedMem, ref->sourceOffset,
2569 false, // copy
2570 prot, // cur
2571 prot, // max
2572 VM_INHERIT_NONE);
2573
2574 if( KERN_SUCCESS != err) {
2575 ref->mapped = 0;
2576 continue;
2577 }
2578 }
2579 else if (ref->src_map)
2580 {
2581 vm_prot_t cur_prot, max_prot;
2582 err = mach_vm_remap(map, &ref->mapped, ref->size, PAGE_MASK,
2583 (ref->options & kIOMapAnywhere) ? TRUE : FALSE,
2584 ref->src_map, ref->src_address,
2585 FALSE /* copy */,
2586 &cur_prot,
2587 &max_prot,
2588 VM_INHERIT_NONE);
2589 if (KERN_SUCCESS == err)
2590 {
2591 if ((!(VM_PROT_READ & cur_prot))
2592 || (!(kIOMapReadOnly & ref->options) && !(VM_PROT_WRITE & cur_prot)))
2593 {
2594 mach_vm_deallocate(map, ref->mapped, ref->size);
2595 err = KERN_PROTECTION_FAILURE;
2596 }
2597 }
2598 if (KERN_SUCCESS != err)
2599 ref->mapped = 0;
2600 }
2601 else
2602 {
2603 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2604 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2605 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2606 if( KERN_SUCCESS != err) {
2607 ref->mapped = 0;
2608 continue;
2609 }
2610 // Make sure these allocations don't get copied if we fork.
2611 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2612 assert( KERN_SUCCESS == err );
2613 }
2614 }
2615 while( false );
2616
2617 return( err );
2618 }
2619
2620 kern_return_t
2621 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2622 mach_vm_size_t offset,
2623 mach_vm_address_t * address, mach_vm_size_t length)
2624 {
2625 IOReturn err;
2626 IOMemoryDescriptorMapAllocRef ref;
2627
2628 ref.sharedMem = entry;
2629 ref.src_map = NULL;
2631 ref.sourceOffset = trunc_page_64(offset);
2632 ref.options = options;
2633 ref.size = length;
2634
2635 if (options & kIOMapAnywhere)
2636 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2637 ref.mapped = 0;
2638 else
2639 ref.mapped = *address;
2640
2641 if( ref.sharedMem && (map == kernel_map) && pageable)
2642 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2643 else
2644 err = IOMemoryDescriptorMapAlloc( map, &ref );
2645
2646 *address = ref.mapped;
2647 return (err);
2648 }
2649
2650 kern_return_t
2651 IOMemoryDescriptorMapCopy(vm_map_t map,
2652 vm_map_t src_map,
2653 mach_vm_offset_t src_address,
2654 IOOptionBits options,
2655 mach_vm_size_t offset,
2656 mach_vm_address_t * address, mach_vm_size_t length)
2657 {
2658 IOReturn err;
2659 IOMemoryDescriptorMapAllocRef ref;
2660
2661 ref.sharedMem = NULL;
2662 ref.src_map = src_map;
2663 ref.src_address = src_address;
2664 ref.sourceOffset = trunc_page_64(offset);
2665 ref.options = options;
2666 ref.size = length;
2667
2668 if (options & kIOMapAnywhere)
2669 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2670 ref.mapped = 0;
2671 else
2672 ref.mapped = *address;
2673
2674 if (map == kernel_map)
2675 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2676 else
2677 err = IOMemoryDescriptorMapAlloc(map, &ref);
2678
2679 *address = ref.mapped;
2680 return (err);
2681 }
2682
2683 IOReturn IOMemoryDescriptor::doMap(
2684 vm_map_t __addressMap,
2685 IOVirtualAddress * __address,
2686 IOOptionBits options,
2687 IOByteCount __offset,
2688 IOByteCount __length )
2689 {
2690 #ifndef __LP64__
2691 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2692 #endif /* !__LP64__ */
2693
2694 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2695 mach_vm_size_t offset = mapping->fOffset + __offset;
2696 mach_vm_size_t length = mapping->fLength;
2697
2698 IOReturn err = kIOReturnSuccess;
2699 memory_object_t pager;
2700 mach_vm_size_t pageOffset;
2701 IOPhysicalAddress sourceAddr;
2702 unsigned int lock_count;
2703
2704 do
2705 {
2706 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2707 pageOffset = sourceAddr - trunc_page( sourceAddr );
2708
2709 if( reserved)
2710 pager = (memory_object_t) reserved->devicePager;
2711 else
2712 pager = MACH_PORT_NULL;
2713
2714 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2715 {
2716 upl_t redirUPL2;
2717 vm_size_t size;
2718 int flags;
2719
2720 if (!_memEntry)
2721 {
2722 err = kIOReturnNotReadable;
2723 continue;
2724 }
2725
2726 size = round_page(mapping->fLength + pageOffset);
2727 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2728 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2729
2730 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2731 NULL, NULL,
2732 &flags))
2733 redirUPL2 = NULL;
2734
2735 for (lock_count = 0;
2736 IORecursiveLockHaveLock(gIOMemoryLock);
2737 lock_count++) {
2738 UNLOCK;
2739 }
2740 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2741 for (;
2742 lock_count;
2743 lock_count--) {
2744 LOCK;
2745 }
2746
2747 if (kIOReturnSuccess != err)
2748 {
2749 IOLog("upl_transpose(%x)\n", err);
2750 err = kIOReturnSuccess;
2751 }
2752
2753 if (redirUPL2)
2754 {
2755 upl_commit(redirUPL2, NULL, 0);
2756 upl_deallocate(redirUPL2);
2757 redirUPL2 = 0;
2758 }
2759 {
2760 // swap the memEntries since they now refer to different vm_objects
2761 void * me = _memEntry;
2762 _memEntry = mapping->fMemory->_memEntry;
2763 mapping->fMemory->_memEntry = me;
2764 }
2765 if (pager)
2766 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2767 }
2768 else
2769 {
2770 mach_vm_address_t address;
2771
2772 if (!(options & kIOMapAnywhere))
2773 {
2774 address = trunc_page_64(mapping->fAddress);
2775 if( (mapping->fAddress - address) != pageOffset)
2776 {
2777 err = kIOReturnVMError;
2778 continue;
2779 }
2780 }
2781
2782 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2783 options, (kIOMemoryBufferPageable & _flags),
2784 offset, &address, round_page_64(length + pageOffset));
2785 if( err != KERN_SUCCESS)
2786 continue;
2787
2788 if (!_memEntry || pager)
2789 {
2790 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2791 if (err != KERN_SUCCESS)
2792 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2793 }
2794
2795 #if DEBUG
2796 if (kIOLogMapping & gIOKitDebug)
2797 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2798 err, this, sourceAddr, mapping, address, offset, length);
2799 #endif
2800
2801 if (err == KERN_SUCCESS)
2802 mapping->fAddress = address + pageOffset;
2803 else
2804 mapping->fAddress = NULL;
2805 }
2806 }
2807 while( false );
2808
2809 return (err);
2810 }
2811
2812 IOReturn IOMemoryDescriptor::handleFault(
2813 void * _pager,
2814 vm_map_t addressMap,
2815 mach_vm_address_t address,
2816 mach_vm_size_t sourceOffset,
2817 mach_vm_size_t length,
2818 IOOptionBits options )
2819 {
2820 IOReturn err = kIOReturnSuccess;
2821 memory_object_t pager = (memory_object_t) _pager;
2822 mach_vm_size_t size;
2823 mach_vm_size_t bytes;
2824 mach_vm_size_t page;
2825 mach_vm_size_t pageOffset;
2826 mach_vm_size_t pagerOffset;
2827 IOPhysicalLength segLen;
2828 addr64_t physAddr;
2829
2830 if( !addressMap)
2831 {
2832 if( kIOMemoryRedirected & _flags)
2833 {
2834 #if DEBUG
2835 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2836 #endif
2837 do {
2838 SLEEP;
2839 } while( kIOMemoryRedirected & _flags );
2840 }
2841
2842 return( kIOReturnSuccess );
2843 }
2844
2845 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2846 assert( physAddr );
2847 pageOffset = physAddr - trunc_page_64( physAddr );
2848 pagerOffset = sourceOffset;
2849
2850 size = length + pageOffset;
2851 physAddr -= pageOffset;
2852
2853 segLen += pageOffset;
2854 bytes = size;
2855 do
2856 {
2857 // in the middle of the loop only map whole pages
2858 if( segLen >= bytes)
2859 segLen = bytes;
2860 else if( segLen != trunc_page( segLen))
2861 err = kIOReturnVMError;
2862 if( physAddr != trunc_page_64( physAddr))
2863 err = kIOReturnBadArgument;
2864 if (kIOReturnSuccess != err)
2865 break;
2866
2867 #if DEBUG
2868 if( kIOLogMapping & gIOKitDebug)
2869 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2870 addressMap, address + pageOffset, physAddr + pageOffset,
2871 segLen - pageOffset);
2872 #endif
2873
2874
2875 if( pager) {
2876 if( reserved && reserved->pagerContig) {
2877 IOPhysicalLength allLen;
2878 addr64_t allPhys;
2879
2880 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2881 assert( allPhys );
2882 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2883 }
2884 else
2885 {
2886
2887 for( page = 0;
2888 (page < segLen) && (KERN_SUCCESS == err);
2889 page += page_size)
2890 {
2891 err = device_pager_populate_object(pager, pagerOffset,
2892 (ppnum_t)(atop_64(physAddr + page)), page_size);
2893 pagerOffset += page_size;
2894 }
2895 }
2896 assert( KERN_SUCCESS == err );
2897 if( err)
2898 break;
2899 }
2900
2901 // This call to vm_fault causes an early pmap-level resolution
2902 // of the kernel mappings created above, since faulting them in
2903 // later cannot take place at interrupt level.
2904 /* *** ALERT *** */
2905 /* *** Temporary Workaround *** */
2906
2907 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2908 {
2909 vm_fault(addressMap,
2910 (vm_map_offset_t)address,
2911 VM_PROT_READ|VM_PROT_WRITE,
2912 FALSE, THREAD_UNINT, NULL,
2913 (vm_map_offset_t)0);
2914 }
2915
2916 /* *** Temporary Workaround *** */
2917 /* *** ALERT *** */
2918
2919 sourceOffset += segLen - pageOffset;
2920 address += segLen;
2921 bytes -= segLen;
2922 pageOffset = 0;
2923
2924 }
2925 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
2926
2927 if (bytes)
2928 err = kIOReturnBadArgument;
2929
2930 return (err);
2931 }
2932
2933 IOReturn IOMemoryDescriptor::doUnmap(
2934 vm_map_t addressMap,
2935 IOVirtualAddress __address,
2936 IOByteCount __length )
2937 {
2938 IOReturn err;
2939 mach_vm_address_t address;
2940 mach_vm_size_t length;
2941
2942 if (__length)
2943 {
2944 address = __address;
2945 length = __length;
2946 }
2947 else
2948 {
2949 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
2950 address = ((IOMemoryMap *) __address)->fAddress;
2951 length = ((IOMemoryMap *) __address)->fLength;
2952 }
2953
2954 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2955 addressMap = IOPageableMapForAddress( address );
2956
2957 #if DEBUG
2958 if( kIOLogMapping & gIOKitDebug)
2959 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
2960 addressMap, address, length );
2961 #endif
2962
2963 err = mach_vm_deallocate( addressMap, address, length );
2964
2965 return (err);
2966 }
2967
2968 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2969 {
2970 IOReturn err = kIOReturnSuccess;
2971 IOMemoryMap * mapping = 0;
2972 OSIterator * iter;
2973
2974 LOCK;
2975
2976 if( doRedirect)
2977 _flags |= kIOMemoryRedirected;
2978 else
2979 _flags &= ~kIOMemoryRedirected;
2980
2981 do {
2982 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2983 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
2984 mapping->redirect( safeTask, doRedirect );
2985
2986 iter->release();
2987 }
2988 } while( false );
2989
2990 if (!doRedirect)
2991 {
2992 WAKEUP;
2993 }
2994
2995 UNLOCK;
2996
2997 #ifndef __LP64__
2998 // temporary binary compatibility
2999 IOSubMemoryDescriptor * subMem;
3000 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3001 err = subMem->redirect( safeTask, doRedirect );
3002 else
3003 err = kIOReturnSuccess;
3004 #endif /* !__LP64__ */
3005
3006 return( err );
3007 }
3008
3009 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3010 {
3011 IOReturn err = kIOReturnSuccess;
3012
3013 if( fSuperMap) {
3014 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3015 } else {
3016
3017 LOCK;
3018
3019 do
3020 {
3021 if (!fAddress)
3022 break;
3023 if (!fAddressMap)
3024 break;
3025
3026 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3027 && (0 == (fOptions & kIOMapStatic)))
3028 {
3029 IOUnmapPages( fAddressMap, fAddress, fLength );
3030 err = kIOReturnSuccess;
3031 #if DEBUG
3032 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3033 #endif
3034 }
3035 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3036 {
3037 IOOptionBits newMode;
3038 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3039 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3040 }
3041 }
3042 while (false);
3043 UNLOCK;
3044 }
3045
3046 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3047 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3048 && safeTask
3049 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3050 fMemory->redirect(safeTask, doRedirect);
3051
3052 return( err );
3053 }
3054
3055 IOReturn IOMemoryMap::unmap( void )
3056 {
3057 IOReturn err;
3058
3059 LOCK;
3060
3061 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3062 && (0 == (fOptions & kIOMapStatic))) {
3063
3064 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3065
3066 } else
3067 err = kIOReturnSuccess;
3068
3069 if (fAddressMap)
3070 {
3071 vm_map_deallocate(fAddressMap);
3072 fAddressMap = 0;
3073 }
3074
3075 fAddress = 0;
3076
3077 UNLOCK;
3078
3079 return( err );
3080 }
3081
3082 void IOMemoryMap::taskDied( void )
3083 {
3084 LOCK;
3085 if (fUserClientUnmap)
3086 unmap();
3087 if( fAddressMap) {
3088 vm_map_deallocate(fAddressMap);
3089 fAddressMap = 0;
3090 }
3091 fAddressTask = 0;
3092 fAddress = 0;
3093 UNLOCK;
3094 }
3095
3096 IOReturn IOMemoryMap::userClientUnmap( void )
3097 {
3098 fUserClientUnmap = true;
3099 return (kIOReturnSuccess);
3100 }
3101
3102 // Overload the release mechanism. All mappings must be a member
3103 // of a memory descriptor's _mappings set. This means that we
3104 // always have 2 references on a mapping. When either of these
3105 // references is released we need to free ourselves.
3106 void IOMemoryMap::taggedRelease(const void *tag) const
3107 {
3108 LOCK;
3109 super::taggedRelease(tag, 2);
3110 UNLOCK;
3111 }
3112
3113 void IOMemoryMap::free()
3114 {
3115 unmap();
3116
3117 if (fMemory)
3118 {
3119 LOCK;
3120 fMemory->removeMapping(this);
3121 UNLOCK;
3122 fMemory->release();
3123 }
3124
3125 if (fOwner && (fOwner != fMemory))
3126 {
3127 LOCK;
3128 fOwner->removeMapping(this);
3129 UNLOCK;
3130 }
3131
3132 if (fSuperMap)
3133 fSuperMap->release();
3134
3135 if (fRedirUPL) {
3136 upl_commit(fRedirUPL, NULL, 0);
3137 upl_deallocate(fRedirUPL);
3138 }
3139
3140 super::free();
3141 }
3142
3143 IOByteCount IOMemoryMap::getLength()
3144 {
3145 return( fLength );
3146 }
3147
3148 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3149 {
3150 #ifndef __LP64__
3151 if (fSuperMap)
3152 fSuperMap->getVirtualAddress();
3153 else if (fAddressMap
3154 && vm_map_is_64bit(fAddressMap)
3155 && (sizeof(IOVirtualAddress) < 8))
3156 {
3157 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3158 }
3159 #endif /* !__LP64__ */
3160
3161 return (fAddress);
3162 }
3163
3164 #ifndef __LP64__
3165 mach_vm_address_t IOMemoryMap::getAddress()
3166 {
3167 return( fAddress);
3168 }
3169
3170 mach_vm_size_t IOMemoryMap::getSize()
3171 {
3172 return( fLength );
3173 }
3174 #endif /* !__LP64__ */
3175
3176
3177 task_t IOMemoryMap::getAddressTask()
3178 {
3179 if( fSuperMap)
3180 return( fSuperMap->getAddressTask());
3181 else
3182 return( fAddressTask);
3183 }
3184
3185 IOOptionBits IOMemoryMap::getMapOptions()
3186 {
3187 return( fOptions);
3188 }
3189
3190 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3191 {
3192 return( fMemory );
3193 }
3194
3195 IOMemoryMap * IOMemoryMap::copyCompatible(
3196 IOMemoryMap * newMapping )
3197 {
3198 task_t task = newMapping->getAddressTask();
3199 mach_vm_address_t toAddress = newMapping->fAddress;
3200 IOOptionBits _options = newMapping->fOptions;
3201 mach_vm_size_t _offset = newMapping->fOffset;
3202 mach_vm_size_t _length = newMapping->fLength;
3203
3204 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3205 return( 0 );
3206 if( (fOptions ^ _options) & kIOMapReadOnly)
3207 return( 0 );
3208 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3209 && ((fOptions ^ _options) & kIOMapCacheMask))
3210 return( 0 );
3211
3212 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3213 return( 0 );
3214
3215 if( _offset < fOffset)
3216 return( 0 );
3217
3218 _offset -= fOffset;
3219
3220 if( (_offset + _length) > fLength)
3221 return( 0 );
3222
3223 retain();
3224 if( (fLength == _length) && (!_offset))
3225 {
3226 newMapping->release();
3227 newMapping = this;
3228 }
3229 else
3230 {
3231 newMapping->fSuperMap = this;
3232 newMapping->fOffset = _offset;
3233 newMapping->fAddress = fAddress + _offset;
3234 }
3235
3236 return( newMapping );
3237 }
3238
3239 IOPhysicalAddress
3240 #ifdef __LP64__
3241 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3242 #else /* !__LP64__ */
3243 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3244 #endif /* !__LP64__ */
3245 {
3246 IOPhysicalAddress address;
3247
3248 LOCK;
3249 #ifdef __LP64__
3250 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3251 #else /* !__LP64__ */
3252 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3253 #endif /* !__LP64__ */
3254 UNLOCK;
3255
3256 return( address );
3257 }
3258
3259 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3260
3261 #undef super
3262 #define super OSObject
3263
3264 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3265
3266 void IOMemoryDescriptor::initialize( void )
3267 {
3268 if( 0 == gIOMemoryLock)
3269 gIOMemoryLock = IORecursiveLockAlloc();
3270
3271 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3272 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3273 if (!gIOCopyMapper)
3274 {
3275 IOMapper *
3276 mapper = new IOCopyMapper;
3277 if (mapper)
3278 {
3279 if (mapper->init() && mapper->start(NULL))
3280 gIOCopyMapper = (IOCopyMapper *) mapper;
3281 else
3282 mapper->release();
3283 }
3284 }
3285
3286 gIOLastPage = IOGetLastPageNumber();
3287 }
3288
3289 void IOMemoryDescriptor::free( void )
3290 {
3291 if( _mappings)
3292 _mappings->release();
3293
3294 super::free();
3295 }
3296
3297 IOMemoryMap * IOMemoryDescriptor::setMapping(
3298 task_t intoTask,
3299 IOVirtualAddress mapAddress,
3300 IOOptionBits options )
3301 {
3302 return (createMappingInTask( intoTask, mapAddress,
3303 options | kIOMapStatic,
3304 0, getLength() ));
3305 }
3306
3307 IOMemoryMap * IOMemoryDescriptor::map(
3308 IOOptionBits options )
3309 {
3310 return (createMappingInTask( kernel_task, 0,
3311 options | kIOMapAnywhere,
3312 0, getLength() ));
3313 }
3314
3315 #ifndef __LP64__
3316 IOMemoryMap * IOMemoryDescriptor::map(
3317 task_t intoTask,
3318 IOVirtualAddress atAddress,
3319 IOOptionBits options,
3320 IOByteCount offset,
3321 IOByteCount length )
3322 {
3323 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3324 {
3325 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3326 return (0);
3327 }
3328
3329 return (createMappingInTask(intoTask, atAddress,
3330 options, offset, length));
3331 }
3332 #endif /* !__LP64__ */
3333
3334 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3335 task_t intoTask,
3336 mach_vm_address_t atAddress,
3337 IOOptionBits options,
3338 mach_vm_size_t offset,
3339 mach_vm_size_t length)
3340 {
3341 IOMemoryMap * result;
3342 IOMemoryMap * mapping;
3343
3344 if (0 == length)
3345 length = getLength();
3346
3347 mapping = new IOMemoryMap;
3348
3349 if( mapping
3350 && !mapping->init( intoTask, atAddress,
3351 options, offset, length )) {
3352 mapping->release();
3353 mapping = 0;
3354 }
3355
3356 if (mapping)
3357 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3358 else
3359 result = 0;
3360
3361 #if DEBUG
3362 if (!result)
3363 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3364 this, atAddress, options, offset, length);
3365 #endif
3366
3367 return (result);
3368 }
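/*
 * Usage sketch (illustrative only, not part of this translation unit):
 * mapping an existing descriptor into the kernel task with
 * createMappingInTask(). 'md' stands in for a descriptor the caller
 * already owns.
 *
 *	IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *				kIOMapAnywhere | kIOMapReadOnly, 0, 0);
 *	if (map)
 *	{
 *	    IOVirtualAddress va = map->getVirtualAddress();
 *	    // ... access the memory through va ...
 *	    map->release();	// drop the caller's reference when done
 *	}
 */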
3369
3370 #ifndef __LP64__ // there is only a 64 bit version for LP64
3371 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3372 IOOptionBits options,
3373 IOByteCount offset)
3374 {
3375 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3376 }
3377 #endif
3378
3379 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3380 IOOptionBits options,
3381 mach_vm_size_t offset)
3382 {
3383 IOReturn err = kIOReturnSuccess;
3384 IOMemoryDescriptor * physMem = 0;
3385
3386 LOCK;
3387
3388 if (fAddress && fAddressMap) do
3389 {
3390 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3391 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3392 {
3393 physMem = fMemory;
3394 physMem->retain();
3395 }
3396
3397 if (!fRedirUPL)
3398 {
3399 vm_size_t size = round_page(fLength);
3400 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3401 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3402 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3403 NULL, NULL,
3404 &flags))
3405 fRedirUPL = 0;
3406
3407 if (physMem)
3408 {
3409 IOUnmapPages( fAddressMap, fAddress, fLength );
3410 if (false)
3411 physMem->redirect(0, true);
3412 }
3413 }
3414
3415 if (newBackingMemory)
3416 {
3417 if (newBackingMemory != fMemory)
3418 {
3419 fOffset = 0;
3420 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3421 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3422 offset, fLength))
3423 err = kIOReturnError;
3424 }
3425 if (fRedirUPL)
3426 {
3427 upl_commit(fRedirUPL, NULL, 0);
3428 upl_deallocate(fRedirUPL);
3429 fRedirUPL = 0;
3430 }
3431 if (false && physMem)
3432 physMem->redirect(0, false);
3433 }
3434 }
3435 while (false);
3436
3437 UNLOCK;
3438
3439 if (physMem)
3440 physMem->release();
3441
3442 return (err);
3443 }
3444
3445 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3446 IOMemoryDescriptor * owner,
3447 task_t __intoTask,
3448 IOVirtualAddress __address,
3449 IOOptionBits options,
3450 IOByteCount __offset,
3451 IOByteCount __length )
3452 {
3453 #ifndef __LP64__
3454 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3455 #endif /* !__LP64__ */
3456
3457 IOMemoryDescriptor * mapDesc = 0;
3458 IOMemoryMap * result = 0;
3459 OSIterator * iter;
3460
3461 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3462 mach_vm_size_t offset = mapping->fOffset + __offset;
3463 mach_vm_size_t length = mapping->fLength;
3464
3465 mapping->fOffset = offset;
3466
3467 LOCK;
3468
3469 do
3470 {
3471 if (kIOMapStatic & options)
3472 {
3473 result = mapping;
3474 addMapping(mapping);
3475 mapping->setMemoryDescriptor(this, 0);
3476 continue;
3477 }
3478
3479 if (kIOMapUnique & options)
3480 {
3481 IOPhysicalAddress phys;
3482 IOByteCount physLen;
3483
3484 // if (owner != this) continue;
3485
3486 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3487 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3488 {
3489 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3490 if (!phys || (physLen < length))
3491 continue;
3492
3493 mapDesc = IOMemoryDescriptor::withAddressRange(
3494 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3495 if (!mapDesc)
3496 continue;
3497 offset = 0;
3498 mapping->fOffset = offset;
3499 }
3500 }
3501 else
3502 {
3503 // look for a compatible existing mapping
3504 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3505 {
3506 IOMemoryMap * lookMapping;
3507 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3508 {
3509 if ((result = lookMapping->copyCompatible(mapping)))
3510 {
3511 addMapping(result);
3512 result->setMemoryDescriptor(this, offset);
3513 break;
3514 }
3515 }
3516 iter->release();
3517 }
3518 if (result || (options & kIOMapReference))
3519 continue;
3520 }
3521
3522 if (!mapDesc)
3523 {
3524 mapDesc = this;
3525 mapDesc->retain();
3526 }
3527 IOReturn
3528 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3529 if (kIOReturnSuccess == kr)
3530 {
3531 result = mapping;
3532 mapDesc->addMapping(result);
3533 result->setMemoryDescriptor(mapDesc, offset);
3534 }
3535 else
3536 {
3537 mapping->release();
3538 mapping = NULL;
3539 }
3540 }
3541 while( false );
3542
3543 UNLOCK;
3544
3545 if (mapDesc)
3546 mapDesc->release();
3547
3548 return (result);
3549 }
3550
3551 void IOMemoryDescriptor::addMapping(
3552 IOMemoryMap * mapping )
3553 {
3554 if( mapping)
3555 {
3556 if( 0 == _mappings)
3557 _mappings = OSSet::withCapacity(1);
3558 if( _mappings )
3559 _mappings->setObject( mapping );
3560 }
3561 }
3562
3563 void IOMemoryDescriptor::removeMapping(
3564 IOMemoryMap * mapping )
3565 {
3566 if( _mappings)
3567 _mappings->removeObject( mapping);
3568 }
3569
3570 #ifndef __LP64__
3571 // obsolete initializers
3572 // - initWithOptions is the designated initializer
3573 bool
3574 IOMemoryDescriptor::initWithAddress(void * address,
3575 IOByteCount length,
3576 IODirection direction)
3577 {
3578 return( false );
3579 }
3580
3581 bool
3582 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3583 IOByteCount length,
3584 IODirection direction,
3585 task_t task)
3586 {
3587 return( false );
3588 }
3589
3590 bool
3591 IOMemoryDescriptor::initWithPhysicalAddress(
3592 IOPhysicalAddress address,
3593 IOByteCount length,
3594 IODirection direction )
3595 {
3596 return( false );
3597 }
3598
3599 bool
3600 IOMemoryDescriptor::initWithRanges(
3601 IOVirtualRange * ranges,
3602 UInt32 withCount,
3603 IODirection direction,
3604 task_t task,
3605 bool asReference)
3606 {
3607 return( false );
3608 }
3609
3610 bool
3611 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3612 UInt32 withCount,
3613 IODirection direction,
3614 bool asReference)
3615 {
3616 return( false );
3617 }
3618
3619 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3620 IOByteCount * lengthOfSegment)
3621 {
3622 return( 0 );
3623 }
3624 #endif /* !__LP64__ */
3625
3626 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3627
3628 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3629 {
3630 OSSymbol const *keys[2];
3631 OSObject *values[2];
3632 struct SerData {
3633 user_addr_t address;
3634 user_size_t length;
3635 } *vcopy;
3636 unsigned int index, nRanges;
3637 bool result;
3638
3639 IOOptionBits type = _flags & kIOMemoryTypeMask;
3640
3641 if (s == NULL) return false;
3642 if (s->previouslySerialized(this)) return true;
3643
3644 // Pretend we are an array.
3645 if (!s->addXMLStartTag(this, "array")) return false;
3646
3647 nRanges = _rangesCount;
3648 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3649 if (vcopy == 0) return false;
3650
3651 keys[0] = OSSymbol::withCString("address");
3652 keys[1] = OSSymbol::withCString("length");
3653
3654 result = false;
3655 values[0] = values[1] = 0;
3656
3657 // From this point on, failures jump to the bail label.
3658
3659 // Copy the volatile data so we don't have to allocate memory
3660 // while the lock is held.
3661 LOCK;
3662 if (nRanges == _rangesCount) {
3663 Ranges vec = _ranges;
3664 for (index = 0; index < nRanges; index++) {
3665 user_addr_t addr; IOByteCount len;
3666 getAddrLenForInd(addr, len, type, vec, index);
3667 vcopy[index].address = addr;
3668 vcopy[index].length = len;
3669 }
3670 } else {
3671 // The descriptor changed out from under us. Give up.
3672 UNLOCK;
3673 result = false;
3674 goto bail;
3675 }
3676 UNLOCK;
3677
3678 for (index = 0; index < nRanges; index++)
3679 {
3680 user_addr_t addr = vcopy[index].address;
3681 IOByteCount len = (IOByteCount) vcopy[index].length;
3682 values[0] =
3683 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3684 if (values[0] == 0) {
3685 result = false;
3686 goto bail;
3687 }
3688 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3689 if (values[1] == 0) {
3690 result = false;
3691 goto bail;
3692 }
3693 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3694 if (dict == 0) {
3695 result = false;
3696 goto bail;
3697 }
3698 values[0]->release();
3699 values[1]->release();
3700 values[0] = values[1] = 0;
3701
3702 result = dict->serialize(s);
3703 dict->release();
3704 if (!result) {
3705 goto bail;
3706 }
3707 }
3708 result = s->addXMLEndTag("array");
3709
3710 bail:
3711 if (values[0])
3712 values[0]->release();
3713 if (values[1])
3714 values[1]->release();
3715 if (keys[0])
3716 keys[0]->release();
3717 if (keys[1])
3718 keys[1]->release();
3719 if (vcopy)
3720 IOFree(vcopy, sizeof(SerData) * nRanges);
3721 return result;
3722 }
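/*
 * Illustrative sketch (not part of this translation unit): producing and
 * logging the serialized form generated above. 'md' stands in for an
 * existing descriptor; the output is an array of { address, length }
 * dictionaries.
 *
 *	OSSerialize * s = OSSerialize::withCapacity(PAGE_SIZE);
 *	if (s && md->serialize(s))
 *	    IOLog("%s\n", s->text());
 *	if (s)
 *	    s->release();
 */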
3723
3724 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3725
3726 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3727 #ifdef __LP64__
3728 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3729 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3730 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3731 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3732 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3733 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3734 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3735 #else /* !__LP64__ */
3736 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3737 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3738 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3739 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3740 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3741 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3742 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3743 #endif /* !__LP64__ */
3744 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3745 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3746 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3747 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3748 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3749 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3750 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3751 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3752
3753 /* ex-inline function implementation */
3754 IOPhysicalAddress
3755 IOMemoryDescriptor::getPhysicalAddress()
3756 { return( getPhysicalSegment( 0, 0 )); }
3757
3758
3759