[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp (xnu-1504.9.17)
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53
54 #include <libkern/c++/OSContainers.h>
55 #include <libkern/c++/OSDictionary.h>
56 #include <libkern/c++/OSArray.h>
57 #include <libkern/c++/OSSymbol.h>
58 #include <libkern/c++/OSNumber.h>
59
60 #include <sys/uio.h>
61
62 __BEGIN_DECLS
63 #include <vm/pmap.h>
64 #include <vm/vm_pageout.h>
65 #include <mach/memory_object_types.h>
66 #include <device/device_port.h>
67
68 #include <mach/vm_prot.h>
69 #include <mach/mach_vm.h>
70 #include <vm/vm_fault.h>
71 #include <vm/vm_protos.h>
72
73 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
74 void ipc_port_release_send(ipc_port_t port);
75
76 /* Copy between a physical page and a virtual address in the given vm_map */
77 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
78
79 memory_object_t
80 device_pager_setup(
81 memory_object_t pager,
82 uintptr_t device_handle,
83 vm_size_t size,
84 int flags);
85 void
86 device_pager_deallocate(
87 memory_object_t);
88 kern_return_t
89 device_pager_populate_object(
90 memory_object_t pager,
91 vm_object_offset_t offset,
92 ppnum_t phys_addr,
93 vm_size_t size);
94 kern_return_t
95 memory_object_iopl_request(
96 ipc_port_t port,
97 memory_object_offset_t offset,
98 vm_size_t *upl_size,
99 upl_t *upl_ptr,
100 upl_page_info_array_t user_page_list,
101 unsigned int *page_list_count,
102 int *flags);
103
104 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
105
106 __END_DECLS
107
108 #define kIOMaximumMappedIOByteCount (512*1024*1024)
109
110 static IOMapper * gIOSystemMapper = NULL;
111
112 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
113
114 ppnum_t gIOLastPage;
115
116 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
117
118 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
119
120 #define super IOMemoryDescriptor
121
122 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 static IORecursiveLock * gIOMemoryLock;
127
128 #define LOCK IORecursiveLockLock( gIOMemoryLock)
129 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
130 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
131 #define WAKEUP \
132 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
133
134 #if 0
135 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
136 #else
137 #define DEBG(fmt, args...) {}
138 #endif
139
140 #define IOMD_DEBUG_DMAACTIVE 1
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // Some data structures and accessor macros used by the initWithOptions
145 // function.
146
147 enum ioPLBlockFlags {
148 kIOPLOnDevice = 0x00000001,
149 kIOPLExternUPL = 0x00000002,
150 };
151
152 struct typePersMDData
153 {
154 const IOGeneralMemoryDescriptor *fMD;
155 ipc_port_t fMemEntry;
156 };
157
158 struct ioPLBlock {
159 upl_t fIOPL;
160 vm_address_t fPageInfo; // Pointer to page list or index into it
161 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
162 ppnum_t fMappedBase; // Page number of first page in this iopl
163 unsigned int fPageOffset; // Offset within first page of iopl
164 unsigned int fFlags; // Flags
165 };
166
167 struct ioGMDData {
168 IOMapper *fMapper;
169 uint64_t fPreparationID;
170 unsigned int fPageCnt;
171 #if __LP64__
172 // align arrays to 8 bytes so following macros work
173 unsigned int fPad;
174 #endif
175 upl_page_info_t fPageList[];
176 ioPLBlock fBlocks[];
177 };
178
179 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
180 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
181 #define getNumIOPL(osd, d) \
182 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
183 #define getPageList(d) (&(d->fPageList[0]))
184 #define computeDataSize(p, u) \
185 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
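
/*
 * Layout sketch for the OSData kept in _memoryEntries (illustrative only):
 * a single allocation holds the ioGMDData header, then fPageCnt
 * upl_page_info_t entries, then one ioPLBlock per IOPL, so
 *
 *    computeDataSize(p, u) == sizeof(ioGMDData)
 *                             + p * sizeof(upl_page_info_t)
 *                             + u * sizeof(ioPLBlock)
 *
 * and the accessors simply index into that allocation, e.g.:
 *
 *    ioGMDData * dataP    = getDataP(_memoryEntries);
 *    ioPLBlock * ioplList = getIOPLList(dataP);   // == &dataP->fPageList[dataP->fPageCnt]
 *    UInt        numIOPLs = getNumIOPL(_memoryEntries, dataP);
 */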
186
187
188 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
189
190 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
191
192
193 extern "C" {
194
195 kern_return_t device_data_action(
196 uintptr_t device_handle,
197 ipc_port_t device_pager,
198 vm_prot_t protection,
199 vm_object_offset_t offset,
200 vm_size_t size)
201 {
202 struct ExpansionData {
203 void * devicePager;
204 unsigned int pagerContig:1;
205 unsigned int unused:31;
206 IOMemoryDescriptor * memory;
207 };
208 kern_return_t kr;
209 ExpansionData * ref = (ExpansionData *) device_handle;
210 IOMemoryDescriptor * memDesc;
211
212 LOCK;
213 memDesc = ref->memory;
214 if( memDesc)
215 {
216 memDesc->retain();
217 kr = memDesc->handleFault( device_pager, 0, 0,
218 offset, size, kIOMapDefaultCache /*?*/);
219 memDesc->release();
220 }
221 else
222 kr = KERN_ABORTED;
223 UNLOCK;
224
225 return( kr );
226 }
227
228 kern_return_t device_close(
229 uintptr_t device_handle)
230 {
231 struct ExpansionData {
232 void * devicePager;
233 unsigned int pagerContig:1;
234 unsigned int unused:31;
235 IOMemoryDescriptor * memory;
236 };
237 ExpansionData * ref = (ExpansionData *) device_handle;
238
239 IODelete( ref, ExpansionData, 1 );
240
241 return( kIOReturnSuccess );
242 }
243 }; // end extern "C"
244
245 // Note this inline function uses C++ reference arguments to return values.
246 // This means that pointers are not passed and NULLs don't have to be
247 // checked for, as a NULL reference is illegal.
248 static inline void
249 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
250 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
251 {
252 assert(kIOMemoryTypeUIO == type
253 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
254 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
255 if (kIOMemoryTypeUIO == type) {
256 user_size_t us;
257 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
258 }
259 #ifndef __LP64__
260 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
261 IOAddressRange cur = r.v64[ind];
262 addr = cur.address;
263 len = cur.length;
264 }
265 #endif /* !__LP64__ */
266 else {
267 IOVirtualRange cur = r.v[ind];
268 addr = cur.address;
269 len = cur.length;
270 }
271 }
272
273 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
274
275 IOMemoryDescriptor *
276 IOMemoryDescriptor::withAddress(void * address,
277 IOByteCount length,
278 IODirection direction)
279 {
280 return IOMemoryDescriptor::
281 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
282 }
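
/*
 * Illustrative usage (hypothetical caller, not part of this file): wrapping a
 * kernel buffer already owned by the driver. Note this convenience form ORs in
 * kIOMemoryAutoPrepare, so the pages are wired for the life of the descriptor.
 *
 *    void * buf = IOMalloc(4096);                         // hypothetical buffer
 *    IOMemoryDescriptor * md =
 *        IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOutIn);
 *    if (md) {
 *        // ... use md, e.g. hand it to DMA machinery ...
 *        md->release();
 *    }
 *    if (buf) IOFree(buf, 4096);
 */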
283
284 #ifndef __LP64__
285 IOMemoryDescriptor *
286 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
287 IOByteCount length,
288 IODirection direction,
289 task_t task)
290 {
291 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
292 if (that)
293 {
294 if (that->initWithAddress(address, length, direction, task))
295 return that;
296
297 that->release();
298 }
299 return 0;
300 }
301 #endif /* !__LP64__ */
302
303 IOMemoryDescriptor *
304 IOMemoryDescriptor::withPhysicalAddress(
305 IOPhysicalAddress address,
306 IOByteCount length,
307 IODirection direction )
308 {
309 #ifdef __LP64__
310 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
311 #else /* !__LP64__ */
312 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
313 if (self
314 && !self->initWithPhysicalAddress(address, length, direction)) {
315 self->release();
316 return 0;
317 }
318
319 return self;
320 #endif /* !__LP64__ */
321 }
322
323 #ifndef __LP64__
324 IOMemoryDescriptor *
325 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
326 UInt32 withCount,
327 IODirection direction,
328 task_t task,
329 bool asReference)
330 {
331 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
332 if (that)
333 {
334 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
335 return that;
336
337 that->release();
338 }
339 return 0;
340 }
341 #endif /* !__LP64__ */
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
345 mach_vm_size_t length,
346 IOOptionBits options,
347 task_t task)
348 {
349 IOAddressRange range = { address, length };
350 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
351 }
352
353 IOMemoryDescriptor *
354 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
355 UInt32 rangeCount,
356 IOOptionBits options,
357 task_t task)
358 {
359 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
360 if (that)
361 {
362 if (task)
363 options |= kIOMemoryTypeVirtual64;
364 else
365 options |= kIOMemoryTypePhysical64;
366
367 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
368 return that;
369
370 that->release();
371 }
372
373 return 0;
374 }
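
/*
 * Illustrative usage (hypothetical caller): describing a buffer that lives in
 * a user task. As above, a non-NULL task selects kIOMemoryTypeVirtual64;
 * passing TASK_NULL would instead treat the range as physical
 * (kIOMemoryTypePhysical64).
 *
 *    IOMemoryDescriptor * md =
 *        IOMemoryDescriptor::withAddressRange(userAddr, userLen,   // hypothetical client values
 *                                             kIODirectionOutIn, userTask);
 *    if (md && (kIOReturnSuccess == md->prepare())) {
 *        // ... pages stay wired until complete() ...
 *        md->complete();
 *    }
 *    if (md) md->release();
 */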
375
376
377 /*
378 * withOptions:
379 *
380 * Create a new IOMemoryDescriptor. The buffer is made up of several
381 * virtual address ranges, from a given task.
382 *
383 * Passing the ranges as a reference will avoid an extra allocation.
384 */
385 IOMemoryDescriptor *
386 IOMemoryDescriptor::withOptions(void * buffers,
387 UInt32 count,
388 UInt32 offset,
389 task_t task,
390 IOOptionBits opts,
391 IOMapper * mapper)
392 {
393 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
394
395 if (self
396 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
397 {
398 self->release();
399 return 0;
400 }
401
402 return self;
403 }
404
405 bool IOMemoryDescriptor::initWithOptions(void * buffers,
406 UInt32 count,
407 UInt32 offset,
408 task_t task,
409 IOOptionBits options,
410 IOMapper * mapper)
411 {
412 return( false );
413 }
414
415 #ifndef __LP64__
416 IOMemoryDescriptor *
417 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
418 UInt32 withCount,
419 IODirection direction,
420 bool asReference)
421 {
422 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
423 if (that)
424 {
425 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
426 return that;
427
428 that->release();
429 }
430 return 0;
431 }
432
433 IOMemoryDescriptor *
434 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
435 IOByteCount offset,
436 IOByteCount length,
437 IODirection direction)
438 {
439 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
440 }
441 #endif /* !__LP64__ */
442
443 IOMemoryDescriptor *
444 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
445 {
446 IOGeneralMemoryDescriptor *origGenMD =
447 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
448
449 if (origGenMD)
450 return IOGeneralMemoryDescriptor::
451 withPersistentMemoryDescriptor(origGenMD);
452 else
453 return 0;
454 }
455
456 IOMemoryDescriptor *
457 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
458 {
459 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
460
461 if (!sharedMem)
462 return 0;
463
464 if (sharedMem == originalMD->_memEntry) {
465 originalMD->retain(); // Add a new reference to ourselves
466 ipc_port_release_send(sharedMem); // Remove extra send right
467 return originalMD;
468 }
469
470 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
471 typePersMDData initData = { originalMD, sharedMem };
472
473 if (self
474 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
475 self->release();
476 self = 0;
477 }
478 return self;
479 }
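
/*
 * Illustrative usage (hypothetical caller): a driver holding a descriptor that
 * was created with kIOMemoryPersistent can exchange it for one tied to the
 * backing named entry, so the same physical pages remain referenced even if
 * the client later remaps its buffer. When the named entry is unchanged the
 * original descriptor is simply returned with an extra retain (see above).
 *
 *    IOMemoryDescriptor * persistent =
 *        IOMemoryDescriptor::withPersistentMemoryDescriptor(md);
 *    if (persistent) {
 *        // ... persistent may now be used in place of md ...
 *    }
 */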
480
481 void *IOGeneralMemoryDescriptor::createNamedEntry()
482 {
483 kern_return_t error;
484 ipc_port_t sharedMem;
485
486 IOOptionBits type = _flags & kIOMemoryTypeMask;
487
488 user_addr_t range0Addr;
489 IOByteCount range0Len;
490 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
491 range0Addr = trunc_page_64(range0Addr);
492
493 vm_size_t size = ptoa_32(_pages);
494 vm_address_t kernelPage = (vm_address_t) range0Addr;
495
496 vm_map_t theMap = ((_task == kernel_task)
497 && (kIOMemoryBufferPageable & _flags))
498 ? IOPageableMapForAddress(kernelPage)
499 : get_task_map(_task);
500
501 memory_object_size_t actualSize = size;
502 vm_prot_t prot = VM_PROT_READ;
503 #if CONFIG_EMBEDDED
504 if (kIODirectionOut != (kIODirectionOutIn & _flags))
505 #endif
506 prot |= VM_PROT_WRITE;
507
508 if (_memEntry)
509 prot |= MAP_MEM_NAMED_REUSE;
510
511 error = mach_make_memory_entry_64(theMap,
512 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
513
514 if (KERN_SUCCESS == error) {
515 if (actualSize == size) {
516 return sharedMem;
517 } else {
518 #if IOASSERT
519 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
520 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
521 #endif
522 ipc_port_release_send( sharedMem );
523 }
524 }
525
526 return MACH_PORT_NULL;
527 }
528
529 #ifndef __LP64__
530 bool
531 IOGeneralMemoryDescriptor::initWithAddress(void * address,
532 IOByteCount withLength,
533 IODirection withDirection)
534 {
535 _singleRange.v.address = (vm_offset_t) address;
536 _singleRange.v.length = withLength;
537
538 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
539 }
540
541 bool
542 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
543 IOByteCount withLength,
544 IODirection withDirection,
545 task_t withTask)
546 {
547 _singleRange.v.address = address;
548 _singleRange.v.length = withLength;
549
550 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
551 }
552
553 bool
554 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
555 IOPhysicalAddress address,
556 IOByteCount withLength,
557 IODirection withDirection )
558 {
559 _singleRange.p.address = address;
560 _singleRange.p.length = withLength;
561
562 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
563 }
564
565 bool
566 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
567 IOPhysicalRange * ranges,
568 UInt32 count,
569 IODirection direction,
570 bool reference)
571 {
572 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
573
574 if (reference)
575 mdOpts |= kIOMemoryAsReference;
576
577 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
578 }
579
580 bool
581 IOGeneralMemoryDescriptor::initWithRanges(
582 IOVirtualRange * ranges,
583 UInt32 count,
584 IODirection direction,
585 task_t task,
586 bool reference)
587 {
588 IOOptionBits mdOpts = direction;
589
590 if (reference)
591 mdOpts |= kIOMemoryAsReference;
592
593 if (task) {
594 mdOpts |= kIOMemoryTypeVirtual;
595
596 // Auto-prepare if this is a kernel memory descriptor as very few
597 // clients bother to prepare() kernel memory.
598 // But it was not enforced so what are you going to do?
599 if (task == kernel_task)
600 mdOpts |= kIOMemoryAutoPrepare;
601 }
602 else
603 mdOpts |= kIOMemoryTypePhysical;
604
605 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
606 }
607 #endif /* !__LP64__ */
608
609 /*
610 * initWithOptions:
611 *
612 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
613 * address ranges from a given task, several physical ranges, a UPL from the
614 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
615 *
616 * Passing the ranges as a reference will avoid an extra allocation.
617 *
618 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
619 * existing instance -- note this behavior is not commonly supported in other
620 * I/O Kit classes, although it is supported here.
621 */
622
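/*
 * Illustrative re-use sketch (hypothetical caller): because initWithOptions()
 * may be called again on a live IOGeneralMemoryDescriptor, a driver can
 * retarget a single instance at a new range instead of allocating a fresh
 * object; the previous wiring, ranges and named entry are torn down first.
 *
 *    IOAddressRange range = { newAddr, newLen };        // hypothetical values
 *    if (!gmd->initWithOptions(&range, 1, 0, task,
 *                              kIOMemoryTypeVirtual64 | kIODirectionOutIn,
 *                              0)) {                    // no explicit mapper
 *        // re-initialisation failed; gmd must not be used further
 *    }
 */
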
623 bool
624 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
625 UInt32 count,
626 UInt32 offset,
627 task_t task,
628 IOOptionBits options,
629 IOMapper * mapper)
630 {
631 IOOptionBits type = options & kIOMemoryTypeMask;
632
633 // Grab the original MD's configuration data to initialise the
634 // arguments to this function.
635 if (kIOMemoryTypePersistentMD == type) {
636
637 typePersMDData *initData = (typePersMDData *) buffers;
638 const IOGeneralMemoryDescriptor *orig = initData->fMD;
639 ioGMDData *dataP = getDataP(orig->_memoryEntries);
640
641 // Only accept persistent memory descriptors with valid dataP data.
642 assert(orig->_rangesCount == 1);
643 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
644 return false;
645
646 _memEntry = initData->fMemEntry; // Grab the new named entry
647 options = orig->_flags | kIOMemoryAsReference;
648 _singleRange = orig->_singleRange; // Initialise our range
649 buffers = &_singleRange;
650 count = 1;
651
652 // Now grab the original task and whatever mapper was previously used
653 task = orig->_task;
654 mapper = dataP->fMapper;
655
656 // We are ready to go through the original initialisation now
657 }
658
659 switch (type) {
660 case kIOMemoryTypeUIO:
661 case kIOMemoryTypeVirtual:
662 #ifndef __LP64__
663 case kIOMemoryTypeVirtual64:
664 #endif /* !__LP64__ */
665 assert(task);
666 if (!task)
667 return false;
668
669 #ifndef __LP64__
670 if (vm_map_is_64bit(get_task_map(task))
671 && (kIOMemoryTypeVirtual == type)
672 && ((IOVirtualRange *) buffers)->address)
673 {
674 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
675 return false;
676 }
677 #endif /* !__LP64__ */
678 break;
679
680 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
681 #ifndef __LP64__
682 case kIOMemoryTypePhysical64:
683 #endif /* !__LP64__ */
684 case kIOMemoryTypeUPL:
685 assert(!task);
686 break;
687 default:
688 return false; /* bad argument */
689 }
690
691 assert(buffers);
692 assert(count);
693
694 /*
695 * We can check the _initialized instance variable before having ever set
696 * it to an initial value because I/O Kit guarantees that all our instance
697 * variables are zeroed on an object's allocation.
698 */
699
700 if (_initialized) {
701 /*
702 * An existing memory descriptor is being retargeted to point to
703 * somewhere else. Clean up our present state.
704 */
705 IOOptionBits type = _flags & kIOMemoryTypeMask;
706 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
707 {
708 while (_wireCount)
709 complete();
710 }
711 if (_ranges.v && !(kIOMemoryAsReference & _flags))
712 {
713 if (kIOMemoryTypeUIO == type)
714 uio_free((uio_t) _ranges.v);
715 #ifndef __LP64__
716 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
717 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
718 #endif /* !__LP64__ */
719 else
720 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
721 }
722
723 if (_memEntry)
724 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
725 if (_mappings)
726 _mappings->flushCollection();
727 }
728 else {
729 if (!super::init())
730 return false;
731 _initialized = true;
732 }
733
734 // Grab the appropriate mapper
735 if (kIOMemoryMapperNone & options)
736 mapper = 0; // No Mapper
737 else if (mapper == kIOMapperSystem) {
738 IOMapper::checkForSystemMapper();
739 gIOSystemMapper = mapper = IOMapper::gSystem;
740 }
741
742 // Temp binary compatibility for kIOMemoryThreadSafe
743 if (kIOMemoryReserved6156215 & options)
744 {
745 options &= ~kIOMemoryReserved6156215;
746 options |= kIOMemoryThreadSafe;
747 }
748 // Remove the dynamic internal use flags from the initial setting
749 options &= ~(kIOMemoryPreparedReadOnly);
750 _flags = options;
751 _task = task;
752
753 #ifndef __LP64__
754 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
755 #endif /* !__LP64__ */
756
757 __iomd_reservedA = 0;
758 __iomd_reservedB = 0;
759 _highestPage = 0;
760
761 if (kIOMemoryThreadSafe & options)
762 {
763 if (!_prepareLock)
764 _prepareLock = IOLockAlloc();
765 }
766 else if (_prepareLock)
767 {
768 IOLockFree(_prepareLock);
769 _prepareLock = NULL;
770 }
771
772 if (kIOMemoryTypeUPL == type) {
773
774 ioGMDData *dataP;
775 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
776
777 if (!_memoryEntries) {
778 _memoryEntries = OSData::withCapacity(dataSize);
779 if (!_memoryEntries)
780 return false;
781 }
782 else if (!_memoryEntries->initWithCapacity(dataSize))
783 return false;
784
785 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
786 dataP = getDataP(_memoryEntries);
787 dataP->fMapper = mapper;
788 dataP->fPageCnt = 0;
789
790 // _wireCount++; // UPLs start out life wired
791
792 _length = count;
793 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
794
795 ioPLBlock iopl;
796 iopl.fIOPL = (upl_t) buffers;
797 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
798
799 if (upl_get_size(iopl.fIOPL) < (count + offset))
800 panic("short external upl");
801
802 // Set the flag kIOPLOnDevice, conveniently equal to 1
803 iopl.fFlags = pageList->device | kIOPLExternUPL;
804 iopl.fIOMDOffset = 0;
805
806 _highestPage = upl_get_highest_page(iopl.fIOPL);
807
808 if (!pageList->device) {
809 // Pre-compute the offset into the UPL's page list
810 pageList = &pageList[atop_32(offset)];
811 offset &= PAGE_MASK;
812 if (mapper) {
813 iopl.fMappedBase = mapper->iovmAlloc(_pages);
814 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
815 }
816 else
817 iopl.fMappedBase = 0;
818 }
819 else
820 iopl.fMappedBase = 0;
821 iopl.fPageInfo = (vm_address_t) pageList;
822 iopl.fPageOffset = offset;
823
824 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
825 }
826 else {
827 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
828 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
829
830 // Initialize the memory descriptor
831 if (options & kIOMemoryAsReference) {
832 #ifndef __LP64__
833 _rangesIsAllocated = false;
834 #endif /* !__LP64__ */
835
836 // Hack assignment to get the buffer arg into _ranges.
837 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
838 // work, C++ sigh.
839 // This also initialises the uio & physical ranges.
840 _ranges.v = (IOVirtualRange *) buffers;
841 }
842 else {
843 #ifndef __LP64__
844 _rangesIsAllocated = true;
845 #endif /* !__LP64__ */
846 switch (type)
847 {
848 case kIOMemoryTypeUIO:
849 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
850 break;
851
852 #ifndef __LP64__
853 case kIOMemoryTypeVirtual64:
854 case kIOMemoryTypePhysical64:
855 if (count == 1
856 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL) {
857 if (kIOMemoryTypeVirtual64 == type)
858 type = kIOMemoryTypeVirtual;
859 else
860 type = kIOMemoryTypePhysical;
861 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
862 _rangesIsAllocated = false;
863 _ranges.v = &_singleRange.v;
864 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
865 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
866 break;
867 }
868 _ranges.v64 = IONew(IOAddressRange, count);
869 if (!_ranges.v64)
870 return false;
871 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
872 break;
873 #endif /* !__LP64__ */
874 case kIOMemoryTypeVirtual:
875 case kIOMemoryTypePhysical:
876 if (count == 1) {
877 _flags |= kIOMemoryAsReference;
878 #ifndef __LP64__
879 _rangesIsAllocated = false;
880 #endif /* !__LP64__ */
881 _ranges.v = &_singleRange.v;
882 } else {
883 _ranges.v = IONew(IOVirtualRange, count);
884 if (!_ranges.v)
885 return false;
886 }
887 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
888 break;
889 }
890 }
891
892 // Find starting address within the vector of ranges
893 Ranges vec = _ranges;
894 UInt32 length = 0;
895 UInt32 pages = 0;
896 for (unsigned ind = 0; ind < count; ind++) {
897 user_addr_t addr;
898 IOPhysicalLength len;
899
900 // addr & len are returned by this function
901 getAddrLenForInd(addr, len, type, vec, ind);
902 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
903 len += length;
904 assert(len >= length); // Check for 32 bit wrap around
905 length = len;
906
907 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
908 {
909 ppnum_t highPage = atop_64(addr + len - 1);
910 if (highPage > _highestPage)
911 _highestPage = highPage;
912 }
913 }
914 _length = length;
915 _pages = pages;
916 _rangesCount = count;
917
918 // Auto-prepare memory at creation time.
919 // Implied completion when the descriptor is freed
920 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
921 _wireCount++; // Physical MDs are, by definition, wired
922 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
923 ioGMDData *dataP;
924 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
925
926 if (!_memoryEntries) {
927 _memoryEntries = OSData::withCapacity(dataSize);
928 if (!_memoryEntries)
929 return false;
930 }
931 else if (!_memoryEntries->initWithCapacity(dataSize))
932 return false;
933
934 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
935 dataP = getDataP(_memoryEntries);
936 dataP->fMapper = mapper;
937 dataP->fPageCnt = _pages;
938
939 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
940 _memEntry = createNamedEntry();
941
942 if ((_flags & kIOMemoryAutoPrepare)
943 && prepare() != kIOReturnSuccess)
944 return false;
945 }
946 }
947
948 return true;
949 }
950
951 /*
952 * free
953 *
954 * Free resources.
955 */
956 void IOGeneralMemoryDescriptor::free()
957 {
958 IOOptionBits type = _flags & kIOMemoryTypeMask;
959
960 if( reserved)
961 {
962 LOCK;
963 reserved->memory = 0;
964 UNLOCK;
965 }
966
967 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
968 {
969 while (_wireCount)
970 complete();
971 }
972 if (_memoryEntries)
973 _memoryEntries->release();
974
975 if (_ranges.v && !(kIOMemoryAsReference & _flags))
976 {
977 if (kIOMemoryTypeUIO == type)
978 uio_free((uio_t) _ranges.v);
979 #ifndef __LP64__
980 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
981 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
982 #endif /* !__LP64__ */
983 else
984 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
985
986 _ranges.v = NULL;
987 }
988
989 if (reserved && reserved->devicePager)
990 device_pager_deallocate( (memory_object_t) reserved->devicePager );
991
992 // memEntry holds a ref on the device pager which owns reserved
993 // (ExpansionData) so no reserved access after this point
994 if (_memEntry)
995 ipc_port_release_send( (ipc_port_t) _memEntry );
996
997 if (_prepareLock)
998 IOLockFree(_prepareLock);
999
1000 super::free();
1001 }
1002
1003 #ifndef __LP64__
1004 void IOGeneralMemoryDescriptor::unmapFromKernel()
1005 {
1006 panic("IOGMD::unmapFromKernel deprecated");
1007 }
1008
1009 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1010 {
1011 panic("IOGMD::mapIntoKernel deprecated");
1012 }
1013 #endif /* !__LP64__ */
1014
1015 /*
1016 * getDirection:
1017 *
1018 * Get the direction of the transfer.
1019 */
1020 IODirection IOMemoryDescriptor::getDirection() const
1021 {
1022 #ifndef __LP64__
1023 if (_direction)
1024 return _direction;
1025 #endif /* !__LP64__ */
1026 return (IODirection) (_flags & kIOMemoryDirectionMask);
1027 }
1028
1029 /*
1030 * getLength:
1031 *
1032 * Get the length of the transfer (over all ranges).
1033 */
1034 IOByteCount IOMemoryDescriptor::getLength() const
1035 {
1036 return _length;
1037 }
1038
1039 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1040 {
1041 _tag = tag;
1042 }
1043
1044 IOOptionBits IOMemoryDescriptor::getTag( void )
1045 {
1046 return( _tag);
1047 }
1048
1049 #ifndef __LP64__
1050 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1051 IOPhysicalAddress
1052 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1053 {
1054 addr64_t physAddr = 0;
1055
1056 if( prepare() == kIOReturnSuccess) {
1057 physAddr = getPhysicalSegment64( offset, length );
1058 complete();
1059 }
1060
1061 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1062 }
1063 #endif /* !__LP64__ */
1064
1065 IOByteCount IOMemoryDescriptor::readBytes
1066 (IOByteCount offset, void *bytes, IOByteCount length)
1067 {
1068 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1069 IOByteCount remaining;
1070
1071 // Assert that this entire I/O is within the available range
1072 assert(offset < _length);
1073 assert(offset + length <= _length);
1074 if (offset >= _length) {
1075 return 0;
1076 }
1077
1078 if (kIOMemoryThreadSafe & _flags)
1079 LOCK;
1080
1081 remaining = length = min(length, _length - offset);
1082 while (remaining) { // (process another target segment?)
1083 addr64_t srcAddr64;
1084 IOByteCount srcLen;
1085
1086 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1087 if (!srcAddr64)
1088 break;
1089
1090 // Clip segment length to remaining
1091 if (srcLen > remaining)
1092 srcLen = remaining;
1093
1094 copypv(srcAddr64, dstAddr, srcLen,
1095 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1096
1097 dstAddr += srcLen;
1098 offset += srcLen;
1099 remaining -= srcLen;
1100 }
1101
1102 if (kIOMemoryThreadSafe & _flags)
1103 UNLOCK;
1104
1105 assert(!remaining);
1106
1107 return length - remaining;
1108 }
1109
1110 IOByteCount IOMemoryDescriptor::writeBytes
1111 (IOByteCount offset, const void *bytes, IOByteCount length)
1112 {
1113 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1114 IOByteCount remaining;
1115
1116 // Assert that this entire I/O is within the available range
1117 assert(offset < _length);
1118 assert(offset + length <= _length);
1119
1120 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1121
1122 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1123 return 0;
1124 }
1125
1126 if (kIOMemoryThreadSafe & _flags)
1127 LOCK;
1128
1129 remaining = length = min(length, _length - offset);
1130 while (remaining) { // (process another target segment?)
1131 addr64_t dstAddr64;
1132 IOByteCount dstLen;
1133
1134 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1135 if (!dstAddr64)
1136 break;
1137
1138 // Clip segment length to remaining
1139 if (dstLen > remaining)
1140 dstLen = remaining;
1141
1142 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1143 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1144
1145 srcAddr += dstLen;
1146 offset += dstLen;
1147 remaining -= dstLen;
1148 }
1149
1150 if (kIOMemoryThreadSafe & _flags)
1151 UNLOCK;
1152
1153 assert(!remaining);
1154
1155 return length - remaining;
1156 }
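
/*
 * Illustrative usage (hypothetical caller): copying through a prepared
 * descriptor with the physical copy path above. Both calls return the number
 * of bytes actually moved, clipped to getLength() - offset.
 *
 *    UInt8 header[64];
 *    IOByteCount got = md->readBytes(0, header, sizeof(header));
 *    // ... modify header ...
 *    IOByteCount put = md->writeBytes(0, header, sizeof(header));
 *    // put will be 0 if the descriptor was prepared read-only
 *    // (kIOMemoryPreparedReadOnly).
 */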
1157
1158 // osfmk/device/iokit_rpc.c
1159 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1160
1161 #ifndef __LP64__
1162 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1163 {
1164 panic("IOGMD::setPosition deprecated");
1165 }
1166 #endif /* !__LP64__ */
1167
1168 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1169
1170 uint64_t
1171 IOGeneralMemoryDescriptor::getPreparationID( void )
1172 {
1173 ioGMDData *dataP;
1174
1175 if (!_wireCount)
1176 return (kIOPreparationIDUnprepared);
1177
1178 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
1179 return (kIOPreparationIDAlwaysPrepared);
1180
1181 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1182 return (kIOPreparationIDUnprepared);
1183
1184 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1185 {
1186 #if defined(__ppc__ )
1187 dataP->fPreparationID = gIOMDPreparationID++;
1188 #else
1189 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1190 #endif
1191 }
1192 return (dataP->fPreparationID);
1193 }
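
/*
 * Illustrative check (hypothetical caller): the ID is kIOPreparationIDUnprepared
 * until the descriptor is wired, kIOPreparationIDAlwaysPrepared for physical
 * descriptors, and otherwise a unique value drawn from gIOMDPreparationID.
 *
 *    uint64_t id = md->getPreparationID();
 *    if (kIOPreparationIDUnprepared == id) {
 *        // not currently prepared; call prepare() before caching DMA state
 *    }
 */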
1194
1195 uint64_t
1196 IOMemoryDescriptor::getPreparationID( void )
1197 {
1198 return (kIOPreparationIDUnsupported);
1199 }
1200
1201 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1202 {
1203 if (kIOMDGetCharacteristics == op) {
1204
1205 if (dataSize < sizeof(IOMDDMACharacteristics))
1206 return kIOReturnUnderrun;
1207
1208 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1209 data->fLength = _length;
1210 data->fSGCount = _rangesCount;
1211 data->fPages = _pages;
1212 data->fDirection = getDirection();
1213 if (!_wireCount)
1214 data->fIsPrepared = false;
1215 else {
1216 data->fIsPrepared = true;
1217 data->fHighestPage = _highestPage;
1218 if (_memoryEntries) {
1219 ioGMDData *gmdData = getDataP(_memoryEntries);
1220 ioPLBlock *ioplList = getIOPLList(gmdData);
1221 UInt count = getNumIOPL(_memoryEntries, gmdData);
1222
1223 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1224 && ioplList[0].fMappedBase);
1225 if (count == 1)
1226 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1227 }
1228 else
1229 data->fIsMapped = false;
1230 }
1231
1232 return kIOReturnSuccess;
1233
1234 #if IOMD_DEBUG_DMAACTIVE
1235 } else if (kIOMDSetDMAActive == op) {
1236 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1237 OSIncrementAtomic(&md->__iomd_reservedA);
1238 } else if (kIOMDSetDMAInactive == op) {
1239 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1240 if (md->__iomd_reservedA)
1241 OSDecrementAtomic(&md->__iomd_reservedA);
1242 else
1243 panic("kIOMDSetDMAInactive");
1244 #endif /* IOMD_DEBUG_DMAACTIVE */
1245
1246 } else if (!(kIOMDWalkSegments & op))
1247 return kIOReturnBadArgument;
1248
1249 // Get the next segment
1250 struct InternalState {
1251 IOMDDMAWalkSegmentArgs fIO;
1252 UInt fOffset2Index;
1253 UInt fIndex;
1254 UInt fNextOffset;
1255 } *isP;
1256
1257 // Find the next segment
1258 if (dataSize < sizeof(*isP))
1259 return kIOReturnUnderrun;
1260
1261 isP = (InternalState *) vData;
1262 UInt offset = isP->fIO.fOffset;
1263 bool mapped = isP->fIO.fMapped;
1264
1265 if (offset >= _length)
1266 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1267
1268 // Validate the previous offset
1269 UInt ind, off2Ind = isP->fOffset2Index;
1270 if ((kIOMDFirstSegment != op)
1271 && offset
1272 && (offset == isP->fNextOffset || off2Ind <= offset))
1273 ind = isP->fIndex;
1274 else
1275 ind = off2Ind = 0; // Start from beginning
1276
1277 UInt length;
1278 UInt64 address;
1279 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1280
1281 // Physical address based memory descriptor
1282 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1283
1284 // Find the range after the one that contains the offset
1285 mach_vm_size_t len;
1286 for (len = 0; off2Ind <= offset; ind++) {
1287 len = physP[ind].length;
1288 off2Ind += len;
1289 }
1290
1291 // Calculate length within range and starting address
1292 length = off2Ind - offset;
1293 address = physP[ind - 1].address + len - length;
1294
1295 // see how far we can coalesce ranges
1296 while (ind < _rangesCount && address + length == physP[ind].address) {
1297 len = physP[ind].length;
1298 length += len;
1299 off2Ind += len;
1300 ind++;
1301 }
1302
1303 // correct contiguous check overshoot
1304 ind--;
1305 off2Ind -= len;
1306 }
1307 #ifndef __LP64__
1308 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1309
1310 // Physical address based memory descriptor
1311 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1312
1313 // Find the range after the one that contains the offset
1314 mach_vm_size_t len;
1315 for (len = 0; off2Ind <= offset; ind++) {
1316 len = physP[ind].length;
1317 off2Ind += len;
1318 }
1319
1320 // Calculate length within range and starting address
1321 length = off2Ind - offset;
1322 address = physP[ind - 1].address + len - length;
1323
1324 // see how far we can coalesce ranges
1325 while (ind < _rangesCount && address + length == physP[ind].address) {
1326 len = physP[ind].length;
1327 length += len;
1328 off2Ind += len;
1329 ind++;
1330 }
1331
1332 // correct contiguous check overshoot
1333 ind--;
1334 off2Ind -= len;
1335 }
1336 #endif /* !__LP64__ */
1337 else do {
1338 if (!_wireCount)
1339 panic("IOGMD: not wired for the IODMACommand");
1340
1341 assert(_memoryEntries);
1342
1343 ioGMDData * dataP = getDataP(_memoryEntries);
1344 const ioPLBlock *ioplList = getIOPLList(dataP);
1345 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1346 upl_page_info_t *pageList = getPageList(dataP);
1347
1348 assert(numIOPLs > 0);
1349
1350 // Scan through iopl info blocks looking for block containing offset
1351 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1352 ind++;
1353
1354 // Go back to actual range as search goes past it
1355 ioPLBlock ioplInfo = ioplList[ind - 1];
1356 off2Ind = ioplInfo.fIOMDOffset;
1357
1358 if (ind < numIOPLs)
1359 length = ioplList[ind].fIOMDOffset;
1360 else
1361 length = _length;
1362 length -= offset; // Remainder within iopl
1363
1364 // Subtract the offset of this iopl's start within the total list
1365 offset -= off2Ind;
1366
1367 // If a mapped address is requested and this is a pre-mapped IOPL
1368 // then just need to compute an offset relative to the mapped base.
1369 if (mapped && ioplInfo.fMappedBase) {
1370 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1371 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1372 continue; // Done; leave the do/while(false) now
1373 }
1374
1375 // The offset is rebased into the current iopl.
1376 // Now add the iopl 1st page offset.
1377 offset += ioplInfo.fPageOffset;
1378
1379 // For external UPLs the fPageInfo field points directly to
1380 // the upl's upl_page_info_t array.
1381 if (ioplInfo.fFlags & kIOPLExternUPL)
1382 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1383 else
1384 pageList = &pageList[ioplInfo.fPageInfo];
1385
1386 // Check for direct device non-paged memory
1387 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1388 address = ptoa_64(pageList->phys_addr) + offset;
1389 continue; // Done; leave the do/while(false) now
1390 }
1391
1392 // Now we need to compute the index into the pageList
1393 UInt pageInd = atop_32(offset);
1394 offset &= PAGE_MASK;
1395
1396 // Compute the starting address of this segment
1397 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1398 if (!pageAddr) {
1399 panic("!pageList phys_addr");
1400 }
1401
1402 address = ptoa_64(pageAddr) + offset;
1403
1404 // length is currently set to the length of the remainder of the iopl.
1405 // We need to check that the remainder of the iopl is contiguous.
1406 // This is indicated by pageList[ind].phys_addr being sequential.
1407 IOByteCount contigLength = PAGE_SIZE - offset;
1408 while (contigLength < length
1409 && ++pageAddr == pageList[++pageInd].phys_addr)
1410 {
1411 contigLength += PAGE_SIZE;
1412 }
1413
1414 if (contigLength < length)
1415 length = contigLength;
1416
1417
1418 assert(address);
1419 assert(length);
1420
1421 } while (false);
1422
1423 // Update return values and state
1424 isP->fIO.fIOVMAddr = address;
1425 isP->fIO.fLength = length;
1426 isP->fIndex = ind;
1427 isP->fOffset2Index = off2Ind;
1428 isP->fNextOffset = isP->fIO.fOffset + length;
1429
1430 return kIOReturnSuccess;
1431 }
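
/*
 * Illustrative walk sketch (hypothetical caller), modelled on the use of this
 * interface by getPhysicalSegment() below: ask for the segment containing a
 * given offset and read the address/length pair back out of the same args.
 *
 *    IOMDDMAWalkSegmentState  walkState;
 *    IOMDDMAWalkSegmentArgs * args = (IOMDDMAWalkSegmentArgs *) &walkState;
 *
 *    args->fOffset = someOffset;             // hypothetical starting offset
 *    args->fMapped = false;                  // return CPU-physical addresses
 *    if (kIOReturnSuccess == md->dmaCommandOperation(kIOMDFirstSegment,
 *                                                    walkState, sizeof(walkState)))
 *    {
 *        // args->fIOVMAddr and args->fLength now describe that segment;
 *        // subsequent segments are fetched with kIOMDWalkSegments.
 *    }
 */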
1432
1433 addr64_t
1434 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1435 {
1436 IOReturn ret;
1437 addr64_t address = 0;
1438 IOByteCount length = 0;
1439 IOMapper * mapper = gIOSystemMapper;
1440 IOOptionBits type = _flags & kIOMemoryTypeMask;
1441
1442 if (lengthOfSegment)
1443 *lengthOfSegment = 0;
1444
1445 if (offset >= _length)
1446 return 0;
1447
1448 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1449 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1450 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1451 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1452
1453 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1454 {
1455 unsigned rangesIndex = 0;
1456 Ranges vec = _ranges;
1457 user_addr_t addr;
1458
1459 // Find starting address within the vector of ranges
1460 for (;;) {
1461 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1462 if (offset < length)
1463 break;
1464 offset -= length; // (make offset relative)
1465 rangesIndex++;
1466 }
1467
1468 // Now that we have the starting range,
1469 // lets find the last contiguous range
1470 addr += offset;
1471 length -= offset;
1472
1473 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1474 user_addr_t newAddr;
1475 IOPhysicalLength newLen;
1476
1477 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1478 if (addr + length != newAddr)
1479 break;
1480 length += newLen;
1481 }
1482 if (addr)
1483 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1484 }
1485 else
1486 {
1487 IOMDDMAWalkSegmentState _state;
1488 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1489
1490 state->fOffset = offset;
1491 state->fLength = _length - offset;
1492 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1493
1494 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1495
1496 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1497 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1498 ret, this, state->fOffset,
1499 state->fIOVMAddr, state->fLength);
1500 if (kIOReturnSuccess == ret)
1501 {
1502 address = state->fIOVMAddr;
1503 length = state->fLength;
1504 }
1505
1506 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1507 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1508
1509 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1510 {
1511 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1512 {
1513 addr64_t origAddr = address;
1514 IOByteCount origLen = length;
1515
1516 address = mapper->mapAddr(origAddr);
1517 length = page_size - (address & (page_size - 1));
1518 while ((length < origLen)
1519 && ((address + length) == mapper->mapAddr(origAddr + length)))
1520 length += page_size;
1521 if (length > origLen)
1522 length = origLen;
1523 }
1524 #ifdef __LP64__
1525 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1526 {
1527 panic("getPhysicalSegment not mapped for I/O");
1528 }
1529 #endif /* __LP64__ */
1530 }
1531 }
1532
1533 if (!address)
1534 length = 0;
1535
1536 if (lengthOfSegment)
1537 *lengthOfSegment = length;
1538
1539 return (address);
1540 }
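
/*
 * Illustrative segment loop (hypothetical caller): walking every physical
 * segment of a prepared descriptor, bypassing the system mapper so the
 * addresses returned are CPU-physical.
 *
 *    IOByteCount offset = 0;
 *    while (offset < md->getLength()) {
 *        IOByteCount segLen;
 *        addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen,
 *                                                     kIOMemoryMapperNone);
 *        if (!segAddr) break;
 *        // ... program hardware with segAddr / segLen ...
 *        offset += segLen;
 *    }
 */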
1541
1542 #ifndef __LP64__
1543 addr64_t
1544 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1545 {
1546 addr64_t address = 0;
1547
1548 if (options & _kIOMemorySourceSegment)
1549 {
1550 address = getSourceSegment(offset, lengthOfSegment);
1551 }
1552 else if (options & kIOMemoryMapperNone)
1553 {
1554 address = getPhysicalSegment64(offset, lengthOfSegment);
1555 }
1556 else
1557 {
1558 address = getPhysicalSegment(offset, lengthOfSegment);
1559 }
1560
1561 return (address);
1562 }
1563
1564 addr64_t
1565 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1566 {
1567 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1568 }
1569
1570 IOPhysicalAddress
1571 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1572 {
1573 addr64_t address = 0;
1574 IOByteCount length = 0;
1575
1576 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1577
1578 if (lengthOfSegment)
1579 length = *lengthOfSegment;
1580
1581 if ((address + length) > 0x100000000ULL)
1582 {
1583 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1584 address, (long) length, (getMetaClass())->getClassName());
1585 }
1586
1587 return ((IOPhysicalAddress) address);
1588 }
1589
1590 addr64_t
1591 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1592 {
1593 IOPhysicalAddress phys32;
1594 IOByteCount length;
1595 addr64_t phys64;
1596 IOMapper * mapper = 0;
1597
1598 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1599 if (!phys32)
1600 return 0;
1601
1602 if (gIOSystemMapper)
1603 mapper = gIOSystemMapper;
1604
1605 if (mapper)
1606 {
1607 IOByteCount origLen;
1608
1609 phys64 = mapper->mapAddr(phys32);
1610 origLen = *lengthOfSegment;
1611 length = page_size - (phys64 & (page_size - 1));
1612 while ((length < origLen)
1613 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1614 length += page_size;
1615 if (length > origLen)
1616 length = origLen;
1617
1618 *lengthOfSegment = length;
1619 }
1620 else
1621 phys64 = (addr64_t) phys32;
1622
1623 return phys64;
1624 }
1625
1626 IOPhysicalAddress
1627 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1628 {
1629 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1630 }
1631
1632 IOPhysicalAddress
1633 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1634 {
1635 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1636 }
1637
1638 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1639 IOByteCount * lengthOfSegment)
1640 {
1641 if (_task == kernel_task)
1642 return (void *) getSourceSegment(offset, lengthOfSegment);
1643 else
1644 panic("IOGMD::getVirtualSegment deprecated");
1645
1646 return 0;
1647 }
1648 #endif /* !__LP64__ */
1649
1650 IOReturn
1651 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1652 {
1653 if (kIOMDGetCharacteristics == op) {
1654 if (dataSize < sizeof(IOMDDMACharacteristics))
1655 return kIOReturnUnderrun;
1656
1657 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1658 data->fLength = getLength();
1659 data->fSGCount = 0;
1660 data->fDirection = getDirection();
1661 if (IOMapper::gSystem)
1662 data->fIsMapped = true;
1663 data->fIsPrepared = true; // Assume prepared - fails safe
1664 }
1665 else if (kIOMDWalkSegments & op) {
1666 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1667 return kIOReturnUnderrun;
1668
1669 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1670 IOByteCount offset = (IOByteCount) data->fOffset;
1671
1672 IOPhysicalLength length;
1673 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1674 if (data->fMapped && IOMapper::gSystem)
1675 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1676 else
1677 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1678 data->fLength = length;
1679 }
1680 else
1681 return kIOReturnBadArgument;
1682
1683 return kIOReturnSuccess;
1684 }
1685
1686 static IOReturn
1687 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1688 {
1689 IOReturn err = kIOReturnSuccess;
1690
1691 *control = VM_PURGABLE_SET_STATE;
1692 switch (newState)
1693 {
1694 case kIOMemoryPurgeableKeepCurrent:
1695 *control = VM_PURGABLE_GET_STATE;
1696 break;
1697
1698 case kIOMemoryPurgeableNonVolatile:
1699 *state = VM_PURGABLE_NONVOLATILE;
1700 break;
1701 case kIOMemoryPurgeableVolatile:
1702 *state = VM_PURGABLE_VOLATILE;
1703 break;
1704 case kIOMemoryPurgeableEmpty:
1705 *state = VM_PURGABLE_EMPTY;
1706 break;
1707 default:
1708 err = kIOReturnBadArgument;
1709 break;
1710 }
1711 return (err);
1712 }
1713
1714 static IOReturn
1715 purgeableStateBits(int * state)
1716 {
1717 IOReturn err = kIOReturnSuccess;
1718
1719 switch (*state)
1720 {
1721 case VM_PURGABLE_NONVOLATILE:
1722 *state = kIOMemoryPurgeableNonVolatile;
1723 break;
1724 case VM_PURGABLE_VOLATILE:
1725 *state = kIOMemoryPurgeableVolatile;
1726 break;
1727 case VM_PURGABLE_EMPTY:
1728 *state = kIOMemoryPurgeableEmpty;
1729 break;
1730 default:
1731 *state = kIOMemoryPurgeableNonVolatile;
1732 err = kIOReturnNotReady;
1733 break;
1734 }
1735 return (err);
1736 }
1737
1738 IOReturn
1739 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1740 IOOptionBits * oldState )
1741 {
1742 IOReturn err = kIOReturnSuccess;
1743 vm_purgable_t control;
1744 int state;
1745
1746 if (_memEntry)
1747 {
1748 err = super::setPurgeable(newState, oldState);
1749 }
1750 else
1751 {
1752 if (kIOMemoryThreadSafe & _flags)
1753 LOCK;
1754 do
1755 {
1756 // Find the appropriate vm_map for the given task
1757 vm_map_t curMap;
1758 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1759 {
1760 err = kIOReturnNotReady;
1761 break;
1762 }
1763 else
1764 curMap = get_task_map(_task);
1765
1766 // can only do one range
1767 Ranges vec = _ranges;
1768 IOOptionBits type = _flags & kIOMemoryTypeMask;
1769 user_addr_t addr;
1770 IOByteCount len;
1771 getAddrLenForInd(addr, len, type, vec, 0);
1772
1773 err = purgeableControlBits(newState, &control, &state);
1774 if (kIOReturnSuccess != err)
1775 break;
1776 err = mach_vm_purgable_control(curMap, addr, control, &state);
1777 if (oldState)
1778 {
1779 if (kIOReturnSuccess == err)
1780 {
1781 err = purgeableStateBits(&state);
1782 *oldState = state;
1783 }
1784 }
1785 }
1786 while (false);
1787 if (kIOMemoryThreadSafe & _flags)
1788 UNLOCK;
1789 }
1790 return (err);
1791 }
1792
1793 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1794 IOOptionBits * oldState )
1795 {
1796 IOReturn err = kIOReturnSuccess;
1797 vm_purgable_t control;
1798 int state;
1799
1800 if (kIOMemoryThreadSafe & _flags)
1801 LOCK;
1802
1803 do
1804 {
1805 if (!_memEntry)
1806 {
1807 err = kIOReturnNotReady;
1808 break;
1809 }
1810 err = purgeableControlBits(newState, &control, &state);
1811 if (kIOReturnSuccess != err)
1812 break;
1813 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1814 if (oldState)
1815 {
1816 if (kIOReturnSuccess == err)
1817 {
1818 err = purgeableStateBits(&state);
1819 *oldState = state;
1820 }
1821 }
1822 }
1823 while (false);
1824
1825 if (kIOMemoryThreadSafe & _flags)
1826 UNLOCK;
1827
1828 return (err);
1829 }
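
/*
 * Illustrative usage (hypothetical caller): marking a descriptor whose backing
 * memory is purgeable as volatile while idle, then checking on reuse whether
 * the VM emptied it in the meantime.
 *
 *    IOOptionBits oldState = 0;
 *    md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *    // ... buffer is idle ...
 *    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *    if (kIOMemoryPurgeableEmpty == oldState) {
 *        // contents were reclaimed; regenerate the data
 *    }
 */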
1830
1831 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1832 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1833
1834 static void SetEncryptOp(addr64_t pa, unsigned int count)
1835 {
1836 ppnum_t page, end;
1837
1838 page = atop_64(round_page_64(pa));
1839 end = atop_64(trunc_page_64(pa + count));
1840 for (; page < end; page++)
1841 {
1842 pmap_clear_noencrypt(page);
1843 }
1844 }
1845
1846 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1847 {
1848 ppnum_t page, end;
1849
1850 page = atop_64(round_page_64(pa));
1851 end = atop_64(trunc_page_64(pa + count));
1852 for (; page < end; page++)
1853 {
1854 pmap_set_noencrypt(page);
1855 }
1856 }
1857
1858 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1859 IOByteCount offset, IOByteCount length )
1860 {
1861 IOByteCount remaining;
1862 void (*func)(addr64_t pa, unsigned int count) = 0;
1863
1864 switch (options)
1865 {
1866 case kIOMemoryIncoherentIOFlush:
1867 func = &dcache_incoherent_io_flush64;
1868 break;
1869 case kIOMemoryIncoherentIOStore:
1870 func = &dcache_incoherent_io_store64;
1871 break;
1872
1873 case kIOMemorySetEncrypted:
1874 func = &SetEncryptOp;
1875 break;
1876 case kIOMemoryClearEncrypted:
1877 func = &ClearEncryptOp;
1878 break;
1879 }
1880
1881 if (!func)
1882 return (kIOReturnUnsupported);
1883
1884 if (kIOMemoryThreadSafe & _flags)
1885 LOCK;
1886
1887 remaining = length = min(length, getLength() - offset);
1888 while (remaining)
1889 // (process another target segment?)
1890 {
1891 addr64_t dstAddr64;
1892 IOByteCount dstLen;
1893
1894 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1895 if (!dstAddr64)
1896 break;
1897
1898 // Clip segment length to remaining
1899 if (dstLen > remaining)
1900 dstLen = remaining;
1901
1902 (*func)(dstAddr64, dstLen);
1903
1904 offset += dstLen;
1905 remaining -= dstLen;
1906 }
1907
1908 if (kIOMemoryThreadSafe & _flags)
1909 UNLOCK;
1910
1911 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1912 }
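
/*
 * Illustrative usage (hypothetical caller): flushing the whole of a prepared
 * descriptor before handing the buffer to a device that is not I/O coherent.
 *
 *    IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush,
 *                                        0, md->getLength());
 *    if (kIOReturnSuccess != ret) {
 *        // kIOReturnUnderrun: some segment had no physical backing
 *    }
 */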
1913
1914 #if defined(__ppc__) || defined(__arm__)
1915 extern vm_offset_t static_memory_end;
1916 #define io_kernel_static_end static_memory_end
1917 #else
1918 extern vm_offset_t first_avail;
1919 #define io_kernel_static_end first_avail
1920 #endif
1921
1922 static kern_return_t
1923 io_get_kernel_static_upl(
1924 vm_map_t /* map */,
1925 uintptr_t offset,
1926 vm_size_t *upl_size,
1927 upl_t *upl,
1928 upl_page_info_array_t page_list,
1929 unsigned int *count,
1930 ppnum_t *highest_page)
1931 {
1932 unsigned int pageCount, page;
1933 ppnum_t phys;
1934 ppnum_t highestPage = 0;
1935
1936 pageCount = atop_32(*upl_size);
1937 if (pageCount > *count)
1938 pageCount = *count;
1939
1940 *upl = NULL;
1941
1942 for (page = 0; page < pageCount; page++)
1943 {
1944 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1945 if (!phys)
1946 break;
1947 page_list[page].phys_addr = phys;
1948 page_list[page].pageout = 0;
1949 page_list[page].absent = 0;
1950 page_list[page].dirty = 0;
1951 page_list[page].precious = 0;
1952 page_list[page].device = 0;
1953 if (phys > highestPage)
1954 highestPage = phys;
1955 }
1956
1957 *highest_page = highestPage;
1958
1959 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1960 }
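
/*
 * Note: the helper above services wiring requests against the kernel's static
 * region (below io_kernel_static_end), where no real UPL can be created; it
 * fills the caller's page list directly via pmap_find_phys(), reports the
 * highest physical page seen, and leaves *upl NULL, returning kIOReturnVMError
 * if any page has no physical backing.
 */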
1961
1962 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1963 {
1964 IOOptionBits type = _flags & kIOMemoryTypeMask;
1965 IOReturn error = kIOReturnCannotWire;
1966 ioGMDData *dataP;
1967 ppnum_t mapBase = 0;
1968 IOMapper *mapper;
1969 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1970
1971 assert(!_wireCount);
1972 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1973
1974 if (_pages >= gIOMaximumMappedIOPageCount)
1975 return kIOReturnNoResources;
1976
1977 dataP = getDataP(_memoryEntries);
1978 mapper = dataP->fMapper;
1979 if (mapper && _pages)
1980 mapBase = mapper->iovmAlloc(_pages);
1981
1982 // Note that appendBytes(NULL) zeros the data up to the
1983 // desired length.
1984 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1985 dataP = 0; // May no longer be valid, so let's not get tempted.
1986
1987 if (forDirection == kIODirectionNone)
1988 forDirection = getDirection();
1989
1990 int uplFlags; // This Mem Desc's default flags for upl creation
1991 switch (kIODirectionOutIn & forDirection)
1992 {
1993 case kIODirectionOut:
1994 // Pages do not need to be marked as dirty on commit
1995 uplFlags = UPL_COPYOUT_FROM;
1996 _flags |= kIOMemoryPreparedReadOnly;
1997 break;
1998
1999 case kIODirectionIn:
2000 default:
2001 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2002 break;
2003 }
2004 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2005
2006 #ifdef UPL_NEED_32BIT_ADDR
2007 if (kIODirectionPrepareToPhys32 & forDirection)
2008 uplFlags |= UPL_NEED_32BIT_ADDR;
2009 #endif
2010
2011 // Find the appropriate vm_map for the given task
2012 vm_map_t curMap;
2013 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2014 curMap = 0;
2015 else
2016 { curMap = get_task_map(_task); }
2017
2018 // Iterate over the vector of virtual ranges
2019 Ranges vec = _ranges;
2020 unsigned int pageIndex = 0;
2021 IOByteCount mdOffset = 0;
2022 ppnum_t highestPage = 0;
2023 for (UInt range = 0; range < _rangesCount; range++) {
2024 ioPLBlock iopl;
2025 user_addr_t startPage;
2026 IOByteCount numBytes;
2027 ppnum_t highPage = 0;
2028
2029 // Get the startPage address and length of vec[range]
2030 getAddrLenForInd(startPage, numBytes, type, vec, range);
2031 iopl.fPageOffset = startPage & PAGE_MASK;
2032 numBytes += iopl.fPageOffset;
2033 startPage = trunc_page_64(startPage);
2034
2035 if (mapper)
2036 iopl.fMappedBase = mapBase + pageIndex;
2037 else
2038 iopl.fMappedBase = 0;
2039
2040 // Iterate over the current range, creating UPLs
2041 while (numBytes) {
2042 dataP = getDataP(_memoryEntries);
2043 vm_address_t kernelStart = (vm_address_t) startPage;
2044 vm_map_t theMap;
2045 if (curMap)
2046 theMap = curMap;
2047 else if (!sharedMem) {
2048 assert(_task == kernel_task);
2049 theMap = IOPageableMapForAddress(kernelStart);
2050 }
2051 else
2052 theMap = NULL;
2053
2054 upl_page_info_array_t pageInfo = getPageList(dataP);
2055 int ioplFlags = uplFlags;
2056 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2057
2058 vm_size_t ioplSize = round_page(numBytes);
2059 unsigned int numPageInfo = atop_32(ioplSize);
2060
2061 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2062 error = io_get_kernel_static_upl(theMap,
2063 kernelStart,
2064 &ioplSize,
2065 &iopl.fIOPL,
2066 baseInfo,
2067 &numPageInfo,
2068 &highPage);
2069 }
2070 else if (sharedMem) {
2071 error = memory_object_iopl_request(sharedMem,
2072 ptoa_32(pageIndex),
2073 &ioplSize,
2074 &iopl.fIOPL,
2075 baseInfo,
2076 &numPageInfo,
2077 &ioplFlags);
2078 }
2079 else {
2080 assert(theMap);
2081 error = vm_map_create_upl(theMap,
2082 startPage,
2083 (upl_size_t*)&ioplSize,
2084 &iopl.fIOPL,
2085 baseInfo,
2086 &numPageInfo,
2087 &ioplFlags);
2088 }
2089
2090 assert(ioplSize);
2091 if (error != KERN_SUCCESS)
2092 goto abortExit;
2093
2094 if (iopl.fIOPL)
2095 highPage = upl_get_highest_page(iopl.fIOPL);
2096 if (highPage > highestPage)
2097 highestPage = highPage;
2098
2099 error = kIOReturnCannotWire;
2100
2101 if (baseInfo->device) {
2102 numPageInfo = 1;
2103 iopl.fFlags = kIOPLOnDevice;
2104 // Don't translate device memory at all
2105 if (mapper && mapBase) {
2106 mapper->iovmFree(mapBase, _pages);
2107 mapBase = 0;
2108 iopl.fMappedBase = 0;
2109 }
2110 }
2111 else {
2112 iopl.fFlags = 0;
2113 if (mapper)
2114 mapper->iovmInsert(mapBase, pageIndex,
2115 baseInfo, numPageInfo);
2116 }
2117
2118 iopl.fIOMDOffset = mdOffset;
2119 iopl.fPageInfo = pageIndex;
2120
2121 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2122 {
2123 upl_commit(iopl.fIOPL, 0, 0);
2124 upl_deallocate(iopl.fIOPL);
2125 iopl.fIOPL = 0;
2126 }
2127
2128 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2129 // Clean up the partially created and unsaved iopl
2130 if (iopl.fIOPL) {
2131 upl_abort(iopl.fIOPL, 0);
2132 upl_deallocate(iopl.fIOPL);
2133 }
2134 goto abortExit;
2135 }
2136
2137 // Check for multiple iopls in one virtual range
2138 pageIndex += numPageInfo;
2139 mdOffset -= iopl.fPageOffset;
2140 if (ioplSize < numBytes) {
2141 numBytes -= ioplSize;
2142 startPage += ioplSize;
2143 mdOffset += ioplSize;
2144 iopl.fPageOffset = 0;
2145 if (mapper)
2146 iopl.fMappedBase = mapBase + pageIndex;
2147 }
2148 else {
2149 mdOffset += numBytes;
2150 break;
2151 }
2152 }
2153 }
2154
2155 _highestPage = highestPage;
2156
2157 return kIOReturnSuccess;
2158
2159 abortExit:
2160 {
2161 dataP = getDataP(_memoryEntries);
2162 UInt done = getNumIOPL(_memoryEntries, dataP);
2163 ioPLBlock *ioplList = getIOPLList(dataP);
2164
2165 for (UInt range = 0; range < done; range++)
2166 {
2167 if (ioplList[range].fIOPL) {
2168 upl_abort(ioplList[range].fIOPL, 0);
2169 upl_deallocate(ioplList[range].fIOPL);
2170 }
2171 }
2172 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2173
2174 if (mapper && mapBase)
2175 mapper->iovmFree(mapBase, _pages);
2176 }
2177
2178 if (error == KERN_FAILURE)
2179 error = kIOReturnCannotWire;
2180
2181 return error;
2182 }
2183
2184 /*
2185 * prepare
2186 *
2187 * Prepare the memory for an I/O transfer. This involves paging in
2188 * the memory, if necessary, and wiring it down for the duration of
2189 * the transfer. The complete() method completes the processing of
2190 * the memory after the I/O transfer finishes. This method needn't be
2191 * called for non-pageable memory.
2192 */
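/*
 * Illustrative sketch, not part of the original source: a typical driver
 * sequence pairing prepare() and complete() around a DMA transfer. The
 * helper name and the programHardwareAndTransfer() callout are hypothetical
 * placeholders for driver-specific work.
 *
 *   static IOReturn ExampleTransfer(IOMemoryDescriptor * md)
 *   {
 *       IOReturn ret = md->prepare(kIODirectionOutIn);  // page in and wire the memory
 *       if (kIOReturnSuccess != ret)
 *           return ret;
 *       // ... programHardwareAndTransfer(md) ...        // hypothetical DMA work
 *       md->complete(kIODirectionOutIn);                 // must balance the prepare()
 *       return kIOReturnSuccess;
 *   }
 */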
2193 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2194 {
2195 IOReturn error = kIOReturnSuccess;
2196 IOOptionBits type = _flags & kIOMemoryTypeMask;
2197
2198 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2199 return kIOReturnSuccess;
2200
2201 if (_prepareLock)
2202 IOLockLock(_prepareLock);
2203
2204 if (!_wireCount
2205 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2206 error = wireVirtual(forDirection);
2207 }
2208
2209 if (kIOReturnSuccess == error)
2210 _wireCount++;
2211
2212 if (1 == _wireCount)
2213 {
2214 if (kIOMemoryClearEncrypt & _flags)
2215 {
2216 performOperation(kIOMemoryClearEncrypted, 0, _length);
2217 }
2218 }
2219
2220 if (_prepareLock)
2221 IOLockUnlock(_prepareLock);
2222
2223 return error;
2224 }
2225
2226 /*
2227 * complete
2228 *
2229 * Complete processing of the memory after an I/O transfer finishes.
2230 * This method should not be called unless a prepare was previously
2231 * issued; prepare() and complete() must occur in pairs, before and
2232 * after an I/O transfer involving pageable memory.
2233 */
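/*
 * A minimal sketch, not part of the original source, of how nested
 * prepare()/complete() calls balance: the descriptor stays wired until the
 * final complete(), mirroring the _wireCount accounting in prepare() and
 * complete(). The descriptor "md" is a hypothetical placeholder.
 *
 *   md->prepare();     // _wireCount 0 -> 1, pages are wired
 *   md->prepare();     // _wireCount 1 -> 2, already wired, no extra work
 *   md->complete();    // _wireCount 2 -> 1, still wired
 *   md->complete();    // _wireCount 1 -> 0, UPLs committed and released
 */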
2234
2235 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2236 {
2237 IOOptionBits type = _flags & kIOMemoryTypeMask;
2238
2239 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2240 return kIOReturnSuccess;
2241
2242 if (_prepareLock)
2243 IOLockLock(_prepareLock);
2244
2245 assert(_wireCount);
2246
2247 if (_wireCount)
2248 {
2249 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2250 {
2251 performOperation(kIOMemorySetEncrypted, 0, _length);
2252 }
2253
2254 _wireCount--;
2255 if (!_wireCount)
2256 {
2257 IOOptionBits type = _flags & kIOMemoryTypeMask;
2258 ioGMDData * dataP = getDataP(_memoryEntries);
2259 ioPLBlock *ioplList = getIOPLList(dataP);
2260 UInt count = getNumIOPL(_memoryEntries, dataP);
2261
2262 #if IOMD_DEBUG_DMAACTIVE
2263 if (__iomd_reservedA) panic("complete() while dma active");
2264 #endif /* IOMD_DEBUG_DMAACTIVE */
2265
2266 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2267 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2268
2269 // Only complete iopls that we created, which are for the virtual types
2270 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2271 for (UInt ind = 0; ind < count; ind++)
2272 if (ioplList[ind].fIOPL) {
2273 upl_commit(ioplList[ind].fIOPL, 0, 0);
2274 upl_deallocate(ioplList[ind].fIOPL);
2275 }
2276 }
2277 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2278
2279 dataP->fPreparationID = kIOPreparationIDUnprepared;
2280 }
2281 }
2282
2283 if (_prepareLock)
2284 IOLockUnlock(_prepareLock);
2285
2286 return kIOReturnSuccess;
2287 }
2288
2289 IOReturn IOGeneralMemoryDescriptor::doMap(
2290 vm_map_t __addressMap,
2291 IOVirtualAddress * __address,
2292 IOOptionBits options,
2293 IOByteCount __offset,
2294 IOByteCount __length )
2295
2296 {
2297 #ifndef __LP64__
2298 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2299 #endif /* !__LP64__ */
2300
2301 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2302 mach_vm_size_t offset = mapping->fOffset + __offset;
2303 mach_vm_size_t length = mapping->fLength;
2304
2305 kern_return_t kr = kIOReturnVMError;
2306 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2307
2308 IOOptionBits type = _flags & kIOMemoryTypeMask;
2309 Ranges vec = _ranges;
2310
2311 user_addr_t range0Addr = 0;
2312 IOByteCount range0Len = 0;
2313
2314 if (vec.v)
2315 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2316
2317 // mapping source == dest? (could be much better)
2318 if( _task
2319 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2320 && (1 == _rangesCount) && (0 == offset)
2321 && range0Addr && (length <= range0Len) )
2322 {
2323 mapping->fAddress = range0Addr;
2324 mapping->fOptions |= kIOMapStatic;
2325
2326 return( kIOReturnSuccess );
2327 }
2328
2329 if( 0 == sharedMem) {
2330
2331 vm_size_t size = ptoa_32(_pages);
2332
2333 if( _task) {
2334
2335 memory_object_size_t actualSize = size;
2336 vm_prot_t prot = VM_PROT_READ;
2337 if (!(kIOMapReadOnly & options))
2338 prot |= VM_PROT_WRITE;
2339 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2340 prot |= VM_PROT_WRITE;
2341
2342 kr = mach_make_memory_entry_64(get_task_map(_task),
2343 &actualSize, range0Addr,
2344 prot, &sharedMem,
2345 NULL );
2346
2347 if( (KERN_SUCCESS == kr) && (actualSize != round_page(size)))
2348 {
2349 // map will cross vm objects
2350 #if IOASSERT
2351 IOLog("mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
2352 range0Addr, (UInt64)actualSize, (UInt64)size);
2353 #endif
2354 kr = kIOReturnVMError;
2355 ipc_port_release_send( sharedMem );
2356 sharedMem = MACH_PORT_NULL;
2357
2358 mach_vm_address_t address;
2359 mach_vm_size_t pageOffset = (range0Addr & PAGE_MASK);
2360
2361 address = trunc_page_64(mapping->fAddress);
2362 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2363 {
2364 kr = IOMemoryDescriptorMapCopy(mapping->fAddressMap,
2365 get_task_map(_task), range0Addr,
2366 options,
2367 offset, &address, round_page_64(length + pageOffset));
2368 if (kr == KERN_SUCCESS)
2369 mapping->fAddress = address + pageOffset;
2370 else
2371 mapping->fAddress = NULL;
2372 }
2373 }
2374 }
2375 else do
2376 { // _task == 0, must be physical
2377
2378 memory_object_t pager;
2379 unsigned int flags = 0;
2380 addr64_t pa;
2381 IOPhysicalLength segLen;
2382
2383 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2384
2385 if( !reserved) {
2386 reserved = IONew( ExpansionData, 1 );
2387 if( !reserved)
2388 continue;
2389 }
2390 reserved->pagerContig = (1 == _rangesCount);
2391 reserved->memory = this;
2392
2393 /* What cache mode do we need? */
2394 switch(options & kIOMapCacheMask ) {
2395
2396 case kIOMapDefaultCache:
2397 default:
2398 flags = IODefaultCacheBits(pa);
2399 if (DEVICE_PAGER_CACHE_INHIB & flags)
2400 {
2401 if (DEVICE_PAGER_GUARDED & flags)
2402 mapping->fOptions |= kIOMapInhibitCache;
2403 else
2404 mapping->fOptions |= kIOMapWriteCombineCache;
2405 }
2406 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2407 mapping->fOptions |= kIOMapWriteThruCache;
2408 else
2409 mapping->fOptions |= kIOMapCopybackCache;
2410 break;
2411
2412 case kIOMapInhibitCache:
2413 flags = DEVICE_PAGER_CACHE_INHIB |
2414 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2415 break;
2416
2417 case kIOMapWriteThruCache:
2418 flags = DEVICE_PAGER_WRITE_THROUGH |
2419 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2420 break;
2421
2422 case kIOMapCopybackCache:
2423 flags = DEVICE_PAGER_COHERENT;
2424 break;
2425
2426 case kIOMapWriteCombineCache:
2427 flags = DEVICE_PAGER_CACHE_INHIB |
2428 DEVICE_PAGER_COHERENT;
2429 break;
2430 }
2431
2432 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2433
2434 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2435 size, flags);
2436 assert( pager );
2437
2438 if( pager) {
2439 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2440 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2441
2442 assert( KERN_SUCCESS == kr );
2443 if( KERN_SUCCESS != kr)
2444 {
2445 device_pager_deallocate( pager );
2446 pager = MACH_PORT_NULL;
2447 sharedMem = MACH_PORT_NULL;
2448 }
2449 }
2450 if( pager && sharedMem)
2451 reserved->devicePager = pager;
2452 else {
2453 IODelete( reserved, ExpansionData, 1 );
2454 reserved = 0;
2455 }
2456
2457 } while( false );
2458
2459 _memEntry = (void *) sharedMem;
2460 }
2461
2462 IOReturn result;
2463 if (0 == sharedMem)
2464 result = kr;
2465 else
2466 result = super::doMap( __addressMap, __address,
2467 options, __offset, __length );
2468
2469 return( result );
2470 }
2471
2472 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2473 vm_map_t addressMap,
2474 IOVirtualAddress __address,
2475 IOByteCount __length )
2476 {
2477 return (super::doUnmap(addressMap, __address, __length));
2478 }
2479
2480 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2481
2482 #undef super
2483 #define super OSObject
2484
2485 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2486
2487 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2488 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2489 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2490 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2491 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2492 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2493 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2494 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2495
2496 /* ex-inline function implementation */
2497 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2498 { return( getPhysicalSegment( 0, 0 )); }
2499
2500 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2501
2502 bool IOMemoryMap::init(
2503 task_t intoTask,
2504 mach_vm_address_t toAddress,
2505 IOOptionBits _options,
2506 mach_vm_size_t _offset,
2507 mach_vm_size_t _length )
2508 {
2509 if (!intoTask)
2510 return( false);
2511
2512 if (!super::init())
2513 return(false);
2514
2515 fAddressMap = get_task_map(intoTask);
2516 if (!fAddressMap)
2517 return(false);
2518 vm_map_reference(fAddressMap);
2519
2520 fAddressTask = intoTask;
2521 fOptions = _options;
2522 fLength = _length;
2523 fOffset = _offset;
2524 fAddress = toAddress;
2525
2526 return (true);
2527 }
2528
2529 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2530 {
2531 if (!_memory)
2532 return(false);
2533
2534 if (!fSuperMap)
2535 {
2536 if( (_offset + fLength) > _memory->getLength())
2537 return( false);
2538 fOffset = _offset;
2539 }
2540
2541 _memory->retain();
2542 if (fMemory)
2543 {
2544 if (fMemory != _memory)
2545 fMemory->removeMapping(this);
2546 fMemory->release();
2547 }
2548 fMemory = _memory;
2549
2550 return( true );
2551 }
2552
2553 struct IOMemoryDescriptorMapAllocRef
2554 {
2555 ipc_port_t sharedMem;
2556 vm_map_t src_map;
2557 mach_vm_offset_t src_address;
2558 mach_vm_address_t mapped;
2559 mach_vm_size_t size;
2560 mach_vm_size_t sourceOffset;
2561 IOOptionBits options;
2562 };
2563
2564 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2565 {
2566 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2567 IOReturn err;
2568
2569 do {
2570 if( ref->sharedMem)
2571 {
2572 vm_prot_t prot = VM_PROT_READ
2573 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2574
2575 // VM system requires write access to change cache mode
2576 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2577 prot |= VM_PROT_WRITE;
2578
2579 // set the memory entry cache mode
2580 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2581 switch (ref->options & kIOMapCacheMask)
2582 {
2583 case kIOMapInhibitCache:
2584 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2585 break;
2586
2587 case kIOMapWriteThruCache:
2588 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2589 break;
2590
2591 case kIOMapWriteCombineCache:
2592 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2593 break;
2594
2595 case kIOMapCopybackCache:
2596 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2597 break;
2598
2599 case kIOMapDefaultCache:
2600 default:
2601 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2602 break;
2603 }
2604
2605 vm_size_t unused = 0;
2606
2607 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2608 memEntryCacheMode, NULL, ref->sharedMem );
2609 if (KERN_SUCCESS != err)
2610 IOLog("MAP_MEM_ONLY failed %d\n", err);
2611
2612 err = mach_vm_map( map,
2613 &ref->mapped,
2614 ref->size, 0 /* mask */,
2615 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2616 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2617 ref->sharedMem, ref->sourceOffset,
2618 false, // copy
2619 prot, // cur
2620 prot, // max
2621 VM_INHERIT_NONE);
2622
2623 if( KERN_SUCCESS != err) {
2624 ref->mapped = 0;
2625 continue;
2626 }
2627 }
2628 else if (ref->src_map)
2629 {
2630 vm_prot_t cur_prot, max_prot;
2631 err = mach_vm_remap(map, &ref->mapped, ref->size, PAGE_MASK,
2632 (ref->options & kIOMapAnywhere) ? TRUE : FALSE,
2633 ref->src_map, ref->src_address,
2634 FALSE /* copy */,
2635 &cur_prot,
2636 &max_prot,
2637 VM_INHERIT_NONE);
2638 if (KERN_SUCCESS == err)
2639 {
2640 if ((!(VM_PROT_READ & cur_prot))
2641 || (!(kIOMapReadOnly & ref->options) && !(VM_PROT_WRITE & cur_prot)))
2642 {
2643 mach_vm_deallocate(map, ref->mapped, ref->size);
2644 err = KERN_PROTECTION_FAILURE;
2645 }
2646 }
2647 if (KERN_SUCCESS != err)
2648 ref->mapped = 0;
2649 }
2650 else
2651 {
2652 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2653 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2654 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2655 if( KERN_SUCCESS != err) {
2656 ref->mapped = 0;
2657 continue;
2658 }
2659 // we have to make sure that these pages don't get copied if we fork.
2660 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2661 assert( KERN_SUCCESS == err );
2662 }
2663 }
2664 while( false );
2665
2666 return( err );
2667 }
2668
2669 kern_return_t
2670 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2671 mach_vm_size_t offset,
2672 mach_vm_address_t * address, mach_vm_size_t length)
2673 {
2674 IOReturn err;
2675 IOMemoryDescriptorMapAllocRef ref;
2676
2677 ref.sharedMem = entry;
2678 ref.src_map = NULL;
2680 ref.sourceOffset = trunc_page_64(offset);
2681 ref.options = options;
2682 ref.size = length;
2683
2684 if (options & kIOMapAnywhere)
2685 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2686 ref.mapped = 0;
2687 else
2688 ref.mapped = *address;
2689
2690 if( ref.sharedMem && (map == kernel_map) && pageable)
2691 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2692 else
2693 err = IOMemoryDescriptorMapAlloc( map, &ref );
2694
2695 *address = ref.mapped;
2696 return (err);
2697 }
2698
2699 kern_return_t
2700 IOMemoryDescriptorMapCopy(vm_map_t map,
2701 vm_map_t src_map,
2702 mach_vm_offset_t src_address,
2703 IOOptionBits options,
2704 mach_vm_size_t offset,
2705 mach_vm_address_t * address, mach_vm_size_t length)
2706 {
2707 IOReturn err;
2708 IOMemoryDescriptorMapAllocRef ref;
2709
2710 ref.sharedMem = NULL;
2711 ref.src_map = src_map;
2712 ref.src_address = src_address;
2713 ref.sourceOffset = trunc_page_64(offset);
2714 ref.options = options;
2715 ref.size = length;
2716
2717 if (options & kIOMapAnywhere)
2718 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2719 ref.mapped = 0;
2720 else
2721 ref.mapped = *address;
2722
2723 if (map == kernel_map)
2724 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2725 else
2726 err = IOMemoryDescriptorMapAlloc(map, &ref);
2727
2728 *address = ref.mapped;
2729 return (err);
2730 }
2731
2732 IOReturn IOMemoryDescriptor::doMap(
2733 vm_map_t __addressMap,
2734 IOVirtualAddress * __address,
2735 IOOptionBits options,
2736 IOByteCount __offset,
2737 IOByteCount __length )
2738 {
2739 #ifndef __LP64__
2740 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2741 #endif /* !__LP64__ */
2742
2743 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2744 mach_vm_size_t offset = mapping->fOffset + __offset;
2745 mach_vm_size_t length = mapping->fLength;
2746
2747 IOReturn err = kIOReturnSuccess;
2748 memory_object_t pager;
2749 mach_vm_size_t pageOffset;
2750 IOPhysicalAddress sourceAddr;
2751 unsigned int lock_count;
2752
2753 do
2754 {
2755 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2756 pageOffset = sourceAddr - trunc_page( sourceAddr );
2757
2758 if( reserved)
2759 pager = (memory_object_t) reserved->devicePager;
2760 else
2761 pager = MACH_PORT_NULL;
2762
2763 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2764 {
2765 upl_t redirUPL2;
2766 vm_size_t size;
2767 int flags;
2768
2769 if (!_memEntry)
2770 {
2771 err = kIOReturnNotReadable;
2772 continue;
2773 }
2774
2775 size = round_page(mapping->fLength + pageOffset);
2776 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2777 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2778
2779 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2780 NULL, NULL,
2781 &flags))
2782 redirUPL2 = NULL;
2783
2784 for (lock_count = 0;
2785 IORecursiveLockHaveLock(gIOMemoryLock);
2786 lock_count++) {
2787 UNLOCK;
2788 }
2789 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2790 for (;
2791 lock_count;
2792 lock_count--) {
2793 LOCK;
2794 }
2795
2796 if (kIOReturnSuccess != err)
2797 {
2798 IOLog("upl_transpose(%x)\n", err);
2799 err = kIOReturnSuccess;
2800 }
2801
2802 if (redirUPL2)
2803 {
2804 upl_commit(redirUPL2, NULL, 0);
2805 upl_deallocate(redirUPL2);
2806 redirUPL2 = 0;
2807 }
2808 {
2809 // swap the memEntries since they now refer to different vm_objects
2810 void * me = _memEntry;
2811 _memEntry = mapping->fMemory->_memEntry;
2812 mapping->fMemory->_memEntry = me;
2813 }
2814 if (pager)
2815 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2816 }
2817 else
2818 {
2819 mach_vm_address_t address;
2820
2821 if (!(options & kIOMapAnywhere))
2822 {
2823 address = trunc_page_64(mapping->fAddress);
2824 if( (mapping->fAddress - address) != pageOffset)
2825 {
2826 err = kIOReturnVMError;
2827 continue;
2828 }
2829 }
2830
2831 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2832 options, (kIOMemoryBufferPageable & _flags),
2833 offset, &address, round_page_64(length + pageOffset));
2834 if( err != KERN_SUCCESS)
2835 continue;
2836
2837 if (!_memEntry || pager)
2838 {
2839 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2840 if (err != KERN_SUCCESS)
2841 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2842 }
2843
2844 #if DEBUG
2845 if (kIOLogMapping & gIOKitDebug)
2846 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2847 err, this, sourceAddr, mapping, address, offset, length);
2848 #endif
2849
2850 if (err == KERN_SUCCESS)
2851 mapping->fAddress = address + pageOffset;
2852 else
2853 mapping->fAddress = NULL;
2854 }
2855 }
2856 while( false );
2857
2858 return (err);
2859 }
2860
2861 IOReturn IOMemoryDescriptor::handleFault(
2862 void * _pager,
2863 vm_map_t addressMap,
2864 mach_vm_address_t address,
2865 mach_vm_size_t sourceOffset,
2866 mach_vm_size_t length,
2867 IOOptionBits options )
2868 {
2869 IOReturn err = kIOReturnSuccess;
2870 memory_object_t pager = (memory_object_t) _pager;
2871 mach_vm_size_t size;
2872 mach_vm_size_t bytes;
2873 mach_vm_size_t page;
2874 mach_vm_size_t pageOffset;
2875 mach_vm_size_t pagerOffset;
2876 IOPhysicalLength segLen;
2877 addr64_t physAddr;
2878
2879 if( !addressMap)
2880 {
2881 if( kIOMemoryRedirected & _flags)
2882 {
2883 #if DEBUG
2884 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2885 #endif
2886 do {
2887 SLEEP;
2888 } while( kIOMemoryRedirected & _flags );
2889 }
2890
2891 return( kIOReturnSuccess );
2892 }
2893
2894 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2895 assert( physAddr );
2896 pageOffset = physAddr - trunc_page_64( physAddr );
2897 pagerOffset = sourceOffset;
2898
2899 size = length + pageOffset;
2900 physAddr -= pageOffset;
2901
2902 segLen += pageOffset;
2903 bytes = size;
2904 do
2905 {
2906 // in the middle of the loop only map whole pages
2907 if( segLen >= bytes)
2908 segLen = bytes;
2909 else if( segLen != trunc_page( segLen))
2910 err = kIOReturnVMError;
2911 if( physAddr != trunc_page_64( physAddr))
2912 err = kIOReturnBadArgument;
2913 if (kIOReturnSuccess != err)
2914 break;
2915
2916 #if DEBUG
2917 if( kIOLogMapping & gIOKitDebug)
2918 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2919 addressMap, address + pageOffset, physAddr + pageOffset,
2920 segLen - pageOffset);
2921 #endif
2922
2923
2924 if( pager) {
2925 if( reserved && reserved->pagerContig) {
2926 IOPhysicalLength allLen;
2927 addr64_t allPhys;
2928
2929 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2930 assert( allPhys );
2931 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2932 }
2933 else
2934 {
2935
2936 for( page = 0;
2937 (page < segLen) && (KERN_SUCCESS == err);
2938 page += page_size)
2939 {
2940 err = device_pager_populate_object(pager, pagerOffset,
2941 (ppnum_t)(atop_64(physAddr + page)), page_size);
2942 pagerOffset += page_size;
2943 }
2944 }
2945 assert( KERN_SUCCESS == err );
2946 if( err)
2947 break;
2948 }
2949
2950 // This call to vm_fault causes an early pmap-level resolution of the
2951 // kernel mappings created above, since faulting them in later can't
2952 // take place from interrupt level.
2953 /* *** ALERT *** */
2954 /* *** Temporary Workaround *** */
2955
2956 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2957 {
2958 vm_fault(addressMap,
2959 (vm_map_offset_t)address,
2960 VM_PROT_READ|VM_PROT_WRITE,
2961 FALSE, THREAD_UNINT, NULL,
2962 (vm_map_offset_t)0);
2963 }
2964
2965 /* *** Temporary Workaround *** */
2966 /* *** ALERT *** */
2967
2968 sourceOffset += segLen - pageOffset;
2969 address += segLen;
2970 bytes -= segLen;
2971 pageOffset = 0;
2972
2973 }
2974 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
2975
2976 if (bytes)
2977 err = kIOReturnBadArgument;
2978
2979 return (err);
2980 }
2981
2982 IOReturn IOMemoryDescriptor::doUnmap(
2983 vm_map_t addressMap,
2984 IOVirtualAddress __address,
2985 IOByteCount __length )
2986 {
2987 IOReturn err;
2988 mach_vm_address_t address;
2989 mach_vm_size_t length;
2990
2991 if (__length)
2992 {
2993 address = __address;
2994 length = __length;
2995 }
2996 else
2997 {
2998 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
2999 address = ((IOMemoryMap *) __address)->fAddress;
3000 length = ((IOMemoryMap *) __address)->fLength;
3001 }
3002
3003 if ((addressMap == kernel_map)
3004 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3005 addressMap = IOPageableMapForAddress( address );
3006
3007 #if DEBUG
3008 if( kIOLogMapping & gIOKitDebug)
3009 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3010 addressMap, address, length );
3011 #endif
3012
3013 err = mach_vm_deallocate( addressMap, address, length );
3014
3015 return (err);
3016 }
3017
3018 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3019 {
3020 IOReturn err = kIOReturnSuccess;
3021 IOMemoryMap * mapping = 0;
3022 OSIterator * iter;
3023
3024 LOCK;
3025
3026 if( doRedirect)
3027 _flags |= kIOMemoryRedirected;
3028 else
3029 _flags &= ~kIOMemoryRedirected;
3030
3031 do {
3032 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3033 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3034 mapping->redirect( safeTask, doRedirect );
3035
3036 iter->release();
3037 }
3038 } while( false );
3039
3040 if (!doRedirect)
3041 {
3042 WAKEUP;
3043 }
3044
3045 UNLOCK;
3046
3047 #ifndef __LP64__
3048 // temporary binary compatibility
3049 IOSubMemoryDescriptor * subMem;
3050 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3051 err = subMem->redirect( safeTask, doRedirect );
3052 else
3053 err = kIOReturnSuccess;
3054 #endif /* !__LP64__ */
3055
3056 return( err );
3057 }
3058
3059 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3060 {
3061 IOReturn err = kIOReturnSuccess;
3062
3063 if( fSuperMap) {
3064 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3065 } else {
3066
3067 LOCK;
3068
3069 do
3070 {
3071 if (!fAddress)
3072 break;
3073 if (!fAddressMap)
3074 break;
3075
3076 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3077 && (0 == (fOptions & kIOMapStatic)))
3078 {
3079 IOUnmapPages( fAddressMap, fAddress, fLength );
3080 err = kIOReturnSuccess;
3081 #if DEBUG
3082 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3083 #endif
3084 }
3085 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3086 {
3087 IOOptionBits newMode;
3088 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3089 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3090 }
3091 }
3092 while (false);
3093 UNLOCK;
3094 }
3095
3096 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3097 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3098 && safeTask
3099 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3100 fMemory->redirect(safeTask, doRedirect);
3101
3102 return( err );
3103 }
3104
3105 IOReturn IOMemoryMap::unmap( void )
3106 {
3107 IOReturn err;
3108
3109 LOCK;
3110
3111 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3112 && (0 == (fOptions & kIOMapStatic))) {
3113
3114 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3115
3116 } else
3117 err = kIOReturnSuccess;
3118
3119 if (fAddressMap)
3120 {
3121 vm_map_deallocate(fAddressMap);
3122 fAddressMap = 0;
3123 }
3124
3125 fAddress = 0;
3126
3127 UNLOCK;
3128
3129 return( err );
3130 }
3131
3132 void IOMemoryMap::taskDied( void )
3133 {
3134 LOCK;
3135 if (fUserClientUnmap)
3136 unmap();
3137 if( fAddressMap) {
3138 vm_map_deallocate(fAddressMap);
3139 fAddressMap = 0;
3140 }
3141 fAddressTask = 0;
3142 fAddress = 0;
3143 UNLOCK;
3144 }
3145
3146 IOReturn IOMemoryMap::userClientUnmap( void )
3147 {
3148 fUserClientUnmap = true;
3149 return (kIOReturnSuccess);
3150 }
3151
3152 // Overload the release mechanism. All mappings must be members
3153 // of a memory descriptor's _mappings set. This means that we
3154 // always have 2 references on a mapping. When either of these
3155 // references is released we need to free ourselves.
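/*
 * Hedged illustration, not part of the original source, of the two-reference
 * scheme described above. A freshly created mapping is referenced once by the
 * caller and once via the owning descriptor's _mappings set, so
 * super::taggedRelease(tag, 2) below frees the mapping as soon as a release
 * happens while only those two references remain. The variable names are
 * placeholders.
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                               kIOMapAnywhere, 0, 0);
 *   // retain count is at least 2: the 'map' pointer plus md's _mappings set
 *   map->release();    // count was 2, so free() runs, which also removes
 *                      // the mapping from md's _mappings set
 */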
3156 void IOMemoryMap::taggedRelease(const void *tag) const
3157 {
3158 LOCK;
3159 super::taggedRelease(tag, 2);
3160 UNLOCK;
3161 }
3162
3163 void IOMemoryMap::free()
3164 {
3165 unmap();
3166
3167 if (fMemory)
3168 {
3169 LOCK;
3170 fMemory->removeMapping(this);
3171 UNLOCK;
3172 fMemory->release();
3173 }
3174
3175 if (fOwner && (fOwner != fMemory))
3176 {
3177 LOCK;
3178 fOwner->removeMapping(this);
3179 UNLOCK;
3180 }
3181
3182 if (fSuperMap)
3183 fSuperMap->release();
3184
3185 if (fRedirUPL) {
3186 upl_commit(fRedirUPL, NULL, 0);
3187 upl_deallocate(fRedirUPL);
3188 }
3189
3190 super::free();
3191 }
3192
3193 IOByteCount IOMemoryMap::getLength()
3194 {
3195 return( fLength );
3196 }
3197
3198 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3199 {
3200 #ifndef __LP64__
3201 if (fSuperMap)
3202 fSuperMap->getVirtualAddress();
3203 else if (fAddressMap
3204 && vm_map_is_64bit(fAddressMap)
3205 && (sizeof(IOVirtualAddress) < 8))
3206 {
3207 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3208 }
3209 #endif /* !__LP64__ */
3210
3211 return (fAddress);
3212 }
3213
3214 #ifndef __LP64__
3215 mach_vm_address_t IOMemoryMap::getAddress()
3216 {
3217 return( fAddress);
3218 }
3219
3220 mach_vm_size_t IOMemoryMap::getSize()
3221 {
3222 return( fLength );
3223 }
3224 #endif /* !__LP64__ */
3225
3226
3227 task_t IOMemoryMap::getAddressTask()
3228 {
3229 if( fSuperMap)
3230 return( fSuperMap->getAddressTask());
3231 else
3232 return( fAddressTask);
3233 }
3234
3235 IOOptionBits IOMemoryMap::getMapOptions()
3236 {
3237 return( fOptions);
3238 }
3239
3240 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3241 {
3242 return( fMemory );
3243 }
3244
3245 IOMemoryMap * IOMemoryMap::copyCompatible(
3246 IOMemoryMap * newMapping )
3247 {
3248 task_t task = newMapping->getAddressTask();
3249 mach_vm_address_t toAddress = newMapping->fAddress;
3250 IOOptionBits _options = newMapping->fOptions;
3251 mach_vm_size_t _offset = newMapping->fOffset;
3252 mach_vm_size_t _length = newMapping->fLength;
3253
3254 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3255 return( 0 );
3256 if( (fOptions ^ _options) & kIOMapReadOnly)
3257 return( 0 );
3258 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3259 && ((fOptions ^ _options) & kIOMapCacheMask))
3260 return( 0 );
3261
3262 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3263 return( 0 );
3264
3265 if( _offset < fOffset)
3266 return( 0 );
3267
3268 _offset -= fOffset;
3269
3270 if( (_offset + _length) > fLength)
3271 return( 0 );
3272
3273 retain();
3274 if( (fLength == _length) && (!_offset))
3275 {
3276 newMapping->release();
3277 newMapping = this;
3278 }
3279 else
3280 {
3281 newMapping->fSuperMap = this;
3282 newMapping->fOffset = _offset;
3283 newMapping->fAddress = fAddress + _offset;
3284 }
3285
3286 return( newMapping );
3287 }
3288
3289 IOPhysicalAddress
3290 #ifdef __LP64__
3291 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3292 #else /* !__LP64__ */
3293 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3294 #endif /* !__LP64__ */
3295 {
3296 IOPhysicalAddress address;
3297
3298 LOCK;
3299 #ifdef __LP64__
3300 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3301 #else /* !__LP64__ */
3302 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3303 #endif /* !__LP64__ */
3304 UNLOCK;
3305
3306 return( address );
3307 }
3308
3309 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3310
3311 #undef super
3312 #define super OSObject
3313
3314 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3315
3316 void IOMemoryDescriptor::initialize( void )
3317 {
3318 if( 0 == gIOMemoryLock)
3319 gIOMemoryLock = IORecursiveLockAlloc();
3320
3321 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3322 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3323 gIOLastPage = IOGetLastPageNumber();
3324 }
3325
3326 void IOMemoryDescriptor::free( void )
3327 {
3328 if( _mappings)
3329 _mappings->release();
3330
3331 super::free();
3332 }
3333
3334 IOMemoryMap * IOMemoryDescriptor::setMapping(
3335 task_t intoTask,
3336 IOVirtualAddress mapAddress,
3337 IOOptionBits options )
3338 {
3339 return (createMappingInTask( intoTask, mapAddress,
3340 options | kIOMapStatic,
3341 0, getLength() ));
3342 }
3343
3344 IOMemoryMap * IOMemoryDescriptor::map(
3345 IOOptionBits options )
3346 {
3347 return (createMappingInTask( kernel_task, 0,
3348 options | kIOMapAnywhere,
3349 0, getLength() ));
3350 }
3351
3352 #ifndef __LP64__
3353 IOMemoryMap * IOMemoryDescriptor::map(
3354 task_t intoTask,
3355 IOVirtualAddress atAddress,
3356 IOOptionBits options,
3357 IOByteCount offset,
3358 IOByteCount length )
3359 {
3360 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3361 {
3362 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3363 return (0);
3364 }
3365
3366 return (createMappingInTask(intoTask, atAddress,
3367 options, offset, length));
3368 }
3369 #endif /* !__LP64__ */
3370
3371 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3372 task_t intoTask,
3373 mach_vm_address_t atAddress,
3374 IOOptionBits options,
3375 mach_vm_size_t offset,
3376 mach_vm_size_t length)
3377 {
3378 IOMemoryMap * result;
3379 IOMemoryMap * mapping;
3380
3381 if (0 == length)
3382 length = getLength();
3383
3384 mapping = new IOMemoryMap;
3385
3386 if( mapping
3387 && !mapping->init( intoTask, atAddress,
3388 options, offset, length )) {
3389 mapping->release();
3390 mapping = 0;
3391 }
3392
3393 if (mapping)
3394 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3395 else
3396 result = 0;
3397
3398 #if DEBUG
3399 if (!result)
3400 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3401 this, atAddress, options, offset, length);
3402 #endif
3403
3404 return (result);
3405 }
3406
3407 #ifndef __LP64__ // there is only a 64 bit version for LP64
3408 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3409 IOOptionBits options,
3410 IOByteCount offset)
3411 {
3412 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3413 }
3414 #endif
3415
3416 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3417 IOOptionBits options,
3418 mach_vm_size_t offset)
3419 {
3420 IOReturn err = kIOReturnSuccess;
3421 IOMemoryDescriptor * physMem = 0;
3422
3423 LOCK;
3424
3425 if (fAddress && fAddressMap) do
3426 {
3427 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3428 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3429 {
3430 physMem = fMemory;
3431 physMem->retain();
3432 }
3433
3434 if (!fRedirUPL)
3435 {
3436 vm_size_t size = round_page(fLength);
3437 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3438 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3439 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3440 NULL, NULL,
3441 &flags))
3442 fRedirUPL = 0;
3443
3444 if (physMem)
3445 {
3446 IOUnmapPages( fAddressMap, fAddress, fLength );
3447 if (false)
3448 physMem->redirect(0, true);
3449 }
3450 }
3451
3452 if (newBackingMemory)
3453 {
3454 if (newBackingMemory != fMemory)
3455 {
3456 fOffset = 0;
3457 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3458 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3459 offset, fLength))
3460 err = kIOReturnError;
3461 }
3462 if (fRedirUPL)
3463 {
3464 upl_commit(fRedirUPL, NULL, 0);
3465 upl_deallocate(fRedirUPL);
3466 fRedirUPL = 0;
3467 }
3468 if (false && physMem)
3469 physMem->redirect(0, false);
3470 }
3471 }
3472 while (false);
3473
3474 UNLOCK;
3475
3476 if (physMem)
3477 physMem->release();
3478
3479 return (err);
3480 }
3481
3482 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3483 IOMemoryDescriptor * owner,
3484 task_t __intoTask,
3485 IOVirtualAddress __address,
3486 IOOptionBits options,
3487 IOByteCount __offset,
3488 IOByteCount __length )
3489 {
3490 #ifndef __LP64__
3491 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3492 #endif /* !__LP64__ */
3493
3494 IOMemoryDescriptor * mapDesc = 0;
3495 IOMemoryMap * result = 0;
3496 OSIterator * iter;
3497
3498 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3499 mach_vm_size_t offset = mapping->fOffset + __offset;
3500 mach_vm_size_t length = mapping->fLength;
3501
3502 mapping->fOffset = offset;
3503
3504 LOCK;
3505
3506 do
3507 {
3508 if (kIOMapStatic & options)
3509 {
3510 result = mapping;
3511 addMapping(mapping);
3512 mapping->setMemoryDescriptor(this, 0);
3513 continue;
3514 }
3515
3516 if (kIOMapUnique & options)
3517 {
3518 IOPhysicalAddress phys;
3519 IOByteCount physLen;
3520
3521 // if (owner != this) continue;
3522
3523 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3524 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3525 {
3526 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3527 if (!phys || (physLen < length))
3528 continue;
3529
3530 mapDesc = IOMemoryDescriptor::withAddressRange(
3531 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3532 if (!mapDesc)
3533 continue;
3534 offset = 0;
3535 mapping->fOffset = offset;
3536 }
3537 }
3538 else
3539 {
3540 // look for a compatible existing mapping
3541 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3542 {
3543 IOMemoryMap * lookMapping;
3544 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3545 {
3546 if ((result = lookMapping->copyCompatible(mapping)))
3547 {
3548 addMapping(result);
3549 result->setMemoryDescriptor(this, offset);
3550 break;
3551 }
3552 }
3553 iter->release();
3554 }
3555 if (result || (options & kIOMapReference))
3556 continue;
3557 }
3558
3559 if (!mapDesc)
3560 {
3561 mapDesc = this;
3562 mapDesc->retain();
3563 }
3564 IOReturn
3565 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3566 if (kIOReturnSuccess == kr)
3567 {
3568 result = mapping;
3569 mapDesc->addMapping(result);
3570 result->setMemoryDescriptor(mapDesc, offset);
3571 }
3572 else
3573 {
3574 mapping->release();
3575 mapping = NULL;
3576 }
3577 }
3578 while( false );
3579
3580 UNLOCK;
3581
3582 if (mapDesc)
3583 mapDesc->release();
3584
3585 return (result);
3586 }
3587
3588 void IOMemoryDescriptor::addMapping(
3589 IOMemoryMap * mapping )
3590 {
3591 if( mapping)
3592 {
3593 if( 0 == _mappings)
3594 _mappings = OSSet::withCapacity(1);
3595 if( _mappings )
3596 _mappings->setObject( mapping );
3597 }
3598 }
3599
3600 void IOMemoryDescriptor::removeMapping(
3601 IOMemoryMap * mapping )
3602 {
3603 if( _mappings)
3604 _mappings->removeObject( mapping);
3605 }
3606
3607 #ifndef __LP64__
3608 // obsolete initializers
3609 // - initWithOptions is the designated initializer
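/*
 * Illustrative alternative, not part of the original source: instead of the
 * obsolete initializers below, descriptors are created through the factory
 * methods that feed initWithOptions(), for example withAddressRange(). The
 * bufferAddress and bufferLength values here are hypothetical placeholders.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                 bufferAddress, bufferLength,
 *                                 kIODirectionOutIn, current_task());
 *   if (md)
 *   {
 *       // ... prepare(), map or DMA, complete() as usual ...
 *       md->release();
 *   }
 */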
3610 bool
3611 IOMemoryDescriptor::initWithAddress(void * address,
3612 IOByteCount length,
3613 IODirection direction)
3614 {
3615 return( false );
3616 }
3617
3618 bool
3619 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3620 IOByteCount length,
3621 IODirection direction,
3622 task_t task)
3623 {
3624 return( false );
3625 }
3626
3627 bool
3628 IOMemoryDescriptor::initWithPhysicalAddress(
3629 IOPhysicalAddress address,
3630 IOByteCount length,
3631 IODirection direction )
3632 {
3633 return( false );
3634 }
3635
3636 bool
3637 IOMemoryDescriptor::initWithRanges(
3638 IOVirtualRange * ranges,
3639 UInt32 withCount,
3640 IODirection direction,
3641 task_t task,
3642 bool asReference)
3643 {
3644 return( false );
3645 }
3646
3647 bool
3648 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3649 UInt32 withCount,
3650 IODirection direction,
3651 bool asReference)
3652 {
3653 return( false );
3654 }
3655
3656 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3657 IOByteCount * lengthOfSegment)
3658 {
3659 return( 0 );
3660 }
3661 #endif /* !__LP64__ */
3662
3663 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3664
3665 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3666 {
3667 OSSymbol const *keys[2];
3668 OSObject *values[2];
3669 struct SerData {
3670 user_addr_t address;
3671 user_size_t length;
3672 } *vcopy;
3673 unsigned int index, nRanges;
3674 bool result;
3675
3676 IOOptionBits type = _flags & kIOMemoryTypeMask;
3677
3678 if (s == NULL) return false;
3679 if (s->previouslySerialized(this)) return true;
3680
3681 // Pretend we are an array.
3682 if (!s->addXMLStartTag(this, "array")) return false;
3683
3684 nRanges = _rangesCount;
3685 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3686 if (vcopy == 0) return false;
3687
3688 keys[0] = OSSymbol::withCString("address");
3689 keys[1] = OSSymbol::withCString("length");
3690
3691 result = false;
3692 values[0] = values[1] = 0;
3693
3694 // From this point on, errors exit through the bail: label.
3695
3696 // Copy the volatile data so we don't have to allocate memory
3697 // while the lock is held.
3698 LOCK;
3699 if (nRanges == _rangesCount) {
3700 Ranges vec = _ranges;
3701 for (index = 0; index < nRanges; index++) {
3702 user_addr_t addr; IOByteCount len;
3703 getAddrLenForInd(addr, len, type, vec, index);
3704 vcopy[index].address = addr;
3705 vcopy[index].length = len;
3706 }
3707 } else {
3708 // The descriptor changed out from under us. Give up.
3709 UNLOCK;
3710 result = false;
3711 goto bail;
3712 }
3713 UNLOCK;
3714
3715 for (index = 0; index < nRanges; index++)
3716 {
3717 user_addr_t addr = vcopy[index].address;
3718 IOByteCount len = (IOByteCount) vcopy[index].length;
3719 values[0] =
3720 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3721 if (values[0] == 0) {
3722 result = false;
3723 goto bail;
3724 }
3725 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3726 if (values[1] == 0) {
3727 result = false;
3728 goto bail;
3729 }
3730 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3731 if (dict == 0) {
3732 result = false;
3733 goto bail;
3734 }
3735 values[0]->release();
3736 values[1]->release();
3737 values[0] = values[1] = 0;
3738
3739 result = dict->serialize(s);
3740 dict->release();
3741 if (!result) {
3742 goto bail;
3743 }
3744 }
3745 result = s->addXMLEndTag("array");
3746
3747 bail:
3748 if (values[0])
3749 values[0]->release();
3750 if (values[1])
3751 values[1]->release();
3752 if (keys[0])
3753 keys[0]->release();
3754 if (keys[1])
3755 keys[1]->release();
3756 if (vcopy)
3757 IOFree(vcopy, sizeof(SerData) * nRanges);
3758 return result;
3759 }
3760
3761 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3762
3763 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3764 #ifdef __LP64__
3765 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3766 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3767 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3768 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3769 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3770 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3771 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3772 #else /* !__LP64__ */
3773 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3774 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3775 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3776 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3777 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3778 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3779 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3780 #endif /* !__LP64__ */
3781 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3782 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3783 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3784 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3785 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3786 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3787 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3788 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3789
3790 /* ex-inline function implementation */
3791 IOPhysicalAddress
3792 IOMemoryDescriptor::getPhysicalAddress()
3793 { return( getPhysicalSegment( 0, 0 )); }
3794
3795
3796