apple/xnu: iokit/Kernel/IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IODMACommand.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45
46 #ifndef __LP64__
47 #include <IOKit/IOSubMemoryDescriptor.h>
48 #endif /* !__LP64__ */
49
50 #include <IOKit/IOKitDebug.h>
51 #include <libkern/OSDebug.h>
52
53 #include "IOKitKernelInternal.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 extern void ipc_port_release_send(ipc_port_t port);
76
77 kern_return_t
78 memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
86
87 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
88
89 __END_DECLS
90
91 #define kIOMaximumMappedIOByteCount (512*1024*1024)
92
93 #define kIOMapperWaitSystem ((IOMapper *) 1)
94
95 static IOMapper * gIOSystemMapper = NULL;
96
97 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
98
99 ppnum_t gIOLastPage;
100
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
104
105 #define super IOMemoryDescriptor
106
107 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
108
109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111 static IORecursiveLock * gIOMemoryLock;
112
113 #define LOCK IORecursiveLockLock( gIOMemoryLock)
114 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
115 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
116 #define WAKEUP \
117 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
118
119 #if 0
120 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
121 #else
122 #define DEBG(fmt, args...) {}
123 #endif
124
125 #define IOMD_DEBUG_DMAACTIVE 1
126
127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
129 // Some data structures and accessor macros used by the initWithOptions
130 // function.
131
132 enum ioPLBlockFlags {
133 kIOPLOnDevice = 0x00000001,
134 kIOPLExternUPL = 0x00000002,
135 };
136
137 struct typePersMDData
138 {
139 const IOGeneralMemoryDescriptor *fMD;
140 ipc_port_t fMemEntry;
141 };
142
143 struct ioPLBlock {
144 upl_t fIOPL;
145 vm_address_t fPageInfo; // Pointer to page list or index into it
146 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
147 ppnum_t fMappedPage; // Page number of first page in this iopl
148 unsigned int fPageOffset; // Offset within first page of iopl
149 unsigned int fFlags; // Flags
150 };
151
152 struct ioGMDData {
153 IOMapper * fMapper;
154 uint8_t fDMAMapNumAddressBits;
155 uint64_t fDMAMapAlignment;
156 addr64_t fMappedBase;
157 uint64_t fPreparationID;
158 unsigned int fPageCnt;
159 #if __LP64__
160 // align arrays to 8 bytes so following macros work
161 unsigned int fPad;
162 #endif
163 upl_page_info_t fPageList[1]; /* variable length */
164 ioPLBlock fBlocks[1]; /* variable length */
165 };
166
167 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
168 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
169 #define getNumIOPL(osd, d) \
170 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
171 #define getPageList(d) (&(d->fPageList[0]))
172 #define computeDataSize(p, u) \
173 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
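
// Layout sketch (illustrative): the OSData buffer behind _memoryEntries holds
// one ioGMDData header followed by two variable-length arrays, so for a
// descriptor tracking p pages in u iopl blocks it looks like
//
//   [ ioGMDData | upl_page_info_t fPageList[p] | ioPLBlock fBlocks[u] ]
//
// computeDataSize(p, u) is the byte size of that whole buffer, and
// getIOPLList() locates the ioPLBlock array by skipping past fPageList.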
174
175
176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
178 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
179
180
181 extern "C" {
182
183 kern_return_t device_data_action(
184 uintptr_t device_handle,
185 ipc_port_t device_pager,
186 vm_prot_t protection,
187 vm_object_offset_t offset,
188 vm_size_t size)
189 {
190 kern_return_t kr;
191 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
192 IOMemoryDescriptor * memDesc;
193
194 LOCK;
195 memDesc = ref->dp.memory;
196 if( memDesc)
197 {
198 memDesc->retain();
199 kr = memDesc->handleFault( device_pager, 0, 0,
200 offset, size, kIOMapDefaultCache /*?*/);
201 memDesc->release();
202 }
203 else
204 kr = KERN_ABORTED;
205 UNLOCK;
206
207 return( kr );
208 }
209
210 kern_return_t device_close(
211 uintptr_t device_handle)
212 {
213 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
214
215 IODelete( ref, IOMemoryDescriptorReserved, 1 );
216
217 return( kIOReturnSuccess );
218 }
219 }; // end extern "C"
220
221 // Note this inline function uses C++ reference arguments to return values.
222 // This means that pointers are not passed and NULLs don't have to be
223 // checked for, as a NULL reference is illegal.
224 static inline void
225 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
226 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227 {
228 assert(kIOMemoryTypeUIO == type
229 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
230 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
231 if (kIOMemoryTypeUIO == type) {
232 user_size_t us;
233 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
234 }
235 #ifndef __LP64__
236 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
237 IOAddressRange cur = r.v64[ind];
238 addr = cur.address;
239 len = cur.length;
240 }
241 #endif /* !__LP64__ */
242 else {
243 IOVirtualRange cur = r.v[ind];
244 addr = cur.address;
245 len = cur.length;
246 }
247 }
248
249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250
251 IOMemoryDescriptor *
252 IOMemoryDescriptor::withAddress(void * address,
253 IOByteCount length,
254 IODirection direction)
255 {
256 return IOMemoryDescriptor::
257 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
258 }
259
260 #ifndef __LP64__
261 IOMemoryDescriptor *
262 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
263 IOByteCount length,
264 IODirection direction,
265 task_t task)
266 {
267 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
268 if (that)
269 {
270 if (that->initWithAddress(address, length, direction, task))
271 return that;
272
273 that->release();
274 }
275 return 0;
276 }
277 #endif /* !__LP64__ */
278
279 IOMemoryDescriptor *
280 IOMemoryDescriptor::withPhysicalAddress(
281 IOPhysicalAddress address,
282 IOByteCount length,
283 IODirection direction )
284 {
285 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
286 }
287
288 #ifndef __LP64__
289 IOMemoryDescriptor *
290 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
291 UInt32 withCount,
292 IODirection direction,
293 task_t task,
294 bool asReference)
295 {
296 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
297 if (that)
298 {
299 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
300 return that;
301
302 that->release();
303 }
304 return 0;
305 }
306 #endif /* !__LP64__ */
307
308 IOMemoryDescriptor *
309 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
310 mach_vm_size_t length,
311 IOOptionBits options,
312 task_t task)
313 {
314 IOAddressRange range = { address, length };
315 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
316 }
317
318 IOMemoryDescriptor *
319 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
320 UInt32 rangeCount,
321 IOOptionBits options,
322 task_t task)
323 {
324 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
325 if (that)
326 {
327 if (task)
328 options |= kIOMemoryTypeVirtual64;
329 else
330 options |= kIOMemoryTypePhysical64;
331
332 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
333 return that;
334
335 that->release();
336 }
337
338 return 0;
339 }
340
341
342 /*
343 * withOptions:
344 *
345 * Create a new IOMemoryDescriptor. The buffer is made up of several
346 * virtual address ranges, from a given task.
347 *
348 * Passing the ranges as a reference will avoid an extra allocation.
349 */
350 IOMemoryDescriptor *
351 IOMemoryDescriptor::withOptions(void * buffers,
352 UInt32 count,
353 UInt32 offset,
354 task_t task,
355 IOOptionBits opts,
356 IOMapper * mapper)
357 {
358 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
359
360 if (self
361 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
362 {
363 self->release();
364 return 0;
365 }
366
367 return self;
368 }
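
/*
 * Usage sketch (illustrative; 'buffer', 'length' and 'clientTask' are
 * hypothetical values describing a valid mapping in that task):
 *
 *   IOAddressRange range = { (mach_vm_address_t) buffer, length };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *       &range, 1, 0, clientTask,
 *       kIOMemoryTypeVirtual64 | kIODirectionOut, 0);
 *   if (md && (kIOReturnSuccess == md->prepare()))
 *   {
 *       // ... run the I/O, typically via an IODMACommand ...
 *       md->complete();
 *   }
 *   if (md)
 *       md->release();
 *
 * The withAddressRange/withAddressRanges wrappers above select the
 * kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 flag automatically.
 */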
369
370 bool IOMemoryDescriptor::initWithOptions(void * buffers,
371 UInt32 count,
372 UInt32 offset,
373 task_t task,
374 IOOptionBits options,
375 IOMapper * mapper)
376 {
377 return( false );
378 }
379
380 #ifndef __LP64__
381 IOMemoryDescriptor *
382 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
383 UInt32 withCount,
384 IODirection direction,
385 bool asReference)
386 {
387 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
388 if (that)
389 {
390 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
391 return that;
392
393 that->release();
394 }
395 return 0;
396 }
397
398 IOMemoryDescriptor *
399 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
400 IOByteCount offset,
401 IOByteCount length,
402 IODirection direction)
403 {
404 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
405 }
406 #endif /* !__LP64__ */
407
408 IOMemoryDescriptor *
409 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
410 {
411 IOGeneralMemoryDescriptor *origGenMD =
412 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
413
414 if (origGenMD)
415 return IOGeneralMemoryDescriptor::
416 withPersistentMemoryDescriptor(origGenMD);
417 else
418 return 0;
419 }
420
421 IOMemoryDescriptor *
422 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
423 {
424 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
425
426 if (!sharedMem)
427 return 0;
428
429 if (sharedMem == originalMD->_memEntry) {
430 originalMD->retain(); // Add a new reference to ourselves
431 ipc_port_release_send(sharedMem); // Remove extra send right
432 return originalMD;
433 }
434
435 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
436 typePersMDData initData = { originalMD, sharedMem };
437
438 if (self
439 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
440 self->release();
441 self = 0;
442 }
443 return self;
444 }
445
446 void *IOGeneralMemoryDescriptor::createNamedEntry()
447 {
448 kern_return_t error;
449 ipc_port_t sharedMem;
450
451 IOOptionBits type = _flags & kIOMemoryTypeMask;
452
453 user_addr_t range0Addr;
454 IOByteCount range0Len;
455 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
456 range0Addr = trunc_page_64(range0Addr);
457
458 vm_size_t size = ptoa_32(_pages);
459 vm_address_t kernelPage = (vm_address_t) range0Addr;
460
461 vm_map_t theMap = ((_task == kernel_task)
462 && (kIOMemoryBufferPageable & _flags))
463 ? IOPageableMapForAddress(kernelPage)
464 : get_task_map(_task);
465
466 memory_object_size_t actualSize = size;
467 vm_prot_t prot = VM_PROT_READ;
468 if (kIODirectionOut != (kIODirectionOutIn & _flags))
469 prot |= VM_PROT_WRITE;
470
471 if (_memEntry)
472 prot |= MAP_MEM_NAMED_REUSE;
473
474 error = mach_make_memory_entry_64(theMap,
475 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
476
477 if (KERN_SUCCESS == error) {
478 if (actualSize == size) {
479 return sharedMem;
480 } else {
481 #if IOASSERT
482 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
483 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
484 #endif
485 ipc_port_release_send( sharedMem );
486 }
487 }
488
489 return MACH_PORT_NULL;
490 }
491
492 #ifndef __LP64__
493 bool
494 IOGeneralMemoryDescriptor::initWithAddress(void * address,
495 IOByteCount withLength,
496 IODirection withDirection)
497 {
498 _singleRange.v.address = (vm_offset_t) address;
499 _singleRange.v.length = withLength;
500
501 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
502 }
503
504 bool
505 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
506 IOByteCount withLength,
507 IODirection withDirection,
508 task_t withTask)
509 {
510 _singleRange.v.address = address;
511 _singleRange.v.length = withLength;
512
513 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
514 }
515
516 bool
517 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
518 IOPhysicalAddress address,
519 IOByteCount withLength,
520 IODirection withDirection )
521 {
522 _singleRange.p.address = address;
523 _singleRange.p.length = withLength;
524
525 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
526 }
527
528 bool
529 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
530 IOPhysicalRange * ranges,
531 UInt32 count,
532 IODirection direction,
533 bool reference)
534 {
535 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
536
537 if (reference)
538 mdOpts |= kIOMemoryAsReference;
539
540 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
541 }
542
543 bool
544 IOGeneralMemoryDescriptor::initWithRanges(
545 IOVirtualRange * ranges,
546 UInt32 count,
547 IODirection direction,
548 task_t task,
549 bool reference)
550 {
551 IOOptionBits mdOpts = direction;
552
553 if (reference)
554 mdOpts |= kIOMemoryAsReference;
555
556 if (task) {
557 mdOpts |= kIOMemoryTypeVirtual;
558
559 // Auto-prepare if this is a kernel memory descriptor, as very few
560 // clients bother to prepare() kernel memory.
561 // This was never enforced, so auto-preparing preserves existing behaviour.
562 if (task == kernel_task)
563 mdOpts |= kIOMemoryAutoPrepare;
564 }
565 else
566 mdOpts |= kIOMemoryTypePhysical;
567
568 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
569 }
570 #endif /* !__LP64__ */
571
572 /*
573 * initWithOptions:
574 *
575 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
576 * from a given task, several physical ranges, an UPL from the ubc
577 * system or a uio (may be 64bit) from the BSD subsystem.
578 *
579 * Passing the ranges as a reference will avoid an extra allocation.
580 *
581 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
582 * existing instance -- note this behavior is not commonly supported in other
583 * I/O Kit classes, although it is supported here.
584 */
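
/*
 * Re-use sketch (illustrative; 'md' is a hypothetical, already constructed
 * IOGeneralMemoryDescriptor and 'newAddress'/'newLength'/'clientTask'
 * describe the new target): the same instance may be retargeted rather
 * than reallocated.
 *
 *   IOAddressRange newRange = { newAddress, newLength };
 *   bool ok = md->initWithOptions(&newRange, 1, 0, clientTask,
 *                                 kIOMemoryTypeVirtual64 | kIODirectionIn, 0);
 *
 * As the code below shows, any outstanding wirings are completed and the
 * previous ranges released before the new state is established.
 */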
585
586 bool
587 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
588 UInt32 count,
589 UInt32 offset,
590 task_t task,
591 IOOptionBits options,
592 IOMapper * mapper)
593 {
594 IOOptionBits type = options & kIOMemoryTypeMask;
595
596 #ifndef __LP64__
597 if (task
598 && (kIOMemoryTypeVirtual == type)
599 && vm_map_is_64bit(get_task_map(task))
600 && ((IOVirtualRange *) buffers)->address)
601 {
602 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
603 return false;
604 }
605 #endif /* !__LP64__ */
606
607 // Grab the original MD's configuration data to initialise the
608 // arguments to this function.
609 if (kIOMemoryTypePersistentMD == type) {
610
611 typePersMDData *initData = (typePersMDData *) buffers;
612 const IOGeneralMemoryDescriptor *orig = initData->fMD;
613 ioGMDData *dataP = getDataP(orig->_memoryEntries);
614
615 // Only accept persistent memory descriptors with valid dataP data.
616 assert(orig->_rangesCount == 1);
617 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
618 return false;
619
620 _memEntry = initData->fMemEntry; // Grab the new named entry
621 options = orig->_flags & ~kIOMemoryAsReference;
622 type = options & kIOMemoryTypeMask;
623 buffers = orig->_ranges.v;
624 count = orig->_rangesCount;
625
626 // Now grab the original task and whatever mapper was previously used
627 task = orig->_task;
628 mapper = dataP->fMapper;
629
630 // We are ready to go through the original initialisation now
631 }
632
633 switch (type) {
634 case kIOMemoryTypeUIO:
635 case kIOMemoryTypeVirtual:
636 #ifndef __LP64__
637 case kIOMemoryTypeVirtual64:
638 #endif /* !__LP64__ */
639 assert(task);
640 if (!task)
641 return false;
642 break;
643
644 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
645 #ifndef __LP64__
646 case kIOMemoryTypePhysical64:
647 #endif /* !__LP64__ */
648 case kIOMemoryTypeUPL:
649 assert(!task);
650 break;
651 default:
652 return false; /* bad argument */
653 }
654
655 assert(buffers);
656 assert(count);
657
658 /*
659 * We can check the _initialized instance variable before having ever set
660 * it to an initial value because I/O Kit guarantees that all our instance
661 * variables are zeroed on an object's allocation.
662 */
663
664 if (_initialized) {
665 /*
666 * An existing memory descriptor is being retargeted to point to
667 * somewhere else. Clean up our present state.
668 */
669 IOOptionBits type = _flags & kIOMemoryTypeMask;
670 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
671 {
672 while (_wireCount)
673 complete();
674 }
675 if (_ranges.v && !(kIOMemoryAsReference & _flags))
676 {
677 if (kIOMemoryTypeUIO == type)
678 uio_free((uio_t) _ranges.v);
679 #ifndef __LP64__
680 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
681 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
682 #endif /* !__LP64__ */
683 else
684 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
685 }
686
687 if (_memEntry)
688 {
689 ipc_port_release_send((ipc_port_t) _memEntry);
690 _memEntry = 0;
691 }
692 if (_mappings)
693 _mappings->flushCollection();
694 }
695 else {
696 if (!super::init())
697 return false;
698 _initialized = true;
699 }
700
701 // Grab the appropriate mapper
702 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
703 if (kIOMemoryMapperNone & options)
704 mapper = 0; // No Mapper
705 else if (mapper == kIOMapperSystem) {
706 IOMapper::checkForSystemMapper();
707 gIOSystemMapper = mapper = IOMapper::gSystem;
708 }
709
710 // Temp binary compatibility for kIOMemoryThreadSafe
711 if (kIOMemoryReserved6156215 & options)
712 {
713 options &= ~kIOMemoryReserved6156215;
714 options |= kIOMemoryThreadSafe;
715 }
716 // Remove the dynamic internal use flags from the initial setting
717 options &= ~(kIOMemoryPreparedReadOnly);
718 _flags = options;
719 _task = task;
720
721 #ifndef __LP64__
722 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
723 #endif /* !__LP64__ */
724
725 __iomd_reservedA = 0;
726 __iomd_reservedB = 0;
727 _highestPage = 0;
728
729 if (kIOMemoryThreadSafe & options)
730 {
731 if (!_prepareLock)
732 _prepareLock = IOLockAlloc();
733 }
734 else if (_prepareLock)
735 {
736 IOLockFree(_prepareLock);
737 _prepareLock = NULL;
738 }
739
740 if (kIOMemoryTypeUPL == type) {
741
742 ioGMDData *dataP;
743 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
744
745 if (!initMemoryEntries(dataSize, mapper)) return (false);
746 dataP = getDataP(_memoryEntries);
747 dataP->fPageCnt = 0;
748
749 // _wireCount++; // UPLs start out life wired
750
751 _length = count;
752 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
753
754 ioPLBlock iopl;
755 iopl.fIOPL = (upl_t) buffers;
756 upl_set_referenced(iopl.fIOPL, true);
757 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
758
759 if (upl_get_size(iopl.fIOPL) < (count + offset))
760 panic("short external upl");
761
762 _highestPage = upl_get_highest_page(iopl.fIOPL);
763
764 // Set the kIOPLOnDevice flag, conveniently equal to 1
765 iopl.fFlags = pageList->device | kIOPLExternUPL;
766 if (!pageList->device) {
767 // Pre-compute the offset into the UPL's page list
768 pageList = &pageList[atop_32(offset)];
769 offset &= PAGE_MASK;
770 }
771 iopl.fIOMDOffset = 0;
772 iopl.fMappedPage = 0;
773 iopl.fPageInfo = (vm_address_t) pageList;
774 iopl.fPageOffset = offset;
775 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
776 }
777 else {
778 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
779 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
780
781 // Initialize the memory descriptor
782 if (options & kIOMemoryAsReference) {
783 #ifndef __LP64__
784 _rangesIsAllocated = false;
785 #endif /* !__LP64__ */
786
787 // Hack assignment to get the buffer arg into _ranges.
788 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
789 // work, C++ sigh.
790 // This also initialises the uio & physical ranges.
791 _ranges.v = (IOVirtualRange *) buffers;
792 }
793 else {
794 #ifndef __LP64__
795 _rangesIsAllocated = true;
796 #endif /* !__LP64__ */
797 switch (type)
798 {
799 case kIOMemoryTypeUIO:
800 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
801 break;
802
803 #ifndef __LP64__
804 case kIOMemoryTypeVirtual64:
805 case kIOMemoryTypePhysical64:
806 if (count == 1
807 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
808 ) {
809 if (kIOMemoryTypeVirtual64 == type)
810 type = kIOMemoryTypeVirtual;
811 else
812 type = kIOMemoryTypePhysical;
813 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
814 _rangesIsAllocated = false;
815 _ranges.v = &_singleRange.v;
816 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
817 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
818 break;
819 }
820 _ranges.v64 = IONew(IOAddressRange, count);
821 if (!_ranges.v64)
822 return false;
823 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
824 break;
825 #endif /* !__LP64__ */
826 case kIOMemoryTypeVirtual:
827 case kIOMemoryTypePhysical:
828 if (count == 1) {
829 _flags |= kIOMemoryAsReference;
830 #ifndef __LP64__
831 _rangesIsAllocated = false;
832 #endif /* !__LP64__ */
833 _ranges.v = &_singleRange.v;
834 } else {
835 _ranges.v = IONew(IOVirtualRange, count);
836 if (!_ranges.v)
837 return false;
838 }
839 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
840 break;
841 }
842 }
843
844 // Find starting address within the vector of ranges
845 Ranges vec = _ranges;
846 UInt32 length = 0;
847 UInt32 pages = 0;
848 for (unsigned ind = 0; ind < count; ind++) {
849 user_addr_t addr;
850 IOPhysicalLength len;
851
852 // addr & len are returned by this function
853 getAddrLenForInd(addr, len, type, vec, ind);
854 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
855 len += length;
856 assert(len >= length); // Check for 32 bit wrap around
857 length = len;
858
859 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
860 {
861 ppnum_t highPage = atop_64(addr + len - 1);
862 if (highPage > _highestPage)
863 _highestPage = highPage;
864 }
865 }
866 _length = length;
867 _pages = pages;
868 _rangesCount = count;
869
870 // Auto-prepare memory at creation time.
871 // Implied completion when the descriptor is freed
872 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
873 _wireCount++; // Physical MDs are, by definition, wired
874 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
875 ioGMDData *dataP;
876 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
877
878 if (!initMemoryEntries(dataSize, mapper)) return false;
879 dataP = getDataP(_memoryEntries);
880 dataP->fPageCnt = _pages;
881
882 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
883 _memEntry = createNamedEntry();
884
885 if ((_flags & kIOMemoryAutoPrepare)
886 && prepare() != kIOReturnSuccess)
887 return false;
888 }
889 }
890
891 return true;
892 }
893
894 /*
895 * free
896 *
897 * Free resources.
898 */
899 void IOGeneralMemoryDescriptor::free()
900 {
901 IOOptionBits type = _flags & kIOMemoryTypeMask;
902
903 if( reserved)
904 {
905 LOCK;
906 reserved->dp.memory = 0;
907 UNLOCK;
908 }
909
910 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
911 {
912 while (_wireCount)
913 complete();
914 }
915 if (_memoryEntries)
916 _memoryEntries->release();
917
918 if (_ranges.v && !(kIOMemoryAsReference & _flags))
919 {
920 if (kIOMemoryTypeUIO == type)
921 uio_free((uio_t) _ranges.v);
922 #ifndef __LP64__
923 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
924 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
925 #endif /* !__LP64__ */
926 else
927 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
928
929 _ranges.v = NULL;
930 }
931
932 if (reserved)
933 {
934 if (reserved->dp.devicePager)
935 {
936 // memEntry holds a ref on the device pager which owns reserved
937 // (IOMemoryDescriptorReserved) so no reserved access after this point
938 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
939 }
940 else
941 IODelete(reserved, IOMemoryDescriptorReserved, 1);
942 reserved = NULL;
943 }
944
945 if (_memEntry)
946 ipc_port_release_send( (ipc_port_t) _memEntry );
947
948 if (_prepareLock)
949 IOLockFree(_prepareLock);
950
951 super::free();
952 }
953
954 #ifndef __LP64__
955 void IOGeneralMemoryDescriptor::unmapFromKernel()
956 {
957 panic("IOGMD::unmapFromKernel deprecated");
958 }
959
960 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
961 {
962 panic("IOGMD::mapIntoKernel deprecated");
963 }
964 #endif /* !__LP64__ */
965
966 /*
967 * getDirection:
968 *
969 * Get the direction of the transfer.
970 */
971 IODirection IOMemoryDescriptor::getDirection() const
972 {
973 #ifndef __LP64__
974 if (_direction)
975 return _direction;
976 #endif /* !__LP64__ */
977 return (IODirection) (_flags & kIOMemoryDirectionMask);
978 }
979
980 /*
981 * getLength:
982 *
983 * Get the length of the transfer (over all ranges).
984 */
985 IOByteCount IOMemoryDescriptor::getLength() const
986 {
987 return _length;
988 }
989
990 void IOMemoryDescriptor::setTag( IOOptionBits tag )
991 {
992 _tag = tag;
993 }
994
995 IOOptionBits IOMemoryDescriptor::getTag( void )
996 {
997 return( _tag);
998 }
999
1000 #ifndef __LP64__
1001 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1002 IOPhysicalAddress
1003 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1004 {
1005 addr64_t physAddr = 0;
1006
1007 if( prepare() == kIOReturnSuccess) {
1008 physAddr = getPhysicalSegment64( offset, length );
1009 complete();
1010 }
1011
1012 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1013 }
1014 #endif /* !__LP64__ */
1015
1016 IOByteCount IOMemoryDescriptor::readBytes
1017 (IOByteCount offset, void *bytes, IOByteCount length)
1018 {
1019 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1020 IOByteCount remaining;
1021
1022 // Assert that this entire I/O is within the available range
1023 assert(offset < _length);
1024 assert(offset + length <= _length);
1025 if (offset >= _length) {
1026 return 0;
1027 }
1028
1029 if (kIOMemoryThreadSafe & _flags)
1030 LOCK;
1031
1032 remaining = length = min(length, _length - offset);
1033 while (remaining) { // (process another target segment?)
1034 addr64_t srcAddr64;
1035 IOByteCount srcLen;
1036
1037 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1038 if (!srcAddr64)
1039 break;
1040
1041 // Clip segment length to remaining
1042 if (srcLen > remaining)
1043 srcLen = remaining;
1044
1045 copypv(srcAddr64, dstAddr, srcLen,
1046 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1047
1048 dstAddr += srcLen;
1049 offset += srcLen;
1050 remaining -= srcLen;
1051 }
1052
1053 if (kIOMemoryThreadSafe & _flags)
1054 UNLOCK;
1055
1056 assert(!remaining);
1057
1058 return length - remaining;
1059 }
1060
1061 IOByteCount IOMemoryDescriptor::writeBytes
1062 (IOByteCount offset, const void *bytes, IOByteCount length)
1063 {
1064 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1065 IOByteCount remaining;
1066
1067 // Assert that this entire I/O is within the available range
1068 assert(offset < _length);
1069 assert(offset + length <= _length);
1070
1071 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1072
1073 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1074 return 0;
1075 }
1076
1077 if (kIOMemoryThreadSafe & _flags)
1078 LOCK;
1079
1080 remaining = length = min(length, _length - offset);
1081 while (remaining) { // (process another target segment?)
1082 addr64_t dstAddr64;
1083 IOByteCount dstLen;
1084
1085 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1086 if (!dstAddr64)
1087 break;
1088
1089 // Clip segment length to remaining
1090 if (dstLen > remaining)
1091 dstLen = remaining;
1092
1093 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1094 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1095
1096 srcAddr += dstLen;
1097 offset += dstLen;
1098 remaining -= dstLen;
1099 }
1100
1101 if (kIOMemoryThreadSafe & _flags)
1102 UNLOCK;
1103
1104 assert(!remaining);
1105
1106 return length - remaining;
1107 }
1108
1109 // osfmk/device/iokit_rpc.c
1110 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1111
1112 #ifndef __LP64__
1113 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1114 {
1115 panic("IOGMD::setPosition deprecated");
1116 }
1117 #endif /* !__LP64__ */
1118
1119 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1120
1121 uint64_t
1122 IOGeneralMemoryDescriptor::getPreparationID( void )
1123 {
1124 ioGMDData *dataP;
1125
1126 if (!_wireCount)
1127 return (kIOPreparationIDUnprepared);
1128
1129 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1130 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1131 {
1132 IOMemoryDescriptor::setPreparationID();
1133 return (IOMemoryDescriptor::getPreparationID());
1134 }
1135
1136 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1137 return (kIOPreparationIDUnprepared);
1138
1139 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1140 {
1141 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1142 }
1143 return (dataP->fPreparationID);
1144 }
1145
1146 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1147 {
1148 if (!reserved)
1149 {
1150 reserved = IONew(IOMemoryDescriptorReserved, 1);
1151 if (reserved)
1152 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1153 }
1154 return (reserved);
1155 }
1156
1157 void IOMemoryDescriptor::setPreparationID( void )
1158 {
1159 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1160 {
1161 #if defined(__ppc__ )
1162 reserved->preparationID = gIOMDPreparationID++;
1163 #else
1164 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1165 #endif
1166 }
1167 }
1168
1169 uint64_t IOMemoryDescriptor::getPreparationID( void )
1170 {
1171 if (reserved)
1172 return (reserved->preparationID);
1173 else
1174 return (kIOPreparationIDUnsupported);
1175 }
1176
1177 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1178 {
1179 IOReturn err = kIOReturnSuccess;
1180 DMACommandOps params;
1181 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1182 ioGMDData *dataP;
1183
1184 params = (op & ~kIOMDDMACommandOperationMask & op);
1185 op &= kIOMDDMACommandOperationMask;
1186
1187 if (kIOMDDMAMap == op)
1188 {
1189 if (dataSize < sizeof(IOMDDMAMapArgs))
1190 return kIOReturnUnderrun;
1191
1192 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1193
1194 if (!_memoryEntries
1195 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1196
1197 if (_memoryEntries && data->fMapper)
1198 {
1199 bool remap = false;
1200 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1201 dataP = getDataP(_memoryEntries);
1202 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits)
1203 {
1204 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1205 remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1206 }
1207 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)
1208 {
1209 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1210 remap |= (dataP->fDMAMapAlignment > page_size);
1211 }
1212 remap |= (!whole);
1213 if (remap || !dataP->fMappedBase)
1214 {
1215 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1216 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1217 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1218 {
1219 dataP->fMappedBase = data->fAlloc;
1220 data->fAllocCount = 0; // IOMD owns the alloc now
1221 }
1222 }
1223 else
1224 {
1225 data->fAlloc = dataP->fMappedBase;
1226 data->fAllocCount = 0; // IOMD owns the alloc
1227 }
1228 }
1229
1230 return (err);
1231 }
1232
1233 if (kIOMDAddDMAMapSpec == op)
1234 {
1235 if (dataSize < sizeof(IODMAMapSpecification))
1236 return kIOReturnUnderrun;
1237
1238 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1239
1240 if (!_memoryEntries
1241 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1242
1243 if (_memoryEntries)
1244 {
1245 dataP = getDataP(_memoryEntries);
1246 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1247 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1248 if (data->alignment > dataP->fDMAMapAlignment)
1249 dataP->fDMAMapAlignment = data->alignment;
1250 }
1251 return kIOReturnSuccess;
1252 }
1253
1254 if (kIOMDGetCharacteristics == op) {
1255
1256 if (dataSize < sizeof(IOMDDMACharacteristics))
1257 return kIOReturnUnderrun;
1258
1259 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1260 data->fLength = _length;
1261 data->fSGCount = _rangesCount;
1262 data->fPages = _pages;
1263 data->fDirection = getDirection();
1264 if (!_wireCount)
1265 data->fIsPrepared = false;
1266 else {
1267 data->fIsPrepared = true;
1268 data->fHighestPage = _highestPage;
1269 if (_memoryEntries)
1270 {
1271 dataP = getDataP(_memoryEntries);
1272 ioPLBlock *ioplList = getIOPLList(dataP);
1273 UInt count = getNumIOPL(_memoryEntries, dataP);
1274 if (count == 1)
1275 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1276 }
1277 }
1278
1279 return kIOReturnSuccess;
1280
1281 #if IOMD_DEBUG_DMAACTIVE
1282 } else if (kIOMDDMAActive == op) {
1283 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1284 else {
1285 if (md->__iomd_reservedA)
1286 OSDecrementAtomic(&md->__iomd_reservedA);
1287 else
1288 panic("kIOMDSetDMAInactive");
1289 }
1290 #endif /* IOMD_DEBUG_DMAACTIVE */
1291
1292 } else if (kIOMDWalkSegments != op)
1293 return kIOReturnBadArgument;
1294
1295 // Get the next segment
1296 struct InternalState {
1297 IOMDDMAWalkSegmentArgs fIO;
1298 UInt fOffset2Index;
1299 UInt fIndex;
1300 UInt fNextOffset;
1301 } *isP;
1302
1303 // Find the next segment
1304 if (dataSize < sizeof(*isP))
1305 return kIOReturnUnderrun;
1306
1307 isP = (InternalState *) vData;
1308 UInt offset = isP->fIO.fOffset;
1309 bool mapped = isP->fIO.fMapped;
1310
1311 if (IOMapper::gSystem && mapped
1312 && (!(kIOMemoryHostOnly & _flags))
1313 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1314 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1315 {
1316 if (!_memoryEntries
1317 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1318
1319 dataP = getDataP(_memoryEntries);
1320 if (dataP->fMapper)
1321 {
1322 IODMAMapSpecification mapSpec;
1323 bzero(&mapSpec, sizeof(mapSpec));
1324 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1325 mapSpec.alignment = dataP->fDMAMapAlignment;
1326 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1327 if (kIOReturnSuccess != err) return (err);
1328 }
1329 }
1330
1331 if (offset >= _length)
1332 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1333
1334 // Validate the previous offset
1335 UInt ind, off2Ind = isP->fOffset2Index;
1336 if (!params
1337 && offset
1338 && (offset == isP->fNextOffset || off2Ind <= offset))
1339 ind = isP->fIndex;
1340 else
1341 ind = off2Ind = 0; // Start from beginning
1342
1343 UInt length;
1344 UInt64 address;
1345
1346
1347 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1348
1349 // Physical address based memory descriptor
1350 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1351
1352 // Find the range after the one that contains the offset
1353 mach_vm_size_t len;
1354 for (len = 0; off2Ind <= offset; ind++) {
1355 len = physP[ind].length;
1356 off2Ind += len;
1357 }
1358
1359 // Calculate length within range and starting address
1360 length = off2Ind - offset;
1361 address = physP[ind - 1].address + len - length;
1362
1363 if (true && mapped && _memoryEntries
1364 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1365 {
1366 address = dataP->fMappedBase + offset;
1367 }
1368 else
1369 {
1370 // see how far we can coalesce ranges
1371 while (ind < _rangesCount && address + length == physP[ind].address) {
1372 len = physP[ind].length;
1373 length += len;
1374 off2Ind += len;
1375 ind++;
1376 }
1377 }
1378
1379 // correct contiguous check overshoot
1380 ind--;
1381 off2Ind -= len;
1382 }
1383 #ifndef __LP64__
1384 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1385
1386 // Physical address based memory descriptor
1387 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1388
1389 // Find the range after the one that contains the offset
1390 mach_vm_size_t len;
1391 for (len = 0; off2Ind <= offset; ind++) {
1392 len = physP[ind].length;
1393 off2Ind += len;
1394 }
1395
1396 // Calculate length within range and starting address
1397 length = off2Ind - offset;
1398 address = physP[ind - 1].address + len - length;
1399
1400 if (true && mapped && _memoryEntries
1401 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1402 {
1403 address = dataP->fMappedBase + offset;
1404 }
1405 else
1406 {
1407 // see how far we can coalesce ranges
1408 while (ind < _rangesCount && address + length == physP[ind].address) {
1409 len = physP[ind].length;
1410 length += len;
1411 off2Ind += len;
1412 ind++;
1413 }
1414 }
1415 // correct contiguous check overshoot
1416 ind--;
1417 off2Ind -= len;
1418 }
1419 #endif /* !__LP64__ */
1420 else do {
1421 if (!_wireCount)
1422 panic("IOGMD: not wired for the IODMACommand");
1423
1424 assert(_memoryEntries);
1425
1426 dataP = getDataP(_memoryEntries);
1427 const ioPLBlock *ioplList = getIOPLList(dataP);
1428 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1429 upl_page_info_t *pageList = getPageList(dataP);
1430
1431 assert(numIOPLs > 0);
1432
1433 // Scan through iopl info blocks looking for block containing offset
1434 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1435 ind++;
1436
1437 // Go back to the actual range, as the search goes one past it
1438 ioPLBlock ioplInfo = ioplList[ind - 1];
1439 off2Ind = ioplInfo.fIOMDOffset;
1440
1441 if (ind < numIOPLs)
1442 length = ioplList[ind].fIOMDOffset;
1443 else
1444 length = _length;
1445 length -= offset; // Remainder within iopl
1446
1447 // Subtract this iopl's offset within the total list
1448 offset -= off2Ind;
1449
1450 // If a mapped address is requested and this is a pre-mapped IOPL
1451 // then we just need to compute an offset relative to the mapped base.
1452 if (mapped && dataP->fMappedBase) {
1453 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1454 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
1455 continue; // Done; leave the do/while(false) now
1456 }
1457
1458 // The offset is rebased into the current iopl.
1459 // Now add the iopl 1st page offset.
1460 offset += ioplInfo.fPageOffset;
1461
1462 // For external UPLs the fPageInfo field points directly to
1463 // the upl's upl_page_info_t array.
1464 if (ioplInfo.fFlags & kIOPLExternUPL)
1465 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1466 else
1467 pageList = &pageList[ioplInfo.fPageInfo];
1468
1469 // Check for direct device non-paged memory
1470 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1471 address = ptoa_64(pageList->phys_addr) + offset;
1472 continue; // Done; leave the do/while(false) now
1473 }
1474
1475 // Now we need to compute the index into the pageList
1476 UInt pageInd = atop_32(offset);
1477 offset &= PAGE_MASK;
1478
1479 // Compute the starting address of this segment
1480 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1481 if (!pageAddr) {
1482 panic("!pageList phys_addr");
1483 }
1484
1485 address = ptoa_64(pageAddr) + offset;
1486
1487 // length is currently set to the length of the remainder of the iopl.
1488 // We need to check that the remainder of the iopl is contiguous.
1489 // This is indicated by pageList[ind].phys_addr being sequential.
1490 IOByteCount contigLength = PAGE_SIZE - offset;
1491 while (contigLength < length
1492 && ++pageAddr == pageList[++pageInd].phys_addr)
1493 {
1494 contigLength += PAGE_SIZE;
1495 }
1496
1497 if (contigLength < length)
1498 length = contigLength;
1499
1500
1501 assert(address);
1502 assert(length);
1503
1504 } while (false);
1505
1506 // Update return values and state
1507 isP->fIO.fIOVMAddr = address;
1508 isP->fIO.fLength = length;
1509 isP->fIndex = ind;
1510 isP->fOffset2Index = off2Ind;
1511 isP->fNextOffset = isP->fIO.fOffset + length;
1512
1513 return kIOReturnSuccess;
1514 }
1515
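/*
 * Illustrative: when a system IOMapper is present and the memory has been
 * mapped for DMA, the default options return the mapper (I/O virtual)
 * address, while kIOMemoryMapperNone returns the raw CPU physical address
 * for the same offset ('md' and 'offset' are hypothetical here):
 *
 *   IOByteCount segLen;
 *   addr64_t ioAddr  = md->getPhysicalSegment(offset, &segLen, 0);
 *   addr64_t cpuAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
 */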
1516 addr64_t
1517 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1518 {
1519 IOReturn ret;
1520 addr64_t address = 0;
1521 IOByteCount length = 0;
1522 IOMapper * mapper = gIOSystemMapper;
1523 IOOptionBits type = _flags & kIOMemoryTypeMask;
1524
1525 if (lengthOfSegment)
1526 *lengthOfSegment = 0;
1527
1528 if (offset >= _length)
1529 return 0;
1530
1531 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1532 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1533 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1534 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1535
1536 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1537 {
1538 unsigned rangesIndex = 0;
1539 Ranges vec = _ranges;
1540 user_addr_t addr;
1541
1542 // Find starting address within the vector of ranges
1543 for (;;) {
1544 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1545 if (offset < length)
1546 break;
1547 offset -= length; // (make offset relative)
1548 rangesIndex++;
1549 }
1550
1551 // Now that we have the starting range,
1552 // lets find the last contiguous range
1553 addr += offset;
1554 length -= offset;
1555
1556 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1557 user_addr_t newAddr;
1558 IOPhysicalLength newLen;
1559
1560 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1561 if (addr + length != newAddr)
1562 break;
1563 length += newLen;
1564 }
1565 if (addr)
1566 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1567 }
1568 else
1569 {
1570 IOMDDMAWalkSegmentState _state;
1571 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
1572
1573 state->fOffset = offset;
1574 state->fLength = _length - offset;
1575 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
1576
1577 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1578
1579 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1580 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1581 ret, this, state->fOffset,
1582 state->fIOVMAddr, state->fLength);
1583 if (kIOReturnSuccess == ret)
1584 {
1585 address = state->fIOVMAddr;
1586 length = state->fLength;
1587 }
1588
1589 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1590 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1591
1592 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1593 {
1594 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1595 {
1596 addr64_t origAddr = address;
1597 IOByteCount origLen = length;
1598
1599 address = mapper->mapAddr(origAddr);
1600 length = page_size - (address & (page_size - 1));
1601 while ((length < origLen)
1602 && ((address + length) == mapper->mapAddr(origAddr + length)))
1603 length += page_size;
1604 if (length > origLen)
1605 length = origLen;
1606 }
1607 }
1608 }
1609
1610 if (!address)
1611 length = 0;
1612
1613 if (lengthOfSegment)
1614 *lengthOfSegment = length;
1615
1616 return (address);
1617 }
1618
1619 #ifndef __LP64__
1620 addr64_t
1621 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1622 {
1623 addr64_t address = 0;
1624
1625 if (options & _kIOMemorySourceSegment)
1626 {
1627 address = getSourceSegment(offset, lengthOfSegment);
1628 }
1629 else if (options & kIOMemoryMapperNone)
1630 {
1631 address = getPhysicalSegment64(offset, lengthOfSegment);
1632 }
1633 else
1634 {
1635 address = getPhysicalSegment(offset, lengthOfSegment);
1636 }
1637
1638 return (address);
1639 }
1640
1641 addr64_t
1642 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1643 {
1644 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1645 }
1646
1647 IOPhysicalAddress
1648 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1649 {
1650 addr64_t address = 0;
1651 IOByteCount length = 0;
1652
1653 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1654
1655 if (lengthOfSegment)
1656 length = *lengthOfSegment;
1657
1658 if ((address + length) > 0x100000000ULL)
1659 {
1660 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1661 address, (long) length, (getMetaClass())->getClassName());
1662 }
1663
1664 return ((IOPhysicalAddress) address);
1665 }
1666
1667 addr64_t
1668 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1669 {
1670 IOPhysicalAddress phys32;
1671 IOByteCount length;
1672 addr64_t phys64;
1673 IOMapper * mapper = 0;
1674
1675 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1676 if (!phys32)
1677 return 0;
1678
1679 if (gIOSystemMapper)
1680 mapper = gIOSystemMapper;
1681
1682 if (mapper)
1683 {
1684 IOByteCount origLen;
1685
1686 phys64 = mapper->mapAddr(phys32);
1687 origLen = *lengthOfSegment;
1688 length = page_size - (phys64 & (page_size - 1));
1689 while ((length < origLen)
1690 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1691 length += page_size;
1692 if (length > origLen)
1693 length = origLen;
1694
1695 *lengthOfSegment = length;
1696 }
1697 else
1698 phys64 = (addr64_t) phys32;
1699
1700 return phys64;
1701 }
1702
1703 IOPhysicalAddress
1704 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1705 {
1706 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1707 }
1708
1709 IOPhysicalAddress
1710 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1711 {
1712 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1713 }
1714
1715 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1716 IOByteCount * lengthOfSegment)
1717 {
1718 if (_task == kernel_task)
1719 return (void *) getSourceSegment(offset, lengthOfSegment);
1720 else
1721 panic("IOGMD::getVirtualSegment deprecated");
1722
1723 return 0;
1724 }
1725 #endif /* !__LP64__ */
1726
1727 IOReturn
1728 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1729 {
1730 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1731 DMACommandOps params;
1732 IOReturn err;
1733
1734 params = (op & ~kIOMDDMACommandOperationMask & op);
1735 op &= kIOMDDMACommandOperationMask;
1736
1737 if (kIOMDGetCharacteristics == op) {
1738 if (dataSize < sizeof(IOMDDMACharacteristics))
1739 return kIOReturnUnderrun;
1740
1741 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1742 data->fLength = getLength();
1743 data->fSGCount = 0;
1744 data->fDirection = getDirection();
1745 data->fIsPrepared = true; // Assume prepared; this assumption fails safe
1746 }
1747 else if (kIOMDWalkSegments == op) {
1748 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1749 return kIOReturnUnderrun;
1750
1751 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1752 IOByteCount offset = (IOByteCount) data->fOffset;
1753
1754 IOPhysicalLength length;
1755 if (data->fMapped && IOMapper::gSystem)
1756 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
1757 else
1758 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1759 data->fLength = length;
1760 }
1761 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1762 else if (kIOMDDMAMap == op)
1763 {
1764 if (dataSize < sizeof(IOMDDMAMapArgs))
1765 return kIOReturnUnderrun;
1766 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1767
1768 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1769
1770 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1771 return (err);
1772 }
1773 else return kIOReturnBadArgument;
1774
1775 return kIOReturnSuccess;
1776 }
1777
1778 static IOReturn
1779 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1780 {
1781 IOReturn err = kIOReturnSuccess;
1782
1783 *control = VM_PURGABLE_SET_STATE;
1784 switch (newState)
1785 {
1786 case kIOMemoryPurgeableKeepCurrent:
1787 *control = VM_PURGABLE_GET_STATE;
1788 break;
1789
1790 case kIOMemoryPurgeableNonVolatile:
1791 *state = VM_PURGABLE_NONVOLATILE;
1792 break;
1793 case kIOMemoryPurgeableVolatile:
1794 *state = VM_PURGABLE_VOLATILE;
1795 break;
1796 case kIOMemoryPurgeableEmpty:
1797 *state = VM_PURGABLE_EMPTY;
1798 break;
1799 default:
1800 err = kIOReturnBadArgument;
1801 break;
1802 }
1803 return (err);
1804 }
1805
1806 static IOReturn
1807 purgeableStateBits(int * state)
1808 {
1809 IOReturn err = kIOReturnSuccess;
1810
1811 switch (*state)
1812 {
1813 case VM_PURGABLE_NONVOLATILE:
1814 *state = kIOMemoryPurgeableNonVolatile;
1815 break;
1816 case VM_PURGABLE_VOLATILE:
1817 *state = kIOMemoryPurgeableVolatile;
1818 break;
1819 case VM_PURGABLE_EMPTY:
1820 *state = kIOMemoryPurgeableEmpty;
1821 break;
1822 default:
1823 *state = kIOMemoryPurgeableNonVolatile;
1824 err = kIOReturnNotReady;
1825 break;
1826 }
1827 return (err);
1828 }
1829
1830 IOReturn
1831 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1832 IOOptionBits * oldState )
1833 {
1834 IOReturn err = kIOReturnSuccess;
1835 vm_purgable_t control;
1836 int state;
1837
1838 if (_memEntry)
1839 {
1840 err = super::setPurgeable(newState, oldState);
1841 }
1842 else
1843 {
1844 if (kIOMemoryThreadSafe & _flags)
1845 LOCK;
1846 do
1847 {
1848 // Find the appropriate vm_map for the given task
1849 vm_map_t curMap;
1850 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1851 {
1852 err = kIOReturnNotReady;
1853 break;
1854 }
1855 else
1856 curMap = get_task_map(_task);
1857
1858 // can only do one range
1859 Ranges vec = _ranges;
1860 IOOptionBits type = _flags & kIOMemoryTypeMask;
1861 user_addr_t addr;
1862 IOByteCount len;
1863 getAddrLenForInd(addr, len, type, vec, 0);
1864
1865 err = purgeableControlBits(newState, &control, &state);
1866 if (kIOReturnSuccess != err)
1867 break;
1868 err = mach_vm_purgable_control(curMap, addr, control, &state);
1869 if (oldState)
1870 {
1871 if (kIOReturnSuccess == err)
1872 {
1873 err = purgeableStateBits(&state);
1874 *oldState = state;
1875 }
1876 }
1877 }
1878 while (false);
1879 if (kIOMemoryThreadSafe & _flags)
1880 UNLOCK;
1881 }
1882 return (err);
1883 }
1884
1885 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1886 IOOptionBits * oldState )
1887 {
1888 IOReturn err = kIOReturnSuccess;
1889 vm_purgable_t control;
1890 int state;
1891
1892 if (kIOMemoryThreadSafe & _flags)
1893 LOCK;
1894
1895 do
1896 {
1897 if (!_memEntry)
1898 {
1899 err = kIOReturnNotReady;
1900 break;
1901 }
1902 err = purgeableControlBits(newState, &control, &state);
1903 if (kIOReturnSuccess != err)
1904 break;
1905 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1906 if (oldState)
1907 {
1908 if (kIOReturnSuccess == err)
1909 {
1910 err = purgeableStateBits(&state);
1911 *oldState = state;
1912 }
1913 }
1914 }
1915 while (false);
1916
1917 if (kIOMemoryThreadSafe & _flags)
1918 UNLOCK;
1919
1920 return (err);
1921 }
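
/*
 * Usage sketch (illustrative; 'md' is a hypothetical descriptor whose backing
 * memory supports purging): mark the memory volatile while it is not needed,
 * then check whether the pager reclaimed it when taking it back.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *   // ... later ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if (kIOMemoryPurgeableEmpty == oldState)
 *   {
 *       // contents were reclaimed while volatile; regenerate them
 *   }
 */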
1922
1923 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1924 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1925
1926 static void SetEncryptOp(addr64_t pa, unsigned int count)
1927 {
1928 ppnum_t page, end;
1929
1930 page = atop_64(round_page_64(pa));
1931 end = atop_64(trunc_page_64(pa + count));
1932 for (; page < end; page++)
1933 {
1934 pmap_clear_noencrypt(page);
1935 }
1936 }
1937
1938 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1939 {
1940 ppnum_t page, end;
1941
1942 page = atop_64(round_page_64(pa));
1943 end = atop_64(trunc_page_64(pa + count));
1944 for (; page < end; page++)
1945 {
1946 pmap_set_noencrypt(page);
1947 }
1948 }
1949
1950 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1951 IOByteCount offset, IOByteCount length )
1952 {
1953 IOByteCount remaining;
1954 unsigned int res;
1955 void (*func)(addr64_t pa, unsigned int count) = 0;
1956
1957 switch (options)
1958 {
1959 case kIOMemoryIncoherentIOFlush:
1960 func = &dcache_incoherent_io_flush64;
1961 break;
1962 case kIOMemoryIncoherentIOStore:
1963 func = &dcache_incoherent_io_store64;
1964 break;
1965
1966 case kIOMemorySetEncrypted:
1967 func = &SetEncryptOp;
1968 break;
1969 case kIOMemoryClearEncrypted:
1970 func = &ClearEncryptOp;
1971 break;
1972 }
1973
1974 if (!func)
1975 return (kIOReturnUnsupported);
1976
1977 if (kIOMemoryThreadSafe & _flags)
1978 LOCK;
1979
1980 res = 0x0UL;
1981 remaining = length = min(length, getLength() - offset);
1982 while (remaining)
1983 // (process another target segment?)
1984 {
1985 addr64_t dstAddr64;
1986 IOByteCount dstLen;
1987
1988 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1989 if (!dstAddr64)
1990 break;
1991
1992 // Clip segment length to remaining
1993 if (dstLen > remaining)
1994 dstLen = remaining;
1995
1996 (*func)(dstAddr64, dstLen);
1997
1998 offset += dstLen;
1999 remaining -= dstLen;
2000 }
2001
2002 if (kIOMemoryThreadSafe & _flags)
2003 UNLOCK;
2004
2005 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2006 }
2007
2008 #if defined(__i386__) || defined(__x86_64__)
2009 extern vm_offset_t first_avail;
2010 #define io_kernel_static_end first_avail
2011 #else
2012 #error io_kernel_static_end is undefined for this architecture
2013 #endif
2014
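// Build a page list for memory in the static kernel range without creating a
// real UPL: each page is looked up directly in the kernel pmap and the highest
// physical page seen is reported. Fails if any page is not resident.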
2015 static kern_return_t
2016 io_get_kernel_static_upl(
2017 vm_map_t /* map */,
2018 uintptr_t offset,
2019 vm_size_t *upl_size,
2020 upl_t *upl,
2021 upl_page_info_array_t page_list,
2022 unsigned int *count,
2023 ppnum_t *highest_page)
2024 {
2025 unsigned int pageCount, page;
2026 ppnum_t phys;
2027 ppnum_t highestPage = 0;
2028
2029 pageCount = atop_32(*upl_size);
2030 if (pageCount > *count)
2031 pageCount = *count;
2032
2033 *upl = NULL;
2034
2035 for (page = 0; page < pageCount; page++)
2036 {
2037 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2038 if (!phys)
2039 break;
2040 page_list[page].phys_addr = phys;
2041 page_list[page].pageout = 0;
2042 page_list[page].absent = 0;
2043 page_list[page].dirty = 0;
2044 page_list[page].precious = 0;
2045 page_list[page].device = 0;
2046 if (phys > highestPage)
2047 highestPage = phys;
2048 }
2049
2050 *highest_page = highestPage;
2051
2052 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2053 }
2054
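// Wire down the descriptor's virtual ranges: create a UPL (or a static-kernel
// page list) for each chunk of each range, append an ioPLBlock describing it
// to _memoryEntries, and track the highest physical page touched.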
2055 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2056 {
2057 IOOptionBits type = _flags & kIOMemoryTypeMask;
2058 IOReturn error = kIOReturnCannotWire;
2059 ioGMDData *dataP;
2060 upl_page_info_array_t pageInfo;
2061 ppnum_t mapBase = 0;
2062 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2063
2064 assert(!_wireCount);
2065 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2066
2067 if (_pages > gIOMaximumMappedIOPageCount)
2068 return kIOReturnNoResources;
2069
2070 dataP = getDataP(_memoryEntries);
2071 IOMapper *mapper;
2072 mapper = dataP->fMapper;
2073 dataP->fMappedBase = 0;
2074
2075 if (forDirection == kIODirectionNone)
2076 forDirection = getDirection();
2077
2078 int uplFlags; // This Mem Desc's default flags for upl creation
2079 switch (kIODirectionOutIn & forDirection)
2080 {
2081 case kIODirectionOut:
2082 // Pages do not need to be marked as dirty on commit
2083 uplFlags = UPL_COPYOUT_FROM;
2084 _flags |= kIOMemoryPreparedReadOnly;
2085 break;
2086
2087 case kIODirectionIn:
2088 default:
2089 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2090 break;
2091 }
2092 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2093
2094 #ifdef UPL_NEED_32BIT_ADDR
2095 if (kIODirectionPrepareToPhys32 & forDirection)
2096 {
2097 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2098 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2099 }
2100 #endif
2101
2102 // Note that appendBytes(NULL) zeros the data up to the desired length.
2103 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2104 dataP = 0;
2105
2106 // Find the appropriate vm_map for the given task
2107 vm_map_t curMap;
2108 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2109 curMap = 0;
2110 else
2111 { curMap = get_task_map(_task); }
2112
2113 // Iterate over the vector of virtual ranges
2114 Ranges vec = _ranges;
2115 unsigned int pageIndex = 0;
2116 IOByteCount mdOffset = 0;
2117 ppnum_t highestPage = 0;
2118
2119 for (UInt range = 0; range < _rangesCount; range++) {
2120 ioPLBlock iopl;
2121 user_addr_t startPage;
2122 IOByteCount numBytes;
2123 ppnum_t highPage = 0;
2124
2125 // Get the startPage address and length of vec[range]
2126 getAddrLenForInd(startPage, numBytes, type, vec, range);
2127 iopl.fPageOffset = startPage & PAGE_MASK;
2128 numBytes += iopl.fPageOffset;
2129 startPage = trunc_page_64(startPage);
2130
2131 if (mapper)
2132 iopl.fMappedPage = mapBase + pageIndex;
2133 else
2134 iopl.fMappedPage = 0;
2135
2136 // Iterate over the current range, creating UPLs
2137 while (numBytes) {
2138 vm_address_t kernelStart = (vm_address_t) startPage;
2139 vm_map_t theMap;
2140 if (curMap)
2141 theMap = curMap;
2142 else if (!sharedMem) {
2143 assert(_task == kernel_task);
2144 theMap = IOPageableMapForAddress(kernelStart);
2145 }
2146 else
2147 theMap = NULL;
2148
2149 int ioplFlags = uplFlags;
2150 dataP = getDataP(_memoryEntries);
2151 pageInfo = getPageList(dataP);
2152 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2153
2154 vm_size_t ioplSize = round_page(numBytes);
2155 unsigned int numPageInfo = atop_32(ioplSize);
2156
2157 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2158 error = io_get_kernel_static_upl(theMap,
2159 kernelStart,
2160 &ioplSize,
2161 &iopl.fIOPL,
2162 baseInfo,
2163 &numPageInfo,
2164 &highPage);
2165 }
2166 else if (sharedMem) {
2167 error = memory_object_iopl_request(sharedMem,
2168 ptoa_32(pageIndex),
2169 &ioplSize,
2170 &iopl.fIOPL,
2171 baseInfo,
2172 &numPageInfo,
2173 &ioplFlags);
2174 }
2175 else {
2176 assert(theMap);
2177 error = vm_map_create_upl(theMap,
2178 startPage,
2179 (upl_size_t*)&ioplSize,
2180 &iopl.fIOPL,
2181 baseInfo,
2182 &numPageInfo,
2183 &ioplFlags);
2184 }
2185
2186 assert(ioplSize);
2187 if (error != KERN_SUCCESS)
2188 goto abortExit;
2189
2190 if (iopl.fIOPL)
2191 highPage = upl_get_highest_page(iopl.fIOPL);
2192 if (highPage > highestPage)
2193 highestPage = highPage;
2194
2195 error = kIOReturnCannotWire;
2196
2197 if (baseInfo->device) {
2198 numPageInfo = 1;
2199 iopl.fFlags = kIOPLOnDevice;
2200 }
2201 else {
2202 iopl.fFlags = 0;
2203 }
2204
2205 iopl.fIOMDOffset = mdOffset;
2206 iopl.fPageInfo = pageIndex;
2207
2208 #if 0
2209 // The upl for auto-prepares used to be removed here, to cope with errant
2210 // code that freed the memory before releasing the descriptor pointing at it
2211 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2212 {
2213 upl_commit(iopl.fIOPL, 0, 0);
2214 upl_deallocate(iopl.fIOPL);
2215 iopl.fIOPL = 0;
2216 }
2217 #endif
2218
2219 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2220 // Clean up the partially created and unsaved iopl
2221 if (iopl.fIOPL) {
2222 upl_abort(iopl.fIOPL, 0);
2223 upl_deallocate(iopl.fIOPL);
2224 }
2225 goto abortExit;
2226 }
2227 dataP = 0;
2228
2229 // Check for multiple iopls in one virtual range
2230 pageIndex += numPageInfo;
2231 mdOffset -= iopl.fPageOffset;
2232 if (ioplSize < numBytes) {
2233 numBytes -= ioplSize;
2234 startPage += ioplSize;
2235 mdOffset += ioplSize;
2236 iopl.fPageOffset = 0;
2237 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2238 }
2239 else {
2240 mdOffset += numBytes;
2241 break;
2242 }
2243 }
2244 }
2245
2246 _highestPage = highestPage;
2247
2248 return kIOReturnSuccess;
2249
2250 abortExit:
2251 {
2252 dataP = getDataP(_memoryEntries);
2253 UInt done = getNumIOPL(_memoryEntries, dataP);
2254 ioPLBlock *ioplList = getIOPLList(dataP);
2255
2256 for (UInt range = 0; range < done; range++)
2257 {
2258 if (ioplList[range].fIOPL) {
2259 upl_abort(ioplList[range].fIOPL, 0);
2260 upl_deallocate(ioplList[range].fIOPL);
2261 }
2262 }
2263 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2264 }
2265
2266 if (error == KERN_FAILURE)
2267 error = kIOReturnCannotWire;
2268
2269 return error;
2270 }
2271
2272 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2273 {
2274 ioGMDData * dataP;
2275 unsigned dataSize = size;
2276
2277 if (!_memoryEntries) {
2278 _memoryEntries = OSData::withCapacity(dataSize);
2279 if (!_memoryEntries)
2280 return false;
2281 }
2282 else if (!_memoryEntries->initWithCapacity(dataSize))
2283 return false;
2284
2285 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2286 dataP = getDataP(_memoryEntries);
2287
2288 if (mapper == kIOMapperWaitSystem) {
2289 IOMapper::checkForSystemMapper();
2290 mapper = IOMapper::gSystem;
2291 }
2292 dataP->fMapper = mapper;
2293 dataP->fPageCnt = 0;
2294 dataP->fMappedBase = 0;
2295 dataP->fDMAMapNumAddressBits = 64;
2296 dataP->fDMAMapAlignment = 0;
2297 dataP->fPreparationID = kIOPreparationIDUnprepared;
2298
2299 return (true);
2300 }
2301
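// Generic DMA mapping path: walk the physical segments twice, first to check
// page alignment and count the pages needed, then to allocate an IOVM range
// from the mapper and insert each physical page into it.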
2302 IOReturn IOMemoryDescriptor::dmaMap(
2303 IOMapper * mapper,
2304 const IODMAMapSpecification * mapSpec,
2305 uint64_t offset,
2306 uint64_t length,
2307 uint64_t * address,
2308 ppnum_t * mapPages)
2309 {
2310 IOMDDMAWalkSegmentState walkState;
2311 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2312 IOOptionBits mdOp;
2313 IOReturn ret;
2314 IOPhysicalLength segLen;
2315 addr64_t phys, align, pageOffset;
2316 ppnum_t base, pageIndex, pageCount;
2317 uint64_t index;
2318 uint32_t mapOptions = 0;
2319
2320 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2321
2322 walkArgs->fMapped = false;
2323 mdOp = kIOMDFirstSegment;
2324 pageCount = 0;
2325 for (index = 0; index < length; )
2326 {
2327 if (index && (page_mask & (index + pageOffset))) break;
2328
2329 walkArgs->fOffset = offset + index;
2330 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2331 mdOp = kIOMDWalkSegments;
2332 if (ret != kIOReturnSuccess) break;
2333 phys = walkArgs->fIOVMAddr;
2334 segLen = walkArgs->fLength;
2335
2336 align = (phys & page_mask);
2337 if (!index) pageOffset = align;
2338 else if (align) break;
2339 pageCount += atop_64(round_page_64(align + segLen));
2340 index += segLen;
2341 }
2342
2343 if (index < length) return (kIOReturnVMError);
2344
2345 base = mapper->iovmMapMemory(this, offset, pageCount,
2346 mapOptions, NULL, mapSpec);
2347
2348 if (!base) return (kIOReturnNoResources);
2349
2350 mdOp = kIOMDFirstSegment;
2351 for (pageIndex = 0, index = 0; index < length; )
2352 {
2353 walkArgs->fOffset = offset + index;
2354 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2355 mdOp = kIOMDWalkSegments;
2356 if (ret != kIOReturnSuccess) break;
2357 phys = walkArgs->fIOVMAddr;
2358 segLen = walkArgs->fLength;
2359
2360 ppnum_t page = atop_64(phys);
2361 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2362 while (count--)
2363 {
2364 mapper->iovmInsert(base, pageIndex, page);
2365 page++;
2366 pageIndex++;
2367 }
2368 index += segLen;
2369 }
2370 if (pageIndex != pageCount) panic("pageIndex");
2371
2372 *address = ptoa_64(base) + pageOffset;
2373 if (mapPages) *mapPages = pageCount;
2374
2375 return (kIOReturnSuccess);
2376 }
2377
2378 IOReturn IOGeneralMemoryDescriptor::dmaMap(
2379 IOMapper * mapper,
2380 const IODMAMapSpecification * mapSpec,
2381 uint64_t offset,
2382 uint64_t length,
2383 uint64_t * address,
2384 ppnum_t * mapPages)
2385 {
2386 IOReturn err = kIOReturnSuccess;
2387 ioGMDData * dataP;
2388 IOOptionBits type = _flags & kIOMemoryTypeMask;
2389
2390 *address = 0;
2391 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2392
2393 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2394 || offset || (length != _length))
2395 {
2396 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2397 }
2398 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2399 {
2400 const ioPLBlock * ioplList = getIOPLList(dataP);
2401 upl_page_info_t * pageList;
2402 uint32_t mapOptions = 0;
2403 ppnum_t base;
2404
2405 IODMAMapSpecification mapSpec;
2406 bzero(&mapSpec, sizeof(mapSpec));
2407 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2408 mapSpec.alignment = dataP->fDMAMapAlignment;
2409
2410 // For external UPLs the fPageInfo field points directly to
2411 // the upl's upl_page_info_t array.
2412 if (ioplList->fFlags & kIOPLExternUPL)
2413 {
2414 pageList = (upl_page_info_t *) ioplList->fPageInfo;
2415 mapOptions |= kIODMAMapPagingPath;
2416 }
2417 else
2418 pageList = getPageList(dataP);
2419
2420 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2421
2422 // Check for direct device non-paged memory
2423 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2424
2425 base = mapper->iovmMapMemory(
2426 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2427 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2428 if (mapPages) *mapPages = _pages;
2429 }
2430
2431 return (err);
2432 }
2433
2434 /*
2435 * prepare
2436 *
2437 * Prepare the memory for an I/O transfer. This involves paging in
2438 * the memory, if necessary, and wiring it down for the duration of
2439 * the transfer. The complete() method completes the processing of
2440 * the memory after the I/O transfer finishes. This method needn't be
2441 * called for non-pageable memory.
2442 */
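// Illustrative sketch (not part of the original sources): a driver typically
// brackets a DMA transfer with prepare()/complete(). ExampleStartTransfer()
// below is a hypothetical helper; the rest is existing IOKit API.
#if 0
static IOReturn ExampleWiredTransfer(task_t task, mach_vm_address_t addr,
                                     mach_vm_size_t len)
{
    IOReturn ret = kIOReturnNoMemory;
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                  addr, len, kIODirectionOut, task);
    if (md)
    {
        ret = md->prepare();                  // page in & wire the memory
        if (kIOReturnSuccess == ret)
        {
            ExampleStartTransfer(md);         // hypothetical DMA helper
            ret = md->complete();             // unwire after the transfer
        }
        md->release();
    }
    return (ret);
}
#endif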
2443
2444 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2445 {
2446 IOReturn error = kIOReturnSuccess;
2447 IOOptionBits type = _flags & kIOMemoryTypeMask;
2448
2449 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2450 return kIOReturnSuccess;
2451
2452 if (_prepareLock)
2453 IOLockLock(_prepareLock);
2454
2455 if (!_wireCount
2456 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2457 error = wireVirtual(forDirection);
2458 }
2459
2460 if (kIOReturnSuccess == error)
2461 {
2462 if (1 == ++_wireCount)
2463 {
2464 if (kIOMemoryClearEncrypt & _flags)
2465 {
2466 performOperation(kIOMemoryClearEncrypted, 0, _length);
2467 }
2468 }
2469 }
2470
2471 if (_prepareLock)
2472 IOLockUnlock(_prepareLock);
2473
2474 return error;
2475 }
2476
2477 /*
2478 * complete
2479 *
2480 * Complete processing of the memory after an I/O transfer finishes.
2481 * This method should not be called unless a prepare was previously
2482 * issued; the prepare() and complete() calls must occur in pairs,
2483 * before and after an I/O transfer involving pageable memory.
2484 */
2485
2486 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2487 {
2488 IOOptionBits type = _flags & kIOMemoryTypeMask;
2489
2490 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2491 return kIOReturnSuccess;
2492
2493 if (_prepareLock)
2494 IOLockLock(_prepareLock);
2495
2496 assert(_wireCount);
2497
2498 if (_wireCount)
2499 {
2500 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2501 {
2502 performOperation(kIOMemorySetEncrypted, 0, _length);
2503 }
2504
2505 _wireCount--;
2506 if (!_wireCount)
2507 {
2508 IOOptionBits type = _flags & kIOMemoryTypeMask;
2509 ioGMDData * dataP = getDataP(_memoryEntries);
2510 ioPLBlock *ioplList = getIOPLList(dataP);
2511 UInt count = getNumIOPL(_memoryEntries, dataP);
2512
2513 #if IOMD_DEBUG_DMAACTIVE
2514 if (__iomd_reservedA) panic("complete() while dma active");
2515 #endif /* IOMD_DEBUG_DMAACTIVE */
2516
2517 if (dataP->fMappedBase) {
2518 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2519 dataP->fMappedBase = 0;
2520 }
2521 // Only complete iopls that we created, i.e. those for TypeVirtual
2522 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2523 for (UInt ind = 0; ind < count; ind++)
2524 if (ioplList[ind].fIOPL) {
2525 upl_commit(ioplList[ind].fIOPL, 0, 0);
2526 upl_deallocate(ioplList[ind].fIOPL);
2527 }
2528 } else if (kIOMemoryTypeUPL == type) {
2529 upl_set_referenced(ioplList[0].fIOPL, false);
2530 }
2531
2532 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2533
2534 dataP->fPreparationID = kIOPreparationIDUnprepared;
2535 }
2536 }
2537
2538 if (_prepareLock)
2539 IOLockUnlock(_prepareLock);
2540
2541 return kIOReturnSuccess;
2542 }
2543
2544 IOReturn IOGeneralMemoryDescriptor::doMap(
2545 vm_map_t __addressMap,
2546 IOVirtualAddress * __address,
2547 IOOptionBits options,
2548 IOByteCount __offset,
2549 IOByteCount __length )
2550
2551 {
2552 #ifndef __LP64__
2553 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2554 #endif /* !__LP64__ */
2555
2556 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2557 mach_vm_size_t offset = mapping->fOffset + __offset;
2558 mach_vm_size_t length = mapping->fLength;
2559
2560 kern_return_t kr = kIOReturnVMError;
2561 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2562
2563 IOOptionBits type = _flags & kIOMemoryTypeMask;
2564 Ranges vec = _ranges;
2565
2566 user_addr_t range0Addr = 0;
2567 IOByteCount range0Len = 0;
2568
2569 if ((offset >= _length) || ((offset + length) > _length))
2570 return( kIOReturnBadArgument );
2571
2572 if (vec.v)
2573 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2574
2575 // mapping source == dest? (could be much better)
2576 if( _task
2577 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2578 && (1 == _rangesCount) && (0 == offset)
2579 && range0Addr && (length <= range0Len) )
2580 {
2581 mapping->fAddress = range0Addr;
2582 mapping->fOptions |= kIOMapStatic;
2583
2584 return( kIOReturnSuccess );
2585 }
2586
2587 if( 0 == sharedMem) {
2588
2589 vm_size_t size = ptoa_32(_pages);
2590
2591 if( _task) {
2592
2593 memory_object_size_t actualSize = size;
2594 vm_prot_t prot = VM_PROT_READ;
2595 if (!(kIOMapReadOnly & options))
2596 prot |= VM_PROT_WRITE;
2597 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2598 prot |= VM_PROT_WRITE;
2599
2600 if (_rangesCount == 1)
2601 {
2602 kr = mach_make_memory_entry_64(get_task_map(_task),
2603 &actualSize, range0Addr,
2604 prot, &sharedMem,
2605 NULL);
2606 }
2607 if( (_rangesCount != 1)
2608 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2609 do
2610 {
2611 #if IOASSERT
2612 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2613 _rangesCount, (UInt64)actualSize, (UInt64)size);
2614 #endif
2615 kr = kIOReturnVMError;
2616 if (sharedMem)
2617 {
2618 ipc_port_release_send(sharedMem);
2619 sharedMem = MACH_PORT_NULL;
2620 }
2621
2622 mach_vm_address_t address, segDestAddr;
2623 mach_vm_size_t mapLength;
2624 unsigned rangesIndex;
2625 IOOptionBits type = _flags & kIOMemoryTypeMask;
2626 user_addr_t srcAddr;
2627 IOPhysicalLength segLen = 0;
2628
2629 // Find starting address within the vector of ranges
2630 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2631 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2632 if (offset < segLen)
2633 break;
2634 offset -= segLen; // (make offset relative)
2635 }
2636
2637 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2638 address = trunc_page_64(mapping->fAddress);
2639
2640 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2641 {
2642 vm_map_t map = mapping->fAddressMap;
2643 kr = IOMemoryDescriptorMapCopy(&map,
2644 options,
2645 offset, &address, round_page_64(length + pageOffset));
2646 if (kr == KERN_SUCCESS)
2647 {
2648 segDestAddr = address;
2649 segLen -= offset;
2650 srcAddr += offset;
2651 mapLength = length;
2652
2653 while (true)
2654 {
2655 vm_prot_t cur_prot, max_prot;
2656
2657 if (segLen > length) segLen = length;
2658 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2659 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2660 get_task_map(_task), trunc_page_64(srcAddr),
2661 FALSE /* copy */,
2662 &cur_prot,
2663 &max_prot,
2664 VM_INHERIT_NONE);
2665 if (KERN_SUCCESS == kr)
2666 {
2667 if ((!(VM_PROT_READ & cur_prot))
2668 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2669 {
2670 kr = KERN_PROTECTION_FAILURE;
2671 }
2672 }
2673 if (KERN_SUCCESS != kr)
2674 break;
2675 segDestAddr += segLen;
2676 mapLength -= segLen;
2677 if (!mapLength)
2678 break;
2679 rangesIndex++;
2680 if (rangesIndex >= _rangesCount)
2681 {
2682 kr = kIOReturnBadArgument;
2683 break;
2684 }
2685 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2686 if (srcAddr & PAGE_MASK)
2687 {
2688 kr = kIOReturnBadArgument;
2689 break;
2690 }
2691 if (segLen > mapLength)
2692 segLen = mapLength;
2693 }
2694 if (KERN_SUCCESS != kr)
2695 {
2696 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2697 }
2698 }
2699
2700 if (KERN_SUCCESS == kr)
2701 mapping->fAddress = address + pageOffset;
2702 else
2703 mapping->fAddress = NULL;
2704 }
2705 }
2706 while (false);
2707 }
2708 else do
2709 { // _task == 0, must be physical
2710
2711 memory_object_t pager;
2712 unsigned int flags = 0;
2713 addr64_t pa;
2714 IOPhysicalLength segLen;
2715
2716 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2717
2718 if( !getKernelReserved())
2719 continue;
2720 reserved->dp.pagerContig = (1 == _rangesCount);
2721 reserved->dp.memory = this;
2722
2723 /* What cache mode do we need? */
2724 switch(options & kIOMapCacheMask ) {
2725
2726 case kIOMapDefaultCache:
2727 default:
2728 flags = IODefaultCacheBits(pa);
2729 if (DEVICE_PAGER_CACHE_INHIB & flags)
2730 {
2731 if (DEVICE_PAGER_GUARDED & flags)
2732 mapping->fOptions |= kIOMapInhibitCache;
2733 else
2734 mapping->fOptions |= kIOMapWriteCombineCache;
2735 }
2736 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2737 mapping->fOptions |= kIOMapWriteThruCache;
2738 else
2739 mapping->fOptions |= kIOMapCopybackCache;
2740 break;
2741
2742 case kIOMapInhibitCache:
2743 flags = DEVICE_PAGER_CACHE_INHIB |
2744 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2745 break;
2746
2747 case kIOMapWriteThruCache:
2748 flags = DEVICE_PAGER_WRITE_THROUGH |
2749 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2750 break;
2751
2752 case kIOMapCopybackCache:
2753 flags = DEVICE_PAGER_COHERENT;
2754 break;
2755
2756 case kIOMapWriteCombineCache:
2757 flags = DEVICE_PAGER_CACHE_INHIB |
2758 DEVICE_PAGER_COHERENT;
2759 break;
2760 }
2761
2762 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2763
2764 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2765 size, flags);
2766 assert( pager );
2767
2768 if( pager) {
2769 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2770 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2771
2772 assert( KERN_SUCCESS == kr );
2773 if( KERN_SUCCESS != kr)
2774 {
2775 device_pager_deallocate( pager );
2776 pager = MACH_PORT_NULL;
2777 sharedMem = MACH_PORT_NULL;
2778 }
2779 }
2780 if( pager && sharedMem)
2781 reserved->dp.devicePager = pager;
2782
2783 } while( false );
2784
2785 _memEntry = (void *) sharedMem;
2786 }
2787
2788 IOReturn result;
2789 if (0 == sharedMem)
2790 result = kr;
2791 else
2792 result = super::doMap( __addressMap, __address,
2793 options, __offset, __length );
2794
2795 return( result );
2796 }
2797
2798 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2799 vm_map_t addressMap,
2800 IOVirtualAddress __address,
2801 IOByteCount __length )
2802 {
2803 return (super::doUnmap(addressMap, __address, __length));
2804 }
2805
2806 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2807
2808 #undef super
2809 #define super OSObject
2810
2811 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2812
2813 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2814 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2815 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2816 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2817 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2818 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2819 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2820 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2821
2822 /* ex-inline function implementation */
2823 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2824 { return( getPhysicalSegment( 0, 0 )); }
2825
2826 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2827
2828 bool IOMemoryMap::init(
2829 task_t intoTask,
2830 mach_vm_address_t toAddress,
2831 IOOptionBits _options,
2832 mach_vm_size_t _offset,
2833 mach_vm_size_t _length )
2834 {
2835 if (!intoTask)
2836 return( false);
2837
2838 if (!super::init())
2839 return(false);
2840
2841 fAddressMap = get_task_map(intoTask);
2842 if (!fAddressMap)
2843 return(false);
2844 vm_map_reference(fAddressMap);
2845
2846 fAddressTask = intoTask;
2847 fOptions = _options;
2848 fLength = _length;
2849 fOffset = _offset;
2850 fAddress = toAddress;
2851
2852 return (true);
2853 }
2854
2855 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2856 {
2857 if (!_memory)
2858 return(false);
2859
2860 if (!fSuperMap)
2861 {
2862 if( (_offset + fLength) > _memory->getLength())
2863 return( false);
2864 fOffset = _offset;
2865 }
2866
2867 _memory->retain();
2868 if (fMemory)
2869 {
2870 if (fMemory != _memory)
2871 fMemory->removeMapping(this);
2872 fMemory->release();
2873 }
2874 fMemory = _memory;
2875
2876 return( true );
2877 }
2878
2879 struct IOMemoryDescriptorMapAllocRef
2880 {
2881 ipc_port_t sharedMem;
2882 vm_map_t map;
2883 mach_vm_address_t mapped;
2884 mach_vm_size_t size;
2885 mach_vm_size_t sourceOffset;
2886 IOOptionBits options;
2887 };
2888
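// Map ref->size bytes into the given map. With a shared memory entry, apply
// the requested cache mode and mach_vm_map() the entry; otherwise allocate
// fresh pageable memory and mark it VM_INHERIT_NONE so it is not copied on fork.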
2889 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2890 {
2891 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2892 IOReturn err;
2893
2894 do {
2895 if( ref->sharedMem)
2896 {
2897 vm_prot_t prot = VM_PROT_READ
2898 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2899
2900 // VM system requires write access to change cache mode
2901 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2902 prot |= VM_PROT_WRITE;
2903
2904 // set memory entry cache
2905 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2906 switch (ref->options & kIOMapCacheMask)
2907 {
2908 case kIOMapInhibitCache:
2909 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2910 break;
2911
2912 case kIOMapWriteThruCache:
2913 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2914 break;
2915
2916 case kIOMapWriteCombineCache:
2917 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2918 break;
2919
2920 case kIOMapCopybackCache:
2921 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2922 break;
2923
2924 case kIOMapCopybackInnerCache:
2925 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2926 break;
2927
2928 case kIOMapDefaultCache:
2929 default:
2930 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2931 break;
2932 }
2933
2934 vm_size_t unused = 0;
2935
2936 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2937 memEntryCacheMode, NULL, ref->sharedMem );
2938 if (KERN_SUCCESS != err)
2939 IOLog("MAP_MEM_ONLY failed %d\n", err);
2940
2941 err = mach_vm_map( map,
2942 &ref->mapped,
2943 ref->size, 0 /* mask */,
2944 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2945 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2946 ref->sharedMem, ref->sourceOffset,
2947 false, // copy
2948 prot, // cur
2949 prot, // max
2950 VM_INHERIT_NONE);
2951
2952 if( KERN_SUCCESS != err) {
2953 ref->mapped = 0;
2954 continue;
2955 }
2956 ref->map = map;
2957 }
2958 else
2959 {
2960 err = mach_vm_allocate(map, &ref->mapped, ref->size,
2961 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2962 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2963 if( KERN_SUCCESS != err) {
2964 ref->mapped = 0;
2965 continue;
2966 }
2967 ref->map = map;
2968 // we have to make sure that these pages don't get copied if we fork.
2969 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2970 assert( KERN_SUCCESS == err );
2971 }
2972 }
2973 while( false );
2974
2975 return( err );
2976 }
2977
2978 kern_return_t
2979 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2980 mach_vm_size_t offset,
2981 mach_vm_address_t * address, mach_vm_size_t length)
2982 {
2983 IOReturn err;
2984 IOMemoryDescriptorMapAllocRef ref;
2985
2986 ref.map = *map;
2987 ref.sharedMem = entry;
2988 ref.sourceOffset = trunc_page_64(offset);
2989 ref.options = options;
2990 ref.size = length;
2991
2992 if (options & kIOMapAnywhere)
2993 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2994 ref.mapped = 0;
2995 else
2996 ref.mapped = *address;
2997
2998 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2999 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3000 else
3001 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
3002
3003 *address = ref.mapped;
3004 *map = ref.map;
3005
3006 return (err);
3007 }
3008
3009 kern_return_t
3010 IOMemoryDescriptorMapCopy(vm_map_t * map,
3011 IOOptionBits options,
3012 mach_vm_size_t offset,
3013 mach_vm_address_t * address, mach_vm_size_t length)
3014 {
3015 IOReturn err;
3016 IOMemoryDescriptorMapAllocRef ref;
3017
3018 ref.map = *map;
3019 ref.sharedMem = NULL;
3020 ref.sourceOffset = trunc_page_64(offset);
3021 ref.options = options;
3022 ref.size = length;
3023
3024 if (options & kIOMapAnywhere)
3025 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3026 ref.mapped = 0;
3027 else
3028 ref.mapped = *address;
3029
3030 if (ref.map == kernel_map)
3031 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3032 else
3033 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
3034
3035 *address = ref.mapped;
3036 *map = ref.map;
3037
3038 return (err);
3039 }
3040
3041 IOReturn IOMemoryDescriptor::doMap(
3042 vm_map_t __addressMap,
3043 IOVirtualAddress * __address,
3044 IOOptionBits options,
3045 IOByteCount __offset,
3046 IOByteCount __length )
3047 {
3048 #ifndef __LP64__
3049 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
3050 #endif /* !__LP64__ */
3051
3052 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3053 mach_vm_size_t offset = mapping->fOffset + __offset;
3054 mach_vm_size_t length = mapping->fLength;
3055
3056 IOReturn err = kIOReturnSuccess;
3057 memory_object_t pager;
3058 mach_vm_size_t pageOffset;
3059 IOPhysicalAddress sourceAddr;
3060 unsigned int lock_count;
3061
3062 do
3063 {
3064 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3065 pageOffset = sourceAddr - trunc_page( sourceAddr );
3066
3067 if( reserved)
3068 pager = (memory_object_t) reserved->dp.devicePager;
3069 else
3070 pager = MACH_PORT_NULL;
3071
3072 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3073 {
3074 upl_t redirUPL2;
3075 vm_size_t size;
3076 int flags;
3077
3078 if (!_memEntry)
3079 {
3080 err = kIOReturnNotReadable;
3081 continue;
3082 }
3083
3084 size = round_page(mapping->fLength + pageOffset);
3085 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3086 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3087
3088 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3089 NULL, NULL,
3090 &flags))
3091 redirUPL2 = NULL;
3092
3093 for (lock_count = 0;
3094 IORecursiveLockHaveLock(gIOMemoryLock);
3095 lock_count++) {
3096 UNLOCK;
3097 }
3098 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3099 for (;
3100 lock_count;
3101 lock_count--) {
3102 LOCK;
3103 }
3104
3105 if (kIOReturnSuccess != err)
3106 {
3107 IOLog("upl_transpose(%x)\n", err);
3108 err = kIOReturnSuccess;
3109 }
3110
3111 if (redirUPL2)
3112 {
3113 upl_commit(redirUPL2, NULL, 0);
3114 upl_deallocate(redirUPL2);
3115 redirUPL2 = 0;
3116 }
3117 {
3118 // swap the memEntries since they now refer to different vm_objects
3119 void * me = _memEntry;
3120 _memEntry = mapping->fMemory->_memEntry;
3121 mapping->fMemory->_memEntry = me;
3122 }
3123 if (pager)
3124 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3125 }
3126 else
3127 {
3128 mach_vm_address_t address;
3129
3130 if (!(options & kIOMapAnywhere))
3131 {
3132 address = trunc_page_64(mapping->fAddress);
3133 if( (mapping->fAddress - address) != pageOffset)
3134 {
3135 err = kIOReturnVMError;
3136 continue;
3137 }
3138 }
3139
3140 vm_map_t map = mapping->fAddressMap;
3141 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
3142 options, (kIOMemoryBufferPageable & _flags),
3143 offset, &address, round_page_64(length + pageOffset));
3144 if( err != KERN_SUCCESS)
3145 continue;
3146
3147 if (!_memEntry || pager)
3148 {
3149 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3150 if (err != KERN_SUCCESS)
3151 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3152 }
3153
3154 #if DEBUG
3155 if (kIOLogMapping & gIOKitDebug)
3156 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3157 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
3158 #endif
3159
3160 if (err == KERN_SUCCESS)
3161 mapping->fAddress = address + pageOffset;
3162 else
3163 mapping->fAddress = NULL;
3164 }
3165 }
3166 while( false );
3167
3168 return (err);
3169 }
3170
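// Back the mapping at 'address' with physical pages: walk the physical
// segments for the range, populate the device pager (if any) for each page,
// and pre-fault kernel mappings so they can be used at interrupt level.
// With no addressMap, just block while the memory is redirected.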
3171 IOReturn IOMemoryDescriptor::handleFault(
3172 void * _pager,
3173 vm_map_t addressMap,
3174 mach_vm_address_t address,
3175 mach_vm_size_t sourceOffset,
3176 mach_vm_size_t length,
3177 IOOptionBits options )
3178 {
3179 IOReturn err = kIOReturnSuccess;
3180 memory_object_t pager = (memory_object_t) _pager;
3181 mach_vm_size_t size;
3182 mach_vm_size_t bytes;
3183 mach_vm_size_t page;
3184 mach_vm_size_t pageOffset;
3185 mach_vm_size_t pagerOffset;
3186 IOPhysicalLength segLen;
3187 addr64_t physAddr;
3188
3189 if( !addressMap)
3190 {
3191 if( kIOMemoryRedirected & _flags)
3192 {
3193 #if DEBUG
3194 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3195 #endif
3196 do {
3197 SLEEP;
3198 } while( kIOMemoryRedirected & _flags );
3199 }
3200
3201 return( kIOReturnSuccess );
3202 }
3203
3204 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3205 assert( physAddr );
3206 pageOffset = physAddr - trunc_page_64( physAddr );
3207 pagerOffset = sourceOffset;
3208
3209 size = length + pageOffset;
3210 physAddr -= pageOffset;
3211
3212 segLen += pageOffset;
3213 bytes = size;
3214 do
3215 {
3216 // in the middle of the loop only map whole pages
3217 if( segLen >= bytes)
3218 segLen = bytes;
3219 else if( segLen != trunc_page( segLen))
3220 err = kIOReturnVMError;
3221 if( physAddr != trunc_page_64( physAddr))
3222 err = kIOReturnBadArgument;
3223 if (kIOReturnSuccess != err)
3224 break;
3225
3226 #if DEBUG
3227 if( kIOLogMapping & gIOKitDebug)
3228 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
3229 addressMap, address + pageOffset, physAddr + pageOffset,
3230 segLen - pageOffset);
3231 #endif
3232
3233
3234 if( pager) {
3235 if( reserved && reserved->dp.pagerContig) {
3236 IOPhysicalLength allLen;
3237 addr64_t allPhys;
3238
3239 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3240 assert( allPhys );
3241 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3242 }
3243 else
3244 {
3245
3246 for( page = 0;
3247 (page < segLen) && (KERN_SUCCESS == err);
3248 page += page_size)
3249 {
3250 err = device_pager_populate_object(pager, pagerOffset,
3251 (ppnum_t)(atop_64(physAddr + page)), page_size);
3252 pagerOffset += page_size;
3253 }
3254 }
3255 assert( KERN_SUCCESS == err );
3256 if( err)
3257 break;
3258 }
3259
3260 // This call to vm_fault causes an early pmap-level resolution of the
3261 // mappings created above for kernel mappings, since faulting them in
3262 // later can't take place from interrupt level.
3263 /* *** ALERT *** */
3264 /* *** Temporary Workaround *** */
3265
3266 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3267 {
3268 vm_fault(addressMap,
3269 (vm_map_offset_t)address,
3270 VM_PROT_READ|VM_PROT_WRITE,
3271 FALSE, THREAD_UNINT, NULL,
3272 (vm_map_offset_t)0);
3273 }
3274
3275 /* *** Temporary Workaround *** */
3276 /* *** ALERT *** */
3277
3278 sourceOffset += segLen - pageOffset;
3279 address += segLen;
3280 bytes -= segLen;
3281 pageOffset = 0;
3282
3283 }
3284 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3285
3286 if (bytes)
3287 err = kIOReturnBadArgument;
3288
3289 return (err);
3290 }
3291
3292 IOReturn IOMemoryDescriptor::doUnmap(
3293 vm_map_t addressMap,
3294 IOVirtualAddress __address,
3295 IOByteCount __length )
3296 {
3297 IOReturn err;
3298 mach_vm_address_t address;
3299 mach_vm_size_t length;
3300
3301 if (__length)
3302 {
3303 address = __address;
3304 length = __length;
3305 }
3306 else
3307 {
3308 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3309 address = ((IOMemoryMap *) __address)->fAddress;
3310 length = ((IOMemoryMap *) __address)->fLength;
3311 }
3312
3313 if ((addressMap == kernel_map)
3314 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3315 addressMap = IOPageableMapForAddress( address );
3316
3317 #if DEBUG
3318 if( kIOLogMapping & gIOKitDebug)
3319 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3320 addressMap, address, length );
3321 #endif
3322
3323 err = mach_vm_deallocate( addressMap, address, length );
3324
3325 return (err);
3326 }
3327
3328 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3329 {
3330 IOReturn err = kIOReturnSuccess;
3331 IOMemoryMap * mapping = 0;
3332 OSIterator * iter;
3333
3334 LOCK;
3335
3336 if( doRedirect)
3337 _flags |= kIOMemoryRedirected;
3338 else
3339 _flags &= ~kIOMemoryRedirected;
3340
3341 do {
3342 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3343 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3344 mapping->redirect( safeTask, doRedirect );
3345
3346 iter->release();
3347 }
3348 } while( false );
3349
3350 if (!doRedirect)
3351 {
3352 WAKEUP;
3353 }
3354
3355 UNLOCK;
3356
3357 #ifndef __LP64__
3358 // temporary binary compatibility
3359 IOSubMemoryDescriptor * subMem;
3360 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3361 err = subMem->redirect( safeTask, doRedirect );
3362 else
3363 err = kIOReturnSuccess;
3364 #endif /* !__LP64__ */
3365
3366 return( err );
3367 }
3368
3369 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3370 {
3371 IOReturn err = kIOReturnSuccess;
3372
3373 if( fSuperMap) {
3374 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3375 } else {
3376
3377 LOCK;
3378
3379 do
3380 {
3381 if (!fAddress)
3382 break;
3383 if (!fAddressMap)
3384 break;
3385
3386 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3387 && (0 == (fOptions & kIOMapStatic)))
3388 {
3389 IOUnmapPages( fAddressMap, fAddress, fLength );
3390 err = kIOReturnSuccess;
3391 #if DEBUG
3392 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3393 #endif
3394 }
3395 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3396 {
3397 IOOptionBits newMode;
3398 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3399 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3400 }
3401 }
3402 while (false);
3403 UNLOCK;
3404 }
3405
3406 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3407 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3408 && safeTask
3409 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3410 fMemory->redirect(safeTask, doRedirect);
3411
3412 return( err );
3413 }
3414
3415 IOReturn IOMemoryMap::unmap( void )
3416 {
3417 IOReturn err;
3418
3419 LOCK;
3420
3421 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3422 && (0 == (fOptions & kIOMapStatic))) {
3423
3424 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3425
3426 } else
3427 err = kIOReturnSuccess;
3428
3429 if (fAddressMap)
3430 {
3431 vm_map_deallocate(fAddressMap);
3432 fAddressMap = 0;
3433 }
3434
3435 fAddress = 0;
3436
3437 UNLOCK;
3438
3439 return( err );
3440 }
3441
3442 void IOMemoryMap::taskDied( void )
3443 {
3444 LOCK;
3445 if (fUserClientUnmap)
3446 unmap();
3447 if( fAddressMap) {
3448 vm_map_deallocate(fAddressMap);
3449 fAddressMap = 0;
3450 }
3451 fAddressTask = 0;
3452 fAddress = 0;
3453 UNLOCK;
3454 }
3455
3456 IOReturn IOMemoryMap::userClientUnmap( void )
3457 {
3458 fUserClientUnmap = true;
3459 return (kIOReturnSuccess);
3460 }
3461
3462 // Overload the release mechanism. Every mapping must be a member
3463 // of its memory descriptor's _mappings set. This means that we
3464 // always have 2 references on a mapping. When either of these references
3465 // is released we need to free ourselves.
3466 void IOMemoryMap::taggedRelease(const void *tag) const
3467 {
3468 LOCK;
3469 super::taggedRelease(tag, 2);
3470 UNLOCK;
3471 }
3472
3473 void IOMemoryMap::free()
3474 {
3475 unmap();
3476
3477 if (fMemory)
3478 {
3479 LOCK;
3480 fMemory->removeMapping(this);
3481 UNLOCK;
3482 fMemory->release();
3483 }
3484
3485 if (fOwner && (fOwner != fMemory))
3486 {
3487 LOCK;
3488 fOwner->removeMapping(this);
3489 UNLOCK;
3490 }
3491
3492 if (fSuperMap)
3493 fSuperMap->release();
3494
3495 if (fRedirUPL) {
3496 upl_commit(fRedirUPL, NULL, 0);
3497 upl_deallocate(fRedirUPL);
3498 }
3499
3500 super::free();
3501 }
3502
3503 IOByteCount IOMemoryMap::getLength()
3504 {
3505 return( fLength );
3506 }
3507
3508 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3509 {
3510 #ifndef __LP64__
3511 if (fSuperMap)
3512 fSuperMap->getVirtualAddress();
3513 else if (fAddressMap
3514 && vm_map_is_64bit(fAddressMap)
3515 && (sizeof(IOVirtualAddress) < 8))
3516 {
3517 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3518 }
3519 #endif /* !__LP64__ */
3520
3521 return (fAddress);
3522 }
3523
3524 #ifndef __LP64__
3525 mach_vm_address_t IOMemoryMap::getAddress()
3526 {
3527 return( fAddress);
3528 }
3529
3530 mach_vm_size_t IOMemoryMap::getSize()
3531 {
3532 return( fLength );
3533 }
3534 #endif /* !__LP64__ */
3535
3536
3537 task_t IOMemoryMap::getAddressTask()
3538 {
3539 if( fSuperMap)
3540 return( fSuperMap->getAddressTask());
3541 else
3542 return( fAddressTask);
3543 }
3544
3545 IOOptionBits IOMemoryMap::getMapOptions()
3546 {
3547 return( fOptions);
3548 }
3549
3550 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3551 {
3552 return( fMemory );
3553 }
3554
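// Return a mapping compatible with newMapping's task, address, options,
// offset and length: either this mapping itself or newMapping adjusted to be
// a sub-mapping of it; 0 if the requests are not compatible.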
3555 IOMemoryMap * IOMemoryMap::copyCompatible(
3556 IOMemoryMap * newMapping )
3557 {
3558 task_t task = newMapping->getAddressTask();
3559 mach_vm_address_t toAddress = newMapping->fAddress;
3560 IOOptionBits _options = newMapping->fOptions;
3561 mach_vm_size_t _offset = newMapping->fOffset;
3562 mach_vm_size_t _length = newMapping->fLength;
3563
3564 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3565 return( 0 );
3566 if( (fOptions ^ _options) & kIOMapReadOnly)
3567 return( 0 );
3568 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3569 && ((fOptions ^ _options) & kIOMapCacheMask))
3570 return( 0 );
3571
3572 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3573 return( 0 );
3574
3575 if( _offset < fOffset)
3576 return( 0 );
3577
3578 _offset -= fOffset;
3579
3580 if( (_offset + _length) > fLength)
3581 return( 0 );
3582
3583 retain();
3584 if( (fLength == _length) && (!_offset))
3585 {
3586 newMapping = this;
3587 }
3588 else
3589 {
3590 newMapping->fSuperMap = this;
3591 newMapping->fOffset = fOffset + _offset;
3592 newMapping->fAddress = fAddress + _offset;
3593 }
3594
3595 return( newMapping );
3596 }
3597
3598 IOReturn IOMemoryMap::wireRange(
3599 uint32_t options,
3600 mach_vm_size_t offset,
3601 mach_vm_size_t length)
3602 {
3603 IOReturn kr;
3604 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3605 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3606
3607 if (kIODirectionOutIn & options)
3608 {
3609 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3610 }
3611 else
3612 {
3613 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3614 }
3615
3616 return (kr);
3617 }
3618
3619
3620 IOPhysicalAddress
3621 #ifdef __LP64__
3622 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3623 #else /* !__LP64__ */
3624 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3625 #endif /* !__LP64__ */
3626 {
3627 IOPhysicalAddress address;
3628
3629 LOCK;
3630 #ifdef __LP64__
3631 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3632 #else /* !__LP64__ */
3633 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3634 #endif /* !__LP64__ */
3635 UNLOCK;
3636
3637 return( address );
3638 }
3639
3640 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3641
3642 #undef super
3643 #define super OSObject
3644
3645 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3646
3647 void IOMemoryDescriptor::initialize( void )
3648 {
3649 if( 0 == gIOMemoryLock)
3650 gIOMemoryLock = IORecursiveLockAlloc();
3651
3652 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3653 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3654 gIOLastPage = IOGetLastPageNumber();
3655
3656 gIOPageAllocLock = IOSimpleLockAlloc();
3657 queue_init(&gIOPageAllocList);
3658 }
3659
3660 void IOMemoryDescriptor::free( void )
3661 {
3662 if( _mappings)
3663 _mappings->release();
3664
3665 super::free();
3666 }
3667
3668 IOMemoryMap * IOMemoryDescriptor::setMapping(
3669 task_t intoTask,
3670 IOVirtualAddress mapAddress,
3671 IOOptionBits options )
3672 {
3673 return (createMappingInTask( intoTask, mapAddress,
3674 options | kIOMapStatic,
3675 0, getLength() ));
3676 }
3677
3678 IOMemoryMap * IOMemoryDescriptor::map(
3679 IOOptionBits options )
3680 {
3681 return (createMappingInTask( kernel_task, 0,
3682 options | kIOMapAnywhere,
3683 0, getLength() ));
3684 }
3685
3686 #ifndef __LP64__
3687 IOMemoryMap * IOMemoryDescriptor::map(
3688 task_t intoTask,
3689 IOVirtualAddress atAddress,
3690 IOOptionBits options,
3691 IOByteCount offset,
3692 IOByteCount length )
3693 {
3694 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3695 {
3696 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3697 return (0);
3698 }
3699
3700 return (createMappingInTask(intoTask, atAddress,
3701 options, offset, length));
3702 }
3703 #endif /* !__LP64__ */
3704
3705 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3706 task_t intoTask,
3707 mach_vm_address_t atAddress,
3708 IOOptionBits options,
3709 mach_vm_size_t offset,
3710 mach_vm_size_t length)
3711 {
3712 IOMemoryMap * result;
3713 IOMemoryMap * mapping;
3714
3715 if (0 == length)
3716 length = getLength();
3717
3718 mapping = new IOMemoryMap;
3719
3720 if( mapping
3721 && !mapping->init( intoTask, atAddress,
3722 options, offset, length )) {
3723 mapping->release();
3724 mapping = 0;
3725 }
3726
3727 if (mapping)
3728 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3729 else
3730 result = 0;
3731
3732 #if DEBUG
3733 if (!result)
3734 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3735 this, atAddress, (uint32_t) options, offset, length);
3736 #endif
3737
3738 return (result);
3739 }
3740
3741 #ifndef __LP64__ // there is only a 64 bit version for LP64
3742 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3743 IOOptionBits options,
3744 IOByteCount offset)
3745 {
3746 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3747 }
3748 #endif
3749
3750 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3751 IOOptionBits options,
3752 mach_vm_size_t offset)
3753 {
3754 IOReturn err = kIOReturnSuccess;
3755 IOMemoryDescriptor * physMem = 0;
3756
3757 LOCK;
3758
3759 if (fAddress && fAddressMap) do
3760 {
3761 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3762 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3763 {
3764 physMem = fMemory;
3765 physMem->retain();
3766 }
3767
3768 if (!fRedirUPL)
3769 {
3770 vm_size_t size = round_page(fLength);
3771 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3772 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3773 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3774 NULL, NULL,
3775 &flags))
3776 fRedirUPL = 0;
3777
3778 if (physMem)
3779 {
3780 IOUnmapPages( fAddressMap, fAddress, fLength );
3781 if (false)
3782 physMem->redirect(0, true);
3783 }
3784 }
3785
3786 if (newBackingMemory)
3787 {
3788 if (newBackingMemory != fMemory)
3789 {
3790 fOffset = 0;
3791 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3792 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3793 offset, fLength))
3794 err = kIOReturnError;
3795 }
3796 if (fRedirUPL)
3797 {
3798 upl_commit(fRedirUPL, NULL, 0);
3799 upl_deallocate(fRedirUPL);
3800 fRedirUPL = 0;
3801 }
3802 if (false && physMem)
3803 physMem->redirect(0, false);
3804 }
3805 }
3806 while (false);
3807
3808 UNLOCK;
3809
3810 if (physMem)
3811 physMem->release();
3812
3813 return (err);
3814 }
3815
3816 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3817 IOMemoryDescriptor * owner,
3818 task_t __intoTask,
3819 IOVirtualAddress __address,
3820 IOOptionBits options,
3821 IOByteCount __offset,
3822 IOByteCount __length )
3823 {
3824 #ifndef __LP64__
3825 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3826 #endif /* !__LP64__ */
3827
3828 IOMemoryDescriptor * mapDesc = 0;
3829 IOMemoryMap * result = 0;
3830 OSIterator * iter;
3831
3832 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3833 mach_vm_size_t offset = mapping->fOffset + __offset;
3834 mach_vm_size_t length = mapping->fLength;
3835
3836 mapping->fOffset = offset;
3837
3838 LOCK;
3839
3840 do
3841 {
3842 if (kIOMapStatic & options)
3843 {
3844 result = mapping;
3845 addMapping(mapping);
3846 mapping->setMemoryDescriptor(this, 0);
3847 continue;
3848 }
3849
3850 if (kIOMapUnique & options)
3851 {
3852 addr64_t phys;
3853 IOByteCount physLen;
3854
3855 // if (owner != this) continue;
3856
3857 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3858 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3859 {
3860 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3861 if (!phys || (physLen < length))
3862 continue;
3863
3864 mapDesc = IOMemoryDescriptor::withAddressRange(
3865 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3866 if (!mapDesc)
3867 continue;
3868 offset = 0;
3869 mapping->fOffset = offset;
3870 }
3871 }
3872 else
3873 {
3874 // look for a compatible existing mapping
3875 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3876 {
3877 IOMemoryMap * lookMapping;
3878 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3879 {
3880 if ((result = lookMapping->copyCompatible(mapping)))
3881 {
3882 addMapping(result);
3883 result->setMemoryDescriptor(this, offset);
3884 break;
3885 }
3886 }
3887 iter->release();
3888 }
3889 if (result || (options & kIOMapReference))
3890 {
3891 if (result != mapping)
3892 {
3893 mapping->release();
3894 mapping = NULL;
3895 }
3896 continue;
3897 }
3898 }
3899
3900 if (!mapDesc)
3901 {
3902 mapDesc = this;
3903 mapDesc->retain();
3904 }
3905 IOReturn
3906 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3907 if (kIOReturnSuccess == kr)
3908 {
3909 result = mapping;
3910 mapDesc->addMapping(result);
3911 result->setMemoryDescriptor(mapDesc, offset);
3912 }
3913 else
3914 {
3915 mapping->release();
3916 mapping = NULL;
3917 }
3918 }
3919 while( false );
3920
3921 UNLOCK;
3922
3923 if (mapDesc)
3924 mapDesc->release();
3925
3926 return (result);
3927 }
3928
3929 void IOMemoryDescriptor::addMapping(
3930 IOMemoryMap * mapping )
3931 {
3932 if( mapping)
3933 {
3934 if( 0 == _mappings)
3935 _mappings = OSSet::withCapacity(1);
3936 if( _mappings )
3937 _mappings->setObject( mapping );
3938 }
3939 }
3940
3941 void IOMemoryDescriptor::removeMapping(
3942 IOMemoryMap * mapping )
3943 {
3944 if( _mappings)
3945 _mappings->removeObject( mapping);
3946 }
3947
3948 #ifndef __LP64__
3949 // obsolete initializers
3950 // - initWithOptions is the designated initializer
3951 bool
3952 IOMemoryDescriptor::initWithAddress(void * address,
3953 IOByteCount length,
3954 IODirection direction)
3955 {
3956 return( false );
3957 }
3958
3959 bool
3960 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3961 IOByteCount length,
3962 IODirection direction,
3963 task_t task)
3964 {
3965 return( false );
3966 }
3967
3968 bool
3969 IOMemoryDescriptor::initWithPhysicalAddress(
3970 IOPhysicalAddress address,
3971 IOByteCount length,
3972 IODirection direction )
3973 {
3974 return( false );
3975 }
3976
3977 bool
3978 IOMemoryDescriptor::initWithRanges(
3979 IOVirtualRange * ranges,
3980 UInt32 withCount,
3981 IODirection direction,
3982 task_t task,
3983 bool asReference)
3984 {
3985 return( false );
3986 }
3987
3988 bool
3989 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3990 UInt32 withCount,
3991 IODirection direction,
3992 bool asReference)
3993 {
3994 return( false );
3995 }
3996
3997 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3998 IOByteCount * lengthOfSegment)
3999 {
4000 return( 0 );
4001 }
4002 #endif /* !__LP64__ */
4003
4004 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4005
4006 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4007 {
4008 OSSymbol const *keys[2];
4009 OSObject *values[2];
4010 struct SerData {
4011 user_addr_t address;
4012 user_size_t length;
4013 } *vcopy;
4014 unsigned int index, nRanges;
4015 bool result;
4016
4017 IOOptionBits type = _flags & kIOMemoryTypeMask;
4018
4019 if (s == NULL) return false;
4020 if (s->previouslySerialized(this)) return true;
4021
4022 // Pretend we are an array.
4023 if (!s->addXMLStartTag(this, "array")) return false;
4024
4025 nRanges = _rangesCount;
4026 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4027 if (vcopy == 0) return false;
4028
4029 keys[0] = OSSymbol::withCString("address");
4030 keys[1] = OSSymbol::withCString("length");
4031
4032 result = false;
4033 values[0] = values[1] = 0;
4034
4035 // From this point on we can go to bail.
4036
4037 // Copy the volatile data so we don't have to allocate memory
4038 // while the lock is held.
4039 LOCK;
4040 if (nRanges == _rangesCount) {
4041 Ranges vec = _ranges;
4042 for (index = 0; index < nRanges; index++) {
4043 user_addr_t addr; IOByteCount len;
4044 getAddrLenForInd(addr, len, type, vec, index);
4045 vcopy[index].address = addr;
4046 vcopy[index].length = len;
4047 }
4048 } else {
4049 // The descriptor changed out from under us. Give up.
4050 UNLOCK;
4051 result = false;
4052 goto bail;
4053 }
4054 UNLOCK;
4055
4056 for (index = 0; index < nRanges; index++)
4057 {
4058 user_addr_t addr = vcopy[index].address;
4059 IOByteCount len = (IOByteCount) vcopy[index].length;
4060 values[0] =
4061 OSNumber::withNumber(addr, sizeof(addr) * 8);
4062 if (values[0] == 0) {
4063 result = false;
4064 goto bail;
4065 }
4066 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4067 if (values[1] == 0) {
4068 result = false;
4069 goto bail;
4070 }
4071 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4072 if (dict == 0) {
4073 result = false;
4074 goto bail;
4075 }
4076 values[0]->release();
4077 values[1]->release();
4078 values[0] = values[1] = 0;
4079
4080 result = dict->serialize(s);
4081 dict->release();
4082 if (!result) {
4083 goto bail;
4084 }
4085 }
4086 result = s->addXMLEndTag("array");
4087
4088 bail:
4089 if (values[0])
4090 values[0]->release();
4091 if (values[1])
4092 values[1]->release();
4093 if (keys[0])
4094 keys[0]->release();
4095 if (keys[1])
4096 keys[1]->release();
4097 if (vcopy)
4098 IOFree(vcopy, sizeof(SerData) * nRanges);
4099 return result;
4100 }
4101
4102 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4103
4104 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4105 #ifdef __LP64__
4106 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4107 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4108 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4109 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4110 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4111 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4112 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4113 #else /* !__LP64__ */
4114 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4115 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4116 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4117 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4118 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4119 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4120 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4121 #endif /* !__LP64__ */
4122 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4123 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4124 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4125 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4126 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4127 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4128 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4129 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4130
4131 /* ex-inline function implementation */
4132 IOPhysicalAddress
4133 IOMemoryDescriptor::getPhysicalAddress()
4134 { return( getPhysicalSegment( 0, 0 )); }