[apple/xnu.git] iokit/Kernel/IOMemoryDescriptor.cpp (blob 8d49aeebe03389fd55f04874300387ff19068fa2)
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IODMACommand.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45
46 #ifndef __LP64__
47 #include <IOKit/IOSubMemoryDescriptor.h>
48 #endif /* !__LP64__ */
49
50 #include <IOKit/IOKitDebug.h>
51 #include <libkern/OSDebug.h>
52
53 #include "IOKitKernelInternal.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 extern void ipc_port_release_send(ipc_port_t port);
76
77 kern_return_t
78 memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
86
87 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
88
89 __END_DECLS
90
91 #define kIOMaximumMappedIOByteCount (512*1024*1024)
92
93 #define kIOMapperWaitSystem ((IOMapper *) 1)
94
95 static IOMapper * gIOSystemMapper = NULL;
96
97 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
98
99 ppnum_t gIOLastPage;
100
101 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
102
103 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
104
105 #define super IOMemoryDescriptor
106
107 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
108
109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111 static IORecursiveLock * gIOMemoryLock;
112
113 #define LOCK IORecursiveLockLock( gIOMemoryLock)
114 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
115 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
116 #define WAKEUP \
117 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
118
119 #if 0
120 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
121 #else
122 #define DEBG(fmt, args...) {}
123 #endif
124
125 #define IOMD_DEBUG_DMAACTIVE 1
126
127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
129 // Some data structures and accessor macros used by the
130 // initWithOptions() function.
131
132 enum ioPLBlockFlags {
133 kIOPLOnDevice = 0x00000001,
134 kIOPLExternUPL = 0x00000002,
135 };
136
137 struct typePersMDData
138 {
139 const IOGeneralMemoryDescriptor *fMD;
140 ipc_port_t fMemEntry;
141 };
142
143 struct ioPLBlock {
144 upl_t fIOPL;
145 vm_address_t fPageInfo; // Pointer to page list or index into it
146 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
147 ppnum_t fMappedPage; // Page number of first page in this iopl
148 unsigned int fPageOffset; // Offset within first page of iopl
149 unsigned int fFlags; // Flags
150 };
151
152 struct ioGMDData {
153 IOMapper * fMapper;
154 uint8_t fDMAMapNumAddressBits;
155 uint64_t fDMAMapAlignment;
156 addr64_t fMappedBase;
157 uint64_t fPreparationID;
158 unsigned int fPageCnt;
159 #if __LP64__
160 // align arrays to 8 bytes so following macros work
161 unsigned int fPad;
162 #endif
163 upl_page_info_t fPageList[1]; /* variable length */
164 ioPLBlock fBlocks[1]; /* variable length */
165 };
166
167 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
168 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
169 #define getNumIOPL(osd, d) \
170 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
171 #define getPageList(d) (&(d->fPageList[0]))
172 #define computeDataSize(p, u) \
173 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
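/*
 * Editorial sketch (not part of the original source): the ioGMDData block is
 * laid out as a fixed header, then fPageCnt upl_page_info_t entries, then the
 * ioPLBlock array, so computeDataSize() is just that arithmetic. For example,
 * an allocation covering 4 pages with room for 1 iopl would be sized as:
 *
 *   size_t bytes = computeDataSize(4, 1);
 *   // == offsetof(ioGMDData, fPageList)
 *   //    + 4 * sizeof(upl_page_info_t)
 *   //    + 1 * sizeof(ioPLBlock)
 *
 * and getIOPLList() then points just past the page-info array.
 */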
174
175
176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
178 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
179
180
181 extern "C" {
182
183 kern_return_t device_data_action(
184 uintptr_t device_handle,
185 ipc_port_t device_pager,
186 vm_prot_t protection,
187 vm_object_offset_t offset,
188 vm_size_t size)
189 {
190 kern_return_t kr;
191 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
192 IOMemoryDescriptor * memDesc;
193
194 LOCK;
195 memDesc = ref->dp.memory;
196 if( memDesc)
197 {
198 memDesc->retain();
199 kr = memDesc->handleFault( device_pager, 0, 0,
200 offset, size, kIOMapDefaultCache /*?*/);
201 memDesc->release();
202 }
203 else
204 kr = KERN_ABORTED;
205 UNLOCK;
206
207 return( kr );
208 }
209
210 kern_return_t device_close(
211 uintptr_t device_handle)
212 {
213 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
214
215 IODelete( ref, IOMemoryDescriptorReserved, 1 );
216
217 return( kIOReturnSuccess );
218 }
219 }; // end extern "C"
220
221 // Note this inline function uses C++ reference arguments to return values.
222 // This means that pointers are not passed, and NULLs don't have to be
223 // checked for, since a NULL reference is illegal.
224 static inline void
225 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
226 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227 {
228 assert(kIOMemoryTypeUIO == type
229 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
230 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
231 if (kIOMemoryTypeUIO == type) {
232 user_size_t us;
233 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
234 }
235 #ifndef __LP64__
236 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
237 IOAddressRange cur = r.v64[ind];
238 addr = cur.address;
239 len = cur.length;
240 }
241 #endif /* !__LP64__ */
242 else {
243 IOVirtualRange cur = r.v[ind];
244 addr = cur.address;
245 len = cur.length;
246 }
247 }
248
249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
250
251 IOMemoryDescriptor *
252 IOMemoryDescriptor::withAddress(void * address,
253 IOByteCount length,
254 IODirection direction)
255 {
256 return IOMemoryDescriptor::
257 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
258 }
259
260 #ifndef __LP64__
261 IOMemoryDescriptor *
262 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
263 IOByteCount length,
264 IODirection direction,
265 task_t task)
266 {
267 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
268 if (that)
269 {
270 if (that->initWithAddress(address, length, direction, task))
271 return that;
272
273 that->release();
274 }
275 return 0;
276 }
277 #endif /* !__LP64__ */
278
279 IOMemoryDescriptor *
280 IOMemoryDescriptor::withPhysicalAddress(
281 IOPhysicalAddress address,
282 IOByteCount length,
283 IODirection direction )
284 {
285 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
286 }
287
288 #ifndef __LP64__
289 IOMemoryDescriptor *
290 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
291 UInt32 withCount,
292 IODirection direction,
293 task_t task,
294 bool asReference)
295 {
296 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
297 if (that)
298 {
299 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
300 return that;
301
302 that->release();
303 }
304 return 0;
305 }
306 #endif /* !__LP64__ */
307
308 IOMemoryDescriptor *
309 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
310 mach_vm_size_t length,
311 IOOptionBits options,
312 task_t task)
313 {
314 IOAddressRange range = { address, length };
315 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
316 }
317
318 IOMemoryDescriptor *
319 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
320 UInt32 rangeCount,
321 IOOptionBits options,
322 task_t task)
323 {
324 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
325 if (that)
326 {
327 if (task)
328 options |= kIOMemoryTypeVirtual64;
329 else
330 options |= kIOMemoryTypePhysical64;
331
332 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
333 return that;
334
335 that->release();
336 }
337
338 return 0;
339 }
340
341
342 /*
343 * withOptions:
344 *
345 * Create a new IOMemoryDescriptor. The buffer is made up of several
346 * virtual address ranges, from a given task.
347 *
348 * Passing the ranges as a reference will avoid an extra allocation.
349 */
350 IOMemoryDescriptor *
351 IOMemoryDescriptor::withOptions(void * buffers,
352 UInt32 count,
353 UInt32 offset,
354 task_t task,
355 IOOptionBits opts,
356 IOMapper * mapper)
357 {
358 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
359
360 if (self
361 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
362 {
363 self->release();
364 return 0;
365 }
366
367 return self;
368 }
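/*
 * Usage sketch (editorial addition, not part of the original source): a
 * typical kernel client wraps an existing buffer with one of these factory
 * methods, prepares it before DMA and completes it afterwards. The names
 * myBuffer and myLength below are placeholders for this example.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       (mach_vm_address_t) myBuffer, myLength,
 *       kIODirectionOut, kernel_task);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... hand physical segments to hardware via getPhysicalSegment() ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */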
369
370 bool IOMemoryDescriptor::initWithOptions(void * buffers,
371 UInt32 count,
372 UInt32 offset,
373 task_t task,
374 IOOptionBits options,
375 IOMapper * mapper)
376 {
377 return( false );
378 }
379
380 #ifndef __LP64__
381 IOMemoryDescriptor *
382 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
383 UInt32 withCount,
384 IODirection direction,
385 bool asReference)
386 {
387 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
388 if (that)
389 {
390 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
391 return that;
392
393 that->release();
394 }
395 return 0;
396 }
397
398 IOMemoryDescriptor *
399 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
400 IOByteCount offset,
401 IOByteCount length,
402 IODirection direction)
403 {
404 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
405 }
406 #endif /* !__LP64__ */
407
408 IOMemoryDescriptor *
409 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
410 {
411 IOGeneralMemoryDescriptor *origGenMD =
412 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
413
414 if (origGenMD)
415 return IOGeneralMemoryDescriptor::
416 withPersistentMemoryDescriptor(origGenMD);
417 else
418 return 0;
419 }
420
421 IOMemoryDescriptor *
422 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
423 {
424 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
425
426 if (!sharedMem)
427 return 0;
428
429 if (sharedMem == originalMD->_memEntry) {
430 originalMD->retain(); // Add a new reference to ourselves
431 ipc_port_release_send(sharedMem); // Remove extra send right
432 return originalMD;
433 }
434
435 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
436 typePersMDData initData = { originalMD, sharedMem };
437
438 if (self
439 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
440 self->release();
441 self = 0;
442 }
443 return self;
444 }
445
446 void *IOGeneralMemoryDescriptor::createNamedEntry()
447 {
448 kern_return_t error;
449 ipc_port_t sharedMem;
450
451 IOOptionBits type = _flags & kIOMemoryTypeMask;
452
453 user_addr_t range0Addr;
454 IOByteCount range0Len;
455 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
456 range0Addr = trunc_page_64(range0Addr);
457
458 vm_size_t size = ptoa_32(_pages);
459 vm_address_t kernelPage = (vm_address_t) range0Addr;
460
461 vm_map_t theMap = ((_task == kernel_task)
462 && (kIOMemoryBufferPageable & _flags))
463 ? IOPageableMapForAddress(kernelPage)
464 : get_task_map(_task);
465
466 memory_object_size_t actualSize = size;
467 vm_prot_t prot = VM_PROT_READ;
468 if (kIODirectionOut != (kIODirectionOutIn & _flags))
469 prot |= VM_PROT_WRITE;
470
471 if (_memEntry)
472 prot |= MAP_MEM_NAMED_REUSE;
473
474 error = mach_make_memory_entry_64(theMap,
475 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
476
477 if (KERN_SUCCESS == error) {
478 if (actualSize == size) {
479 return sharedMem;
480 } else {
481 #if IOASSERT
482 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
483 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
484 #endif
485 ipc_port_release_send( sharedMem );
486 }
487 }
488
489 return MACH_PORT_NULL;
490 }
491
492 #ifndef __LP64__
493 bool
494 IOGeneralMemoryDescriptor::initWithAddress(void * address,
495 IOByteCount withLength,
496 IODirection withDirection)
497 {
498 _singleRange.v.address = (vm_offset_t) address;
499 _singleRange.v.length = withLength;
500
501 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
502 }
503
504 bool
505 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
506 IOByteCount withLength,
507 IODirection withDirection,
508 task_t withTask)
509 {
510 _singleRange.v.address = address;
511 _singleRange.v.length = withLength;
512
513 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
514 }
515
516 bool
517 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
518 IOPhysicalAddress address,
519 IOByteCount withLength,
520 IODirection withDirection )
521 {
522 _singleRange.p.address = address;
523 _singleRange.p.length = withLength;
524
525 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
526 }
527
528 bool
529 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
530 IOPhysicalRange * ranges,
531 UInt32 count,
532 IODirection direction,
533 bool reference)
534 {
535 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
536
537 if (reference)
538 mdOpts |= kIOMemoryAsReference;
539
540 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
541 }
542
543 bool
544 IOGeneralMemoryDescriptor::initWithRanges(
545 IOVirtualRange * ranges,
546 UInt32 count,
547 IODirection direction,
548 task_t task,
549 bool reference)
550 {
551 IOOptionBits mdOpts = direction;
552
553 if (reference)
554 mdOpts |= kIOMemoryAsReference;
555
556 if (task) {
557 mdOpts |= kIOMemoryTypeVirtual;
558
559 // Auto-prepare if this is a kernel memory descriptor as very few
560 // clients bother to prepare() kernel memory.
561 // But it was not enforced, so what are you going to do?
562 if (task == kernel_task)
563 mdOpts |= kIOMemoryAutoPrepare;
564 }
565 else
566 mdOpts |= kIOMemoryTypePhysical;
567
568 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
569 }
570 #endif /* !__LP64__ */
571
572 /*
573 * initWithOptions:
574 *
575 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
576 * from a given task, several physical ranges, an UPL from the ubc
577 * system or a uio (may be 64bit) from the BSD subsystem.
578 *
579 * Passing the ranges as a reference will avoid an extra allocation.
580 *
581 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
582 * existing instance -- note this behavior is not commonly supported in other
583 * I/O Kit classes, although it is supported here.
584 */
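/*
 * Illustrative sketch of the re-use behaviour described above (editorial
 * addition, not part of the original source): an existing descriptor can be
 * retargeted by calling initWithOptions() again rather than allocating a new
 * object. md, someTask and the range values below are placeholders.
 *
 *   IOAddressRange ranges[2] = { { addrA, lenA }, { addrB, lenB } };
 *   if (!md->initWithOptions(ranges, 2, 0, someTask,
 *                            kIOMemoryTypeVirtual64 | kIODirectionOutIn,
 *                            0)) {        // no mapper
 *       // descriptor could not be retargeted
 *   }
 */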
585
586 bool
587 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
588 UInt32 count,
589 UInt32 offset,
590 task_t task,
591 IOOptionBits options,
592 IOMapper * mapper)
593 {
594 IOOptionBits type = options & kIOMemoryTypeMask;
595
596 #ifndef __LP64__
597 if (task
598 && (kIOMemoryTypeVirtual == type)
599 && vm_map_is_64bit(get_task_map(task))
600 && ((IOVirtualRange *) buffers)->address)
601 {
602 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
603 return false;
604 }
605 #endif /* !__LP64__ */
606
607 // Grab the original MD's configuration data to initialise the
608 // arguments to this function.
609 if (kIOMemoryTypePersistentMD == type) {
610
611 typePersMDData *initData = (typePersMDData *) buffers;
612 const IOGeneralMemoryDescriptor *orig = initData->fMD;
613 ioGMDData *dataP = getDataP(orig->_memoryEntries);
614
615 // Only accept persistent memory descriptors with valid dataP data.
616 assert(orig->_rangesCount == 1);
617 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
618 return false;
619
620 _memEntry = initData->fMemEntry; // Grab the new named entry
621 options = orig->_flags & ~kIOMemoryAsReference;
622 type = options & kIOMemoryTypeMask;
623 buffers = orig->_ranges.v;
624 count = orig->_rangesCount;
625
626 // Now grab the original task and whatever mapper was previously used
627 task = orig->_task;
628 mapper = dataP->fMapper;
629
630 // We are ready to go through the original initialisation now
631 }
632
633 switch (type) {
634 case kIOMemoryTypeUIO:
635 case kIOMemoryTypeVirtual:
636 #ifndef __LP64__
637 case kIOMemoryTypeVirtual64:
638 #endif /* !__LP64__ */
639 assert(task);
640 if (!task)
641 return false;
642 break;
643
644 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
645 #ifndef __LP64__
646 case kIOMemoryTypePhysical64:
647 #endif /* !__LP64__ */
648 case kIOMemoryTypeUPL:
649 assert(!task);
650 break;
651 default:
652 return false; /* bad argument */
653 }
654
655 assert(buffers);
656 assert(count);
657
658 /*
659 * We can check the _initialized instance variable before having ever set
660 * it to an initial value because I/O Kit guarantees that all our instance
661 * variables are zeroed on an object's allocation.
662 */
663
664 if (_initialized) {
665 /*
666 * An existing memory descriptor is being retargeted to point to
667 * somewhere else. Clean up our present state.
668 */
669 IOOptionBits type = _flags & kIOMemoryTypeMask;
670 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
671 {
672 while (_wireCount)
673 complete();
674 }
675 if (_ranges.v && !(kIOMemoryAsReference & _flags))
676 {
677 if (kIOMemoryTypeUIO == type)
678 uio_free((uio_t) _ranges.v);
679 #ifndef __LP64__
680 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
681 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
682 #endif /* !__LP64__ */
683 else
684 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
685 }
686
687 if (_memEntry)
688 {
689 ipc_port_release_send((ipc_port_t) _memEntry);
690 _memEntry = 0;
691 }
692 if (_mappings)
693 _mappings->flushCollection();
694 }
695 else {
696 if (!super::init())
697 return false;
698 _initialized = true;
699 }
700
701 // Grab the appropriate mapper
702 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
703 if (kIOMemoryMapperNone & options)
704 mapper = 0; // No Mapper
705 else if (mapper == kIOMapperSystem) {
706 IOMapper::checkForSystemMapper();
707 gIOSystemMapper = mapper = IOMapper::gSystem;
708 }
709
710 // Temp binary compatibility for kIOMemoryThreadSafe
711 if (kIOMemoryReserved6156215 & options)
712 {
713 options &= ~kIOMemoryReserved6156215;
714 options |= kIOMemoryThreadSafe;
715 }
716 // Remove the dynamic internal use flags from the initial setting
717 options &= ~(kIOMemoryPreparedReadOnly);
718 _flags = options;
719 _task = task;
720
721 #ifndef __LP64__
722 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
723 #endif /* !__LP64__ */
724
725 __iomd_reservedA = 0;
726 __iomd_reservedB = 0;
727 _highestPage = 0;
728
729 if (kIOMemoryThreadSafe & options)
730 {
731 if (!_prepareLock)
732 _prepareLock = IOLockAlloc();
733 }
734 else if (_prepareLock)
735 {
736 IOLockFree(_prepareLock);
737 _prepareLock = NULL;
738 }
739
740 if (kIOMemoryTypeUPL == type) {
741
742 ioGMDData *dataP;
743 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
744
745 if (!initMemoryEntries(dataSize, mapper)) return (false);
746 dataP = getDataP(_memoryEntries);
747 dataP->fPageCnt = 0;
748
749 // _wireCount++; // UPLs start out life wired
750
751 _length = count;
752 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
753
754 ioPLBlock iopl;
755 iopl.fIOPL = (upl_t) buffers;
756 upl_set_referenced(iopl.fIOPL, true);
757 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
758
759 if (upl_get_size(iopl.fIOPL) < (count + offset))
760 panic("short external upl");
761
762 _highestPage = upl_get_highest_page(iopl.fIOPL);
763
764 // Set the flag kIOPLOnDevice, conveniently equal to 1
765 iopl.fFlags = pageList->device | kIOPLExternUPL;
766 if (!pageList->device) {
767 // Pre-compute the offset into the UPL's page list
768 pageList = &pageList[atop_32(offset)];
769 offset &= PAGE_MASK;
770 }
771 iopl.fIOMDOffset = 0;
772 iopl.fMappedPage = 0;
773 iopl.fPageInfo = (vm_address_t) pageList;
774 iopl.fPageOffset = offset;
775 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
776 }
777 else {
778 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
779 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
780
781 // Initialize the memory descriptor
782 if (options & kIOMemoryAsReference) {
783 #ifndef __LP64__
784 _rangesIsAllocated = false;
785 #endif /* !__LP64__ */
786
787 // Hack assignment to get the buffer arg into _ranges.
788 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
789 // work, C++ sigh.
790 // This also initialises the uio & physical ranges.
791 _ranges.v = (IOVirtualRange *) buffers;
792 }
793 else {
794 #ifndef __LP64__
795 _rangesIsAllocated = true;
796 #endif /* !__LP64__ */
797 switch (type)
798 {
799 case kIOMemoryTypeUIO:
800 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
801 break;
802
803 #ifndef __LP64__
804 case kIOMemoryTypeVirtual64:
805 case kIOMemoryTypePhysical64:
806 if (count == 1
807 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
808 ) {
809 if (kIOMemoryTypeVirtual64 == type)
810 type = kIOMemoryTypeVirtual;
811 else
812 type = kIOMemoryTypePhysical;
813 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
814 _rangesIsAllocated = false;
815 _ranges.v = &_singleRange.v;
816 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
817 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
818 break;
819 }
820 _ranges.v64 = IONew(IOAddressRange, count);
821 if (!_ranges.v64)
822 return false;
823 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
824 break;
825 #endif /* !__LP64__ */
826 case kIOMemoryTypeVirtual:
827 case kIOMemoryTypePhysical:
828 if (count == 1) {
829 _flags |= kIOMemoryAsReference;
830 #ifndef __LP64__
831 _rangesIsAllocated = false;
832 #endif /* !__LP64__ */
833 _ranges.v = &_singleRange.v;
834 } else {
835 _ranges.v = IONew(IOVirtualRange, count);
836 if (!_ranges.v)
837 return false;
838 }
839 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
840 break;
841 }
842 }
843
844 // Find starting address within the vector of ranges
845 Ranges vec = _ranges;
846 UInt32 length = 0;
847 UInt32 pages = 0;
848 for (unsigned ind = 0; ind < count; ind++) {
849 user_addr_t addr;
850 IOPhysicalLength len;
851
852 // addr & len are returned by this function
853 getAddrLenForInd(addr, len, type, vec, ind);
854 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
855 len += length;
856 assert(len >= length); // Check for 32 bit wrap around
857 length = len;
858
859 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
860 {
861 ppnum_t highPage = atop_64(addr + len - 1);
862 if (highPage > _highestPage)
863 _highestPage = highPage;
864 }
865 }
866 _length = length;
867 _pages = pages;
868 _rangesCount = count;
869
870 // Auto-prepare memory at creation time.
871 // Implied completion when the descriptor is freed.
872 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
873 _wireCount++; // Physical MDs are, by definition, wired
874 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
875 ioGMDData *dataP;
876 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
877
878 if (!initMemoryEntries(dataSize, mapper)) return false;
879 dataP = getDataP(_memoryEntries);
880 dataP->fPageCnt = _pages;
881
882 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
883 _memEntry = createNamedEntry();
884
885 if ((_flags & kIOMemoryAutoPrepare)
886 && prepare() != kIOReturnSuccess)
887 return false;
888 }
889 }
890
891 return true;
892 }
893
894 /*
895 * free
896 *
897 * Free resources.
898 */
899 void IOGeneralMemoryDescriptor::free()
900 {
901 IOOptionBits type = _flags & kIOMemoryTypeMask;
902
903 if( reserved)
904 {
905 LOCK;
906 reserved->dp.memory = 0;
907 UNLOCK;
908 }
909 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
910 {
911 ioGMDData * dataP;
912 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
913 {
914 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
915 dataP->fMappedBase = 0;
916 }
917 }
918 else
919 {
920 while (_wireCount) complete();
921 }
922
923 if (_memoryEntries) _memoryEntries->release();
924
925 if (_ranges.v && !(kIOMemoryAsReference & _flags))
926 {
927 if (kIOMemoryTypeUIO == type)
928 uio_free((uio_t) _ranges.v);
929 #ifndef __LP64__
930 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
931 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
932 #endif /* !__LP64__ */
933 else
934 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
935
936 _ranges.v = NULL;
937 }
938
939 if (reserved)
940 {
941 if (reserved->dp.devicePager)
942 {
943 // memEntry holds a ref on the device pager which owns reserved
944 // (IOMemoryDescriptorReserved) so no reserved access after this point
945 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
946 }
947 else
948 IODelete(reserved, IOMemoryDescriptorReserved, 1);
949 reserved = NULL;
950 }
951
952 if (_memEntry)
953 ipc_port_release_send( (ipc_port_t) _memEntry );
954
955 if (_prepareLock)
956 IOLockFree(_prepareLock);
957
958 super::free();
959 }
960
961 #ifndef __LP64__
962 void IOGeneralMemoryDescriptor::unmapFromKernel()
963 {
964 panic("IOGMD::unmapFromKernel deprecated");
965 }
966
967 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
968 {
969 panic("IOGMD::mapIntoKernel deprecated");
970 }
971 #endif /* !__LP64__ */
972
973 /*
974 * getDirection:
975 *
976 * Get the direction of the transfer.
977 */
978 IODirection IOMemoryDescriptor::getDirection() const
979 {
980 #ifndef __LP64__
981 if (_direction)
982 return _direction;
983 #endif /* !__LP64__ */
984 return (IODirection) (_flags & kIOMemoryDirectionMask);
985 }
986
987 /*
988 * getLength:
989 *
990 * Get the length of the transfer (over all ranges).
991 */
992 IOByteCount IOMemoryDescriptor::getLength() const
993 {
994 return _length;
995 }
996
997 void IOMemoryDescriptor::setTag( IOOptionBits tag )
998 {
999 _tag = tag;
1000 }
1001
1002 IOOptionBits IOMemoryDescriptor::getTag( void )
1003 {
1004 return( _tag);
1005 }
1006
1007 #ifndef __LP64__
1008 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1009 IOPhysicalAddress
1010 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1011 {
1012 addr64_t physAddr = 0;
1013
1014 if( prepare() == kIOReturnSuccess) {
1015 physAddr = getPhysicalSegment64( offset, length );
1016 complete();
1017 }
1018
1019 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1020 }
1021 #endif /* !__LP64__ */
1022
1023 IOByteCount IOMemoryDescriptor::readBytes
1024 (IOByteCount offset, void *bytes, IOByteCount length)
1025 {
1026 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1027 IOByteCount remaining;
1028
1029 // Assert that this entire I/O is within the available range
1030 assert(offset < _length);
1031 assert(offset + length <= _length);
1032 if (offset >= _length) {
1033 return 0;
1034 }
1035
1036 if (kIOMemoryThreadSafe & _flags)
1037 LOCK;
1038
1039 remaining = length = min(length, _length - offset);
1040 while (remaining) { // (process another target segment?)
1041 addr64_t srcAddr64;
1042 IOByteCount srcLen;
1043
1044 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1045 if (!srcAddr64)
1046 break;
1047
1048 // Clip segment length to remaining
1049 if (srcLen > remaining)
1050 srcLen = remaining;
1051
1052 copypv(srcAddr64, dstAddr, srcLen,
1053 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1054
1055 dstAddr += srcLen;
1056 offset += srcLen;
1057 remaining -= srcLen;
1058 }
1059
1060 if (kIOMemoryThreadSafe & _flags)
1061 UNLOCK;
1062
1063 assert(!remaining);
1064
1065 return length - remaining;
1066 }
1067
1068 IOByteCount IOMemoryDescriptor::writeBytes
1069 (IOByteCount offset, const void *bytes, IOByteCount length)
1070 {
1071 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1072 IOByteCount remaining;
1073
1074 // Assert that this entire I/O is within the available range
1075 assert(offset < _length);
1076 assert(offset + length <= _length);
1077
1078 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1079
1080 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1081 return 0;
1082 }
1083
1084 if (kIOMemoryThreadSafe & _flags)
1085 LOCK;
1086
1087 remaining = length = min(length, _length - offset);
1088 while (remaining) { // (process another target segment?)
1089 addr64_t dstAddr64;
1090 IOByteCount dstLen;
1091
1092 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1093 if (!dstAddr64)
1094 break;
1095
1096 // Clip segment length to remaining
1097 if (dstLen > remaining)
1098 dstLen = remaining;
1099
1100 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1101 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1102
1103 srcAddr += dstLen;
1104 offset += dstLen;
1105 remaining -= dstLen;
1106 }
1107
1108 if (kIOMemoryThreadSafe & _flags)
1109 UNLOCK;
1110
1111 assert(!remaining);
1112
1113 return length - remaining;
1114 }
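/*
 * Usage sketch (editorial addition, not part of the original source):
 * readBytes()/writeBytes() bounce data between a caller-supplied kernel
 * buffer and the descriptor's memory one physical segment at a time. The
 * MyHeader struct and md below are placeholders for this example.
 *
 *   MyHeader header;
 *   IOByteCount got = md->readBytes(0, &header, sizeof(header));
 *   if (got == sizeof(header)) {
 *       // ... inspect header, then patch one field back in place ...
 *       md->writeBytes(offsetof(MyHeader, flags),
 *                      &header.flags, sizeof(header.flags));
 *   }
 */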
1115
1116 // osfmk/device/iokit_rpc.c
1117 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1118
1119 #ifndef __LP64__
1120 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1121 {
1122 panic("IOGMD::setPosition deprecated");
1123 }
1124 #endif /* !__LP64__ */
1125
1126 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1127
1128 uint64_t
1129 IOGeneralMemoryDescriptor::getPreparationID( void )
1130 {
1131 ioGMDData *dataP;
1132
1133 if (!_wireCount)
1134 return (kIOPreparationIDUnprepared);
1135
1136 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1137 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1138 {
1139 IOMemoryDescriptor::setPreparationID();
1140 return (IOMemoryDescriptor::getPreparationID());
1141 }
1142
1143 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1144 return (kIOPreparationIDUnprepared);
1145
1146 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1147 {
1148 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1149 }
1150 return (dataP->fPreparationID);
1151 }
1152
1153 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1154 {
1155 if (!reserved)
1156 {
1157 reserved = IONew(IOMemoryDescriptorReserved, 1);
1158 if (reserved)
1159 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1160 }
1161 return (reserved);
1162 }
1163
1164 void IOMemoryDescriptor::setPreparationID( void )
1165 {
1166 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1167 {
1168 #if defined(__ppc__ )
1169 reserved->preparationID = gIOMDPreparationID++;
1170 #else
1171 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1172 #endif
1173 }
1174 }
1175
1176 uint64_t IOMemoryDescriptor::getPreparationID( void )
1177 {
1178 if (reserved)
1179 return (reserved->preparationID);
1180 else
1181 return (kIOPreparationIDUnsupported);
1182 }
1183
1184 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1185 {
1186 IOReturn err = kIOReturnSuccess;
1187 DMACommandOps params;
1188 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1189 ioGMDData *dataP;
1190
1191 params = (op & ~kIOMDDMACommandOperationMask & op);
1192 op &= kIOMDDMACommandOperationMask;
1193
1194 if (kIOMDDMAMap == op)
1195 {
1196 if (dataSize < sizeof(IOMDDMAMapArgs))
1197 return kIOReturnUnderrun;
1198
1199 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1200
1201 if (!_memoryEntries
1202 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1203
1204 if (_memoryEntries && data->fMapper)
1205 {
1206 bool remap = false;
1207 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1208 dataP = getDataP(_memoryEntries);
1209 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits)
1210 {
1211 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1212 remap = ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1213 }
1214 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment)
1215 {
1216 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1217 remap |= (dataP->fDMAMapAlignment > page_size);
1218 }
1219 remap |= (!whole);
1220 if (remap || !dataP->fMappedBase)
1221 {
1222 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1223 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1224 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1225 {
1226 dataP->fMappedBase = data->fAlloc;
1227 data->fAllocCount = 0; // IOMD owns the alloc now
1228 }
1229 }
1230 else
1231 {
1232 data->fAlloc = dataP->fMappedBase;
1233 data->fAllocCount = 0; // IOMD owns the alloc
1234 }
1235 }
1236
1237 return (err);
1238 }
1239
1240 if (kIOMDAddDMAMapSpec == op)
1241 {
1242 if (dataSize < sizeof(IODMAMapSpecification))
1243 return kIOReturnUnderrun;
1244
1245 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1246
1247 if (!_memoryEntries
1248 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1249
1250 if (_memoryEntries)
1251 {
1252 dataP = getDataP(_memoryEntries);
1253 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1254 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1255 if (data->alignment > dataP->fDMAMapAlignment)
1256 dataP->fDMAMapAlignment = data->alignment;
1257 }
1258 return kIOReturnSuccess;
1259 }
1260
1261 if (kIOMDGetCharacteristics == op) {
1262
1263 if (dataSize < sizeof(IOMDDMACharacteristics))
1264 return kIOReturnUnderrun;
1265
1266 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1267 data->fLength = _length;
1268 data->fSGCount = _rangesCount;
1269 data->fPages = _pages;
1270 data->fDirection = getDirection();
1271 if (!_wireCount)
1272 data->fIsPrepared = false;
1273 else {
1274 data->fIsPrepared = true;
1275 data->fHighestPage = _highestPage;
1276 if (_memoryEntries)
1277 {
1278 dataP = getDataP(_memoryEntries);
1279 ioPLBlock *ioplList = getIOPLList(dataP);
1280 UInt count = getNumIOPL(_memoryEntries, dataP);
1281 if (count == 1)
1282 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1283 }
1284 }
1285
1286 return kIOReturnSuccess;
1287
1288 #if IOMD_DEBUG_DMAACTIVE
1289 } else if (kIOMDDMAActive == op) {
1290 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1291 else {
1292 if (md->__iomd_reservedA)
1293 OSDecrementAtomic(&md->__iomd_reservedA);
1294 else
1295 panic("kIOMDSetDMAInactive");
1296 }
1297 #endif /* IOMD_DEBUG_DMAACTIVE */
1298
1299 } else if (kIOMDWalkSegments != op)
1300 return kIOReturnBadArgument;
1301
1302 // Get the next segment
1303 struct InternalState {
1304 IOMDDMAWalkSegmentArgs fIO;
1305 UInt fOffset2Index;
1306 UInt fIndex;
1307 UInt fNextOffset;
1308 } *isP;
1309
1310 // Find the next segment
1311 if (dataSize < sizeof(*isP))
1312 return kIOReturnUnderrun;
1313
1314 isP = (InternalState *) vData;
1315 UInt offset = isP->fIO.fOffset;
1316 bool mapped = isP->fIO.fMapped;
1317
1318 if (IOMapper::gSystem && mapped
1319 && (!(kIOMemoryHostOnly & _flags))
1320 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1321 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1322 {
1323 if (!_memoryEntries
1324 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1325
1326 dataP = getDataP(_memoryEntries);
1327 if (dataP->fMapper)
1328 {
1329 IODMAMapSpecification mapSpec;
1330 bzero(&mapSpec, sizeof(mapSpec));
1331 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1332 mapSpec.alignment = dataP->fDMAMapAlignment;
1333 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1334 if (kIOReturnSuccess != err) return (err);
1335 }
1336 }
1337
1338 if (offset >= _length)
1339 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1340
1341 // Validate the previous offset
1342 UInt ind, off2Ind = isP->fOffset2Index;
1343 if (!params
1344 && offset
1345 && (offset == isP->fNextOffset || off2Ind <= offset))
1346 ind = isP->fIndex;
1347 else
1348 ind = off2Ind = 0; // Start from beginning
1349
1350 UInt length;
1351 UInt64 address;
1352
1353
1354 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1355
1356 // Physical address based memory descriptor
1357 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1358
1359 // Find the range after the one that contains the offset
1360 mach_vm_size_t len;
1361 for (len = 0; off2Ind <= offset; ind++) {
1362 len = physP[ind].length;
1363 off2Ind += len;
1364 }
1365
1366 // Calculate length within range and starting address
1367 length = off2Ind - offset;
1368 address = physP[ind - 1].address + len - length;
1369
1370 if (true && mapped && _memoryEntries
1371 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1372 {
1373 address = dataP->fMappedBase + offset;
1374 }
1375 else
1376 {
1377 // see how far we can coalesce ranges
1378 while (ind < _rangesCount && address + length == physP[ind].address) {
1379 len = physP[ind].length;
1380 length += len;
1381 off2Ind += len;
1382 ind++;
1383 }
1384 }
1385
1386 // correct contiguous check overshoot
1387 ind--;
1388 off2Ind -= len;
1389 }
1390 #ifndef __LP64__
1391 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1392
1393 // Physical address based memory descriptor
1394 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1395
1396 // Find the range after the one that contains the offset
1397 mach_vm_size_t len;
1398 for (len = 0; off2Ind <= offset; ind++) {
1399 len = physP[ind].length;
1400 off2Ind += len;
1401 }
1402
1403 // Calculate length within range and starting address
1404 length = off2Ind - offset;
1405 address = physP[ind - 1].address + len - length;
1406
1407 if (true && mapped && _memoryEntries
1408 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1409 {
1410 address = dataP->fMappedBase + offset;
1411 }
1412 else
1413 {
1414 // see how far we can coalesce ranges
1415 while (ind < _rangesCount && address + length == physP[ind].address) {
1416 len = physP[ind].length;
1417 length += len;
1418 off2Ind += len;
1419 ind++;
1420 }
1421 }
1422 // correct contiguous check overshoot
1423 ind--;
1424 off2Ind -= len;
1425 }
1426 #endif /* !__LP64__ */
1427 else do {
1428 if (!_wireCount)
1429 panic("IOGMD: not wired for the IODMACommand");
1430
1431 assert(_memoryEntries);
1432
1433 dataP = getDataP(_memoryEntries);
1434 const ioPLBlock *ioplList = getIOPLList(dataP);
1435 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1436 upl_page_info_t *pageList = getPageList(dataP);
1437
1438 assert(numIOPLs > 0);
1439
1440 // Scan through iopl info blocks looking for block containing offset
1441 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1442 ind++;
1443
1444 // Go back to actual range as search goes past it
1445 ioPLBlock ioplInfo = ioplList[ind - 1];
1446 off2Ind = ioplInfo.fIOMDOffset;
1447
1448 if (ind < numIOPLs)
1449 length = ioplList[ind].fIOMDOffset;
1450 else
1451 length = _length;
1452 length -= offset; // Remainder within iopl
1453
1454 // Subtract this iopl's offset within the total list
1455 offset -= off2Ind;
1456
1457 // If a mapped address is requested and this is a pre-mapped IOPL
1458 // then we just need to compute an offset relative to the mapped base.
1459 if (mapped && dataP->fMappedBase) {
1460 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1461 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
1462 continue; // Done; leave the do/while(false) now
1463 }
1464
1465 // The offset is rebased into the current iopl.
1466 // Now add the iopl 1st page offset.
1467 offset += ioplInfo.fPageOffset;
1468
1469 // For external UPLs the fPageInfo field points directly to
1470 // the upl's upl_page_info_t array.
1471 if (ioplInfo.fFlags & kIOPLExternUPL)
1472 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1473 else
1474 pageList = &pageList[ioplInfo.fPageInfo];
1475
1476 // Check for direct device non-paged memory
1477 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1478 address = ptoa_64(pageList->phys_addr) + offset;
1479 continue; // Done; leave the do/while(false) now
1480 }
1481
1482 // Now we need to compute the index into the pageList
1483 UInt pageInd = atop_32(offset);
1484 offset &= PAGE_MASK;
1485
1486 // Compute the starting address of this segment
1487 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1488 if (!pageAddr) {
1489 panic("!pageList phys_addr");
1490 }
1491
1492 address = ptoa_64(pageAddr) + offset;
1493
1494 // length is currently set to the length of the remainder of the iopl.
1495 // We need to check that the remainder of the iopl is contiguous.
1496 // This is indicated by pageList[ind].phys_addr being sequential.
1497 IOByteCount contigLength = PAGE_SIZE - offset;
1498 while (contigLength < length
1499 && ++pageAddr == pageList[++pageInd].phys_addr)
1500 {
1501 contigLength += PAGE_SIZE;
1502 }
1503
1504 if (contigLength < length)
1505 length = contigLength;
1506
1507
1508 assert(address);
1509 assert(length);
1510
1511 } while (false);
1512
1513 // Update return values and state
1514 isP->fIO.fIOVMAddr = address;
1515 isP->fIO.fLength = length;
1516 isP->fIndex = ind;
1517 isP->fOffset2Index = off2Ind;
1518 isP->fNextOffset = isP->fIO.fOffset + length;
1519
1520 return kIOReturnSuccess;
1521 }
1522
1523 addr64_t
1524 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1525 {
1526 IOReturn ret;
1527 addr64_t address = 0;
1528 IOByteCount length = 0;
1529 IOMapper * mapper = gIOSystemMapper;
1530 IOOptionBits type = _flags & kIOMemoryTypeMask;
1531
1532 if (lengthOfSegment)
1533 *lengthOfSegment = 0;
1534
1535 if (offset >= _length)
1536 return 0;
1537
1538 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1539 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1540 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1541 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1542
1543 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1544 {
1545 unsigned rangesIndex = 0;
1546 Ranges vec = _ranges;
1547 user_addr_t addr;
1548
1549 // Find starting address within the vector of ranges
1550 for (;;) {
1551 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1552 if (offset < length)
1553 break;
1554 offset -= length; // (make offset relative)
1555 rangesIndex++;
1556 }
1557
1558 // Now that we have the starting range,
1559 // let's find the last contiguous range
1560 addr += offset;
1561 length -= offset;
1562
1563 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1564 user_addr_t newAddr;
1565 IOPhysicalLength newLen;
1566
1567 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1568 if (addr + length != newAddr)
1569 break;
1570 length += newLen;
1571 }
1572 if (addr)
1573 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1574 }
1575 else
1576 {
1577 IOMDDMAWalkSegmentState _state;
1578 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
1579
1580 state->fOffset = offset;
1581 state->fLength = _length - offset;
1582 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
1583
1584 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1585
1586 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1587 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1588 ret, this, state->fOffset,
1589 state->fIOVMAddr, state->fLength);
1590 if (kIOReturnSuccess == ret)
1591 {
1592 address = state->fIOVMAddr;
1593 length = state->fLength;
1594 }
1595
1596 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1597 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1598
1599 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1600 {
1601 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1602 {
1603 addr64_t origAddr = address;
1604 IOByteCount origLen = length;
1605
1606 address = mapper->mapAddr(origAddr);
1607 length = page_size - (address & (page_size - 1));
1608 while ((length < origLen)
1609 && ((address + length) == mapper->mapAddr(origAddr + length)))
1610 length += page_size;
1611 if (length > origLen)
1612 length = origLen;
1613 }
1614 }
1615 }
1616
1617 if (!address)
1618 length = 0;
1619
1620 if (lengthOfSegment)
1621 *lengthOfSegment = length;
1622
1623 return (address);
1624 }
1625
1626 #ifndef __LP64__
1627 addr64_t
1628 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1629 {
1630 addr64_t address = 0;
1631
1632 if (options & _kIOMemorySourceSegment)
1633 {
1634 address = getSourceSegment(offset, lengthOfSegment);
1635 }
1636 else if (options & kIOMemoryMapperNone)
1637 {
1638 address = getPhysicalSegment64(offset, lengthOfSegment);
1639 }
1640 else
1641 {
1642 address = getPhysicalSegment(offset, lengthOfSegment);
1643 }
1644
1645 return (address);
1646 }
1647
1648 addr64_t
1649 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1650 {
1651 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1652 }
1653
1654 IOPhysicalAddress
1655 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1656 {
1657 addr64_t address = 0;
1658 IOByteCount length = 0;
1659
1660 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1661
1662 if (lengthOfSegment)
1663 length = *lengthOfSegment;
1664
1665 if ((address + length) > 0x100000000ULL)
1666 {
1667 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1668 address, (long) length, (getMetaClass())->getClassName());
1669 }
1670
1671 return ((IOPhysicalAddress) address);
1672 }
1673
1674 addr64_t
1675 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1676 {
1677 IOPhysicalAddress phys32;
1678 IOByteCount length;
1679 addr64_t phys64;
1680 IOMapper * mapper = 0;
1681
1682 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1683 if (!phys32)
1684 return 0;
1685
1686 if (gIOSystemMapper)
1687 mapper = gIOSystemMapper;
1688
1689 if (mapper)
1690 {
1691 IOByteCount origLen;
1692
1693 phys64 = mapper->mapAddr(phys32);
1694 origLen = *lengthOfSegment;
1695 length = page_size - (phys64 & (page_size - 1));
1696 while ((length < origLen)
1697 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1698 length += page_size;
1699 if (length > origLen)
1700 length = origLen;
1701
1702 *lengthOfSegment = length;
1703 }
1704 else
1705 phys64 = (addr64_t) phys32;
1706
1707 return phys64;
1708 }
1709
1710 IOPhysicalAddress
1711 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1712 {
1713 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1714 }
1715
1716 IOPhysicalAddress
1717 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1718 {
1719 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1720 }
1721
1722 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1723 IOByteCount * lengthOfSegment)
1724 {
1725 if (_task == kernel_task)
1726 return (void *) getSourceSegment(offset, lengthOfSegment);
1727 else
1728 panic("IOGMD::getVirtualSegment deprecated");
1729
1730 return 0;
1731 }
1732 #endif /* !__LP64__ */
1733
1734 IOReturn
1735 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1736 {
1737 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1738 DMACommandOps params;
1739 IOReturn err;
1740
1741 params = (op & ~kIOMDDMACommandOperationMask & op);
1742 op &= kIOMDDMACommandOperationMask;
1743
1744 if (kIOMDGetCharacteristics == op) {
1745 if (dataSize < sizeof(IOMDDMACharacteristics))
1746 return kIOReturnUnderrun;
1747
1748 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1749 data->fLength = getLength();
1750 data->fSGCount = 0;
1751 data->fDirection = getDirection();
1752 data->fIsPrepared = true; // Assume prepared - fails safe
1753 }
1754 else if (kIOMDWalkSegments == op) {
1755 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1756 return kIOReturnUnderrun;
1757
1758 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1759 IOByteCount offset = (IOByteCount) data->fOffset;
1760
1761 IOPhysicalLength length;
1762 if (data->fMapped && IOMapper::gSystem)
1763 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
1764 else
1765 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1766 data->fLength = length;
1767 }
1768 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1769 else if (kIOMDDMAMap == op)
1770 {
1771 if (dataSize < sizeof(IOMDDMAMapArgs))
1772 return kIOReturnUnderrun;
1773 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1774
1775 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1776
1777 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1778 return (err);
1779 }
1780 else return kIOReturnBadArgument;
1781
1782 return kIOReturnSuccess;
1783 }
1784
1785 static IOReturn
1786 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1787 {
1788 IOReturn err = kIOReturnSuccess;
1789
1790 *control = VM_PURGABLE_SET_STATE;
1791 switch (newState)
1792 {
1793 case kIOMemoryPurgeableKeepCurrent:
1794 *control = VM_PURGABLE_GET_STATE;
1795 break;
1796
1797 case kIOMemoryPurgeableNonVolatile:
1798 *state = VM_PURGABLE_NONVOLATILE;
1799 break;
1800 case kIOMemoryPurgeableVolatile:
1801 *state = VM_PURGABLE_VOLATILE;
1802 break;
1803 case kIOMemoryPurgeableEmpty:
1804 *state = VM_PURGABLE_EMPTY;
1805 break;
1806 default:
1807 err = kIOReturnBadArgument;
1808 break;
1809 }
1810 return (err);
1811 }
1812
1813 static IOReturn
1814 purgeableStateBits(int * state)
1815 {
1816 IOReturn err = kIOReturnSuccess;
1817
1818 switch (*state)
1819 {
1820 case VM_PURGABLE_NONVOLATILE:
1821 *state = kIOMemoryPurgeableNonVolatile;
1822 break;
1823 case VM_PURGABLE_VOLATILE:
1824 *state = kIOMemoryPurgeableVolatile;
1825 break;
1826 case VM_PURGABLE_EMPTY:
1827 *state = kIOMemoryPurgeableEmpty;
1828 break;
1829 default:
1830 *state = kIOMemoryPurgeableNonVolatile;
1831 err = kIOReturnNotReady;
1832 break;
1833 }
1834 return (err);
1835 }
1836
1837 IOReturn
1838 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1839 IOOptionBits * oldState )
1840 {
1841 IOReturn err = kIOReturnSuccess;
1842 vm_purgable_t control;
1843 int state;
1844
1845 if (_memEntry)
1846 {
1847 err = super::setPurgeable(newState, oldState);
1848 }
1849 else
1850 {
1851 if (kIOMemoryThreadSafe & _flags)
1852 LOCK;
1853 do
1854 {
1855 // Find the appropriate vm_map for the given task
1856 vm_map_t curMap;
1857 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1858 {
1859 err = kIOReturnNotReady;
1860 break;
1861 }
1862 else
1863 curMap = get_task_map(_task);
1864
1865 // can only do one range
1866 Ranges vec = _ranges;
1867 IOOptionBits type = _flags & kIOMemoryTypeMask;
1868 user_addr_t addr;
1869 IOByteCount len;
1870 getAddrLenForInd(addr, len, type, vec, 0);
1871
1872 err = purgeableControlBits(newState, &control, &state);
1873 if (kIOReturnSuccess != err)
1874 break;
1875 err = mach_vm_purgable_control(curMap, addr, control, &state);
1876 if (oldState)
1877 {
1878 if (kIOReturnSuccess == err)
1879 {
1880 err = purgeableStateBits(&state);
1881 *oldState = state;
1882 }
1883 }
1884 }
1885 while (false);
1886 if (kIOMemoryThreadSafe & _flags)
1887 UNLOCK;
1888 }
1889 return (err);
1890 }
1891
1892 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1893 IOOptionBits * oldState )
1894 {
1895 IOReturn err = kIOReturnSuccess;
1896 vm_purgable_t control;
1897 int state;
1898
1899 if (kIOMemoryThreadSafe & _flags)
1900 LOCK;
1901
1902 do
1903 {
1904 if (!_memEntry)
1905 {
1906 err = kIOReturnNotReady;
1907 break;
1908 }
1909 err = purgeableControlBits(newState, &control, &state);
1910 if (kIOReturnSuccess != err)
1911 break;
1912 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1913 if (oldState)
1914 {
1915 if (kIOReturnSuccess == err)
1916 {
1917 err = purgeableStateBits(&state);
1918 *oldState = state;
1919 }
1920 }
1921 }
1922 while (false);
1923
1924 if (kIOMemoryThreadSafe & _flags)
1925 UNLOCK;
1926
1927 return (err);
1928 }
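/*
 * Usage sketch (editorial addition, not part of the original source): a
 * client marks descriptor memory volatile while its contents are
 * reconstructible, and checks whether the pager emptied it when taking it
 * back. md below is a placeholder for this example.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *   // ... later, reclaim the memory ...
 *   if ((kIOReturnSuccess == md->setPurgeable(kIOMemoryPurgeableNonVolatile,
 *                                             &oldState))
 *       && (kIOMemoryPurgeableEmpty == oldState)) {
 *       // contents were discarded while volatile; regenerate them
 *   }
 */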
1929
1930 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1931 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1932
1933 static void SetEncryptOp(addr64_t pa, unsigned int count)
1934 {
1935 ppnum_t page, end;
1936
1937 page = atop_64(round_page_64(pa));
1938 end = atop_64(trunc_page_64(pa + count));
1939 for (; page < end; page++)
1940 {
1941 pmap_clear_noencrypt(page);
1942 }
1943 }
1944
1945 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1946 {
1947 ppnum_t page, end;
1948
1949 page = atop_64(round_page_64(pa));
1950 end = atop_64(trunc_page_64(pa + count));
1951 for (; page < end; page++)
1952 {
1953 pmap_set_noencrypt(page);
1954 }
1955 }
1956
1957 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1958 IOByteCount offset, IOByteCount length )
1959 {
1960 IOByteCount remaining;
1961 unsigned int res;
1962 void (*func)(addr64_t pa, unsigned int count) = 0;
1963
1964 switch (options)
1965 {
1966 case kIOMemoryIncoherentIOFlush:
1967 func = &dcache_incoherent_io_flush64;
1968 break;
1969 case kIOMemoryIncoherentIOStore:
1970 func = &dcache_incoherent_io_store64;
1971 break;
1972
1973 case kIOMemorySetEncrypted:
1974 func = &SetEncryptOp;
1975 break;
1976 case kIOMemoryClearEncrypted:
1977 func = &ClearEncryptOp;
1978 break;
1979 }
1980
1981 if (!func)
1982 return (kIOReturnUnsupported);
1983
1984 if (kIOMemoryThreadSafe & _flags)
1985 LOCK;
1986
1987 res = 0x0UL;
1988 remaining = length = min(length, getLength() - offset);
1989 while (remaining)
1990 // (process another target segment?)
1991 {
1992 addr64_t dstAddr64;
1993 IOByteCount dstLen;
1994
1995 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1996 if (!dstAddr64)
1997 break;
1998
1999 // Clip segment length to remaining
2000 if (dstLen > remaining)
2001 dstLen = remaining;
2002
2003 (*func)(dstAddr64, dstLen);
2004
2005 offset += dstLen;
2006 remaining -= dstLen;
2007 }
2008
2009 if (kIOMemoryThreadSafe & _flags)
2010 UNLOCK;
2011
2012 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2013 }
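/*
 * Editor's illustrative sketch -- not part of this file. Pushing dirty cache
 * lines back to memory before handing a buffer to a device on a platform
 * without coherent DMA; only performOperation() and the option constants
 * handled above are real, FlushForDevice is a hypothetical name.
 *
 *   static IOReturn FlushForDevice(IOMemoryDescriptor * md)
 *   {
 *       // Store (write back) every cache line covering the whole descriptor.
 *       return md->performOperation(kIOMemoryIncoherentIOStore,
 *                                   0, md->getLength());
 *   }
 */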
2014
2015 #if defined(__i386__) || defined(__x86_64__)
2016 extern vm_offset_t first_avail;
2017 #define io_kernel_static_end first_avail
2018 #else
2019 #error io_kernel_static_end is undefined for this architecture
2020 #endif
2021
2022 static kern_return_t
2023 io_get_kernel_static_upl(
2024 vm_map_t /* map */,
2025 uintptr_t offset,
2026 vm_size_t *upl_size,
2027 upl_t *upl,
2028 upl_page_info_array_t page_list,
2029 unsigned int *count,
2030 ppnum_t *highest_page)
2031 {
2032 unsigned int pageCount, page;
2033 ppnum_t phys;
2034 ppnum_t highestPage = 0;
2035
2036 pageCount = atop_32(*upl_size);
2037 if (pageCount > *count)
2038 pageCount = *count;
2039
2040 *upl = NULL;
2041
2042 for (page = 0; page < pageCount; page++)
2043 {
2044 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2045 if (!phys)
2046 break;
2047 page_list[page].phys_addr = phys;
2048 page_list[page].pageout = 0;
2049 page_list[page].absent = 0;
2050 page_list[page].dirty = 0;
2051 page_list[page].precious = 0;
2052 page_list[page].device = 0;
2053 if (phys > highestPage)
2054 highestPage = phys;
2055 }
2056
2057 *highest_page = highestPage;
2058
2059 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2060 }
2061
2062 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2063 {
2064 IOOptionBits type = _flags & kIOMemoryTypeMask;
2065 IOReturn error = kIOReturnCannotWire;
2066 ioGMDData *dataP;
2067 upl_page_info_array_t pageInfo;
2068 ppnum_t mapBase = 0;
2069 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2070
2071 assert(!_wireCount);
2072 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2073
2074 if (_pages > gIOMaximumMappedIOPageCount)
2075 return kIOReturnNoResources;
2076
2077 dataP = getDataP(_memoryEntries);
2078 IOMapper *mapper;
2079 mapper = dataP->fMapper;
2080 dataP->fMappedBase = 0;
2081
2082 if (forDirection == kIODirectionNone)
2083 forDirection = getDirection();
2084
2085 int uplFlags; // This Mem Desc's default flags for upl creation
2086 switch (kIODirectionOutIn & forDirection)
2087 {
2088 case kIODirectionOut:
2089 // Pages do not need to be marked as dirty on commit
2090 uplFlags = UPL_COPYOUT_FROM;
2091 _flags |= kIOMemoryPreparedReadOnly;
2092 break;
2093
2094 case kIODirectionIn:
2095 default:
2096 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2097 break;
2098 }
2099 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2100
2101 #ifdef UPL_NEED_32BIT_ADDR
2102 if (kIODirectionPrepareToPhys32 & forDirection)
2103 {
2104 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2105 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2106 }
2107 #endif
2108
2109 // Note that appendBytes(NULL) zeros the data up to the desired length.
2110 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2111 dataP = 0;
2112
2113 // Find the appropriate vm_map for the given task
2114 vm_map_t curMap;
2115 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2116 curMap = 0;
2117 else
2118 { curMap = get_task_map(_task); }
2119
2120 // Iterate over the vector of virtual ranges
2121 Ranges vec = _ranges;
2122 unsigned int pageIndex = 0;
2123 IOByteCount mdOffset = 0;
2124 ppnum_t highestPage = 0;
2125
2126 for (UInt range = 0; range < _rangesCount; range++) {
2127 ioPLBlock iopl;
2128 user_addr_t startPage;
2129 IOByteCount numBytes;
2130 ppnum_t highPage = 0;
2131
2132 // Get the startPage address and length of vec[range]
2133 getAddrLenForInd(startPage, numBytes, type, vec, range);
2134 iopl.fPageOffset = startPage & PAGE_MASK;
2135 numBytes += iopl.fPageOffset;
2136 startPage = trunc_page_64(startPage);
2137
2138 if (mapper)
2139 iopl.fMappedPage = mapBase + pageIndex;
2140 else
2141 iopl.fMappedPage = 0;
2142
2143 // Iterate over the current range, creating UPLs
2144 while (numBytes) {
2145 vm_address_t kernelStart = (vm_address_t) startPage;
2146 vm_map_t theMap;
2147 if (curMap)
2148 theMap = curMap;
2149 else if (!sharedMem) {
2150 assert(_task == kernel_task);
2151 theMap = IOPageableMapForAddress(kernelStart);
2152 }
2153 else
2154 theMap = NULL;
2155
2156 int ioplFlags = uplFlags;
2157 dataP = getDataP(_memoryEntries);
2158 pageInfo = getPageList(dataP);
2159 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2160
2161 vm_size_t ioplSize = round_page(numBytes);
2162 unsigned int numPageInfo = atop_32(ioplSize);
2163
2164 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2165 error = io_get_kernel_static_upl(theMap,
2166 kernelStart,
2167 &ioplSize,
2168 &iopl.fIOPL,
2169 baseInfo,
2170 &numPageInfo,
2171 &highPage);
2172 }
2173 else if (sharedMem) {
2174 error = memory_object_iopl_request(sharedMem,
2175 ptoa_32(pageIndex),
2176 &ioplSize,
2177 &iopl.fIOPL,
2178 baseInfo,
2179 &numPageInfo,
2180 &ioplFlags);
2181 }
2182 else {
2183 assert(theMap);
2184 error = vm_map_create_upl(theMap,
2185 startPage,
2186 (upl_size_t*)&ioplSize,
2187 &iopl.fIOPL,
2188 baseInfo,
2189 &numPageInfo,
2190 &ioplFlags);
2191 }
2192
2193 assert(ioplSize);
2194 if (error != KERN_SUCCESS)
2195 goto abortExit;
2196
2197 if (iopl.fIOPL)
2198 highPage = upl_get_highest_page(iopl.fIOPL);
2199 if (highPage > highestPage)
2200 highestPage = highPage;
2201
2202 error = kIOReturnCannotWire;
2203
2204 if (baseInfo->device) {
2205 numPageInfo = 1;
2206 iopl.fFlags = kIOPLOnDevice;
2207 }
2208 else {
2209 iopl.fFlags = 0;
2210 }
2211
2212 iopl.fIOMDOffset = mdOffset;
2213 iopl.fPageInfo = pageIndex;
2214
2215 #if 0
2216 // used to remove the upl for auto prepares here, for some errant code
2217 // that freed memory while a descriptor was still pointing at it
2218 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2219 {
2220 upl_commit(iopl.fIOPL, 0, 0);
2221 upl_deallocate(iopl.fIOPL);
2222 iopl.fIOPL = 0;
2223 }
2224 #endif
2225
2226 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2227 // Clean up the partially created and unsaved iopl
2228 if (iopl.fIOPL) {
2229 upl_abort(iopl.fIOPL, 0);
2230 upl_deallocate(iopl.fIOPL);
2231 }
2232 goto abortExit;
2233 }
2234 dataP = 0;
2235
2236 // Check for multiple iopls in one virtual range
2237 pageIndex += numPageInfo;
2238 mdOffset -= iopl.fPageOffset;
2239 if (ioplSize < numBytes) {
2240 numBytes -= ioplSize;
2241 startPage += ioplSize;
2242 mdOffset += ioplSize;
2243 iopl.fPageOffset = 0;
2244 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2245 }
2246 else {
2247 mdOffset += numBytes;
2248 break;
2249 }
2250 }
2251 }
2252
2253 _highestPage = highestPage;
2254
2255 return kIOReturnSuccess;
2256
2257 abortExit:
2258 {
2259 dataP = getDataP(_memoryEntries);
2260 UInt done = getNumIOPL(_memoryEntries, dataP);
2261 ioPLBlock *ioplList = getIOPLList(dataP);
2262
2263 for (UInt range = 0; range < done; range++)
2264 {
2265 if (ioplList[range].fIOPL) {
2266 upl_abort(ioplList[range].fIOPL, 0);
2267 upl_deallocate(ioplList[range].fIOPL);
2268 }
2269 }
2270 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2271 }
2272
2273 if (error == KERN_FAILURE)
2274 error = kIOReturnCannotWire;
2275
2276 return error;
2277 }
2278
2279 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2280 {
2281 ioGMDData * dataP;
2282 unsigned dataSize = size;
2283
2284 if (!_memoryEntries) {
2285 _memoryEntries = OSData::withCapacity(dataSize);
2286 if (!_memoryEntries)
2287 return false;
2288 }
2289 else if (!_memoryEntries->initWithCapacity(dataSize))
2290 return false;
2291
2292 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2293 dataP = getDataP(_memoryEntries);
2294
2295 if (mapper == kIOMapperWaitSystem) {
2296 IOMapper::checkForSystemMapper();
2297 mapper = IOMapper::gSystem;
2298 }
2299 dataP->fMapper = mapper;
2300 dataP->fPageCnt = 0;
2301 dataP->fMappedBase = 0;
2302 dataP->fDMAMapNumAddressBits = 64;
2303 dataP->fDMAMapAlignment = 0;
2304 dataP->fPreparationID = kIOPreparationIDUnprepared;
2305
2306 return (true);
2307 }
2308
2309 IOReturn IOMemoryDescriptor::dmaMap(
2310 IOMapper * mapper,
2311 const IODMAMapSpecification * mapSpec,
2312 uint64_t offset,
2313 uint64_t length,
2314 uint64_t * address,
2315 ppnum_t * mapPages)
2316 {
2317 IOMDDMAWalkSegmentState walkState;
2318 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2319 IOOptionBits mdOp;
2320 IOReturn ret;
2321 IOPhysicalLength segLen;
2322 addr64_t phys, align, pageOffset;
2323 ppnum_t base, pageIndex, pageCount;
2324 uint64_t index;
2325 uint32_t mapOptions = 0;
2326
2327 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2328
2329 walkArgs->fMapped = false;
2330 mdOp = kIOMDFirstSegment;
2331 pageCount = 0;
2332 for (index = 0; index < length; )
2333 {
2334 if (index && (page_mask & (index + pageOffset))) break;
2335
2336 walkArgs->fOffset = offset + index;
2337 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2338 mdOp = kIOMDWalkSegments;
2339 if (ret != kIOReturnSuccess) break;
2340 phys = walkArgs->fIOVMAddr;
2341 segLen = walkArgs->fLength;
2342
2343 align = (phys & page_mask);
2344 if (!index) pageOffset = align;
2345 else if (align) break;
2346 pageCount += atop_64(round_page_64(align + segLen));
2347 index += segLen;
2348 }
2349
2350 if (index < length) return (kIOReturnVMError);
2351
2352 base = mapper->iovmMapMemory(this, offset, pageCount,
2353 mapOptions, NULL, mapSpec);
2354
2355 if (!base) return (kIOReturnNoResources);
2356
2357 mdOp = kIOMDFirstSegment;
2358 for (pageIndex = 0, index = 0; index < length; )
2359 {
2360 walkArgs->fOffset = offset + index;
2361 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2362 mdOp = kIOMDWalkSegments;
2363 if (ret != kIOReturnSuccess) break;
2364 phys = walkArgs->fIOVMAddr;
2365 segLen = walkArgs->fLength;
2366
2367 ppnum_t page = atop_64(phys);
2368 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2369 while (count--)
2370 {
2371 mapper->iovmInsert(base, pageIndex, page);
2372 page++;
2373 pageIndex++;
2374 }
2375 index += segLen;
2376 }
2377 if (pageIndex != pageCount) panic("pageIndex");
2378
2379 *address = ptoa_64(base) + pageOffset;
2380 if (mapPages) *mapPages = pageCount;
2381
2382 return (kIOReturnSuccess);
2383 }
2384
2385 IOReturn IOGeneralMemoryDescriptor::dmaMap(
2386 IOMapper * mapper,
2387 const IODMAMapSpecification * mapSpec,
2388 uint64_t offset,
2389 uint64_t length,
2390 uint64_t * address,
2391 ppnum_t * mapPages)
2392 {
2393 IOReturn err = kIOReturnSuccess;
2394 ioGMDData * dataP;
2395 IOOptionBits type = _flags & kIOMemoryTypeMask;
2396
2397 *address = 0;
2398 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2399
2400 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2401 || offset || (length != _length))
2402 {
2403 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2404 }
2405 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2406 {
2407 const ioPLBlock * ioplList = getIOPLList(dataP);
2408 upl_page_info_t * pageList;
2409 uint32_t mapOptions = 0;
2410 ppnum_t base;
2411
2412 IODMAMapSpecification mapSpec;
2413 bzero(&mapSpec, sizeof(mapSpec));
2414 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2415 mapSpec.alignment = dataP->fDMAMapAlignment;
2416
2417 // For external UPLs the fPageInfo field points directly to
2418 // the upl's upl_page_info_t array.
2419 if (ioplList->fFlags & kIOPLExternUPL)
2420 {
2421 pageList = (upl_page_info_t *) ioplList->fPageInfo;
2422 mapOptions |= kIODMAMapPagingPath;
2423 }
2424 else
2425 pageList = getPageList(dataP);
2426
2427 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2428
2429 // Check for direct device non-paged memory
2430 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2431
2432 base = mapper->iovmMapMemory(
2433 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2434 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2435 if (mapPages) *mapPages = _pages;
2436 }
2437
2438 return (err);
2439 }
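/*
 * Editor's illustrative sketch -- not part of this file. Drivers normally do
 * not call dmaMap() directly; the usual path is an IODMACommand, which
 * prepares the descriptor and generates I/O-space segments through the
 * system mapper. The specification values (64 address bits, no segment size
 * limit) and GenerateSegments are placeholders.
 *
 *   #include <IOKit/IODMACommand.h>
 *
 *   static IOReturn GenerateSegments(IOMemoryDescriptor * md)
 *   {
 *       IODMACommand * cmd = IODMACommand::withSpecification(
 *                               kIODMACommandOutputHost64,  // segment format
 *                               64,                         // device address bits
 *                               0);                         // no max segment size
 *       if (!cmd)
 *           return kIOReturnNoMemory;
 *
 *       IOReturn ret = cmd->setMemoryDescriptor(md);        // also prepares md
 *       if (kIOReturnSuccess == ret)
 *       {
 *           UInt64 offset = 0;
 *           while (offset < md->getLength())
 *           {
 *               IODMACommand::Segment64 seg;
 *               UInt32 numSeg = 1;
 *               if ((kIOReturnSuccess != cmd->genIOVMSegments(&offset, &seg, &numSeg))
 *                   || !numSeg)
 *                   break;
 *               IOLog("segment 0x%qx len 0x%qx\n", seg.fIOVMAddr, seg.fLength);
 *           }
 *           cmd->clearMemoryDescriptor();                   // also completes md
 *       }
 *       cmd->release();
 *       return ret;
 *   }
 */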
2440
2441 /*
2442 * prepare
2443 *
2444 * Prepare the memory for an I/O transfer. This involves paging in
2445 * the memory, if necessary, and wiring it down for the duration of
2446 * the transfer. The complete() method completes the processing of
2447 * the memory after the I/O transfer finishes. This method need not be
2448 * called for non-pageable memory.
2449 */
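/*
 * Editor's illustrative sketch -- not part of this file. The prepare()/
 * complete() pairing described above, as a driver might wrap it around a
 * transfer. PerformTransfer stands in for whatever actually programs the
 * hardware and is hypothetical.
 *
 *   extern IOReturn PerformTransfer(IOMemoryDescriptor * md);  // hypothetical
 *
 *   static IOReturn DoTransfer(IOMemoryDescriptor * md)
 *   {
 *       IOReturn ret = md->prepare(kIODirectionOut);  // page in and wire down
 *       if (kIOReturnSuccess != ret)
 *           return ret;
 *
 *       ret = PerformTransfer(md);
 *
 *       md->complete(kIODirectionOut);                // always pair with prepare()
 *       return ret;
 *   }
 */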
2450
2451 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2452 {
2453 IOReturn error = kIOReturnSuccess;
2454 IOOptionBits type = _flags & kIOMemoryTypeMask;
2455
2456 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2457 return kIOReturnSuccess;
2458
2459 if (_prepareLock)
2460 IOLockLock(_prepareLock);
2461
2462 if (!_wireCount
2463 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2464 error = wireVirtual(forDirection);
2465 }
2466
2467 if (kIOReturnSuccess == error)
2468 {
2469 if (1 == ++_wireCount)
2470 {
2471 if (kIOMemoryClearEncrypt & _flags)
2472 {
2473 performOperation(kIOMemoryClearEncrypted, 0, _length);
2474 }
2475 }
2476 }
2477
2478 if (_prepareLock)
2479 IOLockUnlock(_prepareLock);
2480
2481 return error;
2482 }
2483
2484 /*
2485 * complete
2486 *
2487 * Complete processing of the memory after an I/O transfer finishes.
2488 * This method should not be called unless a prepare was previously
2489 * issued; prepare() and complete() calls must occur in pairs, before
2490 * and after an I/O transfer involving pageable memory.
2491 */
2492
2493 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2494 {
2495 IOOptionBits type = _flags & kIOMemoryTypeMask;
2496
2497 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2498 return kIOReturnSuccess;
2499
2500 if (_prepareLock)
2501 IOLockLock(_prepareLock);
2502
2503 assert(_wireCount);
2504
2505 if (_wireCount)
2506 {
2507 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2508 {
2509 performOperation(kIOMemorySetEncrypted, 0, _length);
2510 }
2511
2512 _wireCount--;
2513 if (!_wireCount)
2514 {
2515 IOOptionBits type = _flags & kIOMemoryTypeMask;
2516 ioGMDData * dataP = getDataP(_memoryEntries);
2517 ioPLBlock *ioplList = getIOPLList(dataP);
2518 UInt count = getNumIOPL(_memoryEntries, dataP);
2519
2520 #if IOMD_DEBUG_DMAACTIVE
2521 if (__iomd_reservedA) panic("complete() while dma active");
2522 #endif /* IOMD_DEBUG_DMAACTIVE */
2523
2524 if (dataP->fMappedBase) {
2525 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2526 dataP->fMappedBase = 0;
2527 }
2528 // Only complete iopls that we created, which are for TypeVirtual
2529 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2530 for (UInt ind = 0; ind < count; ind++)
2531 if (ioplList[ind].fIOPL) {
2532 upl_commit(ioplList[ind].fIOPL, 0, 0);
2533 upl_deallocate(ioplList[ind].fIOPL);
2534 }
2535 } else if (kIOMemoryTypeUPL == type) {
2536 upl_set_referenced(ioplList[0].fIOPL, false);
2537 }
2538
2539 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2540
2541 dataP->fPreparationID = kIOPreparationIDUnprepared;
2542 }
2543 }
2544
2545 if (_prepareLock)
2546 IOLockUnlock(_prepareLock);
2547
2548 return kIOReturnSuccess;
2549 }
2550
2551 IOReturn IOGeneralMemoryDescriptor::doMap(
2552 vm_map_t __addressMap,
2553 IOVirtualAddress * __address,
2554 IOOptionBits options,
2555 IOByteCount __offset,
2556 IOByteCount __length )
2557
2558 {
2559 #ifndef __LP64__
2560 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2561 #endif /* !__LP64__ */
2562
2563 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2564 mach_vm_size_t offset = mapping->fOffset + __offset;
2565 mach_vm_size_t length = mapping->fLength;
2566
2567 kern_return_t kr = kIOReturnVMError;
2568 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2569
2570 IOOptionBits type = _flags & kIOMemoryTypeMask;
2571 Ranges vec = _ranges;
2572
2573 user_addr_t range0Addr = 0;
2574 IOByteCount range0Len = 0;
2575
2576 if ((offset >= _length) || ((offset + length) > _length))
2577 return( kIOReturnBadArgument );
2578
2579 if (vec.v)
2580 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2581
2582 // mapping source == dest? (could be much better)
2583 if( _task
2584 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2585 && (1 == _rangesCount) && (0 == offset)
2586 && range0Addr && (length <= range0Len) )
2587 {
2588 mapping->fAddress = range0Addr;
2589 mapping->fOptions |= kIOMapStatic;
2590
2591 return( kIOReturnSuccess );
2592 }
2593
2594 if( 0 == sharedMem) {
2595
2596 vm_size_t size = ptoa_32(_pages);
2597
2598 if( _task) {
2599
2600 memory_object_size_t actualSize = size;
2601 vm_prot_t prot = VM_PROT_READ;
2602 if (!(kIOMapReadOnly & options))
2603 prot |= VM_PROT_WRITE;
2604 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2605 prot |= VM_PROT_WRITE;
2606
2607 if (_rangesCount == 1)
2608 {
2609 kr = mach_make_memory_entry_64(get_task_map(_task),
2610 &actualSize, range0Addr,
2611 prot, &sharedMem,
2612 NULL);
2613 }
2614 if( (_rangesCount != 1)
2615 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2616 do
2617 {
2618 #if IOASSERT
2619 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2620 _rangesCount, (UInt64)actualSize, (UInt64)size);
2621 #endif
2622 kr = kIOReturnVMError;
2623 if (sharedMem)
2624 {
2625 ipc_port_release_send(sharedMem);
2626 sharedMem = MACH_PORT_NULL;
2627 }
2628
2629 mach_vm_address_t address, segDestAddr;
2630 mach_vm_size_t mapLength;
2631 unsigned rangesIndex;
2632 IOOptionBits type = _flags & kIOMemoryTypeMask;
2633 user_addr_t srcAddr;
2634 IOPhysicalLength segLen = 0;
2635
2636 // Find starting address within the vector of ranges
2637 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2638 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2639 if (offset < segLen)
2640 break;
2641 offset -= segLen; // (make offset relative)
2642 }
2643
2644 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2645 address = trunc_page_64(mapping->fAddress);
2646
2647 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2648 {
2649 vm_map_t map = mapping->fAddressMap;
2650 kr = IOMemoryDescriptorMapCopy(&map,
2651 options,
2652 offset, &address, round_page_64(length + pageOffset));
2653 if (kr == KERN_SUCCESS)
2654 {
2655 segDestAddr = address;
2656 segLen -= offset;
2657 srcAddr += offset;
2658 mapLength = length;
2659
2660 while (true)
2661 {
2662 vm_prot_t cur_prot, max_prot;
2663
2664 if (segLen > length) segLen = length;
2665 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2666 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2667 get_task_map(_task), trunc_page_64(srcAddr),
2668 FALSE /* copy */,
2669 &cur_prot,
2670 &max_prot,
2671 VM_INHERIT_NONE);
2672 if (KERN_SUCCESS == kr)
2673 {
2674 if ((!(VM_PROT_READ & cur_prot))
2675 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2676 {
2677 kr = KERN_PROTECTION_FAILURE;
2678 }
2679 }
2680 if (KERN_SUCCESS != kr)
2681 break;
2682 segDestAddr += segLen;
2683 mapLength -= segLen;
2684 if (!mapLength)
2685 break;
2686 rangesIndex++;
2687 if (rangesIndex >= _rangesCount)
2688 {
2689 kr = kIOReturnBadArgument;
2690 break;
2691 }
2692 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2693 if (srcAddr & PAGE_MASK)
2694 {
2695 kr = kIOReturnBadArgument;
2696 break;
2697 }
2698 if (segLen > mapLength)
2699 segLen = mapLength;
2700 }
2701 if (KERN_SUCCESS != kr)
2702 {
2703 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2704 }
2705 }
2706
2707 if (KERN_SUCCESS == kr)
2708 mapping->fAddress = address + pageOffset;
2709 else
2710 mapping->fAddress = NULL;
2711 }
2712 }
2713 while (false);
2714 }
2715 else do
2716 { // _task == 0, must be physical
2717
2718 memory_object_t pager;
2719 unsigned int flags = 0;
2720 addr64_t pa;
2721 IOPhysicalLength segLen;
2722
2723 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2724
2725 if( !getKernelReserved())
2726 continue;
2727 reserved->dp.pagerContig = (1 == _rangesCount);
2728 reserved->dp.memory = this;
2729
2730 /* What cache mode do we need? */
2731 switch(options & kIOMapCacheMask ) {
2732
2733 case kIOMapDefaultCache:
2734 default:
2735 flags = IODefaultCacheBits(pa);
2736 if (DEVICE_PAGER_CACHE_INHIB & flags)
2737 {
2738 if (DEVICE_PAGER_GUARDED & flags)
2739 mapping->fOptions |= kIOMapInhibitCache;
2740 else
2741 mapping->fOptions |= kIOMapWriteCombineCache;
2742 }
2743 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2744 mapping->fOptions |= kIOMapWriteThruCache;
2745 else
2746 mapping->fOptions |= kIOMapCopybackCache;
2747 break;
2748
2749 case kIOMapInhibitCache:
2750 flags = DEVICE_PAGER_CACHE_INHIB |
2751 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2752 break;
2753
2754 case kIOMapWriteThruCache:
2755 flags = DEVICE_PAGER_WRITE_THROUGH |
2756 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2757 break;
2758
2759 case kIOMapCopybackCache:
2760 flags = DEVICE_PAGER_COHERENT;
2761 break;
2762
2763 case kIOMapWriteCombineCache:
2764 flags = DEVICE_PAGER_CACHE_INHIB |
2765 DEVICE_PAGER_COHERENT;
2766 break;
2767 }
2768
2769 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2770
2771 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2772 size, flags);
2773 assert( pager );
2774
2775 if( pager) {
2776 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2777 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2778
2779 assert( KERN_SUCCESS == kr );
2780 if( KERN_SUCCESS != kr)
2781 {
2782 device_pager_deallocate( pager );
2783 pager = MACH_PORT_NULL;
2784 sharedMem = MACH_PORT_NULL;
2785 }
2786 }
2787 if( pager && sharedMem)
2788 reserved->dp.devicePager = pager;
2789
2790 } while( false );
2791
2792 _memEntry = (void *) sharedMem;
2793 }
2794
2795 IOReturn result;
2796 if (0 == sharedMem)
2797 result = kr;
2798 else
2799 result = super::doMap( __addressMap, __address,
2800 options, __offset, __length );
2801
2802 return( result );
2803 }
2804
2805 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2806 vm_map_t addressMap,
2807 IOVirtualAddress __address,
2808 IOByteCount __length )
2809 {
2810 return (super::doUnmap(addressMap, __address, __length));
2811 }
2812
2813 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2814
2815 #undef super
2816 #define super OSObject
2817
2818 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2819
2820 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2821 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2822 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2823 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2824 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2825 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2826 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2827 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2828
2829 /* ex-inline function implementation */
2830 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2831 { return( getPhysicalSegment( 0, 0 )); }
2832
2833 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2834
2835 bool IOMemoryMap::init(
2836 task_t intoTask,
2837 mach_vm_address_t toAddress,
2838 IOOptionBits _options,
2839 mach_vm_size_t _offset,
2840 mach_vm_size_t _length )
2841 {
2842 if (!intoTask)
2843 return( false);
2844
2845 if (!super::init())
2846 return(false);
2847
2848 fAddressMap = get_task_map(intoTask);
2849 if (!fAddressMap)
2850 return(false);
2851 vm_map_reference(fAddressMap);
2852
2853 fAddressTask = intoTask;
2854 fOptions = _options;
2855 fLength = _length;
2856 fOffset = _offset;
2857 fAddress = toAddress;
2858
2859 return (true);
2860 }
2861
2862 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2863 {
2864 if (!_memory)
2865 return(false);
2866
2867 if (!fSuperMap)
2868 {
2869 if( (_offset + fLength) > _memory->getLength())
2870 return( false);
2871 fOffset = _offset;
2872 }
2873
2874 _memory->retain();
2875 if (fMemory)
2876 {
2877 if (fMemory != _memory)
2878 fMemory->removeMapping(this);
2879 fMemory->release();
2880 }
2881 fMemory = _memory;
2882
2883 return( true );
2884 }
2885
2886 struct IOMemoryDescriptorMapAllocRef
2887 {
2888 ipc_port_t sharedMem;
2889 vm_map_t map;
2890 mach_vm_address_t mapped;
2891 mach_vm_size_t size;
2892 mach_vm_size_t sourceOffset;
2893 IOOptionBits options;
2894 };
2895
2896 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2897 {
2898 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2899 IOReturn err;
2900
2901 do {
2902 if( ref->sharedMem)
2903 {
2904 vm_prot_t prot = VM_PROT_READ
2905 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2906
2907 // VM system requires write access to change cache mode
2908 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2909 prot |= VM_PROT_WRITE;
2910
2911 // set memory entry cache
2912 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2913 switch (ref->options & kIOMapCacheMask)
2914 {
2915 case kIOMapInhibitCache:
2916 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2917 break;
2918
2919 case kIOMapWriteThruCache:
2920 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2921 break;
2922
2923 case kIOMapWriteCombineCache:
2924 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2925 break;
2926
2927 case kIOMapCopybackCache:
2928 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2929 break;
2930
2931 case kIOMapCopybackInnerCache:
2932 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2933 break;
2934
2935 case kIOMapDefaultCache:
2936 default:
2937 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2938 break;
2939 }
2940
2941 vm_size_t unused = 0;
2942
2943 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2944 memEntryCacheMode, NULL, ref->sharedMem );
2945 if (KERN_SUCCESS != err)
2946 IOLog("MAP_MEM_ONLY failed %d\n", err);
2947
2948 err = mach_vm_map( map,
2949 &ref->mapped,
2950 ref->size, 0 /* mask */,
2951 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2952 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2953 ref->sharedMem, ref->sourceOffset,
2954 false, // copy
2955 prot, // cur
2956 prot, // max
2957 VM_INHERIT_NONE);
2958
2959 if( KERN_SUCCESS != err) {
2960 ref->mapped = 0;
2961 continue;
2962 }
2963 ref->map = map;
2964 }
2965 else
2966 {
2967 err = mach_vm_allocate(map, &ref->mapped, ref->size,
2968 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2969 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2970 if( KERN_SUCCESS != err) {
2971 ref->mapped = 0;
2972 continue;
2973 }
2974 ref->map = map;
2975 // we have to make sure that these mappings don't get copied if we fork.
2976 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2977 assert( KERN_SUCCESS == err );
2978 }
2979 }
2980 while( false );
2981
2982 return( err );
2983 }
2984
2985 kern_return_t
2986 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2987 mach_vm_size_t offset,
2988 mach_vm_address_t * address, mach_vm_size_t length)
2989 {
2990 IOReturn err;
2991 IOMemoryDescriptorMapAllocRef ref;
2992
2993 ref.map = *map;
2994 ref.sharedMem = entry;
2995 ref.sourceOffset = trunc_page_64(offset);
2996 ref.options = options;
2997 ref.size = length;
2998
2999 if (options & kIOMapAnywhere)
3000 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3001 ref.mapped = 0;
3002 else
3003 ref.mapped = *address;
3004
3005 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
3006 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3007 else
3008 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
3009
3010 *address = ref.mapped;
3011 *map = ref.map;
3012
3013 return (err);
3014 }
3015
3016 kern_return_t
3017 IOMemoryDescriptorMapCopy(vm_map_t * map,
3018 IOOptionBits options,
3019 mach_vm_size_t offset,
3020 mach_vm_address_t * address, mach_vm_size_t length)
3021 {
3022 IOReturn err;
3023 IOMemoryDescriptorMapAllocRef ref;
3024
3025 ref.map = *map;
3026 ref.sharedMem = NULL;
3027 ref.sourceOffset = trunc_page_64(offset);
3028 ref.options = options;
3029 ref.size = length;
3030
3031 if (options & kIOMapAnywhere)
3032 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3033 ref.mapped = 0;
3034 else
3035 ref.mapped = *address;
3036
3037 if (ref.map == kernel_map)
3038 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3039 else
3040 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
3041
3042 *address = ref.mapped;
3043 *map = ref.map;
3044
3045 return (err);
3046 }
3047
3048 IOReturn IOMemoryDescriptor::doMap(
3049 vm_map_t __addressMap,
3050 IOVirtualAddress * __address,
3051 IOOptionBits options,
3052 IOByteCount __offset,
3053 IOByteCount __length )
3054 {
3055 #ifndef __LP64__
3056 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
3057 #endif /* !__LP64__ */
3058
3059 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3060 mach_vm_size_t offset = mapping->fOffset + __offset;
3061 mach_vm_size_t length = mapping->fLength;
3062
3063 IOReturn err = kIOReturnSuccess;
3064 memory_object_t pager;
3065 mach_vm_size_t pageOffset;
3066 IOPhysicalAddress sourceAddr;
3067 unsigned int lock_count;
3068
3069 do
3070 {
3071 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3072 pageOffset = sourceAddr - trunc_page( sourceAddr );
3073
3074 if( reserved)
3075 pager = (memory_object_t) reserved->dp.devicePager;
3076 else
3077 pager = MACH_PORT_NULL;
3078
3079 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3080 {
3081 upl_t redirUPL2;
3082 vm_size_t size;
3083 int flags;
3084
3085 if (!_memEntry)
3086 {
3087 err = kIOReturnNotReadable;
3088 continue;
3089 }
3090
3091 size = round_page(mapping->fLength + pageOffset);
3092 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3093 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3094
3095 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3096 NULL, NULL,
3097 &flags))
3098 redirUPL2 = NULL;
3099
3100 for (lock_count = 0;
3101 IORecursiveLockHaveLock(gIOMemoryLock);
3102 lock_count++) {
3103 UNLOCK;
3104 }
3105 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3106 for (;
3107 lock_count;
3108 lock_count--) {
3109 LOCK;
3110 }
3111
3112 if (kIOReturnSuccess != err)
3113 {
3114 IOLog("upl_transpose(%x)\n", err);
3115 err = kIOReturnSuccess;
3116 }
3117
3118 if (redirUPL2)
3119 {
3120 upl_commit(redirUPL2, NULL, 0);
3121 upl_deallocate(redirUPL2);
3122 redirUPL2 = 0;
3123 }
3124 {
3125 // swap the memEntries since they now refer to different vm_objects
3126 void * me = _memEntry;
3127 _memEntry = mapping->fMemory->_memEntry;
3128 mapping->fMemory->_memEntry = me;
3129 }
3130 if (pager)
3131 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3132 }
3133 else
3134 {
3135 mach_vm_address_t address;
3136
3137 if (!(options & kIOMapAnywhere))
3138 {
3139 address = trunc_page_64(mapping->fAddress);
3140 if( (mapping->fAddress - address) != pageOffset)
3141 {
3142 err = kIOReturnVMError;
3143 continue;
3144 }
3145 }
3146
3147 vm_map_t map = mapping->fAddressMap;
3148 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
3149 options, (kIOMemoryBufferPageable & _flags),
3150 offset, &address, round_page_64(length + pageOffset));
3151 if( err != KERN_SUCCESS)
3152 continue;
3153
3154 if (!_memEntry || pager)
3155 {
3156 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3157 if (err != KERN_SUCCESS)
3158 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3159 }
3160
3161 #if DEBUG
3162 if (kIOLogMapping & gIOKitDebug)
3163 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3164 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
3165 #endif
3166
3167 if (err == KERN_SUCCESS)
3168 mapping->fAddress = address + pageOffset;
3169 else
3170 mapping->fAddress = NULL;
3171 }
3172 }
3173 while( false );
3174
3175 return (err);
3176 }
3177
3178 IOReturn IOMemoryDescriptor::handleFault(
3179 void * _pager,
3180 vm_map_t addressMap,
3181 mach_vm_address_t address,
3182 mach_vm_size_t sourceOffset,
3183 mach_vm_size_t length,
3184 IOOptionBits options )
3185 {
3186 IOReturn err = kIOReturnSuccess;
3187 memory_object_t pager = (memory_object_t) _pager;
3188 mach_vm_size_t size;
3189 mach_vm_size_t bytes;
3190 mach_vm_size_t page;
3191 mach_vm_size_t pageOffset;
3192 mach_vm_size_t pagerOffset;
3193 IOPhysicalLength segLen;
3194 addr64_t physAddr;
3195
3196 if( !addressMap)
3197 {
3198 if( kIOMemoryRedirected & _flags)
3199 {
3200 #if DEBUG
3201 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3202 #endif
3203 do {
3204 SLEEP;
3205 } while( kIOMemoryRedirected & _flags );
3206 }
3207
3208 return( kIOReturnSuccess );
3209 }
3210
3211 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3212 assert( physAddr );
3213 pageOffset = physAddr - trunc_page_64( physAddr );
3214 pagerOffset = sourceOffset;
3215
3216 size = length + pageOffset;
3217 physAddr -= pageOffset;
3218
3219 segLen += pageOffset;
3220 bytes = size;
3221 do
3222 {
3223 // in the middle of the loop only map whole pages
3224 if( segLen >= bytes)
3225 segLen = bytes;
3226 else if( segLen != trunc_page( segLen))
3227 err = kIOReturnVMError;
3228 if( physAddr != trunc_page_64( physAddr))
3229 err = kIOReturnBadArgument;
3230 if (kIOReturnSuccess != err)
3231 break;
3232
3233 #if DEBUG
3234 if( kIOLogMapping & gIOKitDebug)
3235 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
3236 addressMap, address + pageOffset, physAddr + pageOffset,
3237 segLen - pageOffset);
3238 #endif
3239
3240
3241 if( pager) {
3242 if( reserved && reserved->dp.pagerContig) {
3243 IOPhysicalLength allLen;
3244 addr64_t allPhys;
3245
3246 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3247 assert( allPhys );
3248 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3249 }
3250 else
3251 {
3252
3253 for( page = 0;
3254 (page < segLen) && (KERN_SUCCESS == err);
3255 page += page_size)
3256 {
3257 err = device_pager_populate_object(pager, pagerOffset,
3258 (ppnum_t)(atop_64(physAddr + page)), page_size);
3259 pagerOffset += page_size;
3260 }
3261 }
3262 assert( KERN_SUCCESS == err );
3263 if( err)
3264 break;
3265 }
3266
3267 // This call to vm_fault causes an early pmap-level resolution
3268 // of the mappings created above for kernel mappings, since
3269 // faulting them in later can't take place from interrupt level.
3270 /* *** ALERT *** */
3271 /* *** Temporary Workaround *** */
3272
3273 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3274 {
3275 vm_fault(addressMap,
3276 (vm_map_offset_t)address,
3277 VM_PROT_READ|VM_PROT_WRITE,
3278 FALSE, THREAD_UNINT, NULL,
3279 (vm_map_offset_t)0);
3280 }
3281
3282 /* *** Temporary Workaround *** */
3283 /* *** ALERT *** */
3284
3285 sourceOffset += segLen - pageOffset;
3286 address += segLen;
3287 bytes -= segLen;
3288 pageOffset = 0;
3289
3290 }
3291 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3292
3293 if (bytes)
3294 err = kIOReturnBadArgument;
3295
3296 return (err);
3297 }
3298
3299 IOReturn IOMemoryDescriptor::doUnmap(
3300 vm_map_t addressMap,
3301 IOVirtualAddress __address,
3302 IOByteCount __length )
3303 {
3304 IOReturn err;
3305 mach_vm_address_t address;
3306 mach_vm_size_t length;
3307
3308 if (__length)
3309 {
3310 address = __address;
3311 length = __length;
3312 }
3313 else
3314 {
3315 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3316 address = ((IOMemoryMap *) __address)->fAddress;
3317 length = ((IOMemoryMap *) __address)->fLength;
3318 }
3319
3320 if ((addressMap == kernel_map)
3321 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3322 addressMap = IOPageableMapForAddress( address );
3323
3324 #if DEBUG
3325 if( kIOLogMapping & gIOKitDebug)
3326 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3327 addressMap, address, length );
3328 #endif
3329
3330 err = mach_vm_deallocate( addressMap, address, length );
3331
3332 return (err);
3333 }
3334
3335 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3336 {
3337 IOReturn err = kIOReturnSuccess;
3338 IOMemoryMap * mapping = 0;
3339 OSIterator * iter;
3340
3341 LOCK;
3342
3343 if( doRedirect)
3344 _flags |= kIOMemoryRedirected;
3345 else
3346 _flags &= ~kIOMemoryRedirected;
3347
3348 do {
3349 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3350 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3351 mapping->redirect( safeTask, doRedirect );
3352
3353 iter->release();
3354 }
3355 } while( false );
3356
3357 if (!doRedirect)
3358 {
3359 WAKEUP;
3360 }
3361
3362 UNLOCK;
3363
3364 #ifndef __LP64__
3365 // temporary binary compatibility
3366 IOSubMemoryDescriptor * subMem;
3367 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3368 err = subMem->redirect( safeTask, doRedirect );
3369 else
3370 err = kIOReturnSuccess;
3371 #endif /* !__LP64__ */
3372
3373 return( err );
3374 }
3375
3376 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3377 {
3378 IOReturn err = kIOReturnSuccess;
3379
3380 if( fSuperMap) {
3381 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3382 } else {
3383
3384 LOCK;
3385
3386 do
3387 {
3388 if (!fAddress)
3389 break;
3390 if (!fAddressMap)
3391 break;
3392
3393 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3394 && (0 == (fOptions & kIOMapStatic)))
3395 {
3396 IOUnmapPages( fAddressMap, fAddress, fLength );
3397 err = kIOReturnSuccess;
3398 #if DEBUG
3399 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3400 #endif
3401 }
3402 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3403 {
3404 IOOptionBits newMode;
3405 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3406 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3407 }
3408 }
3409 while (false);
3410 UNLOCK;
3411 }
3412
3413 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3414 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3415 && safeTask
3416 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3417 fMemory->redirect(safeTask, doRedirect);
3418
3419 return( err );
3420 }
3421
3422 IOReturn IOMemoryMap::unmap( void )
3423 {
3424 IOReturn err;
3425
3426 LOCK;
3427
3428 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3429 && (0 == (fOptions & kIOMapStatic))) {
3430
3431 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3432
3433 } else
3434 err = kIOReturnSuccess;
3435
3436 if (fAddressMap)
3437 {
3438 vm_map_deallocate(fAddressMap);
3439 fAddressMap = 0;
3440 }
3441
3442 fAddress = 0;
3443
3444 UNLOCK;
3445
3446 return( err );
3447 }
3448
3449 void IOMemoryMap::taskDied( void )
3450 {
3451 LOCK;
3452 if (fUserClientUnmap)
3453 unmap();
3454 if( fAddressMap) {
3455 vm_map_deallocate(fAddressMap);
3456 fAddressMap = 0;
3457 }
3458 fAddressTask = 0;
3459 fAddress = 0;
3460 UNLOCK;
3461 }
3462
3463 IOReturn IOMemoryMap::userClientUnmap( void )
3464 {
3465 fUserClientUnmap = true;
3466 return (kIOReturnSuccess);
3467 }
3468
3469 // Overload the release mechanism. All mappings must be a member
3470 // of a memory descriptor's _mappings set. This means that we
3471 // always have 2 references on a mapping. When either of these references
3472 // is released we need to free ourselves.
3473 void IOMemoryMap::taggedRelease(const void *tag) const
3474 {
3475 LOCK;
3476 super::taggedRelease(tag, 2);
3477 UNLOCK;
3478 }
3479
3480 void IOMemoryMap::free()
3481 {
3482 unmap();
3483
3484 if (fMemory)
3485 {
3486 LOCK;
3487 fMemory->removeMapping(this);
3488 UNLOCK;
3489 fMemory->release();
3490 }
3491
3492 if (fOwner && (fOwner != fMemory))
3493 {
3494 LOCK;
3495 fOwner->removeMapping(this);
3496 UNLOCK;
3497 }
3498
3499 if (fSuperMap)
3500 fSuperMap->release();
3501
3502 if (fRedirUPL) {
3503 upl_commit(fRedirUPL, NULL, 0);
3504 upl_deallocate(fRedirUPL);
3505 }
3506
3507 super::free();
3508 }
3509
3510 IOByteCount IOMemoryMap::getLength()
3511 {
3512 return( fLength );
3513 }
3514
3515 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3516 {
3517 #ifndef __LP64__
3518 if (fSuperMap)
3519 fSuperMap->getVirtualAddress();
3520 else if (fAddressMap
3521 && vm_map_is_64bit(fAddressMap)
3522 && (sizeof(IOVirtualAddress) < 8))
3523 {
3524 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3525 }
3526 #endif /* !__LP64__ */
3527
3528 return (fAddress);
3529 }
3530
3531 #ifndef __LP64__
3532 mach_vm_address_t IOMemoryMap::getAddress()
3533 {
3534 return( fAddress);
3535 }
3536
3537 mach_vm_size_t IOMemoryMap::getSize()
3538 {
3539 return( fLength );
3540 }
3541 #endif /* !__LP64__ */
3542
3543
3544 task_t IOMemoryMap::getAddressTask()
3545 {
3546 if( fSuperMap)
3547 return( fSuperMap->getAddressTask());
3548 else
3549 return( fAddressTask);
3550 }
3551
3552 IOOptionBits IOMemoryMap::getMapOptions()
3553 {
3554 return( fOptions);
3555 }
3556
3557 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3558 {
3559 return( fMemory );
3560 }
3561
3562 IOMemoryMap * IOMemoryMap::copyCompatible(
3563 IOMemoryMap * newMapping )
3564 {
3565 task_t task = newMapping->getAddressTask();
3566 mach_vm_address_t toAddress = newMapping->fAddress;
3567 IOOptionBits _options = newMapping->fOptions;
3568 mach_vm_size_t _offset = newMapping->fOffset;
3569 mach_vm_size_t _length = newMapping->fLength;
3570
3571 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3572 return( 0 );
3573 if( (fOptions ^ _options) & kIOMapReadOnly)
3574 return( 0 );
3575 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3576 && ((fOptions ^ _options) & kIOMapCacheMask))
3577 return( 0 );
3578
3579 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3580 return( 0 );
3581
3582 if( _offset < fOffset)
3583 return( 0 );
3584
3585 _offset -= fOffset;
3586
3587 if( (_offset + _length) > fLength)
3588 return( 0 );
3589
3590 retain();
3591 if( (fLength == _length) && (!_offset))
3592 {
3593 newMapping = this;
3594 }
3595 else
3596 {
3597 newMapping->fSuperMap = this;
3598 newMapping->fOffset = fOffset + _offset;
3599 newMapping->fAddress = fAddress + _offset;
3600 }
3601
3602 return( newMapping );
3603 }
3604
3605 IOReturn IOMemoryMap::wireRange(
3606 uint32_t options,
3607 mach_vm_size_t offset,
3608 mach_vm_size_t length)
3609 {
3610 IOReturn kr;
3611 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3612 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3613
3614 if (kIODirectionOutIn & options)
3615 {
3616 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3617 }
3618 else
3619 {
3620 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3621 }
3622
3623 return (kr);
3624 }
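/*
 * Editor's illustrative sketch -- not part of this file. Wiring the pages
 * behind an existing mapping for the duration of an operation, then unwiring
 * them by passing no direction bits; WireWholeMapping is a hypothetical name.
 *
 *   static void WireWholeMapping(IOMemoryMap * map)
 *   {
 *       // Wire: any kIODirectionOutIn bit set selects vm_map_wire() above.
 *       map->wireRange(kIODirectionOutIn, 0, map->getLength());
 *       // ... do the work that needs resident pages ...
 *       // Unwire: no direction bits selects vm_map_unwire().
 *       map->wireRange(0, 0, map->getLength());
 *   }
 */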
3625
3626
3627 IOPhysicalAddress
3628 #ifdef __LP64__
3629 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3630 #else /* !__LP64__ */
3631 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3632 #endif /* !__LP64__ */
3633 {
3634 IOPhysicalAddress address;
3635
3636 LOCK;
3637 #ifdef __LP64__
3638 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3639 #else /* !__LP64__ */
3640 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3641 #endif /* !__LP64__ */
3642 UNLOCK;
3643
3644 return( address );
3645 }
3646
3647 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3648
3649 #undef super
3650 #define super OSObject
3651
3652 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3653
3654 void IOMemoryDescriptor::initialize( void )
3655 {
3656 if( 0 == gIOMemoryLock)
3657 gIOMemoryLock = IORecursiveLockAlloc();
3658
3659 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3660 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3661 gIOLastPage = IOGetLastPageNumber();
3662
3663 gIOPageAllocLock = IOSimpleLockAlloc();
3664 queue_init(&gIOPageAllocList);
3665 }
3666
3667 void IOMemoryDescriptor::free( void )
3668 {
3669 if( _mappings)
3670 _mappings->release();
3671
3672 super::free();
3673 }
3674
3675 IOMemoryMap * IOMemoryDescriptor::setMapping(
3676 task_t intoTask,
3677 IOVirtualAddress mapAddress,
3678 IOOptionBits options )
3679 {
3680 return (createMappingInTask( intoTask, mapAddress,
3681 options | kIOMapStatic,
3682 0, getLength() ));
3683 }
3684
3685 IOMemoryMap * IOMemoryDescriptor::map(
3686 IOOptionBits options )
3687 {
3688 return (createMappingInTask( kernel_task, 0,
3689 options | kIOMapAnywhere,
3690 0, getLength() ));
3691 }
3692
3693 #ifndef __LP64__
3694 IOMemoryMap * IOMemoryDescriptor::map(
3695 task_t intoTask,
3696 IOVirtualAddress atAddress,
3697 IOOptionBits options,
3698 IOByteCount offset,
3699 IOByteCount length )
3700 {
3701 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3702 {
3703 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3704 return (0);
3705 }
3706
3707 return (createMappingInTask(intoTask, atAddress,
3708 options, offset, length));
3709 }
3710 #endif /* !__LP64__ */
3711
3712 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3713 task_t intoTask,
3714 mach_vm_address_t atAddress,
3715 IOOptionBits options,
3716 mach_vm_size_t offset,
3717 mach_vm_size_t length)
3718 {
3719 IOMemoryMap * result;
3720 IOMemoryMap * mapping;
3721
3722 if (0 == length)
3723 length = getLength();
3724
3725 mapping = new IOMemoryMap;
3726
3727 if( mapping
3728 && !mapping->init( intoTask, atAddress,
3729 options, offset, length )) {
3730 mapping->release();
3731 mapping = 0;
3732 }
3733
3734 if (mapping)
3735 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3736 else
3737 result = 0;
3738
3739 #if DEBUG
3740 if (!result)
3741 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3742 this, atAddress, (uint32_t) options, offset, length);
3743 #endif
3744
3745 return (result);
3746 }
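/*
 * Editor's illustrative sketch -- not part of this file. Mapping a descriptor
 * into the kernel's address space with createMappingInTask() and reading back
 * the resulting address; MapIntoKernel is a hypothetical name and error
 * handling is abbreviated.
 *
 *   static IOMemoryMap * MapIntoKernel(IOMemoryDescriptor * md)
 *   {
 *       // kIOMapAnywhere lets the VM choose the address; the returned
 *       // IOMemoryMap keeps the mapping alive until it is released.
 *       IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                                   kIOMapAnywhere);
 *       if (map)
 *           IOLog("mapped at 0x%qx, length 0x%qx\n",
 *                 (uint64_t) map->getAddress(), (uint64_t) map->getLength());
 *       return map;
 *   }
 */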
3747
3748 #ifndef __LP64__ // there is only a 64 bit version for LP64
3749 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3750 IOOptionBits options,
3751 IOByteCount offset)
3752 {
3753 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3754 }
3755 #endif
3756
3757 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3758 IOOptionBits options,
3759 mach_vm_size_t offset)
3760 {
3761 IOReturn err = kIOReturnSuccess;
3762 IOMemoryDescriptor * physMem = 0;
3763
3764 LOCK;
3765
3766 if (fAddress && fAddressMap) do
3767 {
3768 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3769 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3770 {
3771 physMem = fMemory;
3772 physMem->retain();
3773 }
3774
3775 if (!fRedirUPL)
3776 {
3777 vm_size_t size = round_page(fLength);
3778 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3779 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3780 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3781 NULL, NULL,
3782 &flags))
3783 fRedirUPL = 0;
3784
3785 if (physMem)
3786 {
3787 IOUnmapPages( fAddressMap, fAddress, fLength );
3788 if (false)
3789 physMem->redirect(0, true);
3790 }
3791 }
3792
3793 if (newBackingMemory)
3794 {
3795 if (newBackingMemory != fMemory)
3796 {
3797 fOffset = 0;
3798 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3799 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3800 offset, fLength))
3801 err = kIOReturnError;
3802 }
3803 if (fRedirUPL)
3804 {
3805 upl_commit(fRedirUPL, NULL, 0);
3806 upl_deallocate(fRedirUPL);
3807 fRedirUPL = 0;
3808 }
3809 if (false && physMem)
3810 physMem->redirect(0, false);
3811 }
3812 }
3813 while (false);
3814
3815 UNLOCK;
3816
3817 if (physMem)
3818 physMem->release();
3819
3820 return (err);
3821 }
3822
3823 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3824 IOMemoryDescriptor * owner,
3825 task_t __intoTask,
3826 IOVirtualAddress __address,
3827 IOOptionBits options,
3828 IOByteCount __offset,
3829 IOByteCount __length )
3830 {
3831 #ifndef __LP64__
3832 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3833 #endif /* !__LP64__ */
3834
3835 IOMemoryDescriptor * mapDesc = 0;
3836 IOMemoryMap * result = 0;
3837 OSIterator * iter;
3838
3839 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3840 mach_vm_size_t offset = mapping->fOffset + __offset;
3841 mach_vm_size_t length = mapping->fLength;
3842
3843 mapping->fOffset = offset;
3844
3845 LOCK;
3846
3847 do
3848 {
3849 if (kIOMapStatic & options)
3850 {
3851 result = mapping;
3852 addMapping(mapping);
3853 mapping->setMemoryDescriptor(this, 0);
3854 continue;
3855 }
3856
3857 if (kIOMapUnique & options)
3858 {
3859 addr64_t phys;
3860 IOByteCount physLen;
3861
3862 // if (owner != this) continue;
3863
3864 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3865 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3866 {
3867 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3868 if (!phys || (physLen < length))
3869 continue;
3870
3871 mapDesc = IOMemoryDescriptor::withAddressRange(
3872 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3873 if (!mapDesc)
3874 continue;
3875 offset = 0;
3876 mapping->fOffset = offset;
3877 }
3878 }
3879 else
3880 {
3881 // look for a compatible existing mapping
3882 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3883 {
3884 IOMemoryMap * lookMapping;
3885 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3886 {
3887 if ((result = lookMapping->copyCompatible(mapping)))
3888 {
3889 addMapping(result);
3890 result->setMemoryDescriptor(this, offset);
3891 break;
3892 }
3893 }
3894 iter->release();
3895 }
3896 if (result || (options & kIOMapReference))
3897 {
3898 if (result != mapping)
3899 {
3900 mapping->release();
3901 mapping = NULL;
3902 }
3903 continue;
3904 }
3905 }
3906
3907 if (!mapDesc)
3908 {
3909 mapDesc = this;
3910 mapDesc->retain();
3911 }
3912 IOReturn
3913 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3914 if (kIOReturnSuccess == kr)
3915 {
3916 result = mapping;
3917 mapDesc->addMapping(result);
3918 result->setMemoryDescriptor(mapDesc, offset);
3919 }
3920 else
3921 {
3922 mapping->release();
3923 mapping = NULL;
3924 }
3925 }
3926 while( false );
3927
3928 UNLOCK;
3929
3930 if (mapDesc)
3931 mapDesc->release();
3932
3933 return (result);
3934 }
3935
3936 void IOMemoryDescriptor::addMapping(
3937 IOMemoryMap * mapping )
3938 {
3939 if( mapping)
3940 {
3941 if( 0 == _mappings)
3942 _mappings = OSSet::withCapacity(1);
3943 if( _mappings )
3944 _mappings->setObject( mapping );
3945 }
3946 }
3947
3948 void IOMemoryDescriptor::removeMapping(
3949 IOMemoryMap * mapping )
3950 {
3951 if( _mappings)
3952 _mappings->removeObject( mapping);
3953 }
3954
3955 #ifndef __LP64__
3956 // obsolete initializers
3957 // - initWithOptions is the designated initializer
3958 bool
3959 IOMemoryDescriptor::initWithAddress(void * address,
3960 IOByteCount length,
3961 IODirection direction)
3962 {
3963 return( false );
3964 }
3965
3966 bool
3967 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3968 IOByteCount length,
3969 IODirection direction,
3970 task_t task)
3971 {
3972 return( false );
3973 }
3974
3975 bool
3976 IOMemoryDescriptor::initWithPhysicalAddress(
3977 IOPhysicalAddress address,
3978 IOByteCount length,
3979 IODirection direction )
3980 {
3981 return( false );
3982 }
3983
3984 bool
3985 IOMemoryDescriptor::initWithRanges(
3986 IOVirtualRange * ranges,
3987 UInt32 withCount,
3988 IODirection direction,
3989 task_t task,
3990 bool asReference)
3991 {
3992 return( false );
3993 }
3994
3995 bool
3996 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3997 UInt32 withCount,
3998 IODirection direction,
3999 bool asReference)
4000 {
4001 return( false );
4002 }
4003
4004 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4005 IOByteCount * lengthOfSegment)
4006 {
4007 return( 0 );
4008 }
4009 #endif /* !__LP64__ */
4010
4011 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4012
4013 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4014 {
4015 OSSymbol const *keys[2];
4016 OSObject *values[2];
4017 struct SerData {
4018 user_addr_t address;
4019 user_size_t length;
4020 } *vcopy;
4021 unsigned int index, nRanges;
4022 bool result;
4023
4024 IOOptionBits type = _flags & kIOMemoryTypeMask;
4025
4026 if (s == NULL) return false;
4027 if (s->previouslySerialized(this)) return true;
4028
4029 // Pretend we are an array.
4030 if (!s->addXMLStartTag(this, "array")) return false;
4031
4032 nRanges = _rangesCount;
4033 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4034 if (vcopy == 0) return false;
4035
4036 keys[0] = OSSymbol::withCString("address");
4037 keys[1] = OSSymbol::withCString("length");
4038
4039 result = false;
4040 values[0] = values[1] = 0;
4041
4042 // From this point on we can go to bail.
4043
4044 // Copy the volatile data so we don't have to allocate memory
4045 // while the lock is held.
4046 LOCK;
4047 if (nRanges == _rangesCount) {
4048 Ranges vec = _ranges;
4049 for (index = 0; index < nRanges; index++) {
4050 user_addr_t addr; IOByteCount len;
4051 getAddrLenForInd(addr, len, type, vec, index);
4052 vcopy[index].address = addr;
4053 vcopy[index].length = len;
4054 }
4055 } else {
4056 // The descriptor changed out from under us. Give up.
4057 UNLOCK;
4058 result = false;
4059 goto bail;
4060 }
4061 UNLOCK;
4062
4063 for (index = 0; index < nRanges; index++)
4064 {
4065 user_addr_t addr = vcopy[index].address;
4066 IOByteCount len = (IOByteCount) vcopy[index].length;
4067 values[0] =
4068 OSNumber::withNumber(addr, sizeof(addr) * 8);
4069 if (values[0] == 0) {
4070 result = false;
4071 goto bail;
4072 }
4073 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4074 if (values[1] == 0) {
4075 result = false;
4076 goto bail;
4077 }
4078 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4079 if (dict == 0) {
4080 result = false;
4081 goto bail;
4082 }
4083 values[0]->release();
4084 values[1]->release();
4085 values[0] = values[1] = 0;
4086
4087 result = dict->serialize(s);
4088 dict->release();
4089 if (!result) {
4090 goto bail;
4091 }
4092 }
4093 result = s->addXMLEndTag("array");
4094
4095 bail:
4096 if (values[0])
4097 values[0]->release();
4098 if (values[1])
4099 values[1]->release();
4100 if (keys[0])
4101 keys[0]->release();
4102 if (keys[1])
4103 keys[1]->release();
4104 if (vcopy)
4105 IOFree(vcopy, sizeof(SerData) * nRanges);
4106 return result;
4107 }
4108
4109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4110
4111 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4112 #ifdef __LP64__
4113 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4114 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4115 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4116 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4117 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4118 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4119 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4120 #else /* !__LP64__ */
4121 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4122 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4123 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4124 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4125 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4126 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4127 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4128 #endif /* !__LP64__ */
4129 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4130 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4131 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4132 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4133 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4134 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4135 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4136 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4137
4138 /* ex-inline function implementation */
4139 IOPhysicalAddress
4140 IOMemoryDescriptor::getPhysicalAddress()
4141 { return( getPhysicalSegment( 0, 0 )); }