// apple/xnu xnu-3789.1.32: iokit/Kernel/IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53 #include <os/overflow.h>
54
55 #include <sys/uio.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #include <mach/vm_prot.h>
64 #include <mach/mach_vm.h>
65 #include <vm/vm_fault.h>
66 #include <vm/vm_protos.h>
67
68 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
69 extern void ipc_port_release_send(ipc_port_t port);
70
71 // osfmk/device/iokit_rpc.c
72 unsigned int IODefaultCacheBits(addr64_t pa);
73 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
74
75 __END_DECLS
76
77 #define kIOMapperWaitSystem ((IOMapper *) 1)
78
79 static IOMapper * gIOSystemMapper = NULL;
80
81 ppnum_t gIOLastPage;
82
83 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
84
85 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
86
87 #define super IOMemoryDescriptor
88
89 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
90
91 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
92
93 static IORecursiveLock * gIOMemoryLock;
94
95 #define LOCK IORecursiveLockLock( gIOMemoryLock)
96 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
97 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
98 #define WAKEUP \
99 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
100
101 #if 0
102 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
103 #else
104 #define DEBG(fmt, args...) {}
105 #endif
106
107 #define IOMD_DEBUG_DMAACTIVE 1
108
109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111 // Some data structures and accessor macros used by the initWithOptions()
112 // function.
113
114 enum ioPLBlockFlags {
115 kIOPLOnDevice = 0x00000001,
116 kIOPLExternUPL = 0x00000002,
117 };
118
119 struct IOMDPersistentInitData
120 {
121 const IOGeneralMemoryDescriptor * fMD;
122 IOMemoryReference * fMemRef;
123 };
124
125 struct ioPLBlock {
126 upl_t fIOPL;
127 vm_address_t fPageInfo; // Pointer to page list or index into it
128 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
129 ppnum_t fMappedPage; // Page number of first page in this iopl
130 unsigned int fPageOffset; // Offset within first page of iopl
131 unsigned int fFlags; // Flags
132 };
133
134 enum { kMaxWireTags = 6 };
135
136 struct ioGMDData
137 {
138 IOMapper * fMapper;
139 uint64_t fDMAMapAlignment;
140 uint64_t fMappedBase;
141 uint64_t fMappedLength;
142 uint64_t fPreparationID;
143 #if IOTRACKING
144 IOTracking fWireTracking;
145 struct vm_tag_set fWireTags;
146 struct vm_tag_set_entry fWireTagsEntries[kMaxWireTags];
147 #endif /* IOTRACKING */
148 unsigned int fPageCnt;
149 uint8_t fDMAMapNumAddressBits;
150 vm_tag_t fAllocTag;
151 unsigned char fDiscontig:1;
152 unsigned char fCompletionError:1;
153 unsigned char _resv:6;
154
155 /* variable length arrays */
156 upl_page_info_t fPageList[1]
157 #if __LP64__
158 // align fPageList as for ioPLBlock
159 __attribute__((aligned(sizeof(upl_t))))
160 #endif
161 ;
162 ioPLBlock fBlocks[1];
163 };
164
165 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
166 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
167 #define getNumIOPL(osd, d) \
168 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
169 #define getPageList(d) (&(d->fPageList[0]))
170 #define computeDataSize(p, u) \
171 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
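
// Illustrative sketch, not part of the original source: how the accessor
// macros above walk the variable-length ioGMDData layout (header, then
// fPageList[fPageCnt], then the ioPLBlock array). Disabled; the OSData
// argument and the function name are assumptions for illustration only.
#if 0
static void
exampleDumpIOPLs(OSData * memoryEntries)
{
    ioGMDData  * dataP    = getDataP(memoryEntries);           // header at the start of the OSData
    ioPLBlock  * ioplList = getIOPLList(dataP);                 // follows fPageList[fPageCnt]
    unsigned int numIOPL  = getNumIOPL(memoryEntries, dataP);   // derived from the OSData length

    for (unsigned int i = 0; i < numIOPL; i++)
        DEBG("iopl %u: md offset 0x%x, page offset 0x%x\n",
             i, ioplList[i].fIOMDOffset, ioplList[i].fPageOffset);
}
#endif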
172
173 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
174
175 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
176
177 extern "C" {
178
179 kern_return_t device_data_action(
180 uintptr_t device_handle,
181 ipc_port_t device_pager,
182 vm_prot_t protection,
183 vm_object_offset_t offset,
184 vm_size_t size)
185 {
186 kern_return_t kr;
187 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
188 IOMemoryDescriptor * memDesc;
189
190 LOCK;
191 memDesc = ref->dp.memory;
192 if( memDesc)
193 {
194 memDesc->retain();
195 kr = memDesc->handleFault(device_pager, offset, size);
196 memDesc->release();
197 }
198 else
199 kr = KERN_ABORTED;
200 UNLOCK;
201
202 return( kr );
203 }
204
205 kern_return_t device_close(
206 uintptr_t device_handle)
207 {
208 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
209
210 IODelete( ref, IOMemoryDescriptorReserved, 1 );
211
212 return( kIOReturnSuccess );
213 }
214 }; // end extern "C"
215
216 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
217
218 // Note this inline function uses C++ reference arguments to return values.
219 // This means that pointers are not passed and NULLs don't have to be
220 // checked for, since a NULL reference is illegal.
221 static inline void
222 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
223 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
224 {
225 assert(kIOMemoryTypeUIO == type
226 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
227 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
228 if (kIOMemoryTypeUIO == type) {
229 user_size_t us;
230 user_addr_t ad;
231 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
232 }
233 #ifndef __LP64__
234 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
235 IOAddressRange cur = r.v64[ind];
236 addr = cur.address;
237 len = cur.length;
238 }
239 #endif /* !__LP64__ */
240 else {
241 IOVirtualRange cur = r.v[ind];
242 addr = cur.address;
243 len = cur.length;
244 }
245 }
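
// Illustrative sketch, not part of the original source: because the outputs
// come back through C++ references, a caller just declares locals and passes
// them in. Disabled; the helper name is an assumption for illustration only.
#if 0
static void
exampleFirstRange(UInt32 type, IOGeneralMemoryDescriptor::Ranges r)
{
    mach_vm_address_t addr;
    mach_vm_size_t    len;

    getAddrLenForInd(addr, len, type, r, 0);   // addr & len are filled in by reference
    DEBG("range 0: 0x%llx + 0x%llx\n", (uint64_t) addr, (uint64_t) len);
}
#endif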
246
247 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
248
249 static IOReturn
250 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
251 {
252 IOReturn err = kIOReturnSuccess;
253
254 *control = VM_PURGABLE_SET_STATE;
255
256 enum { kIOMemoryPurgeableControlMask = 15 };
257
258 switch (kIOMemoryPurgeableControlMask & newState)
259 {
260 case kIOMemoryPurgeableKeepCurrent:
261 *control = VM_PURGABLE_GET_STATE;
262 break;
263
264 case kIOMemoryPurgeableNonVolatile:
265 *state = VM_PURGABLE_NONVOLATILE;
266 break;
267 case kIOMemoryPurgeableVolatile:
268 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
269 break;
270 case kIOMemoryPurgeableEmpty:
271 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
272 break;
273 default:
274 err = kIOReturnBadArgument;
275 break;
276 }
277 return (err);
278 }
279
280 static IOReturn
281 purgeableStateBits(int * state)
282 {
283 IOReturn err = kIOReturnSuccess;
284
285 switch (VM_PURGABLE_STATE_MASK & *state)
286 {
287 case VM_PURGABLE_NONVOLATILE:
288 *state = kIOMemoryPurgeableNonVolatile;
289 break;
290 case VM_PURGABLE_VOLATILE:
291 *state = kIOMemoryPurgeableVolatile;
292 break;
293 case VM_PURGABLE_EMPTY:
294 *state = kIOMemoryPurgeableEmpty;
295 break;
296 default:
297 *state = kIOMemoryPurgeableNonVolatile;
298 err = kIOReturnNotReady;
299 break;
300 }
301 return (err);
302 }
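
// Illustrative sketch, not part of the original source: the two helpers above
// translate between the kIOMemoryPurgeable* constants and the VM layer's
// VM_PURGABLE_* values, in both directions. Disabled example:
#if 0
static void
examplePurgeableRoundTrip(void)
{
    vm_purgable_t control;
    int           state;

    if (kIOReturnSuccess == purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state))
    {
        // control is VM_PURGABLE_SET_STATE and state is VM_PURGABLE_VOLATILE here.
        // After mach_memory_entry_purgable_control() writes back the previous VM
        // state, purgeableStateBits() converts it to a kIOMemoryPurgeable* value.
        (void) purgeableStateBits(&state);
    }
}
#endif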
303
304
305 static vm_prot_t
306 vmProtForCacheMode(IOOptionBits cacheMode)
307 {
308 vm_prot_t prot = 0;
309 switch (cacheMode)
310 {
311 case kIOInhibitCache:
312 SET_MAP_MEM(MAP_MEM_IO, prot);
313 break;
314
315 case kIOWriteThruCache:
316 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
317 break;
318
319 case kIOWriteCombineCache:
320 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
321 break;
322
323 case kIOCopybackCache:
324 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
325 break;
326
327 case kIOCopybackInnerCache:
328 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
329 break;
330
331 case kIODefaultCache:
332 default:
333 SET_MAP_MEM(MAP_MEM_NOOP, prot);
334 break;
335 }
336
337 return (prot);
338 }
339
340 static unsigned int
341 pagerFlagsForCacheMode(IOOptionBits cacheMode)
342 {
343 unsigned int pagerFlags = 0;
344 switch (cacheMode)
345 {
346 case kIOInhibitCache:
347 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
348 break;
349
350 case kIOWriteThruCache:
351 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
352 break;
353
354 case kIOWriteCombineCache:
355 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
356 break;
357
358 case kIOCopybackCache:
359 pagerFlags = DEVICE_PAGER_COHERENT;
360 break;
361
362 case kIOCopybackInnerCache:
363 pagerFlags = DEVICE_PAGER_COHERENT;
364 break;
365
366 case kIODefaultCache:
367 default:
368 pagerFlags = -1U;
369 break;
370 }
371 return (pagerFlags);
372 }
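
// Illustrative sketch, not part of the original source: a cache mode is
// consumed in two forms, MAP_MEM_* bits folded into a vm_prot_t for named
// memory entries, and DEVICE_PAGER_* flags for the device pager. Disabled:
#if 0
static void
exampleCacheModeBits(void)
{
    IOOptionBits cacheMode = kIOWriteCombineCache;
    vm_prot_t    prot      = VM_PROT_READ | vmProtForCacheMode(cacheMode);
    unsigned int pagerBits = pagerFlagsForCacheMode(cacheMode);

    // prot now carries the MAP_MEM_WCOMB cache-mode bits; pagerBits is
    // DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT (see the switches above).
    (void) prot; (void) pagerBits;
}
#endif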
373
374 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
375 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
376
377 struct IOMemoryEntry
378 {
379 ipc_port_t entry;
380 int64_t offset;
381 uint64_t size;
382 };
383
384 struct IOMemoryReference
385 {
386 volatile SInt32 refCount;
387 vm_prot_t prot;
388 uint32_t capacity;
389 uint32_t count;
390 IOMemoryEntry entries[0];
391 };
392
393 enum
394 {
395 kIOMemoryReferenceReuse = 0x00000001,
396 kIOMemoryReferenceWrite = 0x00000002,
397 };
398
399 SInt32 gIOMemoryReferenceCount;
400
401 IOMemoryReference *
402 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
403 {
404 IOMemoryReference * ref;
405 size_t newSize, oldSize, copySize;
406
407 newSize = (sizeof(IOMemoryReference)
408 - sizeof(ref->entries)
409 + capacity * sizeof(ref->entries[0]));
410 ref = (typeof(ref)) IOMalloc(newSize);
411 if (realloc)
412 {
413 oldSize = (sizeof(IOMemoryReference)
414 - sizeof(realloc->entries)
415 + realloc->capacity * sizeof(realloc->entries[0]));
416 copySize = oldSize;
417 if (copySize > newSize) copySize = newSize;
418 if (ref) bcopy(realloc, ref, copySize);
419 IOFree(realloc, oldSize);
420 }
421 else if (ref)
422 {
423 bzero(ref, sizeof(*ref));
424 ref->refCount = 1;
425 OSIncrementAtomic(&gIOMemoryReferenceCount);
426 }
427 if (!ref) return (0);
428 ref->capacity = capacity;
429 return (ref);
430 }
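
// Illustrative sketch, not part of the original source: an IOMemoryReference
// is a header plus a flexible array of IOMemoryEntry, so growing it means
// allocating a larger block and passing the old one as the 'realloc' argument,
// which copies the existing entries and frees the old block. Disabled, error
// handling elided; these helpers are internal to IOGeneralMemoryDescriptor.
#if 0
static void
exampleGrowReference(void)
{
    IOMemoryReference * ref;

    ref = IOGeneralMemoryDescriptor::memoryReferenceAlloc(4, NULL);   // 4 entry slots
    // ... fill ref->entries[0..3] and set ref->count ...
    ref = IOGeneralMemoryDescriptor::memoryReferenceAlloc(8, ref);    // grow to 8 slots
}
#endif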
431
432 void
433 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
434 {
435 IOMemoryEntry * entries;
436 size_t size;
437
438 entries = ref->entries + ref->count;
439 while (entries > &ref->entries[0])
440 {
441 entries--;
442 ipc_port_release_send(entries->entry);
443 }
444 size = (sizeof(IOMemoryReference)
445 - sizeof(ref->entries)
446 + ref->capacity * sizeof(ref->entries[0]));
447 IOFree(ref, size);
448
449 OSDecrementAtomic(&gIOMemoryReferenceCount);
450 }
451
452 void
453 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
454 {
455 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
456 }
457
458
459 IOReturn
460 IOGeneralMemoryDescriptor::memoryReferenceCreate(
461 IOOptionBits options,
462 IOMemoryReference ** reference)
463 {
464 enum { kCapacity = 4, kCapacityInc = 4 };
465
466 kern_return_t err;
467 IOMemoryReference * ref;
468 IOMemoryEntry * entries;
469 IOMemoryEntry * cloneEntries;
470 vm_map_t map;
471 ipc_port_t entry, cloneEntry;
472 vm_prot_t prot;
473 memory_object_size_t actualSize;
474 uint32_t rangeIdx;
475 uint32_t count;
476 mach_vm_address_t entryAddr, endAddr, entrySize;
477 mach_vm_size_t srcAddr, srcLen;
478 mach_vm_size_t nextAddr, nextLen;
479 mach_vm_size_t offset, remain;
480 IOByteCount physLen;
481 IOOptionBits type = (_flags & kIOMemoryTypeMask);
482 IOOptionBits cacheMode;
483 unsigned int pagerFlags;
484 vm_tag_t tag;
485
486 ref = memoryReferenceAlloc(kCapacity, NULL);
487 if (!ref) return (kIOReturnNoMemory);
488
489 tag = getVMTag(kernel_map);
490 entries = &ref->entries[0];
491 count = 0;
492
493 offset = 0;
494 rangeIdx = 0;
495 if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
496 else
497 {
498 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
499 nextLen = physLen;
500
501 // default cache mode for physical
502 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
503 {
504 IOOptionBits mode;
505 pagerFlags = IODefaultCacheBits(nextAddr);
506 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
507 {
508 if (DEVICE_PAGER_GUARDED & pagerFlags)
509 mode = kIOInhibitCache;
510 else
511 mode = kIOWriteCombineCache;
512 }
513 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
514 mode = kIOWriteThruCache;
515 else
516 mode = kIOCopybackCache;
517 _flags |= (mode << kIOMemoryBufferCacheShift);
518 }
519 }
520
521 // cache mode & vm_prot
522 prot = VM_PROT_READ;
523 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
524 prot |= vmProtForCacheMode(cacheMode);
525 // VM system requires write access to change cache mode
526 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
527 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
528 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
529
530 if ((kIOMemoryReferenceReuse & options) && _memRef)
531 {
532 cloneEntries = &_memRef->entries[0];
533 prot |= MAP_MEM_NAMED_REUSE;
534 }
535
536 if (_task)
537 {
538 // virtual ranges
539
540 if (kIOMemoryBufferPageable & _flags)
541 {
542 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
543 prot |= MAP_MEM_NAMED_CREATE;
544 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
545 if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;
546
547 prot |= VM_PROT_WRITE;
548 map = NULL;
549 }
550 else map = get_task_map(_task);
551
552 remain = _length;
553 while (remain)
554 {
555 srcAddr = nextAddr;
556 srcLen = nextLen;
557 nextAddr = 0;
558 nextLen = 0;
559 // coalesce addr range
560 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
561 {
562 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
563 if ((srcAddr + srcLen) != nextAddr) break;
564 srcLen += nextLen;
565 }
566 entryAddr = trunc_page_64(srcAddr);
567 endAddr = round_page_64(srcAddr + srcLen);
568 do
569 {
570 entrySize = (endAddr - entryAddr);
571 if (!entrySize) break;
572 actualSize = entrySize;
573
574 cloneEntry = MACH_PORT_NULL;
575 if (MAP_MEM_NAMED_REUSE & prot)
576 {
577 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
578 else prot &= ~MAP_MEM_NAMED_REUSE;
579 }
580
581 err = mach_make_memory_entry_64(map,
582 &actualSize, entryAddr, prot, &entry, cloneEntry);
583
584 if (KERN_SUCCESS != err) break;
585 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
586
587 if (count >= ref->capacity)
588 {
589 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
590 entries = &ref->entries[count];
591 }
592 entries->entry = entry;
593 entries->size = actualSize;
594 entries->offset = offset + (entryAddr - srcAddr);
595 entryAddr += actualSize;
596 if (MAP_MEM_NAMED_REUSE & prot)
597 {
598 if ((cloneEntries->entry == entries->entry)
599 && (cloneEntries->size == entries->size)
600 && (cloneEntries->offset == entries->offset)) cloneEntries++;
601 else prot &= ~MAP_MEM_NAMED_REUSE;
602 }
603 entries++;
604 count++;
605 }
606 while (true);
607 offset += srcLen;
608 remain -= srcLen;
609 }
610 }
611 else
612 {
613 // _task == 0, physical or kIOMemoryTypeUPL
614 memory_object_t pager;
615 vm_size_t size = ptoa_32(_pages);
616
617 if (!getKernelReserved()) panic("getKernelReserved");
618
619 reserved->dp.pagerContig = (1 == _rangesCount);
620 reserved->dp.memory = this;
621
622 pagerFlags = pagerFlagsForCacheMode(cacheMode);
623 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
624 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
625
626 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
627 size, pagerFlags);
628 assert (pager);
629 if (!pager) err = kIOReturnVMError;
630 else
631 {
632 srcAddr = nextAddr;
633 entryAddr = trunc_page_64(srcAddr);
634 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
635 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
636 assert (KERN_SUCCESS == err);
637 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
638 else
639 {
640 reserved->dp.devicePager = pager;
641 entries->entry = entry;
642 entries->size = size;
643 entries->offset = offset + (entryAddr - srcAddr);
644 entries++;
645 count++;
646 }
647 }
648 }
649
650 ref->count = count;
651 ref->prot = prot;
652
653 if (KERN_SUCCESS == err)
654 {
655 if (MAP_MEM_NAMED_REUSE & prot)
656 {
657 memoryReferenceFree(ref);
658 OSIncrementAtomic(&_memRef->refCount);
659 ref = _memRef;
660 }
661 }
662 else
663 {
664 memoryReferenceFree(ref);
665 ref = NULL;
666 }
667
668 *reference = ref;
669
670 return (err);
671 }
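
// Illustrative sketch, not part of the original source: the loop near the top
// of the _task branch above folds ranges that happen to be virtually contiguous
// into a single named-entry request. In isolation the idea is:
#if 0
static mach_vm_size_t
exampleCoalesce(const IOAddressRange * ranges, uint32_t count, uint32_t * inOutIdx)
{
    uint32_t          idx  = *inOutIdx;
    mach_vm_address_t addr = ranges[idx].address;
    mach_vm_size_t    len  = ranges[idx].length;

    for (idx++; idx < count; idx++)
    {
        if ((addr + len) != ranges[idx].address) break;   // gap: stop coalescing
        len += ranges[idx].length;
    }
    *inOutIdx = idx;
    return (len);
}
#endif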
672
673 kern_return_t
674 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
675 {
676 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
677 IOReturn err;
678 vm_map_offset_t addr;
679
680 addr = ref->mapped;
681
682 err = vm_map_enter_mem_object(map, &addr, ref->size,
683 (vm_map_offset_t) 0,
684 (((ref->options & kIOMapAnywhere)
685 ? VM_FLAGS_ANYWHERE
686 : VM_FLAGS_FIXED)
687 | VM_MAKE_TAG(ref->tag)),
688 IPC_PORT_NULL,
689 (memory_object_offset_t) 0,
690 false, /* copy */
691 ref->prot,
692 ref->prot,
693 VM_INHERIT_NONE);
694 if (KERN_SUCCESS == err)
695 {
696 ref->mapped = (mach_vm_address_t) addr;
697 ref->map = map;
698 }
699
700 return( err );
701 }
702
703 IOReturn
704 IOGeneralMemoryDescriptor::memoryReferenceMap(
705 IOMemoryReference * ref,
706 vm_map_t map,
707 mach_vm_size_t inoffset,
708 mach_vm_size_t size,
709 IOOptionBits options,
710 mach_vm_address_t * inaddr)
711 {
712 IOReturn err;
713 int64_t offset = inoffset;
714 uint32_t rangeIdx, entryIdx;
715 vm_map_offset_t addr, mapAddr;
716 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
717
718 mach_vm_address_t nextAddr;
719 mach_vm_size_t nextLen;
720 IOByteCount physLen;
721 IOMemoryEntry * entry;
722 vm_prot_t prot, memEntryCacheMode;
723 IOOptionBits type;
724 IOOptionBits cacheMode;
725 vm_tag_t tag;
726
727 /*
728 * For the kIOMapPrefault option.
729 */
730 upl_page_info_t *pageList = NULL;
731 UInt currentPageIndex = 0;
732
733 type = _flags & kIOMemoryTypeMask;
734 prot = VM_PROT_READ;
735 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
736 prot &= ref->prot;
737
738 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
739 if (kIODefaultCache != cacheMode)
740 {
741 // VM system requires write access to update named entry cache mode
742 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
743 }
744
745 tag = getVMTag(map);
746
747 if (_task)
748 {
749 // Find first range for offset
750 if (!_rangesCount) return (kIOReturnBadArgument);
751 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
752 {
753 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
754 if (remain < nextLen) break;
755 remain -= nextLen;
756 }
757 }
758 else
759 {
760 rangeIdx = 0;
761 remain = 0;
762 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
763 nextLen = size;
764 }
765
766 assert(remain < nextLen);
767 if (remain >= nextLen) return (kIOReturnBadArgument);
768
769 nextAddr += remain;
770 nextLen -= remain;
771 pageOffset = (page_mask & nextAddr);
772 addr = 0;
773 if (!(options & kIOMapAnywhere))
774 {
775 addr = *inaddr;
776 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
777 addr -= pageOffset;
778 }
779
780 // find first entry for offset
781 for (entryIdx = 0;
782 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
783 entryIdx++) {}
784 entryIdx--;
785 entry = &ref->entries[entryIdx];
786
787 // allocate VM
788 size = round_page_64(size + pageOffset);
789 if (kIOMapOverwrite & options)
790 {
791 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
792 {
793 map = IOPageableMapForAddress(addr);
794 }
795 err = KERN_SUCCESS;
796 }
797 else
798 {
799 IOMemoryDescriptorMapAllocRef ref;
800 ref.map = map;
801 ref.tag = tag;
802 ref.options = options;
803 ref.size = size;
804 ref.prot = prot;
805 if (options & kIOMapAnywhere)
806 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
807 ref.mapped = 0;
808 else
809 ref.mapped = addr;
810 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
811 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
812 else
813 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
814 if (KERN_SUCCESS == err)
815 {
816 addr = ref.mapped;
817 map = ref.map;
818 }
819 }
820
821 /*
822 * Prefaulting is only possible if we wired the memory earlier. Check the
823 * memory type and the underlying data.
824 */
825 if (options & kIOMapPrefault)
826 {
827 /*
828 * The memory must have been wired by calling ::prepare(), otherwise
829 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
830 */
831 assert(map != kernel_map);
832 assert(_wireCount != 0);
833 assert(_memoryEntries != NULL);
834 if ((map == kernel_map) ||
835 (_wireCount == 0) ||
836 (_memoryEntries == NULL))
837 {
838 return kIOReturnBadArgument;
839 }
840
841 // Get the page list.
842 ioGMDData* dataP = getDataP(_memoryEntries);
843 ioPLBlock const* ioplList = getIOPLList(dataP);
844 pageList = getPageList(dataP);
845
846 // Get the number of IOPLs.
847 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
848
849 /*
850 * Scan through the IOPL Info Blocks, looking for the first block containing
851 * the offset. The search will go one block past it, so we'll need to step
852 * back to the right block at the end.
853 */
854 UInt ioplIndex = 0;
855 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
856 ioplIndex++;
857 ioplIndex--;
858
859 // Retrieve the IOPL info block.
860 ioPLBlock ioplInfo = ioplList[ioplIndex];
861
862 /*
863 * For external UPLs, the fPageInfo field points directly to the UPL's page_info_t
864 * array.
865 */
866 if (ioplInfo.fFlags & kIOPLExternUPL)
867 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
868 else
869 pageList = &pageList[ioplInfo.fPageInfo];
870
871 // Rebase [offset] into the IOPL in order to look up the first page index.
872 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
873
874 // Retrieve the index of the first page corresponding to the offset.
875 currentPageIndex = atop_32(offsetInIOPL);
876 }
877
878 // enter mappings
879 remain = size;
880 mapAddr = addr;
881 addr += pageOffset;
882
883 while (remain && (KERN_SUCCESS == err))
884 {
885 entryOffset = offset - entry->offset;
886 if ((page_mask & entryOffset) != pageOffset)
887 {
888 err = kIOReturnNotAligned;
889 break;
890 }
891
892 if (kIODefaultCache != cacheMode)
893 {
894 vm_size_t unused = 0;
895 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
896 memEntryCacheMode, NULL, entry->entry);
897 assert (KERN_SUCCESS == err);
898 }
899
900 entryOffset -= pageOffset;
901 if (entryOffset >= entry->size) panic("entryOffset");
902 chunk = entry->size - entryOffset;
903 if (chunk)
904 {
905 if (chunk > remain) chunk = remain;
906 if (options & kIOMapPrefault)
907 {
908 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
909 err = vm_map_enter_mem_object_prefault(map,
910 &mapAddr,
911 chunk, 0 /* mask */,
912 (VM_FLAGS_FIXED
913 | VM_FLAGS_OVERWRITE
914 | VM_MAKE_TAG(tag)
915 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
916 entry->entry,
917 entryOffset,
918 prot, // cur
919 prot, // max
920 &pageList[currentPageIndex],
921 nb_pages);
922
923 // Compute the next index in the page list.
924 currentPageIndex += nb_pages;
925 assert(currentPageIndex <= _pages);
926 }
927 else
928 {
929 err = vm_map_enter_mem_object(map,
930 &mapAddr,
931 chunk, 0 /* mask */,
932 (VM_FLAGS_FIXED
933 | VM_FLAGS_OVERWRITE
934 | VM_MAKE_TAG(tag)
935 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
936 entry->entry,
937 entryOffset,
938 false, // copy
939 prot, // cur
940 prot, // max
941 VM_INHERIT_NONE);
942 }
943 if (KERN_SUCCESS != err) break;
944 remain -= chunk;
945 if (!remain) break;
946 mapAddr += chunk;
947 offset += chunk - pageOffset;
948 }
949 pageOffset = 0;
950 entry++;
951 entryIdx++;
952 if (entryIdx >= ref->count)
953 {
954 err = kIOReturnOverrun;
955 break;
956 }
957 }
958
959 if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
960 {
961 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
962 addr = 0;
963 }
964 *inaddr = addr;
965
966 return (err);
967 }
968
969 IOReturn
970 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
971 IOMemoryReference * ref,
972 IOByteCount * residentPageCount,
973 IOByteCount * dirtyPageCount)
974 {
975 IOReturn err;
976 IOMemoryEntry * entries;
977 unsigned int resident, dirty;
978 unsigned int totalResident, totalDirty;
979
980 totalResident = totalDirty = 0;
981 err = kIOReturnSuccess;
982 entries = ref->entries + ref->count;
983 while (entries > &ref->entries[0])
984 {
985 entries--;
986 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
987 if (KERN_SUCCESS != err) break;
988 totalResident += resident;
989 totalDirty += dirty;
990 }
991
992 if (residentPageCount) *residentPageCount = totalResident;
993 if (dirtyPageCount) *dirtyPageCount = totalDirty;
994 return (err);
995 }
996
997 IOReturn
998 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
999 IOMemoryReference * ref,
1000 IOOptionBits newState,
1001 IOOptionBits * oldState)
1002 {
1003 IOReturn err;
1004 IOMemoryEntry * entries;
1005 vm_purgable_t control;
1006 int totalState, state;
1007
1008 totalState = kIOMemoryPurgeableNonVolatile;
1009 err = kIOReturnSuccess;
1010 entries = ref->entries + ref->count;
1011 while (entries > &ref->entries[0])
1012 {
1013 entries--;
1014
1015 err = purgeableControlBits(newState, &control, &state);
1016 if (KERN_SUCCESS != err) break;
1017 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1018 if (KERN_SUCCESS != err) break;
1019 err = purgeableStateBits(&state);
1020 if (KERN_SUCCESS != err) break;
1021
1022 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1023 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1024 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1025 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1026 else totalState = kIOMemoryPurgeableNonVolatile;
1027 }
1028
1029 if (oldState) *oldState = totalState;
1030 return (err);
1031 }
1032
1033 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1034
1035 IOMemoryDescriptor *
1036 IOMemoryDescriptor::withAddress(void * address,
1037 IOByteCount length,
1038 IODirection direction)
1039 {
1040 return IOMemoryDescriptor::
1041 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1042 }
1043
1044 #ifndef __LP64__
1045 IOMemoryDescriptor *
1046 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1047 IOByteCount length,
1048 IODirection direction,
1049 task_t task)
1050 {
1051 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1052 if (that)
1053 {
1054 if (that->initWithAddress(address, length, direction, task))
1055 return that;
1056
1057 that->release();
1058 }
1059 return 0;
1060 }
1061 #endif /* !__LP64__ */
1062
1063 IOMemoryDescriptor *
1064 IOMemoryDescriptor::withPhysicalAddress(
1065 IOPhysicalAddress address,
1066 IOByteCount length,
1067 IODirection direction )
1068 {
1069 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1070 }
1071
1072 #ifndef __LP64__
1073 IOMemoryDescriptor *
1074 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1075 UInt32 withCount,
1076 IODirection direction,
1077 task_t task,
1078 bool asReference)
1079 {
1080 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1081 if (that)
1082 {
1083 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1084 return that;
1085
1086 that->release();
1087 }
1088 return 0;
1089 }
1090 #endif /* !__LP64__ */
1091
1092 IOMemoryDescriptor *
1093 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1094 mach_vm_size_t length,
1095 IOOptionBits options,
1096 task_t task)
1097 {
1098 IOAddressRange range = { address, length };
1099 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1100 }
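
// Illustrative sketch, not part of the original source: typical driver-side use
// of the factory above. 'buffer' and 'length' are assumed to describe memory in
// the calling task; disabled example only.
#if 0
static void
exampleWithAddressRange(mach_vm_address_t buffer, mach_vm_size_t length)
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                buffer, length, kIODirectionInOut, current_task());
    if (!md) return;

    if (kIOReturnSuccess == md->prepare())       // wire the pages for I/O
    {
        // ... program DMA or otherwise access the memory ...
        md->complete();                          // unwire when done
    }
    md->release();
}
#endif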
1101
1102 IOMemoryDescriptor *
1103 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1104 UInt32 rangeCount,
1105 IOOptionBits options,
1106 task_t task)
1107 {
1108 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1109 if (that)
1110 {
1111 if (task)
1112 options |= kIOMemoryTypeVirtual64;
1113 else
1114 options |= kIOMemoryTypePhysical64;
1115
1116 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1117 return that;
1118
1119 that->release();
1120 }
1121
1122 return 0;
1123 }
1124
1125
1126 /*
1127 * withOptions:
1128 *
1129 * Create a new IOMemoryDescriptor. The buffer is made up of several
1130 * virtual address ranges from a given task.
1131 *
1132 * Passing the ranges as a reference will avoid an extra allocation.
1133 */
1134 IOMemoryDescriptor *
1135 IOMemoryDescriptor::withOptions(void * buffers,
1136 UInt32 count,
1137 UInt32 offset,
1138 task_t task,
1139 IOOptionBits opts,
1140 IOMapper * mapper)
1141 {
1142 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1143
1144 if (self
1145 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1146 {
1147 self->release();
1148 return 0;
1149 }
1150
1151 return self;
1152 }
1153
1154 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1155 UInt32 count,
1156 UInt32 offset,
1157 task_t task,
1158 IOOptionBits options,
1159 IOMapper * mapper)
1160 {
1161 return( false );
1162 }
1163
1164 #ifndef __LP64__
1165 IOMemoryDescriptor *
1166 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1167 UInt32 withCount,
1168 IODirection direction,
1169 bool asReference)
1170 {
1171 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1172 if (that)
1173 {
1174 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1175 return that;
1176
1177 that->release();
1178 }
1179 return 0;
1180 }
1181
1182 IOMemoryDescriptor *
1183 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1184 IOByteCount offset,
1185 IOByteCount length,
1186 IODirection direction)
1187 {
1188 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1189 }
1190 #endif /* !__LP64__ */
1191
1192 IOMemoryDescriptor *
1193 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1194 {
1195 IOGeneralMemoryDescriptor *origGenMD =
1196 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1197
1198 if (origGenMD)
1199 return IOGeneralMemoryDescriptor::
1200 withPersistentMemoryDescriptor(origGenMD);
1201 else
1202 return 0;
1203 }
1204
1205 IOMemoryDescriptor *
1206 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1207 {
1208 IOMemoryReference * memRef;
1209
1210 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1211
1212 if (memRef == originalMD->_memRef)
1213 {
1214 originalMD->retain(); // Add a new reference to ourselves
1215 originalMD->memoryReferenceRelease(memRef);
1216 return originalMD;
1217 }
1218
1219 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1220 IOMDPersistentInitData initData = { originalMD, memRef };
1221
1222 if (self
1223 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1224 self->release();
1225 self = 0;
1226 }
1227 return self;
1228 }
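
// Illustrative sketch, not part of the original source: asking for a persistent
// descriptor may hand back the original object itself, retained, when its named
// entries can be reused (the _memRef comparison above). Disabled example; 'md'
// is an assumed existing descriptor created with kIOMemoryPersistent.
#if 0
static void
examplePersistentCopy(IOMemoryDescriptor * md)
{
    IOMemoryDescriptor * persistent =
        IOMemoryDescriptor::withPersistentMemoryDescriptor(md);
    if (persistent)
    {
        // persistent may be the same object as md; either way it holds its own
        // retain and must be released by the caller.
        persistent->release();
    }
}
#endif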
1229
1230 #ifndef __LP64__
1231 bool
1232 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1233 IOByteCount withLength,
1234 IODirection withDirection)
1235 {
1236 _singleRange.v.address = (vm_offset_t) address;
1237 _singleRange.v.length = withLength;
1238
1239 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1240 }
1241
1242 bool
1243 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1244 IOByteCount withLength,
1245 IODirection withDirection,
1246 task_t withTask)
1247 {
1248 _singleRange.v.address = address;
1249 _singleRange.v.length = withLength;
1250
1251 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1252 }
1253
1254 bool
1255 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1256 IOPhysicalAddress address,
1257 IOByteCount withLength,
1258 IODirection withDirection )
1259 {
1260 _singleRange.p.address = address;
1261 _singleRange.p.length = withLength;
1262
1263 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1264 }
1265
1266 bool
1267 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1268 IOPhysicalRange * ranges,
1269 UInt32 count,
1270 IODirection direction,
1271 bool reference)
1272 {
1273 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1274
1275 if (reference)
1276 mdOpts |= kIOMemoryAsReference;
1277
1278 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1279 }
1280
1281 bool
1282 IOGeneralMemoryDescriptor::initWithRanges(
1283 IOVirtualRange * ranges,
1284 UInt32 count,
1285 IODirection direction,
1286 task_t task,
1287 bool reference)
1288 {
1289 IOOptionBits mdOpts = direction;
1290
1291 if (reference)
1292 mdOpts |= kIOMemoryAsReference;
1293
1294 if (task) {
1295 mdOpts |= kIOMemoryTypeVirtual;
1296
1297 // Auto-prepare if this is a kernel memory descriptor as very few
1298 // clients bother to prepare() kernel memory.
1299 // But it was not enforced so what are you going to do?
1300 if (task == kernel_task)
1301 mdOpts |= kIOMemoryAutoPrepare;
1302 }
1303 else
1304 mdOpts |= kIOMemoryTypePhysical;
1305
1306 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1307 }
1308 #endif /* !__LP64__ */
1309
1310 /*
1311 * initWithOptions:
1312 *
1313 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1314 * address ranges from a given task, several physical ranges, a UPL from the
1315 * ubc system, or a uio (may be 64-bit) from the BSD subsystem.
1316 *
1317 * Passing the ranges as a reference will avoid an extra allocation.
1318 *
1319 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1320 * existing instance -- note this behavior is not commonly supported in other
1321 * I/O Kit classes, although it is supported here.
1322 */
1323
1324 bool
1325 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1326 UInt32 count,
1327 UInt32 offset,
1328 task_t task,
1329 IOOptionBits options,
1330 IOMapper * mapper)
1331 {
1332 IOOptionBits type = options & kIOMemoryTypeMask;
1333
1334 #ifndef __LP64__
1335 if (task
1336 && (kIOMemoryTypeVirtual == type)
1337 && vm_map_is_64bit(get_task_map(task))
1338 && ((IOVirtualRange *) buffers)->address)
1339 {
1340 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1341 return false;
1342 }
1343 #endif /* !__LP64__ */
1344
1345 // Grab the original MD's configuration data to initialise the
1346 // arguments to this function.
1347 if (kIOMemoryTypePersistentMD == type) {
1348
1349 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1350 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1351 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1352
1353 // Only accept persistent memory descriptors with valid dataP data.
1354 assert(orig->_rangesCount == 1);
1355 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1356 return false;
1357
1358 _memRef = initData->fMemRef; // Grab the new named entry
1359 options = orig->_flags & ~kIOMemoryAsReference;
1360 type = options & kIOMemoryTypeMask;
1361 buffers = orig->_ranges.v;
1362 count = orig->_rangesCount;
1363
1364 // Now grab the original task and whatever mapper was previously used
1365 task = orig->_task;
1366 mapper = dataP->fMapper;
1367
1368 // We are ready to go through the original initialisation now
1369 }
1370
1371 switch (type) {
1372 case kIOMemoryTypeUIO:
1373 case kIOMemoryTypeVirtual:
1374 #ifndef __LP64__
1375 case kIOMemoryTypeVirtual64:
1376 #endif /* !__LP64__ */
1377 assert(task);
1378 if (!task)
1379 return false;
1380 break;
1381
1382 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1383 #ifndef __LP64__
1384 case kIOMemoryTypePhysical64:
1385 #endif /* !__LP64__ */
1386 case kIOMemoryTypeUPL:
1387 assert(!task);
1388 break;
1389 default:
1390 return false; /* bad argument */
1391 }
1392
1393 assert(buffers);
1394 assert(count);
1395
1396 /*
1397 * We can check the _initialized instance variable before having ever set
1398 * it to an initial value because I/O Kit guarantees that all our instance
1399 * variables are zeroed on an object's allocation.
1400 */
1401
1402 if (_initialized) {
1403 /*
1404 * An existing memory descriptor is being retargeted to point to
1405 * somewhere else. Clean up our present state.
1406 */
1407 IOOptionBits type = _flags & kIOMemoryTypeMask;
1408 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1409 {
1410 while (_wireCount)
1411 complete();
1412 }
1413 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1414 {
1415 if (kIOMemoryTypeUIO == type)
1416 uio_free((uio_t) _ranges.v);
1417 #ifndef __LP64__
1418 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1419 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1420 #endif /* !__LP64__ */
1421 else
1422 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1423 }
1424
1425 options |= (kIOMemoryRedirected & _flags);
1426 if (!(kIOMemoryRedirected & options))
1427 {
1428 if (_memRef)
1429 {
1430 memoryReferenceRelease(_memRef);
1431 _memRef = 0;
1432 }
1433 if (_mappings)
1434 _mappings->flushCollection();
1435 }
1436 }
1437 else {
1438 if (!super::init())
1439 return false;
1440 _initialized = true;
1441 }
1442
1443 // Grab the appropriate mapper
1444 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1445 if (kIOMemoryMapperNone & options)
1446 mapper = 0; // No Mapper
1447 else if (mapper == kIOMapperSystem) {
1448 IOMapper::checkForSystemMapper();
1449 gIOSystemMapper = mapper = IOMapper::gSystem;
1450 }
1451
1452 // Temp binary compatibility for kIOMemoryThreadSafe
1453 if (kIOMemoryReserved6156215 & options)
1454 {
1455 options &= ~kIOMemoryReserved6156215;
1456 options |= kIOMemoryThreadSafe;
1457 }
1458 // Remove the dynamic internal use flags from the initial setting
1459 options &= ~(kIOMemoryPreparedReadOnly);
1460 _flags = options;
1461 _task = task;
1462
1463 #ifndef __LP64__
1464 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1465 #endif /* !__LP64__ */
1466
1467 __iomd_reservedA = 0;
1468 __iomd_reservedB = 0;
1469 _highestPage = 0;
1470
1471 if (kIOMemoryThreadSafe & options)
1472 {
1473 if (!_prepareLock)
1474 _prepareLock = IOLockAlloc();
1475 }
1476 else if (_prepareLock)
1477 {
1478 IOLockFree(_prepareLock);
1479 _prepareLock = NULL;
1480 }
1481
1482 if (kIOMemoryTypeUPL == type) {
1483
1484 ioGMDData *dataP;
1485 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1486
1487 if (!initMemoryEntries(dataSize, mapper)) return (false);
1488 dataP = getDataP(_memoryEntries);
1489 dataP->fPageCnt = 0;
1490
1491 // _wireCount++; // UPLs start out life wired
1492
1493 _length = count;
1494 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1495
1496 ioPLBlock iopl;
1497 iopl.fIOPL = (upl_t) buffers;
1498 upl_set_referenced(iopl.fIOPL, true);
1499 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1500
1501 if (upl_get_size(iopl.fIOPL) < (count + offset))
1502 panic("short external upl");
1503
1504 _highestPage = upl_get_highest_page(iopl.fIOPL);
1505
1506 // Set the flag kIOPLOnDevice (conveniently equal to 1)
1507 iopl.fFlags = pageList->device | kIOPLExternUPL;
1508 if (!pageList->device) {
1509 // Pre-compute the offset into the UPL's page list
1510 pageList = &pageList[atop_32(offset)];
1511 offset &= PAGE_MASK;
1512 }
1513 iopl.fIOMDOffset = 0;
1514 iopl.fMappedPage = 0;
1515 iopl.fPageInfo = (vm_address_t) pageList;
1516 iopl.fPageOffset = offset;
1517 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1518 }
1519 else {
1520 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1521 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1522
1523 // Initialize the memory descriptor
1524 if (options & kIOMemoryAsReference) {
1525 #ifndef __LP64__
1526 _rangesIsAllocated = false;
1527 #endif /* !__LP64__ */
1528
1529 // Hack assignment to get the buffer arg into _ranges.
1530 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1531 // work, C++ sigh.
1532 // This also initialises the uio & physical ranges.
1533 _ranges.v = (IOVirtualRange *) buffers;
1534 }
1535 else {
1536 #ifndef __LP64__
1537 _rangesIsAllocated = true;
1538 #endif /* !__LP64__ */
1539 switch (type)
1540 {
1541 case kIOMemoryTypeUIO:
1542 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1543 break;
1544
1545 #ifndef __LP64__
1546 case kIOMemoryTypeVirtual64:
1547 case kIOMemoryTypePhysical64:
1548 if (count == 1
1549 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1550 ) {
1551 if (kIOMemoryTypeVirtual64 == type)
1552 type = kIOMemoryTypeVirtual;
1553 else
1554 type = kIOMemoryTypePhysical;
1555 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1556 _rangesIsAllocated = false;
1557 _ranges.v = &_singleRange.v;
1558 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1559 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1560 break;
1561 }
1562 _ranges.v64 = IONew(IOAddressRange, count);
1563 if (!_ranges.v64)
1564 return false;
1565 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1566 break;
1567 #endif /* !__LP64__ */
1568 case kIOMemoryTypeVirtual:
1569 case kIOMemoryTypePhysical:
1570 if (count == 1) {
1571 _flags |= kIOMemoryAsReference;
1572 #ifndef __LP64__
1573 _rangesIsAllocated = false;
1574 #endif /* !__LP64__ */
1575 _ranges.v = &_singleRange.v;
1576 } else {
1577 _ranges.v = IONew(IOVirtualRange, count);
1578 if (!_ranges.v)
1579 return false;
1580 }
1581 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1582 break;
1583 }
1584 }
1585
1586 // Find starting address within the vector of ranges
1587 Ranges vec = _ranges;
1588 mach_vm_size_t totalLength = 0;
1589 unsigned int ind, pages = 0;
1590 for (ind = 0; ind < count; ind++) {
1591 mach_vm_address_t addr;
1592 mach_vm_address_t endAddr;
1593 mach_vm_size_t len;
1594
1595 // addr & len are returned by this function
1596 getAddrLenForInd(addr, len, type, vec, ind);
1597 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
1598 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
1599 if (os_add_overflow(totalLength, len, &totalLength)) break;
1600 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1601 {
1602 ppnum_t highPage = atop_64(addr + len - 1);
1603 if (highPage > _highestPage)
1604 _highestPage = highPage;
1605 }
1606 }
1607 if ((ind < count)
1608 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1609
1610 _length = totalLength;
1611 _pages = pages;
1612 _rangesCount = count;
1613
1614 // Auto-prepare memory at creation time.
1615 // Implied completion when the descriptor is freed.
1616 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1617 _wireCount++; // Physical MDs are, by definition, wired
1618 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1619 ioGMDData *dataP;
1620 unsigned dataSize;
1621
1622 if (_pages > atop_64(max_mem)) return false;
1623
1624 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1625 if (!initMemoryEntries(dataSize, mapper)) return false;
1626 dataP = getDataP(_memoryEntries);
1627 dataP->fPageCnt = _pages;
1628
1629 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1630 {
1631 IOReturn
1632 err = memoryReferenceCreate(0, &_memRef);
1633 if (kIOReturnSuccess != err) return false;
1634 }
1635
1636 if ((_flags & kIOMemoryAutoPrepare)
1637 && prepare() != kIOReturnSuccess)
1638 return false;
1639 }
1640 }
1641
1642 return true;
1643 }
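
// Illustrative sketch, not part of the original source: the re-use behaviour
// documented above means an already initialised descriptor can be retargeted by
// calling initWithOptions() again with new ranges. Disabled example; 'gmd',
// 'task', 'newAddr' and 'newLen' are assumptions for illustration only.
#if 0
static bool
exampleRetarget(IOGeneralMemoryDescriptor * gmd, task_t task,
                mach_vm_address_t newAddr, mach_vm_size_t newLen)
{
    IOAddressRange range = { newAddr, newLen };

    // Same option style as withAddressRanges() above uses internally.
    return (gmd->initWithOptions(&range, 1, 0, task,
                                 kIOMemoryTypeVirtual64 | kIODirectionInOut,
                                 /* mapper */ 0));
}
#endif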
1644
1645 /*
1646 * free
1647 *
1648 * Free resources.
1649 */
1650 void IOGeneralMemoryDescriptor::free()
1651 {
1652 IOOptionBits type = _flags & kIOMemoryTypeMask;
1653
1654 if( reserved)
1655 {
1656 LOCK;
1657 reserved->dp.memory = 0;
1658 UNLOCK;
1659 }
1660 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1661 {
1662 ioGMDData * dataP;
1663 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1664 {
1665 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
1666 dataP->fMappedBase = 0;
1667 }
1668 }
1669 else
1670 {
1671 while (_wireCount) complete();
1672 }
1673
1674 if (_memoryEntries) _memoryEntries->release();
1675
1676 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1677 {
1678 if (kIOMemoryTypeUIO == type)
1679 uio_free((uio_t) _ranges.v);
1680 #ifndef __LP64__
1681 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1682 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1683 #endif /* !__LP64__ */
1684 else
1685 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1686
1687 _ranges.v = NULL;
1688 }
1689
1690 if (reserved)
1691 {
1692 if (reserved->dp.devicePager)
1693 {
1694 // memEntry holds a ref on the device pager which owns reserved
1695 // (IOMemoryDescriptorReserved) so no reserved access after this point
1696 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1697 }
1698 else
1699 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1700 reserved = NULL;
1701 }
1702
1703 if (_memRef) memoryReferenceRelease(_memRef);
1704 if (_prepareLock) IOLockFree(_prepareLock);
1705
1706 super::free();
1707 }
1708
1709 #ifndef __LP64__
1710 void IOGeneralMemoryDescriptor::unmapFromKernel()
1711 {
1712 panic("IOGMD::unmapFromKernel deprecated");
1713 }
1714
1715 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1716 {
1717 panic("IOGMD::mapIntoKernel deprecated");
1718 }
1719 #endif /* !__LP64__ */
1720
1721 /*
1722 * getDirection:
1723 *
1724 * Get the direction of the transfer.
1725 */
1726 IODirection IOMemoryDescriptor::getDirection() const
1727 {
1728 #ifndef __LP64__
1729 if (_direction)
1730 return _direction;
1731 #endif /* !__LP64__ */
1732 return (IODirection) (_flags & kIOMemoryDirectionMask);
1733 }
1734
1735 /*
1736 * getLength:
1737 *
1738 * Get the length of the transfer (over all ranges).
1739 */
1740 IOByteCount IOMemoryDescriptor::getLength() const
1741 {
1742 return _length;
1743 }
1744
1745 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1746 {
1747 _tag = tag;
1748 }
1749
1750 IOOptionBits IOMemoryDescriptor::getTag( void )
1751 {
1752 return( _tag);
1753 }
1754
1755 #ifndef __LP64__
1756 #pragma clang diagnostic push
1757 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1758
1759 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1760 IOPhysicalAddress
1761 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1762 {
1763 addr64_t physAddr = 0;
1764
1765 if( prepare() == kIOReturnSuccess) {
1766 physAddr = getPhysicalSegment64( offset, length );
1767 complete();
1768 }
1769
1770 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1771 }
1772
1773 #pragma clang diagnostic pop
1774
1775 #endif /* !__LP64__ */
1776
1777 IOByteCount IOMemoryDescriptor::readBytes
1778 (IOByteCount offset, void *bytes, IOByteCount length)
1779 {
1780 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1781 IOByteCount remaining;
1782
1783 // Assert that this entire I/O is within the available range
1784 assert(offset <= _length);
1785 assert(offset + length <= _length);
1786 if ((offset >= _length)
1787 || ((offset + length) > _length)) {
1788 return 0;
1789 }
1790
1791 if (kIOMemoryThreadSafe & _flags)
1792 LOCK;
1793
1794 remaining = length = min(length, _length - offset);
1795 while (remaining) { // (process another target segment?)
1796 addr64_t srcAddr64;
1797 IOByteCount srcLen;
1798
1799 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1800 if (!srcAddr64)
1801 break;
1802
1803 // Clip segment length to remaining
1804 if (srcLen > remaining)
1805 srcLen = remaining;
1806
1807 copypv(srcAddr64, dstAddr, srcLen,
1808 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1809
1810 dstAddr += srcLen;
1811 offset += srcLen;
1812 remaining -= srcLen;
1813 }
1814
1815 if (kIOMemoryThreadSafe & _flags)
1816 UNLOCK;
1817
1818 assert(!remaining);
1819
1820 return length - remaining;
1821 }
1822
1823 IOByteCount IOMemoryDescriptor::writeBytes
1824 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1825 {
1826 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1827 IOByteCount remaining;
1828 IOByteCount offset = inoffset;
1829
1830 // Assert that this entire I/O is within the available range
1831 assert(offset <= _length);
1832 assert(offset + length <= _length);
1833
1834 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1835
1836 if ( (kIOMemoryPreparedReadOnly & _flags)
1837 || (offset >= _length)
1838 || ((offset + length) > _length)) {
1839 return 0;
1840 }
1841
1842 if (kIOMemoryThreadSafe & _flags)
1843 LOCK;
1844
1845 remaining = length = min(length, _length - offset);
1846 while (remaining) { // (process another target segment?)
1847 addr64_t dstAddr64;
1848 IOByteCount dstLen;
1849
1850 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1851 if (!dstAddr64)
1852 break;
1853
1854 // Clip segment length to remaining
1855 if (dstLen > remaining)
1856 dstLen = remaining;
1857
1858 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1859 else
1860 {
1861 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1862 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1863 srcAddr += dstLen;
1864 }
1865 offset += dstLen;
1866 remaining -= dstLen;
1867 }
1868
1869 if (kIOMemoryThreadSafe & _flags)
1870 UNLOCK;
1871
1872 assert(!remaining);
1873
1874 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1875
1876 return length - remaining;
1877 }
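
// Illustrative sketch, not part of the original source: readBytes()/writeBytes()
// copy through the physical segments with copypv(), so they work regardless of
// whether the descriptor has a kernel mapping. Disabled example; 'md' is an
// assumed descriptor that has been prepare()d.
#if 0
static void
exampleCopyInOut(IOMemoryDescriptor * md)
{
    uint8_t     header[64];
    IOByteCount got = md->readBytes(0, &header[0], sizeof(header));

    if (sizeof(header) == got)
    {
        // ... inspect header, then write one status byte back at offset 0 ...
        md->writeBytes(0, &header[0], 1);
    }
}
#endif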
1878
1879 #ifndef __LP64__
1880 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1881 {
1882 panic("IOGMD::setPosition deprecated");
1883 }
1884 #endif /* !__LP64__ */
1885
1886 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1887
1888 uint64_t
1889 IOGeneralMemoryDescriptor::getPreparationID( void )
1890 {
1891 ioGMDData *dataP;
1892
1893 if (!_wireCount)
1894 return (kIOPreparationIDUnprepared);
1895
1896 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1897 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1898 {
1899 IOMemoryDescriptor::setPreparationID();
1900 return (IOMemoryDescriptor::getPreparationID());
1901 }
1902
1903 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1904 return (kIOPreparationIDUnprepared);
1905
1906 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1907 {
1908 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1909 }
1910 return (dataP->fPreparationID);
1911 }
1912
1913 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1914 {
1915 if (!reserved)
1916 {
1917 reserved = IONew(IOMemoryDescriptorReserved, 1);
1918 if (reserved)
1919 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1920 }
1921 return (reserved);
1922 }
1923
1924 void IOMemoryDescriptor::setPreparationID( void )
1925 {
1926 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1927 {
1928 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1929 }
1930 }
1931
1932 uint64_t IOMemoryDescriptor::getPreparationID( void )
1933 {
1934 if (reserved)
1935 return (reserved->preparationID);
1936 else
1937 return (kIOPreparationIDUnsupported);
1938 }
1939
1940 void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
1941 {
1942 if (!getKernelReserved()) return;
1943 reserved->kernelTag = kernelTag;
1944 reserved->userTag = userTag;
1945 }
1946
1947 vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
1948 {
1949 if (!reserved
1950 || (VM_KERN_MEMORY_NONE == reserved->kernelTag)
1951 || (VM_KERN_MEMORY_NONE == reserved->userTag))
1952 {
1953 return (IOMemoryTag(map));
1954 }
1955
1956 if (vm_kernel_map_is_kernel(map)) return (reserved->kernelTag);
1957 return (reserved->userTag);
1958 }
1959
1960 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1961 {
1962 IOReturn err = kIOReturnSuccess;
1963 DMACommandOps params;
1964 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1965 ioGMDData *dataP;
1966
1967 params = (op & ~kIOMDDMACommandOperationMask & op);
1968 op &= kIOMDDMACommandOperationMask;
1969
1970 if (kIOMDDMAMap == op)
1971 {
1972 if (dataSize < sizeof(IOMDDMAMapArgs))
1973 return kIOReturnUnderrun;
1974
1975 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1976
1977 if (!_memoryEntries
1978 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1979
1980 if (_memoryEntries && data->fMapper)
1981 {
1982 bool remap, keepMap;
1983 dataP = getDataP(_memoryEntries);
1984
1985 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1986 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1987
1988 keepMap = (data->fMapper == gIOSystemMapper);
1989 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
1990
1991 remap = (!keepMap);
1992 remap |= (dataP->fDMAMapNumAddressBits < 64)
1993 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1994 remap |= (dataP->fDMAMapAlignment > page_size);
1995
1996 if (remap || !dataP->fMappedBase)
1997 {
1998 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1999 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2000 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
2001 {
2002 dataP->fMappedBase = data->fAlloc;
2003 dataP->fMappedLength = data->fAllocLength;
2004 data->fAllocLength = 0; // IOMD owns the alloc now
2005 }
2006 }
2007 else
2008 {
2009 data->fAlloc = dataP->fMappedBase;
2010 data->fAllocLength = 0; // give out IOMD map
2011 }
2012 data->fMapContig = !dataP->fDiscontig;
2013 }
2014
2015 return (err);
2016 }
2017
2018 if (kIOMDAddDMAMapSpec == op)
2019 {
2020 if (dataSize < sizeof(IODMAMapSpecification))
2021 return kIOReturnUnderrun;
2022
2023 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2024
2025 if (!_memoryEntries
2026 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2027
2028 if (_memoryEntries)
2029 {
2030 dataP = getDataP(_memoryEntries);
2031 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2032 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2033 if (data->alignment > dataP->fDMAMapAlignment)
2034 dataP->fDMAMapAlignment = data->alignment;
2035 }
2036 return kIOReturnSuccess;
2037 }
2038
2039 if (kIOMDGetCharacteristics == op) {
2040
2041 if (dataSize < sizeof(IOMDDMACharacteristics))
2042 return kIOReturnUnderrun;
2043
2044 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2045 data->fLength = _length;
2046 data->fSGCount = _rangesCount;
2047 data->fPages = _pages;
2048 data->fDirection = getDirection();
2049 if (!_wireCount)
2050 data->fIsPrepared = false;
2051 else {
2052 data->fIsPrepared = true;
2053 data->fHighestPage = _highestPage;
2054 if (_memoryEntries)
2055 {
2056 dataP = getDataP(_memoryEntries);
2057 ioPLBlock *ioplList = getIOPLList(dataP);
2058 UInt count = getNumIOPL(_memoryEntries, dataP);
2059 if (count == 1)
2060 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2061 }
2062 }
2063
2064 return kIOReturnSuccess;
2065
2066 } else if (kIOMDWalkSegments != op)
2067 return kIOReturnBadArgument;
2068
2069 // Get the next segment
2070 struct InternalState {
2071 IOMDDMAWalkSegmentArgs fIO;
2072 UInt fOffset2Index;
2073 UInt fIndex;
2074 UInt fNextOffset;
2075 } *isP;
2076
2077 // Find the next segment
2078 if (dataSize < sizeof(*isP))
2079 return kIOReturnUnderrun;
2080
2081 isP = (InternalState *) vData;
2082 UInt offset = isP->fIO.fOffset;
2083 bool mapped = isP->fIO.fMapped;
2084
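// When a mapped (DMA) address is wanted, a system mapper exists, the memory is not
// host-only, and no mapping has been made yet, map the whole descriptor up front
// before walking segments.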
2085 if (IOMapper::gSystem && mapped
2086 && (!(kIOMemoryHostOnly & _flags))
2087 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2088 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2089 {
2090 if (!_memoryEntries
2091 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2092
2093 dataP = getDataP(_memoryEntries);
2094 if (dataP->fMapper)
2095 {
2096 IODMAMapSpecification mapSpec;
2097 bzero(&mapSpec, sizeof(mapSpec));
2098 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2099 mapSpec.alignment = dataP->fDMAMapAlignment;
2100 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2101 if (kIOReturnSuccess != err) return (err);
2102 }
2103 }
2104
2105 if (offset >= _length)
2106 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2107
2108 // Validate the previous offset
2109 UInt ind, off2Ind = isP->fOffset2Index;
2110 if (!params
2111 && offset
2112 && (offset == isP->fNextOffset || off2Ind <= offset))
2113 ind = isP->fIndex;
2114 else
2115 ind = off2Ind = 0; // Start from beginning
2116
2117 UInt length;
2118 UInt64 address;
2119
2120
2121 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2122
2123 // Physical address based memory descriptor
2124 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2125
2126 // Find the range after the one that contains the offset
2127 mach_vm_size_t len;
2128 for (len = 0; off2Ind <= offset; ind++) {
2129 len = physP[ind].length;
2130 off2Ind += len;
2131 }
2132
2133 // Calculate length within range and starting address
2134 length = off2Ind - offset;
2135 address = physP[ind - 1].address + len - length;
2136
2137 if (true && mapped && _memoryEntries
2138 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2139 {
2140 address = dataP->fMappedBase + offset;
2141 }
2142 else
2143 {
2144 // see how far we can coalesce ranges
2145 while (ind < _rangesCount && address + length == physP[ind].address) {
2146 len = physP[ind].length;
2147 length += len;
2148 off2Ind += len;
2149 ind++;
2150 }
2151 }
2152
2153 // correct contiguous check overshoot
2154 ind--;
2155 off2Ind -= len;
2156 }
2157 #ifndef __LP64__
2158 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2159
2160 // Physical address based memory descriptor
2161 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2162
2163 // Find the range after the one that contains the offset
2164 mach_vm_size_t len;
2165 for (len = 0; off2Ind <= offset; ind++) {
2166 len = physP[ind].length;
2167 off2Ind += len;
2168 }
2169
2170 // Calculate length within range and starting address
2171 length = off2Ind - offset;
2172 address = physP[ind - 1].address + len - length;
2173
2174 if (true && mapped && _memoryEntries
2175 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2176 {
2177 address = dataP->fMappedBase + offset;
2178 }
2179 else
2180 {
2181 // see how far we can coalesce ranges
2182 while (ind < _rangesCount && address + length == physP[ind].address) {
2183 len = physP[ind].length;
2184 length += len;
2185 off2Ind += len;
2186 ind++;
2187 }
2188 }
2189 // correct contiguous check overshoot
2190 ind--;
2191 off2Ind -= len;
2192 }
2193 #endif /* !__LP64__ */
2194 else do {
2195 if (!_wireCount)
2196 panic("IOGMD: not wired for the IODMACommand");
2197
2198 assert(_memoryEntries);
2199
2200 dataP = getDataP(_memoryEntries);
2201 const ioPLBlock *ioplList = getIOPLList(dataP);
2202 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2203 upl_page_info_t *pageList = getPageList(dataP);
2204
2205 assert(numIOPLs > 0);
2206
2207 // Scan through iopl info blocks looking for block containing offset
2208 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2209 ind++;
2210
2211 // Go back to actual range as search goes past it
2212 ioPLBlock ioplInfo = ioplList[ind - 1];
2213 off2Ind = ioplInfo.fIOMDOffset;
2214
2215 if (ind < numIOPLs)
2216 length = ioplList[ind].fIOMDOffset;
2217 else
2218 length = _length;
2219 length -= offset; // Remainder within iopl
2220
2221 // Subtract offset till this iopl in total list
2222 offset -= off2Ind;
2223
2224 // If a mapped address is requested and this is a pre-mapped IOPL
2225 // then we just need to compute an offset relative to the mapped base.
2226 if (mapped && dataP->fMappedBase) {
2227 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2228 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2229 continue; // Done; leave the do/while(false) now
2230 }
2231
2232 // The offset is rebased into the current iopl.
2233 // Now add the iopl 1st page offset.
2234 offset += ioplInfo.fPageOffset;
2235
2236 // For external UPLs the fPageInfo field points directly to
2237 // the upl's upl_page_info_t array.
2238 if (ioplInfo.fFlags & kIOPLExternUPL)
2239 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2240 else
2241 pageList = &pageList[ioplInfo.fPageInfo];
2242
2243 // Check for direct device non-paged memory
2244 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2245 address = ptoa_64(pageList->phys_addr) + offset;
2246 continue; // Done; leave the do/while(false) now
2247 }
2248
2249 // Now we need to compute the index into the pageList
2250 UInt pageInd = atop_32(offset);
2251 offset &= PAGE_MASK;
2252
2253 // Compute the starting address of this segment
2254 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2255 if (!pageAddr) {
2256 panic("!pageList phys_addr");
2257 }
2258
2259 address = ptoa_64(pageAddr) + offset;
2260
2261 // length is currently set to the length of the remainder of the iopl.
2262 // We need to check that the remainder of the iopl is contiguous.
2263 // This is indicated by pageList[ind].phys_addr being sequential.
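// Illustrative note (not in the original source): with a 4 KB page size and an
// intra-page offset of 0x200, contigLength starts at 0xE00; each physically
// sequential page found by the loop below adds another 0x1000, until either the
// requested length is covered or the run of sequential phys_addr values ends.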
2264 IOByteCount contigLength = PAGE_SIZE - offset;
2265 while (contigLength < length
2266 && ++pageAddr == pageList[++pageInd].phys_addr)
2267 {
2268 contigLength += PAGE_SIZE;
2269 }
2270
2271 if (contigLength < length)
2272 length = contigLength;
2273
2274
2275 assert(address);
2276 assert(length);
2277
2278 } while (false);
2279
2280 // Update return values and state
2281 isP->fIO.fIOVMAddr = address;
2282 isP->fIO.fLength = length;
2283 isP->fIndex = ind;
2284 isP->fOffset2Index = off2Ind;
2285 isP->fNextOffset = isP->fIO.fOffset + length;
2286
2287 return kIOReturnSuccess;
2288 }
2289
2290 addr64_t
2291 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2292 {
2293 IOReturn ret;
2294 mach_vm_address_t address = 0;
2295 mach_vm_size_t length = 0;
2296 IOMapper * mapper = gIOSystemMapper;
2297 IOOptionBits type = _flags & kIOMemoryTypeMask;
2298
2299 if (lengthOfSegment)
2300 *lengthOfSegment = 0;
2301
2302 if (offset >= _length)
2303 return 0;
2304
2305 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2306 // support the unwired memory case in IOGeneralMemoryDescriptor. hibernate_write_image() cannot use
2307 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2308 // due to IOMemoryMap. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
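// Illustrative note (not in the original source): getSourceSegment(offset, &len),
// defined further below, is simply getPhysicalSegment(offset, &len, _kIOMemorySourceSegment),
// i.e. it walks the original _ranges vector here and returns the untranslated source
// address rather than a mapper/DMA address.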
2309
2310 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2311 {
2312 unsigned rangesIndex = 0;
2313 Ranges vec = _ranges;
2314 mach_vm_address_t addr;
2315
2316 // Find starting address within the vector of ranges
2317 for (;;) {
2318 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2319 if (offset < length)
2320 break;
2321 offset -= length; // (make offset relative)
2322 rangesIndex++;
2323 }
2324
2325 // Now that we have the starting range,
2326 // let's find the last contiguous range
2327 addr += offset;
2328 length -= offset;
2329
2330 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2331 mach_vm_address_t newAddr;
2332 mach_vm_size_t newLen;
2333
2334 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2335 if (addr + length != newAddr)
2336 break;
2337 length += newLen;
2338 }
2339 if (addr)
2340 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2341 }
2342 else
2343 {
2344 IOMDDMAWalkSegmentState _state;
2345 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2346
2347 state->fOffset = offset;
2348 state->fLength = _length - offset;
2349 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2350
2351 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2352
2353 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2354 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2355 ret, this, state->fOffset,
2356 state->fIOVMAddr, state->fLength);
2357 if (kIOReturnSuccess == ret)
2358 {
2359 address = state->fIOVMAddr;
2360 length = state->fLength;
2361 }
2362
2363 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2364 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2365
2366 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2367 {
2368 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2369 {
2370 addr64_t origAddr = address;
2371 IOByteCount origLen = length;
2372
2373 address = mapper->mapToPhysicalAddress(origAddr);
2374 length = page_size - (address & (page_size - 1));
2375 while ((length < origLen)
2376 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2377 length += page_size;
2378 if (length > origLen)
2379 length = origLen;
2380 }
2381 }
2382 }
2383
2384 if (!address)
2385 length = 0;
2386
2387 if (lengthOfSegment)
2388 *lengthOfSegment = length;
2389
2390 return (address);
2391 }
2392
2393 #ifndef __LP64__
2394 #pragma clang diagnostic push
2395 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2396
2397 addr64_t
2398 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2399 {
2400 addr64_t address = 0;
2401
2402 if (options & _kIOMemorySourceSegment)
2403 {
2404 address = getSourceSegment(offset, lengthOfSegment);
2405 }
2406 else if (options & kIOMemoryMapperNone)
2407 {
2408 address = getPhysicalSegment64(offset, lengthOfSegment);
2409 }
2410 else
2411 {
2412 address = getPhysicalSegment(offset, lengthOfSegment);
2413 }
2414
2415 return (address);
2416 }
2417 #pragma clang diagnostic pop
2418
2419 addr64_t
2420 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2421 {
2422 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2423 }
2424
2425 IOPhysicalAddress
2426 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2427 {
2428 addr64_t address = 0;
2429 IOByteCount length = 0;
2430
2431 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2432
2433 if (lengthOfSegment)
2434 length = *lengthOfSegment;
2435
2436 if ((address + length) > 0x100000000ULL)
2437 {
2438 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2439 address, (long) length, (getMetaClass())->getClassName());
2440 }
2441
2442 return ((IOPhysicalAddress) address);
2443 }
2444
2445 addr64_t
2446 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2447 {
2448 IOPhysicalAddress phys32;
2449 IOByteCount length;
2450 addr64_t phys64;
2451 IOMapper * mapper = 0;
2452
2453 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2454 if (!phys32)
2455 return 0;
2456
2457 if (gIOSystemMapper)
2458 mapper = gIOSystemMapper;
2459
2460 if (mapper)
2461 {
2462 IOByteCount origLen;
2463
2464 phys64 = mapper->mapToPhysicalAddress(phys32);
2465 origLen = *lengthOfSegment;
2466 length = page_size - (phys64 & (page_size - 1));
2467 while ((length < origLen)
2468 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2469 length += page_size;
2470 if (length > origLen)
2471 length = origLen;
2472
2473 *lengthOfSegment = length;
2474 }
2475 else
2476 phys64 = (addr64_t) phys32;
2477
2478 return phys64;
2479 }
2480
2481 IOPhysicalAddress
2482 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2483 {
2484 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2485 }
2486
2487 IOPhysicalAddress
2488 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2489 {
2490 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2491 }
2492
2493 #pragma clang diagnostic push
2494 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2495
2496 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2497 IOByteCount * lengthOfSegment)
2498 {
2499 if (_task == kernel_task)
2500 return (void *) getSourceSegment(offset, lengthOfSegment);
2501 else
2502 panic("IOGMD::getVirtualSegment deprecated");
2503
2504 return 0;
2505 }
2506 #pragma clang diagnostic pop
2507 #endif /* !__LP64__ */
2508
2509 IOReturn
2510 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2511 {
2512 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2513 DMACommandOps params;
2514 IOReturn err;
2515
2516 params = (op & ~kIOMDDMACommandOperationMask & op);
2517 op &= kIOMDDMACommandOperationMask;
2518
2519 if (kIOMDGetCharacteristics == op) {
2520 if (dataSize < sizeof(IOMDDMACharacteristics))
2521 return kIOReturnUnderrun;
2522
2523 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2524 data->fLength = getLength();
2525 data->fSGCount = 0;
2526 data->fDirection = getDirection();
2527 data->fIsPrepared = true; // Assume prepared - fails safe
2528 }
2529 else if (kIOMDWalkSegments == op) {
2530 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2531 return kIOReturnUnderrun;
2532
2533 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2534 IOByteCount offset = (IOByteCount) data->fOffset;
2535
2536 IOPhysicalLength length;
2537 if (data->fMapped && IOMapper::gSystem)
2538 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2539 else
2540 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2541 data->fLength = length;
2542 }
2543 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2544 else if (kIOMDDMAMap == op)
2545 {
2546 if (dataSize < sizeof(IOMDDMAMapArgs))
2547 return kIOReturnUnderrun;
2548 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2549
2550 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2551
2552 data->fMapContig = true;
2553 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2554 return (err);
2555 }
2556 else return kIOReturnBadArgument;
2557
2558 return kIOReturnSuccess;
2559 }
2560
2561 IOReturn
2562 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2563 IOOptionBits * oldState )
2564 {
2565 IOReturn err = kIOReturnSuccess;
2566
2567 vm_purgable_t control;
2568 int state;
2569
2570 if (_memRef)
2571 {
2572 err = super::setPurgeable(newState, oldState);
2573 }
2574 else
2575 {
2576 if (kIOMemoryThreadSafe & _flags)
2577 LOCK;
2578 do
2579 {
2580 // Find the appropriate vm_map for the given task
2581 vm_map_t curMap;
2582 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2583 {
2584 err = kIOReturnNotReady;
2585 break;
2586 }
2587 else if (!_task)
2588 {
2589 err = kIOReturnUnsupported;
2590 break;
2591 }
2592 else
2593 curMap = get_task_map(_task);
2594
2595 // can only do one range
2596 Ranges vec = _ranges;
2597 IOOptionBits type = _flags & kIOMemoryTypeMask;
2598 mach_vm_address_t addr;
2599 mach_vm_size_t len;
2600 getAddrLenForInd(addr, len, type, vec, 0);
2601
2602 err = purgeableControlBits(newState, &control, &state);
2603 if (kIOReturnSuccess != err)
2604 break;
2605 err = mach_vm_purgable_control(curMap, addr, control, &state);
2606 if (oldState)
2607 {
2608 if (kIOReturnSuccess == err)
2609 {
2610 err = purgeableStateBits(&state);
2611 *oldState = state;
2612 }
2613 }
2614 }
2615 while (false);
2616 if (kIOMemoryThreadSafe & _flags)
2617 UNLOCK;
2618 }
2619
2620 return (err);
2621 }
2622
2623 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2624 IOOptionBits * oldState )
2625 {
2626 IOReturn err = kIOReturnNotReady;
2627
2628 if (kIOMemoryThreadSafe & _flags) LOCK;
2629 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2630 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2631
2632 return (err);
2633 }
2634
2635 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2636 IOByteCount * dirtyPageCount )
2637 {
2638 IOReturn err = kIOReturnNotReady;
2639
2640 if (kIOMemoryThreadSafe & _flags) LOCK;
2641 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2642 else
2643 {
2644 IOMultiMemoryDescriptor * mmd;
2645 IOSubMemoryDescriptor * smd;
2646 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2647 {
2648 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2649 }
2650 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2651 {
2652 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2653 }
2654 }
2655 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2656
2657 return (err);
2658 }
2659
2660
2661 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2662 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2663
2664 static void SetEncryptOp(addr64_t pa, unsigned int count)
2665 {
2666 ppnum_t page, end;
2667
2668 page = atop_64(round_page_64(pa));
2669 end = atop_64(trunc_page_64(pa + count));
2670 for (; page < end; page++)
2671 {
2672 pmap_clear_noencrypt(page);
2673 }
2674 }
2675
2676 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2677 {
2678 ppnum_t page, end;
2679
2680 page = atop_64(round_page_64(pa));
2681 end = atop_64(trunc_page_64(pa + count));
2682 for (; page < end; page++)
2683 {
2684 pmap_set_noencrypt(page);
2685 }
2686 }
2687
2688 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2689 IOByteCount offset, IOByteCount length )
2690 {
2691 IOByteCount remaining;
2692 unsigned int res;
2693 void (*func)(addr64_t pa, unsigned int count) = 0;
2694
2695 switch (options)
2696 {
2697 case kIOMemoryIncoherentIOFlush:
2698 func = &dcache_incoherent_io_flush64;
2699 break;
2700 case kIOMemoryIncoherentIOStore:
2701 func = &dcache_incoherent_io_store64;
2702 break;
2703
2704 case kIOMemorySetEncrypted:
2705 func = &SetEncryptOp;
2706 break;
2707 case kIOMemoryClearEncrypted:
2708 func = &ClearEncryptOp;
2709 break;
2710 }
2711
2712 if (!func)
2713 return (kIOReturnUnsupported);
2714
2715 if (kIOMemoryThreadSafe & _flags)
2716 LOCK;
2717
2718 res = 0x0UL;
2719 remaining = length = min(length, getLength() - offset);
2720 while (remaining)
2721 // (process another target segment?)
2722 {
2723 addr64_t dstAddr64;
2724 IOByteCount dstLen;
2725
2726 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2727 if (!dstAddr64)
2728 break;
2729
2730 // Clip segment length to remaining
2731 if (dstLen > remaining)
2732 dstLen = remaining;
2733
2734 (*func)(dstAddr64, dstLen);
2735
2736 offset += dstLen;
2737 remaining -= dstLen;
2738 }
2739
2740 if (kIOMemoryThreadSafe & _flags)
2741 UNLOCK;
2742
2743 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2744 }
2745
2746 /*
2747 *
2748 */
2749
2750 #if defined(__i386__) || defined(__x86_64__)
2751
2752 #define io_kernel_static_start vm_kernel_stext
2753 #define io_kernel_static_end vm_kernel_etext
2754
2755 #else
2756 #error io_kernel_static_end is undefined for this architecture
2757 #endif
2758
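// Build a page list for wired, static kernel text/data without creating a real UPL
// (*upl is left NULL); fails with kIOReturnVMError if any page has no physical
// translation.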
2759 static kern_return_t
2760 io_get_kernel_static_upl(
2761 vm_map_t /* map */,
2762 uintptr_t offset,
2763 upl_size_t *upl_size,
2764 upl_t *upl,
2765 upl_page_info_array_t page_list,
2766 unsigned int *count,
2767 ppnum_t *highest_page)
2768 {
2769 unsigned int pageCount, page;
2770 ppnum_t phys;
2771 ppnum_t highestPage = 0;
2772
2773 pageCount = atop_32(*upl_size);
2774 if (pageCount > *count)
2775 pageCount = *count;
2776
2777 *upl = NULL;
2778
2779 for (page = 0; page < pageCount; page++)
2780 {
2781 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2782 if (!phys)
2783 break;
2784 page_list[page].phys_addr = phys;
2785 page_list[page].free_when_done = 0;
2786 page_list[page].absent = 0;
2787 page_list[page].dirty = 0;
2788 page_list[page].precious = 0;
2789 page_list[page].device = 0;
2790 if (phys > highestPage)
2791 highestPage = phys;
2792 }
2793
2794 *highest_page = highestPage;
2795
2796 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2797 }
2798
2799 /*
2800 *
2801 */
2802 #if IOTRACKING
2803 static void
2804 IOMemoryDescriptorUpdateWireOwner(ioGMDData * dataP, OSData * memoryEntries, vm_tag_t tag)
2805 {
2806 ioPLBlock *ioplList;
2807 UInt ind, count;
2808 vm_tag_t prior;
2809
2810 count = getNumIOPL(memoryEntries, dataP);
2811 if (!count) return;
2812 ioplList = getIOPLList(dataP);
2813
2814 if (VM_KERN_MEMORY_NONE == tag) tag = dataP->fAllocTag;
2815 assert(VM_KERN_MEMORY_NONE != tag);
2816
2817 for (ind = 0; ind < count; ind++)
2818 {
2819 if (!ioplList[ind].fIOPL) continue;
2820 prior = iopl_set_tag(ioplList[ind].fIOPL, tag);
2821 if (VM_KERN_MEMORY_NONE == dataP->fAllocTag) dataP->fAllocTag = prior;
2822 #if 0
2823 if (tag != prior)
2824 {
2825 char name[2][48];
2826 vm_tag_get_kext(prior, &name[0][0], sizeof(name[0]));
2827 vm_tag_get_kext(tag, &name[1][0], sizeof(name[1]));
2828 IOLog("switched %48s to %48s\n", name[0], name[1]);
2829 }
2830 #endif
2831 }
2832 }
2833 #endif /* IOTRACKING */
2834
2835
2836 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2837 {
2838 IOOptionBits type = _flags & kIOMemoryTypeMask;
2839 IOReturn error = kIOReturnSuccess;
2840 ioGMDData *dataP;
2841 upl_page_info_array_t pageInfo;
2842 ppnum_t mapBase;
2843
2844 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2845
2846 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2847 forDirection = (IODirection) (forDirection | getDirection());
2848
2849 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
2850 switch (kIODirectionOutIn & forDirection)
2851 {
2852 case kIODirectionOut:
2853 // Pages do not need to be marked as dirty on commit
2854 uplFlags = UPL_COPYOUT_FROM;
2855 break;
2856
2857 case kIODirectionIn:
2858 default:
2859 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2860 break;
2861 }
2862 dataP = getDataP(_memoryEntries);
2863
2864 if (kIODirectionDMACommand & forDirection) assert(_wireCount);
2865
2866 if (_wireCount)
2867 {
2868 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2869 {
2870 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2871 error = kIOReturnNotWritable;
2872 }
2873 }
2874 else
2875 {
2876 IOMapper *mapper;
2877 mapper = dataP->fMapper;
2878 dataP->fMappedBase = 0;
2879
2880 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2881 uplFlags |= UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));
2882
2883 if (kIODirectionPrepareToPhys32 & forDirection)
2884 {
2885 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2886 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2887 }
2888 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2889 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2890 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2891
2892 mapBase = 0;
2893
2894 // Note that appendBytes(NULL) zeros the data up to the desired length
2895 // and the length parameter is an unsigned int
2896 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2897 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2898 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2899 dataP = 0;
2900
2901 // Find the appropriate vm_map for the given task
2902 vm_map_t curMap;
2903 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2904 else curMap = get_task_map(_task);
2905
2906 // Iterate over the vector of virtual ranges
2907 Ranges vec = _ranges;
2908 unsigned int pageIndex = 0;
2909 IOByteCount mdOffset = 0;
2910 ppnum_t highestPage = 0;
2911
2912 IOMemoryEntry * memRefEntry = 0;
2913 if (_memRef) memRefEntry = &_memRef->entries[0];
2914
2915 for (UInt range = 0; range < _rangesCount; range++) {
2916 ioPLBlock iopl;
2917 mach_vm_address_t startPage;
2918 mach_vm_size_t numBytes;
2919 ppnum_t highPage = 0;
2920
2921 // Get the startPage address and length of vec[range]
2922 getAddrLenForInd(startPage, numBytes, type, vec, range);
2923 iopl.fPageOffset = startPage & PAGE_MASK;
2924 numBytes += iopl.fPageOffset;
2925 startPage = trunc_page_64(startPage);
2926
2927 if (mapper)
2928 iopl.fMappedPage = mapBase + pageIndex;
2929 else
2930 iopl.fMappedPage = 0;
2931
2932 // Iterate over the current range, creating UPLs
2933 while (numBytes) {
2934 vm_address_t kernelStart = (vm_address_t) startPage;
2935 vm_map_t theMap;
2936 if (curMap) theMap = curMap;
2937 else if (_memRef)
2938 {
2939 theMap = NULL;
2940 }
2941 else
2942 {
2943 assert(_task == kernel_task);
2944 theMap = IOPageableMapForAddress(kernelStart);
2945 }
2946
2947 // ioplFlags is an in/out parameter
2948 upl_control_flags_t ioplFlags = uplFlags;
2949 dataP = getDataP(_memoryEntries);
2950 pageInfo = getPageList(dataP);
2951 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2952
2953 mach_vm_size_t _ioplSize = round_page(numBytes);
2954 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
2955 unsigned int numPageInfo = atop_32(ioplSize);
2956
2957 if ((theMap == kernel_map)
2958 && (kernelStart >= io_kernel_static_start)
2959 && (kernelStart < io_kernel_static_end)) {
2960 error = io_get_kernel_static_upl(theMap,
2961 kernelStart,
2962 &ioplSize,
2963 &iopl.fIOPL,
2964 baseInfo,
2965 &numPageInfo,
2966 &highPage);
2967 }
2968 else if (_memRef) {
2969 memory_object_offset_t entryOffset;
2970
2971 entryOffset = mdOffset;
2972 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
2973 if (entryOffset >= memRefEntry->size) {
2974 memRefEntry++;
2975 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2976 entryOffset = 0;
2977 }
2978 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2979 error = memory_object_iopl_request(memRefEntry->entry,
2980 entryOffset,
2981 &ioplSize,
2982 &iopl.fIOPL,
2983 baseInfo,
2984 &numPageInfo,
2985 &ioplFlags);
2986 }
2987 else {
2988 assert(theMap);
2989 error = vm_map_create_upl(theMap,
2990 startPage,
2991 (upl_size_t*)&ioplSize,
2992 &iopl.fIOPL,
2993 baseInfo,
2994 &numPageInfo,
2995 &ioplFlags);
2996 }
2997
2998 if (error != KERN_SUCCESS) goto abortExit;
2999
3000 assert(ioplSize);
3001
3002 if (iopl.fIOPL)
3003 highPage = upl_get_highest_page(iopl.fIOPL);
3004 if (highPage > highestPage)
3005 highestPage = highPage;
3006
3007 if (baseInfo->device) {
3008 numPageInfo = 1;
3009 iopl.fFlags = kIOPLOnDevice;
3010 }
3011 else {
3012 iopl.fFlags = 0;
3013 }
3014
3015 iopl.fIOMDOffset = mdOffset;
3016 iopl.fPageInfo = pageIndex;
3017 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
3018
3019 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3020 // Clean up partially created and unsaved iopl
3021 if (iopl.fIOPL) {
3022 upl_abort(iopl.fIOPL, 0);
3023 upl_deallocate(iopl.fIOPL);
3024 }
3025 goto abortExit;
3026 }
3027 dataP = 0;
3028
3029 // Check for multiple iopls in one virtual range
3030 pageIndex += numPageInfo;
3031 mdOffset -= iopl.fPageOffset;
3032 if (ioplSize < numBytes) {
3033 numBytes -= ioplSize;
3034 startPage += ioplSize;
3035 mdOffset += ioplSize;
3036 iopl.fPageOffset = 0;
3037 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
3038 }
3039 else {
3040 mdOffset += numBytes;
3041 break;
3042 }
3043 }
3044 }
3045
3046 _highestPage = highestPage;
3047
3048 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
3049 }
3050
3051 #if IOTRACKING
3052 if (kIOReturnSuccess == error)
3053 {
3054 vm_tag_t tag;
3055
3056 dataP = getDataP(_memoryEntries);
3057 if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
3058 else tag = IOMemoryTag(kernel_map);
3059
3060 if (!_wireCount) vm_tag_set_init(&dataP->fWireTags, kMaxWireTags);
3061 vm_tag_set_enter(&dataP->fWireTags, kMaxWireTags, tag);
3062
3063 IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
3064 if (!_wireCount)
3065 {
3066 //if (!(_flags & kIOMemoryAutoPrepare))
3067 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
3068 }
3069 }
3070 #endif /* IOTRACKING */
3071
3072 return (error);
3073
3074 abortExit:
3075 {
3076 dataP = getDataP(_memoryEntries);
3077 UInt done = getNumIOPL(_memoryEntries, dataP);
3078 ioPLBlock *ioplList = getIOPLList(dataP);
3079
3080 for (UInt range = 0; range < done; range++)
3081 {
3082 if (ioplList[range].fIOPL) {
3083 upl_abort(ioplList[range].fIOPL, 0);
3084 upl_deallocate(ioplList[range].fIOPL);
3085 }
3086 }
3087 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3088 }
3089
3090 if (error == KERN_FAILURE)
3091 error = kIOReturnCannotWire;
3092 else if (error == KERN_MEMORY_ERROR)
3093 error = kIOReturnNoResources;
3094
3095 return error;
3096 }
3097
3098 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3099 {
3100 ioGMDData * dataP;
3101 unsigned dataSize = size;
3102
3103 if (!_memoryEntries) {
3104 _memoryEntries = OSData::withCapacity(dataSize);
3105 if (!_memoryEntries)
3106 return false;
3107 }
3108 else if (!_memoryEntries->initWithCapacity(dataSize))
3109 return false;
3110
3111 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3112 dataP = getDataP(_memoryEntries);
3113
3114 if (mapper == kIOMapperWaitSystem) {
3115 IOMapper::checkForSystemMapper();
3116 mapper = IOMapper::gSystem;
3117 }
3118 dataP->fMapper = mapper;
3119 dataP->fPageCnt = 0;
3120 dataP->fMappedBase = 0;
3121 dataP->fDMAMapNumAddressBits = 64;
3122 dataP->fDMAMapAlignment = 0;
3123 dataP->fPreparationID = kIOPreparationIDUnprepared;
3124 dataP->fDiscontig = false;
3125 dataP->fCompletionError = false;
3126
3127 return (true);
3128 }
3129
3130 IOReturn IOMemoryDescriptor::dmaMap(
3131 IOMapper * mapper,
3132 IODMACommand * command,
3133 const IODMAMapSpecification * mapSpec,
3134 uint64_t offset,
3135 uint64_t length,
3136 uint64_t * mapAddress,
3137 uint64_t * mapLength)
3138 {
3139 IOReturn ret;
3140 uint32_t mapOptions;
3141
3142 mapOptions = 0;
3143 mapOptions |= kIODMAMapReadAccess;
3144 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3145
3146 ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
3147 mapSpec, command, NULL, mapAddress, mapLength);
3148
3149 return (ret);
3150 }
3151
3152 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3153 IOMapper * mapper,
3154 IODMACommand * command,
3155 const IODMAMapSpecification * mapSpec,
3156 uint64_t offset,
3157 uint64_t length,
3158 uint64_t * mapAddress,
3159 uint64_t * mapLength)
3160 {
3161 IOReturn err = kIOReturnSuccess;
3162 ioGMDData * dataP;
3163 IOOptionBits type = _flags & kIOMemoryTypeMask;
3164
3165 *mapAddress = 0;
3166 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3167
3168 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3169 || offset || (length != _length))
3170 {
3171 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3172 }
3173 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3174 {
3175 const ioPLBlock * ioplList = getIOPLList(dataP);
3176 upl_page_info_t * pageList;
3177 uint32_t mapOptions = 0;
3178
3179 IODMAMapSpecification mapSpec;
3180 bzero(&mapSpec, sizeof(mapSpec));
3181 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3182 mapSpec.alignment = dataP->fDMAMapAlignment;
3183
3184 // For external UPLs the fPageInfo field points directly to
3185 // the upl's upl_page_info_t array.
3186 if (ioplList->fFlags & kIOPLExternUPL)
3187 {
3188 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3189 mapOptions |= kIODMAMapPagingPath;
3190 }
3191 else pageList = getPageList(dataP);
3192
3193 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3194 {
3195 mapOptions |= kIODMAMapPageListFullyOccupied;
3196 }
3197
3198 mapOptions |= kIODMAMapReadAccess;
3199 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3200
3201 // Check for direct device non-paged memory
3202 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3203
3204 IODMAMapPageList dmaPageList =
3205 {
3206 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3207 .pageListCount = _pages,
3208 .pageList = &pageList[0]
3209 };
3210 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3211 command, &dmaPageList, mapAddress, mapLength);
3212 }
3213
3214 return (err);
3215 }
3216
3217 /*
3218 * prepare
3219 *
3220 * Prepare the memory for an I/O transfer. This involves paging in
3221 * the memory, if necessary, and wiring it down for the duration of
3222 * the transfer. The complete() method completes the processing of
3223 * the memory after the I/O transfer finishes. This method needn't be
3224 * called for non-pageable memory.
3225 */
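// A minimal usage sketch (illustrative only, not part of this file); the descriptor
// "md" and the transfer itself are assumed:
//
//   if (kIOReturnSuccess == md->prepare()) {
//       // ... program and run the DMA transfer against md ...
//       md->complete();
//   }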
3226
3227 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3228 {
3229 IOReturn error = kIOReturnSuccess;
3230 IOOptionBits type = _flags & kIOMemoryTypeMask;
3231
3232 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3233 return kIOReturnSuccess;
3234
3235 if (_prepareLock) IOLockLock(_prepareLock);
3236
3237 if (kIODirectionDMACommand & forDirection)
3238 {
3239 #if IOMD_DEBUG_DMAACTIVE
3240 OSIncrementAtomic(&__iomd_reservedA);
3241 #endif /* IOMD_DEBUG_DMAACTIVE */
3242 }
3243 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3244 {
3245 error = wireVirtual(forDirection);
3246 }
3247
3248 if ((kIOReturnSuccess == error) && !(kIODirectionDMACommand & forDirection))
3249 {
3250 if (1 == ++_wireCount)
3251 {
3252 if (kIOMemoryClearEncrypt & _flags)
3253 {
3254 performOperation(kIOMemoryClearEncrypted, 0, _length);
3255 }
3256 }
3257 }
3258
3259 if (_prepareLock) IOLockUnlock(_prepareLock);
3260
3261 return error;
3262 }
3263
3264 /*
3265 * complete
3266 *
3267 * Complete processing of the memory after an I/O transfer finishes.
3268 * This method should not be called unless a prepare was previously
3269 * issued; prepare() and complete() must occur in pairs, before and
3270 * after an I/O transfer involving pageable memory.
3271 */
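// Illustrative note (not in the original source): as the code below shows, passing
// kIODirectionCompleteWithError marks the completion as failed, so the wired UPLs
// are aborted rather than committed on the final complete(), e.g.
//
//   md->complete(kIODirectionCompleteWithError);   // "md" as in the prepare() sketch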
3272
3273 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3274 {
3275 IOOptionBits type = _flags & kIOMemoryTypeMask;
3276 ioGMDData * dataP;
3277
3278 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3279 return kIOReturnSuccess;
3280
3281 if (_prepareLock) IOLockLock(_prepareLock);
3282 do
3283 {
3284 assert(_wireCount);
3285 if (!_wireCount) break;
3286 dataP = getDataP(_memoryEntries);
3287 if (!dataP) break;
3288
3289 #if IOMD_DEBUG_DMAACTIVE
3290 if (kIODirectionDMACommand & forDirection)
3291 {
3292 if (__iomd_reservedA) OSDecrementAtomic(&__iomd_reservedA);
3293 else panic("kIOMDSetDMAInactive");
3294 }
3295 #endif /* IOMD_DEBUG_DMAACTIVE */
3296 #if IOTRACKING
3297 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3298 {
3299 vm_tag_t tag;
3300
3301 if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
3302 else tag = IOMemoryTag(kernel_map);
3303 vm_tag_set_remove(&dataP->fWireTags, kMaxWireTags, tag, &tag);
3304 IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
3305 }
3306 if (kIODirectionDMACommand & forDirection) break;
3307 #endif /* IOTRACKING */
3308
3309 if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;
3310
3311 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3312 {
3313 performOperation(kIOMemorySetEncrypted, 0, _length);
3314 }
3315
3316 _wireCount--;
3317 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3318 {
3319 ioPLBlock *ioplList = getIOPLList(dataP);
3320 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3321
3322 if (_wireCount)
3323 {
3324 // kIODirectionCompleteWithDataValid & forDirection
3325 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3326 {
3327 for (ind = 0; ind < count; ind++)
3328 {
3329 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3330 }
3331 }
3332 }
3333 else
3334 {
3335 #if IOMD_DEBUG_DMAACTIVE
3336 if (__iomd_reservedA) panic("complete() while dma active");
3337 #endif /* IOMD_DEBUG_DMAACTIVE */
3338
3339 if (dataP->fMappedBase) {
3340 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
3341 dataP->fMappedBase = 0;
3342 }
3343 // Only complete iopls that we created which are for TypeVirtual
3344 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3345 #if IOTRACKING
3346 //if (!(_flags & kIOMemoryAutoPrepare))
3347 {
3348 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3349 }
3350 #endif /* IOTRACKING */
3351 for (ind = 0; ind < count; ind++)
3352 if (ioplList[ind].fIOPL) {
3353 if (dataP->fCompletionError)
3354 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3355 else
3356 upl_commit(ioplList[ind].fIOPL, 0, 0);
3357 upl_deallocate(ioplList[ind].fIOPL);
3358 }
3359 } else if (kIOMemoryTypeUPL == type) {
3360 upl_set_referenced(ioplList[0].fIOPL, false);
3361 }
3362
3363 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3364
3365 dataP->fPreparationID = kIOPreparationIDUnprepared;
3366 dataP->fAllocTag = VM_KERN_MEMORY_NONE;
3367 }
3368 }
3369 }
3370 while (false);
3371
3372 if (_prepareLock) IOLockUnlock(_prepareLock);
3373
3374 return kIOReturnSuccess;
3375 }
3376
3377 IOReturn IOGeneralMemoryDescriptor::doMap(
3378 vm_map_t __addressMap,
3379 IOVirtualAddress * __address,
3380 IOOptionBits options,
3381 IOByteCount __offset,
3382 IOByteCount __length )
3383 {
3384 #ifndef __LP64__
3385 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3386 #endif /* !__LP64__ */
3387
3388 kern_return_t err;
3389
3390 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3391 mach_vm_size_t offset = mapping->fOffset + __offset;
3392 mach_vm_size_t length = mapping->fLength;
3393
3394 IOOptionBits type = _flags & kIOMemoryTypeMask;
3395 Ranges vec = _ranges;
3396
3397 mach_vm_address_t range0Addr = 0;
3398 mach_vm_size_t range0Len = 0;
3399
3400 if ((offset >= _length) || ((offset + length) > _length))
3401 return( kIOReturnBadArgument );
3402
3403 if (vec.v)
3404 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3405
3406 // mapping source == dest? (could be much better)
3407 if (_task
3408 && (mapping->fAddressTask == _task)
3409 && (mapping->fAddressMap == get_task_map(_task))
3410 && (options & kIOMapAnywhere)
3411 && (1 == _rangesCount)
3412 && (0 == offset)
3413 && range0Addr
3414 && (length <= range0Len))
3415 {
3416 mapping->fAddress = range0Addr;
3417 mapping->fOptions |= kIOMapStatic;
3418
3419 return( kIOReturnSuccess );
3420 }
3421
3422 if (!_memRef)
3423 {
3424 IOOptionBits createOptions = 0;
3425 if (!(kIOMapReadOnly & options))
3426 {
3427 createOptions |= kIOMemoryReferenceWrite;
3428 #if DEVELOPMENT || DEBUG
3429 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3430 {
3431 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3432 }
3433 #endif
3434 }
3435 err = memoryReferenceCreate(createOptions, &_memRef);
3436 if (kIOReturnSuccess != err) return (err);
3437 }
3438
3439 memory_object_t pager;
3440 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3441
3442 // <upl_transpose //
3443 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3444 {
3445 do
3446 {
3447 upl_t redirUPL2;
3448 upl_size_t size;
3449 upl_control_flags_t flags;
3450 unsigned int lock_count;
3451
3452 if (!_memRef || (1 != _memRef->count))
3453 {
3454 err = kIOReturnNotReadable;
3455 break;
3456 }
3457
3458 size = round_page(mapping->fLength);
3459 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3460 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
3461 | UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));
3462
3463 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3464 NULL, NULL,
3465 &flags))
3466 redirUPL2 = NULL;
3467
3468 for (lock_count = 0;
3469 IORecursiveLockHaveLock(gIOMemoryLock);
3470 lock_count++) {
3471 UNLOCK;
3472 }
3473 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3474 for (;
3475 lock_count;
3476 lock_count--) {
3477 LOCK;
3478 }
3479
3480 if (kIOReturnSuccess != err)
3481 {
3482 IOLog("upl_transpose(%x)\n", err);
3483 err = kIOReturnSuccess;
3484 }
3485
3486 if (redirUPL2)
3487 {
3488 upl_commit(redirUPL2, NULL, 0);
3489 upl_deallocate(redirUPL2);
3490 redirUPL2 = 0;
3491 }
3492 {
3493 // swap the memEntries since they now refer to different vm_objects
3494 IOMemoryReference * me = _memRef;
3495 _memRef = mapping->fMemory->_memRef;
3496 mapping->fMemory->_memRef = me;
3497 }
3498 if (pager)
3499 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3500 }
3501 while (false);
3502 }
3503 // upl_transpose> //
3504 else
3505 {
3506 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3507 #if IOTRACKING
3508 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
3509 {
3510 // only dram maps in the default on development case
3511 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3512 }
3513 #endif /* IOTRACKING */
3514 if ((err == KERN_SUCCESS) && pager)
3515 {
3516 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3517
3518 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3519 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3520 {
3521 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3522 }
3523 }
3524 }
3525
3526 return (err);
3527 }
3528
3529 #if IOTRACKING
3530 IOReturn
3531 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
3532 mach_vm_address_t * address, mach_vm_size_t * size)
3533 {
3534 #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3535
3536 IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
3537
3538 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);
3539
3540 *task = map->fAddressTask;
3541 *address = map->fAddress;
3542 *size = map->fLength;
3543
3544 return (kIOReturnSuccess);
3545 }
3546 #endif /* IOTRACKING */
3547
3548 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3549 vm_map_t addressMap,
3550 IOVirtualAddress __address,
3551 IOByteCount __length )
3552 {
3553 return (super::doUnmap(addressMap, __address, __length));
3554 }
3555
3556 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3557
3558 #undef super
3559 #define super OSObject
3560
3561 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3562
3563 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3564 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3565 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3566 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3567 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3568 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3569 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3570 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3571
3572 /* ex-inline function implementation */
3573 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3574 { return( getPhysicalSegment( 0, 0 )); }
3575
3576 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3577
3578 bool IOMemoryMap::init(
3579 task_t intoTask,
3580 mach_vm_address_t toAddress,
3581 IOOptionBits _options,
3582 mach_vm_size_t _offset,
3583 mach_vm_size_t _length )
3584 {
3585 if (!intoTask)
3586 return( false);
3587
3588 if (!super::init())
3589 return(false);
3590
3591 fAddressMap = get_task_map(intoTask);
3592 if (!fAddressMap)
3593 return(false);
3594 vm_map_reference(fAddressMap);
3595
3596 fAddressTask = intoTask;
3597 fOptions = _options;
3598 fLength = _length;
3599 fOffset = _offset;
3600 fAddress = toAddress;
3601
3602 return (true);
3603 }
3604
3605 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3606 {
3607 if (!_memory)
3608 return(false);
3609
3610 if (!fSuperMap)
3611 {
3612 if( (_offset + fLength) > _memory->getLength())
3613 return( false);
3614 fOffset = _offset;
3615 }
3616
3617 _memory->retain();
3618 if (fMemory)
3619 {
3620 if (fMemory != _memory)
3621 fMemory->removeMapping(this);
3622 fMemory->release();
3623 }
3624 fMemory = _memory;
3625
3626 return( true );
3627 }
3628
3629 IOReturn IOMemoryDescriptor::doMap(
3630 vm_map_t __addressMap,
3631 IOVirtualAddress * __address,
3632 IOOptionBits options,
3633 IOByteCount __offset,
3634 IOByteCount __length )
3635 {
3636 return (kIOReturnUnsupported);
3637 }
3638
3639 IOReturn IOMemoryDescriptor::handleFault(
3640 void * _pager,
3641 mach_vm_size_t sourceOffset,
3642 mach_vm_size_t length)
3643 {
3644 if( kIOMemoryRedirected & _flags)
3645 {
3646 #if DEBUG
3647 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3648 #endif
3649 do {
3650 SLEEP;
3651 } while( kIOMemoryRedirected & _flags );
3652 }
3653 return (kIOReturnSuccess);
3654 }
3655
3656 IOReturn IOMemoryDescriptor::populateDevicePager(
3657 void * _pager,
3658 vm_map_t addressMap,
3659 mach_vm_address_t address,
3660 mach_vm_size_t sourceOffset,
3661 mach_vm_size_t length,
3662 IOOptionBits options )
3663 {
3664 IOReturn err = kIOReturnSuccess;
3665 memory_object_t pager = (memory_object_t) _pager;
3666 mach_vm_size_t size;
3667 mach_vm_size_t bytes;
3668 mach_vm_size_t page;
3669 mach_vm_size_t pageOffset;
3670 mach_vm_size_t pagerOffset;
3671 IOPhysicalLength segLen, chunk;
3672 addr64_t physAddr;
3673 IOOptionBits type;
3674
3675 type = _flags & kIOMemoryTypeMask;
3676
3677 if (reserved->dp.pagerContig)
3678 {
3679 sourceOffset = 0;
3680 pagerOffset = 0;
3681 }
3682
3683 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3684 assert( physAddr );
3685 pageOffset = physAddr - trunc_page_64( physAddr );
3686 pagerOffset = sourceOffset;
3687
3688 size = length + pageOffset;
3689 physAddr -= pageOffset;
3690
3691 segLen += pageOffset;
3692 bytes = size;
3693 do
3694 {
3695 // in the middle of the loop only map whole pages
3696 if( segLen >= bytes) segLen = bytes;
3697 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3698 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3699
3700 if (kIOReturnSuccess != err) break;
3701
3702 #if DEBUG || DEVELOPMENT
3703 if ((kIOMemoryTypeUPL != type)
3704 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3705 {
3706 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3707 }
3708 #endif /* DEBUG || DEVELOPMENT */
3709
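// A contiguous device pager can be populated with a single call covering the whole
// segment; otherwise populate one page per call.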
3710 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3711 for (page = 0;
3712 (page < segLen) && (KERN_SUCCESS == err);
3713 page += chunk)
3714 {
3715 err = device_pager_populate_object(pager, pagerOffset,
3716 (ppnum_t)(atop_64(physAddr + page)), chunk);
3717 pagerOffset += chunk;
3718 }
3719
3720 assert (KERN_SUCCESS == err);
3721 if (err) break;
3722
3723 // This call to vm_fault causes an early pmap-level resolution
3724 // of the mappings created above for kernel mappings, since
3725 // faulting them in later can't take place from interrupt level.
3726 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3727 {
3728 vm_fault(addressMap,
3729 (vm_map_offset_t)trunc_page_64(address),
3730 VM_PROT_READ|VM_PROT_WRITE,
3731 FALSE, THREAD_UNINT, NULL,
3732 (vm_map_offset_t)0);
3733 }
3734
3735 sourceOffset += segLen - pageOffset;
3736 address += segLen;
3737 bytes -= segLen;
3738 pageOffset = 0;
3739 }
3740 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3741
3742 if (bytes)
3743 err = kIOReturnBadArgument;
3744
3745 return (err);
3746 }
3747
3748 IOReturn IOMemoryDescriptor::doUnmap(
3749 vm_map_t addressMap,
3750 IOVirtualAddress __address,
3751 IOByteCount __length )
3752 {
3753 IOReturn err;
3754 IOMemoryMap * mapping;
3755 mach_vm_address_t address;
3756 mach_vm_size_t length;
3757
3758 if (__length) panic("doUnmap");
3759
3760 mapping = (IOMemoryMap *) __address;
3761 addressMap = mapping->fAddressMap;
3762 address = mapping->fAddress;
3763 length = mapping->fLength;
3764
3765 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3766 else
3767 {
3768 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3769 addressMap = IOPageableMapForAddress( address );
3770 #if DEBUG
3771 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3772 addressMap, address, length );
3773 #endif
3774 err = mach_vm_deallocate( addressMap, address, length );
3775 }
3776
3777 #if IOTRACKING
3778 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
3779 #endif /* IOTRACKING */
3780
3781 return (err);
3782 }
3783
3784 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3785 {
3786 IOReturn err = kIOReturnSuccess;
3787 IOMemoryMap * mapping = 0;
3788 OSIterator * iter;
3789
3790 LOCK;
3791
3792 if( doRedirect)
3793 _flags |= kIOMemoryRedirected;
3794 else
3795 _flags &= ~kIOMemoryRedirected;
3796
3797 do {
3798 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3799
3800 memory_object_t pager;
3801
3802 if( reserved)
3803 pager = (memory_object_t) reserved->dp.devicePager;
3804 else
3805 pager = MACH_PORT_NULL;
3806
3807 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3808 {
3809 mapping->redirect( safeTask, doRedirect );
3810 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3811 {
3812 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3813 }
3814 }
3815
3816 iter->release();
3817 }
3818 } while( false );
3819
3820 if (!doRedirect)
3821 {
3822 WAKEUP;
3823 }
3824
3825 UNLOCK;
3826
3827 #ifndef __LP64__
3828 // temporary binary compatibility
3829 IOSubMemoryDescriptor * subMem;
3830 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3831 err = subMem->redirect( safeTask, doRedirect );
3832 else
3833 err = kIOReturnSuccess;
3834 #endif /* !__LP64__ */
3835
3836 return( err );
3837 }
3838
3839 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3840 {
3841 IOReturn err = kIOReturnSuccess;
3842
3843 if( fSuperMap) {
3844 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3845 } else {
3846
3847 LOCK;
3848
3849 do
3850 {
3851 if (!fAddress)
3852 break;
3853 if (!fAddressMap)
3854 break;
3855
3856 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3857 && (0 == (fOptions & kIOMapStatic)))
3858 {
3859 IOUnmapPages( fAddressMap, fAddress, fLength );
3860 err = kIOReturnSuccess;
3861 #if DEBUG
3862 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3863 #endif
3864 }
3865 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3866 {
3867 IOOptionBits newMode;
3868 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3869 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3870 }
3871 }
3872 while (false);
3873 UNLOCK;
3874 }
3875
3876 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3877 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3878 && safeTask
3879 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3880 fMemory->redirect(safeTask, doRedirect);
3881
3882 return( err );
3883 }
3884
3885 IOReturn IOMemoryMap::unmap( void )
3886 {
3887 IOReturn err;
3888
3889 LOCK;
3890
3891 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3892 && (0 == (kIOMapStatic & fOptions))) {
3893
3894 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3895
3896 } else
3897 err = kIOReturnSuccess;
3898
3899 if (fAddressMap)
3900 {
3901 vm_map_deallocate(fAddressMap);
3902 fAddressMap = 0;
3903 }
3904
3905 fAddress = 0;
3906
3907 UNLOCK;
3908
3909 return( err );
3910 }
3911
3912 void IOMemoryMap::taskDied( void )
3913 {
3914 LOCK;
3915 if (fUserClientUnmap) unmap();
3916 #if IOTRACKING
3917 else IOTrackingRemoveUser(gIOMapTracking, &fTracking);
3918 #endif /* IOTRACKING */
3919
3920 if( fAddressMap) {
3921 vm_map_deallocate(fAddressMap);
3922 fAddressMap = 0;
3923 }
3924 fAddressTask = 0;
3925 fAddress = 0;
3926 UNLOCK;
3927 }
3928
3929 IOReturn IOMemoryMap::userClientUnmap( void )
3930 {
3931 fUserClientUnmap = true;
3932 return (kIOReturnSuccess);
3933 }
3934
3935 // Overload the release mechanism. All mappings must be a member
3936 // of a memory descriptor's _mappings set. This means that we
3937 // always have 2 references on a mapping. When either of these references
3938 // is released we need to free ourselves.
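// Illustrative note (not in the original source): per the comment above, a client
// that obtained a mapping needs only its own release to start teardown, e.g.
//
//   IOMemoryMap * map = md->map();   // two references: the caller's and the
//                                    // descriptor's _mappings set
//   map->release();                  // triggers free(), which unmaps and removes
//                                    // the mapping from the descriptor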
3939 void IOMemoryMap::taggedRelease(const void *tag) const
3940 {
3941 LOCK;
3942 super::taggedRelease(tag, 2);
3943 UNLOCK;
3944 }
3945
3946 void IOMemoryMap::free()
3947 {
3948 unmap();
3949
3950 if (fMemory)
3951 {
3952 LOCK;
3953 fMemory->removeMapping(this);
3954 UNLOCK;
3955 fMemory->release();
3956 }
3957
3958 if (fOwner && (fOwner != fMemory))
3959 {
3960 LOCK;
3961 fOwner->removeMapping(this);
3962 UNLOCK;
3963 }
3964
3965 if (fSuperMap)
3966 fSuperMap->release();
3967
3968 if (fRedirUPL) {
3969 upl_commit(fRedirUPL, NULL, 0);
3970 upl_deallocate(fRedirUPL);
3971 }
3972
3973 super::free();
3974 }
3975
3976 IOByteCount IOMemoryMap::getLength()
3977 {
3978 return( fLength );
3979 }
3980
3981 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3982 {
3983 #ifndef __LP64__
3984 if (fSuperMap)
3985 fSuperMap->getVirtualAddress();
3986 else if (fAddressMap
3987 && vm_map_is_64bit(fAddressMap)
3988 && (sizeof(IOVirtualAddress) < 8))
3989 {
3990 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3991 }
3992 #endif /* !__LP64__ */
3993
3994 return (fAddress);
3995 }
3996
3997 #ifndef __LP64__
3998 mach_vm_address_t IOMemoryMap::getAddress()
3999 {
4000 return( fAddress);
4001 }
4002
4003 mach_vm_size_t IOMemoryMap::getSize()
4004 {
4005 return( fLength );
4006 }
4007 #endif /* !__LP64__ */
4008
4009
4010 task_t IOMemoryMap::getAddressTask()
4011 {
4012 if( fSuperMap)
4013 return( fSuperMap->getAddressTask());
4014 else
4015 return( fAddressTask);
4016 }
4017
4018 IOOptionBits IOMemoryMap::getMapOptions()
4019 {
4020 return( fOptions);
4021 }
4022
4023 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
4024 {
4025 return( fMemory );
4026 }
4027
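// Decide whether this existing mapping can satisfy the request described by
// newMapping: same task address map, compatible read-only and cache options,
// matching address unless kIOMapAnywhere was requested, and a requested range
// wholly contained in this one. On success this mapping is returned directly
// (exact match) or newMapping is configured as a sub-map (fSuperMap) covering
// the requested subrange.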
4028 IOMemoryMap * IOMemoryMap::copyCompatible(
4029 IOMemoryMap * newMapping )
4030 {
4031 task_t task = newMapping->getAddressTask();
4032 mach_vm_address_t toAddress = newMapping->fAddress;
4033 IOOptionBits _options = newMapping->fOptions;
4034 mach_vm_size_t _offset = newMapping->fOffset;
4035 mach_vm_size_t _length = newMapping->fLength;
4036
4037 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
4038 return( 0 );
4039 if( (fOptions ^ _options) & kIOMapReadOnly)
4040 return( 0 );
4041 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
4042 && ((fOptions ^ _options) & kIOMapCacheMask))
4043 return( 0 );
4044
4045 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
4046 return( 0 );
4047
4048 if( _offset < fOffset)
4049 return( 0 );
4050
4051 _offset -= fOffset;
4052
4053 if( (_offset + _length) > fLength)
4054 return( 0 );
4055
4056 retain();
4057 if( (fLength == _length) && (!_offset))
4058 {
4059 newMapping = this;
4060 }
4061 else
4062 {
4063 newMapping->fSuperMap = this;
4064 newMapping->fOffset = fOffset + _offset;
4065 newMapping->fAddress = fAddress + _offset;
4066 }
4067
4068 return( newMapping );
4069 }
4070
4071 IOReturn IOMemoryMap::wireRange(
4072 uint32_t options,
4073 mach_vm_size_t offset,
4074 mach_vm_size_t length)
4075 {
4076 IOReturn kr;
4077 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4078 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4079 vm_prot_t prot;
4080
4081 prot = (kIODirectionOutIn & options);
4082 if (prot)
4083 {
4084 prot |= VM_PROT_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
4085 kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
4086 }
4087 else
4088 {
4089 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4090 }
4091
4092 return (kr);
4093 }
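// Usage sketch, assuming `map` is an IOMemoryMap obtained from
// createMappingInTask(): wire the whole mapping for read/write access, then
// unwire it later by passing zero direction bits.
//
//     IOReturn kr = map->wireRange(kIODirectionOutIn, 0, map->getLength());
//     if (kIOReturnSuccess == kr) {
//         // ... touch the pages without taking faults ...
//         map->wireRange(0, 0, map->getLength());   // zero prot bits -> vm_map_unwire
//     }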
4094
4095
4096 IOPhysicalAddress
4097 #ifdef __LP64__
4098 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4099 #else /* !__LP64__ */
4100 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4101 #endif /* !__LP64__ */
4102 {
4103 IOPhysicalAddress address;
4104
4105 LOCK;
4106 #ifdef __LP64__
4107 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4108 #else /* !__LP64__ */
4109 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4110 #endif /* !__LP64__ */
4111 UNLOCK;
4112
4113 return( address );
4114 }
4115
4116 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4117
4118 #undef super
4119 #define super OSObject
4120
4121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4122
4123 void IOMemoryDescriptor::initialize( void )
4124 {
4125 if( 0 == gIOMemoryLock)
4126 gIOMemoryLock = IORecursiveLockAlloc();
4127
4128 gIOLastPage = IOGetLastPageNumber();
4129 }
4130
4131 void IOMemoryDescriptor::free( void )
4132 {
4133 if( _mappings) _mappings->release();
4134
4135 if (reserved)
4136 {
4137 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4138 reserved = NULL;
4139 }
4140 super::free();
4141 }
4142
4143 IOMemoryMap * IOMemoryDescriptor::setMapping(
4144 task_t intoTask,
4145 IOVirtualAddress mapAddress,
4146 IOOptionBits options )
4147 {
4148 return (createMappingInTask( intoTask, mapAddress,
4149 options | kIOMapStatic,
4150 0, getLength() ));
4151 }
4152
4153 IOMemoryMap * IOMemoryDescriptor::map(
4154 IOOptionBits options )
4155 {
4156 return (createMappingInTask( kernel_task, 0,
4157 options | kIOMapAnywhere,
4158 0, getLength() ));
4159 }
4160
4161 #ifndef __LP64__
4162 IOMemoryMap * IOMemoryDescriptor::map(
4163 task_t intoTask,
4164 IOVirtualAddress atAddress,
4165 IOOptionBits options,
4166 IOByteCount offset,
4167 IOByteCount length )
4168 {
4169 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4170 {
4171 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4172 return (0);
4173 }
4174
4175 return (createMappingInTask(intoTask, atAddress,
4176 options, offset, length));
4177 }
4178 #endif /* !__LP64__ */
4179
4180 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4181 task_t intoTask,
4182 mach_vm_address_t atAddress,
4183 IOOptionBits options,
4184 mach_vm_size_t offset,
4185 mach_vm_size_t length)
4186 {
4187 IOMemoryMap * result;
4188 IOMemoryMap * mapping;
4189
4190 if (0 == length)
4191 length = getLength();
4192
4193 mapping = new IOMemoryMap;
4194
4195 if( mapping
4196 && !mapping->init( intoTask, atAddress,
4197 options, offset, length )) {
4198 mapping->release();
4199 mapping = 0;
4200 }
4201
4202 if (mapping)
4203 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4204 else
4205 result = 0;
4206
4207 #if DEBUG
4208 if (!result)
4209 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4210 this, atAddress, (uint32_t) options, offset, length);
4211 #endif
4212
4213 return (result);
4214 }
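// Usage sketch, assuming `buffer`, `length` and `userTask` are supplied by the
// caller: describe a user range, map it into the kernel, and release the map
// (which unmaps it) when done. A real driver would also prepare() the
// descriptor, and later complete() it, before touching the memory.
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         (mach_vm_address_t) buffer, length, kIODirectionOutIn, userTask);
//     IOMemoryMap * map = md ? md->createMappingInTask(kernel_task, 0,
//                                     kIOMapAnywhere, 0, 0) : NULL;
//     if (map)
//     {
//         IOVirtualAddress kva = map->getVirtualAddress();
//         // ... access the buffer through kva ...
//         map->release();
//     }
//     if (md) md->release();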
4215
4216 #ifndef __LP64__ // there is only a 64-bit version for LP64
4217 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4218 IOOptionBits options,
4219 IOByteCount offset)
4220 {
4221 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4222 }
4223 #endif
4224
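// Re-point an existing mapping at different backing memory. Access to the
// current pages is first blocked (a UPL taken with UPL_BLOCK_ACCESS, plus an
// unmap for physical descriptors); if newBackingMemory is supplied, the range
// is then remapped onto it through makeMapping() with
// kIOMapUnique | kIOMapReference, and the blocking UPL is committed.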
4225 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4226 IOOptionBits options,
4227 mach_vm_size_t offset)
4228 {
4229 IOReturn err = kIOReturnSuccess;
4230 IOMemoryDescriptor * physMem = 0;
4231
4232 LOCK;
4233
4234 if (fAddress && fAddressMap) do
4235 {
4236 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4237 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4238 {
4239 physMem = fMemory;
4240 physMem->retain();
4241 }
4242
4243 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4244 {
4245 upl_size_t size = round_page(fLength);
4246 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4247 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
4248 | UPL_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
4249 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4250 NULL, NULL,
4251 &flags))
4252 fRedirUPL = 0;
4253
4254 if (physMem)
4255 {
4256 IOUnmapPages( fAddressMap, fAddress, fLength );
4257 if ((false))
4258 physMem->redirect(0, true);
4259 }
4260 }
4261
4262 if (newBackingMemory)
4263 {
4264 if (newBackingMemory != fMemory)
4265 {
4266 fOffset = 0;
4267 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4268 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4269 offset, fLength))
4270 err = kIOReturnError;
4271 }
4272 if (fRedirUPL)
4273 {
4274 upl_commit(fRedirUPL, NULL, 0);
4275 upl_deallocate(fRedirUPL);
4276 fRedirUPL = 0;
4277 }
4278 if ((false) && physMem)
4279 physMem->redirect(0, false);
4280 }
4281 }
4282 while (false);
4283
4284 UNLOCK;
4285
4286 if (physMem)
4287 physMem->release();
4288
4289 return (err);
4290 }
4291
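// Internal factory for mappings. The caller-allocated IOMemoryMap arrives via
// the __address parameter (hence the required kIOMap64Bit) and is either
// adopted as-is (kIOMapStatic), backed by a fresh physical-range descriptor
// (kIOMapUnique on physical memory), satisfied by an existing compatible
// mapping found with copyCompatible(), or realized by doMap().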
4292 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4293 IOMemoryDescriptor * owner,
4294 task_t __intoTask,
4295 IOVirtualAddress __address,
4296 IOOptionBits options,
4297 IOByteCount __offset,
4298 IOByteCount __length )
4299 {
4300 #ifndef __LP64__
4301 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4302 #endif /* !__LP64__ */
4303
4304 IOMemoryDescriptor * mapDesc = 0;
4305 IOMemoryMap * result = 0;
4306 OSIterator * iter;
4307
4308 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4309 mach_vm_size_t offset = mapping->fOffset + __offset;
4310 mach_vm_size_t length = mapping->fLength;
4311
4312 mapping->fOffset = offset;
4313
4314 LOCK;
4315
4316 do
4317 {
4318 if (kIOMapStatic & options)
4319 {
4320 result = mapping;
4321 addMapping(mapping);
4322 mapping->setMemoryDescriptor(this, 0);
4323 continue;
4324 }
4325
4326 if (kIOMapUnique & options)
4327 {
4328 addr64_t phys;
4329 IOByteCount physLen;
4330
4331 // if (owner != this) continue;
4332
4333 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4334 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4335 {
4336 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4337 if (!phys || (physLen < length))
4338 continue;
4339
4340 mapDesc = IOMemoryDescriptor::withAddressRange(
4341 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4342 if (!mapDesc)
4343 continue;
4344 offset = 0;
4345 mapping->fOffset = offset;
4346 }
4347 }
4348 else
4349 {
4350 // look for a compatible existing mapping
4351 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4352 {
4353 IOMemoryMap * lookMapping;
4354 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4355 {
4356 if ((result = lookMapping->copyCompatible(mapping)))
4357 {
4358 addMapping(result);
4359 result->setMemoryDescriptor(this, offset);
4360 break;
4361 }
4362 }
4363 iter->release();
4364 }
4365 if (result || (options & kIOMapReference))
4366 {
4367 if (result != mapping)
4368 {
4369 mapping->release();
4370 mapping = NULL;
4371 }
4372 continue;
4373 }
4374 }
4375
4376 if (!mapDesc)
4377 {
4378 mapDesc = this;
4379 mapDesc->retain();
4380 }
4381 IOReturn
4382 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4383 if (kIOReturnSuccess == kr)
4384 {
4385 result = mapping;
4386 mapDesc->addMapping(result);
4387 result->setMemoryDescriptor(mapDesc, offset);
4388 }
4389 else
4390 {
4391 mapping->release();
4392 mapping = NULL;
4393 }
4394 }
4395 while( false );
4396
4397 UNLOCK;
4398
4399 if (mapDesc)
4400 mapDesc->release();
4401
4402 return (result);
4403 }
4404
4405 void IOMemoryDescriptor::addMapping(
4406 IOMemoryMap * mapping )
4407 {
4408 if( mapping)
4409 {
4410 if( 0 == _mappings)
4411 _mappings = OSSet::withCapacity(1);
4412 if( _mappings )
4413 _mappings->setObject( mapping );
4414 }
4415 }
4416
4417 void IOMemoryDescriptor::removeMapping(
4418 IOMemoryMap * mapping )
4419 {
4420 if( _mappings)
4421 _mappings->removeObject( mapping);
4422 }
4423
4424 #ifndef __LP64__
4425 // obsolete initializers
4426 // - initWithOptions is the designated initializer
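// For new code, use the factory methods instead; a minimal sketch, assuming
// `addr`, `len` and `task` are provided by the caller:
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         addr, len, kIODirectionOutIn, task);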
4427 bool
4428 IOMemoryDescriptor::initWithAddress(void * address,
4429 IOByteCount length,
4430 IODirection direction)
4431 {
4432 return( false );
4433 }
4434
4435 bool
4436 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4437 IOByteCount length,
4438 IODirection direction,
4439 task_t task)
4440 {
4441 return( false );
4442 }
4443
4444 bool
4445 IOMemoryDescriptor::initWithPhysicalAddress(
4446 IOPhysicalAddress address,
4447 IOByteCount length,
4448 IODirection direction )
4449 {
4450 return( false );
4451 }
4452
4453 bool
4454 IOMemoryDescriptor::initWithRanges(
4455 IOVirtualRange * ranges,
4456 UInt32 withCount,
4457 IODirection direction,
4458 task_t task,
4459 bool asReference)
4460 {
4461 return( false );
4462 }
4463
4464 bool
4465 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4466 UInt32 withCount,
4467 IODirection direction,
4468 bool asReference)
4469 {
4470 return( false );
4471 }
4472
4473 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4474 IOByteCount * lengthOfSegment)
4475 {
4476 return( 0 );
4477 }
4478 #endif /* !__LP64__ */
4479
4480 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4481
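// Serialize the descriptor's ranges as an OSArray of dictionaries, each
// holding an "address" and a "length" OSNumber.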
4482 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4483 {
4484 OSSymbol const *keys[2];
4485 OSObject *values[2];
4486 OSArray * array;
4487
4488 struct SerData {
4489 user_addr_t address;
4490 user_size_t length;
4491 } *vcopy;
4492 unsigned int index, nRanges;
4493 bool result;
4494
4495 IOOptionBits type = _flags & kIOMemoryTypeMask;
4496
4497 if (s == NULL) return false;
4498
4499 array = OSArray::withCapacity(4);
4500 if (!array) return (false);
4501
4502 nRanges = _rangesCount;
4503 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4504 if (vcopy == 0) { array->release(); return false; }
4505
4506 keys[0] = OSSymbol::withCString("address");
4507 keys[1] = OSSymbol::withCString("length");
4508
4509 result = false;
4510 values[0] = values[1] = 0;
4511
4512 // From this point on, failure paths exit via the bail label.
4513
4514 // Copy the volatile data so we don't have to allocate memory
4515 // while the lock is held.
4516 LOCK;
4517 if (nRanges == _rangesCount) {
4518 Ranges vec = _ranges;
4519 for (index = 0; index < nRanges; index++) {
4520 mach_vm_address_t addr; mach_vm_size_t len;
4521 getAddrLenForInd(addr, len, type, vec, index);
4522 vcopy[index].address = addr;
4523 vcopy[index].length = len;
4524 }
4525 } else {
4526 // The descriptor changed out from under us. Give up.
4527 UNLOCK;
4528 result = false;
4529 goto bail;
4530 }
4531 UNLOCK;
4532
4533 for (index = 0; index < nRanges; index++)
4534 {
4535 user_addr_t addr = vcopy[index].address;
4536 IOByteCount len = (IOByteCount) vcopy[index].length;
4537 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4538 if (values[0] == 0) {
4539 result = false;
4540 goto bail;
4541 }
4542 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4543 if (values[1] == 0) {
4544 result = false;
4545 goto bail;
4546 }
4547 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4548 if (dict == 0) {
4549 result = false;
4550 goto bail;
4551 }
4552 array->setObject(dict);
4553 dict->release();
4554 values[0]->release();
4555 values[1]->release();
4556 values[0] = values[1] = 0;
4557 }
4558
4559 result = array->serialize(s);
4560
4561 bail:
4562 if (array)
4563 array->release();
4564 if (values[0])
4565 values[0]->release();
4566 if (values[1])
4567 values[1]->release();
4568 if (keys[0])
4569 keys[0]->release();
4570 if (keys[1])
4571 keys[1]->release();
4572 if (vcopy)
4573 IOFree(vcopy, sizeof(SerData) * nRanges);
4574
4575 return result;
4576 }
4577
4578 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4579
4580 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4581 #ifdef __LP64__
4582 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4583 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4584 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4585 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4586 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4587 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4588 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4589 #else /* !__LP64__ */
4590 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4591 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4592 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4593 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4594 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4595 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4596 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4597 #endif /* !__LP64__ */
4598 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4599 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4600 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4601 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4602 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4603 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4604 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4605 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4606
4607 /* ex-inline function implementation */
4608 IOPhysicalAddress
4609 IOMemoryDescriptor::getPhysicalAddress()
4610 { return( getPhysicalSegment( 0, 0 )); }