1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53 #include <os/overflow.h>
54
55 #include <sys/uio.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #include <mach/vm_prot.h>
64 #include <mach/mach_vm.h>
65 #include <vm/vm_fault.h>
66 #include <vm/vm_protos.h>
67
68 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
69 extern void ipc_port_release_send(ipc_port_t port);
70
71 // osfmk/device/iokit_rpc.c
72 unsigned int IODefaultCacheBits(addr64_t pa);
73 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
74
75 __END_DECLS
76
77 #define kIOMapperWaitSystem ((IOMapper *) 1)
78
79 static IOMapper * gIOSystemMapper = NULL;
80
81 ppnum_t gIOLastPage;
82
83 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
84
85 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
86
87 #define super IOMemoryDescriptor
88
89 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
90
91 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
92
93 static IORecursiveLock * gIOMemoryLock;
94
95 #define LOCK IORecursiveLockLock( gIOMemoryLock)
96 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
97 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
98 #define WAKEUP \
99 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
100
101 #if 0
102 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
103 #else
104 #define DEBG(fmt, args...) {}
105 #endif
106
107 #define IOMD_DEBUG_DMAACTIVE 1
108
109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111 // Some data structures and accessor macros used by the initWithOptions
112 // function.
113
114 enum ioPLBlockFlags {
115 kIOPLOnDevice = 0x00000001,
116 kIOPLExternUPL = 0x00000002,
117 };
118
119 struct IOMDPersistentInitData
120 {
121 const IOGeneralMemoryDescriptor * fMD;
122 IOMemoryReference * fMemRef;
123 };
124
125 struct ioPLBlock {
126 upl_t fIOPL;
127 vm_address_t fPageInfo; // Pointer to page list or index into it
128 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
129 ppnum_t fMappedPage; // Page number of first page in this iopl
130 unsigned int fPageOffset; // Offset within first page of iopl
131 unsigned int fFlags; // Flags
132 };
133
134 enum { kMaxWireTags = 6 };
135
136 struct ioGMDData
137 {
138 IOMapper * fMapper;
139 uint64_t fDMAMapAlignment;
140 uint64_t fMappedBase;
141 uint64_t fMappedLength;
142 uint64_t fPreparationID;
143 #if IOTRACKING
144 IOTracking fWireTracking;
145 struct vm_tag_set fWireTags;
146 struct vm_tag_set_entry fWireTagsEntries[kMaxWireTags];
147 #endif /* IOTRACKING */
148 unsigned int fPageCnt;
149 uint8_t fDMAMapNumAddressBits;
150 vm_tag_t fAllocTag;
151 unsigned char fDiscontig:1;
152 unsigned char fCompletionError:1;
153 unsigned char _resv:6;
154
155 /* variable length arrays */
156 upl_page_info_t fPageList[1]
157 #if __LP64__
158 // align fPageList as for ioPLBlock
159 __attribute__((aligned(sizeof(upl_t))))
160 #endif
161 ;
162 ioPLBlock fBlocks[1];
163 };
164
165 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
166 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
167 #define getNumIOPL(osd, d) \
168 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
169 #define getPageList(d) (&(d->fPageList[0]))
170 #define computeDataSize(p, u) \
171 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
172
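/*
 * Example (illustrative sketch only; `p` and `u` are placeholders): the
 * accessors above assume an OSData buffer laid out as an ioGMDData header,
 * then p upl_page_info_t entries, then u ioPLBlock entries, so
 *
 *   computeDataSize(p, u) == offsetof(ioGMDData, fPageList)
 *                            + p * sizeof(upl_page_info_t)
 *                            + u * sizeof(ioPLBlock)
 *
 * For a populated buffer with dataP->fPageCnt == p, getIOPLList(dataP) points
 * just past fPageList[p], and getNumIOPL() divides the remaining appended
 * bytes by sizeof(ioPLBlock).
 */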
173 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
174
175 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
176
177 extern "C" {
178
179 kern_return_t device_data_action(
180 uintptr_t device_handle,
181 ipc_port_t device_pager,
182 vm_prot_t protection,
183 vm_object_offset_t offset,
184 vm_size_t size)
185 {
186 kern_return_t kr;
187 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
188 IOMemoryDescriptor * memDesc;
189
190 LOCK;
191 memDesc = ref->dp.memory;
192 if( memDesc)
193 {
194 memDesc->retain();
195 kr = memDesc->handleFault(device_pager, offset, size);
196 memDesc->release();
197 }
198 else
199 kr = KERN_ABORTED;
200 UNLOCK;
201
202 return( kr );
203 }
204
205 kern_return_t device_close(
206 uintptr_t device_handle)
207 {
208 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
209
210 IODelete( ref, IOMemoryDescriptorReserved, 1 );
211
212 return( kIOReturnSuccess );
213 }
214 }; // end extern "C"
215
216 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
217
218 // Note this inline function uses C++ reference arguments to return values.
219 // Because no pointers are passed, there is no need to check for NULL,
220 // as a NULL reference is illegal.
221 static inline void
222 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
223 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
224 {
225 assert(kIOMemoryTypeUIO == type
226 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
227 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
228 if (kIOMemoryTypeUIO == type) {
229 user_size_t us;
230 user_addr_t ad;
231 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
232 }
233 #ifndef __LP64__
234 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
235 IOAddressRange cur = r.v64[ind];
236 addr = cur.address;
237 len = cur.length;
238 }
239 #endif /* !__LP64__ */
240 else {
241 IOVirtualRange cur = r.v[ind];
242 addr = cur.address;
243 len = cur.length;
244 }
245 }
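/*
 * Example (illustrative sketch only): the typical caller pattern, as used by
 * the initialization and mapping code later in this file. The outputs come
 * back through the reference arguments.
 *
 *   mach_vm_address_t addr;
 *   mach_vm_size_t    len;
 *   for (UInt32 ind = 0; ind < _rangesCount; ind++)
 *   {
 *       getAddrLenForInd(addr, len, type, _ranges, ind);
 *       // addr and len now describe range `ind`
 *   }
 */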
246
247 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
248
249 static IOReturn
250 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
251 {
252 IOReturn err = kIOReturnSuccess;
253
254 *control = VM_PURGABLE_SET_STATE;
255
256 enum { kIOMemoryPurgeableControlMask = 15 };
257
258 switch (kIOMemoryPurgeableControlMask & newState)
259 {
260 case kIOMemoryPurgeableKeepCurrent:
261 *control = VM_PURGABLE_GET_STATE;
262 break;
263
264 case kIOMemoryPurgeableNonVolatile:
265 *state = VM_PURGABLE_NONVOLATILE;
266 break;
267 case kIOMemoryPurgeableVolatile:
268 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
269 break;
270 case kIOMemoryPurgeableEmpty:
271 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
272 break;
273 default:
274 err = kIOReturnBadArgument;
275 break;
276 }
277 return (err);
278 }
279
280 static IOReturn
281 purgeableStateBits(int * state)
282 {
283 IOReturn err = kIOReturnSuccess;
284
285 switch (VM_PURGABLE_STATE_MASK & *state)
286 {
287 case VM_PURGABLE_NONVOLATILE:
288 *state = kIOMemoryPurgeableNonVolatile;
289 break;
290 case VM_PURGABLE_VOLATILE:
291 *state = kIOMemoryPurgeableVolatile;
292 break;
293 case VM_PURGABLE_EMPTY:
294 *state = kIOMemoryPurgeableEmpty;
295 break;
296 default:
297 *state = kIOMemoryPurgeableNonVolatile;
298 err = kIOReturnNotReady;
299 break;
300 }
301 return (err);
302 }
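/*
 * Example (illustrative sketch only; `entry` is a placeholder named entry
 * port): a round trip through the two helpers above, as
 * memoryReferenceSetPurgeable() does further down.
 *
 *   vm_purgable_t control;
 *   int           state;
 *   purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state);
 *   // control == VM_PURGABLE_SET_STATE, state carries VM_PURGABLE_VOLATILE
 *   kern_return_t err = mach_memory_entry_purgable_control(entry, control, &state);
 *   purgeableStateBits(&state);
 *   // state is now one of the kIOMemoryPurgeable* values again
 */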
303
304
305 static vm_prot_t
306 vmProtForCacheMode(IOOptionBits cacheMode)
307 {
308 vm_prot_t prot = 0;
309 switch (cacheMode)
310 {
311 case kIOInhibitCache:
312 SET_MAP_MEM(MAP_MEM_IO, prot);
313 break;
314
315 case kIOWriteThruCache:
316 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
317 break;
318
319 case kIOWriteCombineCache:
320 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
321 break;
322
323 case kIOCopybackCache:
324 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
325 break;
326
327 case kIOCopybackInnerCache:
328 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
329 break;
330
331 case kIODefaultCache:
332 default:
333 SET_MAP_MEM(MAP_MEM_NOOP, prot);
334 break;
335 }
336
337 return (prot);
338 }
339
340 static unsigned int
341 pagerFlagsForCacheMode(IOOptionBits cacheMode)
342 {
343 unsigned int pagerFlags = 0;
344 switch (cacheMode)
345 {
346 case kIOInhibitCache:
347 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
348 break;
349
350 case kIOWriteThruCache:
351 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
352 break;
353
354 case kIOWriteCombineCache:
355 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
356 break;
357
358 case kIOCopybackCache:
359 pagerFlags = DEVICE_PAGER_COHERENT;
360 break;
361
362 case kIOCopybackInnerCache:
363 pagerFlags = DEVICE_PAGER_COHERENT;
364 break;
365
366 case kIODefaultCache:
367 default:
368 pagerFlags = -1U;
369 break;
370 }
371 return (pagerFlags);
372 }
373
374 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
375 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
376
377 struct IOMemoryEntry
378 {
379 ipc_port_t entry;
380 int64_t offset;
381 uint64_t size;
382 };
383
384 struct IOMemoryReference
385 {
386 volatile SInt32 refCount;
387 vm_prot_t prot;
388 uint32_t capacity;
389 uint32_t count;
390 struct IOMemoryReference * mapRef;
391 IOMemoryEntry entries[0];
392 };
393
394 enum
395 {
396 kIOMemoryReferenceReuse = 0x00000001,
397 kIOMemoryReferenceWrite = 0x00000002,
398 kIOMemoryReferenceCOW = 0x00000004,
399 };
400
401 SInt32 gIOMemoryReferenceCount;
402
403 IOMemoryReference *
404 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
405 {
406 IOMemoryReference * ref;
407 size_t newSize, oldSize, copySize;
408
409 newSize = (sizeof(IOMemoryReference)
410 - sizeof(ref->entries)
411 + capacity * sizeof(ref->entries[0]));
412 ref = (typeof(ref)) IOMalloc(newSize);
413 if (realloc)
414 {
415 oldSize = (sizeof(IOMemoryReference)
416 - sizeof(realloc->entries)
417 + realloc->capacity * sizeof(realloc->entries[0]));
418 copySize = oldSize;
419 if (copySize > newSize) copySize = newSize;
420 if (ref) bcopy(realloc, ref, copySize);
421 IOFree(realloc, oldSize);
422 }
423 else if (ref)
424 {
425 bzero(ref, sizeof(*ref));
426 ref->refCount = 1;
427 OSIncrementAtomic(&gIOMemoryReferenceCount);
428 }
429 if (!ref) return (0);
430 ref->capacity = capacity;
431 return (ref);
432 }
433
434 void
435 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
436 {
437 IOMemoryEntry * entries;
438 size_t size;
439
440 if (ref->mapRef)
441 {
442 memoryReferenceFree(ref->mapRef);
443 ref->mapRef = 0;
444 }
445
446 entries = ref->entries + ref->count;
447 while (entries > &ref->entries[0])
448 {
449 entries--;
450 ipc_port_release_send(entries->entry);
451 }
452 size = (sizeof(IOMemoryReference)
453 - sizeof(ref->entries)
454 + ref->capacity * sizeof(ref->entries[0]));
455 IOFree(ref, size);
456
457 OSDecrementAtomic(&gIOMemoryReferenceCount);
458 }
459
460 void
461 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
462 {
463 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
464 }
465
466
467 IOReturn
468 IOGeneralMemoryDescriptor::memoryReferenceCreate(
469 IOOptionBits options,
470 IOMemoryReference ** reference)
471 {
472 enum { kCapacity = 4, kCapacityInc = 4 };
473
474 kern_return_t err;
475 IOMemoryReference * ref;
476 IOMemoryEntry * entries;
477 IOMemoryEntry * cloneEntries;
478 vm_map_t map;
479 ipc_port_t entry, cloneEntry;
480 vm_prot_t prot;
481 memory_object_size_t actualSize;
482 uint32_t rangeIdx;
483 uint32_t count;
484 mach_vm_address_t entryAddr, endAddr, entrySize;
485 mach_vm_size_t srcAddr, srcLen;
486 mach_vm_size_t nextAddr, nextLen;
487 mach_vm_size_t offset, remain;
488 IOByteCount physLen;
489 IOOptionBits type = (_flags & kIOMemoryTypeMask);
490 IOOptionBits cacheMode;
491 unsigned int pagerFlags;
492 vm_tag_t tag;
493
494 ref = memoryReferenceAlloc(kCapacity, NULL);
495 if (!ref) return (kIOReturnNoMemory);
496
497 tag = getVMTag(kernel_map);
498 entries = &ref->entries[0];
499 count = 0;
500 err = KERN_SUCCESS;
501
502 offset = 0;
503 rangeIdx = 0;
504 if (_task)
505 {
506 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
507 }
508 else
509 {
510 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
511 nextLen = physLen;
512
513 // default cache mode for physical
514 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
515 {
516 IOOptionBits mode;
517 pagerFlags = IODefaultCacheBits(nextAddr);
518 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
519 {
520 if (DEVICE_PAGER_GUARDED & pagerFlags)
521 mode = kIOInhibitCache;
522 else
523 mode = kIOWriteCombineCache;
524 }
525 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
526 mode = kIOWriteThruCache;
527 else
528 mode = kIOCopybackCache;
529 _flags |= (mode << kIOMemoryBufferCacheShift);
530 }
531 }
532
533 // cache mode & vm_prot
534 prot = VM_PROT_READ;
535 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
536 prot |= vmProtForCacheMode(cacheMode);
537 // VM system requires write access to change cache mode
538 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
539 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
540 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
541 if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY;
542
543 if ((kIOMemoryReferenceReuse & options) && _memRef)
544 {
545 cloneEntries = &_memRef->entries[0];
546 prot |= MAP_MEM_NAMED_REUSE;
547 }
548
549 if (_task)
550 {
551 // virtual ranges
552
553 if (kIOMemoryBufferPageable & _flags)
554 {
555 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
556 prot |= MAP_MEM_NAMED_CREATE;
557 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
558 if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;
559
560 prot |= VM_PROT_WRITE;
561 map = NULL;
562 }
563 else map = get_task_map(_task);
564
565 remain = _length;
566 while (remain)
567 {
568 srcAddr = nextAddr;
569 srcLen = nextLen;
570 nextAddr = 0;
571 nextLen = 0;
572 // coalesce addr range
573 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
574 {
575 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
576 if ((srcAddr + srcLen) != nextAddr) break;
577 srcLen += nextLen;
578 }
579 entryAddr = trunc_page_64(srcAddr);
580 endAddr = round_page_64(srcAddr + srcLen);
581 do
582 {
583 entrySize = (endAddr - entryAddr);
584 if (!entrySize) break;
585 actualSize = entrySize;
586
587 cloneEntry = MACH_PORT_NULL;
588 if (MAP_MEM_NAMED_REUSE & prot)
589 {
590 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
591 else prot &= ~MAP_MEM_NAMED_REUSE;
592 }
593
594 err = mach_make_memory_entry_64(map,
595 &actualSize, entryAddr, prot, &entry, cloneEntry);
596
597 if (KERN_SUCCESS != err) break;
598 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
599
600 if (count >= ref->capacity)
601 {
602 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
603 entries = &ref->entries[count];
604 }
605 entries->entry = entry;
606 entries->size = actualSize;
607 entries->offset = offset + (entryAddr - srcAddr);
608 entryAddr += actualSize;
609 if (MAP_MEM_NAMED_REUSE & prot)
610 {
611 if ((cloneEntries->entry == entries->entry)
612 && (cloneEntries->size == entries->size)
613 && (cloneEntries->offset == entries->offset)) cloneEntries++;
614 else prot &= ~MAP_MEM_NAMED_REUSE;
615 }
616 entries++;
617 count++;
618 }
619 while (true);
620 offset += srcLen;
621 remain -= srcLen;
622 }
623 }
624 else
625 {
626 // _task == 0, physical or kIOMemoryTypeUPL
627 memory_object_t pager;
628 vm_size_t size = ptoa_32(_pages);
629
630 if (!getKernelReserved()) panic("getKernelReserved");
631
632 reserved->dp.pagerContig = (1 == _rangesCount);
633 reserved->dp.memory = this;
634
635 pagerFlags = pagerFlagsForCacheMode(cacheMode);
636 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
637 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
638
639 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
640 size, pagerFlags);
641 assert (pager);
642 if (!pager) err = kIOReturnVMError;
643 else
644 {
645 srcAddr = nextAddr;
646 entryAddr = trunc_page_64(srcAddr);
647 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
648 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
649 assert (KERN_SUCCESS == err);
650 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
651 else
652 {
653 reserved->dp.devicePager = pager;
654 entries->entry = entry;
655 entries->size = size;
656 entries->offset = offset + (entryAddr - srcAddr);
657 entries++;
658 count++;
659 }
660 }
661 }
662
663 ref->count = count;
664 ref->prot = prot;
665
666 if (_task && (KERN_SUCCESS == err)
667 && (kIOMemoryMapCopyOnWrite & _flags)
668 && !(kIOMemoryReferenceCOW & options))
669 {
670 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
671 }
672
673 if (KERN_SUCCESS == err)
674 {
675 if (MAP_MEM_NAMED_REUSE & prot)
676 {
677 memoryReferenceFree(ref);
678 OSIncrementAtomic(&_memRef->refCount);
679 ref = _memRef;
680 }
681 }
682 else
683 {
684 memoryReferenceFree(ref);
685 ref = NULL;
686 }
687
688 *reference = ref;
689
690 return (err);
691 }
692
693 kern_return_t
694 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
695 {
696 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
697 IOReturn err;
698 vm_map_offset_t addr;
699
700 addr = ref->mapped;
701
702 err = vm_map_enter_mem_object(map, &addr, ref->size,
703 (vm_map_offset_t) 0,
704 (((ref->options & kIOMapAnywhere)
705 ? VM_FLAGS_ANYWHERE
706 : VM_FLAGS_FIXED)
707 | VM_MAKE_TAG(ref->tag)),
708 IPC_PORT_NULL,
709 (memory_object_offset_t) 0,
710 false, /* copy */
711 ref->prot,
712 ref->prot,
713 VM_INHERIT_NONE);
714 if (KERN_SUCCESS == err)
715 {
716 ref->mapped = (mach_vm_address_t) addr;
717 ref->map = map;
718 }
719
720 return( err );
721 }
722
723 IOReturn
724 IOGeneralMemoryDescriptor::memoryReferenceMap(
725 IOMemoryReference * ref,
726 vm_map_t map,
727 mach_vm_size_t inoffset,
728 mach_vm_size_t size,
729 IOOptionBits options,
730 mach_vm_address_t * inaddr)
731 {
732 IOReturn err;
733 int64_t offset = inoffset;
734 uint32_t rangeIdx, entryIdx;
735 vm_map_offset_t addr, mapAddr;
736 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
737
738 mach_vm_address_t nextAddr;
739 mach_vm_size_t nextLen;
740 IOByteCount physLen;
741 IOMemoryEntry * entry;
742 vm_prot_t prot, memEntryCacheMode;
743 IOOptionBits type;
744 IOOptionBits cacheMode;
745 vm_tag_t tag;
746 // for the kIOMapPrefault option.
747 upl_page_info_t * pageList = NULL;
748 UInt currentPageIndex = 0;
749 bool didAlloc;
750
751 if (ref->mapRef)
752 {
753 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
754 return (err);
755 }
756
757 type = _flags & kIOMemoryTypeMask;
758
759 prot = VM_PROT_READ;
760 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
761 prot &= ref->prot;
762
763 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
764 if (kIODefaultCache != cacheMode)
765 {
766 // VM system requires write access to update named entry cache mode
767 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
768 }
769
770 tag = getVMTag(map);
771
772 if (_task)
773 {
774 // Find first range for offset
775 if (!_rangesCount) return (kIOReturnBadArgument);
776 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
777 {
778 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
779 if (remain < nextLen) break;
780 remain -= nextLen;
781 }
782 }
783 else
784 {
785 rangeIdx = 0;
786 remain = 0;
787 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
788 nextLen = size;
789 }
790
791 assert(remain < nextLen);
792 if (remain >= nextLen) return (kIOReturnBadArgument);
793
794 nextAddr += remain;
795 nextLen -= remain;
796 pageOffset = (page_mask & nextAddr);
797 addr = 0;
798 didAlloc = false;
799
800 if (!(options & kIOMapAnywhere))
801 {
802 addr = *inaddr;
803 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
804 addr -= pageOffset;
805 }
806
807 // find first entry for offset
808 for (entryIdx = 0;
809 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
810 entryIdx++) {}
811 entryIdx--;
812 entry = &ref->entries[entryIdx];
813
814 // allocate VM
815 size = round_page_64(size + pageOffset);
816 if (kIOMapOverwrite & options)
817 {
818 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
819 {
820 map = IOPageableMapForAddress(addr);
821 }
822 err = KERN_SUCCESS;
823 }
824 else
825 {
826 IOMemoryDescriptorMapAllocRef ref;
827 ref.map = map;
828 ref.tag = tag;
829 ref.options = options;
830 ref.size = size;
831 ref.prot = prot;
832 if (options & kIOMapAnywhere)
833 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
834 ref.mapped = 0;
835 else
836 ref.mapped = addr;
837 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
838 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
839 else
840 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
841 if (KERN_SUCCESS == err)
842 {
843 addr = ref.mapped;
844 map = ref.map;
845 didAlloc = true;
846 }
847 }
848
849 /*
850 * Prefaulting is only possible if we wired the memory earlier. Check the
851 * memory type, and the underlying data.
852 */
853 if (options & kIOMapPrefault)
854 {
855 /*
856 * The memory must have been wired by calling ::prepare(), otherwise
857      * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
858 */
859 assert(map != kernel_map);
860 assert(_wireCount != 0);
861 assert(_memoryEntries != NULL);
862 if ((map == kernel_map) ||
863 (_wireCount == 0) ||
864 (_memoryEntries == NULL))
865 {
866 return kIOReturnBadArgument;
867 }
868
869 // Get the page list.
870 ioGMDData* dataP = getDataP(_memoryEntries);
871 ioPLBlock const* ioplList = getIOPLList(dataP);
872 pageList = getPageList(dataP);
873
874 // Get the number of IOPLs.
875 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
876
877 /*
878 * Scan through the IOPL Info Blocks, looking for the first block containing
879      * the offset. The search will go past it, so we'll need to go back to the
880 * right range at the end.
881 */
882 UInt ioplIndex = 0;
883 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
884 ioplIndex++;
885 ioplIndex--;
886
887 // Retrieve the IOPL info block.
888 ioPLBlock ioplInfo = ioplList[ioplIndex];
889
890 /*
891 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
892 * array.
893 */
894 if (ioplInfo.fFlags & kIOPLExternUPL)
895 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
896 else
897 pageList = &pageList[ioplInfo.fPageInfo];
898
899     // Rebase [offset] into the IOPL in order to look up the first page index.
900 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
901
902 // Retrieve the index of the first page corresponding to the offset.
903 currentPageIndex = atop_32(offsetInIOPL);
904 }
905
906 // enter mappings
907 remain = size;
908 mapAddr = addr;
909 addr += pageOffset;
910
911 while (remain && (KERN_SUCCESS == err))
912 {
913 entryOffset = offset - entry->offset;
914 if ((page_mask & entryOffset) != pageOffset)
915 {
916 err = kIOReturnNotAligned;
917 break;
918 }
919
920 if (kIODefaultCache != cacheMode)
921 {
922 vm_size_t unused = 0;
923 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
924 memEntryCacheMode, NULL, entry->entry);
925 assert (KERN_SUCCESS == err);
926 }
927
928 entryOffset -= pageOffset;
929 if (entryOffset >= entry->size) panic("entryOffset");
930 chunk = entry->size - entryOffset;
931 if (chunk)
932 {
933 if (chunk > remain) chunk = remain;
934 if (options & kIOMapPrefault)
935 {
936 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
937 err = vm_map_enter_mem_object_prefault(map,
938 &mapAddr,
939 chunk, 0 /* mask */,
940 (VM_FLAGS_FIXED
941 | VM_FLAGS_OVERWRITE
942 | VM_MAKE_TAG(tag)
943 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
944 entry->entry,
945 entryOffset,
946 prot, // cur
947 prot, // max
948 &pageList[currentPageIndex],
949 nb_pages);
950
951 // Compute the next index in the page list.
952 currentPageIndex += nb_pages;
953 assert(currentPageIndex <= _pages);
954 }
955 else
956 {
957 err = vm_map_enter_mem_object(map,
958 &mapAddr,
959 chunk, 0 /* mask */,
960 (VM_FLAGS_FIXED
961 | VM_FLAGS_OVERWRITE
962 | VM_MAKE_TAG(tag)
963 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
964 entry->entry,
965 entryOffset,
966 false, // copy
967 prot, // cur
968 prot, // max
969 VM_INHERIT_NONE);
970 }
971 if (KERN_SUCCESS != err) break;
972 remain -= chunk;
973 if (!remain) break;
974 mapAddr += chunk;
975 offset += chunk - pageOffset;
976 }
977 pageOffset = 0;
978 entry++;
979 entryIdx++;
980 if (entryIdx >= ref->count)
981 {
982 err = kIOReturnOverrun;
983 break;
984 }
985 }
986
987 if ((KERN_SUCCESS != err) && didAlloc)
988 {
989 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
990 addr = 0;
991 }
992 *inaddr = addr;
993
994 return (err);
995 }
996
997 IOReturn
998 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
999 IOMemoryReference * ref,
1000 IOByteCount * residentPageCount,
1001 IOByteCount * dirtyPageCount)
1002 {
1003 IOReturn err;
1004 IOMemoryEntry * entries;
1005 unsigned int resident, dirty;
1006 unsigned int totalResident, totalDirty;
1007
1008 totalResident = totalDirty = 0;
1009 err = kIOReturnSuccess;
1010 entries = ref->entries + ref->count;
1011 while (entries > &ref->entries[0])
1012 {
1013 entries--;
1014 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1015 if (KERN_SUCCESS != err) break;
1016 totalResident += resident;
1017 totalDirty += dirty;
1018 }
1019
1020 if (residentPageCount) *residentPageCount = totalResident;
1021 if (dirtyPageCount) *dirtyPageCount = totalDirty;
1022 return (err);
1023 }
1024
1025 IOReturn
1026 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1027 IOMemoryReference * ref,
1028 IOOptionBits newState,
1029 IOOptionBits * oldState)
1030 {
1031 IOReturn err;
1032 IOMemoryEntry * entries;
1033 vm_purgable_t control;
1034 int totalState, state;
1035
1036 totalState = kIOMemoryPurgeableNonVolatile;
1037 err = kIOReturnSuccess;
1038 entries = ref->entries + ref->count;
1039 while (entries > &ref->entries[0])
1040 {
1041 entries--;
1042
1043 err = purgeableControlBits(newState, &control, &state);
1044 if (KERN_SUCCESS != err) break;
1045 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1046 if (KERN_SUCCESS != err) break;
1047 err = purgeableStateBits(&state);
1048 if (KERN_SUCCESS != err) break;
1049
1050 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1051 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1052 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1053 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1054 else totalState = kIOMemoryPurgeableNonVolatile;
1055 }
1056
1057 if (oldState) *oldState = totalState;
1058 return (err);
1059 }
1060
1061 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1062
1063 IOMemoryDescriptor *
1064 IOMemoryDescriptor::withAddress(void * address,
1065 IOByteCount length,
1066 IODirection direction)
1067 {
1068 return IOMemoryDescriptor::
1069 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1070 }
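/*
 * Example (illustrative sketch only; `buf` and `len` are placeholders):
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(buf, len, kIODirectionOutIn);
 *   if (md)
 *   {
 *       // kernel_task descriptors are auto-prepared by this cover (above)
 *       md->release();
 *   }
 */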
1071
1072 #ifndef __LP64__
1073 IOMemoryDescriptor *
1074 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1075 IOByteCount length,
1076 IODirection direction,
1077 task_t task)
1078 {
1079 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1080 if (that)
1081 {
1082 if (that->initWithAddress(address, length, direction, task))
1083 return that;
1084
1085 that->release();
1086 }
1087 return 0;
1088 }
1089 #endif /* !__LP64__ */
1090
1091 IOMemoryDescriptor *
1092 IOMemoryDescriptor::withPhysicalAddress(
1093 IOPhysicalAddress address,
1094 IOByteCount length,
1095 IODirection direction )
1096 {
1097 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1098 }
1099
1100 #ifndef __LP64__
1101 IOMemoryDescriptor *
1102 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1103 UInt32 withCount,
1104 IODirection direction,
1105 task_t task,
1106 bool asReference)
1107 {
1108 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1109 if (that)
1110 {
1111 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1112 return that;
1113
1114 that->release();
1115 }
1116 return 0;
1117 }
1118 #endif /* !__LP64__ */
1119
1120 IOMemoryDescriptor *
1121 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1122 mach_vm_size_t length,
1123 IOOptionBits options,
1124 task_t task)
1125 {
1126 IOAddressRange range = { address, length };
1127 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1128 }
1129
1130 IOMemoryDescriptor *
1131 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1132 UInt32 rangeCount,
1133 IOOptionBits options,
1134 task_t task)
1135 {
1136 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1137 if (that)
1138 {
1139 if (task)
1140 options |= kIOMemoryTypeVirtual64;
1141 else
1142 options |= kIOMemoryTypePhysical64;
1143
1144 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1145 return that;
1146
1147 that->release();
1148 }
1149
1150 return 0;
1151 }
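/*
 * Example (illustrative sketch only; addresses and lengths are placeholders):
 *
 *   IOAddressRange ranges[2] = { { addr0, len0 }, { addr1, len1 } };
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddressRanges(ranges, 2, kIODirectionIn, task);
 *   // a non-NULL task selects kIOMemoryTypeVirtual64, otherwise
 *   // kIOMemoryTypePhysical64 (see above)
 */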
1152
1153
1154 /*
1155 * withOptions:
1156 *
1157 * Create a new IOMemoryDescriptor. The buffer is made up of several
1158 * virtual address ranges, from a given task.
1159 *
1160 * Passing the ranges as a reference will avoid an extra allocation.
1161 */
1162 IOMemoryDescriptor *
1163 IOMemoryDescriptor::withOptions(void * buffers,
1164 UInt32 count,
1165 UInt32 offset,
1166 task_t task,
1167 IOOptionBits opts,
1168 IOMapper * mapper)
1169 {
1170 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1171
1172 if (self
1173 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1174 {
1175 self->release();
1176 return 0;
1177 }
1178
1179 return self;
1180 }
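/*
 * Example (illustrative sketch only): passing the ranges as a reference, per
 * the comment above; the caller's range must then outlive the descriptor.
 *
 *   IOVirtualRange range = { address, length };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *       &range, 1, 0, task,
 *       kIOMemoryTypeVirtual | kIODirectionOutIn | kIOMemoryAsReference,
 *       0);   // mapper
 */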
1181
1182 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1183 UInt32 count,
1184 UInt32 offset,
1185 task_t task,
1186 IOOptionBits options,
1187 IOMapper * mapper)
1188 {
1189 return( false );
1190 }
1191
1192 #ifndef __LP64__
1193 IOMemoryDescriptor *
1194 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1195 UInt32 withCount,
1196 IODirection direction,
1197 bool asReference)
1198 {
1199 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1200 if (that)
1201 {
1202 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1203 return that;
1204
1205 that->release();
1206 }
1207 return 0;
1208 }
1209
1210 IOMemoryDescriptor *
1211 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1212 IOByteCount offset,
1213 IOByteCount length,
1214 IODirection direction)
1215 {
1216 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1217 }
1218 #endif /* !__LP64__ */
1219
1220 IOMemoryDescriptor *
1221 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1222 {
1223 IOGeneralMemoryDescriptor *origGenMD =
1224 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1225
1226 if (origGenMD)
1227 return IOGeneralMemoryDescriptor::
1228 withPersistentMemoryDescriptor(origGenMD);
1229 else
1230 return 0;
1231 }
1232
1233 IOMemoryDescriptor *
1234 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1235 {
1236 IOMemoryReference * memRef;
1237
1238 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1239
1240 if (memRef == originalMD->_memRef)
1241 {
1242 originalMD->retain(); // Add a new reference to ourselves
1243 originalMD->memoryReferenceRelease(memRef);
1244 return originalMD;
1245 }
1246
1247 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1248 IOMDPersistentInitData initData = { originalMD, memRef };
1249
1250 if (self
1251 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1252 self->release();
1253 self = 0;
1254 }
1255 return self;
1256 }
1257
1258 #ifndef __LP64__
1259 bool
1260 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1261 IOByteCount withLength,
1262 IODirection withDirection)
1263 {
1264 _singleRange.v.address = (vm_offset_t) address;
1265 _singleRange.v.length = withLength;
1266
1267 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1268 }
1269
1270 bool
1271 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1272 IOByteCount withLength,
1273 IODirection withDirection,
1274 task_t withTask)
1275 {
1276 _singleRange.v.address = address;
1277 _singleRange.v.length = withLength;
1278
1279 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1280 }
1281
1282 bool
1283 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1284 IOPhysicalAddress address,
1285 IOByteCount withLength,
1286 IODirection withDirection )
1287 {
1288 _singleRange.p.address = address;
1289 _singleRange.p.length = withLength;
1290
1291 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1292 }
1293
1294 bool
1295 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1296 IOPhysicalRange * ranges,
1297 UInt32 count,
1298 IODirection direction,
1299 bool reference)
1300 {
1301 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1302
1303 if (reference)
1304 mdOpts |= kIOMemoryAsReference;
1305
1306 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1307 }
1308
1309 bool
1310 IOGeneralMemoryDescriptor::initWithRanges(
1311 IOVirtualRange * ranges,
1312 UInt32 count,
1313 IODirection direction,
1314 task_t task,
1315 bool reference)
1316 {
1317 IOOptionBits mdOpts = direction;
1318
1319 if (reference)
1320 mdOpts |= kIOMemoryAsReference;
1321
1322 if (task) {
1323 mdOpts |= kIOMemoryTypeVirtual;
1324
1325         // Auto-prepare if this is a kernel memory descriptor, as very few
1326         // clients bother to prepare() kernel memory.
1327         // This was never enforced, however, so it cannot be relied upon.
1328 if (task == kernel_task)
1329 mdOpts |= kIOMemoryAutoPrepare;
1330 }
1331 else
1332 mdOpts |= kIOMemoryTypePhysical;
1333
1334 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1335 }
1336 #endif /* !__LP64__ */
1337
1338 /*
1339 * initWithOptions:
1340 *
1341  * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1342  * address ranges from a given task, several physical ranges, a UPL from the
1343  * UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
1344 *
1345 * Passing the ranges as a reference will avoid an extra allocation.
1346 *
1347 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1348 * existing instance -- note this behavior is not commonly supported in other
1349 * I/O Kit classes, although it is supported here.
1350 */
1351
1352 bool
1353 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1354 UInt32 count,
1355 UInt32 offset,
1356 task_t task,
1357 IOOptionBits options,
1358 IOMapper * mapper)
1359 {
1360 IOOptionBits type = options & kIOMemoryTypeMask;
1361
1362 #ifndef __LP64__
1363 if (task
1364 && (kIOMemoryTypeVirtual == type)
1365 && vm_map_is_64bit(get_task_map(task))
1366 && ((IOVirtualRange *) buffers)->address)
1367 {
1368 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1369 return false;
1370 }
1371 #endif /* !__LP64__ */
1372
1373     // Grab the original MD's configuration data to initialise the
1374     // arguments to this function.
1375 if (kIOMemoryTypePersistentMD == type) {
1376
1377 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1378 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1379 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1380
1381 // Only accept persistent memory descriptors with valid dataP data.
1382 assert(orig->_rangesCount == 1);
1383 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1384 return false;
1385
1386 _memRef = initData->fMemRef; // Grab the new named entry
1387 options = orig->_flags & ~kIOMemoryAsReference;
1388 type = options & kIOMemoryTypeMask;
1389 buffers = orig->_ranges.v;
1390 count = orig->_rangesCount;
1391
1392 // Now grab the original task and whatever mapper was previously used
1393 task = orig->_task;
1394 mapper = dataP->fMapper;
1395
1396 // We are ready to go through the original initialisation now
1397 }
1398
1399 switch (type) {
1400 case kIOMemoryTypeUIO:
1401 case kIOMemoryTypeVirtual:
1402 #ifndef __LP64__
1403 case kIOMemoryTypeVirtual64:
1404 #endif /* !__LP64__ */
1405 assert(task);
1406 if (!task)
1407 return false;
1408 break;
1409
1410 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1411 #ifndef __LP64__
1412 case kIOMemoryTypePhysical64:
1413 #endif /* !__LP64__ */
1414 case kIOMemoryTypeUPL:
1415 assert(!task);
1416 break;
1417 default:
1418 return false; /* bad argument */
1419 }
1420
1421 assert(buffers);
1422 assert(count);
1423
1424 /*
1425 * We can check the _initialized instance variable before having ever set
1426 * it to an initial value because I/O Kit guarantees that all our instance
1427 * variables are zeroed on an object's allocation.
1428 */
1429
1430 if (_initialized) {
1431 /*
1432 * An existing memory descriptor is being retargeted to point to
1433 * somewhere else. Clean up our present state.
1434 */
1435 IOOptionBits type = _flags & kIOMemoryTypeMask;
1436 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1437 {
1438 while (_wireCount)
1439 complete();
1440 }
1441 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1442 {
1443 if (kIOMemoryTypeUIO == type)
1444 uio_free((uio_t) _ranges.v);
1445 #ifndef __LP64__
1446 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1447 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1448 #endif /* !__LP64__ */
1449 else
1450 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1451 }
1452
1453 options |= (kIOMemoryRedirected & _flags);
1454 if (!(kIOMemoryRedirected & options))
1455 {
1456 if (_memRef)
1457 {
1458 memoryReferenceRelease(_memRef);
1459 _memRef = 0;
1460 }
1461 if (_mappings)
1462 _mappings->flushCollection();
1463 }
1464 }
1465 else {
1466 if (!super::init())
1467 return false;
1468 _initialized = true;
1469 }
1470
1471 // Grab the appropriate mapper
1472 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1473 if (kIOMemoryMapperNone & options)
1474 mapper = 0; // No Mapper
1475 else if (mapper == kIOMapperSystem) {
1476 IOMapper::checkForSystemMapper();
1477 gIOSystemMapper = mapper = IOMapper::gSystem;
1478 }
1479
1480 // Remove the dynamic internal use flags from the initial setting
1481 options &= ~(kIOMemoryPreparedReadOnly);
1482 _flags = options;
1483 _task = task;
1484
1485 #ifndef __LP64__
1486 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1487 #endif /* !__LP64__ */
1488
1489 __iomd_reservedA = 0;
1490 __iomd_reservedB = 0;
1491 _highestPage = 0;
1492
1493 if (kIOMemoryThreadSafe & options)
1494 {
1495 if (!_prepareLock)
1496 _prepareLock = IOLockAlloc();
1497 }
1498 else if (_prepareLock)
1499 {
1500 IOLockFree(_prepareLock);
1501 _prepareLock = NULL;
1502 }
1503
1504 if (kIOMemoryTypeUPL == type) {
1505
1506 ioGMDData *dataP;
1507 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1508
1509 if (!initMemoryEntries(dataSize, mapper)) return (false);
1510 dataP = getDataP(_memoryEntries);
1511 dataP->fPageCnt = 0;
1512
1513 // _wireCount++; // UPLs start out life wired
1514
1515 _length = count;
1516 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1517
1518 ioPLBlock iopl;
1519 iopl.fIOPL = (upl_t) buffers;
1520 upl_set_referenced(iopl.fIOPL, true);
1521 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1522
1523 if (upl_get_size(iopl.fIOPL) < (count + offset))
1524 panic("short external upl");
1525
1526 _highestPage = upl_get_highest_page(iopl.fIOPL);
1527
1528         // Set the flag kIOPLOnDevice, conveniently equal to 1
1529 iopl.fFlags = pageList->device | kIOPLExternUPL;
1530 if (!pageList->device) {
1531 // Pre-compute the offset into the UPL's page list
1532 pageList = &pageList[atop_32(offset)];
1533 offset &= PAGE_MASK;
1534 }
1535 iopl.fIOMDOffset = 0;
1536 iopl.fMappedPage = 0;
1537 iopl.fPageInfo = (vm_address_t) pageList;
1538 iopl.fPageOffset = offset;
1539 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1540 }
1541 else {
1542 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1543 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1544
1545 // Initialize the memory descriptor
1546 if (options & kIOMemoryAsReference) {
1547 #ifndef __LP64__
1548 _rangesIsAllocated = false;
1549 #endif /* !__LP64__ */
1550
1551     // Hack assignment to get the buffer arg into _ranges.
1552     // I'd prefer to do _ranges = (Ranges) buffers, but C++ does not
1553     // allow that conversion.
1554 // This also initialises the uio & physical ranges.
1555 _ranges.v = (IOVirtualRange *) buffers;
1556 }
1557 else {
1558 #ifndef __LP64__
1559 _rangesIsAllocated = true;
1560 #endif /* !__LP64__ */
1561 switch (type)
1562 {
1563 case kIOMemoryTypeUIO:
1564 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1565 break;
1566
1567 #ifndef __LP64__
1568 case kIOMemoryTypeVirtual64:
1569 case kIOMemoryTypePhysical64:
1570 if (count == 1
1571 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1572 ) {
1573 if (kIOMemoryTypeVirtual64 == type)
1574 type = kIOMemoryTypeVirtual;
1575 else
1576 type = kIOMemoryTypePhysical;
1577 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1578 _rangesIsAllocated = false;
1579 _ranges.v = &_singleRange.v;
1580 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1581 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1582 break;
1583 }
1584 _ranges.v64 = IONew(IOAddressRange, count);
1585 if (!_ranges.v64)
1586 return false;
1587 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1588 break;
1589 #endif /* !__LP64__ */
1590 case kIOMemoryTypeVirtual:
1591 case kIOMemoryTypePhysical:
1592 if (count == 1) {
1593 _flags |= kIOMemoryAsReference;
1594 #ifndef __LP64__
1595 _rangesIsAllocated = false;
1596 #endif /* !__LP64__ */
1597 _ranges.v = &_singleRange.v;
1598 } else {
1599 _ranges.v = IONew(IOVirtualRange, count);
1600 if (!_ranges.v)
1601 return false;
1602 }
1603 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1604 break;
1605 }
1606 }
1607
1608 // Find starting address within the vector of ranges
1609 Ranges vec = _ranges;
1610 mach_vm_size_t totalLength = 0;
1611 unsigned int ind, pages = 0;
1612 for (ind = 0; ind < count; ind++) {
1613 mach_vm_address_t addr;
1614 mach_vm_address_t endAddr;
1615 mach_vm_size_t len;
1616
1617 // addr & len are returned by this function
1618 getAddrLenForInd(addr, len, type, vec, ind);
1619 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
1620 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
1621 if (os_add_overflow(totalLength, len, &totalLength)) break;
1622 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1623 {
1624 ppnum_t highPage = atop_64(addr + len - 1);
1625 if (highPage > _highestPage)
1626 _highestPage = highPage;
1627 }
1628 }
1629 if ((ind < count)
1630 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1631
1632 _length = totalLength;
1633 _pages = pages;
1634 _rangesCount = count;
1635
1636 // Auto-prepare memory at creation time.
1637     // Implied completion when descriptor is freed
1638 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1639 _wireCount++; // Physical MDs are, by definition, wired
1640 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1641 ioGMDData *dataP;
1642 unsigned dataSize;
1643
1644 if (_pages > atop_64(max_mem)) return false;
1645
1646 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1647 if (!initMemoryEntries(dataSize, mapper)) return false;
1648 dataP = getDataP(_memoryEntries);
1649 dataP->fPageCnt = _pages;
1650
1651 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1652 {
1653 IOReturn
1654 err = memoryReferenceCreate(0, &_memRef);
1655 if (kIOReturnSuccess != err) return false;
1656 }
1657
1658 if ((_flags & kIOMemoryAutoPrepare)
1659 && prepare() != kIOReturnSuccess)
1660 return false;
1661 }
1662 }
1663
1664 return true;
1665 }
1666
1667 /*
1668 * free
1669 *
1670 * Free resources.
1671 */
1672 void IOGeneralMemoryDescriptor::free()
1673 {
1674 IOOptionBits type = _flags & kIOMemoryTypeMask;
1675
1676 if( reserved)
1677 {
1678 LOCK;
1679 reserved->dp.memory = 0;
1680 UNLOCK;
1681 }
1682 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1683 {
1684 ioGMDData * dataP;
1685 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1686 {
1687 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
1688 dataP->fMappedBase = 0;
1689 }
1690 }
1691 else
1692 {
1693 while (_wireCount) complete();
1694 }
1695
1696 if (_memoryEntries) _memoryEntries->release();
1697
1698 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1699 {
1700 if (kIOMemoryTypeUIO == type)
1701 uio_free((uio_t) _ranges.v);
1702 #ifndef __LP64__
1703 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1704 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1705 #endif /* !__LP64__ */
1706 else
1707 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1708
1709 _ranges.v = NULL;
1710 }
1711
1712 if (reserved)
1713 {
1714 if (reserved->dp.devicePager)
1715 {
1716         // memEntry holds a ref on the device pager, which owns reserved
1717         // (IOMemoryDescriptorReserved), so reserved must not be accessed after this point
1718 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1719 }
1720 else
1721 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1722 reserved = NULL;
1723 }
1724
1725 if (_memRef) memoryReferenceRelease(_memRef);
1726 if (_prepareLock) IOLockFree(_prepareLock);
1727
1728 super::free();
1729 }
1730
1731 #ifndef __LP64__
1732 void IOGeneralMemoryDescriptor::unmapFromKernel()
1733 {
1734 panic("IOGMD::unmapFromKernel deprecated");
1735 }
1736
1737 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1738 {
1739 panic("IOGMD::mapIntoKernel deprecated");
1740 }
1741 #endif /* !__LP64__ */
1742
1743 /*
1744 * getDirection:
1745 *
1746 * Get the direction of the transfer.
1747 */
1748 IODirection IOMemoryDescriptor::getDirection() const
1749 {
1750 #ifndef __LP64__
1751 if (_direction)
1752 return _direction;
1753 #endif /* !__LP64__ */
1754 return (IODirection) (_flags & kIOMemoryDirectionMask);
1755 }
1756
1757 /*
1758 * getLength:
1759 *
1760 * Get the length of the transfer (over all ranges).
1761 */
1762 IOByteCount IOMemoryDescriptor::getLength() const
1763 {
1764 return _length;
1765 }
1766
1767 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1768 {
1769 _tag = tag;
1770 }
1771
1772 IOOptionBits IOMemoryDescriptor::getTag( void )
1773 {
1774 return( _tag);
1775 }
1776
1777 #ifndef __LP64__
1778 #pragma clang diagnostic push
1779 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1780
1781 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1782 IOPhysicalAddress
1783 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1784 {
1785 addr64_t physAddr = 0;
1786
1787 if( prepare() == kIOReturnSuccess) {
1788 physAddr = getPhysicalSegment64( offset, length );
1789 complete();
1790 }
1791
1792 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1793 }
1794
1795 #pragma clang diagnostic pop
1796
1797 #endif /* !__LP64__ */
1798
1799 IOByteCount IOMemoryDescriptor::readBytes
1800 (IOByteCount offset, void *bytes, IOByteCount length)
1801 {
1802 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1803 IOByteCount remaining;
1804
1805     // Assert that this entire I/O is within the available range
1806 assert(offset <= _length);
1807 assert(offset + length <= _length);
1808 if ((offset >= _length)
1809 || ((offset + length) > _length)) {
1810 return 0;
1811 }
1812
1813 if (kIOMemoryThreadSafe & _flags)
1814 LOCK;
1815
1816 remaining = length = min(length, _length - offset);
1817 while (remaining) { // (process another target segment?)
1818 addr64_t srcAddr64;
1819 IOByteCount srcLen;
1820
1821 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1822 if (!srcAddr64)
1823 break;
1824
1825 // Clip segment length to remaining
1826 if (srcLen > remaining)
1827 srcLen = remaining;
1828
1829 copypv(srcAddr64, dstAddr, srcLen,
1830 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1831
1832 dstAddr += srcLen;
1833 offset += srcLen;
1834 remaining -= srcLen;
1835 }
1836
1837 if (kIOMemoryThreadSafe & _flags)
1838 UNLOCK;
1839
1840 assert(!remaining);
1841
1842 return length - remaining;
1843 }
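/*
 * Example (illustrative sketch only): copy the first bytes of a descriptor's
 * memory into a local kernel buffer. The return value is the count actually
 * copied (0 if the offset/length fall outside the descriptor).
 *
 *   uint8_t local[64];
 *   IOByteCount copied = md->readBytes(0, local, sizeof(local));
 */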
1844
1845 IOByteCount IOMemoryDescriptor::writeBytes
1846 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1847 {
1848 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1849 IOByteCount remaining;
1850 IOByteCount offset = inoffset;
1851
1852     // Assert that this entire I/O is within the available range
1853 assert(offset <= _length);
1854 assert(offset + length <= _length);
1855
1856 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1857
1858 if ( (kIOMemoryPreparedReadOnly & _flags)
1859 || (offset >= _length)
1860 || ((offset + length) > _length)) {
1861 return 0;
1862 }
1863
1864 if (kIOMemoryThreadSafe & _flags)
1865 LOCK;
1866
1867 remaining = length = min(length, _length - offset);
1868 while (remaining) { // (process another target segment?)
1869 addr64_t dstAddr64;
1870 IOByteCount dstLen;
1871
1872 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1873 if (!dstAddr64)
1874 break;
1875
1876 // Clip segment length to remaining
1877 if (dstLen > remaining)
1878 dstLen = remaining;
1879
1880 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1881 else
1882 {
1883 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1884 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1885 srcAddr += dstLen;
1886 }
1887 offset += dstLen;
1888 remaining -= dstLen;
1889 }
1890
1891 if (kIOMemoryThreadSafe & _flags)
1892 UNLOCK;
1893
1894 assert(!remaining);
1895
1896 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1897
1898 return length - remaining;
1899 }
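/*
 * Example (illustrative sketch only): per the code above, a NULL source
 * zero-fills the target range and then flushes it.
 *
 *   md->writeBytes(offset, NULL, length);    // zero `length` bytes at `offset`
 *   md->writeBytes(offset, srcBuf, length);  // copy from kernel buffer srcBuf
 */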
1900
1901 #ifndef __LP64__
1902 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1903 {
1904 panic("IOGMD::setPosition deprecated");
1905 }
1906 #endif /* !__LP64__ */
1907
1908 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1909
1910 uint64_t
1911 IOGeneralMemoryDescriptor::getPreparationID( void )
1912 {
1913 ioGMDData *dataP;
1914
1915 if (!_wireCount)
1916 return (kIOPreparationIDUnprepared);
1917
1918 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1919 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1920 {
1921 IOMemoryDescriptor::setPreparationID();
1922 return (IOMemoryDescriptor::getPreparationID());
1923 }
1924
1925 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1926 return (kIOPreparationIDUnprepared);
1927
1928 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1929 {
1930 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1931 }
1932 return (dataP->fPreparationID);
1933 }
1934
1935 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1936 {
1937 if (!reserved)
1938 {
1939 reserved = IONew(IOMemoryDescriptorReserved, 1);
1940 if (reserved)
1941 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1942 }
1943 return (reserved);
1944 }
1945
1946 void IOMemoryDescriptor::setPreparationID( void )
1947 {
1948 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1949 {
1950 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1951 }
1952 }
1953
1954 uint64_t IOMemoryDescriptor::getPreparationID( void )
1955 {
1956 if (reserved)
1957 return (reserved->preparationID);
1958 else
1959 return (kIOPreparationIDUnsupported);
1960 }
1961
1962 void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
1963 {
1964 if (!getKernelReserved()) return;
1965 reserved->kernelTag = kernelTag;
1966 reserved->userTag = userTag;
1967 }
1968
1969 vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
1970 {
1971 if (!reserved
1972 || (VM_KERN_MEMORY_NONE == reserved->kernelTag)
1973 || (VM_KERN_MEMORY_NONE == reserved->userTag))
1974 {
1975 return (IOMemoryTag(map));
1976 }
1977
1978 if (vm_kernel_map_is_kernel(map)) return (reserved->kernelTag);
1979 return (reserved->userTag);
1980 }
1981
1982 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1983 {
1984 IOReturn err = kIOReturnSuccess;
1985 DMACommandOps params;
1986 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1987 ioGMDData *dataP;
1988
1989 params = (op & ~kIOMDDMACommandOperationMask & op);
1990 op &= kIOMDDMACommandOperationMask;
1991
1992 if (kIOMDDMAMap == op)
1993 {
1994 if (dataSize < sizeof(IOMDDMAMapArgs))
1995 return kIOReturnUnderrun;
1996
1997 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1998
1999 if (!_memoryEntries
2000 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2001
2002 if (_memoryEntries && data->fMapper)
2003 {
2004 bool remap, keepMap;
2005 dataP = getDataP(_memoryEntries);
2006
2007 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2008 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2009
2010 keepMap = (data->fMapper == gIOSystemMapper);
2011 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2012
2013 remap = (!keepMap);
2014 remap |= (dataP->fDMAMapNumAddressBits < 64)
2015 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2016 remap |= (dataP->fDMAMapAlignment > page_size);
2017
2018 if (remap || !dataP->fMappedBase)
2019 {
2020 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2021 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2022 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
2023 {
2024 dataP->fMappedBase = data->fAlloc;
2025 dataP->fMappedLength = data->fAllocLength;
2026 data->fAllocLength = 0; // IOMD owns the alloc now
2027 }
2028 }
2029 else
2030 {
2031 data->fAlloc = dataP->fMappedBase;
2032 data->fAllocLength = 0; // give out IOMD map
2033 }
2034 data->fMapContig = !dataP->fDiscontig;
2035 }
2036
2037 return (err);
2038 }
2039
2040 if (kIOMDAddDMAMapSpec == op)
2041 {
2042 if (dataSize < sizeof(IODMAMapSpecification))
2043 return kIOReturnUnderrun;
2044
2045 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2046
2047 if (!_memoryEntries
2048 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2049
2050 if (_memoryEntries)
2051 {
2052 dataP = getDataP(_memoryEntries);
2053 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2054 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2055 if (data->alignment > dataP->fDMAMapAlignment)
2056 dataP->fDMAMapAlignment = data->alignment;
2057 }
2058 return kIOReturnSuccess;
2059 }
2060
2061 if (kIOMDGetCharacteristics == op) {
2062
2063 if (dataSize < sizeof(IOMDDMACharacteristics))
2064 return kIOReturnUnderrun;
2065
2066 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2067 data->fLength = _length;
2068 data->fSGCount = _rangesCount;
2069 data->fPages = _pages;
2070 data->fDirection = getDirection();
2071 if (!_wireCount)
2072 data->fIsPrepared = false;
2073 else {
2074 data->fIsPrepared = true;
2075 data->fHighestPage = _highestPage;
2076 if (_memoryEntries)
2077 {
2078 dataP = getDataP(_memoryEntries);
2079 ioPLBlock *ioplList = getIOPLList(dataP);
2080 UInt count = getNumIOPL(_memoryEntries, dataP);
2081 if (count == 1)
2082 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2083 }
2084 }
2085
2086 return kIOReturnSuccess;
2087
2088 } else if (kIOMDWalkSegments != op)
2089 return kIOReturnBadArgument;
2090
2091 // Get the next segment
2092 struct InternalState {
2093 IOMDDMAWalkSegmentArgs fIO;
2094 UInt fOffset2Index;
2095 UInt fIndex;
2096 UInt fNextOffset;
2097 } *isP;
2098
2099 // Find the next segment
2100 if (dataSize < sizeof(*isP))
2101 return kIOReturnUnderrun;
2102
2103 isP = (InternalState *) vData;
2104 UInt offset = isP->fIO.fOffset;
2105 bool mapped = isP->fIO.fMapped;
2106
2107 if (IOMapper::gSystem && mapped
2108 && (!(kIOMemoryHostOnly & _flags))
2109 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2110 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2111 {
2112 if (!_memoryEntries
2113 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2114
2115 dataP = getDataP(_memoryEntries);
2116 if (dataP->fMapper)
2117 {
2118 IODMAMapSpecification mapSpec;
2119 bzero(&mapSpec, sizeof(mapSpec));
2120 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2121 mapSpec.alignment = dataP->fDMAMapAlignment;
2122 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2123 if (kIOReturnSuccess != err) return (err);
2124 }
2125 }
2126
2127 if (offset >= _length)
2128 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2129
2130 // Validate the previous offset
2131 UInt ind, off2Ind = isP->fOffset2Index;
2132 if (!params
2133 && offset
2134 && (offset == isP->fNextOffset || off2Ind <= offset))
2135 ind = isP->fIndex;
2136 else
2137 ind = off2Ind = 0; // Start from beginning
2138
2139 UInt length;
2140 UInt64 address;
2141
2142
2143 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2144
2145 // Physical address based memory descriptor
2146 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2147
2148 // Find the range after the one that contains the offset
2149 mach_vm_size_t len;
2150 for (len = 0; off2Ind <= offset; ind++) {
2151 len = physP[ind].length;
2152 off2Ind += len;
2153 }
2154
2155 // Calculate length within range and starting address
2156 length = off2Ind - offset;
2157 address = physP[ind - 1].address + len - length;
2158
2159 if (mapped && _memoryEntries
2160 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2161 {
2162 address = dataP->fMappedBase + offset;
2163 }
2164 else
2165 {
2166 // see how far we can coalesce ranges
2167 while (ind < _rangesCount && address + length == physP[ind].address) {
2168 len = physP[ind].length;
2169 length += len;
2170 off2Ind += len;
2171 ind++;
2172 }
2173 }
2174
2175 // correct contiguous check overshoot
2176 ind--;
2177 off2Ind -= len;
2178 }
2179 #ifndef __LP64__
2180 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2181
2182 // Physical address based memory descriptor
2183 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2184
2185 // Find the range after the one that contains the offset
2186 mach_vm_size_t len;
2187 for (len = 0; off2Ind <= offset; ind++) {
2188 len = physP[ind].length;
2189 off2Ind += len;
2190 }
2191
2192 // Calculate length within range and starting address
2193 length = off2Ind - offset;
2194 address = physP[ind - 1].address + len - length;
2195
2196 if (mapped && _memoryEntries
2197 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2198 {
2199 address = dataP->fMappedBase + offset;
2200 }
2201 else
2202 {
2203 // see how far we can coalesce ranges
2204 while (ind < _rangesCount && address + length == physP[ind].address) {
2205 len = physP[ind].length;
2206 length += len;
2207 off2Ind += len;
2208 ind++;
2209 }
2210 }
2211 // correct contiguous check overshoot
2212 ind--;
2213 off2Ind -= len;
2214 }
2215 #endif /* !__LP64__ */
2216 else do {
2217 if (!_wireCount)
2218 panic("IOGMD: not wired for the IODMACommand");
2219
2220 assert(_memoryEntries);
2221
2222 dataP = getDataP(_memoryEntries);
2223 const ioPLBlock *ioplList = getIOPLList(dataP);
2224 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2225 upl_page_info_t *pageList = getPageList(dataP);
2226
2227 assert(numIOPLs > 0);
2228
2229 // Scan through iopl info blocks looking for block containing offset
2230 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2231 ind++;
2232
2233 // Go back to actual range as search goes past it
2234 ioPLBlock ioplInfo = ioplList[ind - 1];
2235 off2Ind = ioplInfo.fIOMDOffset;
2236
2237 if (ind < numIOPLs)
2238 length = ioplList[ind].fIOMDOffset;
2239 else
2240 length = _length;
2241 length -= offset; // Remainder within iopl
2242
2243 // Subtract this iopl's starting offset within the total list
2244 offset -= off2Ind;
2245
2246 // If a mapped address is requested and this is a pre-mapped IOPL
2247 // then just need to compute an offset relative to the mapped base.
2248 if (mapped && dataP->fMappedBase) {
2249 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2250 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2251 continue; // Done - leave the do/while(false) now
2252 }
2253
2254 // The offset is rebased into the current iopl.
2255 // Now add the iopl 1st page offset.
2256 offset += ioplInfo.fPageOffset;
2257
2258 // For external UPLs the fPageInfo field points directly to
2259 // the upl's upl_page_info_t array.
2260 if (ioplInfo.fFlags & kIOPLExternUPL)
2261 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2262 else
2263 pageList = &pageList[ioplInfo.fPageInfo];
2264
2265 // Check for direct device non-paged memory
2266 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2267 address = ptoa_64(pageList->phys_addr) + offset;
2268 continue; // Done - leave the do/while(false) now
2269 }
2270
2271 // Now we need to compute the index into the pageList
2272 UInt pageInd = atop_32(offset);
2273 offset &= PAGE_MASK;
2274
2275 // Compute the starting address of this segment
2276 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2277 if (!pageAddr) {
2278 panic("!pageList phys_addr");
2279 }
2280
2281 address = ptoa_64(pageAddr) + offset;
2282
2283 // length is currently set to the length of the remainder of the iopl.
2284 // We need to check that the remainder of the iopl is contiguous.
2285 // This is indicated by pageList[pageInd].phys_addr being sequential.
2286 IOByteCount contigLength = PAGE_SIZE - offset;
2287 while (contigLength < length
2288 && ++pageAddr == pageList[++pageInd].phys_addr)
2289 {
2290 contigLength += PAGE_SIZE;
2291 }
2292
2293 if (contigLength < length)
2294 length = contigLength;
2295
2296
2297 assert(address);
2298 assert(length);
2299
2300 } while (false);
2301
2302 // Update return values and state
2303 isP->fIO.fIOVMAddr = address;
2304 isP->fIO.fLength = length;
2305 isP->fIndex = ind;
2306 isP->fOffset2Index = off2Ind;
2307 isP->fNextOffset = isP->fIO.fOffset + length;
2308
2309 return kIOReturnSuccess;
2310 }
2311
2312 addr64_t
2313 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2314 {
2315 IOReturn ret;
2316 mach_vm_address_t address = 0;
2317 mach_vm_size_t length = 0;
2318 IOMapper * mapper = gIOSystemMapper;
2319 IOOptionBits type = _flags & kIOMemoryTypeMask;
2320
2321 if (lengthOfSegment)
2322 *lengthOfSegment = 0;
2323
2324 if (offset >= _length)
2325 return 0;
2326
2327 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2328 // support the unwired memory case in IOGeneralMemoryDescriptor. hibernate_write_image() cannot use
2329 // map()->getVirtualAddress() to obtain the kernel pointer either, since it must avoid the memory allocation
2330 // an IOMemoryMap would require. So _kIOMemorySourceSegment remains a necessary evil until this is cleaned up.
2331
2332 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2333 {
2334 unsigned rangesIndex = 0;
2335 Ranges vec = _ranges;
2336 mach_vm_address_t addr;
2337
2338 // Find starting address within the vector of ranges
2339 for (;;) {
2340 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2341 if (offset < length)
2342 break;
2343 offset -= length; // (make offset relative)
2344 rangesIndex++;
2345 }
2346
2347 // Now that we have the starting range,
2348 // let's find the last contiguous range
2349 addr += offset;
2350 length -= offset;
2351
2352 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2353 mach_vm_address_t newAddr;
2354 mach_vm_size_t newLen;
2355
2356 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2357 if (addr + length != newAddr)
2358 break;
2359 length += newLen;
2360 }
2361 if (addr)
2362 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2363 }
2364 else
2365 {
2366 IOMDDMAWalkSegmentState _state;
2367 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2368
2369 state->fOffset = offset;
2370 state->fLength = _length - offset;
2371 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2372
2373 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2374
2375 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2376 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2377 ret, this, state->fOffset,
2378 state->fIOVMAddr, state->fLength);
2379 if (kIOReturnSuccess == ret)
2380 {
2381 address = state->fIOVMAddr;
2382 length = state->fLength;
2383 }
2384
2385 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2386 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2387
2388 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2389 {
2390 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2391 {
2392 addr64_t origAddr = address;
2393 IOByteCount origLen = length;
2394
2395 address = mapper->mapToPhysicalAddress(origAddr);
2396 length = page_size - (address & (page_size - 1));
2397 while ((length < origLen)
2398 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2399 length += page_size;
2400 if (length > origLen)
2401 length = origLen;
2402 }
2403 }
2404 }
2405
2406 if (!address)
2407 length = 0;
2408
2409 if (lengthOfSegment)
2410 *lengthOfSegment = length;
2411
2412 return (address);
2413 }
2414
2415 #ifndef __LP64__
2416 #pragma clang diagnostic push
2417 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2418
2419 addr64_t
2420 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2421 {
2422 addr64_t address = 0;
2423
2424 if (options & _kIOMemorySourceSegment)
2425 {
2426 address = getSourceSegment(offset, lengthOfSegment);
2427 }
2428 else if (options & kIOMemoryMapperNone)
2429 {
2430 address = getPhysicalSegment64(offset, lengthOfSegment);
2431 }
2432 else
2433 {
2434 address = getPhysicalSegment(offset, lengthOfSegment);
2435 }
2436
2437 return (address);
2438 }
2439 #pragma clang diagnostic pop
2440
2441 addr64_t
2442 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2443 {
2444 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2445 }
2446
2447 IOPhysicalAddress
2448 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2449 {
2450 addr64_t address = 0;
2451 IOByteCount length = 0;
2452
2453 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2454
2455 if (lengthOfSegment)
2456 length = *lengthOfSegment;
2457
2458 if ((address + length) > 0x100000000ULL)
2459 {
2460 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2461 address, (long) length, (getMetaClass())->getClassName());
2462 }
2463
2464 return ((IOPhysicalAddress) address);
2465 }
2466
2467 addr64_t
2468 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2469 {
2470 IOPhysicalAddress phys32;
2471 IOByteCount length;
2472 addr64_t phys64;
2473 IOMapper * mapper = 0;
2474
2475 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2476 if (!phys32)
2477 return 0;
2478
2479 if (gIOSystemMapper)
2480 mapper = gIOSystemMapper;
2481
2482 if (mapper)
2483 {
2484 IOByteCount origLen;
2485
2486 phys64 = mapper->mapToPhysicalAddress(phys32);
2487 origLen = *lengthOfSegment;
2488 length = page_size - (phys64 & (page_size - 1));
2489 while ((length < origLen)
2490 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2491 length += page_size;
2492 if (length > origLen)
2493 length = origLen;
2494
2495 *lengthOfSegment = length;
2496 }
2497 else
2498 phys64 = (addr64_t) phys32;
2499
2500 return phys64;
2501 }
2502
2503 IOPhysicalAddress
2504 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2505 {
2506 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2507 }
2508
2509 IOPhysicalAddress
2510 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2511 {
2512 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2513 }
2514
2515 #pragma clang diagnostic push
2516 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2517
2518 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2519 IOByteCount * lengthOfSegment)
2520 {
2521 if (_task == kernel_task)
2522 return (void *) getSourceSegment(offset, lengthOfSegment);
2523 else
2524 panic("IOGMD::getVirtualSegment deprecated");
2525
2526 return 0;
2527 }
2528 #pragma clang diagnostic pop
2529 #endif /* !__LP64__ */
2530
2531 IOReturn
2532 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2533 {
2534 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2535 DMACommandOps params;
2536 IOReturn err;
2537
2538 params = (op & ~kIOMDDMACommandOperationMask & op);
2539 op &= kIOMDDMACommandOperationMask;
2540
2541 if (kIOMDGetCharacteristics == op) {
2542 if (dataSize < sizeof(IOMDDMACharacteristics))
2543 return kIOReturnUnderrun;
2544
2545 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2546 data->fLength = getLength();
2547 data->fSGCount = 0;
2548 data->fDirection = getDirection();
2549 data->fIsPrepared = true; // Assume prepared - fails safe
2550 }
2551 else if (kIOMDWalkSegments == op) {
2552 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2553 return kIOReturnUnderrun;
2554
2555 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2556 IOByteCount offset = (IOByteCount) data->fOffset;
2557
2558 IOPhysicalLength length;
2559 if (data->fMapped && IOMapper::gSystem)
2560 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2561 else
2562 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2563 data->fLength = length;
2564 }
2565 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2566 else if (kIOMDDMAMap == op)
2567 {
2568 if (dataSize < sizeof(IOMDDMAMapArgs))
2569 return kIOReturnUnderrun;
2570 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2571
2572 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2573
2574 data->fMapContig = true;
2575 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2576 return (err);
2577 }
2578 else return kIOReturnBadArgument;
2579
2580 return kIOReturnSuccess;
2581 }
2582
2583 IOReturn
2584 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2585 IOOptionBits * oldState )
2586 {
2587 IOReturn err = kIOReturnSuccess;
2588
2589 vm_purgable_t control;
2590 int state;
2591
2592 if (_memRef)
2593 {
2594 err = super::setPurgeable(newState, oldState);
2595 }
2596 else
2597 {
2598 if (kIOMemoryThreadSafe & _flags)
2599 LOCK;
2600 do
2601 {
2602 // Find the appropriate vm_map for the given task
2603 vm_map_t curMap;
2604 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2605 {
2606 err = kIOReturnNotReady;
2607 break;
2608 }
2609 else if (!_task)
2610 {
2611 err = kIOReturnUnsupported;
2612 break;
2613 }
2614 else
2615 curMap = get_task_map(_task);
2616
2617 // can only do one range
2618 Ranges vec = _ranges;
2619 IOOptionBits type = _flags & kIOMemoryTypeMask;
2620 mach_vm_address_t addr;
2621 mach_vm_size_t len;
2622 getAddrLenForInd(addr, len, type, vec, 0);
2623
2624 err = purgeableControlBits(newState, &control, &state);
2625 if (kIOReturnSuccess != err)
2626 break;
2627 err = mach_vm_purgable_control(curMap, addr, control, &state);
2628 if (oldState)
2629 {
2630 if (kIOReturnSuccess == err)
2631 {
2632 err = purgeableStateBits(&state);
2633 *oldState = state;
2634 }
2635 }
2636 }
2637 while (false);
2638 if (kIOMemoryThreadSafe & _flags)
2639 UNLOCK;
2640 }
2641
2642 return (err);
2643 }
2644
2645 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2646 IOOptionBits * oldState )
2647 {
2648 IOReturn err = kIOReturnNotReady;
2649
2650 if (kIOMemoryThreadSafe & _flags) LOCK;
2651 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2652 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2653
2654 return (err);
2655 }
2656
2657 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2658 IOByteCount * dirtyPageCount )
2659 {
2660 IOReturn err = kIOReturnNotReady;
2661
2662 if (kIOMemoryThreadSafe & _flags) LOCK;
2663 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2664 else
2665 {
2666 IOMultiMemoryDescriptor * mmd;
2667 IOSubMemoryDescriptor * smd;
2668 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2669 {
2670 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2671 }
2672 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2673 {
2674 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2675 }
2676 }
2677 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2678
2679 return (err);
2680 }
2681
2682
2683 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2684 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2685
2686 static void SetEncryptOp(addr64_t pa, unsigned int count)
2687 {
2688 ppnum_t page, end;
2689
2690 page = atop_64(round_page_64(pa));
2691 end = atop_64(trunc_page_64(pa + count));
2692 for (; page < end; page++)
2693 {
2694 pmap_clear_noencrypt(page);
2695 }
2696 }
2697
2698 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2699 {
2700 ppnum_t page, end;
2701
2702 page = atop_64(round_page_64(pa));
2703 end = atop_64(trunc_page_64(pa + count));
2704 for (; page < end; page++)
2705 {
2706 pmap_set_noencrypt(page);
2707 }
2708 }
2709
2710 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2711 IOByteCount offset, IOByteCount length )
2712 {
2713 IOByteCount remaining;
2714 unsigned int res;
2715 void (*func)(addr64_t pa, unsigned int count) = 0;
2716
2717 switch (options)
2718 {
2719 case kIOMemoryIncoherentIOFlush:
2720 func = &dcache_incoherent_io_flush64;
2721 break;
2722 case kIOMemoryIncoherentIOStore:
2723 func = &dcache_incoherent_io_store64;
2724 break;
2725
2726 case kIOMemorySetEncrypted:
2727 func = &SetEncryptOp;
2728 break;
2729 case kIOMemoryClearEncrypted:
2730 func = &ClearEncryptOp;
2731 break;
2732 }
2733
2734 if (!func)
2735 return (kIOReturnUnsupported);
2736
2737 if (kIOMemoryThreadSafe & _flags)
2738 LOCK;
2739
2740 res = 0x0UL;
2741 remaining = length = min(length, getLength() - offset);
2742 while (remaining)
2743 // (process another target segment?)
2744 {
2745 addr64_t dstAddr64;
2746 IOByteCount dstLen;
2747
2748 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2749 if (!dstAddr64)
2750 break;
2751
2752 // Clip segment length to remaining
2753 if (dstLen > remaining)
2754 dstLen = remaining;
2755
2756 (*func)(dstAddr64, dstLen);
2757
2758 offset += dstLen;
2759 remaining -= dstLen;
2760 }
2761
2762 if (kIOMemoryThreadSafe & _flags)
2763 UNLOCK;
2764
2765 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2766 }
2767
2768 /*
2769 *
2770 */
2771
2772 #if defined(__i386__) || defined(__x86_64__)
2773
2774 #define io_kernel_static_start vm_kernel_stext
2775 #define io_kernel_static_end vm_kernel_etext
2776
2777 #else
2778 #error io_kernel_static_end is undefined for this architecture
2779 #endif
2780
2781 static kern_return_t
2782 io_get_kernel_static_upl(
2783 vm_map_t /* map */,
2784 uintptr_t offset,
2785 upl_size_t *upl_size,
2786 upl_t *upl,
2787 upl_page_info_array_t page_list,
2788 unsigned int *count,
2789 ppnum_t *highest_page)
2790 {
2791 unsigned int pageCount, page;
2792 ppnum_t phys;
2793 ppnum_t highestPage = 0;
2794
2795 pageCount = atop_32(*upl_size);
2796 if (pageCount > *count)
2797 pageCount = *count;
2798
2799 *upl = NULL;
2800
2801 for (page = 0; page < pageCount; page++)
2802 {
2803 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2804 if (!phys)
2805 break;
2806 page_list[page].phys_addr = phys;
2807 page_list[page].free_when_done = 0;
2808 page_list[page].absent = 0;
2809 page_list[page].dirty = 0;
2810 page_list[page].precious = 0;
2811 page_list[page].device = 0;
2812 if (phys > highestPage)
2813 highestPage = phys;
2814 }
2815
2816 *highest_page = highestPage;
2817
2818 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2819 }
2820
2821 /*
2822 *
2823 */
2824 #if IOTRACKING
2825 static void
2826 IOMemoryDescriptorUpdateWireOwner(ioGMDData * dataP, OSData * memoryEntries, vm_tag_t tag)
2827 {
2828 ioPLBlock *ioplList;
2829 UInt ind, count;
2830 vm_tag_t prior;
2831
2832 count = getNumIOPL(memoryEntries, dataP);
2833 if (!count) return;
2834 ioplList = getIOPLList(dataP);
2835
2836 if (VM_KERN_MEMORY_NONE == tag) tag = dataP->fAllocTag;
2837 assert(VM_KERN_MEMORY_NONE != tag);
2838
2839 for (ind = 0; ind < count; ind++)
2840 {
2841 if (!ioplList[ind].fIOPL) continue;
2842 prior = iopl_set_tag(ioplList[ind].fIOPL, tag);
2843 if (VM_KERN_MEMORY_NONE == dataP->fAllocTag) dataP->fAllocTag = prior;
2844 #if 0
2845 if (tag != prior)
2846 {
2847 char name[2][48];
2848 vm_tag_get_kext(prior, &name[0][0], sizeof(name[0]));
2849 vm_tag_get_kext(tag, &name[1][0], sizeof(name[1]));
2850 IOLog("switched %48s to %48s\n", name[0], name[1]);
2851 }
2852 #endif
2853 }
2854 }
2855 #endif /* IOTRACKING */
2856
2857
2858 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2859 {
2860 IOOptionBits type = _flags & kIOMemoryTypeMask;
2861 IOReturn error = kIOReturnSuccess;
2862 ioGMDData *dataP;
2863 upl_page_info_array_t pageInfo;
2864 ppnum_t mapBase;
2865
2866 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2867
2868 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2869 forDirection = (IODirection) (forDirection | getDirection());
2870
2871 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
2872 switch (kIODirectionOutIn & forDirection)
2873 {
2874 case kIODirectionOut:
2875 // Pages do not need to be marked as dirty on commit
2876 uplFlags = UPL_COPYOUT_FROM;
2877 break;
2878
2879 case kIODirectionIn:
2880 default:
2881 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2882 break;
2883 }
2884 dataP = getDataP(_memoryEntries);
2885
2886 if (kIODirectionDMACommand & forDirection) assert(_wireCount);
2887
2888 if (_wireCount)
2889 {
2890 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2891 {
2892 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2893 error = kIOReturnNotWritable;
2894 }
2895 }
2896 else
2897 {
2898 IOMapper *mapper;
2899 mapper = dataP->fMapper;
2900 dataP->fMappedBase = 0;
2901
2902 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2903 uplFlags |= UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));
2904
2905 if (kIODirectionPrepareToPhys32 & forDirection)
2906 {
2907 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2908 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2909 }
2910 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2911 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2912 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2913
2914 mapBase = 0;
2915
2916 // Note that appendBytes(NULL) zeros the data up to the desired length
2917 // and the length parameter is an unsigned int
2918 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2919 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2920 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2921 dataP = 0;
2922
2923 // Find the appropriate vm_map for the given task
2924 vm_map_t curMap;
2925 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2926 else curMap = get_task_map(_task);
2927
2928 // Iterate over the vector of virtual ranges
2929 Ranges vec = _ranges;
2930 unsigned int pageIndex = 0;
2931 IOByteCount mdOffset = 0;
2932 ppnum_t highestPage = 0;
2933
2934 IOMemoryEntry * memRefEntry = 0;
2935 if (_memRef) memRefEntry = &_memRef->entries[0];
2936
2937 for (UInt range = 0; range < _rangesCount; range++) {
2938 ioPLBlock iopl;
2939 mach_vm_address_t startPage;
2940 mach_vm_size_t numBytes;
2941 ppnum_t highPage = 0;
2942
2943 // Get the startPage address and length of vec[range]
2944 getAddrLenForInd(startPage, numBytes, type, vec, range);
2945 iopl.fPageOffset = startPage & PAGE_MASK;
2946 numBytes += iopl.fPageOffset;
2947 startPage = trunc_page_64(startPage);
2948
2949 if (mapper)
2950 iopl.fMappedPage = mapBase + pageIndex;
2951 else
2952 iopl.fMappedPage = 0;
2953
2954 // Iterate over the current range, creating UPLs
2955 while (numBytes) {
2956 vm_address_t kernelStart = (vm_address_t) startPage;
2957 vm_map_t theMap;
2958 if (curMap) theMap = curMap;
2959 else if (_memRef)
2960 {
2961 theMap = NULL;
2962 }
2963 else
2964 {
2965 assert(_task == kernel_task);
2966 theMap = IOPageableMapForAddress(kernelStart);
2967 }
2968
2969 // ioplFlags is an in/out parameter
2970 upl_control_flags_t ioplFlags = uplFlags;
2971 dataP = getDataP(_memoryEntries);
2972 pageInfo = getPageList(dataP);
2973 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2974
2975 mach_vm_size_t _ioplSize = round_page(numBytes);
2976 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
2977 unsigned int numPageInfo = atop_32(ioplSize);
2978
2979 if ((theMap == kernel_map)
2980 && (kernelStart >= io_kernel_static_start)
2981 && (kernelStart < io_kernel_static_end)) {
2982 error = io_get_kernel_static_upl(theMap,
2983 kernelStart,
2984 &ioplSize,
2985 &iopl.fIOPL,
2986 baseInfo,
2987 &numPageInfo,
2988 &highPage);
2989 }
2990 else if (_memRef) {
2991 memory_object_offset_t entryOffset;
2992
2993 entryOffset = mdOffset;
2994 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
2995 if (entryOffset >= memRefEntry->size) {
2996 memRefEntry++;
2997 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2998 entryOffset = 0;
2999 }
3000 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
3001 error = memory_object_iopl_request(memRefEntry->entry,
3002 entryOffset,
3003 &ioplSize,
3004 &iopl.fIOPL,
3005 baseInfo,
3006 &numPageInfo,
3007 &ioplFlags);
3008 }
3009 else {
3010 assert(theMap);
3011 error = vm_map_create_upl(theMap,
3012 startPage,
3013 (upl_size_t*)&ioplSize,
3014 &iopl.fIOPL,
3015 baseInfo,
3016 &numPageInfo,
3017 &ioplFlags);
3018 }
3019
3020 if (error != KERN_SUCCESS) goto abortExit;
3021
3022 assert(ioplSize);
3023
3024 if (iopl.fIOPL)
3025 highPage = upl_get_highest_page(iopl.fIOPL);
3026 if (highPage > highestPage)
3027 highestPage = highPage;
3028
3029 if (baseInfo->device) {
3030 numPageInfo = 1;
3031 iopl.fFlags = kIOPLOnDevice;
3032 }
3033 else {
3034 iopl.fFlags = 0;
3035 }
3036
3037 iopl.fIOMDOffset = mdOffset;
3038 iopl.fPageInfo = pageIndex;
3039 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
3040
3041 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3042 // Clean up partially created and unsaved iopl
3043 if (iopl.fIOPL) {
3044 upl_abort(iopl.fIOPL, 0);
3045 upl_deallocate(iopl.fIOPL);
3046 }
3047 goto abortExit;
3048 }
3049 dataP = 0;
3050
3051 // Check for multiple iopls in one virtual range
3052 pageIndex += numPageInfo;
3053 mdOffset -= iopl.fPageOffset;
3054 if (ioplSize < numBytes) {
3055 numBytes -= ioplSize;
3056 startPage += ioplSize;
3057 mdOffset += ioplSize;
3058 iopl.fPageOffset = 0;
3059 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
3060 }
3061 else {
3062 mdOffset += numBytes;
3063 break;
3064 }
3065 }
3066 }
3067
3068 _highestPage = highestPage;
3069
3070 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
3071 }
3072
3073 #if IOTRACKING
3074 if (kIOReturnSuccess == error)
3075 {
3076 vm_tag_t tag;
3077
3078 dataP = getDataP(_memoryEntries);
3079 if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
3080 else tag = IOMemoryTag(kernel_map);
3081
3082 if (!_wireCount) vm_tag_set_init(&dataP->fWireTags, kMaxWireTags);
3083 vm_tag_set_enter(&dataP->fWireTags, kMaxWireTags, tag);
3084
3085 IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
3086 if (!_wireCount)
3087 {
3088 //if (!(_flags & kIOMemoryAutoPrepare))
3089 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
3090 }
3091 }
3092 #endif /* IOTRACKING */
3093
3094 return (error);
3095
3096 abortExit:
3097 {
3098 dataP = getDataP(_memoryEntries);
3099 UInt done = getNumIOPL(_memoryEntries, dataP);
3100 ioPLBlock *ioplList = getIOPLList(dataP);
3101
3102 for (UInt range = 0; range < done; range++)
3103 {
3104 if (ioplList[range].fIOPL) {
3105 upl_abort(ioplList[range].fIOPL, 0);
3106 upl_deallocate(ioplList[range].fIOPL);
3107 }
3108 }
3109 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3110 }
3111
3112 if (error == KERN_FAILURE)
3113 error = kIOReturnCannotWire;
3114 else if (error == KERN_MEMORY_ERROR)
3115 error = kIOReturnNoResources;
3116
3117 return error;
3118 }
3119
3120 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3121 {
3122 ioGMDData * dataP;
3123 unsigned dataSize = size;
3124
3125 if (!_memoryEntries) {
3126 _memoryEntries = OSData::withCapacity(dataSize);
3127 if (!_memoryEntries)
3128 return false;
3129 }
3130 else if (!_memoryEntries->initWithCapacity(dataSize))
3131 return false;
3132
3133 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3134 dataP = getDataP(_memoryEntries);
3135
3136 if (mapper == kIOMapperWaitSystem) {
3137 IOMapper::checkForSystemMapper();
3138 mapper = IOMapper::gSystem;
3139 }
3140 dataP->fMapper = mapper;
3141 dataP->fPageCnt = 0;
3142 dataP->fMappedBase = 0;
3143 dataP->fDMAMapNumAddressBits = 64;
3144 dataP->fDMAMapAlignment = 0;
3145 dataP->fPreparationID = kIOPreparationIDUnprepared;
3146 dataP->fDiscontig = false;
3147 dataP->fCompletionError = false;
3148
3149 return (true);
3150 }
3151
3152 IOReturn IOMemoryDescriptor::dmaMap(
3153 IOMapper * mapper,
3154 IODMACommand * command,
3155 const IODMAMapSpecification * mapSpec,
3156 uint64_t offset,
3157 uint64_t length,
3158 uint64_t * mapAddress,
3159 uint64_t * mapLength)
3160 {
3161 IOReturn ret;
3162 uint32_t mapOptions;
3163
3164 mapOptions = 0;
3165 mapOptions |= kIODMAMapReadAccess;
3166 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3167
3168 ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
3169 mapSpec, command, NULL, mapAddress, mapLength);
3170
3171 return (ret);
3172 }
3173
3174 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3175 IOMapper * mapper,
3176 IODMACommand * command,
3177 const IODMAMapSpecification * mapSpec,
3178 uint64_t offset,
3179 uint64_t length,
3180 uint64_t * mapAddress,
3181 uint64_t * mapLength)
3182 {
3183 IOReturn err = kIOReturnSuccess;
3184 ioGMDData * dataP;
3185 IOOptionBits type = _flags & kIOMemoryTypeMask;
3186
3187 *mapAddress = 0;
3188 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3189
3190 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3191 || offset || (length != _length))
3192 {
3193 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3194 }
3195 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3196 {
3197 const ioPLBlock * ioplList = getIOPLList(dataP);
3198 upl_page_info_t * pageList;
3199 uint32_t mapOptions = 0;
3200
3201 IODMAMapSpecification mapSpec;
3202 bzero(&mapSpec, sizeof(mapSpec));
3203 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3204 mapSpec.alignment = dataP->fDMAMapAlignment;
3205
3206 // For external UPLs the fPageInfo field points directly to
3207 // the upl's upl_page_info_t array.
3208 if (ioplList->fFlags & kIOPLExternUPL)
3209 {
3210 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3211 mapOptions |= kIODMAMapPagingPath;
3212 }
3213 else pageList = getPageList(dataP);
3214
3215 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3216 {
3217 mapOptions |= kIODMAMapPageListFullyOccupied;
3218 }
3219
3220 mapOptions |= kIODMAMapReadAccess;
3221 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3222
3223 // Check for direct device non-paged memory
3224 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3225
3226 IODMAMapPageList dmaPageList =
3227 {
3228 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3229 .pageListCount = _pages,
3230 .pageList = &pageList[0]
3231 };
3232 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3233 command, &dmaPageList, mapAddress, mapLength);
3234 }
3235
3236 return (err);
3237 }
3238
3239 /*
3240 * prepare
3241 *
3242 * Prepare the memory for an I/O transfer. This involves paging in
3243 * the memory, if necessary, and wiring it down for the duration of
3244 * the transfer. The complete() method completes the processing of
3245 * the memory after the I/O transfer finishes. This method needn't
3246 * be called for non-pageable memory.
3247 */
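
/*
 * Illustrative only -- not part of this file's implementation: a minimal
 * sketch of how a driver might pair prepare() and complete() around a
 * transfer. The buffer, length, direction and task below are assumptions.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       buffer, length, kIODirectionOut, current_task());
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... program the hardware / run the DMA transfer ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */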
3248
3249 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3250 {
3251 IOReturn error = kIOReturnSuccess;
3252 IOOptionBits type = _flags & kIOMemoryTypeMask;
3253
3254 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3255 return kIOReturnSuccess;
3256
3257 if (_prepareLock) IOLockLock(_prepareLock);
3258
3259 if (kIODirectionDMACommand & forDirection)
3260 {
3261 #if IOMD_DEBUG_DMAACTIVE
3262 OSIncrementAtomic(&__iomd_reservedA);
3263 #endif /* IOMD_DEBUG_DMAACTIVE */
3264 }
3265 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3266 {
3267 error = wireVirtual(forDirection);
3268 }
3269
3270 if ((kIOReturnSuccess == error) && !(kIODirectionDMACommand & forDirection))
3271 {
3272 if (1 == ++_wireCount)
3273 {
3274 if (kIOMemoryClearEncrypt & _flags)
3275 {
3276 performOperation(kIOMemoryClearEncrypted, 0, _length);
3277 }
3278 }
3279 }
3280
3281 if (_prepareLock) IOLockUnlock(_prepareLock);
3282
3283 return error;
3284 }
3285
3286 /*
3287 * complete
3288 *
3289 * Complete processing of the memory after an I/O transfer finishes.
3290 * This method should not be called unless a prepare was previously
3291 * issued; the prepare() and complete() calls must occur in pairs,
3292 * before and after an I/O transfer involving pageable memory.
3293 */
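
/*
 * Illustrative only: complete() takes the same direction bits as prepare().
 * As a hedged example (the transferStatus variable is an assumption), a
 * failed transfer can be flagged so its pages are aborted rather than
 * committed:
 *
 *   if (kIOReturnSuccess != transferStatus)
 *       md->complete(kIODirectionCompleteWithError);
 *   else
 *       md->complete();
 */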
3294
3295 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3296 {
3297 IOOptionBits type = _flags & kIOMemoryTypeMask;
3298 ioGMDData * dataP;
3299
3300 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3301 return kIOReturnSuccess;
3302
3303 if (_prepareLock) IOLockLock(_prepareLock);
3304 do
3305 {
3306 assert(_wireCount);
3307 if (!_wireCount) break;
3308 dataP = getDataP(_memoryEntries);
3309 if (!dataP) break;
3310
3311 #if IOMD_DEBUG_DMAACTIVE
3312 if (kIODirectionDMACommand & forDirection)
3313 {
3314 if (__iomd_reservedA) OSDecrementAtomic(&__iomd_reservedA);
3315 else panic("kIOMDSetDMAInactive");
3316 }
3317 #endif /* IOMD_DEBUG_DMAACTIVE */
3318 #if IOTRACKING
3319 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3320 {
3321 vm_tag_t tag;
3322
3323 if (forDirection & kIODirectionDMACommand) tag = (forDirection & kIODirectionDMACommandMask) >> kIODirectionDMACommandShift;
3324 else tag = IOMemoryTag(kernel_map);
3325 vm_tag_set_remove(&dataP->fWireTags, kMaxWireTags, tag, &tag);
3326 IOMemoryDescriptorUpdateWireOwner(dataP, _memoryEntries, tag);
3327 }
3328 if (kIODirectionDMACommand & forDirection) break;
3329 #endif /* IOTRACKING */
3330
3331 if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;
3332
3333 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3334 {
3335 performOperation(kIOMemorySetEncrypted, 0, _length);
3336 }
3337
3338 _wireCount--;
3339 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3340 {
3341 ioPLBlock *ioplList = getIOPLList(dataP);
3342 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3343
3344 if (_wireCount)
3345 {
3346 // kIODirectionCompleteWithDataValid & forDirection
3347 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3348 {
3349 for (ind = 0; ind < count; ind++)
3350 {
3351 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3352 }
3353 }
3354 }
3355 else
3356 {
3357 #if IOMD_DEBUG_DMAACTIVE
3358 if (__iomd_reservedA) panic("complete() while dma active");
3359 #endif /* IOMD_DEBUG_DMAACTIVE */
3360
3361 if (dataP->fMappedBase) {
3362 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
3363 dataP->fMappedBase = 0;
3364 }
3365 // Only complete iopls that we created, i.e. those for TypeVirtual
3366 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3367 #if IOTRACKING
3368 //if (!(_flags & kIOMemoryAutoPrepare))
3369 {
3370 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3371 }
3372 #endif /* IOTRACKING */
3373 for (ind = 0; ind < count; ind++)
3374 if (ioplList[ind].fIOPL) {
3375 if (dataP->fCompletionError)
3376 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3377 else
3378 upl_commit(ioplList[ind].fIOPL, 0, 0);
3379 upl_deallocate(ioplList[ind].fIOPL);
3380 }
3381 } else if (kIOMemoryTypeUPL == type) {
3382 upl_set_referenced(ioplList[0].fIOPL, false);
3383 }
3384
3385 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3386
3387 dataP->fPreparationID = kIOPreparationIDUnprepared;
3388 dataP->fAllocTag = VM_KERN_MEMORY_NONE;
3389 }
3390 }
3391 }
3392 while (false);
3393
3394 if (_prepareLock) IOLockUnlock(_prepareLock);
3395
3396 return kIOReturnSuccess;
3397 }
3398
3399 IOReturn IOGeneralMemoryDescriptor::doMap(
3400 vm_map_t __addressMap,
3401 IOVirtualAddress * __address,
3402 IOOptionBits options,
3403 IOByteCount __offset,
3404 IOByteCount __length )
3405 {
3406 #ifndef __LP64__
3407 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3408 #endif /* !__LP64__ */
3409
3410 kern_return_t err;
3411
3412 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3413 mach_vm_size_t offset = mapping->fOffset + __offset;
3414 mach_vm_size_t length = mapping->fLength;
3415
3416 IOOptionBits type = _flags & kIOMemoryTypeMask;
3417 Ranges vec = _ranges;
3418
3419 mach_vm_address_t range0Addr = 0;
3420 mach_vm_size_t range0Len = 0;
3421
3422 if ((offset >= _length) || ((offset + length) > _length))
3423 return( kIOReturnBadArgument );
3424
3425 if (vec.v)
3426 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3427
3428 // mapping source == dest? (could be much better)
3429 if (_task
3430 && (mapping->fAddressTask == _task)
3431 && (mapping->fAddressMap == get_task_map(_task))
3432 && (options & kIOMapAnywhere)
3433 && (1 == _rangesCount)
3434 && (0 == offset)
3435 && range0Addr
3436 && (length <= range0Len))
3437 {
3438 mapping->fAddress = range0Addr;
3439 mapping->fOptions |= kIOMapStatic;
3440
3441 return( kIOReturnSuccess );
3442 }
3443
3444 if (!_memRef)
3445 {
3446 IOOptionBits createOptions = 0;
3447 if (!(kIOMapReadOnly & options))
3448 {
3449 createOptions |= kIOMemoryReferenceWrite;
3450 #if DEVELOPMENT || DEBUG
3451 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3452 {
3453 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3454 }
3455 #endif
3456 }
3457 err = memoryReferenceCreate(createOptions, &_memRef);
3458 if (kIOReturnSuccess != err) return (err);
3459 }
3460
3461 memory_object_t pager;
3462 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3463
3464 // <upl_transpose //
3465 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3466 {
3467 do
3468 {
3469 upl_t redirUPL2;
3470 upl_size_t size;
3471 upl_control_flags_t flags;
3472 unsigned int lock_count;
3473
3474 if (!_memRef || (1 != _memRef->count))
3475 {
3476 err = kIOReturnNotReadable;
3477 break;
3478 }
3479
3480 size = round_page(mapping->fLength);
3481 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3482 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
3483 | UPL_MEMORY_TAG_MAKE(getVMTag(kernel_map));
3484
3485 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3486 NULL, NULL,
3487 &flags))
3488 redirUPL2 = NULL;
3489
3490 for (lock_count = 0;
3491 IORecursiveLockHaveLock(gIOMemoryLock);
3492 lock_count++) {
3493 UNLOCK;
3494 }
3495 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3496 for (;
3497 lock_count;
3498 lock_count--) {
3499 LOCK;
3500 }
3501
3502 if (kIOReturnSuccess != err)
3503 {
3504 IOLog("upl_transpose(%x)\n", err);
3505 err = kIOReturnSuccess;
3506 }
3507
3508 if (redirUPL2)
3509 {
3510 upl_commit(redirUPL2, NULL, 0);
3511 upl_deallocate(redirUPL2);
3512 redirUPL2 = 0;
3513 }
3514 {
3515 // swap the memEntries since they now refer to different vm_objects
3516 IOMemoryReference * me = _memRef;
3517 _memRef = mapping->fMemory->_memRef;
3518 mapping->fMemory->_memRef = me;
3519 }
3520 if (pager)
3521 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3522 }
3523 while (false);
3524 }
3525 // upl_transpose> //
3526 else
3527 {
3528 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3529 #if IOTRACKING
3530 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
3531 {
3532 // only dram maps in the default case on development builds
3533 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3534 }
3535 #endif /* IOTRACKING */
3536 if ((err == KERN_SUCCESS) && pager)
3537 {
3538 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3539
3540 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3541 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3542 {
3543 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3544 }
3545 }
3546 }
3547
3548 return (err);
3549 }
3550
3551 #if IOTRACKING
3552 IOReturn
3553 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
3554 mach_vm_address_t * address, mach_vm_size_t * size)
3555 {
3556 #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3557
3558 IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
3559
3560 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);
3561
3562 *task = map->fAddressTask;
3563 *address = map->fAddress;
3564 *size = map->fLength;
3565
3566 return (kIOReturnSuccess);
3567 }
3568 #endif /* IOTRACKING */
3569
3570 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3571 vm_map_t addressMap,
3572 IOVirtualAddress __address,
3573 IOByteCount __length )
3574 {
3575 return (super::doUnmap(addressMap, __address, __length));
3576 }
3577
3578 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3579
3580 #undef super
3581 #define super OSObject
3582
3583 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3584
3585 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3586 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3587 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3588 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3589 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3590 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3591 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3592 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3593
3594 /* ex-inline function implementation */
3595 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3596 { return( getPhysicalSegment( 0, 0 )); }
3597
3598 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3599
3600 bool IOMemoryMap::init(
3601 task_t intoTask,
3602 mach_vm_address_t toAddress,
3603 IOOptionBits _options,
3604 mach_vm_size_t _offset,
3605 mach_vm_size_t _length )
3606 {
3607 if (!intoTask)
3608 return( false);
3609
3610 if (!super::init())
3611 return(false);
3612
3613 fAddressMap = get_task_map(intoTask);
3614 if (!fAddressMap)
3615 return(false);
3616 vm_map_reference(fAddressMap);
3617
3618 fAddressTask = intoTask;
3619 fOptions = _options;
3620 fLength = _length;
3621 fOffset = _offset;
3622 fAddress = toAddress;
3623
3624 return (true);
3625 }
3626
3627 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3628 {
3629 if (!_memory)
3630 return(false);
3631
3632 if (!fSuperMap)
3633 {
3634 if( (_offset + fLength) > _memory->getLength())
3635 return( false);
3636 fOffset = _offset;
3637 }
3638
3639 _memory->retain();
3640 if (fMemory)
3641 {
3642 if (fMemory != _memory)
3643 fMemory->removeMapping(this);
3644 fMemory->release();
3645 }
3646 fMemory = _memory;
3647
3648 return( true );
3649 }
3650
3651 IOReturn IOMemoryDescriptor::doMap(
3652 vm_map_t __addressMap,
3653 IOVirtualAddress * __address,
3654 IOOptionBits options,
3655 IOByteCount __offset,
3656 IOByteCount __length )
3657 {
3658 return (kIOReturnUnsupported);
3659 }
3660
3661 IOReturn IOMemoryDescriptor::handleFault(
3662 void * _pager,
3663 mach_vm_size_t sourceOffset,
3664 mach_vm_size_t length)
3665 {
3666 if( kIOMemoryRedirected & _flags)
3667 {
3668 #if DEBUG
3669 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3670 #endif
3671 do {
3672 SLEEP;
3673 } while( kIOMemoryRedirected & _flags );
3674 }
3675 return (kIOReturnSuccess);
3676 }
3677
3678 IOReturn IOMemoryDescriptor::populateDevicePager(
3679 void * _pager,
3680 vm_map_t addressMap,
3681 mach_vm_address_t address,
3682 mach_vm_size_t sourceOffset,
3683 mach_vm_size_t length,
3684 IOOptionBits options )
3685 {
3686 IOReturn err = kIOReturnSuccess;
3687 memory_object_t pager = (memory_object_t) _pager;
3688 mach_vm_size_t size;
3689 mach_vm_size_t bytes;
3690 mach_vm_size_t page;
3691 mach_vm_size_t pageOffset;
3692 mach_vm_size_t pagerOffset;
3693 IOPhysicalLength segLen, chunk;
3694 addr64_t physAddr;
3695 IOOptionBits type;
3696
3697 type = _flags & kIOMemoryTypeMask;
3698
3699 if (reserved->dp.pagerContig)
3700 {
3701 sourceOffset = 0;
3702 pagerOffset = 0;
3703 }
3704
3705 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3706 assert( physAddr );
3707 pageOffset = physAddr - trunc_page_64( physAddr );
3708 pagerOffset = sourceOffset;
3709
3710 size = length + pageOffset;
3711 physAddr -= pageOffset;
3712
3713 segLen += pageOffset;
3714 bytes = size;
3715 do
3716 {
3717 // in the middle of the loop only map whole pages
3718 if( segLen >= bytes) segLen = bytes;
3719 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3720 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3721
3722 if (kIOReturnSuccess != err) break;
3723
3724 #if DEBUG || DEVELOPMENT
3725 if ((kIOMemoryTypeUPL != type)
3726 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3727 {
3728 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3729 }
3730 #endif /* DEBUG || DEVELOPMENT */
3731
3732 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3733 for (page = 0;
3734 (page < segLen) && (KERN_SUCCESS == err);
3735 page += chunk)
3736 {
3737 err = device_pager_populate_object(pager, pagerOffset,
3738 (ppnum_t)(atop_64(physAddr + page)), chunk);
3739 pagerOffset += chunk;
3740 }
3741
3742 assert (KERN_SUCCESS == err);
3743 if (err) break;
3744
3745 // This call to vm_fault causes an early pmap-level resolution
3746 // of the mappings created above for kernel mappings, since
3747 // faulting them in later can't take place from interrupt level.
3748 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3749 {
3750 vm_fault(addressMap,
3751 (vm_map_offset_t)trunc_page_64(address),
3752 VM_PROT_READ|VM_PROT_WRITE,
3753 FALSE, THREAD_UNINT, NULL,
3754 (vm_map_offset_t)0);
3755 }
3756
3757 sourceOffset += segLen - pageOffset;
3758 address += segLen;
3759 bytes -= segLen;
3760 pageOffset = 0;
3761 }
3762 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3763
3764 if (bytes)
3765 err = kIOReturnBadArgument;
3766
3767 return (err);
3768 }
3769
3770 IOReturn IOMemoryDescriptor::doUnmap(
3771 vm_map_t addressMap,
3772 IOVirtualAddress __address,
3773 IOByteCount __length )
3774 {
3775 IOReturn err;
3776 IOMemoryMap * mapping;
3777 mach_vm_address_t address;
3778 mach_vm_size_t length;
3779
3780 if (__length) panic("doUnmap");
3781
3782 mapping = (IOMemoryMap *) __address;
3783 addressMap = mapping->fAddressMap;
3784 address = mapping->fAddress;
3785 length = mapping->fLength;
3786
3787 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3788 else
3789 {
3790 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3791 addressMap = IOPageableMapForAddress( address );
3792 #if DEBUG
3793 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3794 addressMap, address, length );
3795 #endif
3796 err = mach_vm_deallocate( addressMap, address, length );
3797 }
3798
3799 #if IOTRACKING
3800 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
3801 #endif /* IOTRACKING */
3802
3803 return (err);
3804 }
3805
3806 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3807 {
3808 IOReturn err = kIOReturnSuccess;
3809 IOMemoryMap * mapping = 0;
3810 OSIterator * iter;
3811
3812 LOCK;
3813
3814 if( doRedirect)
3815 _flags |= kIOMemoryRedirected;
3816 else
3817 _flags &= ~kIOMemoryRedirected;
3818
3819 do {
3820 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3821
3822 memory_object_t pager;
3823
3824 if( reserved)
3825 pager = (memory_object_t) reserved->dp.devicePager;
3826 else
3827 pager = MACH_PORT_NULL;
3828
3829 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3830 {
3831 mapping->redirect( safeTask, doRedirect );
3832 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3833 {
3834 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3835 }
3836 }
3837
3838 iter->release();
3839 }
3840 } while( false );
3841
3842 if (!doRedirect)
3843 {
3844 WAKEUP;
3845 }
3846
3847 UNLOCK;
3848
3849 #ifndef __LP64__
3850 // temporary binary compatibility
3851 IOSubMemoryDescriptor * subMem;
3852 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3853 err = subMem->redirect( safeTask, doRedirect );
3854 else
3855 err = kIOReturnSuccess;
3856 #endif /* !__LP64__ */
3857
3858 return( err );
3859 }
3860
3861 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3862 {
3863 IOReturn err = kIOReturnSuccess;
3864
3865 if( fSuperMap) {
3866 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3867 } else {
3868
3869 LOCK;
3870
3871 do
3872 {
3873 if (!fAddress)
3874 break;
3875 if (!fAddressMap)
3876 break;
3877
3878 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3879 && (0 == (fOptions & kIOMapStatic)))
3880 {
3881 IOUnmapPages( fAddressMap, fAddress, fLength );
3882 err = kIOReturnSuccess;
3883 #if DEBUG
3884 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3885 #endif
3886 }
3887 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3888 {
3889 IOOptionBits newMode;
3890 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3891 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3892 }
3893 }
3894 while (false);
3895 UNLOCK;
3896 }
3897
3898 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3899 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3900 && safeTask
3901 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3902 fMemory->redirect(safeTask, doRedirect);
3903
3904 return( err );
3905 }
3906
3907 IOReturn IOMemoryMap::unmap( void )
3908 {
3909 IOReturn err;
3910
3911 LOCK;
3912
3913 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3914 && (0 == (kIOMapStatic & fOptions))) {
3915
3916 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3917
3918 } else
3919 err = kIOReturnSuccess;
3920
3921 if (fAddressMap)
3922 {
3923 vm_map_deallocate(fAddressMap);
3924 fAddressMap = 0;
3925 }
3926
3927 fAddress = 0;
3928
3929 UNLOCK;
3930
3931 return( err );
3932 }
3933
3934 void IOMemoryMap::taskDied( void )
3935 {
3936 LOCK;
3937 if (fUserClientUnmap) unmap();
3938 #if IOTRACKING
3939 else IOTrackingRemoveUser(gIOMapTracking, &fTracking);
3940 #endif /* IOTRACKING */
3941
3942 if( fAddressMap) {
3943 vm_map_deallocate(fAddressMap);
3944 fAddressMap = 0;
3945 }
3946 fAddressTask = 0;
3947 fAddress = 0;
3948 UNLOCK;
3949 }
3950
3951 IOReturn IOMemoryMap::userClientUnmap( void )
3952 {
3953 fUserClientUnmap = true;
3954 return (kIOReturnSuccess);
3955 }
3956
3957 // Overload the release mechanism. Every mapping must be a member
3958 // of its memory descriptor's _mappings set, which means there are
3959 // always 2 references on a mapping. When either of these references
3960 // is released we need to free ourselves.
3961 void IOMemoryMap::taggedRelease(const void *tag) const
3962 {
3963 LOCK;
3964 super::taggedRelease(tag, 2);
3965 UNLOCK;
3966 }
3967
3968 void IOMemoryMap::free()
3969 {
3970 unmap();
3971
3972 if (fMemory)
3973 {
3974 LOCK;
3975 fMemory->removeMapping(this);
3976 UNLOCK;
3977 fMemory->release();
3978 }
3979
3980 if (fOwner && (fOwner != fMemory))
3981 {
3982 LOCK;
3983 fOwner->removeMapping(this);
3984 UNLOCK;
3985 }
3986
3987 if (fSuperMap)
3988 fSuperMap->release();
3989
3990 if (fRedirUPL) {
3991 upl_commit(fRedirUPL, NULL, 0);
3992 upl_deallocate(fRedirUPL);
3993 }
3994
3995 super::free();
3996 }
3997
3998 IOByteCount IOMemoryMap::getLength()
3999 {
4000 return( fLength );
4001 }
4002
4003 IOVirtualAddress IOMemoryMap::getVirtualAddress()
4004 {
4005 #ifndef __LP64__
4006 if (fSuperMap)
4007 fSuperMap->getVirtualAddress();
4008 else if (fAddressMap
4009 && vm_map_is_64bit(fAddressMap)
4010 && (sizeof(IOVirtualAddress) < 8))
4011 {
4012 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4013 }
4014 #endif /* !__LP64__ */
4015
4016 return (fAddress);
4017 }
4018
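// As the backtrace above warns, getVirtualAddress() can truncate the address
// of a mapping in a 64-bit map when IOVirtualAddress is only 32 bits wide;
// the non-truncating accessors are getAddress()/getSize() (implemented below
// for pre-LP64 kernels). A minimal sketch, where 'map' is a placeholder for
// an existing IOMemoryMap:
//
//   mach_vm_address_t va  = map->getAddress();
//   mach_vm_size_t    len = map->getSize();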
4019 #ifndef __LP64__
4020 mach_vm_address_t IOMemoryMap::getAddress()
4021 {
4022 return( fAddress);
4023 }
4024
4025 mach_vm_size_t IOMemoryMap::getSize()
4026 {
4027 return( fLength );
4028 }
4029 #endif /* !__LP64__ */
4030
4031
4032 task_t IOMemoryMap::getAddressTask()
4033 {
4034 if( fSuperMap)
4035 return( fSuperMap->getAddressTask());
4036 else
4037 return( fAddressTask);
4038 }
4039
4040 IOOptionBits IOMemoryMap::getMapOptions()
4041 {
4042 return( fOptions);
4043 }
4044
4045 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
4046 {
4047 return( fMemory );
4048 }
4049
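// copyCompatible() decides whether this existing mapping can satisfy a new
// mapping request: the request must target the same task/map, agree on
// kIOMapReadOnly and (unless default) on the cache mode, match the address
// unless kIOMapAnywhere was asked for, and fall entirely within this
// mapping's offset/length. On success the existing map is either reused
// directly or becomes the super-map of the sub-range mapping.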
4050 IOMemoryMap * IOMemoryMap::copyCompatible(
4051 IOMemoryMap * newMapping )
4052 {
4053 task_t task = newMapping->getAddressTask();
4054 mach_vm_address_t toAddress = newMapping->fAddress;
4055 IOOptionBits _options = newMapping->fOptions;
4056 mach_vm_size_t _offset = newMapping->fOffset;
4057 mach_vm_size_t _length = newMapping->fLength;
4058
4059 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
4060 return( 0 );
4061 if( (fOptions ^ _options) & kIOMapReadOnly)
4062 return( 0 );
4063 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
4064 && ((fOptions ^ _options) & kIOMapCacheMask))
4065 return( 0 );
4066
4067 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
4068 return( 0 );
4069
4070 if( _offset < fOffset)
4071 return( 0 );
4072
4073 _offset -= fOffset;
4074
4075 if( (_offset + _length) > fLength)
4076 return( 0 );
4077
4078 retain();
4079 if( (fLength == _length) && (!_offset))
4080 {
4081 newMapping = this;
4082 }
4083 else
4084 {
4085 newMapping->fSuperMap = this;
4086 newMapping->fOffset = fOffset + _offset;
4087 newMapping->fAddress = fAddress + _offset;
4088 }
4089
4090 return( newMapping );
4091 }
4092
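// wireRange() wires the pages backing a sub-range of this mapping when any
// direction bits are passed, and unwires them when options is 0. A minimal
// usage sketch, where 'map' is a placeholder for an existing IOMemoryMap:
//
//   map->wireRange(kIODirectionOutIn, 0, map->getLength());   // wire
//   ...
//   map->wireRange(0, 0, map->getLength());                   // unwire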
4093 IOReturn IOMemoryMap::wireRange(
4094 uint32_t options,
4095 mach_vm_size_t offset,
4096 mach_vm_size_t length)
4097 {
4098 IOReturn kr;
4099 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4100 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4101 vm_prot_t prot;
4102
4103 prot = (kIODirectionOutIn & options);
4104 if (prot)
4105 {
4106 prot |= VM_PROT_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
4107 kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
4108 }
4109 else
4110 {
4111 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4112 }
4113
4114 return (kr);
4115 }
4116
4117
4118 IOPhysicalAddress
4119 #ifdef __LP64__
4120 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4121 #else /* !__LP64__ */
4122 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4123 #endif /* !__LP64__ */
4124 {
4125 IOPhysicalAddress address;
4126
4127 LOCK;
4128 #ifdef __LP64__
4129 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4130 #else /* !__LP64__ */
4131 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4132 #endif /* !__LP64__ */
4133 UNLOCK;
4134
4135 return( address );
4136 }
4137
4138 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4139
4140 #undef super
4141 #define super OSObject
4142
4143 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4144
4145 void IOMemoryDescriptor::initialize( void )
4146 {
4147 if( 0 == gIOMemoryLock)
4148 gIOMemoryLock = IORecursiveLockAlloc();
4149
4150 gIOLastPage = IOGetLastPageNumber();
4151 }
4152
4153 void IOMemoryDescriptor::free( void )
4154 {
4155 if( _mappings) _mappings->release();
4156
4157 if (reserved)
4158 {
4159 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4160 reserved = NULL;
4161 }
4162 super::free();
4163 }
4164
4165 IOMemoryMap * IOMemoryDescriptor::setMapping(
4166 task_t intoTask,
4167 IOVirtualAddress mapAddress,
4168 IOOptionBits options )
4169 {
4170 return (createMappingInTask( intoTask, mapAddress,
4171 options | kIOMapStatic,
4172 0, getLength() ));
4173 }
4174
4175 IOMemoryMap * IOMemoryDescriptor::map(
4176 IOOptionBits options )
4177 {
4178 return (createMappingInTask( kernel_task, 0,
4179 options | kIOMapAnywhere,
4180 0, getLength() ));
4181 }
4182
4183 #ifndef __LP64__
4184 IOMemoryMap * IOMemoryDescriptor::map(
4185 task_t intoTask,
4186 IOVirtualAddress atAddress,
4187 IOOptionBits options,
4188 IOByteCount offset,
4189 IOByteCount length )
4190 {
4191 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4192 {
4193 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4194 return (0);
4195 }
4196
4197 return (createMappingInTask(intoTask, atAddress,
4198 options, offset, length));
4199 }
4200 #endif /* !__LP64__ */
4201
4202 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4203 task_t intoTask,
4204 mach_vm_address_t atAddress,
4205 IOOptionBits options,
4206 mach_vm_size_t offset,
4207 mach_vm_size_t length)
4208 {
4209 IOMemoryMap * result;
4210 IOMemoryMap * mapping;
4211
4212 if (0 == length)
4213 length = getLength();
4214
4215 mapping = new IOMemoryMap;
4216
4217 if( mapping
4218 && !mapping->init( intoTask, atAddress,
4219 options, offset, length )) {
4220 mapping->release();
4221 mapping = 0;
4222 }
4223
4224 if (mapping)
4225 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4226 else
4227 result = 0;
4228
4229 #if DEBUG
4230 if (!result)
4231 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4232 this, atAddress, (uint32_t) options, offset, length);
4233 #endif
4234
4235 return (result);
4236 }
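
// A minimal usage sketch for createMappingInTask() (illustrative only;
// 'userTask', 'userBuffer' and 'userLength' are placeholders supplied by
// the caller):
//
//   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//       userBuffer, userLength, kIODirectionOutIn, userTask);
//   if (md && (kIOReturnSuccess == md->prepare()))
//   {
//       IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
//                                                   kIOMapAnywhere);
//       if (map)
//       {
//           // map->getAddress() / map->getLength() describe the new mapping
//           map->release();
//       }
//       md->complete();
//   }
//   if (md) md->release();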
4237
4238 #ifndef __LP64__ // there is only a 64-bit version for LP64
4239 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4240 IOOptionBits options,
4241 IOByteCount offset)
4242 {
4243 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4244 }
4245 #endif
4246
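// Re-target an existing mapping at different backing memory: access to the
// current pages is blocked via an IOPL taken with UPL_BLOCK_ACCESS, the
// mapping is rebuilt against newBackingMemory with
// kIOMapUnique | kIOMapReference | kIOMap64Bit, and the blocking UPL is
// then committed and released.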
4247 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4248 IOOptionBits options,
4249 mach_vm_size_t offset)
4250 {
4251 IOReturn err = kIOReturnSuccess;
4252 IOMemoryDescriptor * physMem = 0;
4253
4254 LOCK;
4255
4256 if (fAddress && fAddressMap) do
4257 {
4258 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4259 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4260 {
4261 physMem = fMemory;
4262 physMem->retain();
4263 }
4264
4265 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4266 {
4267 upl_size_t size = round_page(fLength);
4268 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4269 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
4270 | UPL_MEMORY_TAG_MAKE(fMemory->getVMTag(kernel_map));
4271 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4272 NULL, NULL,
4273 &flags))
4274 fRedirUPL = 0;
4275
4276 if (physMem)
4277 {
4278 IOUnmapPages( fAddressMap, fAddress, fLength );
4279 if ((false))
4280 physMem->redirect(0, true);
4281 }
4282 }
4283
4284 if (newBackingMemory)
4285 {
4286 if (newBackingMemory != fMemory)
4287 {
4288 fOffset = 0;
4289 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4290 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4291 offset, fLength))
4292 err = kIOReturnError;
4293 }
4294 if (fRedirUPL)
4295 {
4296 upl_commit(fRedirUPL, NULL, 0);
4297 upl_deallocate(fRedirUPL);
4298 fRedirUPL = 0;
4299 }
4300 if ((false) && physMem)
4301 physMem->redirect(0, false);
4302 }
4303 }
4304 while (false);
4305
4306 UNLOCK;
4307
4308 if (physMem)
4309 physMem->release();
4310
4311 return (err);
4312 }
4313
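// makeMapping() finishes construction of the IOMemoryMap passed in via
// __address (kIOMap64Bit is mandatory): static mappings are adopted as-is,
// kIOMapUnique physical ranges get a private wrapper descriptor, and other
// requests first try to reuse a compatible existing mapping via
// copyCompatible() before falling back to doMap() to create a new one.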
4314 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4315 IOMemoryDescriptor * owner,
4316 task_t __intoTask,
4317 IOVirtualAddress __address,
4318 IOOptionBits options,
4319 IOByteCount __offset,
4320 IOByteCount __length )
4321 {
4322 #ifndef __LP64__
4323 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4324 #endif /* !__LP64__ */
4325
4326 IOMemoryDescriptor * mapDesc = 0;
4327 IOMemoryMap * result = 0;
4328 OSIterator * iter;
4329
4330 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4331 mach_vm_size_t offset = mapping->fOffset + __offset;
4332 mach_vm_size_t length = mapping->fLength;
4333
4334 mapping->fOffset = offset;
4335
4336 LOCK;
4337
4338 do
4339 {
4340 if (kIOMapStatic & options)
4341 {
4342 result = mapping;
4343 addMapping(mapping);
4344 mapping->setMemoryDescriptor(this, 0);
4345 continue;
4346 }
4347
4348 if (kIOMapUnique & options)
4349 {
4350 addr64_t phys;
4351 IOByteCount physLen;
4352
4353 // if (owner != this) continue;
4354
4355 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4356 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4357 {
4358 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4359 if (!phys || (physLen < length))
4360 continue;
4361
4362 mapDesc = IOMemoryDescriptor::withAddressRange(
4363 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4364 if (!mapDesc)
4365 continue;
4366 offset = 0;
4367 mapping->fOffset = offset;
4368 }
4369 }
4370 else
4371 {
4372 // look for a compatible existing mapping
4373 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4374 {
4375 IOMemoryMap * lookMapping;
4376 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4377 {
4378 if ((result = lookMapping->copyCompatible(mapping)))
4379 {
4380 addMapping(result);
4381 result->setMemoryDescriptor(this, offset);
4382 break;
4383 }
4384 }
4385 iter->release();
4386 }
4387 if (result || (options & kIOMapReference))
4388 {
4389 if (result != mapping)
4390 {
4391 mapping->release();
4392 mapping = NULL;
4393 }
4394 continue;
4395 }
4396 }
4397
4398 if (!mapDesc)
4399 {
4400 mapDesc = this;
4401 mapDesc->retain();
4402 }
4403 IOReturn
4404 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4405 if (kIOReturnSuccess == kr)
4406 {
4407 result = mapping;
4408 mapDesc->addMapping(result);
4409 result->setMemoryDescriptor(mapDesc, offset);
4410 }
4411 else
4412 {
4413 mapping->release();
4414 mapping = NULL;
4415 }
4416 }
4417 while( false );
4418
4419 UNLOCK;
4420
4421 if (mapDesc)
4422 mapDesc->release();
4423
4424 return (result);
4425 }
4426
4427 void IOMemoryDescriptor::addMapping(
4428 IOMemoryMap * mapping )
4429 {
4430 if( mapping)
4431 {
4432 if( 0 == _mappings)
4433 _mappings = OSSet::withCapacity(1);
4434 if( _mappings )
4435 _mappings->setObject( mapping );
4436 }
4437 }
4438
4439 void IOMemoryDescriptor::removeMapping(
4440 IOMemoryMap * mapping )
4441 {
4442 if( _mappings)
4443 _mappings->removeObject( mapping);
4444 }
4445
4446 #ifndef __LP64__
4447 // obsolete initializers
4448 // - initWithOptions is the designated initializer
4449 bool
4450 IOMemoryDescriptor::initWithAddress(void * address,
4451 IOByteCount length,
4452 IODirection direction)
4453 {
4454 return( false );
4455 }
4456
4457 bool
4458 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4459 IOByteCount length,
4460 IODirection direction,
4461 task_t task)
4462 {
4463 return( false );
4464 }
4465
4466 bool
4467 IOMemoryDescriptor::initWithPhysicalAddress(
4468 IOPhysicalAddress address,
4469 IOByteCount length,
4470 IODirection direction )
4471 {
4472 return( false );
4473 }
4474
4475 bool
4476 IOMemoryDescriptor::initWithRanges(
4477 IOVirtualRange * ranges,
4478 UInt32 withCount,
4479 IODirection direction,
4480 task_t task,
4481 bool asReference)
4482 {
4483 return( false );
4484 }
4485
4486 bool
4487 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4488 UInt32 withCount,
4489 IODirection direction,
4490 bool asReference)
4491 {
4492 return( false );
4493 }
4494
4495 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4496 IOByteCount * lengthOfSegment)
4497 {
4498 return( 0 );
4499 }
4500 #endif /* !__LP64__ */
4501
4502 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4503
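// serialize() publishes the descriptor's ranges as an OSArray of
// dictionaries, each carrying an "address" and a "length" OSNumber,
// informally: ( { "address" = 0x...; "length" = 0x...; }, ... ).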
4504 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4505 {
4506 OSSymbol const *keys[2];
4507 OSObject *values[2];
4508 OSArray * array;
4509
4510 struct SerData {
4511 user_addr_t address;
4512 user_size_t length;
4513 } *vcopy;
4514 unsigned int index, nRanges;
4515 bool result;
4516
4517 IOOptionBits type = _flags & kIOMemoryTypeMask;
4518
4519 if (s == NULL) return false;
4520
4521 array = OSArray::withCapacity(4);
4522 if (!array) return (false);
4523
4524 nRanges = _rangesCount;
4525 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4526 if (vcopy == 0) return false;
4527
4528 keys[0] = OSSymbol::withCString("address");
4529 keys[1] = OSSymbol::withCString("length");
4530
4531 result = false;
4532 values[0] = values[1] = 0;
4533
4534 // From this point on, any failure jumps to the bail label below.
4535
4536 // Copy the volatile data so we don't have to allocate memory
4537 // while the lock is held.
4538 LOCK;
4539 if (nRanges == _rangesCount) {
4540 Ranges vec = _ranges;
4541 for (index = 0; index < nRanges; index++) {
4542 mach_vm_address_t addr; mach_vm_size_t len;
4543 getAddrLenForInd(addr, len, type, vec, index);
4544 vcopy[index].address = addr;
4545 vcopy[index].length = len;
4546 }
4547 } else {
4548 // The descriptor changed out from under us. Give up.
4549 UNLOCK;
4550 result = false;
4551 goto bail;
4552 }
4553 UNLOCK;
4554
4555 for (index = 0; index < nRanges; index++)
4556 {
4557 user_addr_t addr = vcopy[index].address;
4558 IOByteCount len = (IOByteCount) vcopy[index].length;
4559 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4560 if (values[0] == 0) {
4561 result = false;
4562 goto bail;
4563 }
4564 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4565 if (values[1] == 0) {
4566 result = false;
4567 goto bail;
4568 }
4569 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4570 if (dict == 0) {
4571 result = false;
4572 goto bail;
4573 }
4574 array->setObject(dict);
4575 dict->release();
4576 values[0]->release();
4577 values[1]->release();
4578 values[0] = values[1] = 0;
4579 }
4580
4581 result = array->serialize(s);
4582
4583 bail:
4584 if (array)
4585 array->release();
4586 if (values[0])
4587 values[0]->release();
4588 if (values[1])
4589 values[1]->release();
4590 if (keys[0])
4591 keys[0]->release();
4592 if (keys[1])
4593 keys[1]->release();
4594 if (vcopy)
4595 IOFree(vcopy, sizeof(SerData) * nRanges);
4596
4597 return result;
4598 }
4599
4600 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4601
4602 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4603 #ifdef __LP64__
4604 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4605 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4606 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4607 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4608 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4609 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4610 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4611 #else /* !__LP64__ */
4612 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4613 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4614 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4615 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4616 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4617 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4618 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4619 #endif /* !__LP64__ */
4620 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4621 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4622 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4623 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4624 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4625 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4626 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4627 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4628
4629 /* ex-inline function implementation */
4630 IOPhysicalAddress
4631 IOMemoryDescriptor::getPhysicalAddress()
4632 { return( getPhysicalSegment( 0, 0 )); }