/* apple/xnu (xnu-6153.141.1) - iokit/Kernel/IOMemoryDescriptor.cpp */
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45 #include <libkern/OSKextLibPrivate.h>
46
47 #include "IOKitKernelInternal.h"
48
49 #include <libkern/c++/OSContainers.h>
50 #include <libkern/c++/OSDictionary.h>
51 #include <libkern/c++/OSArray.h>
52 #include <libkern/c++/OSSymbol.h>
53 #include <libkern/c++/OSNumber.h>
54 #include <os/overflow.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <mach/memory_entry.h>
67 #include <vm/vm_fault.h>
68 #include <vm/vm_protos.h>
69
70 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
71 extern void ipc_port_release_send(ipc_port_t port);
72
73 __END_DECLS
74
75 #define kIOMapperWaitSystem ((IOMapper *) 1)
76
77 static IOMapper * gIOSystemMapper = NULL;
78
79 ppnum_t gIOLastPage;
80
81 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
82
83 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
84
85 #define super IOMemoryDescriptor
86
87 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
88
89 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
90
91 static IORecursiveLock * gIOMemoryLock;
92
93 #define LOCK IORecursiveLockLock( gIOMemoryLock)
94 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
95 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
96 #define WAKEUP \
97 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
98
99 #if 0
100 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
101 #else
102 #define DEBG(fmt, args...) {}
103 #endif
104
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107 // Some data structures and accessor macros used by the initWithOptions()
108 // function.
109
110 enum ioPLBlockFlags {
111 kIOPLOnDevice = 0x00000001,
112 kIOPLExternUPL = 0x00000002,
113 };
114
115 struct IOMDPersistentInitData {
116 const IOGeneralMemoryDescriptor * fMD;
117 IOMemoryReference * fMemRef;
118 };
119
120 struct ioPLBlock {
121 upl_t fIOPL;
122 vm_address_t fPageInfo; // Pointer to page list or index into it
123 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
124 ppnum_t fMappedPage; // Page number of first page in this iopl
125 unsigned int fPageOffset; // Offset within first page of iopl
126 unsigned int fFlags; // Flags
127 };
128
129 enum { kMaxWireTags = 6 };
130
131 struct ioGMDData {
132 IOMapper * fMapper;
133 uint64_t fDMAMapAlignment;
134 uint64_t fMappedBase;
135 uint64_t fMappedLength;
136 uint64_t fPreparationID;
137 #if IOTRACKING
138 IOTracking fWireTracking;
139 #endif /* IOTRACKING */
140 unsigned int fPageCnt;
141 uint8_t fDMAMapNumAddressBits;
142 unsigned char fDiscontig:1;
143 unsigned char fCompletionError:1;
144 unsigned char fMappedBaseValid:1;
145 unsigned char _resv:3;
146 unsigned char fDMAAccess:2;
147
148 /* variable length arrays */
149 upl_page_info_t fPageList[1]
150 #if __LP64__
151 // align fPageList as for ioPLBlock
152 __attribute__((aligned(sizeof(upl_t))))
153 #endif
154 ;
155 //ioPLBlock fBlocks[1];
156 };
157
158 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
159 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
160 #define getNumIOPL(osd, d) \
161 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
162 #define getPageList(d) (&(d->fPageList[0]))
163 #define computeDataSize(p, u) \
164 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
165
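/*
 * Layout sketch (illustrative): the OSData buffer backing _memoryEntries holds a
 * single ioGMDData header, then fPageCnt upl_page_info_t entries, then the
 * ioPLBlock array, which is what the accessors above walk. For example, sizing
 * the buffer for 4 pages and 1 IOPL (hypothetical values):
 *
 *   unsigned int dataSize = computeDataSize(4, 1);
 *   // == offsetof(ioGMDData, fPageList)
 *   //    + 4 * sizeof(upl_page_info_t) + 1 * sizeof(ioPLBlock)
 */
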
166 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
167
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169
170 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
171
172 extern "C" {
173 kern_return_t
174 device_data_action(
175 uintptr_t device_handle,
176 ipc_port_t device_pager,
177 vm_prot_t protection,
178 vm_object_offset_t offset,
179 vm_size_t size)
180 {
181 kern_return_t kr;
182 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
183 IOMemoryDescriptor * memDesc;
184
185 LOCK;
186 memDesc = ref->dp.memory;
187 if (memDesc) {
188 memDesc->retain();
189 kr = memDesc->handleFault(device_pager, offset, size);
190 memDesc->release();
191 } else {
192 kr = KERN_ABORTED;
193 }
194 UNLOCK;
195
196 return kr;
197 }
198
199 kern_return_t
200 device_close(
201 uintptr_t device_handle)
202 {
203 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
204
205 IODelete( ref, IOMemoryDescriptorReserved, 1 );
206
207 return kIOReturnSuccess;
208 }
209 }; // end extern "C"
210
211 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212
213 // Note: this inline function uses C++ reference arguments to return values.
214 // This means that pointers are not passed, and NULL does not have to be
215 // checked for, since a NULL reference is illegal.
216 static inline void
217 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
218 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
219 {
220 assert(kIOMemoryTypeUIO == type
221 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
222 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
223 if (kIOMemoryTypeUIO == type) {
224 user_size_t us;
225 user_addr_t ad;
226 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
227 }
228 #ifndef __LP64__
229 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
230 IOAddressRange cur = r.v64[ind];
231 addr = cur.address;
232 len = cur.length;
233 }
234 #endif /* !__LP64__ */
235 else {
236 IOVirtualRange cur = r.v[ind];
237 addr = cur.address;
238 len = cur.length;
239 }
240 }
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 static IOReturn
245 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
246 {
247 IOReturn err = kIOReturnSuccess;
248
249 *control = VM_PURGABLE_SET_STATE;
250
251 enum { kIOMemoryPurgeableControlMask = 15 };
252
253 switch (kIOMemoryPurgeableControlMask & newState) {
254 case kIOMemoryPurgeableKeepCurrent:
255 *control = VM_PURGABLE_GET_STATE;
256 break;
257
258 case kIOMemoryPurgeableNonVolatile:
259 *state = VM_PURGABLE_NONVOLATILE;
260 break;
261 case kIOMemoryPurgeableVolatile:
262 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
263 break;
264 case kIOMemoryPurgeableEmpty:
265 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
266 break;
267 default:
268 err = kIOReturnBadArgument;
269 break;
270 }
271
272 if (*control == VM_PURGABLE_SET_STATE) {
273 // let VM know this call is from the kernel and is allowed to alter
274 // the volatility of the memory entry even if it was created with
275 // MAP_MEM_PURGABLE_KERNEL_ONLY
276 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
277 }
278
279 return err;
280 }
281
282 static IOReturn
283 purgeableStateBits(int * state)
284 {
285 IOReturn err = kIOReturnSuccess;
286
287 switch (VM_PURGABLE_STATE_MASK & *state) {
288 case VM_PURGABLE_NONVOLATILE:
289 *state = kIOMemoryPurgeableNonVolatile;
290 break;
291 case VM_PURGABLE_VOLATILE:
292 *state = kIOMemoryPurgeableVolatile;
293 break;
294 case VM_PURGABLE_EMPTY:
295 *state = kIOMemoryPurgeableEmpty;
296 break;
297 default:
298 *state = kIOMemoryPurgeableNonVolatile;
299 err = kIOReturnNotReady;
300 break;
301 }
302 return err;
303 }
304
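/*
 * Usage sketch (illustrative): purgeableControlBits() translates an IOKit
 * purgeable request into the (control, state) pair expected by the VM layer,
 * and purgeableStateBits() folds a VM state back into an IOKit constant.
 *
 *   vm_purgable_t control;
 *   int           state;
 *   if (kIOReturnSuccess == purgeableControlBits(kIOMemoryPurgeableVolatile,
 *                                                &control, &state)) {
 *       // control == VM_PURGABLE_SET_STATE_FROM_KERNEL
 *       // state   == VM_PURGABLE_VOLATILE (plus any volatility group bits)
 *   }
 */
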
305 typedef struct {
306 unsigned int wimg;
307 unsigned int object_type;
308 } iokit_memtype_entry;
309
310 static const iokit_memtype_entry iomd_mem_types[] = {
311 [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
312 [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
313 [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
314 [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
315 [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
316 [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
317 [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
318 [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
319 [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
320 [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
321 };
322
323 static vm_prot_t
324 vmProtForCacheMode(IOOptionBits cacheMode)
325 {
326 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
327 vm_prot_t prot = 0;
328 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
329 return prot;
330 }
331
332 static unsigned int
333 pagerFlagsForCacheMode(IOOptionBits cacheMode)
334 {
335 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
336 if (cacheMode == kIODefaultCache) {
337 return -1U;
338 }
339 return iomd_mem_types[cacheMode].wimg;
340 }
341
342 static IOOptionBits
343 cacheModeForPagerFlags(unsigned int pagerFlags)
344 {
345 pagerFlags &= VM_WIMG_MASK;
346 IOOptionBits cacheMode = kIODefaultCache;
347 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
348 if (iomd_mem_types[i].wimg == pagerFlags) {
349 cacheMode = i;
350 break;
351 }
352 }
353 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
354 }
355
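/*
 * Mapping sketch (illustrative): iomd_mem_types drives both directions of the
 * cache-mode translation, e.g. for write-through:
 *
 *   unsigned int wimg = pagerFlagsForCacheMode(kIOWriteThruCache); // VM_WIMG_WTHRU
 *   IOOptionBits mode = cacheModeForPagerFlags(VM_WIMG_WTHRU);     // kIOWriteThruCache
 *
 * Note that cacheModeForPagerFlags() never returns kIODefaultCache; an
 * unmatched or default WIMG value is reported as kIOCopybackCache.
 */
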
356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
357 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
358
359 struct IOMemoryEntry {
360 ipc_port_t entry;
361 int64_t offset;
362 uint64_t size;
363 };
364
365 struct IOMemoryReference {
366 volatile SInt32 refCount;
367 vm_prot_t prot;
368 uint32_t capacity;
369 uint32_t count;
370 struct IOMemoryReference * mapRef;
371 IOMemoryEntry entries[0];
372 };
373
374 enum{
375 kIOMemoryReferenceReuse = 0x00000001,
376 kIOMemoryReferenceWrite = 0x00000002,
377 kIOMemoryReferenceCOW = 0x00000004,
378 };
379
380 SInt32 gIOMemoryReferenceCount;
381
382 IOMemoryReference *
383 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
384 {
385 IOMemoryReference * ref;
386 size_t newSize, oldSize, copySize;
387
388 newSize = (sizeof(IOMemoryReference)
389 - sizeof(ref->entries)
390 + capacity * sizeof(ref->entries[0]));
391 ref = (typeof(ref))IOMalloc(newSize);
392 if (realloc) {
393 oldSize = (sizeof(IOMemoryReference)
394 - sizeof(realloc->entries)
395 + realloc->capacity * sizeof(realloc->entries[0]));
396 copySize = oldSize;
397 if (copySize > newSize) {
398 copySize = newSize;
399 }
400 if (ref) {
401 bcopy(realloc, ref, copySize);
402 }
403 IOFree(realloc, oldSize);
404 } else if (ref) {
405 bzero(ref, sizeof(*ref));
406 ref->refCount = 1;
407 OSIncrementAtomic(&gIOMemoryReferenceCount);
408 }
409 if (!ref) {
410 return NULL;
411 }
412 ref->capacity = capacity;
413 return ref;
414 }
415
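/*
 * Growth sketch (illustrative): an IOMemoryReference is a fixed header followed
 * by a variable-length IOMemoryEntry array, so enlarging one is a realloc-style
 * copy into a bigger allocation (the old reference is freed by the call):
 *
 *   IOMemoryReference * ref = memoryReferenceAlloc(4, NULL);      // 4 entry slots
 *   ...
 *   ref = memoryReferenceAlloc(ref->capacity + 4, ref);           // grow, keep contents
 */
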
416 void
417 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
418 {
419 IOMemoryEntry * entries;
420 size_t size;
421
422 if (ref->mapRef) {
423 memoryReferenceFree(ref->mapRef);
424 ref->mapRef = NULL;
425 }
426
427 entries = ref->entries + ref->count;
428 while (entries > &ref->entries[0]) {
429 entries--;
430 ipc_port_release_send(entries->entry);
431 }
432 size = (sizeof(IOMemoryReference)
433 - sizeof(ref->entries)
434 + ref->capacity * sizeof(ref->entries[0]));
435 IOFree(ref, size);
436
437 OSDecrementAtomic(&gIOMemoryReferenceCount);
438 }
439
440 void
441 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
442 {
443 if (1 == OSDecrementAtomic(&ref->refCount)) {
444 memoryReferenceFree(ref);
445 }
446 }
447
448
449 IOReturn
450 IOGeneralMemoryDescriptor::memoryReferenceCreate(
451 IOOptionBits options,
452 IOMemoryReference ** reference)
453 {
454 enum { kCapacity = 4, kCapacityInc = 4 };
455
456 kern_return_t err;
457 IOMemoryReference * ref;
458 IOMemoryEntry * entries;
459 IOMemoryEntry * cloneEntries;
460 vm_map_t map;
461 ipc_port_t entry, cloneEntry;
462 vm_prot_t prot;
463 memory_object_size_t actualSize;
464 uint32_t rangeIdx;
465 uint32_t count;
466 mach_vm_address_t entryAddr, endAddr, entrySize;
467 mach_vm_size_t srcAddr, srcLen;
468 mach_vm_size_t nextAddr, nextLen;
469 mach_vm_size_t offset, remain;
470 IOByteCount physLen;
471 IOOptionBits type = (_flags & kIOMemoryTypeMask);
472 IOOptionBits cacheMode;
473 unsigned int pagerFlags;
474 vm_tag_t tag;
475 vm_named_entry_kernel_flags_t vmne_kflags;
476
477 ref = memoryReferenceAlloc(kCapacity, NULL);
478 if (!ref) {
479 return kIOReturnNoMemory;
480 }
481
482 tag = getVMTag(kernel_map);
483 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
484 entries = &ref->entries[0];
485 count = 0;
486 err = KERN_SUCCESS;
487
488 offset = 0;
489 rangeIdx = 0;
490 if (_task) {
491 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
492 } else {
493 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
494 nextLen = physLen;
495
496 // default cache mode for physical
497 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
498 IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
499 _flags |= (mode << kIOMemoryBufferCacheShift);
500 }
501 }
502
503 // cache mode & vm_prot
504 prot = VM_PROT_READ;
505 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
506 prot |= vmProtForCacheMode(cacheMode);
507 // VM system requires write access to change cache mode
508 if (kIODefaultCache != cacheMode) {
509 prot |= VM_PROT_WRITE;
510 }
511 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
512 prot |= VM_PROT_WRITE;
513 }
514 if (kIOMemoryReferenceWrite & options) {
515 prot |= VM_PROT_WRITE;
516 }
517 if (kIOMemoryReferenceCOW & options) {
518 prot |= MAP_MEM_VM_COPY;
519 }
520
521 if (kIOMemoryUseReserve & _flags) {
522 prot |= MAP_MEM_GRAB_SECLUDED;
523 }
524
525 if ((kIOMemoryReferenceReuse & options) && _memRef) {
526 cloneEntries = &_memRef->entries[0];
527 prot |= MAP_MEM_NAMED_REUSE;
528 }
529
530 if (_task) {
531 // virtual ranges
532
533 if (kIOMemoryBufferPageable & _flags) {
534 int ledger_tag, ledger_no_footprint;
535
536 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
537 prot |= MAP_MEM_NAMED_CREATE;
538
539 // default accounting settings:
540 // + "none" ledger tag
541 // + include in footprint
542 // can be changed later with ::setOwnership()
543 ledger_tag = VM_LEDGER_TAG_NONE;
544 ledger_no_footprint = 0;
545
546 if (kIOMemoryBufferPurgeable & _flags) {
547 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
548 if (VM_KERN_MEMORY_SKYWALK == tag) {
549 // Skywalk purgeable memory accounting:
550 // + "network" ledger tag
551 // + not included in footprint
552 ledger_tag = VM_LEDGER_TAG_NETWORK;
553 ledger_no_footprint = 1;
554 } else {
555 // regular purgeable memory accounting:
556 // + no ledger tag
557 // + included in footprint
558 ledger_tag = VM_LEDGER_TAG_NONE;
559 ledger_no_footprint = 0;
560 }
561 }
562 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
563 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
564 if (kIOMemoryUseReserve & _flags) {
565 prot |= MAP_MEM_GRAB_SECLUDED;
566 }
567
568 prot |= VM_PROT_WRITE;
569 map = NULL;
570 } else {
571 map = get_task_map(_task);
572 }
573
574 remain = _length;
575 while (remain) {
576 srcAddr = nextAddr;
577 srcLen = nextLen;
578 nextAddr = 0;
579 nextLen = 0;
580 // coalesce addr range
581 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
582 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
583 if ((srcAddr + srcLen) != nextAddr) {
584 break;
585 }
586 srcLen += nextLen;
587 }
588 entryAddr = trunc_page_64(srcAddr);
589 endAddr = round_page_64(srcAddr + srcLen);
590 do{
591 entrySize = (endAddr - entryAddr);
592 if (!entrySize) {
593 break;
594 }
595 actualSize = entrySize;
596
597 cloneEntry = MACH_PORT_NULL;
598 if (MAP_MEM_NAMED_REUSE & prot) {
599 if (cloneEntries < &_memRef->entries[_memRef->count]) {
600 cloneEntry = cloneEntries->entry;
601 } else {
602 prot &= ~MAP_MEM_NAMED_REUSE;
603 }
604 }
605
606 err = mach_make_memory_entry_internal(map,
607 &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
608
609 if (KERN_SUCCESS != err) {
610 break;
611 }
612 if (actualSize > entrySize) {
613 panic("mach_make_memory_entry_64 actualSize");
614 }
615
616 if (count >= ref->capacity) {
617 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
618 entries = &ref->entries[count];
619 }
620 entries->entry = entry;
621 entries->size = actualSize;
622 entries->offset = offset + (entryAddr - srcAddr);
623 entryAddr += actualSize;
624 if (MAP_MEM_NAMED_REUSE & prot) {
625 if ((cloneEntries->entry == entries->entry)
626 && (cloneEntries->size == entries->size)
627 && (cloneEntries->offset == entries->offset)) {
628 cloneEntries++;
629 } else {
630 prot &= ~MAP_MEM_NAMED_REUSE;
631 }
632 }
633 entries++;
634 count++;
635 }while (true);
636 offset += srcLen;
637 remain -= srcLen;
638 }
639 } else {
640 // _task == 0, physical or kIOMemoryTypeUPL
641 memory_object_t pager;
642 vm_size_t size = ptoa_64(_pages);
643
644 if (!getKernelReserved()) {
645 panic("getKernelReserved");
646 }
647
648 reserved->dp.pagerContig = (1 == _rangesCount);
649 reserved->dp.memory = this;
650
651 pagerFlags = pagerFlagsForCacheMode(cacheMode);
652 if (-1U == pagerFlags) {
653 panic("phys is kIODefaultCache");
654 }
655 if (reserved->dp.pagerContig) {
656 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
657 }
658
659 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
660 size, pagerFlags);
661 assert(pager);
662 if (!pager) {
663 err = kIOReturnVMError;
664 } else {
665 srcAddr = nextAddr;
666 entryAddr = trunc_page_64(srcAddr);
667 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
668 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
669 assert(KERN_SUCCESS == err);
670 if (KERN_SUCCESS != err) {
671 device_pager_deallocate(pager);
672 } else {
673 reserved->dp.devicePager = pager;
674 entries->entry = entry;
675 entries->size = size;
676 entries->offset = offset + (entryAddr - srcAddr);
677 entries++;
678 count++;
679 }
680 }
681 }
682
683 ref->count = count;
684 ref->prot = prot;
685
686 if (_task && (KERN_SUCCESS == err)
687 && (kIOMemoryMapCopyOnWrite & _flags)
688 && !(kIOMemoryReferenceCOW & options)) {
689 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
690 }
691
692 if (KERN_SUCCESS == err) {
693 if (MAP_MEM_NAMED_REUSE & prot) {
694 memoryReferenceFree(ref);
695 OSIncrementAtomic(&_memRef->refCount);
696 ref = _memRef;
697 }
698 } else {
699 memoryReferenceFree(ref);
700 ref = NULL;
701 }
702
703 *reference = ref;
704
705 return err;
706 }
707
708 kern_return_t
709 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
710 {
711 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
712 IOReturn err;
713 vm_map_offset_t addr;
714
715 addr = ref->mapped;
716
717 err = vm_map_enter_mem_object(map, &addr, ref->size,
718 (vm_map_offset_t) 0,
719 (((ref->options & kIOMapAnywhere)
720 ? VM_FLAGS_ANYWHERE
721 : VM_FLAGS_FIXED)),
722 VM_MAP_KERNEL_FLAGS_NONE,
723 ref->tag,
724 IPC_PORT_NULL,
725 (memory_object_offset_t) 0,
726 false, /* copy */
727 ref->prot,
728 ref->prot,
729 VM_INHERIT_NONE);
730 if (KERN_SUCCESS == err) {
731 ref->mapped = (mach_vm_address_t) addr;
732 ref->map = map;
733 }
734
735 return err;
736 }
737
738 IOReturn
739 IOGeneralMemoryDescriptor::memoryReferenceMap(
740 IOMemoryReference * ref,
741 vm_map_t map,
742 mach_vm_size_t inoffset,
743 mach_vm_size_t size,
744 IOOptionBits options,
745 mach_vm_address_t * inaddr)
746 {
747 IOReturn err;
748 int64_t offset = inoffset;
749 uint32_t rangeIdx, entryIdx;
750 vm_map_offset_t addr, mapAddr;
751 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
752
753 mach_vm_address_t nextAddr;
754 mach_vm_size_t nextLen;
755 IOByteCount physLen;
756 IOMemoryEntry * entry;
757 vm_prot_t prot, memEntryCacheMode;
758 IOOptionBits type;
759 IOOptionBits cacheMode;
760 vm_tag_t tag;
761 // for the kIOMapPrefault option.
762 upl_page_info_t * pageList = NULL;
763 UInt currentPageIndex = 0;
764 bool didAlloc;
765
766 if (ref->mapRef) {
767 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
768 return err;
769 }
770
771 type = _flags & kIOMemoryTypeMask;
772
773 prot = VM_PROT_READ;
774 if (!(kIOMapReadOnly & options)) {
775 prot |= VM_PROT_WRITE;
776 }
777 prot &= ref->prot;
778
779 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
780 if (kIODefaultCache != cacheMode) {
781 // VM system requires write access to update named entry cache mode
782 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
783 }
784
785 tag = getVMTag(map);
786
787 if (_task) {
788 // Find first range for offset
789 if (!_rangesCount) {
790 return kIOReturnBadArgument;
791 }
792 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
793 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
794 if (remain < nextLen) {
795 break;
796 }
797 remain -= nextLen;
798 }
799 } else {
800 rangeIdx = 0;
801 remain = 0;
802 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
803 nextLen = size;
804 }
805
806 assert(remain < nextLen);
807 if (remain >= nextLen) {
808 return kIOReturnBadArgument;
809 }
810
811 nextAddr += remain;
812 nextLen -= remain;
813 pageOffset = (page_mask & nextAddr);
814 addr = 0;
815 didAlloc = false;
816
817 if (!(options & kIOMapAnywhere)) {
818 addr = *inaddr;
819 if (pageOffset != (page_mask & addr)) {
820 return kIOReturnNotAligned;
821 }
822 addr -= pageOffset;
823 }
824
825 // find first entry for offset
826 for (entryIdx = 0;
827 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
828 entryIdx++) {
829 }
830 entryIdx--;
831 entry = &ref->entries[entryIdx];
832
833 // allocate VM
834 size = round_page_64(size + pageOffset);
835 if (kIOMapOverwrite & options) {
836 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
837 map = IOPageableMapForAddress(addr);
838 }
839 err = KERN_SUCCESS;
840 } else {
841 IOMemoryDescriptorMapAllocRef ref;
842 ref.map = map;
843 ref.tag = tag;
844 ref.options = options;
845 ref.size = size;
846 ref.prot = prot;
847 if (options & kIOMapAnywhere) {
848 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
849 ref.mapped = 0;
850 } else {
851 ref.mapped = addr;
852 }
853 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
854 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
855 } else {
856 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
857 }
858 if (KERN_SUCCESS == err) {
859 addr = ref.mapped;
860 map = ref.map;
861 didAlloc = true;
862 }
863 }
864
865 /*
866 * If the memory is associated with a device pager but doesn't have a UPL,
867 * it will be immediately faulted in through the pager via populateDevicePager().
868 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
869 * operations.
870 */
871 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
872 options &= ~kIOMapPrefault;
873 }
874
875 /*
876 * Prefaulting is only possible if we wired the memory earlier. Check the
877 * memory type, and the underlying data.
878 */
879 if (options & kIOMapPrefault) {
880 /*
881 * The memory must have been wired by calling ::prepare(); otherwise
882 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
883 */
884 assert(_wireCount != 0);
885 assert(_memoryEntries != NULL);
886 if ((_wireCount == 0) ||
887 (_memoryEntries == NULL)) {
888 return kIOReturnBadArgument;
889 }
890
891 // Get the page list.
892 ioGMDData* dataP = getDataP(_memoryEntries);
893 ioPLBlock const* ioplList = getIOPLList(dataP);
894 pageList = getPageList(dataP);
895
896 // Get the number of IOPLs.
897 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
898
899 /*
900 * Scan through the IOPL Info Blocks, looking for the first block containing
901 * the offset. The search will go one block past it, so we'll need to step
902 * back to the right block at the end.
903 */
904 UInt ioplIndex = 0;
905 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
906 ioplIndex++;
907 }
908 ioplIndex--;
909
910 // Retrieve the IOPL info block.
911 ioPLBlock ioplInfo = ioplList[ioplIndex];
912
913 /*
914 * For external UPLs, fPageInfo points directly to the UPL's upl_page_info_t
915 * array.
916 */
917 if (ioplInfo.fFlags & kIOPLExternUPL) {
918 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
919 } else {
920 pageList = &pageList[ioplInfo.fPageInfo];
921 }
922
923 // Rebase [offset] into the IOPL in order to look up the first page index.
924 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
925
926 // Retrieve the index of the first page corresponding to the offset.
927 currentPageIndex = atop_32(offsetInIOPL);
928 }
929
930 // enter mappings
931 remain = size;
932 mapAddr = addr;
933 addr += pageOffset;
934
935 while (remain && (KERN_SUCCESS == err)) {
936 entryOffset = offset - entry->offset;
937 if ((page_mask & entryOffset) != pageOffset) {
938 err = kIOReturnNotAligned;
939 break;
940 }
941
942 if (kIODefaultCache != cacheMode) {
943 vm_size_t unused = 0;
944 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
945 memEntryCacheMode, NULL, entry->entry);
946 assert(KERN_SUCCESS == err);
947 }
948
949 entryOffset -= pageOffset;
950 if (entryOffset >= entry->size) {
951 panic("entryOffset");
952 }
953 chunk = entry->size - entryOffset;
954 if (chunk) {
955 vm_map_kernel_flags_t vmk_flags;
956
957 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
958 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
959
960 if (chunk > remain) {
961 chunk = remain;
962 }
963 if (options & kIOMapPrefault) {
964 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
965
966 err = vm_map_enter_mem_object_prefault(map,
967 &mapAddr,
968 chunk, 0 /* mask */,
969 (VM_FLAGS_FIXED
970 | VM_FLAGS_OVERWRITE),
971 vmk_flags,
972 tag,
973 entry->entry,
974 entryOffset,
975 prot, // cur
976 prot, // max
977 &pageList[currentPageIndex],
978 nb_pages);
979
980 // Compute the next index in the page list.
981 currentPageIndex += nb_pages;
982 assert(currentPageIndex <= _pages);
983 } else {
984 err = vm_map_enter_mem_object(map,
985 &mapAddr,
986 chunk, 0 /* mask */,
987 (VM_FLAGS_FIXED
988 | VM_FLAGS_OVERWRITE),
989 vmk_flags,
990 tag,
991 entry->entry,
992 entryOffset,
993 false, // copy
994 prot, // cur
995 prot, // max
996 VM_INHERIT_NONE);
997 }
998 if (KERN_SUCCESS != err) {
999 break;
1000 }
1001 remain -= chunk;
1002 if (!remain) {
1003 break;
1004 }
1005 mapAddr += chunk;
1006 offset += chunk - pageOffset;
1007 }
1008 pageOffset = 0;
1009 entry++;
1010 entryIdx++;
1011 if (entryIdx >= ref->count) {
1012 err = kIOReturnOverrun;
1013 break;
1014 }
1015 }
1016
1017 if ((KERN_SUCCESS != err) && didAlloc) {
1018 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1019 addr = 0;
1020 }
1021 *inaddr = addr;
1022
1023 return err;
1024 }
1025
1026 IOReturn
1027 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1028 IOMemoryReference * ref,
1029 IOByteCount * residentPageCount,
1030 IOByteCount * dirtyPageCount)
1031 {
1032 IOReturn err;
1033 IOMemoryEntry * entries;
1034 unsigned int resident, dirty;
1035 unsigned int totalResident, totalDirty;
1036
1037 totalResident = totalDirty = 0;
1038 err = kIOReturnSuccess;
1039 entries = ref->entries + ref->count;
1040 while (entries > &ref->entries[0]) {
1041 entries--;
1042 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1043 if (KERN_SUCCESS != err) {
1044 break;
1045 }
1046 totalResident += resident;
1047 totalDirty += dirty;
1048 }
1049
1050 if (residentPageCount) {
1051 *residentPageCount = totalResident;
1052 }
1053 if (dirtyPageCount) {
1054 *dirtyPageCount = totalDirty;
1055 }
1056 return err;
1057 }
1058
1059 IOReturn
1060 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1061 IOMemoryReference * ref,
1062 IOOptionBits newState,
1063 IOOptionBits * oldState)
1064 {
1065 IOReturn err;
1066 IOMemoryEntry * entries;
1067 vm_purgable_t control;
1068 int totalState, state;
1069
1070 totalState = kIOMemoryPurgeableNonVolatile;
1071 err = kIOReturnSuccess;
1072 entries = ref->entries + ref->count;
1073 while (entries > &ref->entries[0]) {
1074 entries--;
1075
1076 err = purgeableControlBits(newState, &control, &state);
1077 if (KERN_SUCCESS != err) {
1078 break;
1079 }
1080 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1081 if (KERN_SUCCESS != err) {
1082 break;
1083 }
1084 err = purgeableStateBits(&state);
1085 if (KERN_SUCCESS != err) {
1086 break;
1087 }
1088
1089 if (kIOMemoryPurgeableEmpty == state) {
1090 totalState = kIOMemoryPurgeableEmpty;
1091 } else if (kIOMemoryPurgeableEmpty == totalState) {
1092 continue;
1093 } else if (kIOMemoryPurgeableVolatile == totalState) {
1094 continue;
1095 } else if (kIOMemoryPurgeableVolatile == state) {
1096 totalState = kIOMemoryPurgeableVolatile;
1097 } else {
1098 totalState = kIOMemoryPurgeableNonVolatile;
1099 }
1100 }
1101
1102 if (oldState) {
1103 *oldState = totalState;
1104 }
1105 return err;
1106 }
1107
1108 IOReturn
1109 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1110 IOMemoryReference * ref,
1111 task_t newOwner,
1112 int newLedgerTag,
1113 IOOptionBits newLedgerOptions)
1114 {
1115 IOReturn err, totalErr;
1116 IOMemoryEntry * entries;
1117
1118 totalErr = kIOReturnSuccess;
1119 entries = ref->entries + ref->count;
1120 while (entries > &ref->entries[0]) {
1121 entries--;
1122
1123 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1124 if (KERN_SUCCESS != err) {
1125 totalErr = err;
1126 }
1127 }
1128
1129 return totalErr;
1130 }
1131
1132 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1133
1134 IOMemoryDescriptor *
1135 IOMemoryDescriptor::withAddress(void * address,
1136 IOByteCount length,
1137 IODirection direction)
1138 {
1139 return IOMemoryDescriptor::
1140 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1141 }
1142
1143 #ifndef __LP64__
1144 IOMemoryDescriptor *
1145 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1146 IOByteCount length,
1147 IODirection direction,
1148 task_t task)
1149 {
1150 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1151 if (that) {
1152 if (that->initWithAddress(address, length, direction, task)) {
1153 return that;
1154 }
1155
1156 that->release();
1157 }
1158 return NULL;
1159 }
1160 #endif /* !__LP64__ */
1161
1162 IOMemoryDescriptor *
1163 IOMemoryDescriptor::withPhysicalAddress(
1164 IOPhysicalAddress address,
1165 IOByteCount length,
1166 IODirection direction )
1167 {
1168 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1169 }
1170
1171 #ifndef __LP64__
1172 IOMemoryDescriptor *
1173 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1174 UInt32 withCount,
1175 IODirection direction,
1176 task_t task,
1177 bool asReference)
1178 {
1179 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1180 if (that) {
1181 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1182 return that;
1183 }
1184
1185 that->release();
1186 }
1187 return NULL;
1188 }
1189 #endif /* !__LP64__ */
1190
1191 IOMemoryDescriptor *
1192 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1193 mach_vm_size_t length,
1194 IOOptionBits options,
1195 task_t task)
1196 {
1197 IOAddressRange range = { address, length };
1198 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1199 }
1200
1201 IOMemoryDescriptor *
1202 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1203 UInt32 rangeCount,
1204 IOOptionBits options,
1205 task_t task)
1206 {
1207 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1208 if (that) {
1209 if (task) {
1210 options |= kIOMemoryTypeVirtual64;
1211 } else {
1212 options |= kIOMemoryTypePhysical64;
1213 }
1214
1215 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1216 return that;
1217 }
1218
1219 that->release();
1220 }
1221
1222 return NULL;
1223 }
1224
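/*
 * Typical usage sketch (illustrative; userTask/userBuffer/userLength are
 * hypothetical and error handling is abbreviated):
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userBuffer, userLength, kIODirectionOutIn, userTask);
 *   if (md && (kIOReturnSuccess == md->prepare())) {  // wire the pages
 *       IOMemoryMap * map = md->map();                // map into the kernel task
 *       if (map) {
 *           // ... access map->getVirtualAddress() ...
 *           map->release();
 *       }
 *       md->complete();                               // unwire
 *   }
 *   if (md) {
 *       md->release();
 *   }
 */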
1225
1226 /*
1227 * withOptions:
1228 *
1229 * Create a new IOMemoryDescriptor. The buffer is made up of several
1230 * virtual address ranges, from a given task.
1231 *
1232 * Passing the ranges as a reference will avoid an extra allocation.
1233 */
1234 IOMemoryDescriptor *
1235 IOMemoryDescriptor::withOptions(void * buffers,
1236 UInt32 count,
1237 UInt32 offset,
1238 task_t task,
1239 IOOptionBits opts,
1240 IOMapper * mapper)
1241 {
1242 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1243
1244 if (self
1245 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1246 self->release();
1247 return NULL;
1248 }
1249
1250 return self;
1251 }
1252
1253 bool
1254 IOMemoryDescriptor::initWithOptions(void * buffers,
1255 UInt32 count,
1256 UInt32 offset,
1257 task_t task,
1258 IOOptionBits options,
1259 IOMapper * mapper)
1260 {
1261 return false;
1262 }
1263
1264 #ifndef __LP64__
1265 IOMemoryDescriptor *
1266 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1267 UInt32 withCount,
1268 IODirection direction,
1269 bool asReference)
1270 {
1271 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1272 if (that) {
1273 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1274 return that;
1275 }
1276
1277 that->release();
1278 }
1279 return NULL;
1280 }
1281
1282 IOMemoryDescriptor *
1283 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1284 IOByteCount offset,
1285 IOByteCount length,
1286 IODirection direction)
1287 {
1288 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1289 }
1290 #endif /* !__LP64__ */
1291
1292 IOMemoryDescriptor *
1293 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1294 {
1295 IOGeneralMemoryDescriptor *origGenMD =
1296 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1297
1298 if (origGenMD) {
1299 return IOGeneralMemoryDescriptor::
1300 withPersistentMemoryDescriptor(origGenMD);
1301 } else {
1302 return NULL;
1303 }
1304 }
1305
1306 IOMemoryDescriptor *
1307 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1308 {
1309 IOMemoryReference * memRef;
1310
1311 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1312 return NULL;
1313 }
1314
1315 if (memRef == originalMD->_memRef) {
1316 originalMD->retain(); // Add a new reference to ourselves
1317 originalMD->memoryReferenceRelease(memRef);
1318 return originalMD;
1319 }
1320
1321 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1322 IOMDPersistentInitData initData = { originalMD, memRef };
1323
1324 if (self
1325 && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1326 self->release();
1327 self = NULL;
1328 }
1329 return self;
1330 }
1331
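/*
 * Behavior sketch (illustrative): for a descriptor created with
 * kIOMemoryPersistent, this either returns the original descriptor (retained)
 * when its underlying named entries are unchanged, or a new descriptor built
 * from a freshly created reference to the same memory:
 *
 *   IOMemoryDescriptor * persistent =
 *       IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD); // origMD is hypothetical
 */
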
1332 #ifndef __LP64__
1333 bool
1334 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1335 IOByteCount withLength,
1336 IODirection withDirection)
1337 {
1338 _singleRange.v.address = (vm_offset_t) address;
1339 _singleRange.v.length = withLength;
1340
1341 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1342 }
1343
1344 bool
1345 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1346 IOByteCount withLength,
1347 IODirection withDirection,
1348 task_t withTask)
1349 {
1350 _singleRange.v.address = address;
1351 _singleRange.v.length = withLength;
1352
1353 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1354 }
1355
1356 bool
1357 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1358 IOPhysicalAddress address,
1359 IOByteCount withLength,
1360 IODirection withDirection )
1361 {
1362 _singleRange.p.address = address;
1363 _singleRange.p.length = withLength;
1364
1365 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1366 }
1367
1368 bool
1369 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1370 IOPhysicalRange * ranges,
1371 UInt32 count,
1372 IODirection direction,
1373 bool reference)
1374 {
1375 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1376
1377 if (reference) {
1378 mdOpts |= kIOMemoryAsReference;
1379 }
1380
1381 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1382 }
1383
1384 bool
1385 IOGeneralMemoryDescriptor::initWithRanges(
1386 IOVirtualRange * ranges,
1387 UInt32 count,
1388 IODirection direction,
1389 task_t task,
1390 bool reference)
1391 {
1392 IOOptionBits mdOpts = direction;
1393
1394 if (reference) {
1395 mdOpts |= kIOMemoryAsReference;
1396 }
1397
1398 if (task) {
1399 mdOpts |= kIOMemoryTypeVirtual;
1400
1401 // Auto-prepare if this is a kernel memory descriptor, as very few
1402 // clients bother to prepare() kernel memory.
1403 // But it was never enforced, so what are you going to do?
1404 if (task == kernel_task) {
1405 mdOpts |= kIOMemoryAutoPrepare;
1406 }
1407 } else {
1408 mdOpts |= kIOMemoryTypePhysical;
1409 }
1410
1411 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1412 }
1413 #endif /* !__LP64__ */
1414
1415 /*
1416 * initWithOptions:
1417 *
1418 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1419 * address ranges from a given task, several physical ranges, a UPL from the
1420 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
1421 *
1422 * Passing the ranges as a reference will avoid an extra allocation.
1423 *
1424 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1425 * existing instance -- note this behavior is not commonly supported in other
1426 * I/O Kit classes, although it is supported here.
1427 */
1428
1429 bool
1430 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1431 UInt32 count,
1432 UInt32 offset,
1433 task_t task,
1434 IOOptionBits options,
1435 IOMapper * mapper)
1436 {
1437 IOOptionBits type = options & kIOMemoryTypeMask;
1438
1439 #ifndef __LP64__
1440 if (task
1441 && (kIOMemoryTypeVirtual == type)
1442 && vm_map_is_64bit(get_task_map(task))
1443 && ((IOVirtualRange *) buffers)->address) {
1444 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1445 return false;
1446 }
1447 #endif /* !__LP64__ */
1448
1449 // Grab the original MD's configuration data to initialise the
1450 // arguments to this function.
1451 if (kIOMemoryTypePersistentMD == type) {
1452 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1453 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1454 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1455
1456 // Only accept persistent memory descriptors with valid dataP data.
1457 assert(orig->_rangesCount == 1);
1458 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1459 return false;
1460 }
1461
1462 _memRef = initData->fMemRef; // Grab the new named entry
1463 options = orig->_flags & ~kIOMemoryAsReference;
1464 type = options & kIOMemoryTypeMask;
1465 buffers = orig->_ranges.v;
1466 count = orig->_rangesCount;
1467
1468 // Now grab the original task and whatever mapper was previously used
1469 task = orig->_task;
1470 mapper = dataP->fMapper;
1471
1472 // We are ready to go through the original initialisation now
1473 }
1474
1475 switch (type) {
1476 case kIOMemoryTypeUIO:
1477 case kIOMemoryTypeVirtual:
1478 #ifndef __LP64__
1479 case kIOMemoryTypeVirtual64:
1480 #endif /* !__LP64__ */
1481 assert(task);
1482 if (!task) {
1483 return false;
1484 }
1485 break;
1486
1487 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1488 #ifndef __LP64__
1489 case kIOMemoryTypePhysical64:
1490 #endif /* !__LP64__ */
1491 case kIOMemoryTypeUPL:
1492 assert(!task);
1493 break;
1494 default:
1495 return false; /* bad argument */
1496 }
1497
1498 assert(buffers);
1499 assert(count);
1500
1501 /*
1502 * We can check the _initialized instance variable before having ever set
1503 * it to an initial value because I/O Kit guarantees that all our instance
1504 * variables are zeroed on an object's allocation.
1505 */
1506
1507 if (_initialized) {
1508 /*
1509 * An existing memory descriptor is being retargeted to point to
1510 * somewhere else. Clean up our present state.
1511 */
1512 IOOptionBits type = _flags & kIOMemoryTypeMask;
1513 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
1514 while (_wireCount) {
1515 complete();
1516 }
1517 }
1518 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1519 if (kIOMemoryTypeUIO == type) {
1520 uio_free((uio_t) _ranges.v);
1521 }
1522 #ifndef __LP64__
1523 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1524 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1525 }
1526 #endif /* !__LP64__ */
1527 else {
1528 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1529 }
1530 }
1531
1532 options |= (kIOMemoryRedirected & _flags);
1533 if (!(kIOMemoryRedirected & options)) {
1534 if (_memRef) {
1535 memoryReferenceRelease(_memRef);
1536 _memRef = NULL;
1537 }
1538 if (_mappings) {
1539 _mappings->flushCollection();
1540 }
1541 }
1542 } else {
1543 if (!super::init()) {
1544 return false;
1545 }
1546 _initialized = true;
1547 }
1548
1549 // Grab the appropriate mapper
1550 if (kIOMemoryHostOrRemote & options) {
1551 options |= kIOMemoryMapperNone;
1552 }
1553 if (kIOMemoryMapperNone & options) {
1554 mapper = NULL; // No Mapper
1555 } else if (mapper == kIOMapperSystem) {
1556 IOMapper::checkForSystemMapper();
1557 gIOSystemMapper = mapper = IOMapper::gSystem;
1558 }
1559
1560 // Remove the dynamic internal use flags from the initial setting
1561 options &= ~(kIOMemoryPreparedReadOnly);
1562 _flags = options;
1563 _task = task;
1564
1565 #ifndef __LP64__
1566 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1567 #endif /* !__LP64__ */
1568
1569 _dmaReferences = 0;
1570 __iomd_reservedA = 0;
1571 __iomd_reservedB = 0;
1572 _highestPage = 0;
1573
1574 if (kIOMemoryThreadSafe & options) {
1575 if (!_prepareLock) {
1576 _prepareLock = IOLockAlloc();
1577 }
1578 } else if (_prepareLock) {
1579 IOLockFree(_prepareLock);
1580 _prepareLock = NULL;
1581 }
1582
1583 if (kIOMemoryTypeUPL == type) {
1584 ioGMDData *dataP;
1585 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1586
1587 if (!initMemoryEntries(dataSize, mapper)) {
1588 return false;
1589 }
1590 dataP = getDataP(_memoryEntries);
1591 dataP->fPageCnt = 0;
1592 switch (kIOMemoryDirectionMask & options) {
1593 case kIODirectionOut:
1594 dataP->fDMAAccess = kIODMAMapReadAccess;
1595 break;
1596 case kIODirectionIn:
1597 dataP->fDMAAccess = kIODMAMapWriteAccess;
1598 break;
1599 case kIODirectionNone:
1600 case kIODirectionOutIn:
1601 default:
1602 panic("bad dir for upl 0x%x\n", (int) options);
1603 break;
1604 }
1605 // _wireCount++; // UPLs start out life wired
1606
1607 _length = count;
1608 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1609
1610 ioPLBlock iopl;
1611 iopl.fIOPL = (upl_t) buffers;
1612 upl_set_referenced(iopl.fIOPL, true);
1613 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1614
1615 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
1616 panic("short external upl");
1617 }
1618
1619 _highestPage = upl_get_highest_page(iopl.fIOPL);
1620
1621 // Set the flags; kIOPLOnDevice is conveniently equal to 1, matching pageList->device
1622 iopl.fFlags = pageList->device | kIOPLExternUPL;
1623 if (!pageList->device) {
1624 // Pre-compute the offset into the UPL's page list
1625 pageList = &pageList[atop_32(offset)];
1626 offset &= PAGE_MASK;
1627 }
1628 iopl.fIOMDOffset = 0;
1629 iopl.fMappedPage = 0;
1630 iopl.fPageInfo = (vm_address_t) pageList;
1631 iopl.fPageOffset = offset;
1632 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1633 } else {
1634 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1635 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1636
1637 // Initialize the memory descriptor
1638 if (options & kIOMemoryAsReference) {
1639 #ifndef __LP64__
1640 _rangesIsAllocated = false;
1641 #endif /* !__LP64__ */
1642
1643 // Hack assignment to get the buffer arg into _ranges.
1644 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1645 // work, C++ sigh.
1646 // This also initialises the uio & physical ranges.
1647 _ranges.v = (IOVirtualRange *) buffers;
1648 } else {
1649 #ifndef __LP64__
1650 _rangesIsAllocated = true;
1651 #endif /* !__LP64__ */
1652 switch (type) {
1653 case kIOMemoryTypeUIO:
1654 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1655 break;
1656
1657 #ifndef __LP64__
1658 case kIOMemoryTypeVirtual64:
1659 case kIOMemoryTypePhysical64:
1660 if (count == 1
1661 #ifndef __arm__
1662 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1663 #endif
1664 ) {
1665 if (kIOMemoryTypeVirtual64 == type) {
1666 type = kIOMemoryTypeVirtual;
1667 } else {
1668 type = kIOMemoryTypePhysical;
1669 }
1670 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1671 _rangesIsAllocated = false;
1672 _ranges.v = &_singleRange.v;
1673 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1674 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1675 break;
1676 }
1677 _ranges.v64 = IONew(IOAddressRange, count);
1678 if (!_ranges.v64) {
1679 return false;
1680 }
1681 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1682 break;
1683 #endif /* !__LP64__ */
1684 case kIOMemoryTypeVirtual:
1685 case kIOMemoryTypePhysical:
1686 if (count == 1) {
1687 _flags |= kIOMemoryAsReference;
1688 #ifndef __LP64__
1689 _rangesIsAllocated = false;
1690 #endif /* !__LP64__ */
1691 _ranges.v = &_singleRange.v;
1692 } else {
1693 _ranges.v = IONew(IOVirtualRange, count);
1694 if (!_ranges.v) {
1695 return false;
1696 }
1697 }
1698 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1699 break;
1700 }
1701 }
1702 _rangesCount = count;
1703
1704 // Find starting address within the vector of ranges
1705 Ranges vec = _ranges;
1706 mach_vm_size_t totalLength = 0;
1707 unsigned int ind, pages = 0;
1708 for (ind = 0; ind < count; ind++) {
1709 mach_vm_address_t addr;
1710 mach_vm_address_t endAddr;
1711 mach_vm_size_t len;
1712
1713 // addr & len are returned by this function
1714 getAddrLenForInd(addr, len, type, vec, ind);
1715 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
1716 break;
1717 }
1718 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
1719 break;
1720 }
1721 if (os_add_overflow(totalLength, len, &totalLength)) {
1722 break;
1723 }
1724 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1725 ppnum_t highPage = atop_64(addr + len - 1);
1726 if (highPage > _highestPage) {
1727 _highestPage = highPage;
1728 }
1729 }
1730 }
1731 if ((ind < count)
1732 || (totalLength != ((IOByteCount) totalLength))) {
1733 return false; /* overflow */
1734 }
1735 _length = totalLength;
1736 _pages = pages;
1737
1738 // Auto-prepare memory at creation time.
1739 // Implied completion when descriptor is free-ed
1740
1741
1742 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1743 _wireCount++; // Physical MDs are, by definition, wired
1744 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1745 ioGMDData *dataP;
1746 unsigned dataSize;
1747
1748 if (_pages > atop_64(max_mem)) {
1749 return false;
1750 }
1751
1752 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1753 if (!initMemoryEntries(dataSize, mapper)) {
1754 return false;
1755 }
1756 dataP = getDataP(_memoryEntries);
1757 dataP->fPageCnt = _pages;
1758
1759 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1760 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
1761 _kernelTag = IOMemoryTag(kernel_map);
1762 if (_kernelTag == gIOSurfaceTag) {
1763 _userTag = VM_MEMORY_IOSURFACE;
1764 }
1765 }
1766
1767 if ((kIOMemoryPersistent & _flags) && !_memRef) {
1768 IOReturn
1769 err = memoryReferenceCreate(0, &_memRef);
1770 if (kIOReturnSuccess != err) {
1771 return false;
1772 }
1773 }
1774
1775 if ((_flags & kIOMemoryAutoPrepare)
1776 && prepare() != kIOReturnSuccess) {
1777 return false;
1778 }
1779 }
1780 }
1781
1782 return true;
1783 }
1784
1785 /*
1786 * free
1787 *
1788 * Free resources.
1789 */
1790 void
1791 IOGeneralMemoryDescriptor::free()
1792 {
1793 IOOptionBits type = _flags & kIOMemoryTypeMask;
1794
1795 if (reserved) {
1796 LOCK;
1797 reserved->dp.memory = NULL;
1798 UNLOCK;
1799 }
1800 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1801 ioGMDData * dataP;
1802 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
1803 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1804 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1805 }
1806 } else {
1807 while (_wireCount) {
1808 complete();
1809 }
1810 }
1811
1812 if (_memoryEntries) {
1813 _memoryEntries->release();
1814 }
1815
1816 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1817 if (kIOMemoryTypeUIO == type) {
1818 uio_free((uio_t) _ranges.v);
1819 }
1820 #ifndef __LP64__
1821 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1822 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1823 }
1824 #endif /* !__LP64__ */
1825 else {
1826 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1827 }
1828
1829 _ranges.v = NULL;
1830 }
1831
1832 if (reserved) {
1833 cleanKernelReserved(reserved);
1834 if (reserved->dp.devicePager) {
1835 // memEntry holds a ref on the device pager which owns reserved
1836 // (IOMemoryDescriptorReserved) so no reserved access after this point
1837 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
1838 } else {
1839 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1840 }
1841 reserved = NULL;
1842 }
1843
1844 if (_memRef) {
1845 memoryReferenceRelease(_memRef);
1846 }
1847 if (_prepareLock) {
1848 IOLockFree(_prepareLock);
1849 }
1850
1851 super::free();
1852 }
1853
1854 #ifndef __LP64__
1855 void
1856 IOGeneralMemoryDescriptor::unmapFromKernel()
1857 {
1858 panic("IOGMD::unmapFromKernel deprecated");
1859 }
1860
1861 void
1862 IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1863 {
1864 panic("IOGMD::mapIntoKernel deprecated");
1865 }
1866 #endif /* !__LP64__ */
1867
1868 /*
1869 * getDirection:
1870 *
1871 * Get the direction of the transfer.
1872 */
1873 IODirection
1874 IOMemoryDescriptor::getDirection() const
1875 {
1876 #ifndef __LP64__
1877 if (_direction) {
1878 return _direction;
1879 }
1880 #endif /* !__LP64__ */
1881 return (IODirection) (_flags & kIOMemoryDirectionMask);
1882 }
1883
1884 /*
1885 * getLength:
1886 *
1887 * Get the length of the transfer (over all ranges).
1888 */
1889 IOByteCount
1890 IOMemoryDescriptor::getLength() const
1891 {
1892 return _length;
1893 }
1894
1895 void
1896 IOMemoryDescriptor::setTag( IOOptionBits tag )
1897 {
1898 _tag = tag;
1899 }
1900
1901 IOOptionBits
1902 IOMemoryDescriptor::getTag( void )
1903 {
1904 return _tag;
1905 }
1906
1907 uint64_t
1908 IOMemoryDescriptor::getFlags(void)
1909 {
1910 return _flags;
1911 }
1912
1913 #ifndef __LP64__
1914 #pragma clang diagnostic push
1915 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1916
1917 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1918 IOPhysicalAddress
1919 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1920 {
1921 addr64_t physAddr = 0;
1922
1923 if (prepare() == kIOReturnSuccess) {
1924 physAddr = getPhysicalSegment64( offset, length );
1925 complete();
1926 }
1927
1928 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
1929 }
1930
1931 #pragma clang diagnostic pop
1932
1933 #endif /* !__LP64__ */
1934
1935 IOByteCount
1936 IOMemoryDescriptor::readBytes
1937 (IOByteCount offset, void *bytes, IOByteCount length)
1938 {
1939 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1940 IOByteCount remaining;
1941
1942 // Assert that this entire I/O is within the available range
1943 assert(offset <= _length);
1944 assert(offset + length <= _length);
1945 if ((offset >= _length)
1946 || ((offset + length) > _length)) {
1947 return 0;
1948 }
1949
1950 assert(!(kIOMemoryRemote & _flags));
1951 if (kIOMemoryRemote & _flags) {
1952 return 0;
1953 }
1954
1955 if (kIOMemoryThreadSafe & _flags) {
1956 LOCK;
1957 }
1958
1959 remaining = length = min(length, _length - offset);
1960 while (remaining) { // (process another target segment?)
1961 addr64_t srcAddr64;
1962 IOByteCount srcLen;
1963
1964 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1965 if (!srcAddr64) {
1966 break;
1967 }
1968
1969 // Clip segment length to remaining
1970 if (srcLen > remaining) {
1971 srcLen = remaining;
1972 }
1973
1974 copypv(srcAddr64, dstAddr, srcLen,
1975 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1976
1977 dstAddr += srcLen;
1978 offset += srcLen;
1979 remaining -= srcLen;
1980 }
1981
1982 if (kIOMemoryThreadSafe & _flags) {
1983 UNLOCK;
1984 }
1985
1986 assert(!remaining);
1987
1988 return length - remaining;
1989 }
1990
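/*
 * Usage sketch (illustrative): copy the first bytes described by the descriptor
 * into a local kernel buffer; the return value is the number of bytes actually
 * copied (0 if the requested range falls outside the descriptor).
 *
 *   char buf[64];
 *   IOByteCount copied = md->readBytes(0, buf, sizeof(buf)); // md is hypothetical
 */
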
1991 IOByteCount
1992 IOMemoryDescriptor::writeBytes
1993 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1994 {
1995 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1996 IOByteCount remaining;
1997 IOByteCount offset = inoffset;
1998
1999 // Assert that this entire I/O is within the available range
2000 assert(offset <= _length);
2001 assert(offset + length <= _length);
2002
2003 assert( !(kIOMemoryPreparedReadOnly & _flags));
2004
2005 if ((kIOMemoryPreparedReadOnly & _flags)
2006 || (offset >= _length)
2007 || ((offset + length) > _length)) {
2008 return 0;
2009 }
2010
2011 assert(!(kIOMemoryRemote & _flags));
2012 if (kIOMemoryRemote & _flags) {
2013 return 0;
2014 }
2015
2016 if (kIOMemoryThreadSafe & _flags) {
2017 LOCK;
2018 }
2019
2020 remaining = length = min(length, _length - offset);
2021 while (remaining) { // (process another target segment?)
2022 addr64_t dstAddr64;
2023 IOByteCount dstLen;
2024
2025 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2026 if (!dstAddr64) {
2027 break;
2028 }
2029
2030 // Clip segment length to remaining
2031 if (dstLen > remaining) {
2032 dstLen = remaining;
2033 }
2034
2035 if (!srcAddr) {
2036 bzero_phys(dstAddr64, dstLen);
2037 } else {
2038 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
2039 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2040 srcAddr += dstLen;
2041 }
2042 offset += dstLen;
2043 remaining -= dstLen;
2044 }
2045
2046 if (kIOMemoryThreadSafe & _flags) {
2047 UNLOCK;
2048 }
2049
2050 assert(!remaining);
2051
2052 #if defined(__x86_64__)
2053 // copypv does not honor cppvFsnk on Intel
2054 #else
2055 if (!srcAddr) {
2056 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2057 }
2058 #endif
2059
2060 return length - remaining;
2061 }
2062
2063 #ifndef __LP64__
2064 void
2065 IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2066 {
2067 panic("IOGMD::setPosition deprecated");
2068 }
2069 #endif /* !__LP64__ */
2070
2071 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2072
2073 uint64_t
2074 IOGeneralMemoryDescriptor::getPreparationID( void )
2075 {
2076 ioGMDData *dataP;
2077
2078 if (!_wireCount) {
2079 return kIOPreparationIDUnprepared;
2080 }
2081
2082 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2083 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2084 IOMemoryDescriptor::setPreparationID();
2085 return IOMemoryDescriptor::getPreparationID();
2086 }
2087
2088 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2089 return kIOPreparationIDUnprepared;
2090 }
2091
2092 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2093 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2094 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2095 }
2096 return dataP->fPreparationID;
2097 }
2098
2099 void
2100 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2101 {
2102 if (reserved->creator) {
2103 task_deallocate(reserved->creator);
2104 reserved->creator = NULL;
2105 }
2106 }
2107
2108 IOMemoryDescriptorReserved *
2109 IOMemoryDescriptor::getKernelReserved( void )
2110 {
2111 if (!reserved) {
2112 reserved = IONewZero(IOMemoryDescriptorReserved, 1);
2113 }
2114 return reserved;
2115 }
2116
2117 void
2118 IOMemoryDescriptor::setPreparationID( void )
2119 {
2120 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2121 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2122 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2123 }
2124 }
2125
2126 uint64_t
2127 IOMemoryDescriptor::getPreparationID( void )
2128 {
2129 if (reserved) {
2130 return reserved->preparationID;
2131 } else {
2132 return kIOPreparationIDUnsupported;
2133 }
2134 }
2135
2136 void
2137 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2138 {
2139 _kernelTag = (vm_tag_t) kernelTag;
2140 _userTag = (vm_tag_t) userTag;
2141 }
2142
2143 uint32_t
2144 IOMemoryDescriptor::getVMTag(vm_map_t map)
2145 {
2146 if (vm_kernel_map_is_kernel(map)) {
2147 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2148 return (uint32_t) _kernelTag;
2149 }
2150 } else {
2151 if (VM_KERN_MEMORY_NONE != _userTag) {
2152 return (uint32_t) _userTag;
2153 }
2154 }
2155 return IOMemoryTag(map);
2156 }
2157
2158 IOReturn
2159 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2160 {
2161 IOReturn err = kIOReturnSuccess;
2162 DMACommandOps params;
2163 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2164 ioGMDData *dataP;
2165
2166 params = (op & ~kIOMDDMACommandOperationMask);
2167 op &= kIOMDDMACommandOperationMask;
2168
2169 if (kIOMDDMAMap == op) {
2170 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2171 return kIOReturnUnderrun;
2172 }
2173
2174 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2175
2176 if (!_memoryEntries
2177 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2178 return kIOReturnNoMemory;
2179 }
2180
2181 if (_memoryEntries && data->fMapper) {
2182 bool remap, keepMap;
2183 dataP = getDataP(_memoryEntries);
2184
2185 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2186 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2187 }
2188 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2189 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2190 }
2191
2192 keepMap = (data->fMapper == gIOSystemMapper);
2193 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2194
2195 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2196 IOLockLock(_prepareLock);
2197 }
2198
2199 remap = (!keepMap);
2200 remap |= (dataP->fDMAMapNumAddressBits < 64)
2201 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2202 remap |= (dataP->fDMAMapAlignment > page_size);
2203
2204 if (remap || !dataP->fMappedBaseValid) {
2205 // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2206 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2207 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2208 dataP->fMappedBase = data->fAlloc;
2209 dataP->fMappedBaseValid = true;
2210 dataP->fMappedLength = data->fAllocLength;
2211 data->fAllocLength = 0; // IOMD owns the alloc now
2212 }
2213 } else {
2214 data->fAlloc = dataP->fMappedBase;
2215 data->fAllocLength = 0; // give out IOMD map
2216 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2217 }
2218 data->fMapContig = !dataP->fDiscontig;
2219
2220 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2221 IOLockUnlock(_prepareLock);
2222 }
2223 }
2224 return err;
2225 }
2226 if (kIOMDDMAUnmap == op) {
2227 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2228 return kIOReturnUnderrun;
2229 }
2230 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2231
2232 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2233
2234 return kIOReturnSuccess;
2235 }
2236
2237 if (kIOMDAddDMAMapSpec == op) {
2238 if (dataSize < sizeof(IODMAMapSpecification)) {
2239 return kIOReturnUnderrun;
2240 }
2241
2242 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2243
2244 if (!_memoryEntries
2245 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2246 return kIOReturnNoMemory;
2247 }
2248
2249 if (_memoryEntries) {
2250 dataP = getDataP(_memoryEntries);
2251 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2252 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2253 }
2254 if (data->alignment > dataP->fDMAMapAlignment) {
2255 dataP->fDMAMapAlignment = data->alignment;
2256 }
2257 }
2258 return kIOReturnSuccess;
2259 }
2260
2261 if (kIOMDGetCharacteristics == op) {
2262 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2263 return kIOReturnUnderrun;
2264 }
2265
2266 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2267 data->fLength = _length;
2268 data->fSGCount = _rangesCount;
2269 data->fPages = _pages;
2270 data->fDirection = getDirection();
2271 if (!_wireCount) {
2272 data->fIsPrepared = false;
2273 } else {
2274 data->fIsPrepared = true;
2275 data->fHighestPage = _highestPage;
2276 if (_memoryEntries) {
2277 dataP = getDataP(_memoryEntries);
2278 ioPLBlock *ioplList = getIOPLList(dataP);
2279 UInt count = getNumIOPL(_memoryEntries, dataP);
2280 if (count == 1) {
2281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2282 }
2283 }
2284 }
2285
2286 return kIOReturnSuccess;
2287 } else if (kIOMDDMAActive == op) {
2288 if (params) {
2289 int16_t prior;
2290 prior = OSAddAtomic16(1, &md->_dmaReferences);
2291 if (!prior) {
2292 md->_mapName = NULL;
2293 }
2294 } else {
2295 if (md->_dmaReferences) {
2296 OSAddAtomic16(-1, &md->_dmaReferences);
2297 } else {
2298 panic("_dmaReferences underflow");
2299 }
2300 }
2301 } else if (kIOMDWalkSegments != op) {
2302 return kIOReturnBadArgument;
2303 }
2304
2305 // Get the next segment
2306 struct InternalState {
2307 IOMDDMAWalkSegmentArgs fIO;
2308 mach_vm_size_t fOffset2Index;
2309 mach_vm_size_t fNextOffset;
2310 UInt fIndex;
2311 } *isP;
2312
2313 // Find the next segment
2314 if (dataSize < sizeof(*isP)) {
2315 return kIOReturnUnderrun;
2316 }
2317
2318 isP = (InternalState *) vData;
2319 mach_vm_size_t offset = isP->fIO.fOffset;
2320 uint8_t mapped = isP->fIO.fMapped;
2321 uint64_t mappedBase;
2322
2323 if (mapped && (kIOMemoryRemote & _flags)) {
2324 return kIOReturnNotAttached;
2325 }
2326
2327 if (IOMapper::gSystem && mapped
2328 && (!(kIOMemoryHostOnly & _flags))
2329 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2330 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2331 if (!_memoryEntries
2332 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2333 return kIOReturnNoMemory;
2334 }
2335
2336 dataP = getDataP(_memoryEntries);
2337 if (dataP->fMapper) {
2338 IODMAMapSpecification mapSpec;
2339 bzero(&mapSpec, sizeof(mapSpec));
2340 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2341 mapSpec.alignment = dataP->fDMAMapAlignment;
2342 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2343 if (kIOReturnSuccess != err) {
2344 return err;
2345 }
2346 dataP->fMappedBaseValid = true;
2347 }
2348 }
2349
2350 if (kIOMDDMAWalkMappedLocal == mapped) {
2351 mappedBase = isP->fIO.fMappedBase;
2352 } else if (mapped) {
2353 if (IOMapper::gSystem
2354 && (!(kIOMemoryHostOnly & _flags))
2355 && _memoryEntries
2356 && (dataP = getDataP(_memoryEntries))
2357 && dataP->fMappedBaseValid) {
2358 mappedBase = dataP->fMappedBase;
2359 } else {
2360 mapped = 0;
2361 }
2362 }
2363
2364 if (offset >= _length) {
2365 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2366 }
2367
2368 // Validate the previous offset
2369 UInt ind;
2370 mach_vm_size_t off2Ind = isP->fOffset2Index;
2371 if (!params
2372 && offset
2373 && (offset == isP->fNextOffset || off2Ind <= offset)) {
2374 ind = isP->fIndex;
2375 } else {
2376 ind = off2Ind = 0; // Start from beginning
2377 }
2378 mach_vm_size_t length;
2379 UInt64 address;
2380
2381 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2382 // Physical address based memory descriptor
2383 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2384
2385 // Find the range after the one that contains the offset
2386 mach_vm_size_t len;
2387 for (len = 0; off2Ind <= offset; ind++) {
2388 len = physP[ind].length;
2389 off2Ind += len;
2390 }
2391
2392 // Calculate length within range and starting address
2393 length = off2Ind - offset;
2394 address = physP[ind - 1].address + len - length;
2395
2396 if (mapped) {
2397 address = mappedBase + offset;
2398 } else {
2399 // see how far we can coalesce ranges
2400 while (ind < _rangesCount && address + length == physP[ind].address) {
2401 len = physP[ind].length;
2402 length += len;
2403 off2Ind += len;
2404 ind++;
2405 }
2406 }
2407
2408 // correct contiguous check overshoot
2409 ind--;
2410 off2Ind -= len;
2411 }
2412 #ifndef __LP64__
2413 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2414 // Physical address based memory descriptor
2415 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2416
2417 // Find the range after the one that contains the offset
2418 mach_vm_size_t len;
2419 for (len = 0; off2Ind <= offset; ind++) {
2420 len = physP[ind].length;
2421 off2Ind += len;
2422 }
2423
2424 // Calculate length within range and starting address
2425 length = off2Ind - offset;
2426 address = physP[ind - 1].address + len - length;
2427
2428 if (mapped) {
2429 address = mappedBase + offset;
2430 } else {
2431 // see how far we can coalesce ranges
2432 while (ind < _rangesCount && address + length == physP[ind].address) {
2433 len = physP[ind].length;
2434 length += len;
2435 off2Ind += len;
2436 ind++;
2437 }
2438 }
2439 // correct contiguous check overshoot
2440 ind--;
2441 off2Ind -= len;
2442 }
2443 #endif /* !__LP64__ */
2444 else {
2445 do {
2446 if (!_wireCount) {
2447 panic("IOGMD: not wired for the IODMACommand");
2448 }
2449
2450 assert(_memoryEntries);
2451
2452 dataP = getDataP(_memoryEntries);
2453 const ioPLBlock *ioplList = getIOPLList(dataP);
2454 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2455 upl_page_info_t *pageList = getPageList(dataP);
2456
2457 assert(numIOPLs > 0);
2458
2459 // Scan through iopl info blocks looking for block containing offset
2460 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
2461 ind++;
2462 }
2463
2464 // Go back to actual range as search goes past it
2465 ioPLBlock ioplInfo = ioplList[ind - 1];
2466 off2Ind = ioplInfo.fIOMDOffset;
2467
2468 if (ind < numIOPLs) {
2469 length = ioplList[ind].fIOMDOffset;
2470 } else {
2471 length = _length;
2472 }
2473 length -= offset; // Remainder within iopl
2474
2475 // Subtract the offset up to this iopl in the total list
2476 offset -= off2Ind;
2477
2478 // If a mapped address is requested and this is a pre-mapped IOPL
2479 // then we just need to compute an offset relative to the mapped base.
2480 if (mapped) {
2481 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2482 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2483 continue; // Done; leave the do/while(false) now
2484 }
2485
2486 // The offset is rebased into the current iopl.
2487 // Now add the iopl 1st page offset.
2488 offset += ioplInfo.fPageOffset;
2489
2490 // For external UPLs the fPageInfo field points directly to
2491 // the upl's upl_page_info_t array.
2492 if (ioplInfo.fFlags & kIOPLExternUPL) {
2493 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2494 } else {
2495 pageList = &pageList[ioplInfo.fPageInfo];
2496 }
2497
2498 // Check for direct device non-paged memory
2499 if (ioplInfo.fFlags & kIOPLOnDevice) {
2500 address = ptoa_64(pageList->phys_addr) + offset;
2501 continue; // Done; leave the do/while(false) now
2502 }
2503
2504 // Now we need to compute the index into the pageList
2505 UInt pageInd = atop_32(offset);
2506 offset &= PAGE_MASK;
2507
2508 // Compute the starting address of this segment
2509 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2510 if (!pageAddr) {
2511 panic("!pageList phys_addr");
2512 }
2513
2514 address = ptoa_64(pageAddr) + offset;
2515
2516 // length is currently set to the length of the remainder of the iopl.
2517 // We need to check that the remainder of the iopl is contiguous.
2518 // This is indicated by pageList[pageInd].phys_addr values being sequential.
2519 IOByteCount contigLength = PAGE_SIZE - offset;
2520 while (contigLength < length
2521 && ++pageAddr == pageList[++pageInd].phys_addr) {
2522 contigLength += PAGE_SIZE;
2523 }
2524
2525 if (contigLength < length) {
2526 length = contigLength;
2527 }
2528
2529
2530 assert(address);
2531 assert(length);
2532 } while (false);
2533 }
2534
2535 // Update return values and state
2536 isP->fIO.fIOVMAddr = address;
2537 isP->fIO.fLength = length;
2538 isP->fIndex = ind;
2539 isP->fOffset2Index = off2Ind;
2540 isP->fNextOffset = isP->fIO.fOffset + length;
2541
2542 return kIOReturnSuccess;
2543 }
2544
2545 addr64_t
2546 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2547 {
2548 IOReturn ret;
2549 mach_vm_address_t address = 0;
2550 mach_vm_size_t length = 0;
2551 IOMapper * mapper = gIOSystemMapper;
2552 IOOptionBits type = _flags & kIOMemoryTypeMask;
2553
2554 if (lengthOfSegment) {
2555 *lengthOfSegment = 0;
2556 }
2557
2558 if (offset >= _length) {
2559 return 0;
2560 }
2561
2562 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2563 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2564 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2565 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2566
2567 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
2568 unsigned rangesIndex = 0;
2569 Ranges vec = _ranges;
2570 mach_vm_address_t addr;
2571
2572 // Find starting address within the vector of ranges
2573 for (;;) {
2574 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2575 if (offset < length) {
2576 break;
2577 }
2578 offset -= length; // (make offset relative)
2579 rangesIndex++;
2580 }
2581
2582 // Now that we have the starting range,
2583 // let's find the last contiguous range
2584 addr += offset;
2585 length -= offset;
2586
2587 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
2588 mach_vm_address_t newAddr;
2589 mach_vm_size_t newLen;
2590
2591 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2592 if (addr + length != newAddr) {
2593 break;
2594 }
2595 length += newLen;
2596 }
2597 if (addr) {
2598 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2599 }
2600 } else {
2601 IOMDDMAWalkSegmentState _state;
2602 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2603
2604 state->fOffset = offset;
2605 state->fLength = _length - offset;
2606 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
2607
2608 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2609
2610 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
2611 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2612 ret, this, state->fOffset,
2613 state->fIOVMAddr, state->fLength);
2614 }
2615 if (kIOReturnSuccess == ret) {
2616 address = state->fIOVMAddr;
2617 length = state->fLength;
2618 }
2619
2620 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2621 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2622
2623 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
2624 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
2625 addr64_t origAddr = address;
2626 IOByteCount origLen = length;
2627
2628 address = mapper->mapToPhysicalAddress(origAddr);
2629 length = page_size - (address & (page_size - 1));
2630 while ((length < origLen)
2631 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
2632 length += page_size;
2633 }
2634 if (length > origLen) {
2635 length = origLen;
2636 }
2637 }
2638 }
2639 }
2640
2641 if (!address) {
2642 length = 0;
2643 }
2644
2645 if (lengthOfSegment) {
2646 *lengthOfSegment = length;
2647 }
2648
2649 return address;
2650 }
2651
2652 #ifndef __LP64__
2653 #pragma clang diagnostic push
2654 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2655
2656 addr64_t
2657 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2658 {
2659 addr64_t address = 0;
2660
2661 if (options & _kIOMemorySourceSegment) {
2662 address = getSourceSegment(offset, lengthOfSegment);
2663 } else if (options & kIOMemoryMapperNone) {
2664 address = getPhysicalSegment64(offset, lengthOfSegment);
2665 } else {
2666 address = getPhysicalSegment(offset, lengthOfSegment);
2667 }
2668
2669 return address;
2670 }
2671 #pragma clang diagnostic pop
2672
2673 addr64_t
2674 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2675 {
2676 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
2677 }
2678
2679 IOPhysicalAddress
2680 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2681 {
2682 addr64_t address = 0;
2683 IOByteCount length = 0;
2684
2685 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2686
2687 if (lengthOfSegment) {
2688 length = *lengthOfSegment;
2689 }
2690
2691 if ((address + length) > 0x100000000ULL) {
2692 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2693 address, (long) length, (getMetaClass())->getClassName());
2694 }
2695
2696 return (IOPhysicalAddress) address;
2697 }
2698
2699 addr64_t
2700 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2701 {
2702 IOPhysicalAddress phys32;
2703 IOByteCount length;
2704 addr64_t phys64;
2705 IOMapper * mapper = NULL;
2706
2707 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2708 if (!phys32) {
2709 return 0;
2710 }
2711
2712 if (gIOSystemMapper) {
2713 mapper = gIOSystemMapper;
2714 }
2715
2716 if (mapper) {
2717 IOByteCount origLen;
2718
2719 phys64 = mapper->mapToPhysicalAddress(phys32);
2720 origLen = *lengthOfSegment;
2721 length = page_size - (phys64 & (page_size - 1));
2722 while ((length < origLen)
2723 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
2724 length += page_size;
2725 }
2726 if (length > origLen) {
2727 length = origLen;
2728 }
2729
2730 *lengthOfSegment = length;
2731 } else {
2732 phys64 = (addr64_t) phys32;
2733 }
2734
2735 return phys64;
2736 }
2737
2738 IOPhysicalAddress
2739 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2740 {
2741 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
2742 }
2743
2744 IOPhysicalAddress
2745 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2746 {
2747 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
2748 }
2749
2750 #pragma clang diagnostic push
2751 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2752
2753 void *
2754 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2755 IOByteCount * lengthOfSegment)
2756 {
2757 if (_task == kernel_task) {
2758 return (void *) getSourceSegment(offset, lengthOfSegment);
2759 } else {
2760 panic("IOGMD::getVirtualSegment deprecated");
2761 }
2762
2763 return NULL;
2764 }
2765 #pragma clang diagnostic pop
2766 #endif /* !__LP64__ */
2767
2768 IOReturn
2769 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2770 {
2771 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2772 DMACommandOps params;
2773 IOReturn err;
2774
2775 params = (op & ~kIOMDDMACommandOperationMask);
2776 op &= kIOMDDMACommandOperationMask;
2777
2778 if (kIOMDGetCharacteristics == op) {
2779 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2780 return kIOReturnUnderrun;
2781 }
2782
2783 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2784 data->fLength = getLength();
2785 data->fSGCount = 0;
2786 data->fDirection = getDirection();
2787 data->fIsPrepared = true; // Assume prepared - fail-safe
2788 } else if (kIOMDWalkSegments == op) {
2789 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
2790 return kIOReturnUnderrun;
2791 }
2792
2793 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2794 IOByteCount offset = (IOByteCount) data->fOffset;
2795
2796 IOPhysicalLength length;
2797 if (data->fMapped && IOMapper::gSystem) {
2798 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2799 } else {
2800 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2801 }
2802 data->fLength = length;
2803 } else if (kIOMDAddDMAMapSpec == op) {
2804 return kIOReturnUnsupported;
2805 } else if (kIOMDDMAMap == op) {
2806 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2807 return kIOReturnUnderrun;
2808 }
2809 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2810
2811 if (params) {
2812 panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2813 }
2814
2815 data->fMapContig = true;
2816 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2817
2818 return err;
2819 } else if (kIOMDDMAUnmap == op) {
2820 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2821 return kIOReturnUnderrun;
2822 }
2823 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2824
2825 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2826
2827 return kIOReturnSuccess;
2828 } else {
2829 return kIOReturnBadArgument;
2830 }
2831
2832 return kIOReturnSuccess;
2833 }
2834
2835 IOReturn
2836 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2837 IOOptionBits * oldState )
2838 {
2839 IOReturn err = kIOReturnSuccess;
2840
2841 vm_purgable_t control;
2842 int state;
2843
2844 assert(!(kIOMemoryRemote & _flags));
2845 if (kIOMemoryRemote & _flags) {
2846 return kIOReturnNotAttached;
2847 }
2848
2849 if (_memRef) {
2850 err = super::setPurgeable(newState, oldState);
2851 } else {
2852 if (kIOMemoryThreadSafe & _flags) {
2853 LOCK;
2854 }
2855 do{
2856 // Find the appropriate vm_map for the given task
2857 vm_map_t curMap;
2858 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
2859 err = kIOReturnNotReady;
2860 break;
2861 } else if (!_task) {
2862 err = kIOReturnUnsupported;
2863 break;
2864 } else {
2865 curMap = get_task_map(_task);
2866 if (NULL == curMap) {
2867 err = KERN_INVALID_ARGUMENT;
2868 break;
2869 }
2870 }
2871
2872 // can only do one range
2873 Ranges vec = _ranges;
2874 IOOptionBits type = _flags & kIOMemoryTypeMask;
2875 mach_vm_address_t addr;
2876 mach_vm_size_t len;
2877 getAddrLenForInd(addr, len, type, vec, 0);
2878
2879 err = purgeableControlBits(newState, &control, &state);
2880 if (kIOReturnSuccess != err) {
2881 break;
2882 }
2883 err = vm_map_purgable_control(curMap, addr, control, &state);
2884 if (oldState) {
2885 if (kIOReturnSuccess == err) {
2886 err = purgeableStateBits(&state);
2887 *oldState = state;
2888 }
2889 }
2890 }while (false);
2891 if (kIOMemoryThreadSafe & _flags) {
2892 UNLOCK;
2893 }
2894 }
2895
2896 return err;
2897 }
2898
2899 IOReturn
2900 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2901 IOOptionBits * oldState )
2902 {
2903 IOReturn err = kIOReturnNotReady;
2904
2905 if (kIOMemoryThreadSafe & _flags) {
2906 LOCK;
2907 }
2908 if (_memRef) {
2909 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2910 }
2911 if (kIOMemoryThreadSafe & _flags) {
2912 UNLOCK;
2913 }
2914
2915 return err;
2916 }
2917
2918 IOReturn
2919 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
2920 int newLedgerTag,
2921 IOOptionBits newLedgerOptions )
2922 {
2923 IOReturn err = kIOReturnSuccess;
2924
2925 assert(!(kIOMemoryRemote & _flags));
2926 if (kIOMemoryRemote & _flags) {
2927 return kIOReturnNotAttached;
2928 }
2929
2930 if (iokit_iomd_setownership_enabled == FALSE) {
2931 return kIOReturnUnsupported;
2932 }
2933
2934 if (_memRef) {
2935 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2936 } else {
2937 err = kIOReturnUnsupported;
2938 }
2939
2940 return err;
2941 }
2942
2943 IOReturn
2944 IOMemoryDescriptor::setOwnership( task_t newOwner,
2945 int newLedgerTag,
2946 IOOptionBits newLedgerOptions )
2947 {
2948 IOReturn err = kIOReturnNotReady;
2949
2950 assert(!(kIOMemoryRemote & _flags));
2951 if (kIOMemoryRemote & _flags) {
2952 return kIOReturnNotAttached;
2953 }
2954
2955 if (iokit_iomd_setownership_enabled == FALSE) {
2956 return kIOReturnUnsupported;
2957 }
2958
2959 if (kIOMemoryThreadSafe & _flags) {
2960 LOCK;
2961 }
2962 if (_memRef) {
2963 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
2964 } else {
2965 IOMultiMemoryDescriptor * mmd;
2966 IOSubMemoryDescriptor * smd;
2967 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
2968 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2969 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
2970 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2971 }
2972 }
2973 if (kIOMemoryThreadSafe & _flags) {
2974 UNLOCK;
2975 }
2976
2977 return err;
2978 }
2979
2980 IOReturn
2981 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2982 IOByteCount * dirtyPageCount )
2983 {
2984 IOReturn err = kIOReturnNotReady;
2985
2986 assert(!(kIOMemoryRemote & _flags));
2987 if (kIOMemoryRemote & _flags) {
2988 return kIOReturnNotAttached;
2989 }
2990
2991 if (kIOMemoryThreadSafe & _flags) {
2992 LOCK;
2993 }
2994 if (_memRef) {
2995 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2996 } else {
2997 IOMultiMemoryDescriptor * mmd;
2998 IOSubMemoryDescriptor * smd;
2999 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3000 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3001 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3002 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3003 }
3004 }
3005 if (kIOMemoryThreadSafe & _flags) {
3006 UNLOCK;
3007 }
3008
3009 return err;
3010 }
3011
3012
3013 #if defined(__arm__) || defined(__arm64__)
3014 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3015 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3016 #else /* defined(__arm__) || defined(__arm64__) */
3017 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3018 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3019 #endif /* defined(__arm__) || defined(__arm64__) */
3020
3021 static void
3022 SetEncryptOp(addr64_t pa, unsigned int count)
3023 {
3024 ppnum_t page, end;
3025
3026 page = atop_64(round_page_64(pa));
3027 end = atop_64(trunc_page_64(pa + count));
3028 for (; page < end; page++) {
3029 pmap_clear_noencrypt(page);
3030 }
3031 }
3032
3033 static void
3034 ClearEncryptOp(addr64_t pa, unsigned int count)
3035 {
3036 ppnum_t page, end;
3037
3038 page = atop_64(round_page_64(pa));
3039 end = atop_64(trunc_page_64(pa + count));
3040 for (; page < end; page++) {
3041 pmap_set_noencrypt(page);
3042 }
3043 }
3044
3045 IOReturn
3046 IOMemoryDescriptor::performOperation( IOOptionBits options,
3047 IOByteCount offset, IOByteCount length )
3048 {
3049 IOByteCount remaining;
3050 unsigned int res;
3051 void (*func)(addr64_t pa, unsigned int count) = NULL;
3052 #if defined(__arm__) || defined(__arm64__)
3053 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3054 #endif
3055
3056 assert(!(kIOMemoryRemote & _flags));
3057 if (kIOMemoryRemote & _flags) {
3058 return kIOReturnNotAttached;
3059 }
3060
3061 switch (options) {
3062 case kIOMemoryIncoherentIOFlush:
3063 #if defined(__arm__) || defined(__arm64__)
3064 func_ext = &dcache_incoherent_io_flush64;
3065 #if __ARM_COHERENT_IO__
3066 func_ext(0, 0, 0, &res);
3067 return kIOReturnSuccess;
3068 #else /* __ARM_COHERENT_IO__ */
3069 break;
3070 #endif /* __ARM_COHERENT_IO__ */
3071 #else /* defined(__arm__) || defined(__arm64__) */
3072 func = &dcache_incoherent_io_flush64;
3073 break;
3074 #endif /* defined(__arm__) || defined(__arm64__) */
3075 case kIOMemoryIncoherentIOStore:
3076 #if defined(__arm__) || defined(__arm64__)
3077 func_ext = &dcache_incoherent_io_store64;
3078 #if __ARM_COHERENT_IO__
3079 func_ext(0, 0, 0, &res);
3080 return kIOReturnSuccess;
3081 #else /* __ARM_COHERENT_IO__ */
3082 break;
3083 #endif /* __ARM_COHERENT_IO__ */
3084 #else /* defined(__arm__) || defined(__arm64__) */
3085 func = &dcache_incoherent_io_store64;
3086 break;
3087 #endif /* defined(__arm__) || defined(__arm64__) */
3088
3089 case kIOMemorySetEncrypted:
3090 func = &SetEncryptOp;
3091 break;
3092 case kIOMemoryClearEncrypted:
3093 func = &ClearEncryptOp;
3094 break;
3095 }
3096
3097 #if defined(__arm__) || defined(__arm64__)
3098 if ((func == NULL) && (func_ext == NULL)) {
3099 return kIOReturnUnsupported;
3100 }
3101 #else /* defined(__arm__) || defined(__arm64__) */
3102 if (!func) {
3103 return kIOReturnUnsupported;
3104 }
3105 #endif /* defined(__arm__) || defined(__arm64__) */
3106
3107 if (kIOMemoryThreadSafe & _flags) {
3108 LOCK;
3109 }
3110
3111 res = 0x0UL;
3112 remaining = length = min(length, getLength() - offset);
3113 while (remaining) {
3114 // (process another target segment?)
3115 addr64_t dstAddr64;
3116 IOByteCount dstLen;
3117
3118 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3119 if (!dstAddr64) {
3120 break;
3121 }
3122
3123 // Clip segment length to remaining
3124 if (dstLen > remaining) {
3125 dstLen = remaining;
3126 }
3127
3128 #if defined(__arm__) || defined(__arm64__)
3129 if (func) {
3130 (*func)(dstAddr64, dstLen);
3131 }
3132 if (func_ext) {
3133 (*func_ext)(dstAddr64, dstLen, remaining, &res);
3134 if (res != 0x0UL) {
3135 remaining = 0;
3136 break;
3137 }
3138 }
3139 #else /* defined(__arm__) || defined(__arm64__) */
3140 (*func)(dstAddr64, dstLen);
3141 #endif /* defined(__arm__) || defined(__arm64__) */
3142
3143 offset += dstLen;
3144 remaining -= dstLen;
3145 }
3146
3147 if (kIOMemoryThreadSafe & _flags) {
3148 UNLOCK;
3149 }
3150
3151 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
3152 }
3153
3154 /*
3155 *
3156 */
3157
3158 #if defined(__i386__) || defined(__x86_64__)
3159
3160 #define io_kernel_static_start vm_kernel_stext
3161 #define io_kernel_static_end vm_kernel_etext
3162
3163 #elif defined(__arm__) || defined(__arm64__)
3164
3165 extern vm_offset_t static_memory_end;
3166
3167 #if defined(__arm64__)
3168 #define io_kernel_static_start vm_kext_base
3169 #else /* defined(__arm64__) */
3170 #define io_kernel_static_start vm_kernel_stext
3171 #endif /* defined(__arm64__) */
3172
3173 #define io_kernel_static_end static_memory_end
3174
3175 #else
3176 #error io_kernel_static_end is undefined for this architecture
3177 #endif
3178
3179 static kern_return_t
3180 io_get_kernel_static_upl(
3181 vm_map_t /* map */,
3182 uintptr_t offset,
3183 upl_size_t *upl_size,
3184 upl_t *upl,
3185 upl_page_info_array_t page_list,
3186 unsigned int *count,
3187 ppnum_t *highest_page)
3188 {
3189 unsigned int pageCount, page;
3190 ppnum_t phys;
3191 ppnum_t highestPage = 0;
3192
3193 pageCount = atop_32(*upl_size);
3194 if (pageCount > *count) {
3195 pageCount = *count;
3196 }
3197
3198 *upl = NULL;
3199
3200 for (page = 0; page < pageCount; page++) {
3201 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3202 if (!phys) {
3203 break;
3204 }
3205 page_list[page].phys_addr = phys;
3206 page_list[page].free_when_done = 0;
3207 page_list[page].absent = 0;
3208 page_list[page].dirty = 0;
3209 page_list[page].precious = 0;
3210 page_list[page].device = 0;
3211 if (phys > highestPage) {
3212 highestPage = phys;
3213 }
3214 }
3215
3216 *highest_page = highestPage;
3217
3218 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
3219 }
3220
3221 IOReturn
3222 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3223 {
3224 IOOptionBits type = _flags & kIOMemoryTypeMask;
3225 IOReturn error = kIOReturnSuccess;
3226 ioGMDData *dataP;
3227 upl_page_info_array_t pageInfo;
3228 ppnum_t mapBase;
3229 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3230
3231 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3232
3233 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3234 forDirection = (IODirection) (forDirection | getDirection());
3235 }
3236
3237 dataP = getDataP(_memoryEntries);
3238 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3239 switch (kIODirectionOutIn & forDirection) {
3240 case kIODirectionOut:
3241 // Pages do not need to be marked as dirty on commit
3242 uplFlags = UPL_COPYOUT_FROM;
3243 dataP->fDMAAccess = kIODMAMapReadAccess;
3244 break;
3245
3246 case kIODirectionIn:
3247 dataP->fDMAAccess = kIODMAMapWriteAccess;
3248 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3249 break;
3250
3251 default:
3252 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3253 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3254 break;
3255 }
3256
3257 if (_wireCount) {
3258 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3259 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3260 error = kIOReturnNotWritable;
3261 }
3262 } else {
3263 IOMapper *mapper;
3264
3265 mapper = dataP->fMapper;
3266 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3267
3268 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3269 tag = _kernelTag;
3270 if (VM_KERN_MEMORY_NONE == tag) {
3271 tag = IOMemoryTag(kernel_map);
3272 }
3273
3274 if (kIODirectionPrepareToPhys32 & forDirection) {
3275 if (!mapper) {
3276 uplFlags |= UPL_NEED_32BIT_ADDR;
3277 }
3278 if (dataP->fDMAMapNumAddressBits > 32) {
3279 dataP->fDMAMapNumAddressBits = 32;
3280 }
3281 }
3282 if (kIODirectionPrepareNoFault & forDirection) {
3283 uplFlags |= UPL_REQUEST_NO_FAULT;
3284 }
3285 if (kIODirectionPrepareNoZeroFill & forDirection) {
3286 uplFlags |= UPL_NOZEROFILLIO;
3287 }
3288 if (kIODirectionPrepareNonCoherent & forDirection) {
3289 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3290 }
3291
3292 mapBase = 0;
3293
3294 // Note that appendBytes(NULL) zeros the data up to the desired length
3295 // and the length parameter is an unsigned int
3296 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3297 if (uplPageSize > ((unsigned int)uplPageSize)) {
3298 return kIOReturnNoMemory;
3299 }
3300 if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
3301 return kIOReturnNoMemory;
3302 }
3303 dataP = NULL;
3304
3305 // Find the appropriate vm_map for the given task
3306 vm_map_t curMap;
3307 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
3308 curMap = NULL;
3309 } else {
3310 curMap = get_task_map(_task);
3311 }
3312
3313 // Iterate over the vector of virtual ranges
3314 Ranges vec = _ranges;
3315 unsigned int pageIndex = 0;
3316 IOByteCount mdOffset = 0;
3317 ppnum_t highestPage = 0;
3318
3319 IOMemoryEntry * memRefEntry = NULL;
3320 if (_memRef) {
3321 memRefEntry = &_memRef->entries[0];
3322 }
3323
3324 for (UInt range = 0; range < _rangesCount; range++) {
3325 ioPLBlock iopl;
3326 mach_vm_address_t startPage, startPageOffset;
3327 mach_vm_size_t numBytes;
3328 ppnum_t highPage = 0;
3329
3330 // Get the startPage address and length of vec[range]
3331 getAddrLenForInd(startPage, numBytes, type, vec, range);
3332 startPageOffset = startPage & PAGE_MASK;
3333 iopl.fPageOffset = startPageOffset;
3334 numBytes += startPageOffset;
3335 startPage = trunc_page_64(startPage);
3336
3337 if (mapper) {
3338 iopl.fMappedPage = mapBase + pageIndex;
3339 } else {
3340 iopl.fMappedPage = 0;
3341 }
3342
3343 // Iterate over the current range, creating UPLs
3344 while (numBytes) {
3345 vm_address_t kernelStart = (vm_address_t) startPage;
3346 vm_map_t theMap;
3347 if (curMap) {
3348 theMap = curMap;
3349 } else if (_memRef) {
3350 theMap = NULL;
3351 } else {
3352 assert(_task == kernel_task);
3353 theMap = IOPageableMapForAddress(kernelStart);
3354 }
3355
3356 // ioplFlags is an in/out parameter
3357 upl_control_flags_t ioplFlags = uplFlags;
3358 dataP = getDataP(_memoryEntries);
3359 pageInfo = getPageList(dataP);
3360 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3361
3362 mach_vm_size_t _ioplSize = round_page(numBytes);
3363 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3364 unsigned int numPageInfo = atop_32(ioplSize);
3365
3366 if ((theMap == kernel_map)
3367 && (kernelStart >= io_kernel_static_start)
3368 && (kernelStart < io_kernel_static_end)) {
3369 error = io_get_kernel_static_upl(theMap,
3370 kernelStart,
3371 &ioplSize,
3372 &iopl.fIOPL,
3373 baseInfo,
3374 &numPageInfo,
3375 &highPage);
3376 } else if (_memRef) {
3377 memory_object_offset_t entryOffset;
3378
3379 entryOffset = mdOffset;
3380 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3381 if (entryOffset >= memRefEntry->size) {
3382 memRefEntry++;
3383 if (memRefEntry >= &_memRef->entries[_memRef->count]) {
3384 panic("memRefEntry");
3385 }
3386 entryOffset = 0;
3387 }
3388 if (ioplSize > (memRefEntry->size - entryOffset)) {
3389 ioplSize = (memRefEntry->size - entryOffset);
3390 }
3391 error = memory_object_iopl_request(memRefEntry->entry,
3392 entryOffset,
3393 &ioplSize,
3394 &iopl.fIOPL,
3395 baseInfo,
3396 &numPageInfo,
3397 &ioplFlags,
3398 tag);
3399 } else {
3400 assert(theMap);
3401 error = vm_map_create_upl(theMap,
3402 startPage,
3403 (upl_size_t*)&ioplSize,
3404 &iopl.fIOPL,
3405 baseInfo,
3406 &numPageInfo,
3407 &ioplFlags,
3408 tag);
3409 }
3410
3411 if (error != KERN_SUCCESS) {
3412 goto abortExit;
3413 }
3414
3415 assert(ioplSize);
3416
3417 if (iopl.fIOPL) {
3418 highPage = upl_get_highest_page(iopl.fIOPL);
3419 }
3420 if (highPage > highestPage) {
3421 highestPage = highPage;
3422 }
3423
3424 if (baseInfo->device) {
3425 numPageInfo = 1;
3426 iopl.fFlags = kIOPLOnDevice;
3427 } else {
3428 iopl.fFlags = 0;
3429 }
3430
3431 iopl.fIOMDOffset = mdOffset;
3432 iopl.fPageInfo = pageIndex;
3433 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) {
3434 dataP->fDiscontig = true;
3435 }
3436
3437 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3438 // Clean up the partially created and unsaved iopl
3439 if (iopl.fIOPL) {
3440 upl_abort(iopl.fIOPL, 0);
3441 upl_deallocate(iopl.fIOPL);
3442 }
3443 error = kIOReturnNoMemory;
3444 goto abortExit;
3445 }
3446 dataP = NULL;
3447
3448 // Check for multiple iopls in one virtual range
3449 pageIndex += numPageInfo;
3450 mdOffset -= iopl.fPageOffset;
3451 if (ioplSize < numBytes) {
3452 numBytes -= ioplSize;
3453 startPage += ioplSize;
3454 mdOffset += ioplSize;
3455 iopl.fPageOffset = 0;
3456 if (mapper) {
3457 iopl.fMappedPage = mapBase + pageIndex;
3458 }
3459 } else {
3460 mdOffset += numBytes;
3461 break;
3462 }
3463 }
3464 }
3465
3466 _highestPage = highestPage;
3467
3468 if (UPL_COPYOUT_FROM & uplFlags) {
3469 _flags |= kIOMemoryPreparedReadOnly;
3470 }
3471 }
3472
3473 #if IOTRACKING
3474 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
3475 dataP = getDataP(_memoryEntries);
3476 if (!dataP->fWireTracking.link.next) {
3477 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3478 }
3479 }
3480 #endif /* IOTRACKING */
3481
3482 return error;
3483
3484 abortExit:
3485 {
3486 dataP = getDataP(_memoryEntries);
3487 UInt done = getNumIOPL(_memoryEntries, dataP);
3488 ioPLBlock *ioplList = getIOPLList(dataP);
3489
3490 for (UInt range = 0; range < done; range++) {
3491 if (ioplList[range].fIOPL) {
3492 upl_abort(ioplList[range].fIOPL, 0);
3493 upl_deallocate(ioplList[range].fIOPL);
3494 }
3495 }
3496 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3497 }
3498
3499 if (error == KERN_FAILURE) {
3500 error = kIOReturnCannotWire;
3501 } else if (error == KERN_MEMORY_ERROR) {
3502 error = kIOReturnNoResources;
3503 }
3504
3505 return error;
3506 }
3507
3508 bool
3509 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3510 {
3511 ioGMDData * dataP;
3512 unsigned dataSize = size;
3513
3514 if (!_memoryEntries) {
3515 _memoryEntries = OSData::withCapacity(dataSize);
3516 if (!_memoryEntries) {
3517 return false;
3518 }
3519 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
3520 return false;
3521 }
3522
3523 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
3524 dataP = getDataP(_memoryEntries);
3525
3526 if (mapper == kIOMapperWaitSystem) {
3527 IOMapper::checkForSystemMapper();
3528 mapper = IOMapper::gSystem;
3529 }
3530 dataP->fMapper = mapper;
3531 dataP->fPageCnt = 0;
3532 dataP->fMappedBase = 0;
3533 dataP->fDMAMapNumAddressBits = 64;
3534 dataP->fDMAMapAlignment = 0;
3535 dataP->fPreparationID = kIOPreparationIDUnprepared;
3536 dataP->fDiscontig = false;
3537 dataP->fCompletionError = false;
3538 dataP->fMappedBaseValid = false;
3539
3540 return true;
3541 }
3542
3543 IOReturn
3544 IOMemoryDescriptor::dmaMap(
3545 IOMapper * mapper,
3546 IODMACommand * command,
3547 const IODMAMapSpecification * mapSpec,
3548 uint64_t offset,
3549 uint64_t length,
3550 uint64_t * mapAddress,
3551 uint64_t * mapLength)
3552 {
3553 IOReturn err;
3554 uint32_t mapOptions;
3555
3556 mapOptions = 0;
3557 mapOptions |= kIODMAMapReadAccess;
3558 if (!(kIOMemoryPreparedReadOnly & _flags)) {
3559 mapOptions |= kIODMAMapWriteAccess;
3560 }
3561
3562 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3563 mapSpec, command, NULL, mapAddress, mapLength);
3564
3565 if (kIOReturnSuccess == err) {
3566 dmaMapRecord(mapper, command, *mapLength);
3567 }
3568
3569 return err;
3570 }
3571
3572 void
3573 IOMemoryDescriptor::dmaMapRecord(
3574 IOMapper * mapper,
3575 IODMACommand * command,
3576 uint64_t mapLength)
3577 {
3578 kern_allocation_name_t alloc;
3579 int16_t prior;
3580
3581 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
3582 kern_allocation_update_size(mapper->fAllocName, mapLength);
3583 }
3584
3585 if (!command) {
3586 return;
3587 }
3588 prior = OSAddAtomic16(1, &_dmaReferences);
3589 if (!prior) {
3590 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3591 _mapName = alloc;
3592 mapLength = _length;
3593 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3594 } else {
3595 _mapName = NULL;
3596 }
3597 }
3598 }
3599
3600 IOReturn
3601 IOMemoryDescriptor::dmaUnmap(
3602 IOMapper * mapper,
3603 IODMACommand * command,
3604 uint64_t offset,
3605 uint64_t mapAddress,
3606 uint64_t mapLength)
3607 {
3608 IOReturn ret;
3609 kern_allocation_name_t alloc;
3610 kern_allocation_name_t mapName;
3611 int16_t prior;
3612
3613 mapName = NULL;
3614 prior = 0;
3615 if (command) {
3616 mapName = _mapName;
3617 if (_dmaReferences) {
3618 prior = OSAddAtomic16(-1, &_dmaReferences);
3619 } else {
3620 panic("_dmaReferences underflow");
3621 }
3622 }
3623
3624 if (!mapLength) {
3625 return kIOReturnSuccess;
3626 }
3627
3628 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3629
3630 if ((alloc = mapper->fAllocName)) {
3631 kern_allocation_update_size(alloc, -mapLength);
3632 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3633 mapLength = _length;
3634 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3635 }
3636 }
3637
3638 return ret;
3639 }
3640
3641 IOReturn
3642 IOGeneralMemoryDescriptor::dmaMap(
3643 IOMapper * mapper,
3644 IODMACommand * command,
3645 const IODMAMapSpecification * mapSpec,
3646 uint64_t offset,
3647 uint64_t length,
3648 uint64_t * mapAddress,
3649 uint64_t * mapLength)
3650 {
3651 IOReturn err = kIOReturnSuccess;
3652 ioGMDData * dataP;
3653 IOOptionBits type = _flags & kIOMemoryTypeMask;
3654
3655 *mapAddress = 0;
3656 if (kIOMemoryHostOnly & _flags) {
3657 return kIOReturnSuccess;
3658 }
3659 if (kIOMemoryRemote & _flags) {
3660 return kIOReturnNotAttached;
3661 }
3662
3663 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3664 || offset || (length != _length)) {
3665 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3666 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
3667 const ioPLBlock * ioplList = getIOPLList(dataP);
3668 upl_page_info_t * pageList;
3669 uint32_t mapOptions = 0;
3670
3671 IODMAMapSpecification mapSpec;
3672 bzero(&mapSpec, sizeof(mapSpec));
3673 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3674 mapSpec.alignment = dataP->fDMAMapAlignment;
3675
3676 // For external UPLs the fPageInfo field points directly to
3677 // the upl's upl_page_info_t array.
3678 if (ioplList->fFlags & kIOPLExternUPL) {
3679 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3680 mapOptions |= kIODMAMapPagingPath;
3681 } else {
3682 pageList = getPageList(dataP);
3683 }
3684
3685 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
3686 mapOptions |= kIODMAMapPageListFullyOccupied;
3687 }
3688
3689 assert(dataP->fDMAAccess);
3690 mapOptions |= dataP->fDMAAccess;
3691
3692 // Check for direct device non-paged memory
3693 if (ioplList->fFlags & kIOPLOnDevice) {
3694 mapOptions |= kIODMAMapPhysicallyContiguous;
3695 }
3696
3697 IODMAMapPageList dmaPageList =
3698 {
3699 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3700 .pageListCount = _pages,
3701 .pageList = &pageList[0]
3702 };
3703 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3704 command, &dmaPageList, mapAddress, mapLength);
3705
3706 if (kIOReturnSuccess == err) {
3707 dmaMapRecord(mapper, command, *mapLength);
3708 }
3709 }
3710
3711 return err;
3712 }
3713
3714 /*
3715 * prepare
3716 *
3717 * Prepare the memory for an I/O transfer. This involves paging in
3718 * the memory, if necessary, and wiring it down for the duration of
3719 * the transfer. The complete() method completes the processing of
3720 * the memory after the I/O transfer finishes. This method need not
3721 * be called for non-pageable memory.
3722 */
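/*
 * A minimal usage sketch (under #if 0) of the prepare()/complete() pairing
 * described above. The descriptor "md", its direction and the hardware
 * programming step are hypothetical driver-side assumptions, not definitions
 * from this file.
 */
#if 0
static IOReturn
ExampleWiredTransfer( IOMemoryDescriptor * md )
{
	// Page in and wire the buffer for the duration of the transfer.
	IOReturn ret = md->prepare(kIODirectionOut);
	if (kIOReturnSuccess != ret) {
		return ret;
	}
	// ... program the DMA engine against the wired pages here ...

	// Balance the prepare() once the I/O has finished.
	md->complete(kIODirectionOut);
	return kIOReturnSuccess;
}
#endif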
3723
3724 IOReturn
3725 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3726 {
3727 IOReturn error = kIOReturnSuccess;
3728 IOOptionBits type = _flags & kIOMemoryTypeMask;
3729
3730 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3731 return kIOReturnSuccess;
3732 }
3733
3734 assert(!(kIOMemoryRemote & _flags));
3735 if (kIOMemoryRemote & _flags) {
3736 return kIOReturnNotAttached;
3737 }
3738
3739 if (_prepareLock) {
3740 IOLockLock(_prepareLock);
3741 }
3742
3743 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3744 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
3745 error = kIOReturnNotReady;
3746 goto finish;
3747 }
3748 error = wireVirtual(forDirection);
3749 }
3750
3751 if (kIOReturnSuccess == error) {
3752 if (1 == ++_wireCount) {
3753 if (kIOMemoryClearEncrypt & _flags) {
3754 performOperation(kIOMemoryClearEncrypted, 0, _length);
3755 }
3756 }
3757 }
3758
3759 finish:
3760
3761 if (_prepareLock) {
3762 IOLockUnlock(_prepareLock);
3763 }
3764
3765 return error;
3766 }
3767
3768 /*
3769 * complete
3770 *
3771 * Complete processing of the memory after an I/O transfer finishes.
3772 * This method should not be called unless a prepare was previously
3773 * issued; prepare() and complete() must occur in pairs, before
3774 * and after an I/O transfer involving pageable memory.
3775 */
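/*
 * Sketch of the failure path, assuming a hypothetical descriptor "md" whose
 * transfer failed: passing kIODirectionCompleteWithError marks the wiring
 * data with a completion error so the underlying UPLs are aborted rather
 * than committed (see dataP->fCompletionError below).
 */
#if 0
	md->complete((IODirection) (kIODirectionOut | kIODirectionCompleteWithError));
#endif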
3776
3777 IOReturn
3778 IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3779 {
3780 IOOptionBits type = _flags & kIOMemoryTypeMask;
3781 ioGMDData * dataP;
3782
3783 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3784 return kIOReturnSuccess;
3785 }
3786
3787 assert(!(kIOMemoryRemote & _flags));
3788 if (kIOMemoryRemote & _flags) {
3789 return kIOReturnNotAttached;
3790 }
3791
3792 if (_prepareLock) {
3793 IOLockLock(_prepareLock);
3794 }
3795 do{
3796 assert(_wireCount);
3797 if (!_wireCount) {
3798 break;
3799 }
3800 dataP = getDataP(_memoryEntries);
3801 if (!dataP) {
3802 break;
3803 }
3804
3805 if (kIODirectionCompleteWithError & forDirection) {
3806 dataP->fCompletionError = true;
3807 }
3808
3809 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
3810 performOperation(kIOMemorySetEncrypted, 0, _length);
3811 }
3812
3813 _wireCount--;
3814 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
3815 ioPLBlock *ioplList = getIOPLList(dataP);
3816 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3817
3818 if (_wireCount) {
3819 // kIODirectionCompleteWithDataValid & forDirection
3820 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3821 vm_tag_t tag;
3822 tag = getVMTag(kernel_map);
3823 for (ind = 0; ind < count; ind++) {
3824 if (ioplList[ind].fIOPL) {
3825 iopl_valid_data(ioplList[ind].fIOPL, tag);
3826 }
3827 }
3828 }
3829 } else {
3830 if (_dmaReferences) {
3831 panic("complete() while dma active");
3832 }
3833
3834 if (dataP->fMappedBaseValid) {
3835 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3836 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3837 }
3838 #if IOTRACKING
3839 if (dataP->fWireTracking.link.next) {
3840 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3841 }
3842 #endif /* IOTRACKING */
3843 // Only complete iopls that we created which are for TypeVirtual
3844 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3845 for (ind = 0; ind < count; ind++) {
3846 if (ioplList[ind].fIOPL) {
3847 if (dataP->fCompletionError) {
3848 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3849 } else {
3850 upl_commit(ioplList[ind].fIOPL, NULL, 0);
3851 }
3852 upl_deallocate(ioplList[ind].fIOPL);
3853 }
3854 }
3855 } else if (kIOMemoryTypeUPL == type) {
3856 upl_set_referenced(ioplList[0].fIOPL, false);
3857 }
3858
3859 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3860
3861 dataP->fPreparationID = kIOPreparationIDUnprepared;
3862 _flags &= ~kIOMemoryPreparedReadOnly;
3863 }
3864 }
3865 }while (false);
3866
3867 if (_prepareLock) {
3868 IOLockUnlock(_prepareLock);
3869 }
3870
3871 return kIOReturnSuccess;
3872 }
3873
3874 IOReturn
3875 IOGeneralMemoryDescriptor::doMap(
3876 vm_map_t __addressMap,
3877 IOVirtualAddress * __address,
3878 IOOptionBits options,
3879 IOByteCount __offset,
3880 IOByteCount __length )
3881 {
3882 #ifndef __LP64__
3883 if (!(kIOMap64Bit & options)) {
3884 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3885 }
3886 #endif /* !__LP64__ */
3887
3888 kern_return_t err;
3889
3890 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3891 mach_vm_size_t offset = mapping->fOffset + __offset;
3892 mach_vm_size_t length = mapping->fLength;
3893
3894 IOOptionBits type = _flags & kIOMemoryTypeMask;
3895 Ranges vec = _ranges;
3896
3897 mach_vm_address_t range0Addr = 0;
3898 mach_vm_size_t range0Len = 0;
3899
3900 if ((offset >= _length) || ((offset + length) > _length)) {
3901 return kIOReturnBadArgument;
3902 }
3903
3904 assert(!(kIOMemoryRemote & _flags));
3905 if (kIOMemoryRemote & _flags) {
3906 return 0;
3907 }
3908
3909 if (vec.v) {
3910 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3911 }
3912
3913 // mapping source == dest? (could be much better)
3914 if (_task
3915 && (mapping->fAddressTask == _task)
3916 && (mapping->fAddressMap == get_task_map(_task))
3917 && (options & kIOMapAnywhere)
3918 && (!(kIOMapUnique & options))
3919 && (1 == _rangesCount)
3920 && (0 == offset)
3921 && range0Addr
3922 && (length <= range0Len)) {
3923 mapping->fAddress = range0Addr;
3924 mapping->fOptions |= kIOMapStatic;
3925
3926 return kIOReturnSuccess;
3927 }
3928
3929 if (!_memRef) {
3930 IOOptionBits createOptions = 0;
3931 if (!(kIOMapReadOnly & options)) {
3932 createOptions |= kIOMemoryReferenceWrite;
3933 #if DEVELOPMENT || DEBUG
3934 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
3935 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
3936 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3937 }
3938 #endif
3939 }
3940 err = memoryReferenceCreate(createOptions, &_memRef);
3941 if (kIOReturnSuccess != err) {
3942 return err;
3943 }
3944 }
3945
3946 memory_object_t pager;
3947 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
3948
3949 // <upl_transpose //
3950 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
3951 do{
3952 upl_t redirUPL2;
3953 upl_size_t size;
3954 upl_control_flags_t flags;
3955 unsigned int lock_count;
3956
3957 if (!_memRef || (1 != _memRef->count)) {
3958 err = kIOReturnNotReadable;
3959 break;
3960 }
3961
3962 size = round_page(mapping->fLength);
3963 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3964 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3965
3966 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3967 NULL, NULL,
3968 &flags, getVMTag(kernel_map))) {
3969 redirUPL2 = NULL;
3970 }
3971
3972 for (lock_count = 0;
3973 IORecursiveLockHaveLock(gIOMemoryLock);
3974 lock_count++) {
3975 UNLOCK;
3976 }
3977 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3978 for (;
3979 lock_count;
3980 lock_count--) {
3981 LOCK;
3982 }
3983
3984 if (kIOReturnSuccess != err) {
3985 IOLog("upl_transpose(%x)\n", err);
3986 err = kIOReturnSuccess;
3987 }
3988
3989 if (redirUPL2) {
3990 upl_commit(redirUPL2, NULL, 0);
3991 upl_deallocate(redirUPL2);
3992 redirUPL2 = NULL;
3993 }
3994 {
3995 // swap the memEntries since they now refer to different vm_objects
3996 IOMemoryReference * me = _memRef;
3997 _memRef = mapping->fMemory->_memRef;
3998 mapping->fMemory->_memRef = me;
3999 }
4000 if (pager) {
4001 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4002 }
4003 }while (false);
4004 }
4005 // upl_transpose> //
4006 else {
4007 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4008 #if IOTRACKING
4009 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4010 // only dram maps are tracked in the default development case
4011 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4012 }
4013 #endif /* IOTRACKING */
4014 if ((err == KERN_SUCCESS) && pager) {
4015 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4016
4017 if (err != KERN_SUCCESS) {
4018 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4019 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4020 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4021 }
4022 }
4023 }
4024
4025 return err;
4026 }
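// Informal summary of the doMap() path above: after bounds-checking the
// requested window, a kIOMapAnywhere mapping into the task that already owns
// the descriptor's single backing range is pointed at that range and marked
// kIOMapStatic; otherwise a memory reference is created on first use and
// either transposed onto the existing redirect UPL (for
// kIOMapReference | kIOMapUnique callers) or passed to memoryReferenceMap(),
// with the device pager populated afterwards when one is attached.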
4027
4028 #if IOTRACKING
4029 IOReturn
4030 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
4031 mach_vm_address_t * address, mach_vm_size_t * size)
4032 {
4033 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4034
4035 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
4036
4037 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4038 return kIOReturnNotReady;
4039 }
4040
4041 *task = map->fAddressTask;
4042 *address = map->fAddress;
4043 *size = map->fLength;
4044
4045 return kIOReturnSuccess;
4046 }
4047 #endif /* IOTRACKING */
4048
4049 IOReturn
4050 IOGeneralMemoryDescriptor::doUnmap(
4051 vm_map_t addressMap,
4052 IOVirtualAddress __address,
4053 IOByteCount __length )
4054 {
4055 return super::doUnmap(addressMap, __address, __length);
4056 }
4057
4058 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4059
4060 #undef super
4061 #define super OSObject
4062
4063 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
4064
4065 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
4066 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
4067 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
4068 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
4069 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
4070 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
4071 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
4072 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
4073
4074 /* ex-inline function implementation */
4075 IOPhysicalAddress
4076 IOMemoryMap::getPhysicalAddress()
4077 {
4078 return getPhysicalSegment( 0, NULL );
4079 }
4080
4081 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4082
4083 bool
4084 IOMemoryMap::init(
4085 task_t intoTask,
4086 mach_vm_address_t toAddress,
4087 IOOptionBits _options,
4088 mach_vm_size_t _offset,
4089 mach_vm_size_t _length )
4090 {
4091 if (!intoTask) {
4092 return false;
4093 }
4094
4095 if (!super::init()) {
4096 return false;
4097 }
4098
4099 fAddressMap = get_task_map(intoTask);
4100 if (!fAddressMap) {
4101 return false;
4102 }
4103 vm_map_reference(fAddressMap);
4104
4105 fAddressTask = intoTask;
4106 fOptions = _options;
4107 fLength = _length;
4108 fOffset = _offset;
4109 fAddress = toAddress;
4110
4111 return true;
4112 }
4113
4114 bool
4115 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
4116 {
4117 if (!_memory) {
4118 return false;
4119 }
4120
4121 if (!fSuperMap) {
4122 if ((_offset + fLength) > _memory->getLength()) {
4123 return false;
4124 }
4125 fOffset = _offset;
4126 }
4127
4128 _memory->retain();
4129 if (fMemory) {
4130 if (fMemory != _memory) {
4131 fMemory->removeMapping(this);
4132 }
4133 fMemory->release();
4134 }
4135 fMemory = _memory;
4136
4137 return true;
4138 }
4139
4140 IOReturn
4141 IOMemoryDescriptor::doMap(
4142 vm_map_t __addressMap,
4143 IOVirtualAddress * __address,
4144 IOOptionBits options,
4145 IOByteCount __offset,
4146 IOByteCount __length )
4147 {
4148 return kIOReturnUnsupported;
4149 }
4150
4151 IOReturn
4152 IOMemoryDescriptor::handleFault(
4153 void * _pager,
4154 mach_vm_size_t sourceOffset,
4155 mach_vm_size_t length)
4156 {
4157 if (kIOMemoryRedirected & _flags) {
4158 #if DEBUG
4159 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
4160 #endif
4161 do {
4162 SLEEP;
4163 } while (kIOMemoryRedirected & _flags);
4164 }
4165 return kIOReturnSuccess;
4166 }
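// Informal note on handleFault(): while kIOMemoryRedirected is set the
// faulting thread just sleeps on the memory lock; redirect(..., false)
// clears the flag and issues the WAKEUP that lets it proceed.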
4167
4168 IOReturn
4169 IOMemoryDescriptor::populateDevicePager(
4170 void * _pager,
4171 vm_map_t addressMap,
4172 mach_vm_address_t address,
4173 mach_vm_size_t sourceOffset,
4174 mach_vm_size_t length,
4175 IOOptionBits options )
4176 {
4177 IOReturn err = kIOReturnSuccess;
4178 memory_object_t pager = (memory_object_t) _pager;
4179 mach_vm_size_t size;
4180 mach_vm_size_t bytes;
4181 mach_vm_size_t page;
4182 mach_vm_size_t pageOffset;
4183 mach_vm_size_t pagerOffset;
4184 IOPhysicalLength segLen, chunk;
4185 addr64_t physAddr;
4186 IOOptionBits type;
4187
4188 type = _flags & kIOMemoryTypeMask;
4189
4190 if (reserved->dp.pagerContig) {
4191 sourceOffset = 0;
4192 pagerOffset = 0;
4193 }
4194
4195 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
4196 assert( physAddr );
4197 pageOffset = physAddr - trunc_page_64( physAddr );
4198 pagerOffset = sourceOffset;
4199
4200 size = length + pageOffset;
4201 physAddr -= pageOffset;
4202
4203 segLen += pageOffset;
4204 bytes = size;
4205 do{
4206 // in the middle of the loop only map whole pages
4207 if (segLen >= bytes) {
4208 segLen = bytes;
4209 } else if (segLen != trunc_page_64(segLen)) {
4210 err = kIOReturnVMError;
4211 }
4212 if (physAddr != trunc_page_64(physAddr)) {
4213 err = kIOReturnBadArgument;
4214 }
4215
4216 if (kIOReturnSuccess != err) {
4217 break;
4218 }
4219
4220 #if DEBUG || DEVELOPMENT
4221 if ((kIOMemoryTypeUPL != type)
4222 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) {
4223 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
4224 }
4225 #endif /* DEBUG || DEVELOPMENT */
4226
4227 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
4228 for (page = 0;
4229 (page < segLen) && (KERN_SUCCESS == err);
4230 page += chunk) {
4231 err = device_pager_populate_object(pager, pagerOffset,
4232 (ppnum_t)(atop_64(physAddr + page)), chunk);
4233 pagerOffset += chunk;
4234 }
4235
4236 assert(KERN_SUCCESS == err);
4237 if (err) {
4238 break;
4239 }
4240
4241 // For kernel mappings, this call to vm_fault causes an early
4242 // pmap-level resolution of the mappings created above, since
4243 // faulting them in later can't take place from interrupt level.
4244 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
4245 err = vm_fault(addressMap,
4246 (vm_map_offset_t)trunc_page_64(address),
4247 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
4248 FALSE, VM_KERN_MEMORY_NONE,
4249 THREAD_UNINT, NULL,
4250 (vm_map_offset_t)0);
4251
4252 if (KERN_SUCCESS != err) {
4253 break;
4254 }
4255 }
4256
4257 sourceOffset += segLen - pageOffset;
4258 address += segLen;
4259 bytes -= segLen;
4260 pageOffset = 0;
4261 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
4262
4263 if (bytes) {
4264 err = kIOReturnBadArgument;
4265 }
4266
4267 return err;
4268 }
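// Informal summary of populateDevicePager(): it walks the descriptor's
// physical segments and feeds them to device_pager_populate_object() one
// page at a time (or one whole rounded segment when reserved->dp.pagerContig
// is set), and for kernel_map mappings it pre-faults the pages with
// vm_fault() so later access does not have to fault from interrupt level.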
4269
4270 IOReturn
4271 IOMemoryDescriptor::doUnmap(
4272 vm_map_t addressMap,
4273 IOVirtualAddress __address,
4274 IOByteCount __length )
4275 {
4276 IOReturn err;
4277 IOMemoryMap * mapping;
4278 mach_vm_address_t address;
4279 mach_vm_size_t length;
4280
4281 if (__length) {
4282 panic("doUnmap");
4283 }
4284
4285 mapping = (IOMemoryMap *) __address;
4286 addressMap = mapping->fAddressMap;
4287 address = mapping->fAddress;
4288 length = mapping->fLength;
4289
4290 if (kIOMapOverwrite & mapping->fOptions) {
4291 err = KERN_SUCCESS;
4292 } else {
4293 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
4294 addressMap = IOPageableMapForAddress( address );
4295 }
4296 #if DEBUG
4297 if (kIOLogMapping & gIOKitDebug) {
4298 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4299 addressMap, address, length );
4300 }
4301 #endif
4302 err = mach_vm_deallocate( addressMap, address, length );
4303 }
4304
4305 #if IOTRACKING
4306 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
4307 #endif /* IOTRACKING */
4308
4309 return err;
4310 }
4311
4312 IOReturn
4313 IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
4314 {
4315 IOReturn err = kIOReturnSuccess;
4316 IOMemoryMap * mapping = NULL;
4317 OSIterator * iter;
4318
4319 LOCK;
4320
4321 if (doRedirect) {
4322 _flags |= kIOMemoryRedirected;
4323 } else {
4324 _flags &= ~kIOMemoryRedirected;
4325 }
4326
4327 do {
4328 if ((iter = OSCollectionIterator::withCollection( _mappings))) {
4329 memory_object_t pager;
4330
4331 if (reserved) {
4332 pager = (memory_object_t) reserved->dp.devicePager;
4333 } else {
4334 pager = MACH_PORT_NULL;
4335 }
4336
4337 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
4338 mapping->redirect( safeTask, doRedirect );
4339 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
4340 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4341 }
4342 }
4343
4344 iter->release();
4345 }
4346 } while (false);
4347
4348 if (!doRedirect) {
4349 WAKEUP;
4350 }
4351
4352 UNLOCK;
4353
4354 #ifndef __LP64__
4355 // temporary binary compatibility
4356 IOSubMemoryDescriptor * subMem;
4357 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
4358 err = subMem->redirect( safeTask, doRedirect );
4359 } else {
4360 err = kIOReturnSuccess;
4361 }
4362 #endif /* !__LP64__ */
4363
4364 return err;
4365 }
4366
4367 IOReturn
4368 IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
4369 {
4370 IOReturn err = kIOReturnSuccess;
4371
4372 if (fSuperMap) {
4373 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4374 } else {
4375 LOCK;
4376
4377 do{
4378 if (!fAddress) {
4379 break;
4380 }
4381 if (!fAddressMap) {
4382 break;
4383 }
4384
4385 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4386 && (0 == (fOptions & kIOMapStatic))) {
4387 IOUnmapPages( fAddressMap, fAddress, fLength );
4388 err = kIOReturnSuccess;
4389 #if DEBUG
4390 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
4391 #endif
4392 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
4393 IOOptionBits newMode;
4394 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4395 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4396 }
4397 }while (false);
4398 UNLOCK;
4399 }
4400
4401 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4402 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4403 && safeTask
4404 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
4405 fMemory->redirect(safeTask, doRedirect);
4406 }
4407
4408 return err;
4409 }
4410
4411 IOReturn
4412 IOMemoryMap::unmap( void )
4413 {
4414 IOReturn err;
4415
4416 LOCK;
4417
4418 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
4419 && (0 == (kIOMapStatic & fOptions))) {
4420 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4421 } else {
4422 err = kIOReturnSuccess;
4423 }
4424
4425 if (fAddressMap) {
4426 vm_map_deallocate(fAddressMap);
4427 fAddressMap = NULL;
4428 }
4429
4430 fAddress = 0;
4431
4432 UNLOCK;
4433
4434 return err;
4435 }
4436
4437 void
4438 IOMemoryMap::taskDied( void )
4439 {
4440 LOCK;
4441 if (fUserClientUnmap) {
4442 unmap();
4443 }
4444 #if IOTRACKING
4445 else {
4446 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4447 }
4448 #endif /* IOTRACKING */
4449
4450 if (fAddressMap) {
4451 vm_map_deallocate(fAddressMap);
4452 fAddressMap = NULL;
4453 }
4454 fAddressTask = NULL;
4455 fAddress = 0;
4456 UNLOCK;
4457 }
4458
4459 IOReturn
4460 IOMemoryMap::userClientUnmap( void )
4461 {
4462 fUserClientUnmap = true;
4463 return kIOReturnSuccess;
4464 }
4465
4466 // Overload the release mechanism. Every mapping must be a member
4467 // of its memory descriptor's _mappings set, which means there are
4468 // always 2 references on a mapping. When either of these references
4469 // is released we need to free ourselves.
4470 void
4471 IOMemoryMap::taggedRelease(const void *tag) const
4472 {
4473 LOCK;
4474 super::taggedRelease(tag, 2);
4475 UNLOCK;
4476 }
4477
4478 void
4479 IOMemoryMap::free()
4480 {
4481 unmap();
4482
4483 if (fMemory) {
4484 LOCK;
4485 fMemory->removeMapping(this);
4486 UNLOCK;
4487 fMemory->release();
4488 }
4489
4490 if (fOwner && (fOwner != fMemory)) {
4491 LOCK;
4492 fOwner->removeMapping(this);
4493 UNLOCK;
4494 }
4495
4496 if (fSuperMap) {
4497 fSuperMap->release();
4498 }
4499
4500 if (fRedirUPL) {
4501 upl_commit(fRedirUPL, NULL, 0);
4502 upl_deallocate(fRedirUPL);
4503 }
4504
4505 super::free();
4506 }
4507
4508 IOByteCount
4509 IOMemoryMap::getLength()
4510 {
4511 return fLength;
4512 }
4513
4514 IOVirtualAddress
4515 IOMemoryMap::getVirtualAddress()
4516 {
4517 #ifndef __LP64__
4518 if (fSuperMap) {
4519 fSuperMap->getVirtualAddress();
4520 } else if (fAddressMap
4521 && vm_map_is_64bit(fAddressMap)
4522 && (sizeof(IOVirtualAddress) < 8)) {
4523 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4524 }
4525 #endif /* !__LP64__ */
4526
4527 return fAddress;
4528 }
4529
4530 #ifndef __LP64__
4531 mach_vm_address_t
4532 IOMemoryMap::getAddress()
4533 {
4534 return fAddress;
4535 }
4536
4537 mach_vm_size_t
4538 IOMemoryMap::getSize()
4539 {
4540 return fLength;
4541 }
4542 #endif /* !__LP64__ */
4543
4544
4545 task_t
4546 IOMemoryMap::getAddressTask()
4547 {
4548 if (fSuperMap) {
4549 return fSuperMap->getAddressTask();
4550 } else {
4551 return fAddressTask;
4552 }
4553 }
4554
4555 IOOptionBits
4556 IOMemoryMap::getMapOptions()
4557 {
4558 return fOptions;
4559 }
4560
4561 IOMemoryDescriptor *
4562 IOMemoryMap::getMemoryDescriptor()
4563 {
4564 return fMemory;
4565 }
4566
4567 IOMemoryMap *
4568 IOMemoryMap::copyCompatible(
4569 IOMemoryMap * newMapping )
4570 {
4571 task_t task = newMapping->getAddressTask();
4572 mach_vm_address_t toAddress = newMapping->fAddress;
4573 IOOptionBits _options = newMapping->fOptions;
4574 mach_vm_size_t _offset = newMapping->fOffset;
4575 mach_vm_size_t _length = newMapping->fLength;
4576
4577 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
4578 return NULL;
4579 }
4580 if ((fOptions ^ _options) & kIOMapReadOnly) {
4581 return NULL;
4582 }
4583 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
4584 && ((fOptions ^ _options) & kIOMapCacheMask)) {
4585 return NULL;
4586 }
4587
4588 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
4589 return NULL;
4590 }
4591
4592 if (_offset < fOffset) {
4593 return NULL;
4594 }
4595
4596 _offset -= fOffset;
4597
4598 if ((_offset + _length) > fLength) {
4599 return NULL;
4600 }
4601
4602 retain();
4603 if ((fLength == _length) && (!_offset)) {
4604 newMapping = this;
4605 } else {
4606 newMapping->fSuperMap = this;
4607 newMapping->fOffset = fOffset + _offset;
4608 newMapping->fAddress = fAddress + _offset;
4609 }
4610
4611 return newMapping;
4612 }
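// Informal summary of copyCompatible(): an existing mapping is reused only
// when it targets the same task address map, agrees on kIOMapReadOnly and on
// any explicitly requested cache mode, matches a fixed address unless
// kIOMapAnywhere was asked for, and fully covers the requested offset/length
// window. An exact match returns this map; a sub-window returns the new
// mapping chained through fSuperMap.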
4613
4614 IOReturn
4615 IOMemoryMap::wireRange(
4616 uint32_t options,
4617 mach_vm_size_t offset,
4618 mach_vm_size_t length)
4619 {
4620 IOReturn kr;
4621 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4622 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4623 vm_prot_t prot;
4624
4625 prot = (kIODirectionOutIn & options);
4626 if (prot) {
4627 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4628 } else {
4629 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4630 }
4631
4632 return kr;
4633 }
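// Usage sketch for wireRange() (illustrative only; assumes 'map' is an
// existing IOMemoryMap): the direction bits double as the wire protection,
// and passing no direction bits unwires the range.
//
//   map->wireRange(kIODirectionOutIn, 0, page_size);   // wire first page r/w
//   map->wireRange(0, 0, page_size);                   // unwire it again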
4634
4635
4636 IOPhysicalAddress
4637 #ifdef __LP64__
4638 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4639 #else /* !__LP64__ */
4640 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4641 #endif /* !__LP64__ */
4642 {
4643 IOPhysicalAddress address;
4644
4645 LOCK;
4646 #ifdef __LP64__
4647 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4648 #else /* !__LP64__ */
4649 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4650 #endif /* !__LP64__ */
4651 UNLOCK;
4652
4653 return address;
4654 }
4655
4656 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4657
4658 #undef super
4659 #define super OSObject
4660
4661 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4662
4663 void
4664 IOMemoryDescriptor::initialize( void )
4665 {
4666 if (NULL == gIOMemoryLock) {
4667 gIOMemoryLock = IORecursiveLockAlloc();
4668 }
4669
4670 gIOLastPage = IOGetLastPageNumber();
4671 }
4672
4673 void
4674 IOMemoryDescriptor::free( void )
4675 {
4676 if (_mappings) {
4677 _mappings->release();
4678 }
4679
4680 if (reserved) {
4681 cleanKernelReserved(reserved);
4682 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4683 reserved = NULL;
4684 }
4685 super::free();
4686 }
4687
4688 IOMemoryMap *
4689 IOMemoryDescriptor::setMapping(
4690 task_t intoTask,
4691 IOVirtualAddress mapAddress,
4692 IOOptionBits options )
4693 {
4694 return createMappingInTask( intoTask, mapAddress,
4695 options | kIOMapStatic,
4696 0, getLength());
4697 }
4698
4699 IOMemoryMap *
4700 IOMemoryDescriptor::map(
4701 IOOptionBits options )
4702 {
4703 return createMappingInTask( kernel_task, 0,
4704 options | kIOMapAnywhere,
4705 0, getLength());
4706 }
4707
4708 #ifndef __LP64__
4709 IOMemoryMap *
4710 IOMemoryDescriptor::map(
4711 task_t intoTask,
4712 IOVirtualAddress atAddress,
4713 IOOptionBits options,
4714 IOByteCount offset,
4715 IOByteCount length )
4716 {
4717 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
4718 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4719 return NULL;
4720 }
4721
4722 return createMappingInTask(intoTask, atAddress,
4723 options, offset, length);
4724 }
4725 #endif /* !__LP64__ */
4726
4727 IOMemoryMap *
4728 IOMemoryDescriptor::createMappingInTask(
4729 task_t intoTask,
4730 mach_vm_address_t atAddress,
4731 IOOptionBits options,
4732 mach_vm_size_t offset,
4733 mach_vm_size_t length)
4734 {
4735 IOMemoryMap * result;
4736 IOMemoryMap * mapping;
4737
4738 if (0 == length) {
4739 length = getLength();
4740 }
4741
4742 mapping = new IOMemoryMap;
4743
4744 if (mapping
4745 && !mapping->init( intoTask, atAddress,
4746 options, offset, length )) {
4747 mapping->release();
4748 mapping = NULL;
4749 }
4750
4751 if (mapping) {
4752 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4753 } else {
4754 result = NULL;
4755 }
4756
4757 #if DEBUG
4758 if (!result) {
4759 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4760 this, atAddress, (uint32_t) options, offset, length);
4761 }
4762 #endif
4763
4764 return result;
4765 }
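// Usage sketch for createMappingInTask() (illustrative only; assumes 'md'
// is a prepared IOMemoryDescriptor): a length of zero maps the whole
// descriptor and kIOMapAnywhere lets the kernel pick the address.
//
//   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
//       kIOMapAnywhere | kIOMapReadOnly, 0, 0);
//   if (map) {
//       IOVirtualAddress va = map->getVirtualAddress();
//       /* ... access the memory through va ... */
//       map->release();    // releasing the client reference frees the map (see free()/unmap())
//   }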
4766
4767 #ifndef __LP64__ // there is only a 64 bit version for LP64
4768 IOReturn
4769 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4770 IOOptionBits options,
4771 IOByteCount offset)
4772 {
4773 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
4774 }
4775 #endif
4776
4777 IOReturn
4778 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4779 IOOptionBits options,
4780 mach_vm_size_t offset)
4781 {
4782 IOReturn err = kIOReturnSuccess;
4783 IOMemoryDescriptor * physMem = NULL;
4784
4785 LOCK;
4786
4787 if (fAddress && fAddressMap) {
4788 do{
4789 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4790 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4791 physMem = fMemory;
4792 physMem->retain();
4793 }
4794
4795 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
4796 upl_size_t size = round_page(fLength);
4797 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4798 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4799 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4800 NULL, NULL,
4801 &flags, fMemory->getVMTag(kernel_map))) {
4802 fRedirUPL = NULL;
4803 }
4804
4805 if (physMem) {
4806 IOUnmapPages( fAddressMap, fAddress, fLength );
4807 if ((false)) {
4808 physMem->redirect(NULL, true);
4809 }
4810 }
4811 }
4812
4813 if (newBackingMemory) {
4814 if (newBackingMemory != fMemory) {
4815 fOffset = 0;
4816 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4817 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4818 offset, fLength)) {
4819 err = kIOReturnError;
4820 }
4821 }
4822 if (fRedirUPL) {
4823 upl_commit(fRedirUPL, NULL, 0);
4824 upl_deallocate(fRedirUPL);
4825 fRedirUPL = NULL;
4826 }
4827 if ((false) && physMem) {
4828 physMem->redirect(NULL, false);
4829 }
4830 }
4831 }while (false);
4832 }
4833
4834 UNLOCK;
4835
4836 if (physMem) {
4837 physMem->release();
4838 }
4839
4840 return err;
4841 }
4842
4843 IOMemoryMap *
4844 IOMemoryDescriptor::makeMapping(
4845 IOMemoryDescriptor * owner,
4846 task_t __intoTask,
4847 IOVirtualAddress __address,
4848 IOOptionBits options,
4849 IOByteCount __offset,
4850 IOByteCount __length )
4851 {
4852 #ifndef __LP64__
4853 if (!(kIOMap64Bit & options)) {
4854 panic("IOMemoryDescriptor::makeMapping !64bit");
4855 }
4856 #endif /* !__LP64__ */
4857
4858 IOMemoryDescriptor * mapDesc = NULL;
4859 __block IOMemoryMap * result = NULL;
4860
4861 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4862 mach_vm_size_t offset = mapping->fOffset + __offset;
4863 mach_vm_size_t length = mapping->fLength;
4864
4865 mapping->fOffset = offset;
4866
4867 LOCK;
4868
4869 do{
4870 if (kIOMapStatic & options) {
4871 result = mapping;
4872 addMapping(mapping);
4873 mapping->setMemoryDescriptor(this, 0);
4874 continue;
4875 }
4876
4877 if (kIOMapUnique & options) {
4878 addr64_t phys;
4879 IOByteCount physLen;
4880
4881 // if (owner != this) continue;
4882
4883 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4884 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4885 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4886 if (!phys || (physLen < length)) {
4887 continue;
4888 }
4889
4890 mapDesc = IOMemoryDescriptor::withAddressRange(
4891 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4892 if (!mapDesc) {
4893 continue;
4894 }
4895 offset = 0;
4896 mapping->fOffset = offset;
4897 }
4898 } else {
4899 // look for a compatible existing mapping
4900 if (_mappings) {
4901 _mappings->iterateObjects(^(OSObject * object)
4902 {
4903 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
4904 if ((result = lookMapping->copyCompatible(mapping))) {
4905 addMapping(result);
4906 result->setMemoryDescriptor(this, offset);
4907 return true;
4908 }
4909 return false;
4910 });
4911 }
4912 if (result || (options & kIOMapReference)) {
4913 if (result != mapping) {
4914 mapping->release();
4915 mapping = NULL;
4916 }
4917 continue;
4918 }
4919 }
4920
4921 if (!mapDesc) {
4922 mapDesc = this;
4923 mapDesc->retain();
4924 }
4925 IOReturn
4926 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
4927 if (kIOReturnSuccess == kr) {
4928 result = mapping;
4929 mapDesc->addMapping(result);
4930 result->setMemoryDescriptor(mapDesc, offset);
4931 } else {
4932 mapping->release();
4933 mapping = NULL;
4934 }
4935 }while (false);
4936
4937 UNLOCK;
4938
4939 if (mapDesc) {
4940 mapDesc->release();
4941 }
4942
4943 return result;
4944 }
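// Informal summary of makeMapping(): kIOMapStatic mappings are adopted
// directly; kIOMapUnique requests against physical descriptors get a fresh
// wrapper descriptor for the physical range; otherwise an existing
// compatible mapping is reused via copyCompatible() when possible, and
// doMap() is invoked on the chosen descriptor to build the new mapping.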
4945
4946 void
4947 IOMemoryDescriptor::addMapping(
4948 IOMemoryMap * mapping )
4949 {
4950 if (mapping) {
4951 if (NULL == _mappings) {
4952 _mappings = OSSet::withCapacity(1);
4953 }
4954 if (_mappings) {
4955 _mappings->setObject( mapping );
4956 }
4957 }
4958 }
4959
4960 void
4961 IOMemoryDescriptor::removeMapping(
4962 IOMemoryMap * mapping )
4963 {
4964 if (_mappings) {
4965 _mappings->removeObject( mapping);
4966 }
4967 }
4968
4969 #ifndef __LP64__
4970 // obsolete initializers
4971 // - initWithOptions is the designated initializer
4972 bool
4973 IOMemoryDescriptor::initWithAddress(void * address,
4974 IOByteCount length,
4975 IODirection direction)
4976 {
4977 return false;
4978 }
4979
4980 bool
4981 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4982 IOByteCount length,
4983 IODirection direction,
4984 task_t task)
4985 {
4986 return false;
4987 }
4988
4989 bool
4990 IOMemoryDescriptor::initWithPhysicalAddress(
4991 IOPhysicalAddress address,
4992 IOByteCount length,
4993 IODirection direction )
4994 {
4995 return false;
4996 }
4997
4998 bool
4999 IOMemoryDescriptor::initWithRanges(
5000 IOVirtualRange * ranges,
5001 UInt32 withCount,
5002 IODirection direction,
5003 task_t task,
5004 bool asReference)
5005 {
5006 return false;
5007 }
5008
5009 bool
5010 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
5011 UInt32 withCount,
5012 IODirection direction,
5013 bool asReference)
5014 {
5015 return false;
5016 }
5017
5018 void *
5019 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5020 IOByteCount * lengthOfSegment)
5021 {
5022 return NULL;
5023 }
5024 #endif /* !__LP64__ */
5025
5026 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5027
5028 bool
5029 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5030 {
5031 OSSymbol const *keys[2] = {NULL};
5032 OSObject *values[2] = {NULL};
5033 OSArray * array;
5034 vm_size_t vcopy_size;
5035
5036 struct SerData {
5037 user_addr_t address;
5038 user_size_t length;
5039 } *vcopy = NULL;
5040 unsigned int index, nRanges;
5041 bool result = false;
5042
5043 IOOptionBits type = _flags & kIOMemoryTypeMask;
5044
5045 if (s == NULL) {
5046 return false;
5047 }
5048
5049 array = OSArray::withCapacity(4);
5050 if (!array) {
5051 return false;
5052 }
5053
5054 nRanges = _rangesCount;
5055 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
5056 result = false;
5057 goto bail;
5058 }
5059 vcopy = (SerData *) IOMalloc(vcopy_size);
5060 if (vcopy == NULL) {
5061 result = false;
5062 goto bail;
5063 }
5064
5065 keys[0] = OSSymbol::withCString("address");
5066 keys[1] = OSSymbol::withCString("length");
5067
5068 // Copy the volatile data so we don't have to allocate memory
5069 // while the lock is held.
5070 LOCK;
5071 if (nRanges == _rangesCount) {
5072 Ranges vec = _ranges;
5073 for (index = 0; index < nRanges; index++) {
5074 mach_vm_address_t addr; mach_vm_size_t len;
5075 getAddrLenForInd(addr, len, type, vec, index);
5076 vcopy[index].address = addr;
5077 vcopy[index].length = len;
5078 }
5079 } else {
5080 // The descriptor changed out from under us. Give up.
5081 UNLOCK;
5082 result = false;
5083 goto bail;
5084 }
5085 UNLOCK;
5086
5087 for (index = 0; index < nRanges; index++) {
5088 user_addr_t addr = vcopy[index].address;
5089 IOByteCount len = (IOByteCount) vcopy[index].length;
5090 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
5091 if (values[0] == NULL) {
5092 result = false;
5093 goto bail;
5094 }
5095 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
5096 if (values[1] == NULL) {
5097 result = false;
5098 goto bail;
5099 }
5100 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
5101 if (dict == NULL) {
5102 result = false;
5103 goto bail;
5104 }
5105 array->setObject(dict);
5106 dict->release();
5107 values[0]->release();
5108 values[1]->release();
5109 values[0] = values[1] = NULL;
5110 }
5111
5112 result = array->serialize(s);
5113
5114 bail:
5115 if (array) {
5116 array->release();
5117 }
5118 if (values[0]) {
5119 values[0]->release();
5120 }
5121 if (values[1]) {
5122 values[1]->release();
5123 }
5124 if (keys[0]) {
5125 keys[0]->release();
5126 }
5127 if (keys[1]) {
5128 keys[1]->release();
5129 }
5130 if (vcopy) {
5131 IOFree(vcopy, vcopy_size);
5132 }
5133
5134 return result;
5135 }
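// Informal note on serialize(): the result is an OSArray with one
// OSDictionary per range, each holding "address" and "length" OSNumber
// values copied from the descriptor while the lock is held.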
5136
5137 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5138
5139 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
5140 #ifdef __LP64__
5141 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5142 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5143 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5144 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5145 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5146 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5147 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5148 #else /* !__LP64__ */
5149 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
5150 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
5151 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
5152 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
5153 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
5154 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
5155 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
5156 #endif /* !__LP64__ */
5157 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5158 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5159 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5160 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5161 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5162 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5163 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5164 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
5165
5166 /* ex-inline function implementation */
5167 IOPhysicalAddress
5168 IOMemoryDescriptor::getPhysicalAddress()
5169 {
5170 return getPhysicalSegment( 0, NULL );
5171 }