// iokit/Kernel/IOMemoryDescriptor.cpp (apple/xnu, tag xnu-6153.81.5)
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45 #include <libkern/OSKextLibPrivate.h>
46
47 #include "IOKitKernelInternal.h"
48
49 #include <libkern/c++/OSContainers.h>
50 #include <libkern/c++/OSDictionary.h>
51 #include <libkern/c++/OSArray.h>
52 #include <libkern/c++/OSSymbol.h>
53 #include <libkern/c++/OSNumber.h>
54 #include <os/overflow.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <mach/memory_entry.h>
67 #include <vm/vm_fault.h>
68 #include <vm/vm_protos.h>
69
70 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
71 extern void ipc_port_release_send(ipc_port_t port);
72
73 __END_DECLS
74
75 #define kIOMapperWaitSystem ((IOMapper *) 1)
76
77 static IOMapper * gIOSystemMapper = NULL;
78
79 ppnum_t gIOLastPage;
80
81 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
82
83 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
84
85 #define super IOMemoryDescriptor
86
87 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
88
89 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
90
91 static IORecursiveLock * gIOMemoryLock;
92
93 #define LOCK IORecursiveLockLock( gIOMemoryLock)
94 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
95 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
96 #define WAKEUP \
97 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
98
99 #if 0
100 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
101 #else
102 #define DEBG(fmt, args...) {}
103 #endif
104
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107 // Some data structures and accessor macros used by the initWithOptions
108 // function.
109
110 enum ioPLBlockFlags {
111 kIOPLOnDevice = 0x00000001,
112 kIOPLExternUPL = 0x00000002,
113 };
114
115 struct IOMDPersistentInitData {
116 const IOGeneralMemoryDescriptor * fMD;
117 IOMemoryReference * fMemRef;
118 };
119
120 struct ioPLBlock {
121 upl_t fIOPL;
122 vm_address_t fPageInfo; // Pointer to page list or index into it
123 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
124 ppnum_t fMappedPage; // Page number of first page in this iopl
125 unsigned int fPageOffset; // Offset within first page of iopl
126 unsigned int fFlags; // Flags
127 };
128
129 enum { kMaxWireTags = 6 };
130
131 struct ioGMDData {
132 IOMapper * fMapper;
133 uint64_t fDMAMapAlignment;
134 uint64_t fMappedBase;
135 uint64_t fMappedLength;
136 uint64_t fPreparationID;
137 #if IOTRACKING
138 IOTracking fWireTracking;
139 #endif /* IOTRACKING */
140 unsigned int fPageCnt;
141 uint8_t fDMAMapNumAddressBits;
142 unsigned char fDiscontig:1;
143 unsigned char fCompletionError:1;
144 unsigned char fMappedBaseValid:1;
145 unsigned char _resv:3;
146 unsigned char fDMAAccess:2;
147
148 /* variable length arrays */
149 upl_page_info_t fPageList[1]
150 #if __LP64__
151 // align fPageList as for ioPLBlock
152 __attribute__((aligned(sizeof(upl_t))))
153 #endif
154 ;
155 //ioPLBlock fBlocks[1];
156 };
157
158 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
159 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
160 #define getNumIOPL(osd, d) \
161 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
162 #define getPageList(d) (&(d->fPageList[0]))
163 #define computeDataSize(p, u) \
164 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
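// Layout note (explanatory, derived from the macros above): an ioGMDData
// allocation is a single OSData buffer holding the fixed header, followed by
// fPageCnt upl_page_info_t entries (fPageList), followed by a variable number
// of ioPLBlock records. getIOPLList() locates the ioPLBlock array by stepping
// past fPageList, and getNumIOPL() derives the block count from the OSData
// length.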
165
166 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
167
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169
170 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
171
172 extern "C" {
173 kern_return_t
174 device_data_action(
175 uintptr_t device_handle,
176 ipc_port_t device_pager,
177 vm_prot_t protection,
178 vm_object_offset_t offset,
179 vm_size_t size)
180 {
181 kern_return_t kr;
182 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
183 IOMemoryDescriptor * memDesc;
184
185 LOCK;
186 memDesc = ref->dp.memory;
187 if (memDesc) {
188 memDesc->retain();
189 kr = memDesc->handleFault(device_pager, offset, size);
190 memDesc->release();
191 } else {
192 kr = KERN_ABORTED;
193 }
194 UNLOCK;
195
196 return kr;
197 }
198
199 kern_return_t
200 device_close(
201 uintptr_t device_handle)
202 {
203 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
204
205 IODelete( ref, IOMemoryDescriptorReserved, 1 );
206
207 return kIOReturnSuccess;
208 }
209 }; // end extern "C"
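// device_data_action() and device_close() are the callbacks invoked by the
// device pager machinery: the former routes page-in requests to handleFault()
// on the owning descriptor (or aborts if it is gone), and the latter tears
// down the IOMemoryDescriptorReserved state when the pager is destroyed.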
210
211 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212
213 // Note this inline function uses C++ reference arguments to return values.
214 // This means that pointers are not passed and NULL does not have to be
215 // checked for, since a NULL reference is illegal.
216 static inline void
217 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
218 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
219 {
220 assert(kIOMemoryTypeUIO == type
221 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
222 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
223 if (kIOMemoryTypeUIO == type) {
224 user_size_t us;
225 user_addr_t ad;
226 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
227 }
228 #ifndef __LP64__
229 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
230 IOAddressRange cur = r.v64[ind];
231 addr = cur.address;
232 len = cur.length;
233 }
234 #endif /* !__LP64__ */
235 else {
236 IOVirtualRange cur = r.v[ind];
237 addr = cur.address;
238 len = cur.length;
239 }
240 }
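// getAddrLenForInd() hides the different in-memory representations of a range
// (uio iovecs, IOAddressRange on non-LP64 builds, IOVirtualRange) behind one
// accessor, so callers can iterate ranges without caring about the descriptor
// type.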
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 static IOReturn
245 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
246 {
247 IOReturn err = kIOReturnSuccess;
248
249 *control = VM_PURGABLE_SET_STATE;
250
251 enum { kIOMemoryPurgeableControlMask = 15 };
252
253 switch (kIOMemoryPurgeableControlMask & newState) {
254 case kIOMemoryPurgeableKeepCurrent:
255 *control = VM_PURGABLE_GET_STATE;
256 break;
257
258 case kIOMemoryPurgeableNonVolatile:
259 *state = VM_PURGABLE_NONVOLATILE;
260 break;
261 case kIOMemoryPurgeableVolatile:
262 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
263 break;
264 case kIOMemoryPurgeableEmpty:
265 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
266 break;
267 default:
268 err = kIOReturnBadArgument;
269 break;
270 }
271
272 if (*control == VM_PURGABLE_SET_STATE) {
273 // let VM know this call is from the kernel and is allowed to alter
274 // the volatility of the memory entry even if it was created with
275 // MAP_MEM_PURGABLE_KERNEL_ONLY
276 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
277 }
278
279 return err;
280 }
281
282 static IOReturn
283 purgeableStateBits(int * state)
284 {
285 IOReturn err = kIOReturnSuccess;
286
287 switch (VM_PURGABLE_STATE_MASK & *state) {
288 case VM_PURGABLE_NONVOLATILE:
289 *state = kIOMemoryPurgeableNonVolatile;
290 break;
291 case VM_PURGABLE_VOLATILE:
292 *state = kIOMemoryPurgeableVolatile;
293 break;
294 case VM_PURGABLE_EMPTY:
295 *state = kIOMemoryPurgeableEmpty;
296 break;
297 default:
298 *state = kIOMemoryPurgeableNonVolatile;
299 err = kIOReturnNotReady;
300 break;
301 }
302 return err;
303 }
304
305 typedef struct {
306 unsigned int wimg;
307 unsigned int object_type;
308 } iokit_memtype_entry;
309
310 static const iokit_memtype_entry iomd_mem_types[] = {
311 [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
312 [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
313 [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
314 [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
315 [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
316 [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
317 [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
318 [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
319 [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
320 [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
321 };
322
323 static vm_prot_t
324 vmProtForCacheMode(IOOptionBits cacheMode)
325 {
326 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
327 vm_prot_t prot = 0;
328 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
329 return prot;
330 }
331
332 static unsigned int
333 pagerFlagsForCacheMode(IOOptionBits cacheMode)
334 {
335 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
336 if (cacheMode == kIODefaultCache) {
337 return -1U;
338 }
339 return iomd_mem_types[cacheMode].wimg;
340 }
341
342 static IOOptionBits
343 cacheModeForPagerFlags(unsigned int pagerFlags)
344 {
345 pagerFlags &= VM_WIMG_MASK;
346 IOOptionBits cacheMode = kIODefaultCache;
347 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
348 if (iomd_mem_types[i].wimg == pagerFlags) {
349 cacheMode = i;
350 break;
351 }
352 }
353 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
354 }
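// cacheModeForPagerFlags() is the inverse of pagerFlagsForCacheMode(): it maps
// a pager's WIMG bits back to an IOOptionBits cache mode using the table
// above, falling back to kIOCopybackCache when the bits are VM_WIMG_DEFAULT or
// match no entry.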
355
356 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
357 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
358
359 struct IOMemoryEntry {
360 ipc_port_t entry;
361 int64_t offset;
362 uint64_t size;
363 };
364
365 struct IOMemoryReference {
366 volatile SInt32 refCount;
367 vm_prot_t prot;
368 uint32_t capacity;
369 uint32_t count;
370 struct IOMemoryReference * mapRef;
371 IOMemoryEntry entries[0];
372 };
373
374 enum{
375 kIOMemoryReferenceReuse = 0x00000001,
376 kIOMemoryReferenceWrite = 0x00000002,
377 kIOMemoryReferenceCOW = 0x00000004,
378 };
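// These option bits are consumed by memoryReferenceCreate() below:
//   kIOMemoryReferenceReuse - try to reuse the named entries of an existing
//                             _memRef (MAP_MEM_NAMED_REUSE)
//   kIOMemoryReferenceWrite - request VM_PROT_WRITE on the entries
//   kIOMemoryReferenceCOW   - create copy-on-write entries (MAP_MEM_VM_COPY)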
379
380 SInt32 gIOMemoryReferenceCount;
381
382 IOMemoryReference *
383 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
384 {
385 IOMemoryReference * ref;
386 size_t newSize, oldSize, copySize;
387
388 newSize = (sizeof(IOMemoryReference)
389 - sizeof(ref->entries)
390 + capacity * sizeof(ref->entries[0]));
391 ref = (typeof(ref))IOMalloc(newSize);
392 if (realloc) {
393 oldSize = (sizeof(IOMemoryReference)
394 - sizeof(realloc->entries)
395 + realloc->capacity * sizeof(realloc->entries[0]));
396 copySize = oldSize;
397 if (copySize > newSize) {
398 copySize = newSize;
399 }
400 if (ref) {
401 bcopy(realloc, ref, copySize);
402 }
403 IOFree(realloc, oldSize);
404 } else if (ref) {
405 bzero(ref, sizeof(*ref));
406 ref->refCount = 1;
407 OSIncrementAtomic(&gIOMemoryReferenceCount);
408 }
409 if (!ref) {
410 return NULL;
411 }
412 ref->capacity = capacity;
413 return ref;
414 }
415
416 void
417 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
418 {
419 IOMemoryEntry * entries;
420 size_t size;
421
422 if (ref->mapRef) {
423 memoryReferenceFree(ref->mapRef);
424 ref->mapRef = NULL;
425 }
426
427 entries = ref->entries + ref->count;
428 while (entries > &ref->entries[0]) {
429 entries--;
430 ipc_port_release_send(entries->entry);
431 }
432 size = (sizeof(IOMemoryReference)
433 - sizeof(ref->entries)
434 + ref->capacity * sizeof(ref->entries[0]));
435 IOFree(ref, size);
436
437 OSDecrementAtomic(&gIOMemoryReferenceCount);
438 }
439
440 void
441 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
442 {
443 if (1 == OSDecrementAtomic(&ref->refCount)) {
444 memoryReferenceFree(ref);
445 }
446 }
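// IOMemoryReference lifecycle: memoryReferenceAlloc() creates (or grows) the
// entry array with an initial refCount of 1, memoryReferenceCreate() fills it
// with named memory entries, and memoryReferenceRelease() drops a reference,
// freeing the entries' send rights and the allocation when the last reference
// goes away.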
447
448
449 IOReturn
450 IOGeneralMemoryDescriptor::memoryReferenceCreate(
451 IOOptionBits options,
452 IOMemoryReference ** reference)
453 {
454 enum { kCapacity = 4, kCapacityInc = 4 };
455
456 kern_return_t err;
457 IOMemoryReference * ref;
458 IOMemoryEntry * entries;
459 IOMemoryEntry * cloneEntries;
460 vm_map_t map;
461 ipc_port_t entry, cloneEntry;
462 vm_prot_t prot;
463 memory_object_size_t actualSize;
464 uint32_t rangeIdx;
465 uint32_t count;
466 mach_vm_address_t entryAddr, endAddr, entrySize;
467 mach_vm_size_t srcAddr, srcLen;
468 mach_vm_size_t nextAddr, nextLen;
469 mach_vm_size_t offset, remain;
470 IOByteCount physLen;
471 IOOptionBits type = (_flags & kIOMemoryTypeMask);
472 IOOptionBits cacheMode;
473 unsigned int pagerFlags;
474 vm_tag_t tag;
475 vm_named_entry_kernel_flags_t vmne_kflags;
476
477 ref = memoryReferenceAlloc(kCapacity, NULL);
478 if (!ref) {
479 return kIOReturnNoMemory;
480 }
481
482 tag = getVMTag(kernel_map);
483 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
484 entries = &ref->entries[0];
485 count = 0;
486 err = KERN_SUCCESS;
487
488 offset = 0;
489 rangeIdx = 0;
490 if (_task) {
491 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
492 } else {
493 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
494 nextLen = physLen;
495
496 // default cache mode for physical
497 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
498 IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
499 _flags |= (mode << kIOMemoryBufferCacheShift);
500 }
501 }
502
503 // cache mode & vm_prot
504 prot = VM_PROT_READ;
505 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
506 prot |= vmProtForCacheMode(cacheMode);
507 // VM system requires write access to change cache mode
508 if (kIODefaultCache != cacheMode) {
509 prot |= VM_PROT_WRITE;
510 }
511 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
512 prot |= VM_PROT_WRITE;
513 }
514 if (kIOMemoryReferenceWrite & options) {
515 prot |= VM_PROT_WRITE;
516 }
517 if (kIOMemoryReferenceCOW & options) {
518 prot |= MAP_MEM_VM_COPY;
519 }
520
521 if (kIOMemoryUseReserve & _flags) {
522 prot |= MAP_MEM_GRAB_SECLUDED;
523 }
524
525 if ((kIOMemoryReferenceReuse & options) && _memRef) {
526 cloneEntries = &_memRef->entries[0];
527 prot |= MAP_MEM_NAMED_REUSE;
528 }
529
530 if (_task) {
531 // virtual ranges
532
533 if (kIOMemoryBufferPageable & _flags) {
534 int ledger_tag, ledger_no_footprint;
535
536 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
537 prot |= MAP_MEM_NAMED_CREATE;
538
539 // default accounting settings:
540 // + "none" ledger tag
541 // + include in footprint
542 // can be changed later with ::setOwnership()
543 ledger_tag = VM_LEDGER_TAG_NONE;
544 ledger_no_footprint = 0;
545
546 if (kIOMemoryBufferPurgeable & _flags) {
547 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
548 if (VM_KERN_MEMORY_SKYWALK == tag) {
549 // Skywalk purgeable memory accounting:
550 // + "network" ledger tag
551 // + not included in footprint
552 ledger_tag = VM_LEDGER_TAG_NETWORK;
553 ledger_no_footprint = 1;
554 } else {
555 // regular purgeable memory accounting:
556 // + no ledger tag
557 // + included in footprint
558 ledger_tag = VM_LEDGER_TAG_NONE;
559 ledger_no_footprint = 0;
560 }
561 }
562 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
563 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
564 if (kIOMemoryUseReserve & _flags) {
565 prot |= MAP_MEM_GRAB_SECLUDED;
566 }
567
568 prot |= VM_PROT_WRITE;
569 map = NULL;
570 } else {
571 map = get_task_map(_task);
572 }
573
574 remain = _length;
575 while (remain) {
576 srcAddr = nextAddr;
577 srcLen = nextLen;
578 nextAddr = 0;
579 nextLen = 0;
580 // coalesce addr range
581 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
582 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
583 if ((srcAddr + srcLen) != nextAddr) {
584 break;
585 }
586 srcLen += nextLen;
587 }
588 entryAddr = trunc_page_64(srcAddr);
589 endAddr = round_page_64(srcAddr + srcLen);
590 do{
591 entrySize = (endAddr - entryAddr);
592 if (!entrySize) {
593 break;
594 }
595 actualSize = entrySize;
596
597 cloneEntry = MACH_PORT_NULL;
598 if (MAP_MEM_NAMED_REUSE & prot) {
599 if (cloneEntries < &_memRef->entries[_memRef->count]) {
600 cloneEntry = cloneEntries->entry;
601 } else {
602 prot &= ~MAP_MEM_NAMED_REUSE;
603 }
604 }
605
606 err = mach_make_memory_entry_internal(map,
607 &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
608
609 if (KERN_SUCCESS != err) {
610 break;
611 }
612 if (actualSize > entrySize) {
613 panic("mach_make_memory_entry_64 actualSize");
614 }
615
616 if (count >= ref->capacity) {
617 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
618 entries = &ref->entries[count];
619 }
620 entries->entry = entry;
621 entries->size = actualSize;
622 entries->offset = offset + (entryAddr - srcAddr);
623 entryAddr += actualSize;
624 if (MAP_MEM_NAMED_REUSE & prot) {
625 if ((cloneEntries->entry == entries->entry)
626 && (cloneEntries->size == entries->size)
627 && (cloneEntries->offset == entries->offset)) {
628 cloneEntries++;
629 } else {
630 prot &= ~MAP_MEM_NAMED_REUSE;
631 }
632 }
633 entries++;
634 count++;
635 }while (true);
636 offset += srcLen;
637 remain -= srcLen;
638 }
639 } else {
640 // _task == 0, physical or kIOMemoryTypeUPL
641 memory_object_t pager;
642 vm_size_t size = ptoa_64(_pages);
643
644 if (!getKernelReserved()) {
645 panic("getKernelReserved");
646 }
647
648 reserved->dp.pagerContig = (1 == _rangesCount);
649 reserved->dp.memory = this;
650
651 pagerFlags = pagerFlagsForCacheMode(cacheMode);
652 if (-1U == pagerFlags) {
653 panic("phys is kIODefaultCache");
654 }
655 if (reserved->dp.pagerContig) {
656 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
657 }
658
659 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
660 size, pagerFlags);
661 assert(pager);
662 if (!pager) {
663 err = kIOReturnVMError;
664 } else {
665 srcAddr = nextAddr;
666 entryAddr = trunc_page_64(srcAddr);
667 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
668 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
669 assert(KERN_SUCCESS == err);
670 if (KERN_SUCCESS != err) {
671 device_pager_deallocate(pager);
672 } else {
673 reserved->dp.devicePager = pager;
674 entries->entry = entry;
675 entries->size = size;
676 entries->offset = offset + (entryAddr - srcAddr);
677 entries++;
678 count++;
679 }
680 }
681 }
682
683 ref->count = count;
684 ref->prot = prot;
685
686 if (_task && (KERN_SUCCESS == err)
687 && (kIOMemoryMapCopyOnWrite & _flags)
688 && !(kIOMemoryReferenceCOW & options)) {
689 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
690 }
691
692 if (KERN_SUCCESS == err) {
693 if (MAP_MEM_NAMED_REUSE & prot) {
694 memoryReferenceFree(ref);
695 OSIncrementAtomic(&_memRef->refCount);
696 ref = _memRef;
697 }
698 } else {
699 memoryReferenceFree(ref);
700 ref = NULL;
701 }
702
703 *reference = ref;
704
705 return err;
706 }
707
708 kern_return_t
709 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
710 {
711 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
712 IOReturn err;
713 vm_map_offset_t addr;
714
715 addr = ref->mapped;
716
717 err = vm_map_enter_mem_object(map, &addr, ref->size,
718 (vm_map_offset_t) 0,
719 (((ref->options & kIOMapAnywhere)
720 ? VM_FLAGS_ANYWHERE
721 : VM_FLAGS_FIXED)),
722 VM_MAP_KERNEL_FLAGS_NONE,
723 ref->tag,
724 IPC_PORT_NULL,
725 (memory_object_offset_t) 0,
726 false, /* copy */
727 ref->prot,
728 ref->prot,
729 VM_INHERIT_NONE);
730 if (KERN_SUCCESS == err) {
731 ref->mapped = (mach_vm_address_t) addr;
732 ref->map = map;
733 }
734
735 return err;
736 }
737
738 IOReturn
739 IOGeneralMemoryDescriptor::memoryReferenceMap(
740 IOMemoryReference * ref,
741 vm_map_t map,
742 mach_vm_size_t inoffset,
743 mach_vm_size_t size,
744 IOOptionBits options,
745 mach_vm_address_t * inaddr)
746 {
747 IOReturn err;
748 int64_t offset = inoffset;
749 uint32_t rangeIdx, entryIdx;
750 vm_map_offset_t addr, mapAddr;
751 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
752
753 mach_vm_address_t nextAddr;
754 mach_vm_size_t nextLen;
755 IOByteCount physLen;
756 IOMemoryEntry * entry;
757 vm_prot_t prot, memEntryCacheMode;
758 IOOptionBits type;
759 IOOptionBits cacheMode;
760 vm_tag_t tag;
761 // for the kIOMapPrefault option.
762 upl_page_info_t * pageList = NULL;
763 UInt currentPageIndex = 0;
764 bool didAlloc;
765
766 if (ref->mapRef) {
767 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
768 return err;
769 }
770
771 type = _flags & kIOMemoryTypeMask;
772
773 prot = VM_PROT_READ;
774 if (!(kIOMapReadOnly & options)) {
775 prot |= VM_PROT_WRITE;
776 }
777 prot &= ref->prot;
778
779 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
780 if (kIODefaultCache != cacheMode) {
781 // VM system requires write access to update named entry cache mode
782 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
783 }
784
785 tag = getVMTag(map);
786
787 if (_task) {
788 // Find first range for offset
789 if (!_rangesCount) {
790 return kIOReturnBadArgument;
791 }
792 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
793 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
794 if (remain < nextLen) {
795 break;
796 }
797 remain -= nextLen;
798 }
799 } else {
800 rangeIdx = 0;
801 remain = 0;
802 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
803 nextLen = size;
804 }
805
806 assert(remain < nextLen);
807 if (remain >= nextLen) {
808 return kIOReturnBadArgument;
809 }
810
811 nextAddr += remain;
812 nextLen -= remain;
813 pageOffset = (page_mask & nextAddr);
814 addr = 0;
815 didAlloc = false;
816
817 if (!(options & kIOMapAnywhere)) {
818 addr = *inaddr;
819 if (pageOffset != (page_mask & addr)) {
820 return kIOReturnNotAligned;
821 }
822 addr -= pageOffset;
823 }
824
825 // find first entry for offset
826 for (entryIdx = 0;
827 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
828 entryIdx++) {
829 }
830 entryIdx--;
831 entry = &ref->entries[entryIdx];
832
833 // allocate VM
834 size = round_page_64(size + pageOffset);
835 if (kIOMapOverwrite & options) {
836 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
837 map = IOPageableMapForAddress(addr);
838 }
839 err = KERN_SUCCESS;
840 } else {
841 IOMemoryDescriptorMapAllocRef ref;
842 ref.map = map;
843 ref.tag = tag;
844 ref.options = options;
845 ref.size = size;
846 ref.prot = prot;
847 if (options & kIOMapAnywhere) {
848 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
849 ref.mapped = 0;
850 } else {
851 ref.mapped = addr;
852 }
853 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
854 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
855 } else {
856 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
857 }
858 if (KERN_SUCCESS == err) {
859 addr = ref.mapped;
860 map = ref.map;
861 didAlloc = true;
862 }
863 }
864
865 /*
866 * If the memory is associated with a device pager but doesn't have a UPL,
867 * it will be immediately faulted in through the pager via populateDevicePager().
868 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
869 * operations.
870 */
871 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
872 options &= ~kIOMapPrefault;
873 }
874
875 /*
876 * Prefaulting is only possible if we wired the memory earlier. Check the
877 * memory type, and the underlying data.
878 */
879 if (options & kIOMapPrefault) {
880 /*
881 * The memory must have been wired by calling ::prepare(), otherwise
882 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
883 */
884 assert(_wireCount != 0);
885 assert(_memoryEntries != NULL);
886 if ((_wireCount == 0) ||
887 (_memoryEntries == NULL)) {
888 return kIOReturnBadArgument;
889 }
890
891 // Get the page list.
892 ioGMDData* dataP = getDataP(_memoryEntries);
893 ioPLBlock const* ioplList = getIOPLList(dataP);
894 pageList = getPageList(dataP);
895
896 // Get the number of IOPLs.
897 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
898
899 /*
900 * Scan through the IOPL Info Blocks, looking for the first block containing
901 * the offset. The search will go one block past it, so we'll need to step
902 * back to the right block at the end.
903 */
904 UInt ioplIndex = 0;
905 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
906 ioplIndex++;
907 }
908 ioplIndex--;
909
910 // Retrieve the IOPL info block.
911 ioPLBlock ioplInfo = ioplList[ioplIndex];
912
913 /*
914 * For external UPLs, fPageInfo points directly to the UPL's
915 * upl_page_info_t array.
916 */
917 if (ioplInfo.fFlags & kIOPLExternUPL) {
918 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
919 } else {
920 pageList = &pageList[ioplInfo.fPageInfo];
921 }
922
923 // Rebase [offset] into the IOPL so we can look up the first page index.
924 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
925
926 // Retrieve the index of the first page corresponding to the offset.
927 currentPageIndex = atop_32(offsetInIOPL);
928 }
929
930 // enter mappings
931 remain = size;
932 mapAddr = addr;
933 addr += pageOffset;
934
935 while (remain && (KERN_SUCCESS == err)) {
936 entryOffset = offset - entry->offset;
937 if ((page_mask & entryOffset) != pageOffset) {
938 err = kIOReturnNotAligned;
939 break;
940 }
941
942 if (kIODefaultCache != cacheMode) {
943 vm_size_t unused = 0;
944 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
945 memEntryCacheMode, NULL, entry->entry);
946 assert(KERN_SUCCESS == err);
947 }
948
949 entryOffset -= pageOffset;
950 if (entryOffset >= entry->size) {
951 panic("entryOffset");
952 }
953 chunk = entry->size - entryOffset;
954 if (chunk) {
955 vm_map_kernel_flags_t vmk_flags;
956
957 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
958 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
959
960 if (chunk > remain) {
961 chunk = remain;
962 }
963 if (options & kIOMapPrefault) {
964 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
965
966 err = vm_map_enter_mem_object_prefault(map,
967 &mapAddr,
968 chunk, 0 /* mask */,
969 (VM_FLAGS_FIXED
970 | VM_FLAGS_OVERWRITE),
971 vmk_flags,
972 tag,
973 entry->entry,
974 entryOffset,
975 prot, // cur
976 prot, // max
977 &pageList[currentPageIndex],
978 nb_pages);
979
980 // Compute the next index in the page list.
981 currentPageIndex += nb_pages;
982 assert(currentPageIndex <= _pages);
983 } else {
984 err = vm_map_enter_mem_object(map,
985 &mapAddr,
986 chunk, 0 /* mask */,
987 (VM_FLAGS_FIXED
988 | VM_FLAGS_OVERWRITE),
989 vmk_flags,
990 tag,
991 entry->entry,
992 entryOffset,
993 false, // copy
994 prot, // cur
995 prot, // max
996 VM_INHERIT_NONE);
997 }
998 if (KERN_SUCCESS != err) {
999 break;
1000 }
1001 remain -= chunk;
1002 if (!remain) {
1003 break;
1004 }
1005 mapAddr += chunk;
1006 offset += chunk - pageOffset;
1007 }
1008 pageOffset = 0;
1009 entry++;
1010 entryIdx++;
1011 if (entryIdx >= ref->count) {
1012 err = kIOReturnOverrun;
1013 break;
1014 }
1015 }
1016
1017 if ((KERN_SUCCESS != err) && didAlloc) {
1018 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1019 addr = 0;
1020 }
1021 *inaddr = addr;
1022
1023 return err;
1024 }
1025
1026 IOReturn
1027 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1028 IOMemoryReference * ref,
1029 IOByteCount * residentPageCount,
1030 IOByteCount * dirtyPageCount)
1031 {
1032 IOReturn err;
1033 IOMemoryEntry * entries;
1034 unsigned int resident, dirty;
1035 unsigned int totalResident, totalDirty;
1036
1037 totalResident = totalDirty = 0;
1038 err = kIOReturnSuccess;
1039 entries = ref->entries + ref->count;
1040 while (entries > &ref->entries[0]) {
1041 entries--;
1042 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1043 if (KERN_SUCCESS != err) {
1044 break;
1045 }
1046 totalResident += resident;
1047 totalDirty += dirty;
1048 }
1049
1050 if (residentPageCount) {
1051 *residentPageCount = totalResident;
1052 }
1053 if (dirtyPageCount) {
1054 *dirtyPageCount = totalDirty;
1055 }
1056 return err;
1057 }
1058
1059 IOReturn
1060 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1061 IOMemoryReference * ref,
1062 IOOptionBits newState,
1063 IOOptionBits * oldState)
1064 {
1065 IOReturn err;
1066 IOMemoryEntry * entries;
1067 vm_purgable_t control;
1068 int totalState, state;
1069
1070 totalState = kIOMemoryPurgeableNonVolatile;
1071 err = kIOReturnSuccess;
1072 entries = ref->entries + ref->count;
1073 while (entries > &ref->entries[0]) {
1074 entries--;
1075
1076 err = purgeableControlBits(newState, &control, &state);
1077 if (KERN_SUCCESS != err) {
1078 break;
1079 }
1080 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1081 if (KERN_SUCCESS != err) {
1082 break;
1083 }
1084 err = purgeableStateBits(&state);
1085 if (KERN_SUCCESS != err) {
1086 break;
1087 }
1088
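// Aggregate the per-entry states: Empty dominates, then Volatile, then
// NonVolatile.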
1089 if (kIOMemoryPurgeableEmpty == state) {
1090 totalState = kIOMemoryPurgeableEmpty;
1091 } else if (kIOMemoryPurgeableEmpty == totalState) {
1092 continue;
1093 } else if (kIOMemoryPurgeableVolatile == totalState) {
1094 continue;
1095 } else if (kIOMemoryPurgeableVolatile == state) {
1096 totalState = kIOMemoryPurgeableVolatile;
1097 } else {
1098 totalState = kIOMemoryPurgeableNonVolatile;
1099 }
1100 }
1101
1102 if (oldState) {
1103 *oldState = totalState;
1104 }
1105 return err;
1106 }
1107
1108 IOReturn
1109 IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1110 IOMemoryReference * ref,
1111 task_t newOwner,
1112 int newLedgerTag,
1113 IOOptionBits newLedgerOptions)
1114 {
1115 IOReturn err, totalErr;
1116 IOMemoryEntry * entries;
1117
1118 totalErr = kIOReturnSuccess;
1119 entries = ref->entries + ref->count;
1120 while (entries > &ref->entries[0]) {
1121 entries--;
1122
1123 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1124 if (KERN_SUCCESS != err) {
1125 totalErr = err;
1126 }
1127 }
1128
1129 return totalErr;
1130 }
1131
1132 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1133
1134 IOMemoryDescriptor *
1135 IOMemoryDescriptor::withAddress(void * address,
1136 IOByteCount length,
1137 IODirection direction)
1138 {
1139 return IOMemoryDescriptor::
1140 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1141 }
1142
1143 #ifndef __LP64__
1144 IOMemoryDescriptor *
1145 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1146 IOByteCount length,
1147 IODirection direction,
1148 task_t task)
1149 {
1150 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1151 if (that) {
1152 if (that->initWithAddress(address, length, direction, task)) {
1153 return that;
1154 }
1155
1156 that->release();
1157 }
1158 return NULL;
1159 }
1160 #endif /* !__LP64__ */
1161
1162 IOMemoryDescriptor *
1163 IOMemoryDescriptor::withPhysicalAddress(
1164 IOPhysicalAddress address,
1165 IOByteCount length,
1166 IODirection direction )
1167 {
1168 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1169 }
1170
1171 #ifndef __LP64__
1172 IOMemoryDescriptor *
1173 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1174 UInt32 withCount,
1175 IODirection direction,
1176 task_t task,
1177 bool asReference)
1178 {
1179 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1180 if (that) {
1181 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1182 return that;
1183 }
1184
1185 that->release();
1186 }
1187 return NULL;
1188 }
1189 #endif /* !__LP64__ */
1190
1191 IOMemoryDescriptor *
1192 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1193 mach_vm_size_t length,
1194 IOOptionBits options,
1195 task_t task)
1196 {
1197 IOAddressRange range = { address, length };
1198 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1199 }
1200
1201 IOMemoryDescriptor *
1202 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1203 UInt32 rangeCount,
1204 IOOptionBits options,
1205 task_t task)
1206 {
1207 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1208 if (that) {
1209 if (task) {
1210 options |= kIOMemoryTypeVirtual64;
1211 } else {
1212 options |= kIOMemoryTypePhysical64;
1213 }
1214
1215 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1216 return that;
1217 }
1218
1219 that->release();
1220 }
1221
1222 return NULL;
1223 }
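// Illustrative usage only (not part of this file; userAddr, userLen and task
// are placeholders): a driver typically wraps a client buffer with
// withAddressRange(), wires it with prepare(), and balances that with
// complete() and release().
//
//   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//       userAddr, userLen, kIODirectionOutIn, task);
//   if (md) {
//       if (kIOReturnSuccess == md->prepare()) {
//           // ... DMA, or readBytes()/writeBytes() against the wired memory ...
//           md->complete();
//       }
//       md->release();
//   }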
1224
1225
1226 /*
1227 * withOptions:
1228 *
1229 * Create a new IOMemoryDescriptor. The buffer is made up of several
1230 * virtual address ranges, from a given task.
1231 *
1232 * Passing the ranges as a reference will avoid an extra allocation.
1233 */
1234 IOMemoryDescriptor *
1235 IOMemoryDescriptor::withOptions(void * buffers,
1236 UInt32 count,
1237 UInt32 offset,
1238 task_t task,
1239 IOOptionBits opts,
1240 IOMapper * mapper)
1241 {
1242 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1243
1244 if (self
1245 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1246 self->release();
1247 return NULL;
1248 }
1249
1250 return self;
1251 }
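// Illustrative sketch (assumed usage, not from this file; addrA/lenA/addrB/lenB
// are placeholders): kIOMemoryAsReference keeps a pointer to the caller's range
// array instead of copying it, so the array must outlive the descriptor.
//
//   IOAddressRange ranges[2] = { { addrA, lenA }, { addrB, lenB } };
//   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(ranges, 2, 0, task,
//       kIODirectionOutIn | kIOMemoryTypeVirtual64 | kIOMemoryAsReference, NULL);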
1252
1253 bool
1254 IOMemoryDescriptor::initWithOptions(void * buffers,
1255 UInt32 count,
1256 UInt32 offset,
1257 task_t task,
1258 IOOptionBits options,
1259 IOMapper * mapper)
1260 {
1261 return false;
1262 }
1263
1264 #ifndef __LP64__
1265 IOMemoryDescriptor *
1266 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1267 UInt32 withCount,
1268 IODirection direction,
1269 bool asReference)
1270 {
1271 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1272 if (that) {
1273 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1274 return that;
1275 }
1276
1277 that->release();
1278 }
1279 return NULL;
1280 }
1281
1282 IOMemoryDescriptor *
1283 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1284 IOByteCount offset,
1285 IOByteCount length,
1286 IODirection direction)
1287 {
1288 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1289 }
1290 #endif /* !__LP64__ */
1291
1292 IOMemoryDescriptor *
1293 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1294 {
1295 IOGeneralMemoryDescriptor *origGenMD =
1296 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1297
1298 if (origGenMD) {
1299 return IOGeneralMemoryDescriptor::
1300 withPersistentMemoryDescriptor(origGenMD);
1301 } else {
1302 return NULL;
1303 }
1304 }
1305
1306 IOMemoryDescriptor *
1307 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1308 {
1309 IOMemoryReference * memRef;
1310
1311 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1312 return NULL;
1313 }
1314
1315 if (memRef == originalMD->_memRef) {
1316 originalMD->retain(); // Add a new reference to ourselves
1317 originalMD->memoryReferenceRelease(memRef);
1318 return originalMD;
1319 }
1320
1321 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1322 IOMDPersistentInitData initData = { originalMD, memRef };
1323
1324 if (self
1325 && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1326 self->release();
1327 self = NULL;
1328 }
1329 return self;
1330 }
1331
1332 #ifndef __LP64__
1333 bool
1334 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1335 IOByteCount withLength,
1336 IODirection withDirection)
1337 {
1338 _singleRange.v.address = (vm_offset_t) address;
1339 _singleRange.v.length = withLength;
1340
1341 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1342 }
1343
1344 bool
1345 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1346 IOByteCount withLength,
1347 IODirection withDirection,
1348 task_t withTask)
1349 {
1350 _singleRange.v.address = address;
1351 _singleRange.v.length = withLength;
1352
1353 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1354 }
1355
1356 bool
1357 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1358 IOPhysicalAddress address,
1359 IOByteCount withLength,
1360 IODirection withDirection )
1361 {
1362 _singleRange.p.address = address;
1363 _singleRange.p.length = withLength;
1364
1365 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1366 }
1367
1368 bool
1369 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1370 IOPhysicalRange * ranges,
1371 UInt32 count,
1372 IODirection direction,
1373 bool reference)
1374 {
1375 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1376
1377 if (reference) {
1378 mdOpts |= kIOMemoryAsReference;
1379 }
1380
1381 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1382 }
1383
1384 bool
1385 IOGeneralMemoryDescriptor::initWithRanges(
1386 IOVirtualRange * ranges,
1387 UInt32 count,
1388 IODirection direction,
1389 task_t task,
1390 bool reference)
1391 {
1392 IOOptionBits mdOpts = direction;
1393
1394 if (reference) {
1395 mdOpts |= kIOMemoryAsReference;
1396 }
1397
1398 if (task) {
1399 mdOpts |= kIOMemoryTypeVirtual;
1400
1401 // Auto-prepare if this is a kernel memory descriptor, since very few
1402 // clients bother to prepare() kernel memory, and the requirement was
1403 // never enforced.
1404 if (task == kernel_task) {
1405 mdOpts |= kIOMemoryAutoPrepare;
1406 }
1407 } else {
1408 mdOpts |= kIOMemoryTypePhysical;
1409 }
1410
1411 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1412 }
1413 #endif /* !__LP64__ */
1414
1415 /*
1416 * initWithOptions:
1417 *
1418 * (Re)initialize an IOMemoryDescriptor. The buffer is made up of several
1419 * virtual address ranges from a given task, several physical ranges, a UPL
1420 * from the UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
1421 *
1422 * Passing the ranges as a reference will avoid an extra allocation.
1423 *
1424 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1425 * existing instance -- note this behavior is not commonly supported in other
1426 * I/O Kit classes, although it is supported here.
1427 */
1428
1429 bool
1430 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1431 UInt32 count,
1432 UInt32 offset,
1433 task_t task,
1434 IOOptionBits options,
1435 IOMapper * mapper)
1436 {
1437 IOOptionBits type = options & kIOMemoryTypeMask;
1438
1439 #ifndef __LP64__
1440 if (task
1441 && (kIOMemoryTypeVirtual == type)
1442 && vm_map_is_64bit(get_task_map(task))
1443 && ((IOVirtualRange *) buffers)->address) {
1444 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1445 return false;
1446 }
1447 #endif /* !__LP64__ */
1448
1449 // Grab the original MD's configuration data to initialise the
1450 // arguments to this function.
1451 if (kIOMemoryTypePersistentMD == type) {
1452 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1453 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1454 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1455
1456 // Only accept persistent memory descriptors with valid dataP data.
1457 assert(orig->_rangesCount == 1);
1458 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1459 return false;
1460 }
1461
1462 _memRef = initData->fMemRef; // Grab the new named entry
1463 options = orig->_flags & ~kIOMemoryAsReference;
1464 type = options & kIOMemoryTypeMask;
1465 buffers = orig->_ranges.v;
1466 count = orig->_rangesCount;
1467
1468 // Now grab the original task and whatever mapper was previously used
1469 task = orig->_task;
1470 mapper = dataP->fMapper;
1471
1472 // We are ready to go through the original initialisation now
1473 }
1474
1475 switch (type) {
1476 case kIOMemoryTypeUIO:
1477 case kIOMemoryTypeVirtual:
1478 #ifndef __LP64__
1479 case kIOMemoryTypeVirtual64:
1480 #endif /* !__LP64__ */
1481 assert(task);
1482 if (!task) {
1483 return false;
1484 }
1485 break;
1486
1487 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1488 #ifndef __LP64__
1489 case kIOMemoryTypePhysical64:
1490 #endif /* !__LP64__ */
1491 case kIOMemoryTypeUPL:
1492 assert(!task);
1493 break;
1494 default:
1495 return false; /* bad argument */
1496 }
1497
1498 assert(buffers);
1499 assert(count);
1500
1501 /*
1502 * We can check the _initialized instance variable before having ever set
1503 * it to an initial value because I/O Kit guarantees that all our instance
1504 * variables are zeroed on an object's allocation.
1505 */
1506
1507 if (_initialized) {
1508 /*
1509 * An existing memory descriptor is being retargeted to point to
1510 * somewhere else. Clean up our present state.
1511 */
1512 IOOptionBits type = _flags & kIOMemoryTypeMask;
1513 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
1514 while (_wireCount) {
1515 complete();
1516 }
1517 }
1518 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1519 if (kIOMemoryTypeUIO == type) {
1520 uio_free((uio_t) _ranges.v);
1521 }
1522 #ifndef __LP64__
1523 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1524 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1525 }
1526 #endif /* !__LP64__ */
1527 else {
1528 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1529 }
1530 }
1531
1532 options |= (kIOMemoryRedirected & _flags);
1533 if (!(kIOMemoryRedirected & options)) {
1534 if (_memRef) {
1535 memoryReferenceRelease(_memRef);
1536 _memRef = NULL;
1537 }
1538 if (_mappings) {
1539 _mappings->flushCollection();
1540 }
1541 }
1542 } else {
1543 if (!super::init()) {
1544 return false;
1545 }
1546 _initialized = true;
1547 }
1548
1549 // Grab the appropriate mapper
1550 if (kIOMemoryHostOrRemote & options) {
1551 options |= kIOMemoryMapperNone;
1552 }
1553 if (kIOMemoryMapperNone & options) {
1554 mapper = NULL; // No Mapper
1555 } else if (mapper == kIOMapperSystem) {
1556 IOMapper::checkForSystemMapper();
1557 gIOSystemMapper = mapper = IOMapper::gSystem;
1558 }
1559
1560 // Remove the dynamic internal use flags from the initial setting
1561 options &= ~(kIOMemoryPreparedReadOnly);
1562 _flags = options;
1563 _task = task;
1564
1565 #ifndef __LP64__
1566 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1567 #endif /* !__LP64__ */
1568
1569 _dmaReferences = 0;
1570 __iomd_reservedA = 0;
1571 __iomd_reservedB = 0;
1572 _highestPage = 0;
1573
1574 if (kIOMemoryThreadSafe & options) {
1575 if (!_prepareLock) {
1576 _prepareLock = IOLockAlloc();
1577 }
1578 } else if (_prepareLock) {
1579 IOLockFree(_prepareLock);
1580 _prepareLock = NULL;
1581 }
1582
1583 if (kIOMemoryTypeUPL == type) {
1584 ioGMDData *dataP;
1585 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1586
1587 if (!initMemoryEntries(dataSize, mapper)) {
1588 return false;
1589 }
1590 dataP = getDataP(_memoryEntries);
1591 dataP->fPageCnt = 0;
1592 switch (kIOMemoryDirectionMask & options) {
1593 case kIODirectionOut:
1594 dataP->fDMAAccess = kIODMAMapReadAccess;
1595 break;
1596 case kIODirectionIn:
1597 dataP->fDMAAccess = kIODMAMapWriteAccess;
1598 break;
1599 case kIODirectionNone:
1600 case kIODirectionOutIn:
1601 default:
1602 panic("bad dir for upl 0x%x\n", (int) options);
1603 break;
1604 }
1605 // _wireCount++; // UPLs start out life wired
1606
1607 _length = count;
1608 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1609
1610 ioPLBlock iopl;
1611 iopl.fIOPL = (upl_t) buffers;
1612 upl_set_referenced(iopl.fIOPL, true);
1613 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1614
1615 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
1616 panic("short external upl");
1617 }
1618
1619 _highestPage = upl_get_highest_page(iopl.fIOPL);
1620
1621 // Set the flag kIOPLOnDevice (conveniently equal to 1)
1622 iopl.fFlags = pageList->device | kIOPLExternUPL;
1623 if (!pageList->device) {
1624 // Pre-compute the offset into the UPL's page list
1625 pageList = &pageList[atop_32(offset)];
1626 offset &= PAGE_MASK;
1627 }
1628 iopl.fIOMDOffset = 0;
1629 iopl.fMappedPage = 0;
1630 iopl.fPageInfo = (vm_address_t) pageList;
1631 iopl.fPageOffset = offset;
1632 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1633 } else {
1634 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1635 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1636
1637 // Initialize the memory descriptor
1638 if (options & kIOMemoryAsReference) {
1639 #ifndef __LP64__
1640 _rangesIsAllocated = false;
1641 #endif /* !__LP64__ */
1642
1643 // Hack assignment to get the buffer arg into _ranges.
1644 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1645 // work, C++ sigh.
1646 // This also initialises the uio & physical ranges.
1647 _ranges.v = (IOVirtualRange *) buffers;
1648 } else {
1649 #ifndef __LP64__
1650 _rangesIsAllocated = true;
1651 #endif /* !__LP64__ */
1652 switch (type) {
1653 case kIOMemoryTypeUIO:
1654 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1655 break;
1656
1657 #ifndef __LP64__
1658 case kIOMemoryTypeVirtual64:
1659 case kIOMemoryTypePhysical64:
1660 if (count == 1
1661 #ifndef __arm__
1662 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1663 #endif
1664 ) {
1665 if (kIOMemoryTypeVirtual64 == type) {
1666 type = kIOMemoryTypeVirtual;
1667 } else {
1668 type = kIOMemoryTypePhysical;
1669 }
1670 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1671 _rangesIsAllocated = false;
1672 _ranges.v = &_singleRange.v;
1673 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1674 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1675 break;
1676 }
1677 _ranges.v64 = IONew(IOAddressRange, count);
1678 if (!_ranges.v64) {
1679 return false;
1680 }
1681 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1682 break;
1683 #endif /* !__LP64__ */
1684 case kIOMemoryTypeVirtual:
1685 case kIOMemoryTypePhysical:
1686 if (count == 1) {
1687 _flags |= kIOMemoryAsReference;
1688 #ifndef __LP64__
1689 _rangesIsAllocated = false;
1690 #endif /* !__LP64__ */
1691 _ranges.v = &_singleRange.v;
1692 } else {
1693 _ranges.v = IONew(IOVirtualRange, count);
1694 if (!_ranges.v) {
1695 return false;
1696 }
1697 }
1698 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1699 break;
1700 }
1701 }
1702 _rangesCount = count;
1703
1704 // Find starting address within the vector of ranges
1705 Ranges vec = _ranges;
1706 mach_vm_size_t totalLength = 0;
1707 unsigned int ind, pages = 0;
1708 for (ind = 0; ind < count; ind++) {
1709 mach_vm_address_t addr;
1710 mach_vm_address_t endAddr;
1711 mach_vm_size_t len;
1712
1713 // addr & len are returned by this function
1714 getAddrLenForInd(addr, len, type, vec, ind);
1715 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
1716 break;
1717 }
1718 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
1719 break;
1720 }
1721 if (os_add_overflow(totalLength, len, &totalLength)) {
1722 break;
1723 }
1724 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1725 ppnum_t highPage = atop_64(addr + len - 1);
1726 if (highPage > _highestPage) {
1727 _highestPage = highPage;
1728 }
1729 }
1730 }
1731 if ((ind < count)
1732 || (totalLength != ((IOByteCount) totalLength))) {
1733 return false; /* overflow */
1734 }
1735 _length = totalLength;
1736 _pages = pages;
1737
1738 // Auto-prepare memory at creation time.
1739 // Implied completion when descriptor is free-ed
1740
1741
1742 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1743 _wireCount++; // Physical MDs are, by definition, wired
1744 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1745 ioGMDData *dataP;
1746 unsigned dataSize;
1747
1748 if (_pages > atop_64(max_mem)) {
1749 return false;
1750 }
1751
1752 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1753 if (!initMemoryEntries(dataSize, mapper)) {
1754 return false;
1755 }
1756 dataP = getDataP(_memoryEntries);
1757 dataP->fPageCnt = _pages;
1758
1759 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1760 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
1761 _kernelTag = IOMemoryTag(kernel_map);
1762 if (_kernelTag == gIOSurfaceTag) {
1763 _userTag = VM_MEMORY_IOSURFACE;
1764 }
1765 }
1766
1767 if ((kIOMemoryPersistent & _flags) && !_memRef) {
1768 IOReturn
1769 err = memoryReferenceCreate(0, &_memRef);
1770 if (kIOReturnSuccess != err) {
1771 return false;
1772 }
1773 }
1774
1775 if ((_flags & kIOMemoryAutoPrepare)
1776 && prepare() != kIOReturnSuccess) {
1777 return false;
1778 }
1779 }
1780 }
1781
1782 return true;
1783 }
1784
1785 /*
1786 * free
1787 *
1788 * Free resources.
1789 */
1790 void
1791 IOGeneralMemoryDescriptor::free()
1792 {
1793 IOOptionBits type = _flags & kIOMemoryTypeMask;
1794
1795 if (reserved) {
1796 LOCK;
1797 reserved->dp.memory = NULL;
1798 UNLOCK;
1799 }
1800 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1801 ioGMDData * dataP;
1802 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
1803 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1804 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1805 }
1806 } else {
1807 while (_wireCount) {
1808 complete();
1809 }
1810 }
1811
1812 if (_memoryEntries) {
1813 _memoryEntries->release();
1814 }
1815
1816 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1817 if (kIOMemoryTypeUIO == type) {
1818 uio_free((uio_t) _ranges.v);
1819 }
1820 #ifndef __LP64__
1821 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1822 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1823 }
1824 #endif /* !__LP64__ */
1825 else {
1826 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1827 }
1828
1829 _ranges.v = NULL;
1830 }
1831
1832 if (reserved) {
1833 cleanKernelReserved(reserved);
1834 if (reserved->dp.devicePager) {
1835 // memEntry holds a ref on the device pager which owns reserved
1836 // (IOMemoryDescriptorReserved) so no reserved access after this point
1837 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
1838 } else {
1839 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1840 }
1841 reserved = NULL;
1842 }
1843
1844 if (_memRef) {
1845 memoryReferenceRelease(_memRef);
1846 }
1847 if (_prepareLock) {
1848 IOLockFree(_prepareLock);
1849 }
1850
1851 super::free();
1852 }
1853
1854 #ifndef __LP64__
1855 void
1856 IOGeneralMemoryDescriptor::unmapFromKernel()
1857 {
1858 panic("IOGMD::unmapFromKernel deprecated");
1859 }
1860
1861 void
1862 IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1863 {
1864 panic("IOGMD::mapIntoKernel deprecated");
1865 }
1866 #endif /* !__LP64__ */
1867
1868 /*
1869 * getDirection:
1870 *
1871 * Get the direction of the transfer.
1872 */
1873 IODirection
1874 IOMemoryDescriptor::getDirection() const
1875 {
1876 #ifndef __LP64__
1877 if (_direction) {
1878 return _direction;
1879 }
1880 #endif /* !__LP64__ */
1881 return (IODirection) (_flags & kIOMemoryDirectionMask);
1882 }
1883
1884 /*
1885 * getLength:
1886 *
1887 * Get the length of the transfer (over all ranges).
1888 */
1889 IOByteCount
1890 IOMemoryDescriptor::getLength() const
1891 {
1892 return _length;
1893 }
1894
1895 void
1896 IOMemoryDescriptor::setTag( IOOptionBits tag )
1897 {
1898 _tag = tag;
1899 }
1900
1901 IOOptionBits
1902 IOMemoryDescriptor::getTag( void )
1903 {
1904 return _tag;
1905 }
1906
1907 uint64_t
1908 IOMemoryDescriptor::getFlags(void)
1909 {
1910 return _flags;
1911 }
1912
1913 #ifndef __LP64__
1914 #pragma clang diagnostic push
1915 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1916
1917 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1918 IOPhysicalAddress
1919 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1920 {
1921 addr64_t physAddr = 0;
1922
1923 if (prepare() == kIOReturnSuccess) {
1924 physAddr = getPhysicalSegment64( offset, length );
1925 complete();
1926 }
1927
1928 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
1929 }
1930
1931 #pragma clang diagnostic pop
1932
1933 #endif /* !__LP64__ */
1934
1935 IOByteCount
1936 IOMemoryDescriptor::readBytes
1937 (IOByteCount offset, void *bytes, IOByteCount length)
1938 {
1939 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1940 IOByteCount remaining;
1941
1942 // Assert that this entire I/O is within the available range
1943 assert(offset <= _length);
1944 assert(offset + length <= _length);
1945 if ((offset >= _length)
1946 || ((offset + length) > _length)) {
1947 return 0;
1948 }
1949
1950 assert(!(kIOMemoryRemote & _flags));
1951 if (kIOMemoryRemote & _flags) {
1952 return 0;
1953 }
1954
1955 if (kIOMemoryThreadSafe & _flags) {
1956 LOCK;
1957 }
1958
1959 remaining = length = min(length, _length - offset);
1960 while (remaining) { // (process another target segment?)
1961 addr64_t srcAddr64;
1962 IOByteCount srcLen;
1963
1964 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1965 if (!srcAddr64) {
1966 break;
1967 }
1968
1969 // Clip segment length to remaining
1970 if (srcLen > remaining) {
1971 srcLen = remaining;
1972 }
1973
1974 copypv(srcAddr64, dstAddr, srcLen,
1975 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1976
1977 dstAddr += srcLen;
1978 offset += srcLen;
1979 remaining -= srcLen;
1980 }
1981
1982 if (kIOMemoryThreadSafe & _flags) {
1983 UNLOCK;
1984 }
1985
1986 assert(!remaining);
1987
1988 return length - remaining;
1989 }
1990
1991 IOByteCount
1992 IOMemoryDescriptor::writeBytes
1993 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1994 {
1995 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1996 IOByteCount remaining;
1997 IOByteCount offset = inoffset;
1998
1999 // Assert that this entire I/O is within the available range
2000 assert(offset <= _length);
2001 assert(offset + length <= _length);
2002
2003 assert( !(kIOMemoryPreparedReadOnly & _flags));
2004
2005 if ((kIOMemoryPreparedReadOnly & _flags)
2006 || (offset >= _length)
2007 || ((offset + length) > _length)) {
2008 return 0;
2009 }
2010
2011 assert(!(kIOMemoryRemote & _flags));
2012 if (kIOMemoryRemote & _flags) {
2013 return 0;
2014 }
2015
2016 if (kIOMemoryThreadSafe & _flags) {
2017 LOCK;
2018 }
2019
2020 remaining = length = min(length, _length - offset);
2021 while (remaining) { // (process another target segment?)
2022 addr64_t dstAddr64;
2023 IOByteCount dstLen;
2024
2025 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2026 if (!dstAddr64) {
2027 break;
2028 }
2029
2030 // Clip segment length to remaining
2031 if (dstLen > remaining) {
2032 dstLen = remaining;
2033 }
2034
2035 if (!srcAddr) {
2036 bzero_phys(dstAddr64, dstLen);
2037 } else {
2038 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
2039 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2040 srcAddr += dstLen;
2041 }
2042 offset += dstLen;
2043 remaining -= dstLen;
2044 }
2045
2046 if (kIOMemoryThreadSafe & _flags) {
2047 UNLOCK;
2048 }
2049
2050 assert(!remaining);
2051
2052 #if defined(__x86_64__)
2053 // copypv does not cppvFsnk on intel
2054 #else
2055 if (!srcAddr) {
2056 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2057 }
2058 #endif
2059
2060 return length - remaining;
2061 }
2062
2063 #ifndef __LP64__
2064 void
2065 IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2066 {
2067 panic("IOGMD::setPosition deprecated");
2068 }
2069 #endif /* !__LP64__ */
2070
2071 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2072
2073 uint64_t
2074 IOGeneralMemoryDescriptor::getPreparationID( void )
2075 {
2076 ioGMDData *dataP;
2077
2078 if (!_wireCount) {
2079 return kIOPreparationIDUnprepared;
2080 }
2081
2082 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2083 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2084 IOMemoryDescriptor::setPreparationID();
2085 return IOMemoryDescriptor::getPreparationID();
2086 }
2087
2088 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2089 return kIOPreparationIDUnprepared;
2090 }
2091
2092 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2093 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2094 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2095 }
2096 return dataP->fPreparationID;
2097 }
2098
2099 void
2100 IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2101 {
2102 if (reserved->creator) {
2103 task_deallocate(reserved->creator);
2104 reserved->creator = NULL;
2105 }
2106 }
2107
2108 IOMemoryDescriptorReserved *
2109 IOMemoryDescriptor::getKernelReserved( void )
2110 {
2111 if (!reserved) {
2112 reserved = IONewZero(IOMemoryDescriptorReserved, 1);
2113 }
2114 return reserved;
2115 }
2116
2117 void
2118 IOMemoryDescriptor::setPreparationID( void )
2119 {
2120 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2121 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2122 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2123 }
2124 }
2125
2126 uint64_t
2127 IOMemoryDescriptor::getPreparationID( void )
2128 {
2129 if (reserved) {
2130 return reserved->preparationID;
2131 } else {
2132 return kIOPreparationIDUnsupported;
2133 }
2134 }
2135
2136 void
2137 IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2138 {
2139 _kernelTag = (vm_tag_t) kernelTag;
2140 _userTag = (vm_tag_t) userTag;
2141 }
2142
2143 uint32_t
2144 IOMemoryDescriptor::getVMTag(vm_map_t map)
2145 {
2146 if (vm_kernel_map_is_kernel(map)) {
2147 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2148 return (uint32_t) _kernelTag;
2149 }
2150 } else {
2151 if (VM_KERN_MEMORY_NONE != _userTag) {
2152 return (uint32_t) _userTag;
2153 }
2154 }
2155 return IOMemoryTag(map);
2156 }
2157
2158 IOReturn
2159 IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2160 {
2161 IOReturn err = kIOReturnSuccess;
2162 DMACommandOps params;
2163 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2164 ioGMDData *dataP;
2165
2166 params = (op & ~kIOMDDMACommandOperationMask & op);
2167 op &= kIOMDDMACommandOperationMask;
2168
2169 if (kIOMDDMAMap == op) {
2170 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2171 return kIOReturnUnderrun;
2172 }
2173
2174 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2175
2176 if (!_memoryEntries
2177 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2178 return kIOReturnNoMemory;
2179 }
2180
2181 if (_memoryEntries && data->fMapper) {
2182 bool remap, keepMap;
2183 dataP = getDataP(_memoryEntries);
2184
2185 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2186 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2187 }
2188 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2189 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2190 }
2191
2192 keepMap = (data->fMapper == gIOSystemMapper);
2193 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2194
2195 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2196 IOLockLock(_prepareLock);
2197 }
2198
2199 remap = (!keepMap);
2200 remap |= (dataP->fDMAMapNumAddressBits < 64)
2201 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2202 remap |= (dataP->fDMAMapAlignment > page_size);
2203
2204 if (remap || !dataP->fMappedBaseValid) {
2205 // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2206 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2207 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2208 dataP->fMappedBase = data->fAlloc;
2209 dataP->fMappedBaseValid = true;
2210 dataP->fMappedLength = data->fAllocLength;
2211 data->fAllocLength = 0; // IOMD owns the alloc now
2212 }
2213 } else {
2214 data->fAlloc = dataP->fMappedBase;
2215 data->fAllocLength = 0; // give out IOMD map
2216 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2217 }
2218 data->fMapContig = !dataP->fDiscontig;
2219
2220 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2221 IOLockUnlock(_prepareLock);
2222 }
2223 }
2224 return err;
2225 }
2226 if (kIOMDDMAUnmap == op) {
2227 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2228 return kIOReturnUnderrun;
2229 }
2230 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2231
2232 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2233
2234 return kIOReturnSuccess;
2235 }
2236
2237 if (kIOMDAddDMAMapSpec == op) {
2238 if (dataSize < sizeof(IODMAMapSpecification)) {
2239 return kIOReturnUnderrun;
2240 }
2241
2242 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2243
2244 if (!_memoryEntries
2245 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2246 return kIOReturnNoMemory;
2247 }
2248
2249 if (_memoryEntries) {
2250 dataP = getDataP(_memoryEntries);
2251 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2252 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2253 }
2254 if (data->alignment > dataP->fDMAMapAlignment) {
2255 dataP->fDMAMapAlignment = data->alignment;
2256 }
2257 }
2258 return kIOReturnSuccess;
2259 }
2260
2261 if (kIOMDGetCharacteristics == op) {
2262 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2263 return kIOReturnUnderrun;
2264 }
2265
2266 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2267 data->fLength = _length;
2268 data->fSGCount = _rangesCount;
2269 data->fPages = _pages;
2270 data->fDirection = getDirection();
2271 if (!_wireCount) {
2272 data->fIsPrepared = false;
2273 } else {
2274 data->fIsPrepared = true;
2275 data->fHighestPage = _highestPage;
2276 if (_memoryEntries) {
2277 dataP = getDataP(_memoryEntries);
2278 ioPLBlock *ioplList = getIOPLList(dataP);
2279 UInt count = getNumIOPL(_memoryEntries, dataP);
2280 if (count == 1) {
2281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2282 }
2283 }
2284 }
2285
2286 return kIOReturnSuccess;
2287 } else if (kIOMDDMAActive == op) {
2288 if (params) {
2289 int16_t prior;
2290 prior = OSAddAtomic16(1, &md->_dmaReferences);
2291 if (!prior) {
2292 md->_mapName = NULL;
2293 }
2294 } else {
2295 if (md->_dmaReferences) {
2296 OSAddAtomic16(-1, &md->_dmaReferences);
2297 } else {
2298 panic("_dmaReferences underflow");
2299 }
2300 }
2301 } else if (kIOMDWalkSegments != op) {
2302 return kIOReturnBadArgument;
2303 }
2304
2305 // Get the next segment
2306 struct InternalState {
2307 IOMDDMAWalkSegmentArgs fIO;
2308 mach_vm_size_t fOffset2Index;
2309 mach_vm_size_t fNextOffset;
2310 UInt fIndex;
2311 } *isP;
2312
2313 // Find the next segment
2314 if (dataSize < sizeof(*isP)) {
2315 return kIOReturnUnderrun;
2316 }
2317
2318 isP = (InternalState *) vData;
2319 mach_vm_size_t offset = isP->fIO.fOffset;
2320 uint8_t mapped = isP->fIO.fMapped;
2321 uint64_t mappedBase;
2322
2323 if (mapped && (kIOMemoryRemote & _flags)) {
2324 return kIOReturnNotAttached;
2325 }
2326
2327 if (IOMapper::gSystem && mapped
2328 && (!(kIOMemoryHostOnly & _flags))
2329 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2330 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2331 if (!_memoryEntries
2332 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2333 return kIOReturnNoMemory;
2334 }
2335
2336 dataP = getDataP(_memoryEntries);
2337 if (dataP->fMapper) {
2338 IODMAMapSpecification mapSpec;
2339 bzero(&mapSpec, sizeof(mapSpec));
2340 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2341 mapSpec.alignment = dataP->fDMAMapAlignment;
2342 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2343 if (kIOReturnSuccess != err) {
2344 return err;
2345 }
2346 dataP->fMappedBaseValid = true;
2347 }
2348 }
2349
2350 if (kIOMDDMAWalkMappedLocal == mapped) {
2351 mappedBase = isP->fIO.fMappedBase;
2352 } else if (mapped) {
2353 if (IOMapper::gSystem
2354 && (!(kIOMemoryHostOnly & _flags))
2355 && _memoryEntries
2356 && (dataP = getDataP(_memoryEntries))
2357 && dataP->fMappedBaseValid) {
2358 mappedBase = dataP->fMappedBase;
2359 } else {
2360 mapped = 0;
2361 }
2362 }
2363
2364 if (offset >= _length) {
2365 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2366 }
2367
2368 // Validate the previous offset
2369 UInt ind;
2370 mach_vm_size_t off2Ind = isP->fOffset2Index;
2371 if (!params
2372 && offset
2373 && (offset == isP->fNextOffset || off2Ind <= offset)) {
2374 ind = isP->fIndex;
2375 } else {
2376 ind = off2Ind = 0; // Start from beginning
2377 }
2378 mach_vm_size_t length;
2379 UInt64 address;
2380
2381 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2382 // Physical address based memory descriptor
2383 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2384
2385 // Find the range after the one that contains the offset
2386 mach_vm_size_t len;
2387 for (len = 0; off2Ind <= offset; ind++) {
2388 len = physP[ind].length;
2389 off2Ind += len;
2390 }
2391
2392 // Calculate length within range and starting address
2393 length = off2Ind - offset;
2394 address = physP[ind - 1].address + len - length;
2395
2396 if (mapped) {
2397 address = mappedBase + offset;
2398 } else {
2399 // see how far we can coalesce ranges
2400 while (ind < _rangesCount && address + length == physP[ind].address) {
2401 len = physP[ind].length;
2402 length += len;
2403 off2Ind += len;
2404 ind++;
2405 }
2406 }
2407
2408 // correct contiguous check overshoot
2409 ind--;
2410 off2Ind -= len;
2411 }
2412 #ifndef __LP64__
2413 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2414 // Physical address based memory descriptor
2415 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2416
2417 // Find the range after the one that contains the offset
2418 mach_vm_size_t len;
2419 for (len = 0; off2Ind <= offset; ind++) {
2420 len = physP[ind].length;
2421 off2Ind += len;
2422 }
2423
2424 // Calculate length within range and starting address
2425 length = off2Ind - offset;
2426 address = physP[ind - 1].address + len - length;
2427
2428 if (mapped) {
2429 address = mappedBase + offset;
2430 } else {
2431 // see how far we can coalesce ranges
2432 while (ind < _rangesCount && address + length == physP[ind].address) {
2433 len = physP[ind].length;
2434 length += len;
2435 off2Ind += len;
2436 ind++;
2437 }
2438 }
2439 // correct contiguous check overshoot
2440 ind--;
2441 off2Ind -= len;
2442 }
2443 #endif /* !__LP64__ */
2444 else {
2445 do {
2446 if (!_wireCount) {
2447 panic("IOGMD: not wired for the IODMACommand");
2448 }
2449
2450 assert(_memoryEntries);
2451
2452 dataP = getDataP(_memoryEntries);
2453 const ioPLBlock *ioplList = getIOPLList(dataP);
2454 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2455 upl_page_info_t *pageList = getPageList(dataP);
2456
2457 assert(numIOPLs > 0);
2458
2459 // Scan through iopl info blocks looking for block containing offset
2460 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
2461 ind++;
2462 }
2463
2464 // Go back to actual range as search goes past it
2465 ioPLBlock ioplInfo = ioplList[ind - 1];
2466 off2Ind = ioplInfo.fIOMDOffset;
2467
2468 if (ind < numIOPLs) {
2469 length = ioplList[ind].fIOMDOffset;
2470 } else {
2471 length = _length;
2472 }
2473 length -= offset; // Remainder within iopl
2474
2475 // Subtract the offset up to this iopl in the total list
2476 offset -= off2Ind;
2477
2478 // If a mapped address is requested and this is a pre-mapped IOPL
2479 // then we just need to compute an offset relative to the mapped base.
2480 if (mapped) {
2481 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2482 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2483 continue; // Done; leave the do/while(false) now
2484 }
2485
2486 // The offset is rebased into the current iopl.
2487 // Now add the iopl 1st page offset.
2488 offset += ioplInfo.fPageOffset;
2489
2490 // For external UPLs the fPageInfo field points directly to
2491 // the upl's upl_page_info_t array.
2492 if (ioplInfo.fFlags & kIOPLExternUPL) {
2493 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2494 } else {
2495 pageList = &pageList[ioplInfo.fPageInfo];
2496 }
2497
2498 // Check for direct device non-paged memory
2499 if (ioplInfo.fFlags & kIOPLOnDevice) {
2500 address = ptoa_64(pageList->phys_addr) + offset;
2501 continue; // Done; leave the do/while(false) now
2502 }
2503
2504 // Now we need to compute the index into the pageList
2505 UInt pageInd = atop_32(offset);
2506 offset &= PAGE_MASK;
2507
2508 // Compute the starting address of this segment
2509 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2510 if (!pageAddr) {
2511 panic("!pageList phys_addr");
2512 }
2513
2514 address = ptoa_64(pageAddr) + offset;
2515
2516 // length is currently set to the length of the remainder of the iopl.
2517 // We need to check that the remainder of the iopl is contiguous.
2518 // This is indicated by pageList[ind].phys_addr being sequential.
2519 IOByteCount contigLength = PAGE_SIZE - offset;
2520 while (contigLength < length
2521 && ++pageAddr == pageList[++pageInd].phys_addr) {
2522 contigLength += PAGE_SIZE;
2523 }
2524
2525 if (contigLength < length) {
2526 length = contigLength;
2527 }
2528
2529
2530 assert(address);
2531 assert(length);
2532 } while (false);
2533 }
2534
2535 // Update return values and state
2536 isP->fIO.fIOVMAddr = address;
2537 isP->fIO.fLength = length;
2538 isP->fIndex = ind;
2539 isP->fOffset2Index = off2Ind;
2540 isP->fNextOffset = isP->fIO.fOffset + length;
2541
2542 return kIOReturnSuccess;
2543 }
2544
2545 addr64_t
2546 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2547 {
2548 IOReturn ret;
2549 mach_vm_address_t address = 0;
2550 mach_vm_size_t length = 0;
2551 IOMapper * mapper = gIOSystemMapper;
2552 IOOptionBits type = _flags & kIOMemoryTypeMask;
2553
2554 if (lengthOfSegment) {
2555 *lengthOfSegment = 0;
2556 }
2557
2558 if (offset >= _length) {
2559 return 0;
2560 }
2561
2562 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2563 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2564 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2565 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2566
2567 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
2568 unsigned rangesIndex = 0;
2569 Ranges vec = _ranges;
2570 mach_vm_address_t addr;
2571
2572 // Find starting address within the vector of ranges
2573 for (;;) {
2574 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2575 if (offset < length) {
2576 break;
2577 }
2578 offset -= length; // (make offset relative)
2579 rangesIndex++;
2580 }
2581
2582 // Now that we have the starting range,
2583 // let's find the last contiguous range
2584 addr += offset;
2585 length -= offset;
2586
2587 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
2588 mach_vm_address_t newAddr;
2589 mach_vm_size_t newLen;
2590
2591 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2592 if (addr + length != newAddr) {
2593 break;
2594 }
2595 length += newLen;
2596 }
2597 if (addr) {
2598 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2599 }
2600 } else {
2601 IOMDDMAWalkSegmentState _state;
2602 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2603
2604 state->fOffset = offset;
2605 state->fLength = _length - offset;
2606 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
2607
2608 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2609
2610 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
2611 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2612 ret, this, state->fOffset,
2613 state->fIOVMAddr, state->fLength);
2614 }
2615 if (kIOReturnSuccess == ret) {
2616 address = state->fIOVMAddr;
2617 length = state->fLength;
2618 }
2619
2620 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2621 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2622
2623 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
2624 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
2625 addr64_t origAddr = address;
2626 IOByteCount origLen = length;
2627
2628 address = mapper->mapToPhysicalAddress(origAddr);
2629 length = page_size - (address & (page_size - 1));
2630 while ((length < origLen)
2631 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
2632 length += page_size;
2633 }
2634 if (length > origLen) {
2635 length = origLen;
2636 }
2637 }
2638 }
2639 }
2640
2641 if (!address) {
2642 length = 0;
2643 }
2644
2645 if (lengthOfSegment) {
2646 *lengthOfSegment = length;
2647 }
2648
2649 return address;
2650 }
2651
2652 #ifndef __LP64__
2653 #pragma clang diagnostic push
2654 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2655
2656 addr64_t
2657 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2658 {
2659 addr64_t address = 0;
2660
2661 if (options & _kIOMemorySourceSegment) {
2662 address = getSourceSegment(offset, lengthOfSegment);
2663 } else if (options & kIOMemoryMapperNone) {
2664 address = getPhysicalSegment64(offset, lengthOfSegment);
2665 } else {
2666 address = getPhysicalSegment(offset, lengthOfSegment);
2667 }
2668
2669 return address;
2670 }
2671 #pragma clang diagnostic pop
2672
2673 addr64_t
2674 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2675 {
2676 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
2677 }
2678
2679 IOPhysicalAddress
2680 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2681 {
2682 addr64_t address = 0;
2683 IOByteCount length = 0;
2684
2685 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2686
2687 if (lengthOfSegment) {
2688 length = *lengthOfSegment;
2689 }
2690
2691 if ((address + length) > 0x100000000ULL) {
2692 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2693 address, (long) length, (getMetaClass())->getClassName());
2694 }
2695
2696 return (IOPhysicalAddress) address;
2697 }
2698
2699 addr64_t
2700 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2701 {
2702 IOPhysicalAddress phys32;
2703 IOByteCount length;
2704 addr64_t phys64;
2705 IOMapper * mapper = NULL;
2706
2707 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2708 if (!phys32) {
2709 return 0;
2710 }
2711
2712 if (gIOSystemMapper) {
2713 mapper = gIOSystemMapper;
2714 }
2715
2716 if (mapper) {
2717 IOByteCount origLen;
2718
2719 phys64 = mapper->mapToPhysicalAddress(phys32);
2720 origLen = *lengthOfSegment;
2721 length = page_size - (phys64 & (page_size - 1));
2722 while ((length < origLen)
2723 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
2724 length += page_size;
2725 }
2726 if (length > origLen) {
2727 length = origLen;
2728 }
2729
2730 *lengthOfSegment = length;
2731 } else {
2732 phys64 = (addr64_t) phys32;
2733 }
2734
2735 return phys64;
2736 }
2737
2738 IOPhysicalAddress
2739 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2740 {
2741 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
2742 }
2743
2744 IOPhysicalAddress
2745 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2746 {
2747 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
2748 }
2749
2750 #pragma clang diagnostic push
2751 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2752
2753 void *
2754 IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2755 IOByteCount * lengthOfSegment)
2756 {
2757 if (_task == kernel_task) {
2758 return (void *) getSourceSegment(offset, lengthOfSegment);
2759 } else {
2760 panic("IOGMD::getVirtualSegment deprecated");
2761 }
2762
2763 return NULL;
2764 }
2765 #pragma clang diagnostic pop
2766 #endif /* !__LP64__ */
2767
2768 IOReturn
2769 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2770 {
2771 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2772 DMACommandOps params;
2773 IOReturn err;
2774
2775 params = (op & ~kIOMDDMACommandOperationMask & op);
2776 op &= kIOMDDMACommandOperationMask;
2777
2778 if (kIOMDGetCharacteristics == op) {
2779 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2780 return kIOReturnUnderrun;
2781 }
2782
2783 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2784 data->fLength = getLength();
2785 data->fSGCount = 0;
2786 data->fDirection = getDirection();
2787 data->fIsPrepared = true; // Assume prepared - fail-safe
2788 } else if (kIOMDWalkSegments == op) {
2789 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
2790 return kIOReturnUnderrun;
2791 }
2792
2793 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2794 IOByteCount offset = (IOByteCount) data->fOffset;
2795
2796 IOPhysicalLength length;
2797 if (data->fMapped && IOMapper::gSystem) {
2798 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2799 } else {
2800 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2801 }
2802 data->fLength = length;
2803 } else if (kIOMDAddDMAMapSpec == op) {
2804 return kIOReturnUnsupported;
2805 } else if (kIOMDDMAMap == op) {
2806 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2807 return kIOReturnUnderrun;
2808 }
2809 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2810
2811 if (params) {
2812 panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2813 }
2814
2815 data->fMapContig = true;
2816 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2817
2818 return err;
2819 } else if (kIOMDDMAUnmap == op) {
2820 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2821 return kIOReturnUnderrun;
2822 }
2823 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2824
2825 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2826
2827 return kIOReturnSuccess;
2828 } else {
2829 return kIOReturnBadArgument;
2830 }
2831
2832 return kIOReturnSuccess;
2833 }
2834
2835 IOReturn
2836 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2837 IOOptionBits * oldState )
2838 {
2839 IOReturn err = kIOReturnSuccess;
2840
2841 vm_purgable_t control;
2842 int state;
2843
2844 assert(!(kIOMemoryRemote & _flags));
2845 if (kIOMemoryRemote & _flags) {
2846 return kIOReturnNotAttached;
2847 }
2848
2849 if (_memRef) {
2850 err = super::setPurgeable(newState, oldState);
2851 } else {
2852 if (kIOMemoryThreadSafe & _flags) {
2853 LOCK;
2854 }
2855 do{
2856 // Find the appropriate vm_map for the given task
2857 vm_map_t curMap;
2858 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
2859 err = kIOReturnNotReady;
2860 break;
2861 } else if (!_task) {
2862 err = kIOReturnUnsupported;
2863 break;
2864 } else {
2865 curMap = get_task_map(_task);
2866 if (NULL == curMap) {
2867 err = KERN_INVALID_ARGUMENT;
2868 break;
2869 }
2870 }
2871
2872 // can only do one range
2873 Ranges vec = _ranges;
2874 IOOptionBits type = _flags & kIOMemoryTypeMask;
2875 mach_vm_address_t addr;
2876 mach_vm_size_t len;
2877 getAddrLenForInd(addr, len, type, vec, 0);
2878
2879 err = purgeableControlBits(newState, &control, &state);
2880 if (kIOReturnSuccess != err) {
2881 break;
2882 }
2883 err = vm_map_purgable_control(curMap, addr, control, &state);
2884 if (oldState) {
2885 if (kIOReturnSuccess == err) {
2886 err = purgeableStateBits(&state);
2887 *oldState = state;
2888 }
2889 }
2890 }while (false);
2891 if (kIOMemoryThreadSafe & _flags) {
2892 UNLOCK;
2893 }
2894 }
2895
2896 return err;
2897 }
2898
2899 IOReturn
2900 IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2901 IOOptionBits * oldState )
2902 {
2903 IOReturn err = kIOReturnNotReady;
2904
2905 if (kIOMemoryThreadSafe & _flags) {
2906 LOCK;
2907 }
2908 if (_memRef) {
2909 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2910 }
2911 if (kIOMemoryThreadSafe & _flags) {
2912 UNLOCK;
2913 }
2914
2915 return err;
2916 }
2917
2918 IOReturn
2919 IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
2920 int newLedgerTag,
2921 IOOptionBits newLedgerOptions )
2922 {
2923 IOReturn err = kIOReturnSuccess;
2924
2925 assert(!(kIOMemoryRemote & _flags));
2926 if (kIOMemoryRemote & _flags) {
2927 return kIOReturnNotAttached;
2928 }
2929
2930 if (iokit_iomd_setownership_enabled == FALSE) {
2931 return kIOReturnUnsupported;
2932 }
2933
2934 if (_memRef) {
2935 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2936 } else {
2937 err = kIOReturnUnsupported;
2938 }
2939
2940 return err;
2941 }
2942
2943 IOReturn
2944 IOMemoryDescriptor::setOwnership( task_t newOwner,
2945 int newLedgerTag,
2946 IOOptionBits newLedgerOptions )
2947 {
2948 IOReturn err = kIOReturnNotReady;
2949
2950 assert(!(kIOMemoryRemote & _flags));
2951 if (kIOMemoryRemote & _flags) {
2952 return kIOReturnNotAttached;
2953 }
2954
2955 if (iokit_iomd_setownership_enabled == FALSE) {
2956 return kIOReturnUnsupported;
2957 }
2958
2959 if (kIOMemoryThreadSafe & _flags) {
2960 LOCK;
2961 }
2962 if (_memRef) {
2963 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
2964 } else {
2965 IOMultiMemoryDescriptor * mmd;
2966 IOSubMemoryDescriptor * smd;
2967 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
2968 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2969 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
2970 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2971 }
2972 }
2973 if (kIOMemoryThreadSafe & _flags) {
2974 UNLOCK;
2975 }
2976
2977 return err;
2978 }
2979
2980 IOReturn
2981 IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2982 IOByteCount * dirtyPageCount )
2983 {
2984 IOReturn err = kIOReturnNotReady;
2985
2986 assert(!(kIOMemoryRemote & _flags));
2987 if (kIOMemoryRemote & _flags) {
2988 return kIOReturnNotAttached;
2989 }
2990
2991 if (kIOMemoryThreadSafe & _flags) {
2992 LOCK;
2993 }
2994 if (_memRef) {
2995 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2996 } else {
2997 IOMultiMemoryDescriptor * mmd;
2998 IOSubMemoryDescriptor * smd;
2999 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3000 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3001 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3002 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3003 }
3004 }
3005 if (kIOMemoryThreadSafe & _flags) {
3006 UNLOCK;
3007 }
3008
3009 return err;
3010 }
3011
3012
3013 #if defined(__arm__) || defined(__arm64__)
3014 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3015 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3016 #else /* defined(__arm__) || defined(__arm64__) */
3017 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3018 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3019 #endif /* defined(__arm__) || defined(__arm64__) */
3020
3021 static void
3022 SetEncryptOp(addr64_t pa, unsigned int count)
3023 {
3024 ppnum_t page, end;
3025
3026 page = atop_64(round_page_64(pa));
3027 end = atop_64(trunc_page_64(pa + count));
3028 for (; page < end; page++) {
3029 pmap_clear_noencrypt(page);
3030 }
3031 }
3032
3033 static void
3034 ClearEncryptOp(addr64_t pa, unsigned int count)
3035 {
3036 ppnum_t page, end;
3037
3038 page = atop_64(round_page_64(pa));
3039 end = atop_64(trunc_page_64(pa + count));
3040 for (; page < end; page++) {
3041 pmap_set_noencrypt(page);
3042 }
3043 }
3044
3045 IOReturn
3046 IOMemoryDescriptor::performOperation( IOOptionBits options,
3047 IOByteCount offset, IOByteCount length )
3048 {
3049 IOByteCount remaining;
3050 unsigned int res;
3051 void (*func)(addr64_t pa, unsigned int count) = NULL;
3052 #if defined(__arm__) || defined(__arm64__)
3053 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3054 #endif
3055
3056 assert(!(kIOMemoryRemote & _flags));
3057 if (kIOMemoryRemote & _flags) {
3058 return kIOReturnNotAttached;
3059 }
3060
3061 switch (options) {
3062 case kIOMemoryIncoherentIOFlush:
3063 #if defined(__arm__) || defined(__arm64__)
3064 func_ext = &dcache_incoherent_io_flush64;
3065 #if __ARM_COHERENT_IO__
3066 func_ext(0, 0, 0, &res);
3067 return kIOReturnSuccess;
3068 #else /* __ARM_COHERENT_IO__ */
3069 break;
3070 #endif /* __ARM_COHERENT_IO__ */
3071 #else /* defined(__arm__) || defined(__arm64__) */
3072 func = &dcache_incoherent_io_flush64;
3073 break;
3074 #endif /* defined(__arm__) || defined(__arm64__) */
3075 case kIOMemoryIncoherentIOStore:
3076 #if defined(__arm__) || defined(__arm64__)
3077 func_ext = &dcache_incoherent_io_store64;
3078 #if __ARM_COHERENT_IO__
3079 func_ext(0, 0, 0, &res);
3080 return kIOReturnSuccess;
3081 #else /* __ARM_COHERENT_IO__ */
3082 break;
3083 #endif /* __ARM_COHERENT_IO__ */
3084 #else /* defined(__arm__) || defined(__arm64__) */
3085 func = &dcache_incoherent_io_store64;
3086 break;
3087 #endif /* defined(__arm__) || defined(__arm64__) */
3088
3089 case kIOMemorySetEncrypted:
3090 func = &SetEncryptOp;
3091 break;
3092 case kIOMemoryClearEncrypted:
3093 func = &ClearEncryptOp;
3094 break;
3095 }
3096
3097 #if defined(__arm__) || defined(__arm64__)
3098 if ((func == NULL) && (func_ext == NULL)) {
3099 return kIOReturnUnsupported;
3100 }
3101 #else /* defined(__arm__) || defined(__arm64__) */
3102 if (!func) {
3103 return kIOReturnUnsupported;
3104 }
3105 #endif /* defined(__arm__) || defined(__arm64__) */
3106
3107 if (kIOMemoryThreadSafe & _flags) {
3108 LOCK;
3109 }
3110
3111 res = 0x0UL;
3112 remaining = length = min(length, getLength() - offset);
3113 while (remaining) {
3114 // (process another target segment?)
3115 addr64_t dstAddr64;
3116 IOByteCount dstLen;
3117
3118 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3119 if (!dstAddr64) {
3120 break;
3121 }
3122
3123 // Clip segment length to remaining
3124 if (dstLen > remaining) {
3125 dstLen = remaining;
3126 }
3127
3128 #if defined(__arm__) || defined(__arm64__)
3129 if (func) {
3130 (*func)(dstAddr64, dstLen);
3131 }
3132 if (func_ext) {
3133 (*func_ext)(dstAddr64, dstLen, remaining, &res);
3134 if (res != 0x0UL) {
3135 remaining = 0;
3136 break;
3137 }
3138 }
3139 #else /* defined(__arm__) || defined(__arm64__) */
3140 (*func)(dstAddr64, dstLen);
3141 #endif /* defined(__arm__) || defined(__arm64__) */
3142
3143 offset += dstLen;
3144 remaining -= dstLen;
3145 }
3146
3147 if (kIOMemoryThreadSafe & _flags) {
3148 UNLOCK;
3149 }
3150
3151 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
3152 }
3153
3154 /*
3155 *
3156 */
3157
3158 #if defined(__i386__) || defined(__x86_64__)
3159
3160 #define io_kernel_static_start vm_kernel_stext
3161 #define io_kernel_static_end vm_kernel_etext
3162
3163 #elif defined(__arm__) || defined(__arm64__)
3164
3165 extern vm_offset_t static_memory_end;
3166
3167 #if defined(__arm64__)
3168 #define io_kernel_static_start vm_kext_base
3169 #else /* defined(__arm64__) */
3170 #define io_kernel_static_start vm_kernel_stext
3171 #endif /* defined(__arm64__) */
3172
3173 #define io_kernel_static_end static_memory_end
3174
3175 #else
3176 #error io_kernel_static_end is undefined for this architecture
3177 #endif
3178
3179 static kern_return_t
3180 io_get_kernel_static_upl(
3181 vm_map_t /* map */,
3182 uintptr_t offset,
3183 upl_size_t *upl_size,
3184 upl_t *upl,
3185 upl_page_info_array_t page_list,
3186 unsigned int *count,
3187 ppnum_t *highest_page)
3188 {
3189 unsigned int pageCount, page;
3190 ppnum_t phys;
3191 ppnum_t highestPage = 0;
3192
3193 pageCount = atop_32(*upl_size);
3194 if (pageCount > *count) {
3195 pageCount = *count;
3196 }
3197
3198 *upl = NULL;
3199
3200 for (page = 0; page < pageCount; page++) {
3201 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3202 if (!phys) {
3203 break;
3204 }
3205 page_list[page].phys_addr = phys;
3206 page_list[page].free_when_done = 0;
3207 page_list[page].absent = 0;
3208 page_list[page].dirty = 0;
3209 page_list[page].precious = 0;
3210 page_list[page].device = 0;
3211 if (phys > highestPage) {
3212 highestPage = phys;
3213 }
3214 }
3215
3216 *highest_page = highestPage;
3217
3218 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
3219 }
3220
3221 IOReturn
3222 IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3223 {
3224 IOOptionBits type = _flags & kIOMemoryTypeMask;
3225 IOReturn error = kIOReturnSuccess;
3226 ioGMDData *dataP;
3227 upl_page_info_array_t pageInfo;
3228 ppnum_t mapBase;
3229 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3230
3231 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3232
3233 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3234 forDirection = (IODirection) (forDirection | getDirection());
3235 }
3236
3237 dataP = getDataP(_memoryEntries);
3238 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3239 switch (kIODirectionOutIn & forDirection) {
3240 case kIODirectionOut:
3241 // Pages do not need to be marked as dirty on commit
3242 uplFlags = UPL_COPYOUT_FROM;
3243 dataP->fDMAAccess = kIODMAMapReadAccess;
3244 break;
3245
3246 case kIODirectionIn:
3247 dataP->fDMAAccess = kIODMAMapWriteAccess;
3248 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3249 break;
3250
3251 default:
3252 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3253 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3254 break;
3255 }
3256
3257 if (_wireCount) {
3258 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3259 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3260 error = kIOReturnNotWritable;
3261 }
3262 } else {
3263 IOMapper *mapper;
3264
3265 mapper = dataP->fMapper;
3266 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3267
3268 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3269 tag = _kernelTag;
3270 if (VM_KERN_MEMORY_NONE == tag) {
3271 tag = IOMemoryTag(kernel_map);
3272 }
3273
3274 if (kIODirectionPrepareToPhys32 & forDirection) {
3275 if (!mapper) {
3276 uplFlags |= UPL_NEED_32BIT_ADDR;
3277 }
3278 if (dataP->fDMAMapNumAddressBits > 32) {
3279 dataP->fDMAMapNumAddressBits = 32;
3280 }
3281 }
3282 if (kIODirectionPrepareNoFault & forDirection) {
3283 uplFlags |= UPL_REQUEST_NO_FAULT;
3284 }
3285 if (kIODirectionPrepareNoZeroFill & forDirection) {
3286 uplFlags |= UPL_NOZEROFILLIO;
3287 }
3288 if (kIODirectionPrepareNonCoherent & forDirection) {
3289 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3290 }
3291
3292 mapBase = 0;
3293
3294 // Note that appendBytes(NULL) zeros the data up to the desired length
3295 // and the length parameter is an unsigned int
3296 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3297 if (uplPageSize > ((unsigned int)uplPageSize)) {
3298 return kIOReturnNoMemory;
3299 }
3300 if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
3301 return kIOReturnNoMemory;
3302 }
3303 dataP = NULL;
3304
3305 // Find the appropriate vm_map for the given task
3306 vm_map_t curMap;
3307 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
3308 curMap = NULL;
3309 } else {
3310 curMap = get_task_map(_task);
3311 }
3312
3313 // Iterate over the vector of virtual ranges
3314 Ranges vec = _ranges;
3315 unsigned int pageIndex = 0;
3316 IOByteCount mdOffset = 0;
3317 ppnum_t highestPage = 0;
3318
3319 IOMemoryEntry * memRefEntry = NULL;
3320 if (_memRef) {
3321 memRefEntry = &_memRef->entries[0];
3322 }
3323
3324 for (UInt range = 0; range < _rangesCount; range++) {
3325 ioPLBlock iopl;
3326 mach_vm_address_t startPage, startPageOffset;
3327 mach_vm_size_t numBytes;
3328 ppnum_t highPage = 0;
3329
3330 // Get the startPage address and length of vec[range]
3331 getAddrLenForInd(startPage, numBytes, type, vec, range);
3332 startPageOffset = startPage & PAGE_MASK;
3333 iopl.fPageOffset = startPageOffset;
3334 numBytes += startPageOffset;
3335 startPage = trunc_page_64(startPage);
3336
3337 if (mapper) {
3338 iopl.fMappedPage = mapBase + pageIndex;
3339 } else {
3340 iopl.fMappedPage = 0;
3341 }
3342
3343 // Iterate over the current range, creating UPLs
3344 while (numBytes) {
3345 vm_address_t kernelStart = (vm_address_t) startPage;
3346 vm_map_t theMap;
3347 if (curMap) {
3348 theMap = curMap;
3349 } else if (_memRef) {
3350 theMap = NULL;
3351 } else {
3352 assert(_task == kernel_task);
3353 theMap = IOPageableMapForAddress(kernelStart);
3354 }
3355
3356 // ioplFlags is an in/out parameter
3357 upl_control_flags_t ioplFlags = uplFlags;
3358 dataP = getDataP(_memoryEntries);
3359 pageInfo = getPageList(dataP);
3360 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3361
3362 mach_vm_size_t _ioplSize = round_page(numBytes);
3363 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3364 unsigned int numPageInfo = atop_32(ioplSize);
3365
3366 if ((theMap == kernel_map)
3367 && (kernelStart >= io_kernel_static_start)
3368 && (kernelStart < io_kernel_static_end)) {
3369 error = io_get_kernel_static_upl(theMap,
3370 kernelStart,
3371 &ioplSize,
3372 &iopl.fIOPL,
3373 baseInfo,
3374 &numPageInfo,
3375 &highPage);
3376 } else if (_memRef) {
3377 memory_object_offset_t entryOffset;
3378
3379 entryOffset = mdOffset;
3380 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3381 if (entryOffset >= memRefEntry->size) {
3382 memRefEntry++;
3383 if (memRefEntry >= &_memRef->entries[_memRef->count]) {
3384 panic("memRefEntry");
3385 }
3386 entryOffset = 0;
3387 }
3388 if (ioplSize > (memRefEntry->size - entryOffset)) {
3389 ioplSize = (memRefEntry->size - entryOffset);
3390 }
3391 error = memory_object_iopl_request(memRefEntry->entry,
3392 entryOffset,
3393 &ioplSize,
3394 &iopl.fIOPL,
3395 baseInfo,
3396 &numPageInfo,
3397 &ioplFlags,
3398 tag);
3399 } else {
3400 assert(theMap);
3401 error = vm_map_create_upl(theMap,
3402 startPage,
3403 (upl_size_t*)&ioplSize,
3404 &iopl.fIOPL,
3405 baseInfo,
3406 &numPageInfo,
3407 &ioplFlags,
3408 tag);
3409 }
3410
3411 if (error != KERN_SUCCESS) {
3412 goto abortExit;
3413 }
3414
3415 assert(ioplSize);
3416
3417 if (iopl.fIOPL) {
3418 highPage = upl_get_highest_page(iopl.fIOPL);
3419 }
3420 if (highPage > highestPage) {
3421 highestPage = highPage;
3422 }
3423
3424 if (baseInfo->device) {
3425 numPageInfo = 1;
3426 iopl.fFlags = kIOPLOnDevice;
3427 } else {
3428 iopl.fFlags = 0;
3429 }
3430
3431 iopl.fIOMDOffset = mdOffset;
3432 iopl.fPageInfo = pageIndex;
3433 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) {
3434 dataP->fDiscontig = true;
3435 }
3436
3437 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3438 // Clean up the partially created and unsaved iopl
3439 if (iopl.fIOPL) {
3440 upl_abort(iopl.fIOPL, 0);
3441 upl_deallocate(iopl.fIOPL);
3442 }
3443 goto abortExit;
3444 }
3445 dataP = NULL;
3446
3447 // Check for multiple iopls in one virtual range
3448 pageIndex += numPageInfo;
3449 mdOffset -= iopl.fPageOffset;
3450 if (ioplSize < numBytes) {
3451 numBytes -= ioplSize;
3452 startPage += ioplSize;
3453 mdOffset += ioplSize;
3454 iopl.fPageOffset = 0;
3455 if (mapper) {
3456 iopl.fMappedPage = mapBase + pageIndex;
3457 }
3458 } else {
3459 mdOffset += numBytes;
3460 break;
3461 }
3462 }
3463 }
3464
3465 _highestPage = highestPage;
3466
3467 if (UPL_COPYOUT_FROM & uplFlags) {
3468 _flags |= kIOMemoryPreparedReadOnly;
3469 }
3470 }
3471
3472 #if IOTRACKING
3473 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
3474 dataP = getDataP(_memoryEntries);
3475 if (!dataP->fWireTracking.link.next) {
3476 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3477 }
3478 }
3479 #endif /* IOTRACKING */
3480
3481 return error;
3482
3483 abortExit:
3484 {
3485 dataP = getDataP(_memoryEntries);
3486 UInt done = getNumIOPL(_memoryEntries, dataP);
3487 ioPLBlock *ioplList = getIOPLList(dataP);
3488
3489 for (UInt range = 0; range < done; range++) {
3490 if (ioplList[range].fIOPL) {
3491 upl_abort(ioplList[range].fIOPL, 0);
3492 upl_deallocate(ioplList[range].fIOPL);
3493 }
3494 }
3495 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3496 }
3497
3498 if (error == KERN_FAILURE) {
3499 error = kIOReturnCannotWire;
3500 } else if (error == KERN_MEMORY_ERROR) {
3501 error = kIOReturnNoResources;
3502 }
3503
3504 return error;
3505 }
3506
3507 bool
3508 IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3509 {
3510 ioGMDData * dataP;
3511 unsigned dataSize = size;
3512
3513 if (!_memoryEntries) {
3514 _memoryEntries = OSData::withCapacity(dataSize);
3515 if (!_memoryEntries) {
3516 return false;
3517 }
3518 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
3519 return false;
3520 }
3521
3522 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
3523 dataP = getDataP(_memoryEntries);
3524
3525 if (mapper == kIOMapperWaitSystem) {
3526 IOMapper::checkForSystemMapper();
3527 mapper = IOMapper::gSystem;
3528 }
3529 dataP->fMapper = mapper;
3530 dataP->fPageCnt = 0;
3531 dataP->fMappedBase = 0;
3532 dataP->fDMAMapNumAddressBits = 64;
3533 dataP->fDMAMapAlignment = 0;
3534 dataP->fPreparationID = kIOPreparationIDUnprepared;
3535 dataP->fDiscontig = false;
3536 dataP->fCompletionError = false;
3537 dataP->fMappedBaseValid = false;
3538
3539 return true;
3540 }
3541
3542 IOReturn
3543 IOMemoryDescriptor::dmaMap(
3544 IOMapper * mapper,
3545 IODMACommand * command,
3546 const IODMAMapSpecification * mapSpec,
3547 uint64_t offset,
3548 uint64_t length,
3549 uint64_t * mapAddress,
3550 uint64_t * mapLength)
3551 {
3552 IOReturn err;
3553 uint32_t mapOptions;
3554
3555 mapOptions = 0;
3556 mapOptions |= kIODMAMapReadAccess;
3557 if (!(kIOMemoryPreparedReadOnly & _flags)) {
3558 mapOptions |= kIODMAMapWriteAccess;
3559 }
3560
3561 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3562 mapSpec, command, NULL, mapAddress, mapLength);
3563
3564 if (kIOReturnSuccess == err) {
3565 dmaMapRecord(mapper, command, *mapLength);
3566 }
3567
3568 return err;
3569 }
3570
3571 void
3572 IOMemoryDescriptor::dmaMapRecord(
3573 IOMapper * mapper,
3574 IODMACommand * command,
3575 uint64_t mapLength)
3576 {
3577 kern_allocation_name_t alloc;
3578 int16_t prior;
3579
3580 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
3581 kern_allocation_update_size(mapper->fAllocName, mapLength);
3582 }
3583
3584 if (!command) {
3585 return;
3586 }
3587 prior = OSAddAtomic16(1, &_dmaReferences);
3588 if (!prior) {
3589 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3590 _mapName = alloc;
3591 mapLength = _length;
3592 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3593 } else {
3594 _mapName = NULL;
3595 }
3596 }
3597 }
3598
3599 IOReturn
3600 IOMemoryDescriptor::dmaUnmap(
3601 IOMapper * mapper,
3602 IODMACommand * command,
3603 uint64_t offset,
3604 uint64_t mapAddress,
3605 uint64_t mapLength)
3606 {
3607 IOReturn ret;
3608 kern_allocation_name_t alloc;
3609 kern_allocation_name_t mapName;
3610 int16_t prior;
3611
3612 mapName = NULL;
3613 prior = 0;
3614 if (command) {
3615 mapName = _mapName;
3616 if (_dmaReferences) {
3617 prior = OSAddAtomic16(-1, &_dmaReferences);
3618 } else {
3619 panic("_dmaReferences underflow");
3620 }
3621 }
3622
3623 if (!mapLength) {
3624 return kIOReturnSuccess;
3625 }
3626
3627 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3628
3629 if ((alloc = mapper->fAllocName)) {
3630 kern_allocation_update_size(alloc, -mapLength);
3631 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3632 mapLength = _length;
3633 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3634 }
3635 }
3636
3637 return ret;
3638 }
3639
3640 IOReturn
3641 IOGeneralMemoryDescriptor::dmaMap(
3642 IOMapper * mapper,
3643 IODMACommand * command,
3644 const IODMAMapSpecification * mapSpec,
3645 uint64_t offset,
3646 uint64_t length,
3647 uint64_t * mapAddress,
3648 uint64_t * mapLength)
3649 {
3650 IOReturn err = kIOReturnSuccess;
3651 ioGMDData * dataP;
3652 IOOptionBits type = _flags & kIOMemoryTypeMask;
3653
3654 *mapAddress = 0;
3655 if (kIOMemoryHostOnly & _flags) {
3656 return kIOReturnSuccess;
3657 }
3658 if (kIOMemoryRemote & _flags) {
3659 return kIOReturnNotAttached;
3660 }
3661
3662 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3663 || offset || (length != _length)) {
3664 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3665 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
3666 const ioPLBlock * ioplList = getIOPLList(dataP);
3667 upl_page_info_t * pageList;
3668 uint32_t mapOptions = 0;
3669
3670 IODMAMapSpecification mapSpec;
3671 bzero(&mapSpec, sizeof(mapSpec));
3672 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3673 mapSpec.alignment = dataP->fDMAMapAlignment;
3674
3675 // For external UPLs the fPageInfo field points directly to
3676 // the upl's upl_page_info_t array.
3677 if (ioplList->fFlags & kIOPLExternUPL) {
3678 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3679 mapOptions |= kIODMAMapPagingPath;
3680 } else {
3681 pageList = getPageList(dataP);
3682 }
3683
3684 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
3685 mapOptions |= kIODMAMapPageListFullyOccupied;
3686 }
3687
3688 assert(dataP->fDMAAccess);
3689 mapOptions |= dataP->fDMAAccess;
3690
3691 // Check for direct device non-paged memory
3692 if (ioplList->fFlags & kIOPLOnDevice) {
3693 mapOptions |= kIODMAMapPhysicallyContiguous;
3694 }
3695
3696 IODMAMapPageList dmaPageList =
3697 {
3698 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3699 .pageListCount = _pages,
3700 .pageList = &pageList[0]
3701 };
3702 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3703 command, &dmaPageList, mapAddress, mapLength);
3704
3705 if (kIOReturnSuccess == err) {
3706 dmaMapRecord(mapper, command, *mapLength);
3707 }
3708 }
3709
3710 return err;
3711 }
3712
3713 /*
3714 * prepare
3715 *
3716 * Prepare the memory for an I/O transfer. This involves paging in
3717 * the memory, if necessary, and wiring it down for the duration of
3718 * the transfer. The complete() method completes the processing of
3719 * the memory after the I/O transfer finishes. This method needn't
3720 * be called for non-pageable memory.
3721 */
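/*
 * A minimal usage sketch (illustrative only): "md" stands for an
 * already-constructed IOMemoryDescriptor supplied by the caller, and the
 * device-specific transfer itself is elided.
 *
 *     IOReturn
 *     doTransfer(IOMemoryDescriptor * md)
 *     {
 *         IOReturn ret = md->prepare(kIODirectionOutIn);   // page in and wire down
 *         if (kIOReturnSuccess != ret) {
 *             return ret;
 *         }
 *         // ... program the hardware against md's physical segments ...
 *         md->complete(kIODirectionOutIn);                 // must pair with prepare()
 *         return kIOReturnSuccess;
 *     }
 */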
3722
3723 IOReturn
3724 IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3725 {
3726 IOReturn error = kIOReturnSuccess;
3727 IOOptionBits type = _flags & kIOMemoryTypeMask;
3728
3729 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3730 return kIOReturnSuccess;
3731 }
3732
3733 assert(!(kIOMemoryRemote & _flags));
3734 if (kIOMemoryRemote & _flags) {
3735 return kIOReturnNotAttached;
3736 }
3737
3738 if (_prepareLock) {
3739 IOLockLock(_prepareLock);
3740 }
3741
3742 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3743 error = wireVirtual(forDirection);
3744 }
3745
3746 if (kIOReturnSuccess == error) {
3747 if (1 == ++_wireCount) {
3748 if (kIOMemoryClearEncrypt & _flags) {
3749 performOperation(kIOMemoryClearEncrypted, 0, _length);
3750 }
3751 }
3752 }
3753
3754 if (_prepareLock) {
3755 IOLockUnlock(_prepareLock);
3756 }
3757
3758 return error;
3759 }
3760
3761 /*
3762 * complete
3763 *
3764 * Complete processing of the memory after an I/O transfer finishes.
3765 * This method should not be called unless a prepare was previously
3766 * issued; the prepare() and complete() must occur in pairs, before
3767 * and after an I/O transfer involving pageable memory.
3768 */
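/*
 * If the transfer fails, the error can be recorded at completion time so the
 * wired iopls are aborted rather than committed (see fCompletionError below).
 * A brief sketch (illustrative only; "md" is a hypothetical prepared
 * descriptor and "transferFailed" a placeholder for the driver's own error
 * state):
 *
 *     if (transferFailed) {
 *         md->complete((IODirection) (kIODirectionOutIn | kIODirectionCompleteWithError));
 *     } else {
 *         md->complete(kIODirectionOutIn);
 *     }
 */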
3769
3770 IOReturn
3771 IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3772 {
3773 IOOptionBits type = _flags & kIOMemoryTypeMask;
3774 ioGMDData * dataP;
3775
3776 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3777 return kIOReturnSuccess;
3778 }
3779
3780 assert(!(kIOMemoryRemote & _flags));
3781 if (kIOMemoryRemote & _flags) {
3782 return kIOReturnNotAttached;
3783 }
3784
3785 if (_prepareLock) {
3786 IOLockLock(_prepareLock);
3787 }
3788 do{
3789 assert(_wireCount);
3790 if (!_wireCount) {
3791 break;
3792 }
3793 dataP = getDataP(_memoryEntries);
3794 if (!dataP) {
3795 break;
3796 }
3797
3798 if (kIODirectionCompleteWithError & forDirection) {
3799 dataP->fCompletionError = true;
3800 }
3801
3802 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
3803 performOperation(kIOMemorySetEncrypted, 0, _length);
3804 }
3805
3806 _wireCount--;
3807 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
3808 ioPLBlock *ioplList = getIOPLList(dataP);
3809 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3810
3811 if (_wireCount) {
3812 // kIODirectionCompleteWithDataValid & forDirection
3813 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3814 vm_tag_t tag;
3815 tag = getVMTag(kernel_map);
3816 for (ind = 0; ind < count; ind++) {
3817 if (ioplList[ind].fIOPL) {
3818 iopl_valid_data(ioplList[ind].fIOPL, tag);
3819 }
3820 }
3821 }
3822 } else {
3823 if (_dmaReferences) {
3824 panic("complete() while dma active");
3825 }
3826
3827 if (dataP->fMappedBaseValid) {
3828 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3829 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3830 }
3831 #if IOTRACKING
3832 if (dataP->fWireTracking.link.next) {
3833 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3834 }
3835 #endif /* IOTRACKING */
3836 // Only complete iopls that we created which are for TypeVirtual
3837 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3838 for (ind = 0; ind < count; ind++) {
3839 if (ioplList[ind].fIOPL) {
3840 if (dataP->fCompletionError) {
3841 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3842 } else {
3843 upl_commit(ioplList[ind].fIOPL, NULL, 0);
3844 }
3845 upl_deallocate(ioplList[ind].fIOPL);
3846 }
3847 }
3848 } else if (kIOMemoryTypeUPL == type) {
3849 upl_set_referenced(ioplList[0].fIOPL, false);
3850 }
3851
3852 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3853
3854 dataP->fPreparationID = kIOPreparationIDUnprepared;
3855 _flags &= ~kIOMemoryPreparedReadOnly;
3856 }
3857 }
3858 }while (false);
3859
3860 if (_prepareLock) {
3861 IOLockUnlock(_prepareLock);
3862 }
3863
3864 return kIOReturnSuccess;
3865 }
3866
3867 IOReturn
3868 IOGeneralMemoryDescriptor::doMap(
3869 vm_map_t __addressMap,
3870 IOVirtualAddress * __address,
3871 IOOptionBits options,
3872 IOByteCount __offset,
3873 IOByteCount __length )
3874 {
3875 #ifndef __LP64__
3876 if (!(kIOMap64Bit & options)) {
3877 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3878 }
3879 #endif /* !__LP64__ */
3880
3881 kern_return_t err;
3882
3883 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3884 mach_vm_size_t offset = mapping->fOffset + __offset;
3885 mach_vm_size_t length = mapping->fLength;
3886
3887 IOOptionBits type = _flags & kIOMemoryTypeMask;
3888 Ranges vec = _ranges;
3889
3890 mach_vm_address_t range0Addr = 0;
3891 mach_vm_size_t range0Len = 0;
3892
3893 if ((offset >= _length) || ((offset + length) > _length)) {
3894 return kIOReturnBadArgument;
3895 }
3896
3897 assert(!(kIOMemoryRemote & _flags));
3898 if (kIOMemoryRemote & _flags) {
3899 return 0;
3900 }
3901
3902 if (vec.v) {
3903 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3904 }
3905
3906 // mapping source == dest? (could be much better)
3907 if (_task
3908 && (mapping->fAddressTask == _task)
3909 && (mapping->fAddressMap == get_task_map(_task))
3910 && (options & kIOMapAnywhere)
3911 && (!(kIOMapUnique & options))
3912 && (1 == _rangesCount)
3913 && (0 == offset)
3914 && range0Addr
3915 && (length <= range0Len)) {
3916 mapping->fAddress = range0Addr;
3917 mapping->fOptions |= kIOMapStatic;
3918
3919 return kIOReturnSuccess;
3920 }
3921
3922 if (!_memRef) {
3923 IOOptionBits createOptions = 0;
3924 if (!(kIOMapReadOnly & options)) {
3925 createOptions |= kIOMemoryReferenceWrite;
3926 #if DEVELOPMENT || DEBUG
3927 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
3928 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
3929 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3930 }
3931 #endif
3932 }
3933 err = memoryReferenceCreate(createOptions, &_memRef);
3934 if (kIOReturnSuccess != err) {
3935 return err;
3936 }
3937 }
3938
3939 memory_object_t pager;
3940 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
3941
3942 // <upl_transpose //
3943 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
3944 do{
3945 upl_t redirUPL2;
3946 upl_size_t size;
3947 upl_control_flags_t flags;
3948 unsigned int lock_count;
3949
3950 if (!_memRef || (1 != _memRef->count)) {
3951 err = kIOReturnNotReadable;
3952 break;
3953 }
3954
3955 size = round_page(mapping->fLength);
3956 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3957 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3958
3959 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3960 NULL, NULL,
3961 &flags, getVMTag(kernel_map))) {
3962 redirUPL2 = NULL;
3963 }
3964
3965 for (lock_count = 0;
3966 IORecursiveLockHaveLock(gIOMemoryLock);
3967 lock_count++) {
3968 UNLOCK;
3969 }
3970 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3971 for (;
3972 lock_count;
3973 lock_count--) {
3974 LOCK;
3975 }
3976
3977 if (kIOReturnSuccess != err) {
3978 IOLog("upl_transpose(%x)\n", err);
3979 err = kIOReturnSuccess;
3980 }
3981
3982 if (redirUPL2) {
3983 upl_commit(redirUPL2, NULL, 0);
3984 upl_deallocate(redirUPL2);
3985 redirUPL2 = NULL;
3986 }
3987 {
3988 // swap the memEntries since they now refer to different vm_objects
3989 IOMemoryReference * me = _memRef;
3990 _memRef = mapping->fMemory->_memRef;
3991 mapping->fMemory->_memRef = me;
3992 }
3993 if (pager) {
3994 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3995 }
3996 }while (false);
3997 }
3998 // upl_transpose> //
3999 else {
4000 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4001 #if IOTRACKING
4002 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4003 // in the default-on development case only DRAM maps are tracked
4004 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4005 }
4006 #endif /* IOTRACKING */
4007 if ((err == KERN_SUCCESS) && pager) {
4008 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4009
4010 if (err != KERN_SUCCESS) {
4011 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4012 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4013 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4014 }
4015 }
4016 }
4017
4018 return err;
4019 }
4020
4021 #if IOTRACKING
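// Map-tracking callback: recover the IOMemoryMap that embeds this
// IOTrackingUser (container_of-style arithmetic via iomap_offsetof below)
// and report the map's task, address and length.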
4022 IOReturn
4023 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
4024 mach_vm_address_t * address, mach_vm_size_t * size)
4025 {
4026 #define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4027
4028 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
4029
4030 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4031 return kIOReturnNotReady;
4032 }
4033
4034 *task = map->fAddressTask;
4035 *address = map->fAddress;
4036 *size = map->fLength;
4037
4038 return kIOReturnSuccess;
4039 }
4040 #endif /* IOTRACKING */
4041
4042 IOReturn
4043 IOGeneralMemoryDescriptor::doUnmap(
4044 vm_map_t addressMap,
4045 IOVirtualAddress __address,
4046 IOByteCount __length )
4047 {
4048 return super::doUnmap(addressMap, __address, __length);
4049 }
4050
4051 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4052
4053 #undef super
4054 #define super OSObject
4055
4056 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
4057
4058 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
4059 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
4060 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
4061 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
4062 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
4063 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
4064 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
4065 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
4066
4067 /* ex-inline function implementation */
4068 IOPhysicalAddress
4069 IOMemoryMap::getPhysicalAddress()
4070 {
4071 return getPhysicalSegment( 0, NULL );
4072 }
4073
4074 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4075
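// Record the target task, address, options and range for a new mapping and
// take a reference on the task's vm_map; the actual VM mapping is created
// later by IOMemoryDescriptor::makeMapping()/doMap().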
4076 bool
4077 IOMemoryMap::init(
4078 task_t intoTask,
4079 mach_vm_address_t toAddress,
4080 IOOptionBits _options,
4081 mach_vm_size_t _offset,
4082 mach_vm_size_t _length )
4083 {
4084 if (!intoTask) {
4085 return false;
4086 }
4087
4088 if (!super::init()) {
4089 return false;
4090 }
4091
4092 fAddressMap = get_task_map(intoTask);
4093 if (!fAddressMap) {
4094 return false;
4095 }
4096 vm_map_reference(fAddressMap);
4097
4098 fAddressTask = intoTask;
4099 fOptions = _options;
4100 fLength = _length;
4101 fOffset = _offset;
4102 fAddress = toAddress;
4103
4104 return true;
4105 }
4106
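// Attach this map to a memory descriptor: validate that the mapped range
// fits inside the descriptor (unless this is a submap), retain the new
// descriptor and drop any previously attached one.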
4107 bool
4108 IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
4109 {
4110 if (!_memory) {
4111 return false;
4112 }
4113
4114 if (!fSuperMap) {
4115 if ((_offset + fLength) > _memory->getLength()) {
4116 return false;
4117 }
4118 fOffset = _offset;
4119 }
4120
4121 _memory->retain();
4122 if (fMemory) {
4123 if (fMemory != _memory) {
4124 fMemory->removeMapping(this);
4125 }
4126 fMemory->release();
4127 }
4128 fMemory = _memory;
4129
4130 return true;
4131 }
4132
4133 IOReturn
4134 IOMemoryDescriptor::doMap(
4135 vm_map_t __addressMap,
4136 IOVirtualAddress * __address,
4137 IOOptionBits options,
4138 IOByteCount __offset,
4139 IOByteCount __length )
4140 {
4141 return kIOReturnUnsupported;
4142 }
4143
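// While the descriptor is redirected, block the faulting thread until
// redirect(..., false) clears kIOMemoryRedirected and wakes us.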
4144 IOReturn
4145 IOMemoryDescriptor::handleFault(
4146 void * _pager,
4147 mach_vm_size_t sourceOffset,
4148 mach_vm_size_t length)
4149 {
4150 if (kIOMemoryRedirected & _flags) {
4151 #if DEBUG
4152 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
4153 #endif
4154 do {
4155 SLEEP;
4156 } while (kIOMemoryRedirected & _flags);
4157 }
4158 return kIOReturnSuccess;
4159 }
4160
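// Walk the descriptor's physical segments and hand each page to the device
// pager via device_pager_populate_object(). For kernel mappings the pages
// are also pre-faulted here, since faults can't be serviced at interrupt
// level.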
4161 IOReturn
4162 IOMemoryDescriptor::populateDevicePager(
4163 void * _pager,
4164 vm_map_t addressMap,
4165 mach_vm_address_t address,
4166 mach_vm_size_t sourceOffset,
4167 mach_vm_size_t length,
4168 IOOptionBits options )
4169 {
4170 IOReturn err = kIOReturnSuccess;
4171 memory_object_t pager = (memory_object_t) _pager;
4172 mach_vm_size_t size;
4173 mach_vm_size_t bytes;
4174 mach_vm_size_t page;
4175 mach_vm_size_t pageOffset;
4176 mach_vm_size_t pagerOffset;
4177 IOPhysicalLength segLen, chunk;
4178 addr64_t physAddr;
4179 IOOptionBits type;
4180
4181 type = _flags & kIOMemoryTypeMask;
4182
4183 if (reserved->dp.pagerContig) {
4184 sourceOffset = 0;
4185 pagerOffset = 0;
4186 }
4187
4188 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
4189 assert( physAddr );
4190 pageOffset = physAddr - trunc_page_64( physAddr );
4191 pagerOffset = sourceOffset;
4192
4193 size = length + pageOffset;
4194 physAddr -= pageOffset;
4195
4196 segLen += pageOffset;
4197 bytes = size;
4198 do{
4199 // in the middle of the loop only map whole pages
4200 if (segLen >= bytes) {
4201 segLen = bytes;
4202 } else if (segLen != trunc_page_64(segLen)) {
4203 err = kIOReturnVMError;
4204 }
4205 if (physAddr != trunc_page_64(physAddr)) {
4206 err = kIOReturnBadArgument;
4207 }
4208
4209 if (kIOReturnSuccess != err) {
4210 break;
4211 }
4212
4213 #if DEBUG || DEVELOPMENT
4214 if ((kIOMemoryTypeUPL != type)
4215 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) {
4216 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
4217 }
4218 #endif /* DEBUG || DEVELOPMENT */
4219
4220 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
4221 for (page = 0;
4222 (page < segLen) && (KERN_SUCCESS == err);
4223 page += chunk) {
4224 err = device_pager_populate_object(pager, pagerOffset,
4225 (ppnum_t)(atop_64(physAddr + page)), chunk);
4226 pagerOffset += chunk;
4227 }
4228
4229 assert(KERN_SUCCESS == err);
4230 if (err) {
4231 break;
4232 }
4233
4234 // For kernel mappings, this call to vm_fault forces an early pmap-level
4235 // resolution of the mappings created above, since faulting them in
4236 // later can't take place at interrupt level.
4237 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
4238 err = vm_fault(addressMap,
4239 (vm_map_offset_t)trunc_page_64(address),
4240 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
4241 FALSE, VM_KERN_MEMORY_NONE,
4242 THREAD_UNINT, NULL,
4243 (vm_map_offset_t)0);
4244
4245 if (KERN_SUCCESS != err) {
4246 break;
4247 }
4248 }
4249
4250 sourceOffset += segLen - pageOffset;
4251 address += segLen;
4252 bytes -= segLen;
4253 pageOffset = 0;
4254 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
4255
4256 if (bytes) {
4257 err = kIOReturnBadArgument;
4258 }
4259
4260 return err;
4261 }
4262
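// Tear down a mapping. The IOVirtualAddress argument actually carries the
// IOMemoryMap object (__length must be zero); the VM range is deallocated
// unless the map was created with kIOMapOverwrite.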
4263 IOReturn
4264 IOMemoryDescriptor::doUnmap(
4265 vm_map_t addressMap,
4266 IOVirtualAddress __address,
4267 IOByteCount __length )
4268 {
4269 IOReturn err;
4270 IOMemoryMap * mapping;
4271 mach_vm_address_t address;
4272 mach_vm_size_t length;
4273
4274 if (__length) {
4275 panic("doUnmap");
4276 }
4277
4278 mapping = (IOMemoryMap *) __address;
4279 addressMap = mapping->fAddressMap;
4280 address = mapping->fAddress;
4281 length = mapping->fLength;
4282
4283 if (kIOMapOverwrite & mapping->fOptions) {
4284 err = KERN_SUCCESS;
4285 } else {
4286 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
4287 addressMap = IOPageableMapForAddress( address );
4288 }
4289 #if DEBUG
4290 if (kIOLogMapping & gIOKitDebug) {
4291 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4292 addressMap, address, length );
4293 }
4294 #endif
4295 err = mach_vm_deallocate( addressMap, address, length );
4296 }
4297
4298 #if IOTRACKING
4299 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
4300 #endif /* IOTRACKING */
4301
4302 return err;
4303 }
4304
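// Mark the descriptor (and each of its mappings) as redirected or not.
// Turning redirection off wakes any threads sleeping in handleFault() and,
// for kernel mappings with a device pager and no safeTask, repopulates the
// pager.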
4305 IOReturn
4306 IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
4307 {
4308 IOReturn err = kIOReturnSuccess;
4309 IOMemoryMap * mapping = NULL;
4310 OSIterator * iter;
4311
4312 LOCK;
4313
4314 if (doRedirect) {
4315 _flags |= kIOMemoryRedirected;
4316 } else {
4317 _flags &= ~kIOMemoryRedirected;
4318 }
4319
4320 do {
4321 if ((iter = OSCollectionIterator::withCollection( _mappings))) {
4322 memory_object_t pager;
4323
4324 if (reserved) {
4325 pager = (memory_object_t) reserved->dp.devicePager;
4326 } else {
4327 pager = MACH_PORT_NULL;
4328 }
4329
4330 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
4331 mapping->redirect( safeTask, doRedirect );
4332 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
4333 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4334 }
4335 }
4336
4337 iter->release();
4338 }
4339 } while (false);
4340
4341 if (!doRedirect) {
4342 WAKEUP;
4343 }
4344
4345 UNLOCK;
4346
4347 #ifndef __LP64__
4348 // temporary binary compatibility
4349 IOSubMemoryDescriptor * subMem;
4350 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
4351 err = subMem->redirect( safeTask, doRedirect );
4352 } else {
4353 err = kIOReturnSuccess;
4354 }
4355 #endif /* !__LP64__ */
4356
4357 return err;
4358 }
4359
4360 IOReturn
4361 IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
4362 {
4363 IOReturn err = kIOReturnSuccess;
4364
4365 if (fSuperMap) {
4366 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4367 } else {
4368 LOCK;
4369
4370 do{
4371 if (!fAddress) {
4372 break;
4373 }
4374 if (!fAddressMap) {
4375 break;
4376 }
4377
4378 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4379 && (0 == (fOptions & kIOMapStatic))) {
4380 IOUnmapPages( fAddressMap, fAddress, fLength );
4381 err = kIOReturnSuccess;
4382 #if DEBUG
4383 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
4384 #endif
4385 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
4386 IOOptionBits newMode;
4387 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4388 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4389 }
4390 }while (false);
4391 UNLOCK;
4392 }
4393
4394 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4395 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4396 && safeTask
4397 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
4398 fMemory->redirect(safeTask, doRedirect);
4399 }
4400
4401 return err;
4402 }
4403
4404 IOReturn
4405 IOMemoryMap::unmap( void )
4406 {
4407 IOReturn err;
4408
4409 LOCK;
4410
4411 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
4412 && (0 == (kIOMapStatic & fOptions))) {
4413 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4414 } else {
4415 err = kIOReturnSuccess;
4416 }
4417
4418 if (fAddressMap) {
4419 vm_map_deallocate(fAddressMap);
4420 fAddressMap = NULL;
4421 }
4422
4423 fAddress = 0;
4424
4425 UNLOCK;
4426
4427 return err;
4428 }
4429
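// Called when the mapping's task terminates: unmap only if userClientUnmap()
// was requested, otherwise just drop the tracking entry, then release the
// vm_map reference and clear the address/task fields.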
4430 void
4431 IOMemoryMap::taskDied( void )
4432 {
4433 LOCK;
4434 if (fUserClientUnmap) {
4435 unmap();
4436 }
4437 #if IOTRACKING
4438 else {
4439 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4440 }
4441 #endif /* IOTRACKING */
4442
4443 if (fAddressMap) {
4444 vm_map_deallocate(fAddressMap);
4445 fAddressMap = NULL;
4446 }
4447 fAddressTask = NULL;
4448 fAddress = 0;
4449 UNLOCK;
4450 }
4451
4452 IOReturn
4453 IOMemoryMap::userClientUnmap( void )
4454 {
4455 fUserClientUnmap = true;
4456 return kIOReturnSuccess;
4457 }
4458
4459 // Overload the release mechanism. Every mapping must be a member of
4460 // its memory descriptor's _mappings set, which means there are always
4461 // 2 references on a mapping. When either of those references is
4462 // released we need to free ourselves.
4463 void
4464 IOMemoryMap::taggedRelease(const void *tag) const
4465 {
4466 LOCK;
4467 super::taggedRelease(tag, 2);
4468 UNLOCK;
4469 }
4470
4471 void
4472 IOMemoryMap::free()
4473 {
4474 unmap();
4475
4476 if (fMemory) {
4477 LOCK;
4478 fMemory->removeMapping(this);
4479 UNLOCK;
4480 fMemory->release();
4481 }
4482
4483 if (fOwner && (fOwner != fMemory)) {
4484 LOCK;
4485 fOwner->removeMapping(this);
4486 UNLOCK;
4487 }
4488
4489 if (fSuperMap) {
4490 fSuperMap->release();
4491 }
4492
4493 if (fRedirUPL) {
4494 upl_commit(fRedirUPL, NULL, 0);
4495 upl_deallocate(fRedirUPL);
4496 }
4497
4498 super::free();
4499 }
4500
4501 IOByteCount
4502 IOMemoryMap::getLength()
4503 {
4504 return fLength;
4505 }
4506
4507 IOVirtualAddress
4508 IOMemoryMap::getVirtualAddress()
4509 {
4510 #ifndef __LP64__
4511 if (fSuperMap) {
4512 fSuperMap->getVirtualAddress();
4513 } else if (fAddressMap
4514 && vm_map_is_64bit(fAddressMap)
4515 && (sizeof(IOVirtualAddress) < 8)) {
4516 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4517 }
4518 #endif /* !__LP64__ */
4519
4520 return fAddress;
4521 }
4522
4523 #ifndef __LP64__
4524 mach_vm_address_t
4525 IOMemoryMap::getAddress()
4526 {
4527 return fAddress;
4528 }
4529
4530 mach_vm_size_t
4531 IOMemoryMap::getSize()
4532 {
4533 return fLength;
4534 }
4535 #endif /* !__LP64__ */
4536
4537
4538 task_t
4539 IOMemoryMap::getAddressTask()
4540 {
4541 if (fSuperMap) {
4542 return fSuperMap->getAddressTask();
4543 } else {
4544 return fAddressTask;
4545 }
4546 }
4547
4548 IOOptionBits
4549 IOMemoryMap::getMapOptions()
4550 {
4551 return fOptions;
4552 }
4553
4554 IOMemoryDescriptor *
4555 IOMemoryMap::getMemoryDescriptor()
4556 {
4557 return fMemory;
4558 }
4559
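// Decide whether this existing mapping can satisfy a new request: the task,
// protection, cache mode, placement and range must all be compatible.
// Returns this map (retained) for an exact match, configures newMapping as a
// submap for a contained sub-range, or returns NULL if incompatible.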
4560 IOMemoryMap *
4561 IOMemoryMap::copyCompatible(
4562 IOMemoryMap * newMapping )
4563 {
4564 task_t task = newMapping->getAddressTask();
4565 mach_vm_address_t toAddress = newMapping->fAddress;
4566 IOOptionBits _options = newMapping->fOptions;
4567 mach_vm_size_t _offset = newMapping->fOffset;
4568 mach_vm_size_t _length = newMapping->fLength;
4569
4570 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
4571 return NULL;
4572 }
4573 if ((fOptions ^ _options) & kIOMapReadOnly) {
4574 return NULL;
4575 }
4576 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
4577 && ((fOptions ^ _options) & kIOMapCacheMask)) {
4578 return NULL;
4579 }
4580
4581 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
4582 return NULL;
4583 }
4584
4585 if (_offset < fOffset) {
4586 return NULL;
4587 }
4588
4589 _offset -= fOffset;
4590
4591 if ((_offset + _length) > fLength) {
4592 return NULL;
4593 }
4594
4595 retain();
4596 if ((fLength == _length) && (!_offset)) {
4597 newMapping = this;
4598 } else {
4599 newMapping->fSuperMap = this;
4600 newMapping->fOffset = fOffset + _offset;
4601 newMapping->fAddress = fAddress + _offset;
4602 }
4603
4604 return newMapping;
4605 }
4606
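// Wire or unwire the page-aligned span covering [fAddress + offset, + length):
// passing direction bits in options wires with the corresponding protection,
// passing none unwires.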
4607 IOReturn
4608 IOMemoryMap::wireRange(
4609 uint32_t options,
4610 mach_vm_size_t offset,
4611 mach_vm_size_t length)
4612 {
4613 IOReturn kr;
4614 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4615 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4616 vm_prot_t prot;
4617
4618 prot = (kIODirectionOutIn & options);
4619 if (prot) {
4620 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4621 } else {
4622 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4623 }
4624
4625 return kr;
4626 }
4627
4628
4629 IOPhysicalAddress
4630 #ifdef __LP64__
4631 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4632 #else /* !__LP64__ */
4633 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4634 #endif /* !__LP64__ */
4635 {
4636 IOPhysicalAddress address;
4637
4638 LOCK;
4639 #ifdef __LP64__
4640 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4641 #else /* !__LP64__ */
4642 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4643 #endif /* !__LP64__ */
4644 UNLOCK;
4645
4646 return address;
4647 }
4648
4649 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4650
4651 #undef super
4652 #define super OSObject
4653
4654 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4655
4656 void
4657 IOMemoryDescriptor::initialize( void )
4658 {
4659 if (NULL == gIOMemoryLock) {
4660 gIOMemoryLock = IORecursiveLockAlloc();
4661 }
4662
4663 gIOLastPage = IOGetLastPageNumber();
4664 }
4665
4666 void
4667 IOMemoryDescriptor::free( void )
4668 {
4669 if (_mappings) {
4670 _mappings->release();
4671 }
4672
4673 if (reserved) {
4674 cleanKernelReserved(reserved);
4675 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4676 reserved = NULL;
4677 }
4678 super::free();
4679 }
4680
4681 IOMemoryMap *
4682 IOMemoryDescriptor::setMapping(
4683 task_t intoTask,
4684 IOVirtualAddress mapAddress,
4685 IOOptionBits options )
4686 {
4687 return createMappingInTask( intoTask, mapAddress,
4688 options | kIOMapStatic,
4689 0, getLength());
4690 }
4691
4692 IOMemoryMap *
4693 IOMemoryDescriptor::map(
4694 IOOptionBits options )
4695 {
4696 return createMappingInTask( kernel_task, 0,
4697 options | kIOMapAnywhere,
4698 0, getLength());
4699 }
4700
4701 #ifndef __LP64__
4702 IOMemoryMap *
4703 IOMemoryDescriptor::map(
4704 task_t intoTask,
4705 IOVirtualAddress atAddress,
4706 IOOptionBits options,
4707 IOByteCount offset,
4708 IOByteCount length )
4709 {
4710 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
4711 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4712 return NULL;
4713 }
4714
4715 return createMappingInTask(intoTask, atAddress,
4716 options, offset, length);
4717 }
4718 #endif /* !__LP64__ */
4719
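// Public entry point for mapping a descriptor into a task: allocate an
// IOMemoryMap, initialize it with the requested task/address/options/range
// (length defaults to getLength()) and hand it to makeMapping() with
// kIOMap64Bit. A minimal, hypothetical caller sketch (driver-side code, not
// part of this file; "desc" is an assumed IOMemoryDescriptor pointer):
//
//     IOMemoryMap * map = desc->createMappingInTask(kernel_task, 0,
//                             kIOMapAnywhere | kIOMapReadOnly);
//     if (map) {
//         IOVirtualAddress va = map->getVirtualAddress();
//         // ... use the mapping ...
//         map->release();   // also unmaps, see IOMemoryMap::taggedRelease()/free()
//     }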
4720 IOMemoryMap *
4721 IOMemoryDescriptor::createMappingInTask(
4722 task_t intoTask,
4723 mach_vm_address_t atAddress,
4724 IOOptionBits options,
4725 mach_vm_size_t offset,
4726 mach_vm_size_t length)
4727 {
4728 IOMemoryMap * result;
4729 IOMemoryMap * mapping;
4730
4731 if (0 == length) {
4732 length = getLength();
4733 }
4734
4735 mapping = new IOMemoryMap;
4736
4737 if (mapping
4738 && !mapping->init( intoTask, atAddress,
4739 options, offset, length )) {
4740 mapping->release();
4741 mapping = NULL;
4742 }
4743
4744 if (mapping) {
4745 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4746 } else {
4747 result = NULL;
4748 }
4749
4750 #if DEBUG
4751 if (!result) {
4752 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4753 this, atAddress, (uint32_t) options, offset, length);
4754 }
4755 #endif
4756
4757 return result;
4758 }
4759
4760 #ifndef __LP64__ // LP64 builds have only the 64-bit version below
4761 IOReturn
4762 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4763 IOOptionBits options,
4764 IOByteCount offset)
4765 {
4766 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
4767 }
4768 #endif
4769
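// Retarget an existing (kIOMapUnique-style) mapping at new backing memory:
// block access to the current pages with a UPL, unmap them, then rebuild the
// mapping against newBackingMemory via makeMapping(kIOMapUnique |
// kIOMapReference).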
4770 IOReturn
4771 IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4772 IOOptionBits options,
4773 mach_vm_size_t offset)
4774 {
4775 IOReturn err = kIOReturnSuccess;
4776 IOMemoryDescriptor * physMem = NULL;
4777
4778 LOCK;
4779
4780 if (fAddress && fAddressMap) {
4781 do{
4782 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4783 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4784 physMem = fMemory;
4785 physMem->retain();
4786 }
4787
4788 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
4789 upl_size_t size = round_page(fLength);
4790 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4791 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4792 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4793 NULL, NULL,
4794 &flags, fMemory->getVMTag(kernel_map))) {
4795 fRedirUPL = NULL;
4796 }
4797
4798 if (physMem) {
4799 IOUnmapPages( fAddressMap, fAddress, fLength );
4800 if ((false)) {
4801 physMem->redirect(NULL, true);
4802 }
4803 }
4804 }
4805
4806 if (newBackingMemory) {
4807 if (newBackingMemory != fMemory) {
4808 fOffset = 0;
4809 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4810 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4811 offset, fLength)) {
4812 err = kIOReturnError;
4813 }
4814 }
4815 if (fRedirUPL) {
4816 upl_commit(fRedirUPL, NULL, 0);
4817 upl_deallocate(fRedirUPL);
4818 fRedirUPL = NULL;
4819 }
4820 if ((false) && physMem) {
4821 physMem->redirect(NULL, false);
4822 }
4823 }
4824 }while (false);
4825 }
4826
4827 UNLOCK;
4828
4829 if (physMem) {
4830 physMem->release();
4831 }
4832
4833 return err;
4834 }
4835
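// Central mapping factory. Depending on options it adopts a static mapping
// as-is, may build a temporary physical descriptor for kIOMapUnique, reuses
// a compatible existing mapping, or calls doMap() to create a new one. The
// IOMemoryMap passed in via __address is consumed: it becomes the result on
// success and is released otherwise.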
4836 IOMemoryMap *
4837 IOMemoryDescriptor::makeMapping(
4838 IOMemoryDescriptor * owner,
4839 task_t __intoTask,
4840 IOVirtualAddress __address,
4841 IOOptionBits options,
4842 IOByteCount __offset,
4843 IOByteCount __length )
4844 {
4845 #ifndef __LP64__
4846 if (!(kIOMap64Bit & options)) {
4847 panic("IOMemoryDescriptor::makeMapping !64bit");
4848 }
4849 #endif /* !__LP64__ */
4850
4851 IOMemoryDescriptor * mapDesc = NULL;
4852 __block IOMemoryMap * result = NULL;
4853
4854 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4855 mach_vm_size_t offset = mapping->fOffset + __offset;
4856 mach_vm_size_t length = mapping->fLength;
4857
4858 mapping->fOffset = offset;
4859
4860 LOCK;
4861
4862 do{
4863 if (kIOMapStatic & options) {
4864 result = mapping;
4865 addMapping(mapping);
4866 mapping->setMemoryDescriptor(this, 0);
4867 continue;
4868 }
4869
4870 if (kIOMapUnique & options) {
4871 addr64_t phys;
4872 IOByteCount physLen;
4873
4874 // if (owner != this) continue;
4875
4876 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4877 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4878 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4879 if (!phys || (physLen < length)) {
4880 continue;
4881 }
4882
4883 mapDesc = IOMemoryDescriptor::withAddressRange(
4884 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4885 if (!mapDesc) {
4886 continue;
4887 }
4888 offset = 0;
4889 mapping->fOffset = offset;
4890 }
4891 } else {
4892 // look for a compatible existing mapping
4893 if (_mappings) {
4894 _mappings->iterateObjects(^(OSObject * object)
4895 {
4896 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
4897 if ((result = lookMapping->copyCompatible(mapping))) {
4898 addMapping(result);
4899 result->setMemoryDescriptor(this, offset);
4900 return true;
4901 }
4902 return false;
4903 });
4904 }
4905 if (result || (options & kIOMapReference)) {
4906 if (result != mapping) {
4907 mapping->release();
4908 mapping = NULL;
4909 }
4910 continue;
4911 }
4912 }
4913
4914 if (!mapDesc) {
4915 mapDesc = this;
4916 mapDesc->retain();
4917 }
4918 IOReturn
4919 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
4920 if (kIOReturnSuccess == kr) {
4921 result = mapping;
4922 mapDesc->addMapping(result);
4923 result->setMemoryDescriptor(mapDesc, offset);
4924 } else {
4925 mapping->release();
4926 mapping = NULL;
4927 }
4928 }while (false);
4929
4930 UNLOCK;
4931
4932 if (mapDesc) {
4933 mapDesc->release();
4934 }
4935
4936 return result;
4937 }
4938
4939 void
4940 IOMemoryDescriptor::addMapping(
4941 IOMemoryMap * mapping )
4942 {
4943 if (mapping) {
4944 if (NULL == _mappings) {
4945 _mappings = OSSet::withCapacity(1);
4946 }
4947 if (_mappings) {
4948 _mappings->setObject( mapping );
4949 }
4950 }
4951 }
4952
4953 void
4954 IOMemoryDescriptor::removeMapping(
4955 IOMemoryMap * mapping )
4956 {
4957 if (_mappings) {
4958 _mappings->removeObject( mapping);
4959 }
4960 }
4961
4962 #ifndef __LP64__
4963 // obsolete initializers
4964 // - initWithOptions is the designated initializer
4965 bool
4966 IOMemoryDescriptor::initWithAddress(void * address,
4967 IOByteCount length,
4968 IODirection direction)
4969 {
4970 return false;
4971 }
4972
4973 bool
4974 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4975 IOByteCount length,
4976 IODirection direction,
4977 task_t task)
4978 {
4979 return false;
4980 }
4981
4982 bool
4983 IOMemoryDescriptor::initWithPhysicalAddress(
4984 IOPhysicalAddress address,
4985 IOByteCount length,
4986 IODirection direction )
4987 {
4988 return false;
4989 }
4990
4991 bool
4992 IOMemoryDescriptor::initWithRanges(
4993 IOVirtualRange * ranges,
4994 UInt32 withCount,
4995 IODirection direction,
4996 task_t task,
4997 bool asReference)
4998 {
4999 return false;
5000 }
5001
5002 bool
5003 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
5004 UInt32 withCount,
5005 IODirection direction,
5006 bool asReference)
5007 {
5008 return false;
5009 }
5010
5011 void *
5012 IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5013 IOByteCount * lengthOfSegment)
5014 {
5015 return NULL;
5016 }
5017 #endif /* !__LP64__ */
5018
5019 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5020
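// Serialize the descriptor as an OSArray of { "address", "length" }
// dictionaries. The range data is snapshotted under the lock into a
// temporary buffer first so that no allocations happen while the lock is
// held.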
5021 bool
5022 IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5023 {
5024 OSSymbol const *keys[2] = {NULL};
5025 OSObject *values[2] = {NULL};
5026 OSArray * array;
5027 vm_size_t vcopy_size;
5028
5029 struct SerData {
5030 user_addr_t address;
5031 user_size_t length;
5032 } *vcopy = NULL;
5033 unsigned int index, nRanges;
5034 bool result = false;
5035
5036 IOOptionBits type = _flags & kIOMemoryTypeMask;
5037
5038 if (s == NULL) {
5039 return false;
5040 }
5041
5042 array = OSArray::withCapacity(4);
5043 if (!array) {
5044 return false;
5045 }
5046
5047 nRanges = _rangesCount;
5048 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
5049 result = false;
5050 goto bail;
5051 }
5052 vcopy = (SerData *) IOMalloc(vcopy_size);
5053 if (vcopy == NULL) {
5054 result = false;
5055 goto bail;
5056 }
5057
5058 keys[0] = OSSymbol::withCString("address");
5059 keys[1] = OSSymbol::withCString("length");
5060
5061 // Copy the volatile data so we don't have to allocate memory
5062 // while the lock is held.
5063 LOCK;
5064 if (nRanges == _rangesCount) {
5065 Ranges vec = _ranges;
5066 for (index = 0; index < nRanges; index++) {
5067 mach_vm_address_t addr; mach_vm_size_t len;
5068 getAddrLenForInd(addr, len, type, vec, index);
5069 vcopy[index].address = addr;
5070 vcopy[index].length = len;
5071 }
5072 } else {
5073 // The descriptor changed out from under us. Give up.
5074 UNLOCK;
5075 result = false;
5076 goto bail;
5077 }
5078 UNLOCK;
5079
5080 for (index = 0; index < nRanges; index++) {
5081 user_addr_t addr = vcopy[index].address;
5082 IOByteCount len = (IOByteCount) vcopy[index].length;
5083 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
5084 if (values[0] == NULL) {
5085 result = false;
5086 goto bail;
5087 }
5088 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
5089 if (values[1] == NULL) {
5090 result = false;
5091 goto bail;
5092 }
5093 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
5094 if (dict == NULL) {
5095 result = false;
5096 goto bail;
5097 }
5098 array->setObject(dict);
5099 dict->release();
5100 values[0]->release();
5101 values[1]->release();
5102 values[0] = values[1] = NULL;
5103 }
5104
5105 result = array->serialize(s);
5106
5107 bail:
5108 if (array) {
5109 array->release();
5110 }
5111 if (values[0]) {
5112 values[0]->release();
5113 }
5114 if (values[1]) {
5115 values[1]->release();
5116 }
5117 if (keys[0]) {
5118 keys[0]->release();
5119 }
5120 if (keys[1]) {
5121 keys[1]->release();
5122 }
5123 if (vcopy) {
5124 IOFree(vcopy, vcopy_size);
5125 }
5126
5127 return result;
5128 }
5129
5130 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5131
5132 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
5133 #ifdef __LP64__
5134 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5135 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5136 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5137 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5138 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5139 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5140 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5141 #else /* !__LP64__ */
5142 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
5143 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
5144 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
5145 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
5146 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
5147 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
5148 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
5149 #endif /* !__LP64__ */
5150 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5151 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5152 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5153 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5154 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5155 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5156 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5157 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
5158
5159 /* ex-inline function implementation */
5160 IOPhysicalAddress
5161 IOMemoryDescriptor::getPhysicalAddress()
5162 {
5163 return getPhysicalSegment( 0, NULL );
5164 }