1/*
2 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#define IOKIT_ENABLE_SHARED_PTR
29
30#include <sys/cdefs.h>
31
32#include <IOKit/assert.h>
33#include <IOKit/system.h>
34#include <IOKit/IOLib.h>
35#include <IOKit/IOMemoryDescriptor.h>
36#include <IOKit/IOMapper.h>
37#include <IOKit/IODMACommand.h>
38#include <IOKit/IOKitKeysPrivate.h>
39
40#include <IOKit/IOSubMemoryDescriptor.h>
41#include <IOKit/IOMultiMemoryDescriptor.h>
42
43#include <IOKit/IOKitDebug.h>
44#include <IOKit/IOTimeStamp.h>
45#include <libkern/OSDebug.h>
46#include <libkern/OSKextLibPrivate.h>
47
48#include "IOKitKernelInternal.h"
49
50#include <libkern/c++/OSContainers.h>
51#include <libkern/c++/OSDictionary.h>
52#include <libkern/c++/OSArray.h>
53#include <libkern/c++/OSSymbol.h>
54#include <libkern/c++/OSNumber.h>
55#include <os/overflow.h>
56#include <os/cpp_util.h>
57#include <os/base_private.h>
58
59#include <sys/uio.h>
60
61__BEGIN_DECLS
62#include <vm/pmap.h>
63#include <vm/vm_pageout.h>
64#include <mach/memory_object_types.h>
65#include <device/device_port.h>
66
67#include <mach/vm_prot.h>
68#include <mach/mach_vm.h>
69#include <mach/memory_entry.h>
70#include <vm/vm_fault.h>
71#include <vm/vm_protos.h>
72
73extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
74extern void ipc_port_release_send(ipc_port_t port);
75
76__END_DECLS
77
78#define kIOMapperWaitSystem ((IOMapper *) 1)
79
80static IOMapper * gIOSystemMapper = NULL;
81
82ppnum_t gIOLastPage;
83
84/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
85
86OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
87
88#define super IOMemoryDescriptor
89
90OSDefineMetaClassAndStructorsWithZone(IOGeneralMemoryDescriptor,
91 IOMemoryDescriptor, ZC_ZFREE_CLEARMEM)
92
93/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
94
95static IORecursiveLock * gIOMemoryLock;
96
97#define LOCK IORecursiveLockLock( gIOMemoryLock)
98#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
99#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
100#define WAKEUP \
101 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
102
103#if 0
104#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
105#else
106#define DEBG(fmt, args...) {}
107#endif
108
109/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111// Some data structures and accessor macros used by the initWithOptions()
112// function.
113
114enum ioPLBlockFlags {
115 kIOPLOnDevice = 0x00000001,
116 kIOPLExternUPL = 0x00000002,
117};
118
119struct IOMDPersistentInitData {
120 const IOGeneralMemoryDescriptor * fMD;
121 IOMemoryReference * fMemRef;
122};
123
124struct ioPLBlock {
125 upl_t fIOPL;
126 vm_address_t fPageInfo; // Pointer to page list or index into it
127 uint64_t fIOMDOffset; // The offset of this iopl in descriptor
128 ppnum_t fMappedPage; // Page number of first page in this iopl
129 unsigned int fPageOffset; // Offset within first page of iopl
130 unsigned int fFlags; // Flags
131};
132
133enum { kMaxWireTags = 6 };
134
135struct ioGMDData {
136 IOMapper * fMapper;
137 uint64_t fDMAMapAlignment;
138 uint64_t fMappedBase;
139 uint64_t fMappedLength;
140 uint64_t fPreparationID;
141#if IOTRACKING
142 IOTracking fWireTracking;
143#endif /* IOTRACKING */
144 unsigned int fPageCnt;
145 uint8_t fDMAMapNumAddressBits;
146 unsigned char fCompletionError:1;
147 unsigned char fMappedBaseValid:1;
148 unsigned char _resv:4;
149 unsigned char fDMAAccess:2;
150
151 /* variable length arrays */
152 upl_page_info_t fPageList[1]
153#if __LP64__
154 // align fPageList as for ioPLBlock
155 __attribute__((aligned(sizeof(upl_t))))
156#endif
157 ;
158 //ioPLBlock fBlocks[1];
159};
160
161#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
162#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
163#define getNumIOPL(osd, d) \
164 ((UInt)(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)))
165#define getPageList(d) (&(d->fPageList[0]))
166#define computeDataSize(p, u) \
167 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
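// Layout, for illustration: the ioGMDData lives in an OSData sized by
// computeDataSize(), so fPageList[fPageCnt] is immediately followed by the
// array of ioPLBlock structs returned by getIOPLList(). For example, a
// descriptor wired with 4 pages in a single IOPL occupies roughly
//   computeDataSize(4, 1) = offsetof(ioGMDData, fPageList)
//                           + 4 * sizeof(upl_page_info_t) + 1 * sizeof(ioPLBlock)
// bytes of the OSData's buffer.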
168
169enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
170
171/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
172
173extern "C" {
174kern_return_t
175device_data_action(
176 uintptr_t device_handle,
177 ipc_port_t device_pager,
178 vm_prot_t protection,
179 vm_object_offset_t offset,
180 vm_size_t size)
181{
182 kern_return_t kr;
183 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
184 OSSharedPtr<IOMemoryDescriptor> memDesc;
185
186 LOCK;
187 if (ref->dp.memory) {
188 memDesc.reset(ref->dp.memory, OSRetain);
189 kr = memDesc->handleFault(device_pager, offset, size);
190 memDesc.reset();
191 } else {
192 kr = KERN_ABORTED;
193 }
194 UNLOCK;
195
196 return kr;
197}
198
199kern_return_t
200device_close(
201 uintptr_t device_handle)
202{
203 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
204
205 IODelete( ref, IOMemoryDescriptorReserved, 1 );
206
207 return kIOReturnSuccess;
208}
209}; // end extern "C"
210
211/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212
213// Note this inline function uses C++ reference arguments to return values.
214// This means that pointers are not passed, and NULL does not have to be
215// checked for, since a NULL reference is illegal.
216static inline void
217getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
218 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
219{
220 assert(kIOMemoryTypeUIO == type
221 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
222 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
223 if (kIOMemoryTypeUIO == type) {
224 user_size_t us;
225 user_addr_t ad;
226 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
227 }
228#ifndef __LP64__
229 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
230 IOAddressRange cur = r.v64[ind];
231 addr = cur.address;
232 len = cur.length;
233 }
234#endif /* !__LP64__ */
235 else {
236 IOVirtualRange cur = r.v[ind];
237 addr = cur.address;
238 len = cur.length;
239 }
240}
241
242/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244static IOReturn
245purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
246{
247 IOReturn err = kIOReturnSuccess;
248
249 *control = VM_PURGABLE_SET_STATE;
250
251 enum { kIOMemoryPurgeableControlMask = 15 };
252
253 switch (kIOMemoryPurgeableControlMask & newState) {
254 case kIOMemoryPurgeableKeepCurrent:
255 *control = VM_PURGABLE_GET_STATE;
256 break;
257
258 case kIOMemoryPurgeableNonVolatile:
259 *state = VM_PURGABLE_NONVOLATILE;
260 break;
261 case kIOMemoryPurgeableVolatile:
262 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
263 break;
264 case kIOMemoryPurgeableEmpty:
265 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
266 break;
267 default:
268 err = kIOReturnBadArgument;
269 break;
270 }
271
272 if (*control == VM_PURGABLE_SET_STATE) {
273 // let VM know this call is from the kernel and is allowed to alter
274 // the volatility of the memory entry even if it was created with
275 // MAP_MEM_PURGABLE_KERNEL_ONLY
276 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
277 }
278
279 return err;
280}
281
282static IOReturn
283purgeableStateBits(int * state)
284{
285 IOReturn err = kIOReturnSuccess;
286
287 switch (VM_PURGABLE_STATE_MASK & *state) {
288 case VM_PURGABLE_NONVOLATILE:
289 *state = kIOMemoryPurgeableNonVolatile;
290 break;
291 case VM_PURGABLE_VOLATILE:
292 *state = kIOMemoryPurgeableVolatile;
293 break;
294 case VM_PURGABLE_EMPTY:
295 *state = kIOMemoryPurgeableEmpty;
296 break;
297 default:
298 *state = kIOMemoryPurgeableNonVolatile;
299 err = kIOReturnNotReady;
300 break;
301 }
302 return err;
303}
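// For illustration: these two helpers translate between the IOKit purgeable
// namespace and the VM one. A request of kIOMemoryPurgeableVolatile (with any
// modifier bits above the low four preserved) becomes VM_PURGABLE_VOLATILE via
// purgeableControlBits(), and a VM state of VM_PURGABLE_EMPTY reads back as
// kIOMemoryPurgeableEmpty via purgeableStateBits().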
304
305typedef struct {
306 unsigned int wimg;
307 unsigned int object_type;
308} iokit_memtype_entry;
309
310static const iokit_memtype_entry iomd_mem_types[] = {
311 [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
312 [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
313 [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
314 [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
315 [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
316 [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
317 [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
318 [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
319 [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
320 [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
321};
322
323static vm_prot_t
324vmProtForCacheMode(IOOptionBits cacheMode)
325{
326 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
327 vm_prot_t prot = 0;
328 SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
329 return prot;
330}
331
332static unsigned int
333pagerFlagsForCacheMode(IOOptionBits cacheMode)
334{
335 assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
336 if (cacheMode == kIODefaultCache) {
337 return -1U;
338 }
339 return iomd_mem_types[cacheMode].wimg;
340}
341
342static IOOptionBits
343cacheModeForPagerFlags(unsigned int pagerFlags)
344{
345 pagerFlags &= VM_WIMG_MASK;
346 IOOptionBits cacheMode = kIODefaultCache;
347 for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
348 if (iomd_mem_types[i].wimg == pagerFlags) {
349 cacheMode = i;
350 break;
351 }
352 }
353 return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
354}
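// For illustration, the three helpers above are just views onto the
// iomd_mem_types table, e.g.:
//   vmProtForCacheMode(kIOWriteThruCache)     -> encodes MAP_MEM_WTHRU into the returned vm_prot_t
//   pagerFlagsForCacheMode(kIOWriteThruCache) -> VM_WIMG_WTHRU
//   cacheModeForPagerFlags(VM_WIMG_WTHRU)     -> kIOWriteThruCache
// kIODefaultCache is special-cased: its pager flags are -1U, and the reverse
// lookup falls back to kIOCopybackCache.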
355
356/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
357/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
358
359struct IOMemoryEntry {
360 ipc_port_t entry;
361 int64_t offset;
362 uint64_t size;
363 uint64_t start;
364};
365
366struct IOMemoryReference {
367 volatile SInt32 refCount;
368 vm_prot_t prot;
369 uint32_t capacity;
370 uint32_t count;
371 struct IOMemoryReference * mapRef;
372 IOMemoryEntry entries[0];
373};
374
375enum{
376 kIOMemoryReferenceReuse = 0x00000001,
377 kIOMemoryReferenceWrite = 0x00000002,
378 kIOMemoryReferenceCOW = 0x00000004,
379};
380
381SInt32 gIOMemoryReferenceCount;
382
383IOMemoryReference *
384IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
385{
386 IOMemoryReference * ref;
387 size_t newSize, oldSize, copySize;
388
389 newSize = (sizeof(IOMemoryReference)
390 - sizeof(ref->entries)
391 + capacity * sizeof(ref->entries[0]));
392 ref = (typeof(ref))IOMalloc(newSize);
393 if (realloc) {
394 oldSize = (sizeof(IOMemoryReference)
395 - sizeof(realloc->entries)
396 + realloc->capacity * sizeof(realloc->entries[0]));
397 copySize = oldSize;
398 if (copySize > newSize) {
399 copySize = newSize;
400 }
401 if (ref) {
402 bcopy(realloc, ref, copySize);
403 }
404 IOFree(realloc, oldSize);
405 } else if (ref) {
406 bzero(ref, sizeof(*ref));
407 ref->refCount = 1;
408 OSIncrementAtomic(&gIOMemoryReferenceCount);
409 }
410 if (!ref) {
411 return NULL;
412 }
413 ref->capacity = capacity;
414 return ref;
415}
416
417void
418IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
419{
420 IOMemoryEntry * entries;
421 size_t size;
422
423 if (ref->mapRef) {
424 memoryReferenceFree(ref->mapRef);
425 ref->mapRef = NULL;
426 }
427
428 entries = ref->entries + ref->count;
429 while (entries > &ref->entries[0]) {
430 entries--;
431 ipc_port_release_send(entries->entry);
432 }
433 size = (sizeof(IOMemoryReference)
434 - sizeof(ref->entries)
435 + ref->capacity * sizeof(ref->entries[0]));
436 IOFree(ref, size);
437
438 OSDecrementAtomic(&gIOMemoryReferenceCount);
439}
440
441void
442IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
443{
444 if (1 == OSDecrementAtomic(&ref->refCount)) {
445 memoryReferenceFree(ref);
446 }
447}
448
449
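/*
 * memoryReferenceCreate
 *
 * Builds an IOMemoryReference for this descriptor: for virtual descriptors,
 * one VM named entry (mach_make_memory_entry_internal) per coalesced range;
 * for physical descriptors, a single entry backed by a device pager. With
 * kIOMemoryReferenceReuse, an existing _memRef whose entries match exactly is
 * shared instead of creating a new reference.
 */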
450IOReturn
451IOGeneralMemoryDescriptor::memoryReferenceCreate(
452 IOOptionBits options,
453 IOMemoryReference ** reference)
454{
455 enum { kCapacity = 4, kCapacityInc = 4 };
456
457 kern_return_t err;
458 IOMemoryReference * ref;
459 IOMemoryEntry * entries;
460 IOMemoryEntry * cloneEntries;
461 vm_map_t map;
462 ipc_port_t entry, cloneEntry;
463 vm_prot_t prot;
464 memory_object_size_t actualSize;
465 uint32_t rangeIdx;
466 uint32_t count;
467 mach_vm_address_t entryAddr, endAddr, entrySize;
468 mach_vm_size_t srcAddr, srcLen;
469 mach_vm_size_t nextAddr, nextLen;
470 mach_vm_size_t offset, remain;
471 vm_map_offset_t overmap_start = 0, overmap_end = 0;
472 int misaligned_start = 0, misaligned_end = 0;
473 IOByteCount physLen;
474 IOOptionBits type = (_flags & kIOMemoryTypeMask);
475 IOOptionBits cacheMode;
476 unsigned int pagerFlags;
477 vm_tag_t tag;
478 vm_named_entry_kernel_flags_t vmne_kflags;
479
480 ref = memoryReferenceAlloc(kCapacity, NULL);
481 if (!ref) {
482 return kIOReturnNoMemory;
483 }
484
485 tag = (vm_tag_t) getVMTag(kernel_map);
486 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
487 entries = &ref->entries[0];
488 count = 0;
489 err = KERN_SUCCESS;
490
491 offset = 0;
492 rangeIdx = 0;
493 if (_task) {
494 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
495 } else {
496 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
497 nextLen = physLen;
498
499 // default cache mode for physical
500 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
501 IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
502 _flags |= (mode << kIOMemoryBufferCacheShift);
503 }
504 }
505
506 // cache mode & vm_prot
507 prot = VM_PROT_READ;
508 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
509 prot |= vmProtForCacheMode(cacheMode);
510 // VM system requires write access to change cache mode
511 if (kIODefaultCache != cacheMode) {
512 prot |= VM_PROT_WRITE;
513 }
514 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
515 prot |= VM_PROT_WRITE;
516 }
517 if (kIOMemoryReferenceWrite & options) {
518 prot |= VM_PROT_WRITE;
519 }
520 if (kIOMemoryReferenceCOW & options) {
521 prot |= MAP_MEM_VM_COPY;
522 }
523
524 if (kIOMemoryUseReserve & _flags) {
525 prot |= MAP_MEM_GRAB_SECLUDED;
526 }
527
528 if ((kIOMemoryReferenceReuse & options) && _memRef) {
529 cloneEntries = &_memRef->entries[0];
530 prot |= MAP_MEM_NAMED_REUSE;
531 }
532
533 if (_task) {
534 // virtual ranges
535
536 if (kIOMemoryBufferPageable & _flags) {
537 int ledger_tag, ledger_no_footprint;
538
539 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
540 prot |= MAP_MEM_NAMED_CREATE;
541
542 // default accounting settings:
543 // + "none" ledger tag
544 // + include in footprint
545 // can be changed later with ::setOwnership()
546 ledger_tag = VM_LEDGER_TAG_NONE;
547 ledger_no_footprint = 0;
548
549 if (kIOMemoryBufferPurgeable & _flags) {
550 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
551 if (VM_KERN_MEMORY_SKYWALK == tag) {
552 // Skywalk purgeable memory accounting:
553 // + "network" ledger tag
554 // + not included in footprint
555 ledger_tag = VM_LEDGER_TAG_NETWORK;
556 ledger_no_footprint = 1;
557 } else {
558 // regular purgeable memory accounting:
559 // + no ledger tag
560 // + included in footprint
561 ledger_tag = VM_LEDGER_TAG_NONE;
562 ledger_no_footprint = 0;
563 }
564 }
565 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
566 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
567 if (kIOMemoryUseReserve & _flags) {
568 prot |= MAP_MEM_GRAB_SECLUDED;
569 }
570
571 prot |= VM_PROT_WRITE;
572 map = NULL;
573 } else {
574 prot |= MAP_MEM_USE_DATA_ADDR;
575 map = get_task_map(_task);
576 }
577 DEBUG4K_IOKIT("map %p _length 0x%llx prot 0x%x\n", map, (uint64_t)_length, prot);
578
579 remain = _length;
580 while (remain) {
581 srcAddr = nextAddr;
582 srcLen = nextLen;
583 nextAddr = 0;
584 nextLen = 0;
585 // coalesce addr range
586 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
587 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
588 if ((srcAddr + srcLen) != nextAddr) {
589 break;
590 }
591 srcLen += nextLen;
592 }
593
594 if (MAP_MEM_USE_DATA_ADDR & prot) {
595 entryAddr = srcAddr;
596 endAddr = srcAddr + srcLen;
597 } else {
598 entryAddr = trunc_page_64(srcAddr);
599 endAddr = round_page_64(srcAddr + srcLen);
600 }
601 if (vm_map_page_mask(get_task_map(_task)) < PAGE_MASK) {
602 DEBUG4K_IOKIT("IOMemRef %p _flags 0x%x prot 0x%x _ranges[%d]: 0x%llx 0x%llx\n", ref, (uint32_t)_flags, prot, rangeIdx - 1, srcAddr, srcLen);
603 }
604
605 do{
606 entrySize = (endAddr - entryAddr);
607 if (!entrySize) {
608 break;
609 }
610 actualSize = entrySize;
611
612 cloneEntry = MACH_PORT_NULL;
613 if (MAP_MEM_NAMED_REUSE & prot) {
614 if (cloneEntries < &_memRef->entries[_memRef->count]) {
615 cloneEntry = cloneEntries->entry;
616 } else {
617 prot &= ~MAP_MEM_NAMED_REUSE;
618 }
619 }
620
621 err = mach_make_memory_entry_internal(map,
622 &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
623
624 if (KERN_SUCCESS != err) {
625 DEBUG4K_ERROR("make_memory_entry(map %p, addr 0x%llx, size 0x%llx, prot 0x%x) err 0x%x\n", map, entryAddr, actualSize, prot, err);
626 break;
627 }
628 if (MAP_MEM_USE_DATA_ADDR & prot) {
629 if (actualSize > entrySize) {
630 actualSize = entrySize;
631 }
632 } else if (actualSize > entrySize) {
633 panic("mach_make_memory_entry_64 actualSize");
634 }
635
636 memory_entry_check_for_adjustment(map, entry, &overmap_start, &overmap_end);
637
638 if (count && overmap_start) {
639 /*
640 * Track misaligned start for all
641 * except the first entry.
642 */
643 misaligned_start++;
644 }
645
646 if (overmap_end) {
647 /*
648 * Ignore misaligned end for the
649 * last entry.
650 */
651 if ((entryAddr + actualSize) != endAddr) {
652 misaligned_end++;
653 }
654 }
655
656 if (count) {
657 /* Middle entries */
658 if (misaligned_start || misaligned_end) {
659 DEBUG4K_IOKIT("stopped at entryAddr 0x%llx\n", entryAddr);
660 ipc_port_release_send(entry);
661 err = KERN_NOT_SUPPORTED;
662 break;
663 }
664 }
665
666 if (count >= ref->capacity) {
667 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
668 entries = &ref->entries[count];
669 }
670 entries->entry = entry;
671 entries->size = actualSize;
672 entries->offset = offset + (entryAddr - srcAddr);
673 entries->start = entryAddr;
674 entryAddr += actualSize;
675 if (MAP_MEM_NAMED_REUSE & prot) {
676 if ((cloneEntries->entry == entries->entry)
677 && (cloneEntries->size == entries->size)
678 && (cloneEntries->offset == entries->offset)) {
679 cloneEntries++;
680 } else {
681 prot &= ~MAP_MEM_NAMED_REUSE;
682 }
683 }
684 entries++;
685 count++;
686 }while (true);
687 offset += srcLen;
688 remain -= srcLen;
689 }
690 } else {
691 // _task == 0, physical or kIOMemoryTypeUPL
692 memory_object_t pager;
693 vm_size_t size = ptoa_64(_pages);
694
695 if (!getKernelReserved()) {
696 panic("getKernelReserved");
697 }
698
699 reserved->dp.pagerContig = (1 == _rangesCount);
700 reserved->dp.memory = this;
701
702 pagerFlags = pagerFlagsForCacheMode(cacheMode);
703 if (-1U == pagerFlags) {
704 panic("phys is kIODefaultCache");
705 }
706 if (reserved->dp.pagerContig) {
707 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
708 }
709
710 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
711 size, pagerFlags);
712 assert(pager);
713 if (!pager) {
714 DEBUG4K_ERROR("pager setup failed size 0x%llx flags 0x%x\n", (uint64_t)size, pagerFlags);
715 err = kIOReturnVMError;
716 } else {
717 srcAddr = nextAddr;
718 entryAddr = trunc_page_64(srcAddr);
719 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
720 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
721 assert(KERN_SUCCESS == err);
722 if (KERN_SUCCESS != err) {
723 device_pager_deallocate(pager);
724 } else {
725 reserved->dp.devicePager = pager;
726 entries->entry = entry;
727 entries->size = size;
728 entries->offset = offset + (entryAddr - srcAddr);
729 entries++;
730 count++;
731 }
732 }
733 }
734
735 ref->count = count;
736 ref->prot = prot;
737
738 if (_task && (KERN_SUCCESS == err)
739 && (kIOMemoryMapCopyOnWrite & _flags)
740 && !(kIOMemoryReferenceCOW & options)) {
741 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
742 if (KERN_SUCCESS != err) {
743 DEBUG4K_ERROR("ref %p options 0x%x err 0x%x\n", ref, (unsigned int)options, err);
744 }
745 }
746
747 if (KERN_SUCCESS == err) {
748 if (MAP_MEM_NAMED_REUSE & prot) {
749 memoryReferenceFree(ref);
750 OSIncrementAtomic(&_memRef->refCount);
751 ref = _memRef;
752 }
753 } else {
754 DEBUG4K_ERROR("ref %p err 0x%x\n", ref, err);
755 memoryReferenceFree(ref);
756 ref = NULL;
757 }
758
759 *reference = ref;
760
761 return err;
762}
763
764kern_return_t
765IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
766{
767 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
768 IOReturn err;
769 vm_map_offset_t addr;
770
771 addr = ref->mapped;
772
773 err = vm_map_enter_mem_object(map, &addr, ref->size,
774#if __ARM_MIXED_PAGE_SIZE__
775 // TODO4K this should not be necessary...
776 (vm_map_offset_t)((ref->options & kIOMapAnywhere) ? max(PAGE_MASK, vm_map_page_mask(map)) : 0),
777#else /* __ARM_MIXED_PAGE_SIZE__ */
778 (vm_map_offset_t) 0,
779#endif /* __ARM_MIXED_PAGE_SIZE__ */
780 (((ref->options & kIOMapAnywhere)
781 ? VM_FLAGS_ANYWHERE
782 : VM_FLAGS_FIXED)),
783 VM_MAP_KERNEL_FLAGS_NONE,
784 ref->tag,
785 IPC_PORT_NULL,
786 (memory_object_offset_t) 0,
787 false, /* copy */
788 ref->prot,
789 ref->prot,
790 VM_INHERIT_NONE);
791 if (KERN_SUCCESS == err) {
792 ref->mapped = (mach_vm_address_t) addr;
793 ref->map = map;
794 }
795
796 return err;
797}
798
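/*
 * memoryReferenceMap
 *
 * Enters the named entries of an IOMemoryReference into 'map' at *inaddr (or
 * wherever the VM chooses, for kIOMapAnywhere), chunk by chunk. When
 * kIOMapPrefault is set and the memory was wired by prepare(), the pages are
 * entered up front from the UPL page list instead of being faulted on demand.
 * References created with MAP_MEM_USE_DATA_ADDR are handled by
 * memoryReferenceMapNew() instead.
 */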
799IOReturn
800IOGeneralMemoryDescriptor::memoryReferenceMap(
801 IOMemoryReference * ref,
802 vm_map_t map,
803 mach_vm_size_t inoffset,
804 mach_vm_size_t size,
805 IOOptionBits options,
806 mach_vm_address_t * inaddr)
807{
808 IOReturn err;
809 int64_t offset = inoffset;
810 uint32_t rangeIdx, entryIdx;
811 vm_map_offset_t addr, mapAddr;
812 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
813
814 mach_vm_address_t nextAddr;
815 mach_vm_size_t nextLen;
816 IOByteCount physLen;
817 IOMemoryEntry * entry;
818 vm_prot_t prot, memEntryCacheMode;
819 IOOptionBits type;
820 IOOptionBits cacheMode;
821 vm_tag_t tag;
822 // for the kIOMapPrefault option.
823 upl_page_info_t * pageList = NULL;
824 UInt currentPageIndex = 0;
825 bool didAlloc;
826
827 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
828
829 if (ref->mapRef) {
830 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
831 return err;
832 }
833
834 if (MAP_MEM_USE_DATA_ADDR & ref->prot) {
835 err = memoryReferenceMapNew(ref, map, inoffset, size, options, inaddr);
836 return err;
837 }
838
839 type = _flags & kIOMemoryTypeMask;
840
841 prot = VM_PROT_READ;
842 if (!(kIOMapReadOnly & options)) {
843 prot |= VM_PROT_WRITE;
844 }
845 prot &= ref->prot;
846
847 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
848 if (kIODefaultCache != cacheMode) {
849 // VM system requires write access to update named entry cache mode
850 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
851 }
852
853 tag = (typeof(tag))getVMTag(map);
854
855 if (_task) {
856 // Find first range for offset
857 if (!_rangesCount) {
858 return kIOReturnBadArgument;
859 }
860 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
861 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
862 if (remain < nextLen) {
863 break;
864 }
865 remain -= nextLen;
866 }
867 } else {
868 rangeIdx = 0;
869 remain = 0;
870 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
871 nextLen = size;
872 }
873
874 assert(remain < nextLen);
875 if (remain >= nextLen) {
876 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx remain 0x%llx nextLen 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)remain, nextLen);
877 return kIOReturnBadArgument;
878 }
879
880 nextAddr += remain;
881 nextLen -= remain;
882#if __ARM_MIXED_PAGE_SIZE__
883 pageOffset = (vm_map_page_mask(map) & nextAddr);
884#else /* __ARM_MIXED_PAGE_SIZE__ */
885 pageOffset = (page_mask & nextAddr);
886#endif /* __ARM_MIXED_PAGE_SIZE__ */
887 addr = 0;
888 didAlloc = false;
889
890 if (!(options & kIOMapAnywhere)) {
891 addr = *inaddr;
892 if (pageOffset != (vm_map_page_mask(map) & addr)) {
893 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx addr 0x%llx page_mask 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)addr, (uint64_t)page_mask, (uint64_t)pageOffset);
894 }
895 addr -= pageOffset;
896 }
897
898 // find first entry for offset
899 for (entryIdx = 0;
900 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
901 entryIdx++) {
902 }
903 entryIdx--;
904 entry = &ref->entries[entryIdx];
905
906 // allocate VM
907 size = round_page_64(size + pageOffset);
908 if (kIOMapOverwrite & options) {
909 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
910 map = IOPageableMapForAddress(addr);
911 }
912 err = KERN_SUCCESS;
913 } else {
914 IOMemoryDescriptorMapAllocRef ref;
915 ref.map = map;
916 ref.tag = tag;
917 ref.options = options;
918 ref.size = size;
919 ref.prot = prot;
920 if (options & kIOMapAnywhere) {
921 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
922 ref.mapped = 0;
923 } else {
924 ref.mapped = addr;
925 }
926 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
927 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
928 } else {
929 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
930 }
931 if (KERN_SUCCESS == err) {
932 addr = ref.mapped;
933 map = ref.map;
934 didAlloc = true;
935 }
936 }
937
938 /*
939 * If the memory is associated with a device pager but doesn't have a UPL,
940 * it will be immediately faulted in through the pager via populateDevicePager().
941 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
942 * operations.
943 */
944 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
945 options &= ~kIOMapPrefault;
946 }
947
948 /*
949 * Prefaulting is only possible if we wired the memory earlier. Check the
950 * memory type, and the underlying data.
951 */
952 if (options & kIOMapPrefault) {
953 /*
954 * The memory must have been wired by calling ::prepare(), otherwise
955 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
956 */
957 assert(_wireCount != 0);
958 assert(_memoryEntries != NULL);
959 if ((_wireCount == 0) ||
960 (_memoryEntries == NULL)) {
961 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr);
962 return kIOReturnBadArgument;
963 }
964
965 // Get the page list.
966 ioGMDData* dataP = getDataP(_memoryEntries);
967 ioPLBlock const* ioplList = getIOPLList(dataP);
968 pageList = getPageList(dataP);
969
970 // Get the number of IOPLs.
971 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
972
973 /*
974 * Scan through the IOPL info blocks, looking for the first block containing
975 * the offset. The search will go one block past it, so we step back to the
976 * right block at the end.
977 */
978 UInt ioplIndex = 0;
979 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
980 ioplIndex++;
981 }
982 ioplIndex--;
983
984 // Retrieve the IOPL info block.
985 ioPLBlock ioplInfo = ioplList[ioplIndex];
986
987 /*
988 * For external UPLs, fPageInfo points directly to the UPL's upl_page_info_t
989 * array.
990 */
991 if (ioplInfo.fFlags & kIOPLExternUPL) {
992 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
993 } else {
994 pageList = &pageList[ioplInfo.fPageInfo];
995 }
996
997 // Rebase [offset] into the IOPL in order to look up the first page index.
998 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
999
1000 // Retrieve the index of the first page corresponding to the offset.
1001 currentPageIndex = atop_32(offsetInIOPL);
1002 }
1003
1004 // enter mappings
1005 remain = size;
1006 mapAddr = addr;
1007 addr += pageOffset;
1008
1009 while (remain && (KERN_SUCCESS == err)) {
1010 entryOffset = offset - entry->offset;
1011 if ((min(vm_map_page_mask(map), page_mask) & entryOffset) != pageOffset) {
1012 err = kIOReturnNotAligned;
1013 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryOffset 0x%llx pageOffset 0x%llx\n", map, inoffset, size, (uint32_t)options, *inaddr, (uint64_t)entryOffset, (uint64_t)pageOffset);
1014 break;
1015 }
1016
1017 if (kIODefaultCache != cacheMode) {
1018 vm_size_t unused = 0;
1019 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1020 memEntryCacheMode, NULL, entry->entry);
1021 assert(KERN_SUCCESS == err);
1022 }
1023
1024 entryOffset -= pageOffset;
1025 if (entryOffset >= entry->size) {
1026 panic("entryOffset");
1027 }
1028 chunk = entry->size - entryOffset;
1029 if (chunk) {
1030 vm_map_kernel_flags_t vmk_flags;
1031
1032 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1033 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1034
1035 if (chunk > remain) {
1036 chunk = remain;
1037 }
1038 if (options & kIOMapPrefault) {
1039 UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1040
1041 err = vm_map_enter_mem_object_prefault(map,
1042 &mapAddr,
1043 chunk, 0 /* mask */,
1044 (VM_FLAGS_FIXED
1045 | VM_FLAGS_OVERWRITE),
1046 vmk_flags,
1047 tag,
1048 entry->entry,
1049 entryOffset,
1050 prot, // cur
1051 prot, // max
1052 &pageList[currentPageIndex],
1053 nb_pages);
1054
1055 if (err || vm_map_page_mask(map) < PAGE_MASK) {
1056 DEBUG4K_IOKIT("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1057 }
1058 // Compute the next index in the page list.
1059 currentPageIndex += nb_pages;
1060 assert(currentPageIndex <= _pages);
1061 } else {
1062 err = vm_map_enter_mem_object(map,
1063 &mapAddr,
1064 chunk, 0 /* mask */,
1065 (VM_FLAGS_FIXED
1066 | VM_FLAGS_OVERWRITE),
1067 vmk_flags,
1068 tag,
1069 entry->entry,
1070 entryOffset,
1071 false, // copy
1072 prot, // cur
1073 prot, // max
1074 VM_INHERIT_NONE);
1075 }
1076 if (KERN_SUCCESS != err) {
1077 DEBUG4K_ERROR("IOMemRef %p mapped in map %p (pgshift %d) at 0x%llx size 0x%llx err 0x%x\n", ref, map, vm_map_page_shift(map), (uint64_t)mapAddr, (uint64_t)chunk, err);
1078 break;
1079 }
1080 remain -= chunk;
1081 if (!remain) {
1082 break;
1083 }
1084 mapAddr += chunk;
1085 offset += chunk - pageOffset;
1086 }
1087 pageOffset = 0;
1088 entry++;
1089 entryIdx++;
1090 if (entryIdx >= ref->count) {
1091 err = kIOReturnOverrun;
1092 DEBUG4K_ERROR("map %p inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx entryIdx %d ref->count %d\n", map, inoffset, size, (uint32_t)options, *inaddr, entryIdx, ref->count);
1093 break;
1094 }
1095 }
1096
1097 if ((KERN_SUCCESS != err) && didAlloc) {
1098 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1099 addr = 0;
1100 }
1101 *inaddr = addr;
1102
1103 if (err /* || vm_map_page_mask(map) < PAGE_MASK */) {
1104 DEBUG4K_ERROR("map %p (%d) inoffset 0x%llx size 0x%llx options 0x%x inaddr 0x%llx err 0x%x\n", map, vm_map_page_shift(map), inoffset, size, (uint32_t)options, *inaddr, err);
1105 }
1106 return err;
1107}
1108
1109#define LOGUNALIGN 0
1110IOReturn
1111IOGeneralMemoryDescriptor::memoryReferenceMapNew(
1112 IOMemoryReference * ref,
1113 vm_map_t map,
1114 mach_vm_size_t inoffset,
1115 mach_vm_size_t size,
1116 IOOptionBits options,
1117 mach_vm_address_t * inaddr)
1118{
1119 IOReturn err;
1120 int64_t offset = inoffset;
1121 uint32_t entryIdx, firstEntryIdx;
1122 vm_map_offset_t addr, mapAddr, mapAddrOut;
1123 vm_map_offset_t entryOffset, remain, chunk;
1124
1125 IOMemoryEntry * entry;
1126 vm_prot_t prot, memEntryCacheMode;
1127 IOOptionBits type;
1128 IOOptionBits cacheMode;
1129 vm_tag_t tag;
1130 // for the kIOMapPrefault option.
1131 upl_page_info_t * pageList = NULL;
1132 UInt currentPageIndex = 0;
1133 bool didAlloc;
1134
1135 DEBUG4K_IOKIT("ref %p map %p inoffset 0x%llx size 0x%llx options 0x%x *inaddr 0x%llx\n", ref, map, inoffset, size, (uint32_t)options, *inaddr);
1136
1137 if (ref->mapRef) {
1138 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
1139 return err;
1140 }
1141
1142#if LOGUNALIGN
1143 printf("MAP offset %qx, %qx\n", inoffset, size);
1144#endif
1145
1146 type = _flags & kIOMemoryTypeMask;
1147
1148 prot = VM_PROT_READ;
1149 if (!(kIOMapReadOnly & options)) {
1150 prot |= VM_PROT_WRITE;
1151 }
1152 prot &= ref->prot;
1153
1154 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
1155 if (kIODefaultCache != cacheMode) {
1156 // VM system requires write access to update named entry cache mode
1157 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
1158 }
1159
1160 tag = (vm_tag_t) getVMTag(map);
1161
1162 addr = 0;
1163 didAlloc = false;
1164
1165 if (!(options & kIOMapAnywhere)) {
1166 addr = *inaddr;
1167 }
1168
1169 // find first entry for offset
1170 for (firstEntryIdx = 0;
1171 (firstEntryIdx < ref->count) && (offset >= ref->entries[firstEntryIdx].offset);
1172 firstEntryIdx++) {
1173 }
1174 firstEntryIdx--;
1175
1176 // calculate required VM space
1177
1178 entryIdx = firstEntryIdx;
1179 entry = &ref->entries[entryIdx];
1180
1181 remain = size;
1182 int64_t iteroffset = offset;
1183 uint64_t mapSize = 0;
1184 while (remain) {
1185 entryOffset = iteroffset - entry->offset;
1186 if (entryOffset >= entry->size) {
1187 panic("entryOffset");
1188 }
1189
1190#if LOGUNALIGN
1191 printf("[%d] size %qx offset %qx start %qx iter %qx\n",
1192 entryIdx, entry->size, entry->offset, entry->start, iteroffset);
1193#endif
1194
1195 chunk = entry->size - entryOffset;
1196 if (chunk) {
1197 if (chunk > remain) {
1198 chunk = remain;
1199 }
1200 mach_vm_size_t entrySize;
1201 err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1202 assert(KERN_SUCCESS == err);
1203 mapSize += entrySize;
1204
1205 remain -= chunk;
1206 if (!remain) {
1207 break;
1208 }
1209 iteroffset += chunk; // - pageOffset;
1210 }
1211 entry++;
1212 entryIdx++;
1213 if (entryIdx >= ref->count) {
1214 panic("overrun");
1215 err = kIOReturnOverrun;
1216 break;
1217 }
1218 }
1219
1220 if (kIOMapOverwrite & options) {
1221 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1222 map = IOPageableMapForAddress(addr);
1223 }
1224 err = KERN_SUCCESS;
1225 } else {
1226 IOMemoryDescriptorMapAllocRef ref;
1227 ref.map = map;
1228 ref.tag = tag;
1229 ref.options = options;
1230 ref.size = mapSize;
1231 ref.prot = prot;
1232 if (options & kIOMapAnywhere) {
1233 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1234 ref.mapped = 0;
1235 } else {
1236 ref.mapped = addr;
1237 }
1238 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
1239 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1240 } else {
1241 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
1242 }
1243
1244 if (KERN_SUCCESS == err) {
1245 addr = ref.mapped;
1246 map = ref.map;
1247 didAlloc = true;
1248 }
1249#if LOGUNALIGN
1250 IOLog("map err %x size %qx addr %qx\n", err, mapSize, addr);
1251#endif
1252 }
1253
1254 /*
1255 * If the memory is associated with a device pager but doesn't have a UPL,
1256 * it will be immediately faulted in through the pager via populateDevicePager().
1257 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
1258 * operations.
1259 */
1260 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
1261 options &= ~kIOMapPrefault;
1262 }
1263
1264 /*
1265 * Prefaulting is only possible if we wired the memory earlier. Check the
1266 * memory type, and the underlying data.
1267 */
1268 if (options & kIOMapPrefault) {
1269 /*
1270 * The memory must have been wired by calling ::prepare(), otherwise
1271 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
1272 */
1273 assert(_wireCount != 0);
1274 assert(_memoryEntries != NULL);
1275 if ((_wireCount == 0) ||
1276 (_memoryEntries == NULL)) {
1277 return kIOReturnBadArgument;
1278 }
1279
1280 // Get the page list.
1281 ioGMDData* dataP = getDataP(_memoryEntries);
1282 ioPLBlock const* ioplList = getIOPLList(dataP);
1283 pageList = getPageList(dataP);
1284
1285 // Get the number of IOPLs.
1286 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1287
1288 /*
1289 * Scan through the IOPL info blocks, looking for the first block containing
1290 * the offset. The search will go one block past it, so we step back to the
1291 * right block at the end.
1292 */
1293 UInt ioplIndex = 0;
1294 while ((ioplIndex < numIOPLs) && (((uint64_t) offset) >= ioplList[ioplIndex].fIOMDOffset)) {
1295 ioplIndex++;
1296 }
1297 ioplIndex--;
1298
1299 // Retrieve the IOPL info block.
1300 ioPLBlock ioplInfo = ioplList[ioplIndex];
1301
1302 /*
1303 * For external UPLs, fPageInfo points directly to the UPL's upl_page_info_t
1304 * array.
1305 */
1306 if (ioplInfo.fFlags & kIOPLExternUPL) {
1307 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
1308 } else {
1309 pageList = &pageList[ioplInfo.fPageInfo];
1310 }
1311
1312 // Rebase [offset] into the IOPL in order to look up the first page index.
1313 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
1314
1315 // Retrieve the index of the first page corresponding to the offset.
1316 currentPageIndex = atop_32(offsetInIOPL);
1317 }
1318
1319 // enter mappings
1320 remain = size;
1321 mapAddr = addr;
1322 entryIdx = firstEntryIdx;
1323 entry = &ref->entries[entryIdx];
1324
1325 while (remain && (KERN_SUCCESS == err)) {
1326#if LOGUNALIGN
1327 printf("offset %qx, %qx\n", offset, entry->offset);
1328#endif
1329 if (kIODefaultCache != cacheMode) {
1330 vm_size_t unused = 0;
1331 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
1332 memEntryCacheMode, NULL, entry->entry);
1333 assert(KERN_SUCCESS == err);
1334 }
1335 entryOffset = offset - entry->offset;
1336 if (entryOffset >= entry->size) {
1337 panic("entryOffset");
1338 }
1339 chunk = entry->size - entryOffset;
1340#if LOGUNALIGN
1341 printf("entryIdx %d, chunk %qx\n", entryIdx, chunk);
1342#endif
1343 if (chunk) {
1344 vm_map_kernel_flags_t vmk_flags;
1345
1346 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
1347 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
1348
1349 if (chunk > remain) {
1350 chunk = remain;
1351 }
1352 mapAddrOut = mapAddr;
1353 if (options & kIOMapPrefault) {
1354 UInt nb_pages = (typeof(nb_pages))round_page(chunk) / PAGE_SIZE;
1355
1356 err = vm_map_enter_mem_object_prefault(map,
1357 &mapAddrOut,
1358 chunk, 0 /* mask */,
1359 (VM_FLAGS_FIXED
1360 | VM_FLAGS_OVERWRITE
1361 | VM_FLAGS_RETURN_DATA_ADDR),
1362 vmk_flags,
1363 tag,
1364 entry->entry,
1365 entryOffset,
1366 prot, // cur
1367 prot, // max
1368 &pageList[currentPageIndex],
1369 nb_pages);
1370
1371 // Compute the next index in the page list.
1372 currentPageIndex += nb_pages;
1373 assert(currentPageIndex <= _pages);
1374 } else {
1375#if LOGUNALIGN
1376 printf("mapAddr i %qx chunk %qx\n", mapAddr, chunk);
1377#endif
1378 err = vm_map_enter_mem_object(map,
1379 &mapAddrOut,
1380 chunk, 0 /* mask */,
1381 (VM_FLAGS_FIXED
1382 | VM_FLAGS_OVERWRITE
1383 | VM_FLAGS_RETURN_DATA_ADDR),
1384 vmk_flags,
1385 tag,
1386 entry->entry,
1387 entryOffset,
1388 false, // copy
1389 prot, // cur
1390 prot, // max
1391 VM_INHERIT_NONE);
1392 }
1393 if (KERN_SUCCESS != err) {
1394 panic("map enter err %x", err);
1395 break;
1396 }
1397#if LOGUNALIGN
1398 printf("mapAddr o %qx\n", mapAddrOut);
1399#endif
1400 if (entryIdx == firstEntryIdx) {
1401 addr = mapAddrOut;
1402 }
1403 remain -= chunk;
1404 if (!remain) {
1405 break;
1406 }
1407 mach_vm_size_t entrySize;
1408 err = mach_memory_entry_map_size(entry->entry, map, entryOffset, chunk, &entrySize);
1409 assert(KERN_SUCCESS == err);
1410 mapAddr += entrySize;
1411 offset += chunk;
1412 }
1413
1414 entry++;
1415 entryIdx++;
1416 if (entryIdx >= ref->count) {
1417 err = kIOReturnOverrun;
1418 break;
1419 }
1420 }
1421
1422 if (KERN_SUCCESS != err) {
1423 DEBUG4K_ERROR("size 0x%llx err 0x%x\n", size, err);
1424 }
1425
1426 if ((KERN_SUCCESS != err) && didAlloc) {
1427 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1428 addr = 0;
1429 }
1430 *inaddr = addr;
1431
1432 return err;
1433}
1434
1435uint64_t
1436IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(
1437 IOMemoryReference * ref,
1438 uint64_t * offset)
1439{
1440 kern_return_t kr;
1441 vm_object_offset_t data_offset = 0;
1442 uint64_t total;
1443 uint32_t idx;
1444
1445 assert(ref->count);
1446 if (offset) {
1447 *offset = (uint64_t) data_offset;
1448 }
1449 total = 0;
1450 for (idx = 0; idx < ref->count; idx++) {
1451 kr = mach_memory_entry_phys_page_offset(ref->entries[idx].entry,
1452 &data_offset);
1453 if (KERN_SUCCESS != kr) {
1454 DEBUG4K_ERROR("ref %p entry %p kr 0x%x\n", ref, ref->entries[idx].entry, kr);
1455 } else if (0 != data_offset) {
1456 DEBUG4K_IOKIT("ref %p entry %p offset 0x%llx kr 0x%x\n", ref, ref->entries[0].entry, data_offset, kr);
1457 }
1458 if (offset && !idx) {
1459 *offset = (uint64_t) data_offset;
1460 }
1461 total += round_page(data_offset + ref->entries[idx].size);
1462 }
1463
1464 DEBUG4K_IOKIT("ref %p offset 0x%llx total 0x%llx\n", ref,
1465 (offset ? *offset : (vm_object_offset_t)-1), total);
1466
1467 return total;
1468}
1469
1470
1471IOReturn
1472IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1473 IOMemoryReference * ref,
1474 IOByteCount * residentPageCount,
1475 IOByteCount * dirtyPageCount)
1476{
1477 IOReturn err;
1478 IOMemoryEntry * entries;
1479 unsigned int resident, dirty;
1480 unsigned int totalResident, totalDirty;
1481
1482 totalResident = totalDirty = 0;
1483 err = kIOReturnSuccess;
1484 entries = ref->entries + ref->count;
1485 while (entries > &ref->entries[0]) {
1486 entries--;
1487 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1488 if (KERN_SUCCESS != err) {
1489 break;
1490 }
1491 totalResident += resident;
1492 totalDirty += dirty;
1493 }
1494
1495 if (residentPageCount) {
1496 *residentPageCount = totalResident;
1497 }
1498 if (dirtyPageCount) {
1499 *dirtyPageCount = totalDirty;
1500 }
1501 return err;
1502}
1503
1504IOReturn
1505IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1506 IOMemoryReference * ref,
1507 IOOptionBits newState,
1508 IOOptionBits * oldState)
1509{
1510 IOReturn err;
1511 IOMemoryEntry * entries;
1512 vm_purgable_t control;
1513 int totalState, state;
1514
1515 totalState = kIOMemoryPurgeableNonVolatile;
1516 err = kIOReturnSuccess;
1517 entries = ref->entries + ref->count;
1518 while (entries > &ref->entries[0]) {
1519 entries--;
1520
1521 err = purgeableControlBits(newState, &control, &state);
1522 if (KERN_SUCCESS != err) {
1523 break;
1524 }
1525 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1526 if (KERN_SUCCESS != err) {
1527 break;
1528 }
1529 err = purgeableStateBits(&state);
1530 if (KERN_SUCCESS != err) {
1531 break;
1532 }
1533
1534 if (kIOMemoryPurgeableEmpty == state) {
1535 totalState = kIOMemoryPurgeableEmpty;
1536 } else if (kIOMemoryPurgeableEmpty == totalState) {
1537 continue;
1538 } else if (kIOMemoryPurgeableVolatile == totalState) {
1539 continue;
1540 } else if (kIOMemoryPurgeableVolatile == state) {
1541 totalState = kIOMemoryPurgeableVolatile;
1542 } else {
1543 totalState = kIOMemoryPurgeableNonVolatile;
1544 }
1545 }
1546
1547 if (oldState) {
1548 *oldState = totalState;
1549 }
1550 return err;
1551}
1552
1553IOReturn
1554IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1555 IOMemoryReference * ref,
1556 task_t newOwner,
1557 int newLedgerTag,
1558 IOOptionBits newLedgerOptions)
1559{
1560 IOReturn err, totalErr;
1561 IOMemoryEntry * entries;
1562
1563 totalErr = kIOReturnSuccess;
1564 entries = ref->entries + ref->count;
1565 while (entries > &ref->entries[0]) {
1566 entries--;
1567
1568 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1569 if (KERN_SUCCESS != err) {
1570 totalErr = err;
1571 }
1572 }
1573
1574 return totalErr;
1575}
1576
1577/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1578
1579OSSharedPtr<IOMemoryDescriptor>
1580IOMemoryDescriptor::withAddress(void * address,
1581 IOByteCount length,
1582 IODirection direction)
1583{
1584 return IOMemoryDescriptor::
1585 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1586}
1587
1588#ifndef __LP64__
1589OSSharedPtr<IOMemoryDescriptor>
1590IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1591 IOByteCount length,
1592 IODirection direction,
1593 task_t task)
1594{
1595 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1596 if (that) {
1597 if (that->initWithAddress(address, length, direction, task)) {
1598 return os::move(that);
1599 }
1600 }
1601 return nullptr;
1602}
1603#endif /* !__LP64__ */
1604
1605OSSharedPtr<IOMemoryDescriptor>
1606IOMemoryDescriptor::withPhysicalAddress(
1607 IOPhysicalAddress address,
1608 IOByteCount length,
1609 IODirection direction )
1610{
1611 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
1612}
1613
1614#ifndef __LP64__
1615OSSharedPtr<IOMemoryDescriptor>
1616IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1617 UInt32 withCount,
1618 IODirection direction,
1619 task_t task,
1620 bool asReference)
1621{
1622 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1623 if (that) {
1624 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1625 return os::move(that);
1626 }
1627 }
1628 return nullptr;
1629}
1630#endif /* !__LP64__ */
1631
1632OSSharedPtr<IOMemoryDescriptor>
1633IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1634 mach_vm_size_t length,
1635 IOOptionBits options,
1636 task_t task)
1637{
1638 IOAddressRange range = { address, length };
1639 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
1640}
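/*
 * Typical client usage, sketched for illustration only (error handling
 * omitted; bufferAddr/bufferLen are placeholders for the caller's buffer):
 *
 *   OSSharedPtr<IOMemoryDescriptor> md =
 *       IOMemoryDescriptor::withAddressRange(bufferAddr, bufferLen,
 *                                            kIODirectionOut, kernel_task);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // pages are wired; safe to take physical segments or DMA map
 *       md->complete();
 *   }
 */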
1641
1642OSSharedPtr<IOMemoryDescriptor>
1643IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1644 UInt32 rangeCount,
1645 IOOptionBits options,
1646 task_t task)
1647{
1648 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1649 if (that) {
1650 if (task) {
1651 options |= kIOMemoryTypeVirtual64;
1652 } else {
1653 options |= kIOMemoryTypePhysical64;
1654 }
1655
1656 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
1657 return os::move(that);
1658 }
1659 }
1660
1661 return nullptr;
1662}
1663
1664
1665/*
1666 * withOptions:
1667 *
1668 * Create a new IOMemoryDescriptor. The buffer is made up of several
1669 * virtual address ranges, from a given task.
1670 *
1671 * Passing the ranges as a reference will avoid an extra allocation.
1672 */
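/*
 * An illustrative call (names are placeholders), describing two ranges in a
 * user task:
 *
 *   IOAddressRange ranges[2] = {{ addr0, len0 }, { addr1, len1 }};
 *   OSSharedPtr<IOMemoryDescriptor> md =
 *       IOMemoryDescriptor::withOptions(ranges, 2, 0, userTask,
 *           kIOMemoryTypeVirtual64 | kIODirectionIn, NULL);
 */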
1673OSSharedPtr<IOMemoryDescriptor>
1674IOMemoryDescriptor::withOptions(void * buffers,
1675 UInt32 count,
1676 UInt32 offset,
1677 task_t task,
1678 IOOptionBits opts,
1679 IOMapper * mapper)
1680{
1681 OSSharedPtr<IOGeneralMemoryDescriptor> self = OSMakeShared<IOGeneralMemoryDescriptor>();
1682
1683 if (self
1684 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1685 return nullptr;
1686 }
1687
1688 return os::move(self);
1689}
1690
1691bool
1692IOMemoryDescriptor::initWithOptions(void * buffers,
1693 UInt32 count,
1694 UInt32 offset,
1695 task_t task,
1696 IOOptionBits options,
1697 IOMapper * mapper)
1698{
1699 return false;
1700}
1701
1702#ifndef __LP64__
1703OSSharedPtr<IOMemoryDescriptor>
1704IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1705 UInt32 withCount,
1706 IODirection direction,
1707 bool asReference)
1708{
1709 OSSharedPtr<IOGeneralMemoryDescriptor> that = OSMakeShared<IOGeneralMemoryDescriptor>();
1710 if (that) {
1711 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1712 return os::move(that);
1713 }
1714 }
1715 return nullptr;
1716}
1717
1718OSSharedPtr<IOMemoryDescriptor>
1719IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1720 IOByteCount offset,
1721 IOByteCount length,
1722 IODirection direction)
1723{
1724 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1725}
1726#endif /* !__LP64__ */
1727
1728OSSharedPtr<IOMemoryDescriptor>
1729IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1730{
1731 IOGeneralMemoryDescriptor *origGenMD =
1732 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1733
1734 if (origGenMD) {
1735 return IOGeneralMemoryDescriptor::
1736 withPersistentMemoryDescriptor(origGenMD);
1737 } else {
1738 return nullptr;
1739 }
1740}
1741
1742OSSharedPtr<IOMemoryDescriptor>
1743IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1744{
1745 IOMemoryReference * memRef;
1746 OSSharedPtr<IOGeneralMemoryDescriptor> self;
1747
1748 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1749 return nullptr;
1750 }
1751
1752 if (memRef == originalMD->_memRef) {
1753 self.reset(originalMD, OSRetain);
1754 originalMD->memoryReferenceRelease(memRef);
1755 return os::move(self);
1756 }
1757
1758 self = OSMakeShared<IOGeneralMemoryDescriptor>();
1759 IOMDPersistentInitData initData = { originalMD, memRef };
1760
1761 if (self
1762 && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
1763 return nullptr;
1764 }
1765 return os::move(self);
1766}
1767
1768#ifndef __LP64__
1769bool
1770IOGeneralMemoryDescriptor::initWithAddress(void * address,
1771 IOByteCount withLength,
1772 IODirection withDirection)
1773{
1774 _singleRange.v.address = (vm_offset_t) address;
1775 _singleRange.v.length = withLength;
1776
1777 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1778}
1779
1780bool
1781IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1782 IOByteCount withLength,
1783 IODirection withDirection,
1784 task_t withTask)
1785{
1786 _singleRange.v.address = address;
1787 _singleRange.v.length = withLength;
1788
1789 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1790}
1791
1792bool
1793IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1794 IOPhysicalAddress address,
1795 IOByteCount withLength,
1796 IODirection withDirection )
1797{
1798 _singleRange.p.address = address;
1799 _singleRange.p.length = withLength;
1800
1801 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1802}
1803
1804bool
1805IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1806 IOPhysicalRange * ranges,
1807 UInt32 count,
1808 IODirection direction,
1809 bool reference)
1810{
1811 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1812
1813 if (reference) {
1814 mdOpts |= kIOMemoryAsReference;
1815 }
1816
1817 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
1818}
1819
1820bool
1821IOGeneralMemoryDescriptor::initWithRanges(
1822 IOVirtualRange * ranges,
1823 UInt32 count,
1824 IODirection direction,
1825 task_t task,
1826 bool reference)
1827{
1828 IOOptionBits mdOpts = direction;
1829
1830 if (reference) {
1831 mdOpts |= kIOMemoryAsReference;
1832 }
1833
1834 if (task) {
1835 mdOpts |= kIOMemoryTypeVirtual;
1836
1837 // Auto-prepare if this is a kernel memory descriptor as very few
1838 // clients bother to prepare() kernel memory.
1839 // But it was not enforced, so what are you going to do?
1840 if (task == kernel_task) {
1841 mdOpts |= kIOMemoryAutoPrepare;
1842 }
1843 } else {
1844 mdOpts |= kIOMemoryTypePhysical;
1845 }
1846
1847 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
1848}
1849#endif /* !__LP64__ */
1850
1851/*
1852 * initWithOptions:
1853 *
1854 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1855 * address ranges from a given task, several physical ranges, a UPL from the
1856 * UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
1857 *
1858 * Passing the ranges as a reference will avoid an extra allocation.
1859 *
1860 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1861 * existing instance -- note this behavior is not commonly supported in other
1862 * I/O Kit classes, although it is supported here.
1863 */
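/*
 * Re-initialization, sketched for illustration (names are placeholders):
 *
 *   IOAddressRange range = { newAddr, newLen };
 *   ok = gmd->initWithOptions(&range, 1, 0, task,
 *            kIOMemoryTypeVirtual64 | kIODirectionOutIn, NULL);
 *
 * Any wiring left over from a previous prepare() is completed and the prior
 * range storage is released before the new ranges are adopted.
 */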
1864
1865bool
1866IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1867 UInt32 count,
1868 UInt32 offset,
1869 task_t task,
1870 IOOptionBits options,
1871 IOMapper * mapper)
1872{
1873 IOOptionBits type = options & kIOMemoryTypeMask;
1874
1875#ifndef __LP64__
1876 if (task
1877 && (kIOMemoryTypeVirtual == type)
1878 && vm_map_is_64bit(get_task_map(task))
1879 && ((IOVirtualRange *) buffers)->address) {
1880 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1881 return false;
1882 }
1883#endif /* !__LP64__ */
1884
1885 // Grab the original MD's configuration data to initialise the
1886 // arguments to this function.
1887 if (kIOMemoryTypePersistentMD == type) {
1888 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1889 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1890 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1891
1892 // Only accept persistent memory descriptors with valid dataP data.
1893 assert(orig->_rangesCount == 1);
1894 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1895 return false;
1896 }
1897
1898 _memRef = initData->fMemRef; // Grab the new named entry
1899 options = orig->_flags & ~kIOMemoryAsReference;
1900 type = options & kIOMemoryTypeMask;
1901 buffers = orig->_ranges.v;
1902 count = orig->_rangesCount;
1903
1904 // Now grab the original task and whatever mapper was previously used
1905 task = orig->_task;
1906 mapper = dataP->fMapper;
1907
1908 // We are ready to go through the original initialisation now
1909 }
1910
1911 switch (type) {
1912 case kIOMemoryTypeUIO:
1913 case kIOMemoryTypeVirtual:
1914#ifndef __LP64__
1915 case kIOMemoryTypeVirtual64:
1916#endif /* !__LP64__ */
1917 assert(task);
1918 if (!task) {
1919 return false;
1920 }
1921 break;
1922
1923 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1924#ifndef __LP64__
1925 case kIOMemoryTypePhysical64:
1926#endif /* !__LP64__ */
1927 case kIOMemoryTypeUPL:
1928 assert(!task);
1929 break;
1930 default:
1931 return false; /* bad argument */
1932 }
1933
1934 assert(buffers);
1935 assert(count);
1936
1937 /*
1938 * We can check the _initialized instance variable before having ever set
1939 * it to an initial value because I/O Kit guarantees that all our instance
1940 * variables are zeroed on an object's allocation.
1941 */
1942
1943 if (_initialized) {
1944 /*
1945 * An existing memory descriptor is being retargeted to point to
1946 * somewhere else. Clean up our present state.
1947 */
1948 IOOptionBits type = _flags & kIOMemoryTypeMask;
1949 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
1950 while (_wireCount) {
1951 complete();
1952 }
1953 }
1954 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1955 if (kIOMemoryTypeUIO == type) {
1956 uio_free((uio_t) _ranges.v);
1957 }
1958#ifndef __LP64__
1959 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1960 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1961 }
1962#endif /* !__LP64__ */
1963 else {
1964 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1965 }
1966 }
1967
1968 options |= (kIOMemoryRedirected & _flags);
1969 if (!(kIOMemoryRedirected & options)) {
1970 if (_memRef) {
1971 memoryReferenceRelease(_memRef);
1972 _memRef = NULL;
1973 }
1974 if (_mappings) {
1975 _mappings->flushCollection();
1976 }
1977 }
1978 } else {
1979 if (!super::init()) {
1980 return false;
1981 }
1982 _initialized = true;
1983 }
1984
1985 // Grab the appropriate mapper
1986 if (kIOMemoryHostOrRemote & options) {
1987 options |= kIOMemoryMapperNone;
1988 }
1989 if (kIOMemoryMapperNone & options) {
1990 mapper = NULL; // No Mapper
1991 } else if (mapper == kIOMapperSystem) {
1992 IOMapper::checkForSystemMapper();
1993 gIOSystemMapper = mapper = IOMapper::gSystem;
1994 }
1995
1996 // Remove the dynamic internal use flags from the initial setting
1997 options &= ~(kIOMemoryPreparedReadOnly);
1998 _flags = options;
1999 _task = task;
2000
2001#ifndef __LP64__
2002 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
2003#endif /* !__LP64__ */
2004
2005 _dmaReferences = 0;
2006 __iomd_reservedA = 0;
2007 __iomd_reservedB = 0;
2008 _highestPage = 0;
2009
2010 if (kIOMemoryThreadSafe & options) {
2011 if (!_prepareLock) {
2012 _prepareLock = IOLockAlloc();
2013 }
2014 } else if (_prepareLock) {
2015 IOLockFree(_prepareLock);
2016 _prepareLock = NULL;
2017 }
2018
2019 if (kIOMemoryTypeUPL == type) {
2020 ioGMDData *dataP;
2021 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
2022
2023 if (!initMemoryEntries(dataSize, mapper)) {
2024 return false;
2025 }
2026 dataP = getDataP(_memoryEntries);
2027 dataP->fPageCnt = 0;
2028 switch (kIOMemoryDirectionMask & options) {
2029 case kIODirectionOut:
2030 dataP->fDMAAccess = kIODMAMapReadAccess;
2031 break;
2032 case kIODirectionIn:
2033 dataP->fDMAAccess = kIODMAMapWriteAccess;
2034 break;
2035 case kIODirectionNone:
2036 case kIODirectionOutIn:
2037 default:
2038 panic("bad dir for upl 0x%x\n", (int) options);
2039 break;
2040 }
2041 // _wireCount++; // UPLs start out life wired
2042
2043 _length = count;
2044 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
2045
2046 ioPLBlock iopl;
2047 iopl.fIOPL = (upl_t) buffers;
2048 upl_set_referenced(iopl.fIOPL, true);
2049 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
2050
2051 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
2052 panic("short external upl");
2053 }
2054
2055 _highestPage = upl_get_highest_page(iopl.fIOPL);
2056 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2057
 2058 // Set the flag kIOPLOnDevice, which is conveniently equal to 1
2059 iopl.fFlags = pageList->device | kIOPLExternUPL;
2060 if (!pageList->device) {
2061 // Pre-compute the offset into the UPL's page list
2062 pageList = &pageList[atop_32(offset)];
2063 offset &= PAGE_MASK;
2064 }
2065 iopl.fIOMDOffset = 0;
2066 iopl.fMappedPage = 0;
2067 iopl.fPageInfo = (vm_address_t) pageList;
2068 iopl.fPageOffset = offset;
2069 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
2070 } else {
2071 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
2072 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
2073
2074 // Initialize the memory descriptor
2075 if (options & kIOMemoryAsReference) {
2076#ifndef __LP64__
2077 _rangesIsAllocated = false;
2078#endif /* !__LP64__ */
2079
 2080 // Hack assignment to get the buffer arg into _ranges.
 2081 // A direct _ranges = (Ranges) buffers would be preferable, but C++
 2082 // does not allow casting to a union type, so assign the first member.
 2083 // This also initialises the uio & physical ranges.
2084 _ranges.v = (IOVirtualRange *) buffers;
2085 } else {
2086#ifndef __LP64__
2087 _rangesIsAllocated = true;
2088#endif /* !__LP64__ */
2089 switch (type) {
2090 case kIOMemoryTypeUIO:
2091 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
2092 break;
2093
2094#ifndef __LP64__
2095 case kIOMemoryTypeVirtual64:
2096 case kIOMemoryTypePhysical64:
2097 if (count == 1
2098#ifndef __arm__
2099 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
2100#endif
2101 ) {
2102 if (kIOMemoryTypeVirtual64 == type) {
2103 type = kIOMemoryTypeVirtual;
2104 } else {
2105 type = kIOMemoryTypePhysical;
2106 }
2107 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
2108 _rangesIsAllocated = false;
2109 _ranges.v = &_singleRange.v;
2110 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
2111 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
2112 break;
2113 }
2114 _ranges.v64 = IONew(IOAddressRange, count);
2115 if (!_ranges.v64) {
2116 return false;
2117 }
2118 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
2119 break;
2120#endif /* !__LP64__ */
2121 case kIOMemoryTypeVirtual:
2122 case kIOMemoryTypePhysical:
2123 if (count == 1) {
2124 _flags |= kIOMemoryAsReference;
2125#ifndef __LP64__
2126 _rangesIsAllocated = false;
2127#endif /* !__LP64__ */
2128 _ranges.v = &_singleRange.v;
2129 } else {
2130 _ranges.v = IONew(IOVirtualRange, count);
2131 if (!_ranges.v) {
2132 return false;
2133 }
2134 }
2135 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
2136 break;
2137 }
2138 }
2139 _rangesCount = count;
2140
2141 // Find starting address within the vector of ranges
2142 Ranges vec = _ranges;
2143 mach_vm_size_t totalLength = 0;
2144 unsigned int ind, pages = 0;
2145 for (ind = 0; ind < count; ind++) {
2146 mach_vm_address_t addr;
2147 mach_vm_address_t endAddr;
2148 mach_vm_size_t len;
2149
2150 // addr & len are returned by this function
2151 getAddrLenForInd(addr, len, type, vec, ind);
2152 if (_task) {
2153 mach_vm_size_t phys_size;
2154 kern_return_t kret;
2155 kret = vm_map_range_physical_size(get_task_map(_task), addr, len, &phys_size);
2156 if (KERN_SUCCESS != kret) {
2157 break;
2158 }
2159 if (os_add_overflow(pages, atop_64(phys_size), &pages)) {
2160 break;
2161 }
2162 } else {
2163 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
2164 break;
2165 }
2166 if (!(kIOMemoryRemote & options) && (atop_64(endAddr) > UINT_MAX)) {
2167 break;
2168 }
2169 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
2170 break;
2171 }
2172 }
2173 if (os_add_overflow(totalLength, len, &totalLength)) {
2174 break;
2175 }
2176 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2177 uint64_t highPage = atop_64(addr + len - 1);
2178 if ((highPage > _highestPage) && (highPage <= UINT_MAX)) {
2179 _highestPage = (ppnum_t) highPage;
2180 DEBUG4K_IOKIT("offset 0x%x task %p options 0x%x -> _highestPage 0x%x\n", (uint32_t)offset, task, (uint32_t)options, _highestPage);
2181 }
2182 }
2183 }
2184 if ((ind < count)
2185 || (totalLength != ((IOByteCount) totalLength))) {
2186 return false; /* overflow */
2187 }
2188 _length = totalLength;
2189 _pages = pages;
2190
2191 // Auto-prepare memory at creation time.
 2192 // Completion is implied when the descriptor is freed.
2193
2194
2195 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2196 _wireCount++; // Physical MDs are, by definition, wired
2197 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
2198 ioGMDData *dataP;
2199 unsigned dataSize;
2200
2201 if (_pages > atop_64(max_mem)) {
2202 return false;
2203 }
2204
2205 dataSize = computeDataSize(_pages, /* upls */ count * 2);
2206 if (!initMemoryEntries(dataSize, mapper)) {
2207 return false;
2208 }
2209 dataP = getDataP(_memoryEntries);
2210 dataP->fPageCnt = _pages;
2211
2212 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
2213 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
2214 _kernelTag = IOMemoryTag(kernel_map);
2215 if (_kernelTag == gIOSurfaceTag) {
2216 _userTag = VM_MEMORY_IOSURFACE;
2217 }
2218 }
2219
2220 if ((kIOMemoryPersistent & _flags) && !_memRef) {
2221 IOReturn
2222 err = memoryReferenceCreate(0, &_memRef);
2223 if (kIOReturnSuccess != err) {
2224 return false;
2225 }
2226 }
2227
2228 if ((_flags & kIOMemoryAutoPrepare)
2229 && prepare() != kIOReturnSuccess) {
2230 return false;
2231 }
2232 }
2233 }
2234
2235 return true;
2236}
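
/*
 * A minimal usage sketch for the path above (hedged; the address, length and
 * task names below are placeholders). Drivers normally reach this initialiser
 * indirectly through the public factories such as
 * IOMemoryDescriptor::withAddressRange(), and bracket any I/O with
 * prepare()/complete(). Raw-pointer style is shown for brevity; OSSharedPtr
 * callers would skip the explicit release().
 *
 *     IOMemoryDescriptor * md;
 *     md = IOMemoryDescriptor::withAddressRange(clientAddr, clientLen,
 *         kIODirectionOutIn, clientTask);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... program the device / walk the physical segments ...
 *         md->complete();
 *     }
 *     if (md) {
 *         md->release();
 *     }
 */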
2237
2238/*
2239 * free
2240 *
2241 * Free resources.
2242 */
2243void
2244IOGeneralMemoryDescriptor::free()
2245{
2246 IOOptionBits type = _flags & kIOMemoryTypeMask;
2247
2248 if (reserved && reserved->dp.memory) {
2249 LOCK;
2250 reserved->dp.memory = NULL;
2251 UNLOCK;
2252 }
2253 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2254 ioGMDData * dataP;
2255 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
2256 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
2257 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
2258 }
2259 } else {
2260 while (_wireCount) {
2261 complete();
2262 }
2263 }
2264
2265 if (_memoryEntries) {
2266 _memoryEntries.reset();
2267 }
2268
2269 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
2270 if (kIOMemoryTypeUIO == type) {
2271 uio_free((uio_t) _ranges.v);
2272 }
2273#ifndef __LP64__
2274 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
2275 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
2276 }
2277#endif /* !__LP64__ */
2278 else {
2279 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
2280 }
2281
2282 _ranges.v = NULL;
2283 }
2284
2285 if (reserved) {
2286 cleanKernelReserved(reserved);
2287 if (reserved->dp.devicePager) {
2288 // memEntry holds a ref on the device pager which owns reserved
2289 // (IOMemoryDescriptorReserved) so no reserved access after this point
2290 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
2291 } else {
2292 IODelete(reserved, IOMemoryDescriptorReserved, 1);
2293 }
2294 reserved = NULL;
2295 }
2296
2297 if (_memRef) {
2298 memoryReferenceRelease(_memRef);
2299 }
2300 if (_prepareLock) {
2301 IOLockFree(_prepareLock);
2302 }
2303
2304 super::free();
2305}
2306
2307#ifndef __LP64__
2308void
2309IOGeneralMemoryDescriptor::unmapFromKernel()
2310{
2311 panic("IOGMD::unmapFromKernel deprecated");
2312}
2313
2314void
2315IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
2316{
2317 panic("IOGMD::mapIntoKernel deprecated");
2318}
2319#endif /* !__LP64__ */
2320
2321/*
2322 * getDirection:
2323 *
2324 * Get the direction of the transfer.
2325 */
2326IODirection
2327IOMemoryDescriptor::getDirection() const
2328{
2329#ifndef __LP64__
2330 if (_direction) {
2331 return _direction;
2332 }
2333#endif /* !__LP64__ */
2334 return (IODirection) (_flags & kIOMemoryDirectionMask);
2335}
2336
2337/*
2338 * getLength:
2339 *
2340 * Get the length of the transfer (over all ranges).
2341 */
2342IOByteCount
2343IOMemoryDescriptor::getLength() const
2344{
2345 return _length;
2346}
2347
2348void
2349IOMemoryDescriptor::setTag( IOOptionBits tag )
2350{
2351 _tag = tag;
2352}
2353
2354IOOptionBits
2355IOMemoryDescriptor::getTag( void )
2356{
2357 return _tag;
2358}
2359
2360uint64_t
2361IOMemoryDescriptor::getFlags(void)
2362{
2363 return _flags;
2364}
2365
2366#ifndef __LP64__
2367#pragma clang diagnostic push
2368#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2369
2370// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
2371IOPhysicalAddress
2372IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
2373{
2374 addr64_t physAddr = 0;
2375
2376 if (prepare() == kIOReturnSuccess) {
2377 physAddr = getPhysicalSegment64( offset, length );
2378 complete();
2379 }
2380
2381 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
2382}
2383
2384#pragma clang diagnostic pop
2385
2386#endif /* !__LP64__ */
2387
2388IOByteCount
2389IOMemoryDescriptor::readBytes
2390(IOByteCount offset, void *bytes, IOByteCount length)
2391{
2392 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
2393 IOByteCount endoffset;
2394 IOByteCount remaining;
2395
2396
2397 // Check that this entire I/O is within the available range
2398 if ((offset > _length)
2399 || os_add_overflow(length, offset, &endoffset)
2400 || (endoffset > _length)) {
2401 assertf(false, "readBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) offset, (long) length, (long) _length);
2402 return 0;
2403 }
2404 if (offset >= _length) {
2405 return 0;
2406 }
2407
2408 assert(!(kIOMemoryRemote & _flags));
2409 if (kIOMemoryRemote & _flags) {
2410 return 0;
2411 }
2412
2413 if (kIOMemoryThreadSafe & _flags) {
2414 LOCK;
2415 }
2416
2417 remaining = length = min(length, _length - offset);
2418 while (remaining) { // (process another target segment?)
2419 addr64_t srcAddr64;
2420 IOByteCount srcLen;
2421
2422 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
2423 if (!srcAddr64) {
2424 break;
2425 }
2426
2427 // Clip segment length to remaining
2428 if (srcLen > remaining) {
2429 srcLen = remaining;
2430 }
2431
2432 if (srcLen > (UINT_MAX - PAGE_SIZE + 1)) {
2433 srcLen = (UINT_MAX - PAGE_SIZE + 1);
2434 }
2435 copypv(srcAddr64, dstAddr, (unsigned int) srcLen,
2436 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
2437
2438 dstAddr += srcLen;
2439 offset += srcLen;
2440 remaining -= srcLen;
2441 }
2442
2443 if (kIOMemoryThreadSafe & _flags) {
2444 UNLOCK;
2445 }
2446
2447 assert(!remaining);
2448
2449 return length - remaining;
2450}
2451
2452IOByteCount
2453IOMemoryDescriptor::writeBytes
2454(IOByteCount inoffset, const void *bytes, IOByteCount length)
2455{
2456 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
2457 IOByteCount remaining;
2458 IOByteCount endoffset;
2459 IOByteCount offset = inoffset;
2460
2461 assert( !(kIOMemoryPreparedReadOnly & _flags));
2462
2463 // Check that this entire I/O is within the available range
2464 if ((offset > _length)
2465 || os_add_overflow(length, offset, &endoffset)
2466 || (endoffset > _length)) {
2467 assertf(false, "writeBytes exceeds length (0x%lx, 0x%lx) > 0x%lx", (long) inoffset, (long) length, (long) _length);
2468 return 0;
2469 }
2470 if (kIOMemoryPreparedReadOnly & _flags) {
2471 return 0;
2472 }
2473 if (offset >= _length) {
2474 return 0;
2475 }
2476
2477 assert(!(kIOMemoryRemote & _flags));
2478 if (kIOMemoryRemote & _flags) {
2479 return 0;
2480 }
2481
2482 if (kIOMemoryThreadSafe & _flags) {
2483 LOCK;
2484 }
2485
2486 remaining = length = min(length, _length - offset);
2487 while (remaining) { // (process another target segment?)
2488 addr64_t dstAddr64;
2489 IOByteCount dstLen;
2490
2491 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2492 if (!dstAddr64) {
2493 break;
2494 }
2495
2496 // Clip segment length to remaining
2497 if (dstLen > remaining) {
2498 dstLen = remaining;
2499 }
2500
2501 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
2502 dstLen = (UINT_MAX - PAGE_SIZE + 1);
2503 }
2504 if (!srcAddr) {
2505 bzero_phys(dstAddr64, (unsigned int) dstLen);
2506 } else {
2507 copypv(srcAddr, (addr64_t) dstAddr64, (unsigned int) dstLen,
2508 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2509 srcAddr += dstLen;
2510 }
2511 offset += dstLen;
2512 remaining -= dstLen;
2513 }
2514
2515 if (kIOMemoryThreadSafe & _flags) {
2516 UNLOCK;
2517 }
2518
2519 assert(!remaining);
2520
2521#if defined(__x86_64__)
2522 // copypv does not cppvFsnk on intel
2523#else
2524 if (!srcAddr) {
2525 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2526 }
2527#endif
2528
2529 return length - remaining;
2530}
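
/*
 * Hedged example of the bounce-copy interfaces above (the buffer name and
 * sizes are illustrative only). Both readBytes() and writeBytes() copy
 * through the physical pages with copypv(), so the descriptor should be
 * prepared (wired) before they are used on pageable memory.
 *
 *     uint8_t header[64];
 *     if (kIOReturnSuccess == md->prepare()) {
 *         IOByteCount got = md->readBytes(0, header, sizeof(header));
 *         if (got == sizeof(header)) {
 *             // ... inspect or patch the header ...
 *             md->writeBytes(0, header, sizeof(header));
 *         }
 *         md->complete();
 *     }
 */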
2531
2532#ifndef __LP64__
2533void
2534IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
2535{
2536 panic("IOGMD::setPosition deprecated");
2537}
2538#endif /* !__LP64__ */
2539
2540static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2541static volatile SInt64 gIOMDDescriptorID __attribute__((aligned(8))) = (kIODescriptorIDInvalid + 1ULL);
2542
2543uint64_t
2544IOGeneralMemoryDescriptor::getPreparationID( void )
2545{
2546 ioGMDData *dataP;
2547
2548 if (!_wireCount) {
2549 return kIOPreparationIDUnprepared;
2550 }
2551
2552 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2553 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2554 IOMemoryDescriptor::setPreparationID();
2555 return IOMemoryDescriptor::getPreparationID();
2556 }
2557
2558 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2559 return kIOPreparationIDUnprepared;
2560 }
2561
2562 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2563 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2564 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
2565 }
2566 return dataP->fPreparationID;
2567}
2568
2569void
2570IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2571{
2572 if (reserved->creator) {
2573 task_deallocate(reserved->creator);
2574 reserved->creator = NULL;
2575 }
2576}
2577
2578IOMemoryDescriptorReserved *
2579IOMemoryDescriptor::getKernelReserved( void )
2580{
2581 if (!reserved) {
2582 reserved = IONewZero(IOMemoryDescriptorReserved, 1);
2583 }
2584 return reserved;
2585}
2586
2587void
2588IOMemoryDescriptor::setPreparationID( void )
2589{
2590 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2591 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2592 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
2593 }
2594}
2595
2596uint64_t
2597IOMemoryDescriptor::getPreparationID( void )
2598{
2599 if (reserved) {
2600 return reserved->preparationID;
2601 } else {
2602 return kIOPreparationIDUnsupported;
2603 }
2604}
2605
2606void
2607IOMemoryDescriptor::setDescriptorID( void )
2608{
2609 if (getKernelReserved() && (kIODescriptorIDInvalid == reserved->descriptorID)) {
2610 SInt64 newID = OSIncrementAtomic64(&gIOMDDescriptorID);
2611 OSCompareAndSwap64(kIODescriptorIDInvalid, newID, &reserved->descriptorID);
2612 }
2613}
2614
2615uint64_t
2616IOMemoryDescriptor::getDescriptorID( void )
2617{
2618 setDescriptorID();
2619
2620 if (reserved) {
2621 return reserved->descriptorID;
2622 } else {
2623 return kIODescriptorIDInvalid;
2624 }
2625}
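
/*
 * setPreparationID() and setDescriptorID() share a lock-free "assign once"
 * idiom: reserve a fresh value from a global atomic counter, then publish it
 * with a compare-and-swap so racing threads agree on a single winner. A
 * generic sketch of that idiom (the counter and field names are hypothetical):
 *
 *     static volatile SInt64 gExampleIDCounter = 1;
 *
 *     static uint64_t
 *     ExampleAssignIDOnce(volatile UInt64 * field)
 *     {
 *         if (0 == *field) {
 *             SInt64 newID = OSIncrementAtomic64(&gExampleIDCounter);
 *             // Only one CAS can succeed; losers keep the winner's value.
 *             OSCompareAndSwap64(0, newID, field);
 *         }
 *         return *field;
 *     }
 */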
2626
2627IOReturn
2628IOMemoryDescriptor::ktraceEmitPhysicalSegments( void )
2629{
2630 if (!kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_MAPPED))) {
2631 return kIOReturnSuccess;
2632 }
2633
2634 assert(getPreparationID() >= kIOPreparationIDAlwaysPrepared);
2635 if (getPreparationID() < kIOPreparationIDAlwaysPrepared) {
2636 return kIOReturnBadArgument;
2637 }
2638
2639 uint64_t descriptorID = getDescriptorID();
2640 assert(descriptorID != kIODescriptorIDInvalid);
2641 if (getDescriptorID() == kIODescriptorIDInvalid) {
2642 return kIOReturnBadArgument;
2643 }
2644
2645 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_MAPPED), descriptorID, VM_KERNEL_ADDRHIDE(this), getLength());
2646
2647#if __LP64__
2648 static const uint8_t num_segments_page = 8;
2649#else
2650 static const uint8_t num_segments_page = 4;
2651#endif
2652 static const uint8_t num_segments_long = 2;
2653
2654 IOPhysicalAddress segments_page[num_segments_page];
2655 IOPhysicalRange segments_long[num_segments_long];
2656 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2657 memset(segments_long, 0, sizeof(segments_long));
2658
2659 uint8_t segment_page_idx = 0;
2660 uint8_t segment_long_idx = 0;
2661
2662 IOPhysicalRange physical_segment;
2663 for (IOByteCount offset = 0; offset < getLength(); offset += physical_segment.length) {
2664 physical_segment.address = getPhysicalSegment(offset, &physical_segment.length);
2665
2666 if (physical_segment.length == 0) {
2667 break;
2668 }
2669
2670 /**
2671 * Most IOMemoryDescriptors are made up of many individual physically discontiguous pages. To optimize for trace
2672 * buffer memory, pack segment events according to the following.
2673 *
2674 * Mappings must be emitted in ascending order starting from offset 0. Mappings can be associated with the previous
 2675 * IOMDPA_MAPPED event emitted by the current thread_id.
2676 *
2677 * IOMDPA_SEGMENTS_PAGE = up to 8 virtually contiguous page aligned mappings of PAGE_SIZE length
2678 * - (ppn_0 << 32 | ppn_1), ..., (ppn_6 << 32 | ppn_7)
2679 * - unmapped pages will have a ppn of MAX_INT_32
2680 * IOMDPA_SEGMENTS_LONG = up to 2 virtually contiguous mappings of variable length
 2681 * - address_0, length_0, address_1, length_1
2682 * - unmapped pages will have an address of 0
2683 *
2684 * During each iteration do the following depending on the length of the mapping:
2685 * 1. add the current segment to the appropriate queue of pending segments
 2686 * 2. check if we are operating on the same type of segment (PAGE/LONG) as the previous pass
 2687 * 2a. if FALSE, emit and reset all events in the previous queue
 2688 * 3. check if we have filled up the current queue of pending events
 2689 * 3a. if TRUE, emit and reset all events in the pending queue
 2690 * 4. after completing all iterations, emit events in the current queue
2691 */
2692
2693 bool emit_page = false;
2694 bool emit_long = false;
2695 if ((physical_segment.address & PAGE_MASK) == 0 && physical_segment.length == PAGE_SIZE) {
2696 segments_page[segment_page_idx] = physical_segment.address;
2697 segment_page_idx++;
2698
2699 emit_long = segment_long_idx != 0;
2700 emit_page = segment_page_idx == num_segments_page;
2701
2702 if (os_unlikely(emit_long)) {
2703 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2704 segments_long[0].address, segments_long[0].length,
2705 segments_long[1].address, segments_long[1].length);
2706 }
2707
2708 if (os_unlikely(emit_page)) {
2709#if __LP64__
2710 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2711 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2712 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2713 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2714 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2715#else
2716 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
 2717 (ppnum_t) atop_32(segments_page[0]),
 2718 (ppnum_t) atop_32(segments_page[1]),
 2719 (ppnum_t) atop_32(segments_page[2]),
 2720 (ppnum_t) atop_32(segments_page[3]));
2721#endif
2722 }
2723 } else {
2724 segments_long[segment_long_idx] = physical_segment;
2725 segment_long_idx++;
2726
2727 emit_page = segment_page_idx != 0;
2728 emit_long = segment_long_idx == num_segments_long;
2729
2730 if (os_unlikely(emit_page)) {
2731#if __LP64__
2732 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2733 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2734 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2735 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2736 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2737#else
2738 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
 2739 (ppnum_t) atop_32(segments_page[0]),
 2740 (ppnum_t) atop_32(segments_page[1]),
 2741 (ppnum_t) atop_32(segments_page[2]),
 2742 (ppnum_t) atop_32(segments_page[3]));
2743#endif
2744 }
2745
2746 if (emit_long) {
2747 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2748 segments_long[0].address, segments_long[0].length,
2749 segments_long[1].address, segments_long[1].length);
2750 }
2751 }
2752
2753 if (os_unlikely(emit_page)) {
2754 memset(segments_page, UINT32_MAX, sizeof(segments_page));
2755 segment_page_idx = 0;
2756 }
2757
2758 if (os_unlikely(emit_long)) {
2759 memset(segments_long, 0, sizeof(segments_long));
2760 segment_long_idx = 0;
2761 }
2762 }
2763
2764 if (segment_page_idx != 0) {
2765 assert(segment_long_idx == 0);
2766#if __LP64__
2767 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
2768 ((uintptr_t) atop_64(segments_page[0]) << 32) | (ppnum_t) atop_64(segments_page[1]),
2769 ((uintptr_t) atop_64(segments_page[2]) << 32) | (ppnum_t) atop_64(segments_page[3]),
2770 ((uintptr_t) atop_64(segments_page[4]) << 32) | (ppnum_t) atop_64(segments_page[5]),
2771 ((uintptr_t) atop_64(segments_page[6]) << 32) | (ppnum_t) atop_64(segments_page[7]));
2772#else
2773 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_PAGE),
 2774 (ppnum_t) atop_32(segments_page[0]),
 2775 (ppnum_t) atop_32(segments_page[1]),
 2776 (ppnum_t) atop_32(segments_page[2]),
 2777 (ppnum_t) atop_32(segments_page[3]));
2778#endif
2779 } else if (segment_long_idx != 0) {
2780 assert(segment_page_idx == 0);
2781 IOTimeStampConstant(IODBG_IOMDPA(IOMDPA_SEGMENTS_LONG),
2782 segments_long[0].address, segments_long[0].length,
2783 segments_long[1].address, segments_long[1].length);
2784 }
2785
2786 return kIOReturnSuccess;
2787}
2788
2789void
2790IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
2791{
2792 _kernelTag = (vm_tag_t) kernelTag;
2793 _userTag = (vm_tag_t) userTag;
2794}
2795
2796uint32_t
2797IOMemoryDescriptor::getVMTag(vm_map_t map)
2798{
2799 if (vm_kernel_map_is_kernel(map)) {
2800 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2801 return (uint32_t) _kernelTag;
2802 }
2803 } else {
2804 if (VM_KERN_MEMORY_NONE != _userTag) {
2805 return (uint32_t) _userTag;
2806 }
2807 }
2808 return IOMemoryTag(map);
2809}
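
/*
 * The VM tags resolved above feed memory accounting: the vm_tag_t labels
 * wired kernel allocations, while the VM_MEMORY_* value labels user mappings.
 * A hedged sketch of how a driver might label a descriptor before wiring it
 * (the specific tag choices are illustrative):
 *
 *     md->setVMTags(VM_KERN_MEMORY_IOKIT, VM_MEMORY_IOKIT);
 *     md->prepare();   // wired pages are now charged to the kernel IOKit tag
 *     // ... later ...
 *     md->complete();
 */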
2810
2811IOReturn
2812IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2813{
2814 IOReturn err = kIOReturnSuccess;
2815 DMACommandOps params;
2816 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2817 ioGMDData *dataP;
2818
2819 params = (op & ~kIOMDDMACommandOperationMask & op);
2820 op &= kIOMDDMACommandOperationMask;
2821
2822 if (kIOMDDMAMap == op) {
2823 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2824 return kIOReturnUnderrun;
2825 }
2826
2827 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2828
2829 if (!_memoryEntries
2830 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2831 return kIOReturnNoMemory;
2832 }
2833
2834 if (_memoryEntries && data->fMapper) {
2835 bool remap, keepMap;
2836 dataP = getDataP(_memoryEntries);
2837
2838 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2839 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2840 }
2841 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2842 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2843 }
2844
2845 keepMap = (data->fMapper == gIOSystemMapper);
2846 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2847
2848 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2849 IOLockLock(_prepareLock);
2850 }
2851
2852 remap = (!keepMap);
2853 remap |= (dataP->fDMAMapNumAddressBits < 64)
2854 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2855 remap |= (dataP->fDMAMapAlignment > page_size);
2856
2857 if (remap || !dataP->fMappedBaseValid) {
2858 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2859 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2860 dataP->fMappedBase = data->fAlloc;
2861 dataP->fMappedBaseValid = true;
2862 dataP->fMappedLength = data->fAllocLength;
2863 data->fAllocLength = 0; // IOMD owns the alloc now
2864 }
2865 } else {
2866 data->fAlloc = dataP->fMappedBase;
2867 data->fAllocLength = 0; // give out IOMD map
2868 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2869 }
2870
2871 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2872 IOLockUnlock(_prepareLock);
2873 }
2874 }
2875 return err;
2876 }
2877 if (kIOMDDMAUnmap == op) {
2878 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2879 return kIOReturnUnderrun;
2880 }
2881 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2882
2883 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2884
2885 return kIOReturnSuccess;
2886 }
2887
2888 if (kIOMDAddDMAMapSpec == op) {
2889 if (dataSize < sizeof(IODMAMapSpecification)) {
2890 return kIOReturnUnderrun;
2891 }
2892
2893 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2894
2895 if (!_memoryEntries
2896 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2897 return kIOReturnNoMemory;
2898 }
2899
2900 if (_memoryEntries) {
2901 dataP = getDataP(_memoryEntries);
2902 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2903 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2904 }
2905 if (data->alignment > dataP->fDMAMapAlignment) {
2906 dataP->fDMAMapAlignment = data->alignment;
2907 }
2908 }
2909 return kIOReturnSuccess;
2910 }
2911
2912 if (kIOMDGetCharacteristics == op) {
2913 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2914 return kIOReturnUnderrun;
2915 }
2916
2917 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2918 data->fLength = _length;
2919 data->fSGCount = _rangesCount;
2920 data->fPages = _pages;
2921 data->fDirection = getDirection();
2922 if (!_wireCount) {
2923 data->fIsPrepared = false;
2924 } else {
2925 data->fIsPrepared = true;
2926 data->fHighestPage = _highestPage;
2927 if (_memoryEntries) {
2928 dataP = getDataP(_memoryEntries);
2929 ioPLBlock *ioplList = getIOPLList(dataP);
2930 UInt count = getNumIOPL(_memoryEntries, dataP);
2931 if (count == 1) {
2932 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2933 }
2934 }
2935 }
2936
2937 return kIOReturnSuccess;
2938 } else if (kIOMDDMAActive == op) {
2939 if (params) {
2940 int16_t prior;
2941 prior = OSAddAtomic16(1, &md->_dmaReferences);
2942 if (!prior) {
2943 md->_mapName = NULL;
2944 }
2945 } else {
2946 if (md->_dmaReferences) {
2947 OSAddAtomic16(-1, &md->_dmaReferences);
2948 } else {
2949 panic("_dmaReferences underflow");
2950 }
2951 }
2952 } else if (kIOMDWalkSegments != op) {
2953 return kIOReturnBadArgument;
2954 }
2955
2956 // Get the next segment
2957 struct InternalState {
2958 IOMDDMAWalkSegmentArgs fIO;
2959 mach_vm_size_t fOffset2Index;
2960 mach_vm_size_t fNextOffset;
2961 UInt fIndex;
2962 } *isP;
2963
2964 // Find the next segment
2965 if (dataSize < sizeof(*isP)) {
2966 return kIOReturnUnderrun;
2967 }
2968
2969 isP = (InternalState *) vData;
2970 uint64_t offset = isP->fIO.fOffset;
2971 uint8_t mapped = isP->fIO.fMapped;
2972 uint64_t mappedBase;
2973
2974 if (mapped && (kIOMemoryRemote & _flags)) {
2975 return kIOReturnNotAttached;
2976 }
2977
2978 if (IOMapper::gSystem && mapped
2979 && (!(kIOMemoryHostOnly & _flags))
2980 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2981// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2982 if (!_memoryEntries
2983 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2984 return kIOReturnNoMemory;
2985 }
2986
2987 dataP = getDataP(_memoryEntries);
2988 if (dataP->fMapper) {
2989 IODMAMapSpecification mapSpec;
2990 bzero(&mapSpec, sizeof(mapSpec));
2991 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2992 mapSpec.alignment = dataP->fDMAMapAlignment;
2993 err = md->dmaMap(dataP->fMapper, md, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2994 if (kIOReturnSuccess != err) {
2995 return err;
2996 }
2997 dataP->fMappedBaseValid = true;
2998 }
2999 }
3000
3001 if (mapped) {
3002 if (IOMapper::gSystem
3003 && (!(kIOMemoryHostOnly & _flags))
3004 && _memoryEntries
3005 && (dataP = getDataP(_memoryEntries))
3006 && dataP->fMappedBaseValid) {
3007 mappedBase = dataP->fMappedBase;
3008 } else {
3009 mapped = 0;
3010 }
3011 }
3012
3013 if (offset >= _length) {
3014 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
3015 }
3016
3017 // Validate the previous offset
3018 UInt ind;
3019 mach_vm_size_t off2Ind = isP->fOffset2Index;
3020 if (!params
3021 && offset
3022 && (offset == isP->fNextOffset || off2Ind <= offset)) {
3023 ind = isP->fIndex;
3024 } else {
3025 ind = off2Ind = 0; // Start from beginning
3026 }
3027 mach_vm_size_t length;
3028 UInt64 address;
3029
3030 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
3031 // Physical address based memory descriptor
3032 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
3033
3034 // Find the range after the one that contains the offset
3035 mach_vm_size_t len;
3036 for (len = 0; off2Ind <= offset; ind++) {
3037 len = physP[ind].length;
3038 off2Ind += len;
3039 }
3040
3041 // Calculate length within range and starting address
3042 length = off2Ind - offset;
3043 address = physP[ind - 1].address + len - length;
3044
 3045 if (mapped) {
3046 address = mappedBase + offset;
3047 } else {
3048 // see how far we can coalesce ranges
3049 while (ind < _rangesCount && address + length == physP[ind].address) {
3050 len = physP[ind].length;
3051 length += len;
3052 off2Ind += len;
3053 ind++;
3054 }
3055 }
3056
3057 // correct contiguous check overshoot
3058 ind--;
3059 off2Ind -= len;
3060 }
3061#ifndef __LP64__
3062 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
3063 // Physical address based memory descriptor
3064 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
3065
3066 // Find the range after the one that contains the offset
3067 mach_vm_size_t len;
3068 for (len = 0; off2Ind <= offset; ind++) {
3069 len = physP[ind].length;
3070 off2Ind += len;
3071 }
3072
3073 // Calculate length within range and starting address
3074 length = off2Ind - offset;
3075 address = physP[ind - 1].address + len - length;
3076
 3077 if (mapped) {
3078 address = mappedBase + offset;
3079 } else {
3080 // see how far we can coalesce ranges
3081 while (ind < _rangesCount && address + length == physP[ind].address) {
3082 len = physP[ind].length;
3083 length += len;
3084 off2Ind += len;
3085 ind++;
3086 }
3087 }
3088 // correct contiguous check overshoot
3089 ind--;
3090 off2Ind -= len;
3091 }
3092#endif /* !__LP64__ */
3093 else {
3094 do {
3095 if (!_wireCount) {
3096 panic("IOGMD: not wired for the IODMACommand");
3097 }
3098
3099 assert(_memoryEntries);
3100
3101 dataP = getDataP(_memoryEntries);
3102 const ioPLBlock *ioplList = getIOPLList(dataP);
3103 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
3104 upl_page_info_t *pageList = getPageList(dataP);
3105
3106 assert(numIOPLs > 0);
3107
3108 // Scan through iopl info blocks looking for block containing offset
3109 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
3110 ind++;
3111 }
3112
3113 // Go back to actual range as search goes past it
3114 ioPLBlock ioplInfo = ioplList[ind - 1];
3115 off2Ind = ioplInfo.fIOMDOffset;
3116
3117 if (ind < numIOPLs) {
3118 length = ioplList[ind].fIOMDOffset;
3119 } else {
3120 length = _length;
3121 }
3122 length -= offset; // Remainder within iopl
3123
3124 // Subtract offset till this iopl in total list
3125 offset -= off2Ind;
3126
3127 // If a mapped address is requested and this is a pre-mapped IOPL
 3128 // then we just need to compute an offset relative to the mapped base.
3129 if (mapped) {
3130 offset += (ioplInfo.fPageOffset & PAGE_MASK);
3131 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
 3132 continue; // Done; leave the do/while(false) now
3133 }
3134
3135 // The offset is rebased into the current iopl.
3136 // Now add the iopl 1st page offset.
3137 offset += ioplInfo.fPageOffset;
3138
3139 // For external UPLs the fPageInfo field points directly to
3140 // the upl's upl_page_info_t array.
3141 if (ioplInfo.fFlags & kIOPLExternUPL) {
3142 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
3143 } else {
3144 pageList = &pageList[ioplInfo.fPageInfo];
3145 }
3146
3147 // Check for direct device non-paged memory
3148 if (ioplInfo.fFlags & kIOPLOnDevice) {
3149 address = ptoa_64(pageList->phys_addr) + offset;
 3150 continue; // Done; leave the do/while(false) now
3151 }
3152
 3153 // Now we need to compute the index into the pageList
3154 UInt pageInd = atop_32(offset);
3155 offset &= PAGE_MASK;
3156
3157 // Compute the starting address of this segment
3158 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
3159 if (!pageAddr) {
3160 panic("!pageList phys_addr");
3161 }
3162
3163 address = ptoa_64(pageAddr) + offset;
3164
 3165 // length is currently set to the length of the remainder of the iopl.
 3166 // We need to check that the remainder of the iopl is contiguous.
 3167 // This is indicated by pageList[pageInd].phys_addr being sequential.
3168 IOByteCount contigLength = PAGE_SIZE - offset;
3169 while (contigLength < length
3170 && ++pageAddr == pageList[++pageInd].phys_addr) {
3171 contigLength += PAGE_SIZE;
3172 }
3173
3174 if (contigLength < length) {
3175 length = contigLength;
3176 }
3177
3178
3179 assert(address);
3180 assert(length);
3181 } while (false);
3182 }
3183
3184 // Update return values and state
3185 isP->fIO.fIOVMAddr = address;
3186 isP->fIO.fLength = length;
3187 isP->fIndex = ind;
3188 isP->fOffset2Index = off2Ind;
3189 isP->fNextOffset = isP->fIO.fOffset + length;
3190
3191 return kIOReturnSuccess;
3192}
3193
3194addr64_t
3195IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3196{
3197 IOReturn ret;
3198 mach_vm_address_t address = 0;
3199 mach_vm_size_t length = 0;
3200 IOMapper * mapper = gIOSystemMapper;
3201 IOOptionBits type = _flags & kIOMemoryTypeMask;
3202
3203 if (lengthOfSegment) {
3204 *lengthOfSegment = 0;
3205 }
3206
3207 if (offset >= _length) {
3208 return 0;
3209 }
3210
3211 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
3212 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
3213 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
3214 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
3215
3216 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
3217 unsigned rangesIndex = 0;
3218 Ranges vec = _ranges;
3219 mach_vm_address_t addr;
3220
3221 // Find starting address within the vector of ranges
3222 for (;;) {
3223 getAddrLenForInd(addr, length, type, vec, rangesIndex);
3224 if (offset < length) {
3225 break;
3226 }
3227 offset -= length; // (make offset relative)
3228 rangesIndex++;
3229 }
3230
3231 // Now that we have the starting range,
3232 // lets find the last contiguous range
3233 addr += offset;
3234 length -= offset;
3235
3236 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
3237 mach_vm_address_t newAddr;
3238 mach_vm_size_t newLen;
3239
3240 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
3241 if (addr + length != newAddr) {
3242 break;
3243 }
3244 length += newLen;
3245 }
3246 if (addr) {
3247 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
3248 }
3249 } else {
3250 IOMDDMAWalkSegmentState _state;
3251 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
3252
3253 state->fOffset = offset;
3254 state->fLength = _length - offset;
3255 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
3256
3257 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
3258
3259 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
3260 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
3261 ret, this, state->fOffset,
3262 state->fIOVMAddr, state->fLength);
3263 }
3264 if (kIOReturnSuccess == ret) {
3265 address = state->fIOVMAddr;
3266 length = state->fLength;
3267 }
3268
3269 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
3270 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
3271
3272 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
3273 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
3274 addr64_t origAddr = address;
3275 IOByteCount origLen = length;
3276
3277 address = mapper->mapToPhysicalAddress(origAddr);
3278 length = page_size - (address & (page_size - 1));
3279 while ((length < origLen)
3280 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
3281 length += page_size;
3282 }
3283 if (length > origLen) {
3284 length = origLen;
3285 }
3286 }
3287 }
3288 }
3289
3290 if (!address) {
3291 length = 0;
3292 }
3293
3294 if (lengthOfSegment) {
3295 *lengthOfSegment = length;
3296 }
3297
3298 return address;
3299}
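
/*
 * Typical consumption of getPhysicalSegment(): walk the descriptor segment by
 * segment until the whole length has been covered. A minimal sketch (the
 * descriptor "md" and its preparation are assumed to be handled by the
 * caller; kIOMemoryMapperNone asks for CPU physical addresses):
 *
 *     IOByteCount offset = 0;
 *     while (offset < md->getLength()) {
 *         IOByteCount segLen;
 *         addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen,
 *             kIOMemoryMapperNone);
 *         if (!segAddr || !segLen) {
 *             break;
 *         }
 *         // ... hand (segAddr, segLen) to the hardware or log it ...
 *         offset += segLen;
 *     }
 */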
3300
3301#ifndef __LP64__
3302#pragma clang diagnostic push
3303#pragma clang diagnostic ignored "-Wdeprecated-declarations"
3304
3305addr64_t
3306IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
3307{
3308 addr64_t address = 0;
3309
3310 if (options & _kIOMemorySourceSegment) {
3311 address = getSourceSegment(offset, lengthOfSegment);
3312 } else if (options & kIOMemoryMapperNone) {
3313 address = getPhysicalSegment64(offset, lengthOfSegment);
3314 } else {
3315 address = getPhysicalSegment(offset, lengthOfSegment);
3316 }
3317
3318 return address;
3319}
3320#pragma clang diagnostic pop
3321
3322addr64_t
3323IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3324{
3325 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
3326}
3327
3328IOPhysicalAddress
3329IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3330{
3331 addr64_t address = 0;
3332 IOByteCount length = 0;
3333
3334 address = getPhysicalSegment(offset, lengthOfSegment, 0);
3335
3336 if (lengthOfSegment) {
3337 length = *lengthOfSegment;
3338 }
3339
3340 if ((address + length) > 0x100000000ULL) {
3341 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
3342 address, (long) length, (getMetaClass())->getClassName());
3343 }
3344
3345 return (IOPhysicalAddress) address;
3346}
3347
3348addr64_t
3349IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
3350{
3351 IOPhysicalAddress phys32;
3352 IOByteCount length;
3353 addr64_t phys64;
3354 IOMapper * mapper = NULL;
3355
3356 phys32 = getPhysicalSegment(offset, lengthOfSegment);
3357 if (!phys32) {
3358 return 0;
3359 }
3360
3361 if (gIOSystemMapper) {
3362 mapper = gIOSystemMapper;
3363 }
3364
3365 if (mapper) {
3366 IOByteCount origLen;
3367
3368 phys64 = mapper->mapToPhysicalAddress(phys32);
3369 origLen = *lengthOfSegment;
3370 length = page_size - (phys64 & (page_size - 1));
3371 while ((length < origLen)
3372 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
3373 length += page_size;
3374 }
3375 if (length > origLen) {
3376 length = origLen;
3377 }
3378
3379 *lengthOfSegment = length;
3380 } else {
3381 phys64 = (addr64_t) phys32;
3382 }
3383
3384 return phys64;
3385}
3386
3387IOPhysicalAddress
3388IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3389{
3390 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
3391}
3392
3393IOPhysicalAddress
3394IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
3395{
3396 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
3397}
3398
3399#pragma clang diagnostic push
3400#pragma clang diagnostic ignored "-Wdeprecated-declarations"
3401
3402void *
3403IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3404 IOByteCount * lengthOfSegment)
3405{
3406 if (_task == kernel_task) {
3407 return (void *) getSourceSegment(offset, lengthOfSegment);
3408 } else {
3409 panic("IOGMD::getVirtualSegment deprecated");
3410 }
3411
3412 return NULL;
3413}
3414#pragma clang diagnostic pop
3415#endif /* !__LP64__ */
3416
3417IOReturn
3418IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3419{
3420 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
3421 DMACommandOps params;
3422 IOReturn err;
3423
3424 params = (op & ~kIOMDDMACommandOperationMask & op);
3425 op &= kIOMDDMACommandOperationMask;
3426
3427 if (kIOMDGetCharacteristics == op) {
3428 if (dataSize < sizeof(IOMDDMACharacteristics)) {
3429 return kIOReturnUnderrun;
3430 }
3431
3432 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3433 data->fLength = getLength();
3434 data->fSGCount = 0;
3435 data->fDirection = getDirection();
 3436 data->fIsPrepared = true; // Assume prepared - the safe default
3437 } else if (kIOMDWalkSegments == op) {
3438 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
3439 return kIOReturnUnderrun;
3440 }
3441
3442 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
3443 IOByteCount offset = (IOByteCount) data->fOffset;
3444 IOPhysicalLength length, nextLength;
3445 addr64_t addr, nextAddr;
3446
3447 if (data->fMapped) {
3448 panic("fMapped %p %s %qx\n", this, getMetaClass()->getClassName(), (uint64_t) getLength());
3449 }
3450 addr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
3451 offset += length;
3452 while (offset < getLength()) {
3453 nextAddr = md->getPhysicalSegment(offset, &nextLength, kIOMemoryMapperNone);
3454 if ((addr + length) != nextAddr) {
3455 break;
3456 }
3457 length += nextLength;
3458 offset += nextLength;
3459 }
3460 data->fIOVMAddr = addr;
3461 data->fLength = length;
3462 } else if (kIOMDAddDMAMapSpec == op) {
3463 return kIOReturnUnsupported;
3464 } else if (kIOMDDMAMap == op) {
3465 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3466 return kIOReturnUnderrun;
3467 }
3468 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3469
3470 err = md->dmaMap(data->fMapper, md, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
3471
3472 return err;
3473 } else if (kIOMDDMAUnmap == op) {
3474 if (dataSize < sizeof(IOMDDMAMapArgs)) {
3475 return kIOReturnUnderrun;
3476 }
3477 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
3478
3479 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
3480
3481 return kIOReturnSuccess;
3482 } else {
3483 return kIOReturnBadArgument;
3484 }
3485
3486 return kIOReturnSuccess;
3487}
3488
3489IOReturn
3490IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
3491 IOOptionBits * oldState )
3492{
3493 IOReturn err = kIOReturnSuccess;
3494
3495 vm_purgable_t control;
3496 int state;
3497
3498 assert(!(kIOMemoryRemote & _flags));
3499 if (kIOMemoryRemote & _flags) {
3500 return kIOReturnNotAttached;
3501 }
3502
3503 if (_memRef) {
3504 err = super::setPurgeable(newState, oldState);
3505 } else {
3506 if (kIOMemoryThreadSafe & _flags) {
3507 LOCK;
3508 }
3509 do{
3510 // Find the appropriate vm_map for the given task
3511 vm_map_t curMap;
3512 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3513 err = kIOReturnNotReady;
3514 break;
3515 } else if (!_task) {
3516 err = kIOReturnUnsupported;
3517 break;
3518 } else {
3519 curMap = get_task_map(_task);
3520 if (NULL == curMap) {
3521 err = KERN_INVALID_ARGUMENT;
3522 break;
3523 }
3524 }
3525
3526 // can only do one range
3527 Ranges vec = _ranges;
3528 IOOptionBits type = _flags & kIOMemoryTypeMask;
3529 mach_vm_address_t addr;
3530 mach_vm_size_t len;
3531 getAddrLenForInd(addr, len, type, vec, 0);
3532
3533 err = purgeableControlBits(newState, &control, &state);
3534 if (kIOReturnSuccess != err) {
3535 break;
3536 }
3537 err = vm_map_purgable_control(curMap, addr, control, &state);
3538 if (oldState) {
3539 if (kIOReturnSuccess == err) {
3540 err = purgeableStateBits(&state);
3541 *oldState = state;
3542 }
3543 }
3544 }while (false);
3545 if (kIOMemoryThreadSafe & _flags) {
3546 UNLOCK;
3547 }
3548 }
3549
3550 return err;
3551}
3552
3553IOReturn
3554IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
3555 IOOptionBits * oldState )
3556{
3557 IOReturn err = kIOReturnNotReady;
3558
3559 if (kIOMemoryThreadSafe & _flags) {
3560 LOCK;
3561 }
3562 if (_memRef) {
3563 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
3564 }
3565 if (kIOMemoryThreadSafe & _flags) {
3566 UNLOCK;
3567 }
3568
3569 return err;
3570}
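
/*
 * Hedged example of driving the purgeable controls above. The descriptor must
 * cover purgeable memory (for instance an IOBufferMemoryDescriptor created
 * with kIOMemoryPurgeable); the surrounding logic is illustrative only.
 *
 *     IOOptionBits oldState;
 *     // Cache is idle: let the VM reclaim it under pressure.
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *     // ... later, before reusing the cache ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState) {
 *         // The contents were discarded while volatile; regenerate them.
 *     }
 */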
3571
3572IOReturn
3573IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
3574 int newLedgerTag,
3575 IOOptionBits newLedgerOptions )
3576{
3577 IOReturn err = kIOReturnSuccess;
3578
3579 assert(!(kIOMemoryRemote & _flags));
3580 if (kIOMemoryRemote & _flags) {
3581 return kIOReturnNotAttached;
3582 }
3583
3584 if (iokit_iomd_setownership_enabled == FALSE) {
3585 return kIOReturnUnsupported;
3586 }
3587
3588 if (_memRef) {
3589 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3590 } else {
3591 err = kIOReturnUnsupported;
3592 }
3593
3594 return err;
3595}
3596
3597IOReturn
3598IOMemoryDescriptor::setOwnership( task_t newOwner,
3599 int newLedgerTag,
3600 IOOptionBits newLedgerOptions )
3601{
3602 IOReturn err = kIOReturnNotReady;
3603
3604 assert(!(kIOMemoryRemote & _flags));
3605 if (kIOMemoryRemote & _flags) {
3606 return kIOReturnNotAttached;
3607 }
3608
3609 if (iokit_iomd_setownership_enabled == FALSE) {
3610 return kIOReturnUnsupported;
3611 }
3612
3613 if (kIOMemoryThreadSafe & _flags) {
3614 LOCK;
3615 }
3616 if (_memRef) {
3617 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
3618 } else {
3619 IOMultiMemoryDescriptor * mmd;
3620 IOSubMemoryDescriptor * smd;
3621 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3622 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3623 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3624 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
3625 }
3626 }
3627 if (kIOMemoryThreadSafe & _flags) {
3628 UNLOCK;
3629 }
3630
3631 return err;
3632}
3633
3634
3635uint64_t
3636IOMemoryDescriptor::getDMAMapLength(uint64_t * offset)
3637{
3638 uint64_t length;
3639
3640 if (_memRef) {
3641 length = IOGeneralMemoryDescriptor::memoryReferenceGetDMAMapLength(_memRef, offset);
3642 } else {
3643 IOByteCount iterate, segLen;
3644 IOPhysicalAddress sourceAddr, sourceAlign;
3645
3646 if (kIOMemoryThreadSafe & _flags) {
3647 LOCK;
3648 }
3649 length = 0;
3650 iterate = 0;
3651 while ((sourceAddr = getPhysicalSegment(iterate, &segLen, _kIOMemorySourceSegment))) {
3652 sourceAlign = (sourceAddr & page_mask);
3653 if (offset && !iterate) {
3654 *offset = sourceAlign;
3655 }
3656 length += round_page(sourceAddr + segLen) - trunc_page(sourceAddr);
3657 iterate += segLen;
3658 }
3659 if (kIOMemoryThreadSafe & _flags) {
3660 UNLOCK;
3661 }
3662 }
3663
3664 return length;
3665}
3666
3667
3668IOReturn
3669IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
3670 IOByteCount * dirtyPageCount )
3671{
3672 IOReturn err = kIOReturnNotReady;
3673
3674 assert(!(kIOMemoryRemote & _flags));
3675 if (kIOMemoryRemote & _flags) {
3676 return kIOReturnNotAttached;
3677 }
3678
3679 if (kIOMemoryThreadSafe & _flags) {
3680 LOCK;
3681 }
3682 if (_memRef) {
3683 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3684 } else {
3685 IOMultiMemoryDescriptor * mmd;
3686 IOSubMemoryDescriptor * smd;
3687 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3688 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3689 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3690 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3691 }
3692 }
3693 if (kIOMemoryThreadSafe & _flags) {
3694 UNLOCK;
3695 }
3696
3697 return err;
3698}
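
/*
 * Small diagnostic sketch of getPageCounts() (the log message is
 * illustrative; both counts are returned in pages):
 *
 *     IOByteCount resident, dirty;
 *     if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *         IOLog("md %p: %llu resident pages, %llu dirty\n", md,
 *             (unsigned long long) resident, (unsigned long long) dirty);
 *     }
 */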
3699
3700
3701#if defined(__arm__) || defined(__arm64__)
3702extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3703extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3704#else /* defined(__arm__) || defined(__arm64__) */
3705extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3706extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
3707#endif /* defined(__arm__) || defined(__arm64__) */
3708
3709static void
3710SetEncryptOp(addr64_t pa, unsigned int count)
3711{
3712 ppnum_t page, end;
3713
3714 page = (ppnum_t) atop_64(round_page_64(pa));
3715 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3716 for (; page < end; page++) {
3717 pmap_clear_noencrypt(page);
3718 }
3719}
3720
3721static void
3722ClearEncryptOp(addr64_t pa, unsigned int count)
3723{
3724 ppnum_t page, end;
3725
3726 page = (ppnum_t) atop_64(round_page_64(pa));
3727 end = (ppnum_t) atop_64(trunc_page_64(pa + count));
3728 for (; page < end; page++) {
3729 pmap_set_noencrypt(page);
3730 }
3731}
3732
3733IOReturn
3734IOMemoryDescriptor::performOperation( IOOptionBits options,
3735 IOByteCount offset, IOByteCount length )
3736{
3737 IOByteCount remaining;
3738 unsigned int res;
3739 void (*func)(addr64_t pa, unsigned int count) = NULL;
3740#if defined(__arm__) || defined(__arm64__)
3741 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
3742#endif
3743
3744 assert(!(kIOMemoryRemote & _flags));
3745 if (kIOMemoryRemote & _flags) {
3746 return kIOReturnNotAttached;
3747 }
3748
3749 switch (options) {
3750 case kIOMemoryIncoherentIOFlush:
3751#if defined(__arm__) || defined(__arm64__)
3752 func_ext = &dcache_incoherent_io_flush64;
3753#if __ARM_COHERENT_IO__
3754 func_ext(0, 0, 0, &res);
3755 return kIOReturnSuccess;
3756#else /* __ARM_COHERENT_IO__ */
3757 break;
3758#endif /* __ARM_COHERENT_IO__ */
3759#else /* defined(__arm__) || defined(__arm64__) */
3760 func = &dcache_incoherent_io_flush64;
3761 break;
3762#endif /* defined(__arm__) || defined(__arm64__) */
3763 case kIOMemoryIncoherentIOStore:
3764#if defined(__arm__) || defined(__arm64__)
3765 func_ext = &dcache_incoherent_io_store64;
3766#if __ARM_COHERENT_IO__
3767 func_ext(0, 0, 0, &res);
3768 return kIOReturnSuccess;
3769#else /* __ARM_COHERENT_IO__ */
3770 break;
3771#endif /* __ARM_COHERENT_IO__ */
3772#else /* defined(__arm__) || defined(__arm64__) */
3773 func = &dcache_incoherent_io_store64;
3774 break;
3775#endif /* defined(__arm__) || defined(__arm64__) */
3776
3777 case kIOMemorySetEncrypted:
3778 func = &SetEncryptOp;
3779 break;
3780 case kIOMemoryClearEncrypted:
3781 func = &ClearEncryptOp;
3782 break;
3783 }
3784
3785#if defined(__arm__) || defined(__arm64__)
3786 if ((func == NULL) && (func_ext == NULL)) {
3787 return kIOReturnUnsupported;
3788 }
3789#else /* defined(__arm__) || defined(__arm64__) */
3790 if (!func) {
3791 return kIOReturnUnsupported;
3792 }
3793#endif /* defined(__arm__) || defined(__arm64__) */
3794
3795 if (kIOMemoryThreadSafe & _flags) {
3796 LOCK;
3797 }
3798
3799 res = 0x0UL;
3800 remaining = length = min(length, getLength() - offset);
3801 while (remaining) {
3802 // (process another target segment?)
3803 addr64_t dstAddr64;
3804 IOByteCount dstLen;
3805
3806 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3807 if (!dstAddr64) {
3808 break;
3809 }
3810
3811 // Clip segment length to remaining
3812 if (dstLen > remaining) {
3813 dstLen = remaining;
3814 }
3815 if (dstLen > (UINT_MAX - PAGE_SIZE + 1)) {
3816 dstLen = (UINT_MAX - PAGE_SIZE + 1);
3817 }
3818 if (remaining > UINT_MAX) {
3819 remaining = UINT_MAX;
3820 }
3821
3822#if defined(__arm__) || defined(__arm64__)
3823 if (func) {
3824 (*func)(dstAddr64, (unsigned int) dstLen);
3825 }
3826 if (func_ext) {
3827 (*func_ext)(dstAddr64, (unsigned int) dstLen, (unsigned int) remaining, &res);
3828 if (res != 0x0UL) {
3829 remaining = 0;
3830 break;
3831 }
3832 }
3833#else /* defined(__arm__) || defined(__arm64__) */
3834 (*func)(dstAddr64, (unsigned int) dstLen);
3835#endif /* defined(__arm__) || defined(__arm64__) */
3836
3837 offset += dstLen;
3838 remaining -= dstLen;
3839 }
3840
3841 if (kIOMemoryThreadSafe & _flags) {
3842 UNLOCK;
3843 }
3844
3845 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
3846}
3847
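/*
 * Illustrative sketch (not part of this file): how a client might pair
 * prepare()/complete() with performOperation() to flush incoherent I/O
 * caches over a descriptor. "ExampleFlushAfterDeviceWrite" is a
 * hypothetical helper; prepare(), performOperation(), getLength() and
 * complete() are the calls implemented in this file.
 */
#if 0 /* example only -- not compiled */
static IOReturn
ExampleFlushAfterDeviceWrite(IOMemoryDescriptor * md)
{
	IOReturn ret = md->prepare(kIODirectionIn);     // wire before the device writes
	if (kIOReturnSuccess != ret) {
		return ret;
	}
	// ... device DMA into the buffer happens here ...
	// Flush/invalidate any stale cache lines covering the whole descriptor.
	ret = md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
	md->complete(kIODirectionIn);
	return ret;
}
#endif
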
3848/*
3849 *
3850 */
3851
3852#if defined(__i386__) || defined(__x86_64__)
3853
3854extern vm_offset_t kc_highest_nonlinkedit_vmaddr;
3855
3856/* XXX: By extending io_kernel_static_end to the highest virtual address in the KC,
3857 * we're opening up this path to IOMemoryDescriptor consumers who can now create UPLs to
3858 * kernel non-text data -- should we just add another range instead?
3859 */
3860#define io_kernel_static_start vm_kernel_stext
3861#define io_kernel_static_end (kc_highest_nonlinkedit_vmaddr ? kc_highest_nonlinkedit_vmaddr : vm_kernel_etext)
3862
3863#elif defined(__arm__) || defined(__arm64__)
3864
3865extern vm_offset_t static_memory_end;
3866
3867#if defined(__arm64__)
3868#define io_kernel_static_start vm_kext_base
3869#else /* defined(__arm64__) */
3870#define io_kernel_static_start vm_kernel_stext
3871#endif /* defined(__arm64__) */
3872
3873#define io_kernel_static_end static_memory_end
3874
3875#else
3876#error io_kernel_static_end is undefined for this architecture
3877#endif
3878
3879static kern_return_t
3880io_get_kernel_static_upl(
3881 vm_map_t /* map */,
3882 uintptr_t offset,
3883 upl_size_t *upl_size,
3884 unsigned int *page_offset,
3885 upl_t *upl,
3886 upl_page_info_array_t page_list,
3887 unsigned int *count,
3888 ppnum_t *highest_page)
3889{
3890 unsigned int pageCount, page;
3891 ppnum_t phys;
3892 ppnum_t highestPage = 0;
3893
3894 pageCount = atop_32(round_page(*upl_size + (page_mask & offset)));
3895 if (pageCount > *count) {
3896 pageCount = *count;
3897 }
3898 *upl_size = (upl_size_t) ptoa_64(pageCount);
3899
3900 *upl = NULL;
3901 *page_offset = ((unsigned int) page_mask & offset);
3902
3903 for (page = 0; page < pageCount; page++) {
3904 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3905 if (!phys) {
3906 break;
3907 }
3908 page_list[page].phys_addr = phys;
3909 page_list[page].free_when_done = 0;
3910 page_list[page].absent = 0;
3911 page_list[page].dirty = 0;
3912 page_list[page].precious = 0;
3913 page_list[page].device = 0;
3914 if (phys > highestPage) {
3915 highestPage = phys;
3916 }
3917 }
3918
3919 *highest_page = highestPage;
3920
3921 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
3922}
3923
3924IOReturn
3925IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3926{
3927 IOOptionBits type = _flags & kIOMemoryTypeMask;
3928 IOReturn error = kIOReturnSuccess;
3929 ioGMDData *dataP;
3930 upl_page_info_array_t pageInfo;
3931 ppnum_t mapBase;
3932 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3933 mach_vm_size_t numBytesWired = 0;
3934
3935 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3936
3937 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3938 forDirection = (IODirection) (forDirection | getDirection());
3939 }
3940
3941 dataP = getDataP(_memoryEntries);
3942 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3943 switch (kIODirectionOutIn & forDirection) {
3944 case kIODirectionOut:
3945 // Pages do not need to be marked as dirty on commit
3946 uplFlags = UPL_COPYOUT_FROM;
3947 dataP->fDMAAccess = kIODMAMapReadAccess;
3948 break;
3949
3950 case kIODirectionIn:
3951 dataP->fDMAAccess = kIODMAMapWriteAccess;
3952 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3953 break;
3954
3955 default:
3956 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3957 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3958 break;
3959 }
3960
3961 if (_wireCount) {
3962 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3963 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3964 error = kIOReturnNotWritable;
3965 }
3966 } else {
3967 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_WIRE), VM_KERNEL_ADDRHIDE(this), forDirection);
3968 IOMapper *mapper;
3969
3970 mapper = dataP->fMapper;
3971 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3972
3973 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3974 tag = _kernelTag;
3975 if (VM_KERN_MEMORY_NONE == tag) {
3976 tag = IOMemoryTag(kernel_map);
3977 }
3978
3979 if (kIODirectionPrepareToPhys32 & forDirection) {
3980 if (!mapper) {
3981 uplFlags |= UPL_NEED_32BIT_ADDR;
3982 }
3983 if (dataP->fDMAMapNumAddressBits > 32) {
3984 dataP->fDMAMapNumAddressBits = 32;
3985 }
3986 }
3987 if (kIODirectionPrepareNoFault & forDirection) {
3988 uplFlags |= UPL_REQUEST_NO_FAULT;
3989 }
3990 if (kIODirectionPrepareNoZeroFill & forDirection) {
3991 uplFlags |= UPL_NOZEROFILLIO;
3992 }
3993 if (kIODirectionPrepareNonCoherent & forDirection) {
3994 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3995 }
3996
3997 mapBase = 0;
3998
3999 // Note that appendBytes(NULL) zeros the data up to the desired length
4000 // and the length parameter is an unsigned int
4001 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
4002 if (uplPageSize > ((unsigned int)uplPageSize)) {
4003 error = kIOReturnNoMemory;
4004 traceInterval.setEndArg2(error);
4005 return error;
4006 }
4007 if (!_memoryEntries->appendBytes(NULL, (unsigned int) uplPageSize)) {
4008 error = kIOReturnNoMemory;
4009 traceInterval.setEndArg2(error);
4010 return error;
4011 }
4012 dataP = NULL;
4013
4014 // Find the appropriate vm_map for the given task
4015 vm_map_t curMap;
4016 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
4017 curMap = NULL;
4018 } else {
4019 curMap = get_task_map(_task);
4020 }
4021
4022 // Iterate over the vector of virtual ranges
4023 Ranges vec = _ranges;
4024 unsigned int pageIndex = 0;
4025 IOByteCount mdOffset = 0;
4026 ppnum_t highestPage = 0;
4027 bool byteAlignUPL;
4028
4029 IOMemoryEntry * memRefEntry = NULL;
4030 if (_memRef) {
4031 memRefEntry = &_memRef->entries[0];
4032 byteAlignUPL = (0 != (MAP_MEM_USE_DATA_ADDR & _memRef->prot));
4033 } else {
4034 byteAlignUPL = true;
4035 }
4036
4037 for (UInt range = 0; mdOffset < _length; range++) {
4038 ioPLBlock iopl;
4039 mach_vm_address_t startPage, startPageOffset;
4040 mach_vm_size_t numBytes;
4041 ppnum_t highPage = 0;
4042
4043 if (_memRef) {
4044 if (range >= _memRef->count) {
4045 panic("memRefEntry");
4046 }
4047 memRefEntry = &_memRef->entries[range];
4048 numBytes = memRefEntry->size;
4049 startPage = -1ULL;
4050 if (byteAlignUPL) {
4051 startPageOffset = 0;
4052 } else {
4053 startPageOffset = (memRefEntry->start & PAGE_MASK);
4054 }
4055 } else {
4056 // Get the startPage address and length of vec[range]
4057 getAddrLenForInd(startPage, numBytes, type, vec, range);
4058 if (byteAlignUPL) {
4059 startPageOffset = 0;
4060 } else {
4061 startPageOffset = startPage & PAGE_MASK;
4062 startPage = trunc_page_64(startPage);
4063 }
4064 }
4065 iopl.fPageOffset = (typeof(iopl.fPageOffset))startPageOffset;
4066 numBytes += startPageOffset;
4067
4068 if (mapper) {
4069 iopl.fMappedPage = mapBase + pageIndex;
4070 } else {
4071 iopl.fMappedPage = 0;
4072 }
4073
4074 // Iterate over the current range, creating UPLs
4075 while (numBytes) {
4076 vm_address_t kernelStart = (vm_address_t) startPage;
4077 vm_map_t theMap;
4078 if (curMap) {
4079 theMap = curMap;
4080 } else if (_memRef) {
4081 theMap = NULL;
4082 } else {
4083 assert(_task == kernel_task);
4084 theMap = IOPageableMapForAddress(kernelStart);
4085 }
4086
4087 // ioplFlags is an in/out parameter
4088 upl_control_flags_t ioplFlags = uplFlags;
4089 dataP = getDataP(_memoryEntries);
4090 pageInfo = getPageList(dataP);
4091 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
4092
4093 mach_vm_size_t ioplPhysSize;
4094 upl_size_t ioplSize;
4095 unsigned int numPageInfo;
4096
4097 if (_memRef) {
4098 error = mach_memory_entry_map_size(memRefEntry->entry, NULL /*physical*/, 0, memRefEntry->size, &ioplPhysSize);
4099 DEBUG4K_IOKIT("_memRef %p memRefEntry %p entry %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, memRefEntry, memRefEntry->entry, startPage, numBytes, ioplPhysSize);
4100 } else {
4101 error = vm_map_range_physical_size(theMap, startPage, numBytes, &ioplPhysSize);
4102 DEBUG4K_IOKIT("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx ioplPhysSize 0x%llx\n", _memRef, theMap, startPage, numBytes, ioplPhysSize);
4103 }
4104 if (error != KERN_SUCCESS) {
4105 if (_memRef) {
4106 DEBUG4K_ERROR("_memRef %p memRefEntry %p entry %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, memRefEntry, memRefEntry->entry, theMap, startPage, numBytes, error);
4107 } else {
4108 DEBUG4K_ERROR("_memRef %p theMap %p startPage 0x%llx numBytes 0x%llx error 0x%x\n", _memRef, theMap, startPage, numBytes, error);
4109 }
4110 printf("entry size error %d\n", error);
4111 goto abortExit;
4112 }
4113 ioplPhysSize = (ioplPhysSize <= MAX_UPL_SIZE_BYTES) ? ioplPhysSize : MAX_UPL_SIZE_BYTES;
4114 numPageInfo = atop_32(ioplPhysSize);
4115 if (byteAlignUPL) {
4116 if (numBytes > ioplPhysSize) {
4117 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4118 } else {
4119 ioplSize = ((typeof(ioplSize))numBytes);
4120 }
4121 } else {
4122 ioplSize = ((typeof(ioplSize))ioplPhysSize);
4123 }
4124
4125 if (_memRef) {
4126 memory_object_offset_t entryOffset;
4127
4128 entryOffset = mdOffset;
4129 if (byteAlignUPL) {
4130 entryOffset = (entryOffset - memRefEntry->offset);
4131 } else {
4132 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
4133 }
4134 if (ioplSize > (memRefEntry->size - entryOffset)) {
4135 ioplSize = ((typeof(ioplSize))(memRefEntry->size - entryOffset));
4136 }
4137 error = memory_object_iopl_request(memRefEntry->entry,
4138 entryOffset,
4139 &ioplSize,
4140 &iopl.fIOPL,
4141 baseInfo,
4142 &numPageInfo,
4143 &ioplFlags,
4144 tag);
4145 } else if ((theMap == kernel_map)
4146 && (kernelStart >= io_kernel_static_start)
4147 && (kernelStart < io_kernel_static_end)) {
4148 error = io_get_kernel_static_upl(theMap,
4149 kernelStart,
4150 &ioplSize,
4151 &iopl.fPageOffset,
4152 &iopl.fIOPL,
4153 baseInfo,
4154 &numPageInfo,
4155 &highPage);
4156 } else {
4157 assert(theMap);
4158 error = vm_map_create_upl(theMap,
4159 startPage,
4160 (upl_size_t*)&ioplSize,
4161 &iopl.fIOPL,
4162 baseInfo,
4163 &numPageInfo,
4164 &ioplFlags,
4165 tag);
4166 }
4167
4168 if (error != KERN_SUCCESS) {
4169 traceInterval.setEndArg2(error);
4170 DEBUG4K_ERROR("UPL create error 0x%x theMap %p (kernel:%d) _memRef %p startPage 0x%llx ioplSize 0x%x\n", error, theMap, (theMap == kernel_map), _memRef, startPage, ioplSize);
4171 goto abortExit;
4172 }
4173
4174 assert(ioplSize);
4175
4176 if (iopl.fIOPL) {
4177 highPage = upl_get_highest_page(iopl.fIOPL);
4178 }
4179 if (highPage > highestPage) {
4180 highestPage = highPage;
4181 }
4182
4183 if (baseInfo->device) {
4184 numPageInfo = 1;
4185 iopl.fFlags = kIOPLOnDevice;
4186 } else {
4187 iopl.fFlags = 0;
4188 }
4189
4190 if (byteAlignUPL) {
4191 if (iopl.fIOPL) {
4192 DEBUG4K_UPL("startPage 0x%llx numBytes 0x%llx iopl.fPageOffset 0x%x upl_get_data_offset(%p) 0x%llx\n", startPage, numBytes, iopl.fPageOffset, iopl.fIOPL, upl_get_data_offset(iopl.fIOPL));
4193 iopl.fPageOffset = (typeof(iopl.fPageOffset))upl_get_data_offset(iopl.fIOPL);
4194 }
4195 if (startPage != (mach_vm_address_t)-1) {
4196 // assert(iopl.fPageOffset == (startPage & PAGE_MASK));
4197 startPage -= iopl.fPageOffset;
4198 }
4199 ioplSize = ((typeof(ioplSize))ptoa_64(numPageInfo));
4200 numBytes += iopl.fPageOffset;
4201 }
4202
4203 iopl.fIOMDOffset = mdOffset;
4204 iopl.fPageInfo = pageIndex;
4205
4206 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
4207				// Clean up the partially created and unsaved iopl
4208 if (iopl.fIOPL) {
4209 upl_abort(iopl.fIOPL, 0);
4210 upl_deallocate(iopl.fIOPL);
4211 }
4212 error = kIOReturnNoMemory;
4213 traceInterval.setEndArg2(error);
4214 goto abortExit;
4215 }
4216 dataP = NULL;
4217
4218			// Check for multiple iopls in one virtual range
4219 pageIndex += numPageInfo;
4220 mdOffset -= iopl.fPageOffset;
4221 numBytesWired += ioplSize;
4222 if (ioplSize < numBytes) {
4223 numBytes -= ioplSize;
4224 if (startPage != (mach_vm_address_t)-1) {
4225 startPage += ioplSize;
4226 }
4227 mdOffset += ioplSize;
4228 iopl.fPageOffset = 0;
4229 if (mapper) {
4230 iopl.fMappedPage = mapBase + pageIndex;
4231 }
4232 } else {
4233 mdOffset += numBytes;
4234 break;
4235 }
4236 }
4237 }
4238
4239 _highestPage = highestPage;
4240 DEBUG4K_IOKIT("-> _highestPage 0x%x\n", _highestPage);
4241
4242 if (UPL_COPYOUT_FROM & uplFlags) {
4243 _flags |= kIOMemoryPreparedReadOnly;
4244 }
4245 traceInterval.setEndCodes(numBytesWired, error);
4246 }
4247
4248#if IOTRACKING
4249 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
4250 dataP = getDataP(_memoryEntries);
4251 if (!dataP->fWireTracking.link.next) {
4252 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
4253 }
4254 }
4255#endif /* IOTRACKING */
4256
4257 return error;
4258
4259abortExit:
4260 {
4261 dataP = getDataP(_memoryEntries);
4262 UInt done = getNumIOPL(_memoryEntries, dataP);
4263 ioPLBlock *ioplList = getIOPLList(dataP);
4264
4265 for (UInt ioplIdx = 0; ioplIdx < done; ioplIdx++) {
4266 if (ioplList[ioplIdx].fIOPL) {
4267 upl_abort(ioplList[ioplIdx].fIOPL, 0);
4268 upl_deallocate(ioplList[ioplIdx].fIOPL);
4269 }
4270 }
4271 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
4272 }
4273
4274 if (error == KERN_FAILURE) {
4275 error = kIOReturnCannotWire;
4276 } else if (error == KERN_MEMORY_ERROR) {
4277 error = kIOReturnNoResources;
4278 }
4279
4280 return error;
4281}
4282
4283bool
4284IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
4285{
4286 ioGMDData * dataP;
4287 unsigned dataSize;
4288
4289 if (size > UINT_MAX) {
4290 return false;
4291 }
4292 dataSize = (unsigned int) size;
4293 if (!_memoryEntries) {
4294 _memoryEntries = OSData::withCapacity(dataSize);
4295 if (!_memoryEntries) {
4296 return false;
4297 }
4298 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
4299 return false;
4300 }
4301
4302 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
4303 dataP = getDataP(_memoryEntries);
4304
4305 if (mapper == kIOMapperWaitSystem) {
4306 IOMapper::checkForSystemMapper();
4307 mapper = IOMapper::gSystem;
4308 }
4309 dataP->fMapper = mapper;
4310 dataP->fPageCnt = 0;
4311 dataP->fMappedBase = 0;
4312 dataP->fDMAMapNumAddressBits = 64;
4313 dataP->fDMAMapAlignment = 0;
4314 dataP->fPreparationID = kIOPreparationIDUnprepared;
4315 dataP->fCompletionError = false;
4316 dataP->fMappedBaseValid = false;
4317
4318 return true;
4319}
4320
4321IOReturn
4322IOMemoryDescriptor::dmaMap(
4323 IOMapper * mapper,
4324 IOMemoryDescriptor * memory,
4325 IODMACommand * command,
4326 const IODMAMapSpecification * mapSpec,
4327 uint64_t offset,
4328 uint64_t length,
4329 uint64_t * mapAddress,
4330 uint64_t * mapLength)
4331{
4332 IOReturn err;
4333 uint32_t mapOptions;
4334
4335 mapOptions = 0;
4336 mapOptions |= kIODMAMapReadAccess;
4337 if (!(kIOMemoryPreparedReadOnly & _flags)) {
4338 mapOptions |= kIODMAMapWriteAccess;
4339 }
4340
4341 err = mapper->iovmMapMemory(memory, offset, length, mapOptions,
4342 mapSpec, command, NULL, mapAddress, mapLength);
4343
4344 if (kIOReturnSuccess == err) {
4345 dmaMapRecord(mapper, command, *mapLength);
4346 }
4347
4348 return err;
4349}
4350
4351void
4352IOMemoryDescriptor::dmaMapRecord(
4353 IOMapper * mapper,
4354 IODMACommand * command,
4355 uint64_t mapLength)
4356{
4357 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_MAP), VM_KERNEL_ADDRHIDE(this));
4358 kern_allocation_name_t alloc;
4359 int16_t prior;
4360
4361 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
4362 kern_allocation_update_size(mapper->fAllocName, mapLength);
4363 }
4364
4365 if (!command) {
4366 return;
4367 }
4368 prior = OSAddAtomic16(1, &_dmaReferences);
4369 if (!prior) {
4370 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4371 _mapName = alloc;
4372 mapLength = _length;
4373 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
4374 } else {
4375 _mapName = NULL;
4376 }
4377 }
4378}
4379
4380IOReturn
4381IOMemoryDescriptor::dmaUnmap(
4382 IOMapper * mapper,
4383 IODMACommand * command,
4384 uint64_t offset,
4385 uint64_t mapAddress,
4386 uint64_t mapLength)
4387{
4388 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_DMA_UNMAP), VM_KERNEL_ADDRHIDE(this));
4389 IOReturn ret;
4390 kern_allocation_name_t alloc;
4391 kern_allocation_name_t mapName;
4392 int16_t prior;
4393
4394 mapName = NULL;
4395 prior = 0;
4396 if (command) {
4397 mapName = _mapName;
4398 if (_dmaReferences) {
4399 prior = OSAddAtomic16(-1, &_dmaReferences);
4400 } else {
4401 panic("_dmaReferences underflow");
4402 }
4403 }
4404
4405 if (!mapLength) {
4406 traceInterval.setEndArg1(kIOReturnSuccess);
4407 return kIOReturnSuccess;
4408 }
4409
4410 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
4411
4412 if ((alloc = mapper->fAllocName)) {
4413 kern_allocation_update_size(alloc, -mapLength);
4414 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
4415 mapLength = _length;
4416 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
4417 }
4418 }
4419
4420 traceInterval.setEndArg1(ret);
4421 return ret;
4422}
4423
4424IOReturn
4425IOGeneralMemoryDescriptor::dmaMap(
4426 IOMapper * mapper,
4427 IOMemoryDescriptor * memory,
4428 IODMACommand * command,
4429 const IODMAMapSpecification * mapSpec,
4430 uint64_t offset,
4431 uint64_t length,
4432 uint64_t * mapAddress,
4433 uint64_t * mapLength)
4434{
4435 IOReturn err = kIOReturnSuccess;
4436 ioGMDData * dataP;
4437 IOOptionBits type = _flags & kIOMemoryTypeMask;
4438
4439 *mapAddress = 0;
4440 if (kIOMemoryHostOnly & _flags) {
4441 return kIOReturnSuccess;
4442 }
4443 if (kIOMemoryRemote & _flags) {
4444 return kIOReturnNotAttached;
4445 }
4446
4447 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
4448 || offset || (length != _length)) {
4449 err = super::dmaMap(mapper, memory, command, mapSpec, offset, length, mapAddress, mapLength);
4450 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
4451 const ioPLBlock * ioplList = getIOPLList(dataP);
4452 upl_page_info_t * pageList;
4453 uint32_t mapOptions = 0;
4454
4455 IODMAMapSpecification mapSpec;
4456 bzero(&mapSpec, sizeof(mapSpec));
4457 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
4458 mapSpec.alignment = dataP->fDMAMapAlignment;
4459
4460 // For external UPLs the fPageInfo field points directly to
4461 // the upl's upl_page_info_t array.
4462 if (ioplList->fFlags & kIOPLExternUPL) {
4463 pageList = (upl_page_info_t *) ioplList->fPageInfo;
4464 mapOptions |= kIODMAMapPagingPath;
4465 } else {
4466 pageList = getPageList(dataP);
4467 }
4468
4469 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
4470 mapOptions |= kIODMAMapPageListFullyOccupied;
4471 }
4472
4473 assert(dataP->fDMAAccess);
4474 mapOptions |= dataP->fDMAAccess;
4475
4476 // Check for direct device non-paged memory
4477 if (ioplList->fFlags & kIOPLOnDevice) {
4478 mapOptions |= kIODMAMapPhysicallyContiguous;
4479 }
4480
4481 IODMAMapPageList dmaPageList =
4482 {
4483 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
4484 .pageListCount = _pages,
4485 .pageList = &pageList[0]
4486 };
4487 err = mapper->iovmMapMemory(memory, offset, length, mapOptions, &mapSpec,
4488 command, &dmaPageList, mapAddress, mapLength);
4489
4490 if (kIOReturnSuccess == err) {
4491 dmaMapRecord(mapper, command, *mapLength);
4492 }
4493 }
4494
4495 return err;
4496}
4497
4498/*
4499 * prepare
4500 *
4501 * Prepare the memory for an I/O transfer. This involves paging in
4502 * the memory, if necessary, and wiring it down for the duration of
4503 * the transfer. The complete() method completes the processing of
4504 * the memory after the I/O transfer finishes. This method needn't
4505 * be called for non-pageable memory.
4506 */
4507
4508IOReturn
4509IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
4510{
4511 IOReturn error = kIOReturnSuccess;
4512 IOOptionBits type = _flags & kIOMemoryTypeMask;
4513 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_PREPARE), VM_KERNEL_ADDRHIDE(this), forDirection);
4514
4515 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4516 traceInterval.setEndArg1(kIOReturnSuccess);
4517 return kIOReturnSuccess;
4518 }
4519
4520 assert(!(kIOMemoryRemote & _flags));
4521 if (kIOMemoryRemote & _flags) {
4522 traceInterval.setEndArg1(kIOReturnNotAttached);
4523 return kIOReturnNotAttached;
4524 }
4525
4526 if (_prepareLock) {
4527 IOLockLock(_prepareLock);
4528 }
4529
4530 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4531 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
4532 error = kIOReturnNotReady;
4533 goto finish;
4534 }
4535 error = wireVirtual(forDirection);
4536 }
4537
4538 if (kIOReturnSuccess == error) {
4539 if (1 == ++_wireCount) {
4540 if (kIOMemoryClearEncrypt & _flags) {
4541 performOperation(kIOMemoryClearEncrypted, 0, _length);
4542 }
4543
4544 ktraceEmitPhysicalSegments();
4545 }
4546 }
4547
4548finish:
4549
4550 if (_prepareLock) {
4551 IOLockUnlock(_prepareLock);
4552 }
4553 traceInterval.setEndArg1(error);
4554
4555 return error;
4556}
4557
4558/*
4559 * complete
4560 *
4561 * Complete processing of the memory after an I/O transfer finishes.
4562 * This method should not be called unless a prepare was previously
4563 * issued; prepare() and complete() must occur in pairs, before
4564 * and after an I/O transfer involving pageable memory.
4565 */
4566
4567IOReturn
4568IOGeneralMemoryDescriptor::complete(IODirection forDirection)
4569{
4570 IOOptionBits type = _flags & kIOMemoryTypeMask;
4571 ioGMDData * dataP;
4572 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_COMPLETE), VM_KERNEL_ADDRHIDE(this), forDirection);
4573
4574 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
4575 traceInterval.setEndArg1(kIOReturnSuccess);
4576 return kIOReturnSuccess;
4577 }
4578
4579 assert(!(kIOMemoryRemote & _flags));
4580 if (kIOMemoryRemote & _flags) {
4581 traceInterval.setEndArg1(kIOReturnNotAttached);
4582 return kIOReturnNotAttached;
4583 }
4584
4585 if (_prepareLock) {
4586 IOLockLock(_prepareLock);
4587 }
4588 do{
4589 assert(_wireCount);
4590 if (!_wireCount) {
4591 break;
4592 }
4593 dataP = getDataP(_memoryEntries);
4594 if (!dataP) {
4595 break;
4596 }
4597
4598 if (kIODirectionCompleteWithError & forDirection) {
4599 dataP->fCompletionError = true;
4600 }
4601
4602 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
4603 performOperation(kIOMemorySetEncrypted, 0, _length);
4604 }
4605
4606 _wireCount--;
4607 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
4608 ioPLBlock *ioplList = getIOPLList(dataP);
4609 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
4610
4611 if (_wireCount) {
4612 // kIODirectionCompleteWithDataValid & forDirection
4613 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4614 vm_tag_t tag;
4615 tag = (typeof(tag))getVMTag(kernel_map);
4616 for (ind = 0; ind < count; ind++) {
4617 if (ioplList[ind].fIOPL) {
4618 iopl_valid_data(ioplList[ind].fIOPL, tag);
4619 }
4620 }
4621 }
4622 } else {
4623 if (_dmaReferences) {
4624 panic("complete() while dma active");
4625 }
4626
4627 if (dataP->fMappedBaseValid) {
4628 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
4629 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
4630 }
4631#if IOTRACKING
4632 if (dataP->fWireTracking.link.next) {
4633 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
4634 }
4635#endif /* IOTRACKING */
4636			// Only complete the iopls that we created, which are for TypeVirtual
4637 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
4638 for (ind = 0; ind < count; ind++) {
4639 if (ioplList[ind].fIOPL) {
4640 if (dataP->fCompletionError) {
4641 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
4642 } else {
4643 upl_commit(ioplList[ind].fIOPL, NULL, 0);
4644 }
4645 upl_deallocate(ioplList[ind].fIOPL);
4646 }
4647 }
4648 } else if (kIOMemoryTypeUPL == type) {
4649 upl_set_referenced(ioplList[0].fIOPL, false);
4650 }
4651
4652 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
4653
4654 dataP->fPreparationID = kIOPreparationIDUnprepared;
4655 _flags &= ~kIOMemoryPreparedReadOnly;
4656
4657 if (kdebug_debugid_explicitly_enabled(IODBG_IOMDPA(IOMDPA_UNMAPPED))) {
4658 IOTimeStampConstantFiltered(IODBG_IOMDPA(IOMDPA_UNMAPPED), getDescriptorID(), VM_KERNEL_ADDRHIDE(this));
4659 }
4660 }
4661 }
4662 }while (false);
4663
4664 if (_prepareLock) {
4665 IOLockUnlock(_prepareLock);
4666 }
4667
4668 traceInterval.setEndArg1(kIOReturnSuccess);
4669 return kIOReturnSuccess;
4670}
4671
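/*
 * Illustrative sketch (not part of this file): the prepare()/complete()
 * pairing described in the comments above, with a walk of the wired
 * physical segments in between. "ExampleWalkSegments" is a hypothetical
 * helper; prepare(), getPhysicalSegment() and complete() are the calls
 * implemented in this file.
 */
#if 0 /* example only -- not compiled */
static IOReturn
ExampleWalkSegments(IOMemoryDescriptor * md)
{
	IOReturn ret = md->prepare(kIODirectionOut);    // page in and wire for the transfer
	if (kIOReturnSuccess != ret) {
		return ret;
	}
	IOByteCount offset = 0;
	while (offset < md->getLength()) {
		IOByteCount segLen  = 0;
		addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
		if (!segAddr || !segLen) {
			break;
		}
		// ... hand (segAddr, segLen) to the device's scatter/gather list ...
		offset += segLen;
	}
	// Every successful prepare() must be balanced by a complete().
	return md->complete(kIODirectionOut);
}
#endif
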
4672IOReturn
4673IOGeneralMemoryDescriptor::doMap(
4674 vm_map_t __addressMap,
4675 IOVirtualAddress * __address,
4676 IOOptionBits options,
4677 IOByteCount __offset,
4678 IOByteCount __length )
4679{
4680 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_MAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(*__address), __length);
4681 traceInterval.setEndArg1(kIOReturnSuccess);
4682#ifndef __LP64__
4683 if (!(kIOMap64Bit & options)) {
4684 panic("IOGeneralMemoryDescriptor::doMap !64bit");
4685 }
4686#endif /* !__LP64__ */
4687
4688 kern_return_t err;
4689
4690 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
4691 mach_vm_size_t offset = mapping->fOffset + __offset;
4692 mach_vm_size_t length = mapping->fLength;
4693
4694 IOOptionBits type = _flags & kIOMemoryTypeMask;
4695 Ranges vec = _ranges;
4696
4697 mach_vm_address_t range0Addr = 0;
4698 mach_vm_size_t range0Len = 0;
4699
4700 if ((offset >= _length) || ((offset + length) > _length)) {
4701 traceInterval.setEndArg1(kIOReturnBadArgument);
4702 DEBUG4K_ERROR("map %p offset 0x%llx length 0x%llx _length 0x%llx kIOReturnBadArgument\n", __addressMap, offset, length, (uint64_t)_length);
4703 // assert(offset == 0 && _length == 0 && length == 0);
4704 return kIOReturnBadArgument;
4705 }
4706
4707 assert(!(kIOMemoryRemote & _flags));
4708 if (kIOMemoryRemote & _flags) {
4709 return 0;
4710 }
4711
4712 if (vec.v) {
4713 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
4714 }
4715
4716 // mapping source == dest? (could be much better)
4717 if (_task
4718 && (mapping->fAddressTask == _task)
4719 && (mapping->fAddressMap == get_task_map(_task))
4720 && (options & kIOMapAnywhere)
4721 && (!(kIOMapUnique & options))
4722 && (1 == _rangesCount)
4723 && (0 == offset)
4724 && range0Addr
4725 && (length <= range0Len)) {
4726 mapping->fAddress = range0Addr;
4727 mapping->fOptions |= kIOMapStatic;
4728
4729 return kIOReturnSuccess;
4730 }
4731
4732 if (!_memRef) {
4733 IOOptionBits createOptions = 0;
4734 if (!(kIOMapReadOnly & options)) {
4735 createOptions |= kIOMemoryReferenceWrite;
4736#if DEVELOPMENT || DEBUG
4737 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
4738 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
4739 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
4740 }
4741#endif
4742 }
4743 err = memoryReferenceCreate(createOptions, &_memRef);
4744 if (kIOReturnSuccess != err) {
4745 traceInterval.setEndArg1(err);
4746 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4747 return err;
4748 }
4749 }
4750
4751 memory_object_t pager;
4752 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
4753
4754 // <upl_transpose //
4755 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
4756 do{
4757 upl_t redirUPL2;
4758 upl_size_t size;
4759 upl_control_flags_t flags;
4760 unsigned int lock_count;
4761
4762 if (!_memRef || (1 != _memRef->count)) {
4763 err = kIOReturnNotReadable;
4764 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4765 break;
4766 }
4767
4768 size = (upl_size_t) round_page(mapping->fLength);
4769 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4770 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4771
4772 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
4773 NULL, NULL,
4774 &flags, (vm_tag_t) getVMTag(kernel_map))) {
4775 redirUPL2 = NULL;
4776 }
4777
4778 for (lock_count = 0;
4779 IORecursiveLockHaveLock(gIOMemoryLock);
4780 lock_count++) {
4781 UNLOCK;
4782 }
4783 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
4784 for (;
4785 lock_count;
4786 lock_count--) {
4787 LOCK;
4788 }
4789
4790 if (kIOReturnSuccess != err) {
4791 IOLog("upl_transpose(%x)\n", err);
4792 err = kIOReturnSuccess;
4793 }
4794
4795 if (redirUPL2) {
4796 upl_commit(redirUPL2, NULL, 0);
4797 upl_deallocate(redirUPL2);
4798 redirUPL2 = NULL;
4799 }
4800 {
4801 // swap the memEntries since they now refer to different vm_objects
4802 IOMemoryReference * me = _memRef;
4803 _memRef = mapping->fMemory->_memRef;
4804 mapping->fMemory->_memRef = me;
4805 }
4806 if (pager) {
4807 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4808 }
4809 }while (false);
4810 }
4811 // upl_transpose> //
4812 else {
4813 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4814 if (err) {
4815 DEBUG4K_ERROR("map %p err 0x%x\n", mapping->fAddressMap, err);
4816 }
4817#if IOTRACKING
4818 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4819		// only dram maps in the default-on development case
4820 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4821 }
4822#endif /* IOTRACKING */
4823 if ((err == KERN_SUCCESS) && pager) {
4824 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4825
4826 if (err != KERN_SUCCESS) {
4827 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4828 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4829 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4830 }
4831 }
4832 }
4833
4834 traceInterval.setEndArg1(err);
4835 if (err) {
4836 DEBUG4K_ERROR("map %p err 0x%x\n", __addressMap, err);
4837 }
4838 return err;
4839}
4840
4841#if IOTRACKING
4842IOReturn
4843IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
4844 mach_vm_address_t * address, mach_vm_size_t * size)
4845{
4846#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
4847
4848 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
4849
4850 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4851 return kIOReturnNotReady;
4852 }
4853
4854 *task = map->fAddressTask;
4855 *address = map->fAddress;
4856 *size = map->fLength;
4857
4858 return kIOReturnSuccess;
4859}
4860#endif /* IOTRACKING */
4861
4862IOReturn
4863IOGeneralMemoryDescriptor::doUnmap(
4864 vm_map_t addressMap,
4865 IOVirtualAddress __address,
4866 IOByteCount __length )
4867{
4868 IOTimeStampIntervalConstantFiltered traceInterval(IODBG_MDESC(IOMDESC_UNMAP), VM_KERNEL_ADDRHIDE(this), VM_KERNEL_ADDRHIDE(__address), __length);
4869 IOReturn ret;
4870 ret = super::doUnmap(addressMap, __address, __length);
4871 traceInterval.setEndArg1(ret);
4872 return ret;
4873}
4874
4875/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4876
4877#undef super
4878#define super OSObject
4879
4880OSDefineMetaClassAndStructorsWithZone( IOMemoryMap, OSObject, ZC_NONE )
4881
4882OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
4883OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
4884OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
4885OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
4886OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
4887OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
4888OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
4889OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
4890
4891/* ex-inline function implementation */
4892IOPhysicalAddress
4893IOMemoryMap::getPhysicalAddress()
4894{
4895 return getPhysicalSegment( 0, NULL );
4896}
4897
4898/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4899
4900bool
4901IOMemoryMap::init(
4902 task_t intoTask,
4903 mach_vm_address_t toAddress,
4904 IOOptionBits _options,
4905 mach_vm_size_t _offset,
4906 mach_vm_size_t _length )
4907{
4908 if (!intoTask) {
4909 return false;
4910 }
4911
4912 if (!super::init()) {
4913 return false;
4914 }
4915
4916 fAddressMap = get_task_map(intoTask);
4917 if (!fAddressMap) {
4918 return false;
4919 }
4920 vm_map_reference(fAddressMap);
4921
4922 fAddressTask = intoTask;
4923 fOptions = _options;
4924 fLength = _length;
4925 fOffset = _offset;
4926 fAddress = toAddress;
4927
4928 return true;
4929}
4930
4931bool
4932IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
4933{
4934 if (!_memory) {
4935 return false;
4936 }
4937
4938 if (!fSuperMap) {
4939 if ((_offset + fLength) > _memory->getLength()) {
4940 return false;
4941 }
4942 fOffset = _offset;
4943 }
4944
4945
4946 OSSharedPtr<IOMemoryDescriptor> tempval(_memory, OSRetain);
4947 if (fMemory) {
4948 if (fMemory != _memory) {
4949 fMemory->removeMapping(this);
4950 }
4951 }
4952 fMemory = os::move(tempval);
4953
4954 return true;
4955}
4956
4957IOReturn
4958IOMemoryDescriptor::doMap(
4959 vm_map_t __addressMap,
4960 IOVirtualAddress * __address,
4961 IOOptionBits options,
4962 IOByteCount __offset,
4963 IOByteCount __length )
4964{
4965 return kIOReturnUnsupported;
4966}
4967
4968IOReturn
4969IOMemoryDescriptor::handleFault(
4970 void * _pager,
4971 mach_vm_size_t sourceOffset,
4972 mach_vm_size_t length)
4973{
4974 if (kIOMemoryRedirected & _flags) {
4975#if DEBUG
4976 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
4977#endif
4978 do {
4979 SLEEP;
4980 } while (kIOMemoryRedirected & _flags);
4981 }
4982 return kIOReturnSuccess;
4983}
4984
4985IOReturn
4986IOMemoryDescriptor::populateDevicePager(
4987 void * _pager,
4988 vm_map_t addressMap,
4989 mach_vm_address_t address,
4990 mach_vm_size_t sourceOffset,
4991 mach_vm_size_t length,
4992 IOOptionBits options )
4993{
4994 IOReturn err = kIOReturnSuccess;
4995 memory_object_t pager = (memory_object_t) _pager;
4996 mach_vm_size_t size;
4997 mach_vm_size_t bytes;
4998 mach_vm_size_t page;
4999 mach_vm_size_t pageOffset;
5000 mach_vm_size_t pagerOffset;
5001 IOPhysicalLength segLen, chunk;
5002 addr64_t physAddr;
5003 IOOptionBits type;
5004
5005 type = _flags & kIOMemoryTypeMask;
5006
5007 if (reserved->dp.pagerContig) {
5008 sourceOffset = 0;
5009 pagerOffset = 0;
5010 }
5011
5012 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
5013 assert( physAddr );
5014 pageOffset = physAddr - trunc_page_64( physAddr );
5015 pagerOffset = sourceOffset;
5016
5017 size = length + pageOffset;
5018 physAddr -= pageOffset;
5019
5020 segLen += pageOffset;
5021 bytes = size;
5022 do{
5023 // in the middle of the loop only map whole pages
5024 if (segLen >= bytes) {
5025 segLen = bytes;
5026 } else if (segLen != trunc_page_64(segLen)) {
5027 err = kIOReturnVMError;
5028 }
5029 if (physAddr != trunc_page_64(physAddr)) {
5030 err = kIOReturnBadArgument;
5031 }
5032
5033 if (kIOReturnSuccess != err) {
5034 break;
5035 }
5036
5037#if DEBUG || DEVELOPMENT
5038 if ((kIOMemoryTypeUPL != type)
5039 && pmap_has_managed_page((ppnum_t) atop_64(physAddr), (ppnum_t) atop_64(physAddr + segLen - 1))) {
5040 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
5041 }
5042#endif /* DEBUG || DEVELOPMENT */
5043
5044 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
5045 for (page = 0;
5046 (page < segLen) && (KERN_SUCCESS == err);
5047 page += chunk) {
5048 err = device_pager_populate_object(pager, pagerOffset,
5049 (ppnum_t)(atop_64(physAddr + page)), chunk);
5050 pagerOffset += chunk;
5051 }
5052
5053 assert(KERN_SUCCESS == err);
5054 if (err) {
5055 break;
5056 }
5057
5058		// This call to vm_fault causes an early pmap-level resolution of the
5059		// kernel mappings created above, since faulting them in later cannot
5060		// take place from interrupt level.
5061 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
5062 err = vm_fault(addressMap,
5063 (vm_map_offset_t)trunc_page_64(address),
5064 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
5065 FALSE, VM_KERN_MEMORY_NONE,
5066 THREAD_UNINT, NULL,
5067 (vm_map_offset_t)0);
5068
5069 if (KERN_SUCCESS != err) {
5070 break;
5071 }
5072 }
5073
5074 sourceOffset += segLen - pageOffset;
5075 address += segLen;
5076 bytes -= segLen;
5077 pageOffset = 0;
5078 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
5079
5080 if (bytes) {
5081 err = kIOReturnBadArgument;
5082 }
5083
5084 return err;
5085}
5086
5087IOReturn
5088IOMemoryDescriptor::doUnmap(
5089 vm_map_t addressMap,
5090 IOVirtualAddress __address,
5091 IOByteCount __length )
5092{
5093 IOReturn err;
5094 IOMemoryMap * mapping;
5095 mach_vm_address_t address;
5096 mach_vm_size_t length;
5097
5098 if (__length) {
5099 panic("doUnmap");
5100 }
5101
5102 mapping = (IOMemoryMap *) __address;
5103 addressMap = mapping->fAddressMap;
5104 address = mapping->fAddress;
5105 length = mapping->fLength;
5106
5107 if (kIOMapOverwrite & mapping->fOptions) {
5108 err = KERN_SUCCESS;
5109 } else {
5110 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
5111 addressMap = IOPageableMapForAddress( address );
5112 }
5113#if DEBUG
5114 if (kIOLogMapping & gIOKitDebug) {
5115 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
5116 addressMap, address, length );
5117 }
5118#endif
5119 err = mach_vm_deallocate( addressMap, address, length );
5120 if (vm_map_page_mask(addressMap) < PAGE_MASK) {
5121 DEBUG4K_IOKIT("map %p address 0x%llx length 0x%llx err 0x%x\n", addressMap, address, length, err);
5122 }
5123 }
5124
5125#if IOTRACKING
5126 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
5127#endif /* IOTRACKING */
5128
5129 return err;
5130}
5131
5132IOReturn
5133IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
5134{
5135 IOReturn err = kIOReturnSuccess;
5136 IOMemoryMap * mapping = NULL;
5137 OSSharedPtr<OSIterator> iter;
5138
5139 LOCK;
5140
5141 if (doRedirect) {
5142 _flags |= kIOMemoryRedirected;
5143 } else {
5144 _flags &= ~kIOMemoryRedirected;
5145 }
5146
5147 do {
5148 if ((iter = OSCollectionIterator::withCollection( _mappings.get()))) {
5149 memory_object_t pager;
5150
5151 if (reserved) {
5152 pager = (memory_object_t) reserved->dp.devicePager;
5153 } else {
5154 pager = MACH_PORT_NULL;
5155 }
5156
5157 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
5158 mapping->redirect( safeTask, doRedirect );
5159 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
5160 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
5161 }
5162 }
5163
5164 iter.reset();
5165 }
5166 } while (false);
5167
5168 if (!doRedirect) {
5169 WAKEUP;
5170 }
5171
5172 UNLOCK;
5173
5174#ifndef __LP64__
5175 // temporary binary compatibility
5176 IOSubMemoryDescriptor * subMem;
5177 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
5178 err = subMem->redirect( safeTask, doRedirect );
5179 } else {
5180 err = kIOReturnSuccess;
5181 }
5182#endif /* !__LP64__ */
5183
5184 return err;
5185}
5186
5187IOReturn
5188IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
5189{
5190 IOReturn err = kIOReturnSuccess;
5191
5192 if (fSuperMap) {
5193// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
5194 } else {
5195 LOCK;
5196
5197 do{
5198 if (!fAddress) {
5199 break;
5200 }
5201 if (!fAddressMap) {
5202 break;
5203 }
5204
5205 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
5206 && (0 == (fOptions & kIOMapStatic))) {
5207 IOUnmapPages( fAddressMap, fAddress, fLength );
5208 err = kIOReturnSuccess;
5209#if DEBUG
5210 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
5211#endif
5212 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
5213 IOOptionBits newMode;
5214 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
5215 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
5216 }
5217 }while (false);
5218 UNLOCK;
5219 }
5220
5221 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5222 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
5223 && safeTask
5224 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
5225 fMemory->redirect(safeTask, doRedirect);
5226 }
5227
5228 return err;
5229}
5230
5231IOReturn
5232IOMemoryMap::unmap( void )
5233{
5234 IOReturn err;
5235
5236 LOCK;
5237
5238 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
5239 && (0 == (kIOMapStatic & fOptions))) {
5240 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
5241 } else {
5242 err = kIOReturnSuccess;
5243 }
5244
5245 if (fAddressMap) {
5246 vm_map_deallocate(fAddressMap);
5247 fAddressMap = NULL;
5248 }
5249
5250 fAddress = 0;
5251
5252 UNLOCK;
5253
5254 return err;
5255}
5256
5257void
5258IOMemoryMap::taskDied( void )
5259{
5260 LOCK;
5261 if (fUserClientUnmap) {
5262 unmap();
5263 }
5264#if IOTRACKING
5265 else {
5266 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
5267 }
5268#endif /* IOTRACKING */
5269
5270 if (fAddressMap) {
5271 vm_map_deallocate(fAddressMap);
5272 fAddressMap = NULL;
5273 }
5274 fAddressTask = NULL;
5275 fAddress = 0;
5276 UNLOCK;
5277}
5278
5279IOReturn
5280IOMemoryMap::userClientUnmap( void )
5281{
5282 fUserClientUnmap = true;
5283 return kIOReturnSuccess;
5284}
5285
5286// Overload the release mechanism. All mappings must be members
5287// of a memory descriptor's _mappings set. This means that we
5288// always have 2 references on a mapping. When either of these references
5289// is released we need to free ourselves.
5290void
5291IOMemoryMap::taggedRelease(const void *tag) const
5292{
5293 LOCK;
5294 super::taggedRelease(tag, 2);
5295 UNLOCK;
5296}
5297
5298void
5299IOMemoryMap::free()
5300{
5301 unmap();
5302
5303 if (fMemory) {
5304 LOCK;
5305 fMemory->removeMapping(this);
5306 UNLOCK;
5307 fMemory.reset();
5308 }
5309
5310 if (fSuperMap) {
5311 fSuperMap.reset();
5312 }
5313
5314 if (fRedirUPL) {
5315 upl_commit(fRedirUPL, NULL, 0);
5316 upl_deallocate(fRedirUPL);
5317 }
5318
5319 super::free();
5320}
5321
5322IOByteCount
5323IOMemoryMap::getLength()
5324{
5325 return fLength;
5326}
5327
5328IOVirtualAddress
5329IOMemoryMap::getVirtualAddress()
5330{
5331#ifndef __LP64__
5332 if (fSuperMap) {
5333 fSuperMap->getVirtualAddress();
5334 } else if (fAddressMap
5335 && vm_map_is_64bit(fAddressMap)
5336 && (sizeof(IOVirtualAddress) < 8)) {
5337 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
5338 }
5339#endif /* !__LP64__ */
5340
5341 return fAddress;
5342}
5343
5344#ifndef __LP64__
5345mach_vm_address_t
5346IOMemoryMap::getAddress()
5347{
5348 return fAddress;
5349}
5350
5351mach_vm_size_t
5352IOMemoryMap::getSize()
5353{
5354 return fLength;
5355}
5356#endif /* !__LP64__ */
5357
5358
5359task_t
5360IOMemoryMap::getAddressTask()
5361{
5362 if (fSuperMap) {
5363 return fSuperMap->getAddressTask();
5364 } else {
5365 return fAddressTask;
5366 }
5367}
5368
5369IOOptionBits
5370IOMemoryMap::getMapOptions()
5371{
5372 return fOptions;
5373}
5374
5375IOMemoryDescriptor *
5376IOMemoryMap::getMemoryDescriptor()
5377{
5378 return fMemory.get();
5379}
5380
5381IOMemoryMap *
5382IOMemoryMap::copyCompatible(
5383 IOMemoryMap * newMapping )
5384{
5385 task_t task = newMapping->getAddressTask();
5386 mach_vm_address_t toAddress = newMapping->fAddress;
5387 IOOptionBits _options = newMapping->fOptions;
5388 mach_vm_size_t _offset = newMapping->fOffset;
5389 mach_vm_size_t _length = newMapping->fLength;
5390
5391 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
5392 return NULL;
5393 }
5394 if ((fOptions ^ _options) & kIOMapReadOnly) {
5395 return NULL;
5396 }
5397 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
5398 && ((fOptions ^ _options) & kIOMapCacheMask)) {
5399 return NULL;
5400 }
5401
5402 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
5403 return NULL;
5404 }
5405
5406 if (_offset < fOffset) {
5407 return NULL;
5408 }
5409
5410 _offset -= fOffset;
5411
5412 if ((_offset + _length) > fLength) {
5413 return NULL;
5414 }
5415
5416 if ((fLength == _length) && (!_offset)) {
5417 retain();
5418 newMapping = this;
5419 } else {
5420 newMapping->fSuperMap.reset(this, OSRetain);
5421 newMapping->fOffset = fOffset + _offset;
5422 newMapping->fAddress = fAddress + _offset;
5423 }
5424
5425 return newMapping;
5426}
5427
5428IOReturn
5429IOMemoryMap::wireRange(
5430 uint32_t options,
5431 mach_vm_size_t offset,
5432 mach_vm_size_t length)
5433{
5434 IOReturn kr;
5435 mach_vm_address_t start = trunc_page_64(fAddress + offset);
5436 mach_vm_address_t end = round_page_64(fAddress + offset + length);
5437 vm_prot_t prot;
5438
5439 prot = (kIODirectionOutIn & options);
5440 if (prot) {
5441 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, (vm_tag_t) fMemory->getVMTag(kernel_map), FALSE);
5442 } else {
5443 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
5444 }
5445
5446 return kr;
5447}
5448
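/*
 * Illustrative sketch (not part of this file): using IOMemoryMap::wireRange().
 * Passing direction bits wires the range; passing 0 unwires it, per the
 * implementation above. "ExampleWirePinnedWindow" is a hypothetical helper.
 */
#if 0 /* example only -- not compiled */
static IOReturn
ExampleWirePinnedWindow(IOMemoryMap * map, mach_vm_size_t offset, mach_vm_size_t length)
{
	// Wire the sub-range for read/write access.
	IOReturn ret = map->wireRange(kIODirectionOutIn, offset, length);
	if (kIOReturnSuccess != ret) {
		return ret;
	}
	// ... touch the wired window without taking faults ...
	// Then unwire it (options == 0 selects vm_map_unwire() above).
	return map->wireRange(0, offset, length);
}
#endif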
5449
5450IOPhysicalAddress
5451#ifdef __LP64__
5452IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
5453#else /* !__LP64__ */
5454IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
5455#endif /* !__LP64__ */
5456{
5457 IOPhysicalAddress address;
5458
5459 LOCK;
5460#ifdef __LP64__
5461 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
5462#else /* !__LP64__ */
5463 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
5464#endif /* !__LP64__ */
5465 UNLOCK;
5466
5467 return address;
5468}
5469
5470/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5471
5472#undef super
5473#define super OSObject
5474
5475/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5476
5477void
5478IOMemoryDescriptor::initialize( void )
5479{
5480 if (NULL == gIOMemoryLock) {
5481 gIOMemoryLock = IORecursiveLockAlloc();
5482 }
5483
5484 gIOLastPage = IOGetLastPageNumber();
5485}
5486
5487void
5488IOMemoryDescriptor::free( void )
5489{
5490 if (_mappings) {
5491 _mappings.reset();
5492 }
5493
5494 if (reserved) {
5495 cleanKernelReserved(reserved);
5496 IODelete(reserved, IOMemoryDescriptorReserved, 1);
5497 reserved = NULL;
5498 }
5499 super::free();
5500}
5501
5502OSSharedPtr<IOMemoryMap>
5503IOMemoryDescriptor::setMapping(
5504 task_t intoTask,
5505 IOVirtualAddress mapAddress,
5506 IOOptionBits options )
5507{
5508 return createMappingInTask( intoTask, mapAddress,
5509 options | kIOMapStatic,
5510 0, getLength());
5511}
5512
5513OSSharedPtr<IOMemoryMap>
5514IOMemoryDescriptor::map(
5515 IOOptionBits options )
5516{
5517 return createMappingInTask( kernel_task, 0,
5518 options | kIOMapAnywhere,
5519 0, getLength());
5520}
5521
5522#ifndef __LP64__
5523OSSharedPtr<IOMemoryMap>
5524IOMemoryDescriptor::map(
5525 task_t intoTask,
5526 IOVirtualAddress atAddress,
5527 IOOptionBits options,
5528 IOByteCount offset,
5529 IOByteCount length )
5530{
5531 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
5532 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
5533 return NULL;
5534 }
5535
5536 return createMappingInTask(intoTask, atAddress,
5537 options, offset, length);
5538}
5539#endif /* !__LP64__ */
5540
5541OSSharedPtr<IOMemoryMap>
5542IOMemoryDescriptor::createMappingInTask(
5543 task_t intoTask,
5544 mach_vm_address_t atAddress,
5545 IOOptionBits options,
5546 mach_vm_size_t offset,
5547 mach_vm_size_t length)
5548{
5549 IOMemoryMap * result;
5550 IOMemoryMap * mapping;
5551
5552 if (0 == length) {
5553 length = getLength();
5554 }
5555
5556 mapping = new IOMemoryMap;
5557
5558 if (mapping
5559 && !mapping->init( intoTask, atAddress,
5560 options, offset, length )) {
5561 mapping->release();
5562 mapping = NULL;
5563 }
5564
5565 if (mapping) {
5566 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
5567 } else {
5568 result = nullptr;
5569 }
5570
5571#if DEBUG
5572 if (!result) {
5573 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
5574 this, atAddress, (uint32_t) options, offset, length);
5575 }
5576#endif
5577
5578 // already retained through makeMapping
5579 OSSharedPtr<IOMemoryMap> retval(result, OSNoRetain);
5580
5581 return retval;
5582}
5583
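/*
 * Illustrative sketch (not part of this file): creating a kernel mapping
 * with createMappingInTask() and reading back its address and length.
 * "ExampleMapIntoKernel" is a hypothetical helper; the IOMemoryDescriptor
 * and IOMemoryMap calls are the ones implemented in this file.
 */
#if 0 /* example only -- not compiled */
static OSSharedPtr<IOMemoryMap>
ExampleMapIntoKernel(IOMemoryDescriptor * md)
{
	// kIOMapAnywhere lets the kernel choose the virtual address.
	OSSharedPtr<IOMemoryMap> map = md->createMappingInTask(kernel_task, 0,
	    kIOMapAnywhere, 0, md->getLength());
	if (map) {
		// The mapping remains valid while the IOMemoryMap is retained.
		IOVirtualAddress addr = map->getVirtualAddress();
		IOByteCount      len  = map->getLength();
		(void) addr;
		(void) len;
	}
	return map;
}
#endif
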
5584#ifndef __LP64__ // there is only a 64 bit version for LP64
5585IOReturn
5586IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5587 IOOptionBits options,
5588 IOByteCount offset)
5589{
5590 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
5591}
5592#endif
5593
5594IOReturn
5595IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
5596 IOOptionBits options,
5597 mach_vm_size_t offset)
5598{
5599 IOReturn err = kIOReturnSuccess;
5600 OSSharedPtr<IOMemoryDescriptor> physMem;
5601
5602 LOCK;
5603
5604 if (fAddress && fAddressMap) {
5605 do{
5606 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5607 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5608 physMem = fMemory;
5609 }
5610
5611 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
5612 upl_size_t size = (typeof(size))round_page(fLength);
5613 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5614 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
5615 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
5616 NULL, NULL,
5617 &flags, (vm_tag_t) fMemory->getVMTag(kernel_map))) {
5618 fRedirUPL = NULL;
5619 }
5620
5621 if (physMem) {
5622 IOUnmapPages( fAddressMap, fAddress, fLength );
5623 if ((false)) {
5624 physMem->redirect(NULL, true);
5625 }
5626 }
5627 }
5628
5629 if (newBackingMemory) {
5630 if (newBackingMemory != fMemory) {
5631 fOffset = 0;
5632 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
5633 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
5634 offset, fLength)) {
5635 err = kIOReturnError;
5636 }
5637 }
5638 if (fRedirUPL) {
5639 upl_commit(fRedirUPL, NULL, 0);
5640 upl_deallocate(fRedirUPL);
5641 fRedirUPL = NULL;
5642 }
5643 if ((false) && physMem) {
5644 physMem->redirect(NULL, false);
5645 }
5646 }
5647 }while (false);
5648 }
5649
5650 UNLOCK;
5651
5652 return err;
5653}
5654
5655IOMemoryMap *
5656IOMemoryDescriptor::makeMapping(
5657 IOMemoryDescriptor * owner,
5658 task_t __intoTask,
5659 IOVirtualAddress __address,
5660 IOOptionBits options,
5661 IOByteCount __offset,
5662 IOByteCount __length )
5663{
5664#ifndef __LP64__
5665 if (!(kIOMap64Bit & options)) {
5666 panic("IOMemoryDescriptor::makeMapping !64bit");
5667 }
5668#endif /* !__LP64__ */
5669
5670 OSSharedPtr<IOMemoryDescriptor> mapDesc;
5671 __block IOMemoryMap * result = NULL;
5672
5673 IOMemoryMap * mapping = (IOMemoryMap *) __address;
5674 mach_vm_size_t offset = mapping->fOffset + __offset;
5675 mach_vm_size_t length = mapping->fLength;
5676
5677 mapping->fOffset = offset;
5678
5679 LOCK;
5680
5681 do{
5682 if (kIOMapStatic & options) {
5683 result = mapping;
5684 addMapping(mapping);
5685 mapping->setMemoryDescriptor(this, 0);
5686 continue;
5687 }
5688
5689 if (kIOMapUnique & options) {
5690 addr64_t phys;
5691 IOByteCount physLen;
5692
5693// if (owner != this) continue;
5694
5695 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
5696 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
5697 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
5698 if (!phys || (physLen < length)) {
5699 continue;
5700 }
5701
5702 mapDesc = IOMemoryDescriptor::withAddressRange(
5703 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
5704 if (!mapDesc) {
5705 continue;
5706 }
5707 offset = 0;
5708 mapping->fOffset = offset;
5709 }
5710 } else {
5711 // look for a compatible existing mapping
5712 if (_mappings) {
5713 _mappings->iterateObjects(^(OSObject * object)
5714 {
5715 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
5716 if ((result = lookMapping->copyCompatible(mapping))) {
5717 addMapping(result);
5718 result->setMemoryDescriptor(this, offset);
5719 return true;
5720 }
5721 return false;
5722 });
5723 }
5724 if (result || (options & kIOMapReference)) {
5725 if (result != mapping) {
5726 mapping->release();
5727 mapping = NULL;
5728 }
5729 continue;
5730 }
5731 }
5732
5733 if (!mapDesc) {
5734 mapDesc.reset(this, OSRetain);
5735 }
5736 IOReturn
5737 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
5738 if (kIOReturnSuccess == kr) {
5739 result = mapping;
5740 mapDesc->addMapping(result);
5741 result->setMemoryDescriptor(mapDesc.get(), offset);
5742 } else {
5743 mapping->release();
5744 mapping = NULL;
5745 }
5746 }while (false);
5747
5748 UNLOCK;
5749
5750 return result;
5751}
5752
5753void
5754IOMemoryDescriptor::addMapping(
5755 IOMemoryMap * mapping )
5756{
5757 if (mapping) {
5758 if (NULL == _mappings) {
5759 _mappings = OSSet::withCapacity(1);
5760 }
5761 if (_mappings) {
5762 _mappings->setObject( mapping );
5763 }
5764 }
5765}
5766
5767void
5768IOMemoryDescriptor::removeMapping(
5769 IOMemoryMap * mapping )
5770{
5771 if (_mappings) {
5772 _mappings->removeObject( mapping);
5773 }
5774}
5775
5776#ifndef __LP64__
5777// obsolete initializers
5778// - initWithOptions is the designated initializer
5779bool
5780IOMemoryDescriptor::initWithAddress(void * address,
5781 IOByteCount length,
5782 IODirection direction)
5783{
5784 return false;
5785}
5786
5787bool
5788IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
5789 IOByteCount length,
5790 IODirection direction,
5791 task_t task)
5792{
5793 return false;
5794}
5795
5796bool
5797IOMemoryDescriptor::initWithPhysicalAddress(
5798 IOPhysicalAddress address,
5799 IOByteCount length,
5800 IODirection direction )
5801{
5802 return false;
5803}
5804
5805bool
5806IOMemoryDescriptor::initWithRanges(
5807 IOVirtualRange * ranges,
5808 UInt32 withCount,
5809 IODirection direction,
5810 task_t task,
5811 bool asReference)
5812{
5813 return false;
5814}
5815
5816bool
5817IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
5818 UInt32 withCount,
5819 IODirection direction,
5820 bool asReference)
5821{
5822 return false;
5823}
5824
5825void *
5826IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5827 IOByteCount * lengthOfSegment)
5828{
5829 return NULL;
5830}
5831#endif /* !__LP64__ */
5832
5833/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5834
5835bool
5836IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
5837{
5838 OSSharedPtr<OSSymbol const> keys[2] = {NULL};
5839 OSSharedPtr<OSObject> values[2] = {NULL};
5840 OSSharedPtr<OSArray> array;
5841
5842 vm_size_t vcopy_size;
5843
5844 struct SerData {
5845 user_addr_t address;
5846 user_size_t length;
5847 } *vcopy = NULL;
5848
5849 unsigned int index, nRanges;
5850 bool result = false;
5851
5852 IOOptionBits type = _flags & kIOMemoryTypeMask;
5853
5854 if (s == NULL) {
5855 return false;
5856 }
5857
5858 array = OSArray::withCapacity(4);
5859 if (!array) {
5860 return false;
5861 }
5862
5863 nRanges = _rangesCount;
5864 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
5865 result = false;
5866 goto bail;
5867 }
5868 vcopy = (SerData *) IOMalloc(vcopy_size);
5869 if (vcopy == NULL) {
5870 result = false;
5871 goto bail;
5872 }
5873
5874 keys[0] = OSSymbol::withCString("address");
5875 keys[1] = OSSymbol::withCString("length");
5876
5877 // Copy the volatile data so we don't have to allocate memory
5878 // while the lock is held.
5879 LOCK;
5880 if (nRanges == _rangesCount) {
5881 Ranges vec = _ranges;
5882 for (index = 0; index < nRanges; index++) {
5883 mach_vm_address_t addr; mach_vm_size_t len;
5884 getAddrLenForInd(addr, len, type, vec, index);
5885 vcopy[index].address = addr;
5886 vcopy[index].length = len;
5887 }
5888 } else {
5889 // The descriptor changed out from under us. Give up.
5890 UNLOCK;
5891 result = false;
5892 goto bail;
5893 }
5894 UNLOCK;
5895
5896 for (index = 0; index < nRanges; index++) {
5897 user_addr_t addr = vcopy[index].address;
5898 IOByteCount len = (IOByteCount) vcopy[index].length;
5899 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
5900 if (values[0] == NULL) {
5901 result = false;
5902 goto bail;
5903 }
5904 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
5905 if (values[1] == NULL) {
5906 result = false;
5907 goto bail;
5908 }
5909 OSSharedPtr<OSDictionary> dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
5910 if (dict == NULL) {
5911 result = false;
5912 goto bail;
5913 }
5914 array->setObject(dict.get());
5915 dict.reset();
5916 values[0].reset();
5917 values[1].reset();
5918 }
5919
5920 result = array->serialize(s);
5921
5922bail:
5923 if (vcopy) {
5924 IOFree(vcopy, vcopy_size);
5925 }
5926
5927 return result;
5928}
5929/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5930
5931OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 0);
5932#ifdef __LP64__
5933OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5934OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5935OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5936OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5937OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5938OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5939OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5940#else /* !__LP64__ */
5941OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 1);
5942OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 2);
5943OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 3);
5944OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 4);
5945OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 5);
5946OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 6);
5947OSMetaClassDefineReservedUsedX86(IOMemoryDescriptor, 7);
5948#endif /* !__LP64__ */
5949OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5950OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5951OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5952OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5953OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5954OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5955OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5956OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
5957
5958/* ex-inline function implementation */
5959IOPhysicalAddress
5960IOMemoryDescriptor::getPhysicalAddress()
5961{
5962 return getPhysicalSegment( 0, NULL );
5963}