[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp
1c79356b 1/*
39037602 2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b 27 */
b0d623f7
A
28
29
55e303ae 30#include <sys/cdefs.h>
1c79356b
A
31
32#include <IOKit/assert.h>
33#include <IOKit/system.h>
34#include <IOKit/IOLib.h>
35#include <IOKit/IOMemoryDescriptor.h>
55e303ae 36#include <IOKit/IOMapper.h>
99c3a104 37#include <IOKit/IODMACommand.h>
55e303ae 38#include <IOKit/IOKitKeysPrivate.h>
1c79356b 39
b0d623f7 40#include <IOKit/IOSubMemoryDescriptor.h>
3e170ce0 41#include <IOKit/IOMultiMemoryDescriptor.h>
b0d623f7 42
1c79356b 43#include <IOKit/IOKitDebug.h>
2d21ac55 44#include <libkern/OSDebug.h>
d9a64523 45#include <libkern/OSKextLibPrivate.h>
1c79356b 46
91447636
A
47#include "IOKitKernelInternal.h"
48
1c79356b 49#include <libkern/c++/OSContainers.h>
9bccf70c
A
50#include <libkern/c++/OSDictionary.h>
51#include <libkern/c++/OSArray.h>
52#include <libkern/c++/OSSymbol.h>
53#include <libkern/c++/OSNumber.h>
39037602 54#include <os/overflow.h>
91447636
A
55
56#include <sys/uio.h>
1c79356b
A
57
58__BEGIN_DECLS
59#include <vm/pmap.h>
91447636 60#include <vm/vm_pageout.h>
55e303ae 61#include <mach/memory_object_types.h>
0b4e3aa0 62#include <device/device_port.h>
55e303ae 63
91447636 64#include <mach/vm_prot.h>
2d21ac55 65#include <mach/mach_vm.h>
91447636 66#include <vm/vm_fault.h>
2d21ac55 67#include <vm/vm_protos.h>
91447636 68
55e303ae 69extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
6d2010ae
A
70extern void ipc_port_release_send(ipc_port_t port);
71
55e303ae 72__END_DECLS
1c79356b 73
0a7de745 74#define kIOMapperWaitSystem ((IOMapper *) 1)
99c3a104 75
0c530ab8
A
76static IOMapper * gIOSystemMapper = NULL;
77
0a7de745 78ppnum_t gIOLastPage;
0c530ab8 79
55e303ae 80/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
de355530 81
55e303ae 82OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
de355530 83
55e303ae 84#define super IOMemoryDescriptor
de355530 85
55e303ae 86OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
de355530 87
1c79356b
A
88/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
89
9bccf70c
A
90static IORecursiveLock * gIOMemoryLock;
91
0a7de745
A
92#define LOCK IORecursiveLockLock( gIOMemoryLock)
93#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
94#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
95#define WAKEUP \
9bccf70c
A
96 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
97
0c530ab8 98#if 0
0a7de745 99#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
0c530ab8 100#else
0a7de745 101#define DEBG(fmt, args...) {}
0c530ab8
A
102#endif
103
91447636
A
104/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105
106// Some data structures and accessor macros used by the initWithOptions
107// function.
108
109enum ioPLBlockFlags {
0a7de745
A
110 kIOPLOnDevice = 0x00000001,
111 kIOPLExternUPL = 0x00000002,
91447636
A
112};
113
0a7de745
A
114struct IOMDPersistentInitData {
115 const IOGeneralMemoryDescriptor * fMD;
116 IOMemoryReference * fMemRef;
91447636
A
117};
118
119struct ioPLBlock {
0a7de745
A
120 upl_t fIOPL;
121 vm_address_t fPageInfo; // Pointer to page list or index into it
122 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
123 ppnum_t fMappedPage; // Page number of first page in this iopl
124 unsigned int fPageOffset; // Offset within first page of iopl
125 unsigned int fFlags; // Flags
91447636
A
126};
127
39037602
A
128enum { kMaxWireTags = 6 };
129
0a7de745
A
130struct ioGMDData {
131 IOMapper * fMapper;
132 uint64_t fDMAMapAlignment;
133 uint64_t fMappedBase;
134 uint64_t fMappedLength;
135 uint64_t fPreparationID;
3e170ce0 136#if IOTRACKING
0a7de745 137 IOTracking fWireTracking;
39037602 138#endif /* IOTRACKING */
0a7de745
A
139 unsigned int fPageCnt;
140 uint8_t fDMAMapNumAddressBits;
141 unsigned char fDiscontig:1;
142 unsigned char fCompletionError:1;
143 unsigned char fMappedBaseValid:1;
144 unsigned char _resv:3;
145 unsigned char fDMAAccess:2;
146
147 /* variable length arrays */
148 upl_page_info_t fPageList[1]
b0d623f7 149#if __LP64__
0a7de745
A
150 // align fPageList as for ioPLBlock
151 __attribute__((aligned(sizeof(upl_t))))
b0d623f7 152#endif
0a7de745
A
153 ;
154 ioPLBlock fBlocks[1];
91447636
A
155};
156
0a7de745
A
157#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
158#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
159#define getNumIOPL(osd, d) \
91447636 160 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
0a7de745 161#define getPageList(d) (&(d->fPageList[0]))
91447636 162#define computeDataSize(p, u) \
6d2010ae 163 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
91447636 164
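// Illustrative sketch (not part of the original file): how the accessor macros
// above are typically combined to walk the wired-page data attached to a
// descriptor, where _memoryEntries is the OSData holding an ioGMDData:
//
//     ioGMDData * dataP    = getDataP(_memoryEntries);          // header + page list
//     ioPLBlock * ioplList = getIOPLList(dataP);                // blocks follow fPageList[fPageCnt]
//     UInt        numIOPLs = getNumIOPL(_memoryEntries, dataP);
//     for (UInt i = 0; i < numIOPLs; i++) {
//         // each block describes one UPL: its offset in the descriptor,
//         // first mapped page, and flags
//         DEBG("iopl %d fIOMDOffset 0x%x\n", i, ioplList[i].fIOMDOffset);
//     }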
5ba3f43e
A
165enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
166
91447636
A
167/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
168
b0d623f7 169#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
0b4e3aa0 170
0b4e3aa0 171extern "C" {
0a7de745
A
172kern_return_t
173device_data_action(
174 uintptr_t device_handle,
175 ipc_port_t device_pager,
176 vm_prot_t protection,
177 vm_object_offset_t offset,
178 vm_size_t size)
0b4e3aa0 179{
0a7de745
A
180 kern_return_t kr;
181 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
182 IOMemoryDescriptor * memDesc;
183
184 LOCK;
185 memDesc = ref->dp.memory;
186 if (memDesc) {
187 memDesc->retain();
188 kr = memDesc->handleFault(device_pager, offset, size);
189 memDesc->release();
190 } else {
191 kr = KERN_ABORTED;
192 }
193 UNLOCK;
194
195 return kr;
0b4e3aa0
A
196}
197
0a7de745
A
198kern_return_t
199device_close(
200 uintptr_t device_handle)
0b4e3aa0 201{
0a7de745 202 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
0b4e3aa0 203
0a7de745 204 IODelete( ref, IOMemoryDescriptorReserved, 1 );
0b4e3aa0 205
0a7de745 206 return kIOReturnSuccess;
0b4e3aa0 207}
0a7de745 208}; // end extern "C"
0b4e3aa0 209
fe8ab488
A
210/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
211
91447636
A
212// Note this inline function uses C++ reference arguments to return values.
213// This means that pointers are not passed and NULLs don't have to be
214// checked for, since a NULL reference is illegal.
215static inline void
fe8ab488 216getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
0a7de745 217 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
91447636 218{
0a7de745
A
219 assert(kIOMemoryTypeUIO == type
220 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
221 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
222 if (kIOMemoryTypeUIO == type) {
223 user_size_t us;
224 user_addr_t ad;
225 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
226 }
b0d623f7 227#ifndef __LP64__
0a7de745
A
228 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
229 IOAddressRange cur = r.v64[ind];
230 addr = cur.address;
231 len = cur.length;
232 }
b0d623f7 233#endif /* !__LP64__ */
0a7de745
A
234 else {
235 IOVirtualRange cur = r.v[ind];
236 addr = cur.address;
237 len = cur.length;
238 }
0b4e3aa0
A
239}
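// Illustrative sketch (not part of the original file): callers receive both
// values through the reference arguments without passing pointers, e.g. for a
// given range index `ind`:
//
//     mach_vm_address_t addr;
//     mach_vm_size_t    len;
//     getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, ind);
//     // addr/len now describe range `ind` of the descriptor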
240
1c79356b
A
241/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
242
0a7de745 243static IOReturn
fe8ab488
A
244purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
245{
0a7de745 246 IOReturn err = kIOReturnSuccess;
fe8ab488 247
0a7de745 248 *control = VM_PURGABLE_SET_STATE;
fe8ab488 249
0a7de745 250 enum { kIOMemoryPurgeableControlMask = 15 };
fe8ab488 251
0a7de745 252 switch (kIOMemoryPurgeableControlMask & newState) {
fe8ab488 253 case kIOMemoryPurgeableKeepCurrent:
0a7de745
A
254 *control = VM_PURGABLE_GET_STATE;
255 break;
fe8ab488
A
256
257 case kIOMemoryPurgeableNonVolatile:
0a7de745
A
258 *state = VM_PURGABLE_NONVOLATILE;
259 break;
fe8ab488 260 case kIOMemoryPurgeableVolatile:
0a7de745
A
261 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
262 break;
fe8ab488 263 case kIOMemoryPurgeableEmpty:
0a7de745
A
264 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
265 break;
fe8ab488 266 default:
0a7de745
A
267 err = kIOReturnBadArgument;
268 break;
269 }
270
271 if (*control == VM_PURGABLE_SET_STATE) {
272 // let VM know this call is from the kernel and is allowed to alter
273 // the volatility of the memory entry even if it was created with
274 // MAP_MEM_PURGABLE_KERNEL_ONLY
275 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
276 }
277
278 return err;
fe8ab488
A
279}
280
0a7de745 281static IOReturn
fe8ab488
A
282purgeableStateBits(int * state)
283{
0a7de745 284 IOReturn err = kIOReturnSuccess;
fe8ab488 285
0a7de745 286 switch (VM_PURGABLE_STATE_MASK & *state) {
fe8ab488 287 case VM_PURGABLE_NONVOLATILE:
0a7de745
A
288 *state = kIOMemoryPurgeableNonVolatile;
289 break;
fe8ab488 290 case VM_PURGABLE_VOLATILE:
0a7de745
A
291 *state = kIOMemoryPurgeableVolatile;
292 break;
fe8ab488 293 case VM_PURGABLE_EMPTY:
0a7de745
A
294 *state = kIOMemoryPurgeableEmpty;
295 break;
fe8ab488 296 default:
0a7de745
A
297 *state = kIOMemoryPurgeableNonVolatile;
298 err = kIOReturnNotReady;
299 break;
300 }
301 return err;
fe8ab488
A
302}
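// Illustrative sketch (not part of the original file): the two helpers above
// translate between IOKit purgeable constants and the Mach VM ones, as used by
// memoryReferenceSetPurgeable() later in this file:
//
//     vm_purgable_t control;
//     int           state;
//     if (kIOReturnSuccess == purgeableControlBits(kIOMemoryPurgeableVolatile,
//             &control, &state)) {
//         // control == VM_PURGABLE_SET_STATE_FROM_KERNEL, state == VM_PURGABLE_VOLATILE
//         // ... apply the change to the memory entry, then translate back:
//         purgeableStateBits(&state);   // VM_PURGABLE_VOLATILE -> kIOMemoryPurgeableVolatile
//     }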
303
304
0a7de745 305static vm_prot_t
fe8ab488
A
306vmProtForCacheMode(IOOptionBits cacheMode)
307{
0a7de745
A
308 vm_prot_t prot = 0;
309 switch (cacheMode) {
fe8ab488 310 case kIOInhibitCache:
0a7de745
A
311 SET_MAP_MEM(MAP_MEM_IO, prot);
312 break;
fe8ab488
A
313
314 case kIOWriteThruCache:
0a7de745
A
315 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
316 break;
fe8ab488
A
317
318 case kIOWriteCombineCache:
0a7de745
A
319 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
320 break;
fe8ab488
A
321
322 case kIOCopybackCache:
0a7de745
A
323 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
324 break;
fe8ab488
A
325
326 case kIOCopybackInnerCache:
0a7de745
A
327 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
328 break;
fe8ab488 329
5ba3f43e 330 case kIOPostedWrite:
0a7de745
A
331 SET_MAP_MEM(MAP_MEM_POSTED, prot);
332 break;
5ba3f43e 333
fe8ab488
A
334 case kIODefaultCache:
335 default:
0a7de745
A
336 SET_MAP_MEM(MAP_MEM_NOOP, prot);
337 break;
338 }
fe8ab488 339
0a7de745 340 return prot;
fe8ab488
A
341}
342
343static unsigned int
344pagerFlagsForCacheMode(IOOptionBits cacheMode)
345{
0a7de745
A
346 unsigned int pagerFlags = 0;
347 switch (cacheMode) {
fe8ab488 348 case kIOInhibitCache:
0a7de745
A
349 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
350 break;
fe8ab488
A
351
352 case kIOWriteThruCache:
0a7de745
A
353 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
354 break;
fe8ab488
A
355
356 case kIOWriteCombineCache:
0a7de745
A
357 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
358 break;
fe8ab488
A
359
360 case kIOCopybackCache:
0a7de745
A
361 pagerFlags = DEVICE_PAGER_COHERENT;
362 break;
fe8ab488
A
363
364 case kIOCopybackInnerCache:
0a7de745
A
365 pagerFlags = DEVICE_PAGER_COHERENT;
366 break;
fe8ab488 367
5ba3f43e 368 case kIOPostedWrite:
0a7de745
A
369 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
370 break;
5ba3f43e 371
fe8ab488
A
372 case kIODefaultCache:
373 default:
0a7de745
A
374 pagerFlags = -1U;
375 break;
376 }
377 return pagerFlags;
fe8ab488
A
378}
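// Illustrative sketch (not part of the original file): the two cache-mode
// helpers above are used as a pair — the vm_prot_t bits go into the named
// memory entry, while the pager flags go to device_pager_setup() for physical
// descriptors (see memoryReferenceCreate below):
//
//     IOOptionBits cacheMode = kIOWriteCombineCache;
//     vm_prot_t    prot   = VM_PROT_READ | vmProtForCacheMode(cacheMode);
//     unsigned int pFlags = pagerFlagsForCacheMode(cacheMode);
//     // for write-combine: pFlags == DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT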
379
380/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
381/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
382
0a7de745
A
383struct IOMemoryEntry {
384 ipc_port_t entry;
385 int64_t offset;
386 uint64_t size;
fe8ab488
A
387};
388
0a7de745
A
389struct IOMemoryReference {
390 volatile SInt32 refCount;
391 vm_prot_t prot;
392 uint32_t capacity;
393 uint32_t count;
394 struct IOMemoryReference * mapRef;
395 IOMemoryEntry entries[0];
fe8ab488
A
396};
397
0a7de745
A
398enum{
399 kIOMemoryReferenceReuse = 0x00000001,
400 kIOMemoryReferenceWrite = 0x00000002,
401 kIOMemoryReferenceCOW = 0x00000004,
fe8ab488
A
402};
403
404SInt32 gIOMemoryReferenceCount;
405
406IOMemoryReference *
407IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
408{
0a7de745
A
409 IOMemoryReference * ref;
410 size_t newSize, oldSize, copySize;
411
412 newSize = (sizeof(IOMemoryReference)
413 - sizeof(ref->entries)
414 + capacity * sizeof(ref->entries[0]));
415 ref = (typeof(ref))IOMalloc(newSize);
416 if (realloc) {
417 oldSize = (sizeof(IOMemoryReference)
418 - sizeof(realloc->entries)
419 + realloc->capacity * sizeof(realloc->entries[0]));
420 copySize = oldSize;
421 if (copySize > newSize) {
422 copySize = newSize;
423 }
424 if (ref) {
425 bcopy(realloc, ref, copySize);
426 }
427 IOFree(realloc, oldSize);
428 } else if (ref) {
429 bzero(ref, sizeof(*ref));
430 ref->refCount = 1;
431 OSIncrementAtomic(&gIOMemoryReferenceCount);
432 }
433 if (!ref) {
434 return 0;
435 }
436 ref->capacity = capacity;
437 return ref;
fe8ab488
A
438}
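// Illustrative sketch (not part of the original file): because the structure
// ends in a flexible array of IOMemoryEntry records, growing a reference is a
// re-allocation that preserves the existing entries:
//
//     IOMemoryReference * ref = memoryReferenceAlloc(4, NULL);  // room for 4 entries
//     // ... later, when a 5th entry is needed:
//     ref = memoryReferenceAlloc(ref->capacity + 4, ref);       // copies entries, frees old block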
439
0a7de745 440void
fe8ab488
A
441IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
442{
0a7de745
A
443 IOMemoryEntry * entries;
444 size_t size;
445
446 if (ref->mapRef) {
447 memoryReferenceFree(ref->mapRef);
448 ref->mapRef = 0;
449 }
450
451 entries = ref->entries + ref->count;
452 while (entries > &ref->entries[0]) {
453 entries--;
454 ipc_port_release_send(entries->entry);
455 }
456 size = (sizeof(IOMemoryReference)
457 - sizeof(ref->entries)
458 + ref->capacity * sizeof(ref->entries[0]));
459 IOFree(ref, size);
460
461 OSDecrementAtomic(&gIOMemoryReferenceCount);
fe8ab488
A
462}
463
0a7de745 464void
fe8ab488
A
465IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
466{
0a7de745
A
467 if (1 == OSDecrementAtomic(&ref->refCount)) {
468 memoryReferenceFree(ref);
469 }
fe8ab488
A
470}
471
472
473IOReturn
474IOGeneralMemoryDescriptor::memoryReferenceCreate(
0a7de745
A
475 IOOptionBits options,
476 IOMemoryReference ** reference)
fe8ab488 477{
0a7de745
A
478 enum { kCapacity = 4, kCapacityInc = 4 };
479
480 kern_return_t err;
481 IOMemoryReference * ref;
482 IOMemoryEntry * entries;
483 IOMemoryEntry * cloneEntries;
484 vm_map_t map;
485 ipc_port_t entry, cloneEntry;
486 vm_prot_t prot;
487 memory_object_size_t actualSize;
488 uint32_t rangeIdx;
489 uint32_t count;
490 mach_vm_address_t entryAddr, endAddr, entrySize;
491 mach_vm_size_t srcAddr, srcLen;
492 mach_vm_size_t nextAddr, nextLen;
493 mach_vm_size_t offset, remain;
494 IOByteCount physLen;
495 IOOptionBits type = (_flags & kIOMemoryTypeMask);
496 IOOptionBits cacheMode;
497 unsigned int pagerFlags;
498 vm_tag_t tag;
499
500 ref = memoryReferenceAlloc(kCapacity, NULL);
501 if (!ref) {
502 return kIOReturnNoMemory;
503 }
504
505 tag = getVMTag(kernel_map);
506 entries = &ref->entries[0];
507 count = 0;
508 err = KERN_SUCCESS;
509
510 offset = 0;
511 rangeIdx = 0;
512 if (_task) {
513 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
514 } else {
515 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
516 nextLen = physLen;
517
518 // default cache mode for physical
519 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
520 IOOptionBits mode;
521 pagerFlags = IODefaultCacheBits(nextAddr);
522 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags) {
523 if (DEVICE_PAGER_EARLY_ACK & pagerFlags) {
524 mode = kIOPostedWrite;
525 } else if (DEVICE_PAGER_GUARDED & pagerFlags) {
526 mode = kIOInhibitCache;
527 } else {
528 mode = kIOWriteCombineCache;
529 }
530 } else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags) {
531 mode = kIOWriteThruCache;
532 } else {
533 mode = kIOCopybackCache;
534 }
535 _flags |= (mode << kIOMemoryBufferCacheShift);
9d749ea3 536 }
0a7de745
A
537 }
538
539 // cache mode & vm_prot
540 prot = VM_PROT_READ;
541 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
542 prot |= vmProtForCacheMode(cacheMode);
543 // VM system requires write access to change cache mode
544 if (kIODefaultCache != cacheMode) {
545 prot |= VM_PROT_WRITE;
546 }
547 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
548 prot |= VM_PROT_WRITE;
549 }
550 if (kIOMemoryReferenceWrite & options) {
551 prot |= VM_PROT_WRITE;
552 }
553 if (kIOMemoryReferenceCOW & options) {
554 prot |= MAP_MEM_VM_COPY;
555 }
39037602 556
0a7de745
A
557 if ((kIOMemoryReferenceReuse & options) && _memRef) {
558 cloneEntries = &_memRef->entries[0];
559 prot |= MAP_MEM_NAMED_REUSE;
fe8ab488 560 }
fe8ab488 561
0a7de745
A
562 if (_task) {
563 // virtual ranges
564
565 if (kIOMemoryBufferPageable & _flags) {
566 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
567 prot |= MAP_MEM_NAMED_CREATE;
568 if (kIOMemoryBufferPurgeable & _flags) {
569 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
570 if (VM_KERN_MEMORY_SKYWALK == tag) {
571 prot |= MAP_MEM_LEDGER_TAG_NETWORK;
572 }
573 }
574 if (kIOMemoryUseReserve & _flags) {
575 prot |= MAP_MEM_GRAB_SECLUDED;
576 }
577
578 prot |= VM_PROT_WRITE;
579 map = NULL;
580 } else {
581 map = get_task_map(_task);
582 }
583
584 remain = _length;
585 while (remain) {
586 srcAddr = nextAddr;
587 srcLen = nextLen;
588 nextAddr = 0;
589 nextLen = 0;
590 // coalesce addr range
591 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
592 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
593 if ((srcAddr + srcLen) != nextAddr) {
594 break;
595 }
596 srcLen += nextLen;
597 }
598 entryAddr = trunc_page_64(srcAddr);
599 endAddr = round_page_64(srcAddr + srcLen);
600 do{
601 entrySize = (endAddr - entryAddr);
602 if (!entrySize) {
603 break;
604 }
605 actualSize = entrySize;
606
607 cloneEntry = MACH_PORT_NULL;
608 if (MAP_MEM_NAMED_REUSE & prot) {
609 if (cloneEntries < &_memRef->entries[_memRef->count]) {
610 cloneEntry = cloneEntries->entry;
611 } else {
612 prot &= ~MAP_MEM_NAMED_REUSE;
613 }
614 }
615
616 err = mach_make_memory_entry_internal(map,
617 &actualSize, entryAddr, prot, &entry, cloneEntry);
618
619 if (KERN_SUCCESS != err) {
620 break;
621 }
622 if (actualSize > entrySize) {
623 panic("mach_make_memory_entry_64 actualSize");
624 }
625
626 if (count >= ref->capacity) {
627 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
628 entries = &ref->entries[count];
629 }
630 entries->entry = entry;
631 entries->size = actualSize;
632 entries->offset = offset + (entryAddr - srcAddr);
633 entryAddr += actualSize;
634 if (MAP_MEM_NAMED_REUSE & prot) {
635 if ((cloneEntries->entry == entries->entry)
636 && (cloneEntries->size == entries->size)
637 && (cloneEntries->offset == entries->offset)) {
638 cloneEntries++;
639 } else {
640 prot &= ~MAP_MEM_NAMED_REUSE;
641 }
642 }
643 entries++;
644 count++;
645 }while (true);
646 offset += srcLen;
647 remain -= srcLen;
fe8ab488 648 }
0a7de745
A
649 } else {
650 // _task == 0, physical or kIOMemoryTypeUPL
651 memory_object_t pager;
652 vm_size_t size = ptoa_32(_pages);
fe8ab488 653
0a7de745
A
654 if (!getKernelReserved()) {
655 panic("getKernelReserved");
656 }
fe8ab488 657
0a7de745
A
658 reserved->dp.pagerContig = (1 == _rangesCount);
659 reserved->dp.memory = this;
fe8ab488 660
0a7de745
A
661 pagerFlags = pagerFlagsForCacheMode(cacheMode);
662 if (-1U == pagerFlags) {
663 panic("phys is kIODefaultCache");
664 }
665 if (reserved->dp.pagerContig) {
666 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
667 }
fe8ab488 668
0a7de745
A
669 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
670 size, pagerFlags);
671 assert(pager);
672 if (!pager) {
673 err = kIOReturnVMError;
674 } else {
675 srcAddr = nextAddr;
676 entryAddr = trunc_page_64(srcAddr);
677 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
678 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
679 assert(KERN_SUCCESS == err);
680 if (KERN_SUCCESS != err) {
681 device_pager_deallocate(pager);
682 } else {
683 reserved->dp.devicePager = pager;
684 entries->entry = entry;
685 entries->size = size;
686 entries->offset = offset + (entryAddr - srcAddr);
687 entries++;
688 count++;
689 }
690 }
691 }
fe8ab488 692
0a7de745
A
693 ref->count = count;
694 ref->prot = prot;
fe8ab488 695
0a7de745
A
696 if (_task && (KERN_SUCCESS == err)
697 && (kIOMemoryMapCopyOnWrite & _flags)
698 && !(kIOMemoryReferenceCOW & options)) {
699 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
700 }
fe8ab488 701
0a7de745
A
702 if (KERN_SUCCESS == err) {
703 if (MAP_MEM_NAMED_REUSE & prot) {
704 memoryReferenceFree(ref);
705 OSIncrementAtomic(&_memRef->refCount);
706 ref = _memRef;
707 }
708 } else {
709 memoryReferenceFree(ref);
710 ref = NULL;
fe8ab488 711 }
fe8ab488 712
0a7de745 713 *reference = ref;
fe8ab488 714
0a7de745 715 return err;
fe8ab488
A
716}
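// Illustrative sketch (not part of the original file): the typical lifecycle of
// an IOMemoryReference as used elsewhere in this file — create it once (e.g.
// for a persistent descriptor), map it into a target task, then drop it.
// `task` here is a hypothetical task_t:
//
//     IOMemoryReference * ref;
//     if (kIOReturnSuccess == memoryReferenceCreate(0, &ref)) {
//         mach_vm_address_t addr = 0;
//         memoryReferenceMap(ref, get_task_map(task), 0, _length,
//                            kIOMapAnywhere, &addr);
//         // ... use the mapping (torn down separately via mach_vm_deallocate) ...
//         memoryReferenceRelease(ref);
//     }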
717
0a7de745 718kern_return_t
fe8ab488
A
719IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
720{
0a7de745
A
721 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
722 IOReturn err;
723 vm_map_offset_t addr;
724
725 addr = ref->mapped;
726
727 err = vm_map_enter_mem_object(map, &addr, ref->size,
728 (vm_map_offset_t) 0,
729 (((ref->options & kIOMapAnywhere)
730 ? VM_FLAGS_ANYWHERE
731 : VM_FLAGS_FIXED)),
732 VM_MAP_KERNEL_FLAGS_NONE,
733 ref->tag,
734 IPC_PORT_NULL,
735 (memory_object_offset_t) 0,
736 false, /* copy */
737 ref->prot,
738 ref->prot,
739 VM_INHERIT_NONE);
740 if (KERN_SUCCESS == err) {
741 ref->mapped = (mach_vm_address_t) addr;
742 ref->map = map;
743 }
744
745 return err;
fe8ab488
A
746}
747
0a7de745 748IOReturn
fe8ab488 749IOGeneralMemoryDescriptor::memoryReferenceMap(
0a7de745
A
750 IOMemoryReference * ref,
751 vm_map_t map,
752 mach_vm_size_t inoffset,
753 mach_vm_size_t size,
754 IOOptionBits options,
755 mach_vm_address_t * inaddr)
fe8ab488 756{
0a7de745
A
757 IOReturn err;
758 int64_t offset = inoffset;
759 uint32_t rangeIdx, entryIdx;
760 vm_map_offset_t addr, mapAddr;
761 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
762
763 mach_vm_address_t nextAddr;
764 mach_vm_size_t nextLen;
765 IOByteCount physLen;
766 IOMemoryEntry * entry;
767 vm_prot_t prot, memEntryCacheMode;
768 IOOptionBits type;
769 IOOptionBits cacheMode;
770 vm_tag_t tag;
771 // for the kIOMapPrefault option.
772 upl_page_info_t * pageList = NULL;
773 UInt currentPageIndex = 0;
774 bool didAlloc;
775
776 if (ref->mapRef) {
777 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
778 return err;
779 }
780
781 type = _flags & kIOMemoryTypeMask;
782
783 prot = VM_PROT_READ;
784 if (!(kIOMapReadOnly & options)) {
785 prot |= VM_PROT_WRITE;
786 }
787 prot &= ref->prot;
788
789 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
790 if (kIODefaultCache != cacheMode) {
791 // VM system requires write access to update named entry cache mode
792 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
793 }
794
795 tag = getVMTag(map);
796
797 if (_task) {
798 // Find first range for offset
799 if (!_rangesCount) {
800 return kIOReturnBadArgument;
801 }
802 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
803 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
804 if (remain < nextLen) {
805 break;
806 }
807 remain -= nextLen;
808 }
809 } else {
810 rangeIdx = 0;
811 remain = 0;
812 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
813 nextLen = size;
814 }
815
816 assert(remain < nextLen);
817 if (remain >= nextLen) {
818 return kIOReturnBadArgument;
819 }
820
821 nextAddr += remain;
822 nextLen -= remain;
823 pageOffset = (page_mask & nextAddr);
824 addr = 0;
825 didAlloc = false;
826
827 if (!(options & kIOMapAnywhere)) {
828 addr = *inaddr;
829 if (pageOffset != (page_mask & addr)) {
830 return kIOReturnNotAligned;
831 }
832 addr -= pageOffset;
833 }
834
835 // find first entry for offset
836 for (entryIdx = 0;
837 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
838 entryIdx++) {
839 }
840 entryIdx--;
841 entry = &ref->entries[entryIdx];
842
843 // allocate VM
844 size = round_page_64(size + pageOffset);
845 if (kIOMapOverwrite & options) {
846 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
847 map = IOPageableMapForAddress(addr);
848 }
849 err = KERN_SUCCESS;
850 } else {
851 IOMemoryDescriptorMapAllocRef ref;
852 ref.map = map;
853 ref.tag = tag;
854 ref.options = options;
855 ref.size = size;
856 ref.prot = prot;
857 if (options & kIOMapAnywhere) {
858 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
859 ref.mapped = 0;
860 } else {
861 ref.mapped = addr;
862 }
863 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
864 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
865 } else {
866 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
867 }
868 if (KERN_SUCCESS == err) {
869 addr = ref.mapped;
870 map = ref.map;
871 didAlloc = true;
872 }
873 }
874
875 /*
876 * If the memory is associated with a device pager but doesn't have a UPL,
877 * it will be immediately faulted in through the pager via populateDevicePager().
878 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
879 * operations.
880 */
881 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
882 options &= ~kIOMapPrefault;
883 }
884
885 /*
886 * Prefaulting is only possible if we wired the memory earlier. Check the
887 * memory type, and the underlying data.
888 */
889 if (options & kIOMapPrefault) {
890 /*
891 * The memory must have been wired by calling ::prepare(), otherwise
892 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
893 */
894 assert(_wireCount != 0);
895 assert(_memoryEntries != NULL);
896 if ((_wireCount == 0) ||
897 (_memoryEntries == NULL)) {
898 return kIOReturnBadArgument;
899 }
900
901 // Get the page list.
902 ioGMDData* dataP = getDataP(_memoryEntries);
903 ioPLBlock const* ioplList = getIOPLList(dataP);
904 pageList = getPageList(dataP);
905
906 // Get the number of IOPLs.
907 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
908
909 /*
910 * Scan through the IOPL Info Blocks, looking for the first block containing
911 * the offset. The search will go past it, so we'll need to go back to the
912 * right range at the end.
913 */
914 UInt ioplIndex = 0;
915 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
916 ioplIndex++;
917 }
918 ioplIndex--;
919
920 // Retrieve the IOPL info block.
921 ioPLBlock ioplInfo = ioplList[ioplIndex];
922
923 /*
924 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
925 * array.
926 */
927 if (ioplInfo.fFlags & kIOPLExternUPL) {
928 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
929 } else {
930 pageList = &pageList[ioplInfo.fPageInfo];
931 }
932
933 // Rebase [offset] into the IOPL in order to look for the first page index.
934 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
935
936 // Retrieve the index of the first page corresponding to the offset.
937 currentPageIndex = atop_32(offsetInIOPL);
938 }
939
940 // enter mappings
941 remain = size;
942 mapAddr = addr;
943 addr += pageOffset;
944
945 while (remain && (KERN_SUCCESS == err)) {
946 entryOffset = offset - entry->offset;
947 if ((page_mask & entryOffset) != pageOffset) {
948 err = kIOReturnNotAligned;
949 break;
950 }
951
952 if (kIODefaultCache != cacheMode) {
953 vm_size_t unused = 0;
954 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
955 memEntryCacheMode, NULL, entry->entry);
956 assert(KERN_SUCCESS == err);
957 }
958
959 entryOffset -= pageOffset;
960 if (entryOffset >= entry->size) {
961 panic("entryOffset");
962 }
963 chunk = entry->size - entryOffset;
964 if (chunk) {
965 vm_map_kernel_flags_t vmk_flags;
966
967 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
968 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
969
970 if (chunk > remain) {
971 chunk = remain;
972 }
973 if (options & kIOMapPrefault) {
974 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
975
976 err = vm_map_enter_mem_object_prefault(map,
977 &mapAddr,
978 chunk, 0 /* mask */,
979 (VM_FLAGS_FIXED
980 | VM_FLAGS_OVERWRITE),
981 vmk_flags,
982 tag,
983 entry->entry,
984 entryOffset,
985 prot, // cur
986 prot, // max
987 &pageList[currentPageIndex],
988 nb_pages);
989
990 // Compute the next index in the page list.
991 currentPageIndex += nb_pages;
992 assert(currentPageIndex <= _pages);
993 } else {
994 err = vm_map_enter_mem_object(map,
995 &mapAddr,
996 chunk, 0 /* mask */,
997 (VM_FLAGS_FIXED
998 | VM_FLAGS_OVERWRITE),
999 vmk_flags,
1000 tag,
1001 entry->entry,
1002 entryOffset,
1003 false, // copy
1004 prot, // cur
1005 prot, // max
1006 VM_INHERIT_NONE);
1007 }
1008 if (KERN_SUCCESS != err) {
1009 break;
1010 }
1011 remain -= chunk;
1012 if (!remain) {
1013 break;
1014 }
1015 mapAddr += chunk;
1016 offset += chunk - pageOffset;
1017 }
1018 pageOffset = 0;
1019 entry++;
1020 entryIdx++;
1021 if (entryIdx >= ref->count) {
1022 err = kIOReturnOverrun;
1023 break;
1024 }
1025 }
1026
1027 if ((KERN_SUCCESS != err) && didAlloc) {
1028 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1029 addr = 0;
1030 }
1031 *inaddr = addr;
1032
1033 return err;
fe8ab488
A
1034}
1035
0a7de745 1036IOReturn
fe8ab488 1037IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
0a7de745
A
1038 IOMemoryReference * ref,
1039 IOByteCount * residentPageCount,
1040 IOByteCount * dirtyPageCount)
fe8ab488 1041{
0a7de745
A
1042 IOReturn err;
1043 IOMemoryEntry * entries;
1044 unsigned int resident, dirty;
1045 unsigned int totalResident, totalDirty;
1046
1047 totalResident = totalDirty = 0;
1048 err = kIOReturnSuccess;
1049 entries = ref->entries + ref->count;
1050 while (entries > &ref->entries[0]) {
1051 entries--;
1052 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1053 if (KERN_SUCCESS != err) {
1054 break;
1055 }
1056 totalResident += resident;
1057 totalDirty += dirty;
1058 }
1059
1060 if (residentPageCount) {
1061 *residentPageCount = totalResident;
1062 }
1063 if (dirtyPageCount) {
1064 *dirtyPageCount = totalDirty;
1065 }
1066 return err;
fe8ab488
A
1067}
1068
1069IOReturn
1070IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
0a7de745
A
1071 IOMemoryReference * ref,
1072 IOOptionBits newState,
1073 IOOptionBits * oldState)
fe8ab488 1074{
0a7de745
A
1075 IOReturn err;
1076 IOMemoryEntry * entries;
1077 vm_purgable_t control;
1078 int totalState, state;
1079
1080 totalState = kIOMemoryPurgeableNonVolatile;
1081 err = kIOReturnSuccess;
1082 entries = ref->entries + ref->count;
1083 while (entries > &ref->entries[0]) {
1084 entries--;
1085
1086 err = purgeableControlBits(newState, &control, &state);
1087 if (KERN_SUCCESS != err) {
1088 break;
1089 }
1090 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1091 if (KERN_SUCCESS != err) {
1092 break;
1093 }
1094 err = purgeableStateBits(&state);
1095 if (KERN_SUCCESS != err) {
1096 break;
1097 }
1098
1099 if (kIOMemoryPurgeableEmpty == state) {
1100 totalState = kIOMemoryPurgeableEmpty;
1101 } else if (kIOMemoryPurgeableEmpty == totalState) {
1102 continue;
1103 } else if (kIOMemoryPurgeableVolatile == totalState) {
1104 continue;
1105 } else if (kIOMemoryPurgeableVolatile == state) {
1106 totalState = kIOMemoryPurgeableVolatile;
1107 } else {
1108 totalState = kIOMemoryPurgeableNonVolatile;
1109 }
1110 }
1111
1112 if (oldState) {
1113 *oldState = totalState;
1114 }
1115 return err;
fe8ab488
A
1116}
1117
1118/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1119
1c79356b
A
1120IOMemoryDescriptor *
1121IOMemoryDescriptor::withAddress(void * address,
0a7de745
A
1122 IOByteCount length,
1123 IODirection direction)
55e303ae 1124{
0a7de745
A
1125 return IOMemoryDescriptor::
1126 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
55e303ae
A
1127}
1128
b0d623f7 1129#ifndef __LP64__
55e303ae 1130IOMemoryDescriptor *
b0d623f7 1131IOMemoryDescriptor::withAddress(IOVirtualAddress address,
0a7de745
A
1132 IOByteCount length,
1133 IODirection direction,
1134 task_t task)
1c79356b 1135{
0a7de745
A
1136 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1137 if (that) {
1138 if (that->initWithAddress(address, length, direction, task)) {
1139 return that;
1140 }
1141
1142 that->release();
1143 }
1144 return 0;
1c79356b 1145}
b0d623f7 1146#endif /* !__LP64__ */
1c79356b
A
1147
1148IOMemoryDescriptor *
55e303ae 1149IOMemoryDescriptor::withPhysicalAddress(
0a7de745
A
1150 IOPhysicalAddress address,
1151 IOByteCount length,
1152 IODirection direction )
55e303ae 1153{
0a7de745 1154 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
55e303ae
A
1155}
1156
b0d623f7 1157#ifndef __LP64__
55e303ae 1158IOMemoryDescriptor *
0a7de745
A
1159IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1160 UInt32 withCount,
1161 IODirection direction,
1162 task_t task,
1163 bool asReference)
1c79356b 1164{
0a7de745
A
1165 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1166 if (that) {
1167 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1168 return that;
1169 }
1170
1171 that->release();
1172 }
1173 return 0;
1c79356b 1174}
b0d623f7 1175#endif /* !__LP64__ */
1c79356b 1176
0c530ab8
A
1177IOMemoryDescriptor *
1178IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
0a7de745
A
1179 mach_vm_size_t length,
1180 IOOptionBits options,
1181 task_t task)
0c530ab8 1182{
0a7de745
A
1183 IOAddressRange range = { address, length };
1184 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
0c530ab8
A
1185}
1186
1187IOMemoryDescriptor *
1188IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
0a7de745
A
1189 UInt32 rangeCount,
1190 IOOptionBits options,
1191 task_t task)
0c530ab8 1192{
0a7de745
A
1193 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1194 if (that) {
1195 if (task) {
1196 options |= kIOMemoryTypeVirtual64;
1197 } else {
1198 options |= kIOMemoryTypePhysical64;
1199 }
0c530ab8 1200
0a7de745
A
1201 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0)) {
1202 return that;
1203 }
0c530ab8 1204
0a7de745
A
1205 that->release();
1206 }
0c530ab8 1207
0a7de745 1208 return 0;
0c530ab8
A
1209}
1210
1c79356b
A
1211
1212/*
b0d623f7 1213 * withOptions:
1214 *
1215 * Create a new IOMemoryDescriptor. The buffer is made up of several
1216 * virtual address ranges, from a given task.
1217 *
1218 * Passing the ranges as a reference will avoid an extra allocation.
1219 */
1220IOMemoryDescriptor *
0a7de745
A
1221IOMemoryDescriptor::withOptions(void * buffers,
1222 UInt32 count,
1223 UInt32 offset,
1224 task_t task,
1225 IOOptionBits opts,
1226 IOMapper * mapper)
1c79356b 1227{
0a7de745 1228 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 1229
0a7de745
A
1230 if (self
1231 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1232 self->release();
1233 return 0;
1234 }
55e303ae 1235
0a7de745 1236 return self;
55e303ae
A
1237}
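// Illustrative sketch (not part of the original file): a typical client creates
// the descriptor through one of the factories above rather than calling
// initWithOptions() directly; the ranges array is copied unless
// kIOMemoryAsReference is requested, so it may live on the stack.
// userAddr0/len0/userTask are hypothetical:
//
//     IOAddressRange ranges[2] = {
//         { userAddr0, len0 },
//         { userAddr1, len1 },
//     };
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRanges(
//         ranges, 2, kIODirectionOutIn, userTask);
//     if (md) {
//         // prepare()/complete() bracket any DMA or copy, then release
//         md->release();
//     }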
1238
0a7de745
A
1239bool
1240IOMemoryDescriptor::initWithOptions(void * buffers,
1241 UInt32 count,
1242 UInt32 offset,
1243 task_t task,
1244 IOOptionBits options,
1245 IOMapper * mapper)
55e303ae 1246{
0a7de745 1247 return false;
1c79356b
A
1248}
1249
b0d623f7 1250#ifndef __LP64__
1c79356b 1251IOMemoryDescriptor *
0a7de745
A
1252IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1253 UInt32 withCount,
1254 IODirection direction,
1255 bool asReference)
1c79356b 1256{
0a7de745
A
1257 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1258 if (that) {
1259 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1260 return that;
1261 }
1262
1263 that->release();
1264 }
1265 return 0;
1c79356b
A
1266}
1267
1268IOMemoryDescriptor *
0a7de745
A
1269IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1270 IOByteCount offset,
1271 IOByteCount length,
1272 IODirection direction)
1c79356b 1273{
0a7de745 1274 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1c79356b 1275}
b0d623f7 1276#endif /* !__LP64__ */
1c79356b 1277
0c530ab8
A
1278IOMemoryDescriptor *
1279IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
91447636 1280{
0a7de745
A
1281 IOGeneralMemoryDescriptor *origGenMD =
1282 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1283
1284 if (origGenMD) {
1285 return IOGeneralMemoryDescriptor::
1286 withPersistentMemoryDescriptor(origGenMD);
1287 } else {
1288 return 0;
1289 }
91447636
A
1290}
1291
0c530ab8
A
1292IOMemoryDescriptor *
1293IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
91447636 1294{
0a7de745
A
1295 IOMemoryReference * memRef;
1296
1297 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
1298 return 0;
1299 }
1300
1301 if (memRef == originalMD->_memRef) {
1302 originalMD->retain(); // Add a new reference to ourselves
1303 originalMD->memoryReferenceRelease(memRef);
1304 return originalMD;
1305 }
1306
1307 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1308 IOMDPersistentInitData initData = { originalMD, memRef };
1309
1310 if (self
1311 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1312 self->release();
1313 self = 0;
1314 }
1315 return self;
91447636
A
1316}
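// Illustrative sketch (not part of the original file): a descriptor created
// with kIOMemoryPersistent keeps its backing named entries alive, so a second
// descriptor over the same memory can be derived later; note the call may
// simply return the original, retained. addr/len/task are hypothetical:
//
//     IOMemoryDescriptor * orig = IOMemoryDescriptor::withAddressRange(
//         addr, len, kIODirectionOutIn | kIOMemoryPersistent, task);
//     IOMemoryDescriptor * alias =
//         IOMemoryDescriptor::withPersistentMemoryDescriptor(orig);
//     // ... use `alias`, then release both when done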
1317
b0d623f7 1318#ifndef __LP64__
1c79356b
A
1319bool
1320IOGeneralMemoryDescriptor::initWithAddress(void * address,
0a7de745
A
1321 IOByteCount withLength,
1322 IODirection withDirection)
1c79356b 1323{
0a7de745
A
1324 _singleRange.v.address = (vm_offset_t) address;
1325 _singleRange.v.length = withLength;
1c79356b 1326
0a7de745 1327 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1c79356b
A
1328}
1329
1330bool
b0d623f7 1331IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
0a7de745
A
1332 IOByteCount withLength,
1333 IODirection withDirection,
1334 task_t withTask)
1c79356b 1335{
0a7de745
A
1336 _singleRange.v.address = address;
1337 _singleRange.v.length = withLength;
1c79356b 1338
0a7de745 1339 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1c79356b
A
1340}
1341
1342bool
1343IOGeneralMemoryDescriptor::initWithPhysicalAddress(
0a7de745
A
1344 IOPhysicalAddress address,
1345 IOByteCount withLength,
1346 IODirection withDirection )
1c79356b 1347{
0a7de745
A
1348 _singleRange.p.address = address;
1349 _singleRange.p.length = withLength;
1c79356b 1350
0a7de745 1351 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1c79356b
A
1352}
1353
55e303ae
A
1354bool
1355IOGeneralMemoryDescriptor::initWithPhysicalRanges(
0a7de745
A
1356 IOPhysicalRange * ranges,
1357 UInt32 count,
1358 IODirection direction,
1359 bool reference)
55e303ae 1360{
0a7de745 1361 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
55e303ae 1362
0a7de745
A
1363 if (reference) {
1364 mdOpts |= kIOMemoryAsReference;
1365 }
55e303ae 1366
0a7de745 1367 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
55e303ae
A
1368}
1369
1370bool
1371IOGeneralMemoryDescriptor::initWithRanges(
0a7de745
A
1372 IOVirtualRange * ranges,
1373 UInt32 count,
1374 IODirection direction,
1375 task_t task,
1376 bool reference)
55e303ae 1377{
0a7de745
A
1378 IOOptionBits mdOpts = direction;
1379
1380 if (reference) {
1381 mdOpts |= kIOMemoryAsReference;
1382 }
1383
1384 if (task) {
1385 mdOpts |= kIOMemoryTypeVirtual;
1386
1387 // Auto-prepare if this is a kernel memory descriptor as very few
1388 // clients bother to prepare() kernel memory.
1389 // But it was not enforced so what are you going to do?
1390 if (task == kernel_task) {
1391 mdOpts |= kIOMemoryAutoPrepare;
1392 }
1393 } else {
1394 mdOpts |= kIOMemoryTypePhysical;
1395 }
1396
1397 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
55e303ae 1398}
b0d623f7 1399#endif /* !__LP64__ */
55e303ae 1400
1c79356b 1401/*
55e303ae 1402 * initWithOptions:
1c79356b 1403 *
55e303ae 1404 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1405 * from a given task, several physical ranges, a UPL from the UBC
1406 * system, or a uio (possibly 64-bit) from the BSD subsystem.
1407 *
1408 * Passing the ranges as a reference will avoid an extra allocation.
1409 *
1410 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1411 * existing instance -- note this behavior is not commonly supported in other
1412 * I/O Kit classes, although it is supported here.
1c79356b 1413 */
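// Illustrative sketch (not part of the original file): re-targeting an existing
// descriptor as described above, instead of allocating a new one (assumes the
// caller owns `md` exclusively; newAddr/newLen/task are hypothetical):
//
//     IOAddressRange range = { newAddr, newLen };
//     md->initWithOptions(&range, 1, 0, task,
//                         kIODirectionOutIn | kIOMemoryTypeVirtual64, /* mapper */ 0);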
55e303ae 1414
1c79356b 1415bool
0a7de745
A
1416IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1417 UInt32 count,
1418 UInt32 offset,
1419 task_t task,
1420 IOOptionBits options,
1421 IOMapper * mapper)
55e303ae 1422{
0a7de745 1423 IOOptionBits type = options & kIOMemoryTypeMask;
91447636 1424
6d2010ae 1425#ifndef __LP64__
0a7de745
A
1426 if (task
1427 && (kIOMemoryTypeVirtual == type)
1428 && vm_map_is_64bit(get_task_map(task))
1429 && ((IOVirtualRange *) buffers)->address) {
1430 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1431 return false;
1432 }
6d2010ae
A
1433#endif /* !__LP64__ */
1434
0a7de745
A
1435 // Grab the original MD's configuration data to initialise the
1436 // arguments to this function.
1437 if (kIOMemoryTypePersistentMD == type) {
1438 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1439 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1440 ioGMDData *dataP = getDataP(orig->_memoryEntries);
91447636 1441
0a7de745
A
1442 // Only accept persistent memory descriptors with valid dataP data.
1443 assert(orig->_rangesCount == 1);
1444 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1445 return false;
1446 }
91447636 1447
0a7de745
A
1448 _memRef = initData->fMemRef; // Grab the new named entry
1449 options = orig->_flags & ~kIOMemoryAsReference;
1450 type = options & kIOMemoryTypeMask;
1451 buffers = orig->_ranges.v;
1452 count = orig->_rangesCount;
55e303ae 1453
0a7de745
A
1454 // Now grab the original task and whatever mapper was previously used
1455 task = orig->_task;
1456 mapper = dataP->fMapper;
91447636 1457
0a7de745
A
1458 // We are ready to go through the original initialisation now
1459 }
91447636 1460
0a7de745
A
1461 switch (type) {
1462 case kIOMemoryTypeUIO:
1463 case kIOMemoryTypeVirtual:
b0d623f7 1464#ifndef __LP64__
0a7de745 1465 case kIOMemoryTypeVirtual64:
b0d623f7 1466#endif /* !__LP64__ */
0a7de745
A
1467 assert(task);
1468 if (!task) {
1469 return false;
1470 }
1471 break;
55e303ae 1472
0a7de745 1473 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
b0d623f7 1474#ifndef __LP64__
0a7de745 1475 case kIOMemoryTypePhysical64:
b0d623f7 1476#endif /* !__LP64__ */
0a7de745
A
1477 case kIOMemoryTypeUPL:
1478 assert(!task);
1479 break;
1480 default:
1481 return false; /* bad argument */
2d21ac55 1482 }
0a7de745
A
1483
1484 assert(buffers);
1485 assert(count);
1486
1487 /*
1488 * We can check the _initialized instance variable before having ever set
1489 * it to an initial value because I/O Kit guarantees that all our instance
1490 * variables are zeroed on an object's allocation.
1491 */
1492
1493 if (_initialized) {
1494 /*
1495 * An existing memory descriptor is being retargeted to point to
1496 * somewhere else. Clean up our present state.
1497 */
1498 IOOptionBits type = _flags & kIOMemoryTypeMask;
1499 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
1500 while (_wireCount) {
1501 complete();
1502 }
1503 }
1504 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1505 if (kIOMemoryTypeUIO == type) {
1506 uio_free((uio_t) _ranges.v);
1507 }
b0d623f7 1508#ifndef __LP64__
0a7de745
A
1509 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1510 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1511 }
b0d623f7 1512#endif /* !__LP64__ */
0a7de745
A
1513 else {
1514 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1515 }
1516 }
1517
1518 options |= (kIOMemoryRedirected & _flags);
1519 if (!(kIOMemoryRedirected & options)) {
1520 if (_memRef) {
1521 memoryReferenceRelease(_memRef);
1522 _memRef = 0;
1523 }
1524 if (_mappings) {
1525 _mappings->flushCollection();
1526 }
1527 }
1528 } else {
1529 if (!super::init()) {
1530 return false;
1531 }
1532 _initialized = true;
0c530ab8 1533 }
2d21ac55 1534
0a7de745
A
1535 // Grab the appropriate mapper
1536 if (kIOMemoryHostOrRemote & options) {
1537 options |= kIOMemoryMapperNone;
1538 }
1539 if (kIOMemoryMapperNone & options) {
1540 mapper = 0; // No Mapper
1541 } else if (mapper == kIOMapperSystem) {
1542 IOMapper::checkForSystemMapper();
1543 gIOSystemMapper = mapper = IOMapper::gSystem;
1544 }
55e303ae 1545
0a7de745
A
1546 // Remove the dynamic internal use flags from the initial setting
1547 options &= ~(kIOMemoryPreparedReadOnly);
1548 _flags = options;
1549 _task = task;
0c530ab8 1550
b0d623f7 1551#ifndef __LP64__
0a7de745 1552 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
b0d623f7 1553#endif /* !__LP64__ */
91447636 1554
0a7de745
A
1555 _dmaReferences = 0;
1556 __iomd_reservedA = 0;
1557 __iomd_reservedB = 0;
1558 _highestPage = 0;
1559
1560 if (kIOMemoryThreadSafe & options) {
1561 if (!_prepareLock) {
1562 _prepareLock = IOLockAlloc();
1563 }
1564 } else if (_prepareLock) {
1565 IOLockFree(_prepareLock);
1566 _prepareLock = NULL;
91447636 1567 }
0c530ab8 1568
0a7de745
A
1569 if (kIOMemoryTypeUPL == type) {
1570 ioGMDData *dataP;
1571 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1572
1573 if (!initMemoryEntries(dataSize, mapper)) {
1574 return false;
1575 }
1576 dataP = getDataP(_memoryEntries);
1577 dataP->fPageCnt = 0;
1578 switch (kIOMemoryDirectionMask & options) {
1579 case kIODirectionOut:
1580 dataP->fDMAAccess = kIODMAMapReadAccess;
1581 break;
1582 case kIODirectionIn:
1583 dataP->fDMAAccess = kIODMAMapWriteAccess;
1584 break;
1585 case kIODirectionNone:
1586 case kIODirectionOutIn:
1587 default:
1588 panic("bad dir for upl 0x%x\n", (int) options);
1589 break;
1590 }
1591 // _wireCount++; // UPLs start out life wired
1592
1593 _length = count;
1594 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1595
1596 ioPLBlock iopl;
1597 iopl.fIOPL = (upl_t) buffers;
1598 upl_set_referenced(iopl.fIOPL, true);
1599 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1600
1601 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
1602 panic("short external upl");
1603 }
1604
1605 _highestPage = upl_get_highest_page(iopl.fIOPL);
1606
1607 // Set the flag kIOPLOnDevice, conveniently equal to 1
1608 iopl.fFlags = pageList->device | kIOPLExternUPL;
1609 if (!pageList->device) {
1610 // Pre-compute the offset into the UPL's page list
1611 pageList = &pageList[atop_32(offset)];
1612 offset &= PAGE_MASK;
1613 }
1614 iopl.fIOMDOffset = 0;
1615 iopl.fMappedPage = 0;
1616 iopl.fPageInfo = (vm_address_t) pageList;
1617 iopl.fPageOffset = offset;
1618 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1619 } else {
1620 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1621 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1622
1623 // Initialize the memory descriptor
1624 if (options & kIOMemoryAsReference) {
1625#ifndef __LP64__
1626 _rangesIsAllocated = false;
b0d623f7 1627#endif /* !__LP64__ */
0a7de745
A
1628
1629 // Hack assignment to get the buffer arg into _ranges.
1630 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1631 // work, C++ sigh.
1632 // This also initialises the uio & physical ranges.
1633 _ranges.v = (IOVirtualRange *) buffers;
b0d623f7 1634 } else {
0a7de745
A
1635#ifndef __LP64__
1636 _rangesIsAllocated = true;
1637#endif /* !__LP64__ */
1638 switch (type) {
1639 case kIOMemoryTypeUIO:
1640 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1641 break;
1642
1643#ifndef __LP64__
1644 case kIOMemoryTypeVirtual64:
1645 case kIOMemoryTypePhysical64:
1646 if (count == 1
1647#ifndef __arm__
1648 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1649#endif
1650 ) {
1651 if (kIOMemoryTypeVirtual64 == type) {
1652 type = kIOMemoryTypeVirtual;
1653 } else {
1654 type = kIOMemoryTypePhysical;
1655 }
1656 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1657 _rangesIsAllocated = false;
1658 _ranges.v = &_singleRange.v;
1659 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1660 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1661 break;
1662 }
1663 _ranges.v64 = IONew(IOAddressRange, count);
1664 if (!_ranges.v64) {
1665 return false;
1666 }
1667 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1668 break;
1669#endif /* !__LP64__ */
1670 case kIOMemoryTypeVirtual:
1671 case kIOMemoryTypePhysical:
1672 if (count == 1) {
1673 _flags |= kIOMemoryAsReference;
1674#ifndef __LP64__
1675 _rangesIsAllocated = false;
1676#endif /* !__LP64__ */
1677 _ranges.v = &_singleRange.v;
1678 } else {
1679 _ranges.v = IONew(IOVirtualRange, count);
1680 if (!_ranges.v) {
1681 return false;
1682 }
1683 }
1684 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1685 break;
1686 }
b0d623f7 1687 }
0a7de745
A
1688 _rangesCount = count;
1689
1690 // Find starting address within the vector of ranges
1691 Ranges vec = _ranges;
1692 mach_vm_size_t totalLength = 0;
1693 unsigned int ind, pages = 0;
1694 for (ind = 0; ind < count; ind++) {
1695 mach_vm_address_t addr;
1696 mach_vm_address_t endAddr;
1697 mach_vm_size_t len;
1698
1699 // addr & len are returned by this function
1700 getAddrLenForInd(addr, len, type, vec, ind);
1701 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
1702 break;
1703 }
1704 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
1705 break;
1706 }
1707 if (os_add_overflow(totalLength, len, &totalLength)) {
1708 break;
1709 }
1710 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1711 ppnum_t highPage = atop_64(addr + len - 1);
1712 if (highPage > _highestPage) {
1713 _highestPage = highPage;
1714 }
1715 }
1716 }
1717 if ((ind < count)
1718 || (totalLength != ((IOByteCount) totalLength))) {
1719 return false; /* overflow */
1720 }
1721 _length = totalLength;
1722 _pages = pages;
1723
1724 // Auto-prepare memory at creation time.
1725 // Implied completion when the descriptor is freed
1726
1727
1728 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1729 _wireCount++; // Physical MDs are, by definition, wired
1730 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1731 ioGMDData *dataP;
1732 unsigned dataSize;
1733
1734 if (_pages > atop_64(max_mem)) {
1735 return false;
1736 }
1737
1738 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1739 if (!initMemoryEntries(dataSize, mapper)) {
1740 return false;
1741 }
1742 dataP = getDataP(_memoryEntries);
1743 dataP->fPageCnt = _pages;
1744
1745 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1746 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
1747 _kernelTag = IOMemoryTag(kernel_map);
1748 if (_kernelTag == gIOSurfaceTag) {
1749 _userTag = VM_MEMORY_IOSURFACE;
1750 }
1751 }
1752
1753 if ((kIOMemoryPersistent & _flags) && !_memRef) {
1754 IOReturn
1755 err = memoryReferenceCreate(0, &_memRef);
1756 if (kIOReturnSuccess != err) {
1757 return false;
1758 }
1759 }
1760
1761 if ((_flags & kIOMemoryAutoPrepare)
1762 && prepare() != kIOReturnSuccess) {
1763 return false;
1764 }
1765 }
1766 }
91447636 1767
0a7de745 1768 return true;
de355530
A
1769}
1770
1c79356b
A
1771/*
1772 * free
1773 *
1774 * Free resources.
1775 */
0a7de745
A
1776void
1777IOGeneralMemoryDescriptor::free()
1c79356b 1778{
0a7de745 1779 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 1780
0a7de745
A
1781 if (reserved) {
1782 LOCK;
1783 reserved->dp.memory = 0;
1784 UNLOCK;
1785 }
1786 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1787 ioGMDData * dataP;
1788 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
1789 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1790 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1791 }
1792 } else {
1793 while (_wireCount) {
1794 complete();
1795 }
bd504ef0 1796 }
bd504ef0 1797
0a7de745
A
1798 if (_memoryEntries) {
1799 _memoryEntries->release();
1800 }
55e303ae 1801
0a7de745
A
1802 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1803 if (kIOMemoryTypeUIO == type) {
1804 uio_free((uio_t) _ranges.v);
1805 }
b0d623f7 1806#ifndef __LP64__
0a7de745
A
1807 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1808 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1809 }
b0d623f7 1810#endif /* !__LP64__ */
0a7de745
A
1811 else {
1812 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1813 }
1814
1815 _ranges.v = NULL;
1816 }
1817
1818 if (reserved) {
1819 if (reserved->dp.devicePager) {
1820 // memEntry holds a ref on the device pager which owns reserved
1821 // (IOMemoryDescriptorReserved) so no reserved access after this point
1822 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
1823 } else {
1824 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1825 }
1826 reserved = NULL;
1827 }
1828
1829 if (_memRef) {
1830 memoryReferenceRelease(_memRef);
1831 }
1832 if (_prepareLock) {
1833 IOLockFree(_prepareLock);
1834 }
1835
1836 super::free();
1c79356b
A
1837}
1838
b0d623f7 1839#ifndef __LP64__
0a7de745
A
1840void
1841IOGeneralMemoryDescriptor::unmapFromKernel()
b0d623f7 1842{
0a7de745 1843 panic("IOGMD::unmapFromKernel deprecated");
b0d623f7
A
1844}
1845
0a7de745
A
1846void
1847IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
b0d623f7 1848{
0a7de745 1849 panic("IOGMD::mapIntoKernel deprecated");
b0d623f7
A
1850}
1851#endif /* !__LP64__ */
1c79356b
A
1852
1853/*
1854 * getDirection:
1855 *
1856 * Get the direction of the transfer.
1857 */
0a7de745
A
1858IODirection
1859IOMemoryDescriptor::getDirection() const
1c79356b 1860{
b0d623f7 1861#ifndef __LP64__
0a7de745
A
1862 if (_direction) {
1863 return _direction;
1864 }
b0d623f7 1865#endif /* !__LP64__ */
0a7de745 1866 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
1867}
1868
1869/*
1870 * getLength:
1871 *
1872 * Get the length of the transfer (over all ranges).
1873 */
0a7de745
A
1874IOByteCount
1875IOMemoryDescriptor::getLength() const
1c79356b 1876{
0a7de745 1877 return _length;
1c79356b
A
1878}
1879
0a7de745
A
1880void
1881IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b 1882{
0a7de745 1883 _tag = tag;
1c79356b
A
1884}
1885
0a7de745
A
1886IOOptionBits
1887IOMemoryDescriptor::getTag( void )
1c79356b 1888{
0a7de745 1889 return _tag;
1c79356b
A
1890}
1891
0a7de745
A
1892uint64_t
1893IOMemoryDescriptor::getFlags(void)
5ba3f43e 1894{
0a7de745 1895 return _flags;
5ba3f43e
A
1896}
1897
b0d623f7 1898#ifndef __LP64__
39037602
A
1899#pragma clang diagnostic push
1900#pragma clang diagnostic ignored "-Wdeprecated-declarations"
1901
55e303ae 1902 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
0c530ab8
A
1903IOPhysicalAddress
1904IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1905{
0a7de745 1906 addr64_t physAddr = 0;
1c79356b 1907
0a7de745
A
1908 if (prepare() == kIOReturnSuccess) {
1909 physAddr = getPhysicalSegment64( offset, length );
1910 complete();
1911 }
0b4e3aa0 1912
0a7de745 1913 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
0b4e3aa0 1914}
39037602
A
1915
1916#pragma clang diagnostic pop
1917
b0d623f7 1918#endif /* !__LP64__ */
0b4e3aa0 1919
0a7de745
A
1920IOByteCount
1921IOMemoryDescriptor::readBytes
1922(IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1923{
0a7de745
A
1924 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1925 IOByteCount remaining;
1926
1927 // Assert that this entire I/O is within the available range
1928 assert(offset <= _length);
1929 assert(offset + length <= _length);
1930 if ((offset >= _length)
1931 || ((offset + length) > _length)) {
1932 return 0;
1933 }
1c79356b 1934
0a7de745
A
1935 assert(!(kIOMemoryRemote & _flags));
1936 if (kIOMemoryRemote & _flags) {
1937 return 0;
1938 }
5ba3f43e 1939
0a7de745
A
1940 if (kIOMemoryThreadSafe & _flags) {
1941 LOCK;
1942 }
b0d623f7 1943
0a7de745
A
1944 remaining = length = min(length, _length - offset);
1945 while (remaining) { // (process another target segment?)
1946 addr64_t srcAddr64;
1947 IOByteCount srcLen;
1c79356b 1948
0a7de745
A
1949 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1950 if (!srcAddr64) {
1951 break;
1952 }
1c79356b 1953
0a7de745
A
1954 // Clip segment length to remaining
1955 if (srcLen > remaining) {
1956 srcLen = remaining;
1957 }
1c79356b 1958
0a7de745
A
1959 copypv(srcAddr64, dstAddr, srcLen,
1960 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1961
0a7de745
A
1962 dstAddr += srcLen;
1963 offset += srcLen;
1964 remaining -= srcLen;
1965 }
1c79356b 1966
0a7de745
A
1967 if (kIOMemoryThreadSafe & _flags) {
1968 UNLOCK;
1969 }
b0d623f7 1970
0a7de745 1971 assert(!remaining);
1c79356b 1972
0a7de745 1973 return length - remaining;
55e303ae 1974}
0b4e3aa0 1975
0a7de745
A
1976IOByteCount
1977IOMemoryDescriptor::writeBytes
1978(IOByteCount inoffset, const void *bytes, IOByteCount length)
55e303ae 1979{
0a7de745
A
1980 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1981 IOByteCount remaining;
1982 IOByteCount offset = inoffset;
0b4e3aa0 1983
0a7de745
A
1984 // Assert that this entire I/O is within the available range
1985 assert(offset <= _length);
1986 assert(offset + length <= _length);
0b4e3aa0 1987
0a7de745 1988 assert( !(kIOMemoryPreparedReadOnly & _flags));
0b4e3aa0 1989
0a7de745
A
1990 if ((kIOMemoryPreparedReadOnly & _flags)
1991 || (offset >= _length)
1992 || ((offset + length) > _length)) {
1993 return 0;
1994 }
0b4e3aa0 1995
0a7de745
A
1996 assert(!(kIOMemoryRemote & _flags));
1997 if (kIOMemoryRemote & _flags) {
1998 return 0;
1999 }
5ba3f43e 2000
0a7de745
A
2001 if (kIOMemoryThreadSafe & _flags) {
2002 LOCK;
2003 }
b0d623f7 2004
0a7de745
A
2005 remaining = length = min(length, _length - offset);
2006 while (remaining) { // (process another target segment?)
2007 addr64_t dstAddr64;
2008 IOByteCount dstLen;
0b4e3aa0 2009
0a7de745
A
2010 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2011 if (!dstAddr64) {
2012 break;
2013 }
0b4e3aa0 2014
0a7de745
A
2015 // Clip segment length to remaining
2016 if (dstLen > remaining) {
2017 dstLen = remaining;
2018 }
0b4e3aa0 2019
0a7de745
A
2020 if (!srcAddr) {
2021 bzero_phys(dstAddr64, dstLen);
2022 } else {
2023 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
2024 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2025 srcAddr += dstLen;
2026 }
2027 offset += dstLen;
2028 remaining -= dstLen;
fe8ab488 2029 }
1c79356b 2030
0a7de745
A
2031 if (kIOMemoryThreadSafe & _flags) {
2032 UNLOCK;
2033 }
b0d623f7 2034
0a7de745 2035 assert(!remaining);
55e303ae 2036
d9a64523 2037#if defined(__x86_64__)
0a7de745 2038 // copypv does not cppvFsnk on intel
d9a64523 2039#else
0a7de745
A
2040 if (!srcAddr) {
2041 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2042 }
d9a64523 2043#endif
fe8ab488 2044
0a7de745 2045 return length - remaining;
1c79356b
A
2046}
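// --- Illustrative sketch, not part of the original file: copying the head of a
// descriptor into a local buffer with readBytes(). The helper name and the
// 512-byte scratch size are assumptions made only for this example; for
// pageable memory the descriptor would normally be prepare()d first.
static IOReturn
ExampleReadHeader(IOMemoryDescriptor * md)
{
	uint8_t     scratch[512];
	IOByteCount want = min((IOByteCount) sizeof(scratch), md->getLength());

	// readBytes() walks the physical segments and bounces each one through
	// copypv(), so no kernel mapping of the descriptor is needed here.
	IOByteCount got = md->readBytes(0, scratch, want);

	return (got == want) ? kIOReturnSuccess : kIOReturnIOError;
}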
2047
b0d623f7 2048#ifndef __LP64__
0a7de745
A
2049void
2050IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
b0d623f7 2051{
0a7de745 2052 panic("IOGMD::setPosition deprecated");
b0d623f7
A
2053}
2054#endif /* !__LP64__ */
2055
2056static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2057
2058uint64_t
2059IOGeneralMemoryDescriptor::getPreparationID( void )
2060{
0a7de745
A
2061 ioGMDData *dataP;
2062
2063 if (!_wireCount) {
2064 return kIOPreparationIDUnprepared;
2065 }
2066
2067 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2068 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2069 IOMemoryDescriptor::setPreparationID();
2070 return IOMemoryDescriptor::getPreparationID();
2071 }
2072
2073 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2074 return kIOPreparationIDUnprepared;
2075 }
2076
2077 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
2078 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
2079 }
2080 return dataP->fPreparationID;
b0d623f7
A
2081}
2082
0a7de745
A
2083IOMemoryDescriptorReserved *
2084IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 2085{
0a7de745
A
2086 if (!reserved) {
2087 reserved = IONew(IOMemoryDescriptorReserved, 1);
2088 if (reserved) {
2089 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
2090 }
2091 }
2092 return reserved;
316670eb
A
2093}
2094
0a7de745
A
2095void
2096IOMemoryDescriptor::setPreparationID( void )
316670eb 2097{
0a7de745
A
2098 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
2099 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
2100 }
316670eb
A
2101}
2102
0a7de745
A
2103uint64_t
2104IOMemoryDescriptor::getPreparationID( void )
316670eb 2105{
0a7de745
A
2106 if (reserved) {
2107 return reserved->preparationID;
2108 } else {
2109 return kIOPreparationIDUnsupported;
2110 }
b0d623f7 2111}
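// --- Illustrative sketch, not part of the original file: a driver-side cache
// can use the preparation ID to tell whether a descriptor has been re-prepared
// since a cached translation was built. The struct and helper names are
// assumptions made only for this example.
struct ExampleCachedSetup {
	uint64_t preparationID;
	// ... cached scatter/gather program would live here ...
};

static bool
ExampleSetupIsStale(IOMemoryDescriptor * md, const ExampleCachedSetup * cached)
{
	uint64_t current = md->getPreparationID();

	// An unprepared or unsupported ID cannot validate a cached setup.
	if ((kIOPreparationIDUnprepared == current)
	    || (kIOPreparationIDUnsupported == current)) {
		return true;
	}
	return current != cached->preparationID;
}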
de355530 2112
0a7de745
A
2113void
2114IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
39037602 2115{
0a7de745
A
2116 _kernelTag = kernelTag;
2117 _userTag = userTag;
39037602
A
2118}
2119
0a7de745
A
2120vm_tag_t
2121IOMemoryDescriptor::getVMTag(vm_map_t map)
39037602 2122{
0a7de745
A
2123 if (vm_kernel_map_is_kernel(map)) {
2124 if (VM_KERN_MEMORY_NONE != _kernelTag) {
2125 return _kernelTag;
2126 }
2127 } else {
2128 if (VM_KERN_MEMORY_NONE != _userTag) {
2129 return _userTag;
2130 }
2131 }
2132 return IOMemoryTag(map);
39037602
A
2133}
2134
0a7de745
A
2135IOReturn
2136IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 2137{
0a7de745
A
2138 IOReturn err = kIOReturnSuccess;
2139 DMACommandOps params;
2140 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2141 ioGMDData *dataP;
99c3a104 2142
0a7de745
A
2143 params = (op & ~kIOMDDMACommandOperationMask & op);
2144 op &= kIOMDDMACommandOperationMask;
99c3a104 2145
0a7de745
A
2146 if (kIOMDDMAMap == op) {
2147 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2148 return kIOReturnUnderrun;
2149 }
99c3a104 2150
0a7de745 2151 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
99c3a104 2152
0a7de745
A
2153 if (!_memoryEntries
2154 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2155 return kIOReturnNoMemory;
2156 }
99c3a104 2157
0a7de745
A
2158 if (_memoryEntries && data->fMapper) {
2159 bool remap, keepMap;
2160 dataP = getDataP(_memoryEntries);
39236c6e 2161
0a7de745
A
2162 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2163 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2164 }
2165 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2166 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2167 }
39236c6e 2168
0a7de745
A
2169 keepMap = (data->fMapper == gIOSystemMapper);
2170 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3e170ce0 2171
0a7de745
A
2172 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2173 IOLockLock(_prepareLock);
2174 }
a39ff7e2 2175
0a7de745
A
2176 remap = (!keepMap);
2177 remap |= (dataP->fDMAMapNumAddressBits < 64)
2178 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2179 remap |= (dataP->fDMAMapAlignment > page_size);
3e170ce0 2180
0a7de745 2181 if (remap || !dataP->fMappedBaseValid) {
5ba3f43e 2182// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
0a7de745
A
2183 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2184 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2185 dataP->fMappedBase = data->fAlloc;
2186 dataP->fMappedBaseValid = true;
2187 dataP->fMappedLength = data->fAllocLength;
2188 data->fAllocLength = 0; // IOMD owns the alloc now
2189 }
2190 } else {
2191 data->fAlloc = dataP->fMappedBase;
2192 data->fAllocLength = 0; // give out IOMD map
2193 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2194 }
2195 data->fMapContig = !dataP->fDiscontig;
2196
2197 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2198 IOLockUnlock(_prepareLock);
2199 }
2200 }
2201 return err;
99c3a104 2202 }
0a7de745
A
2203 if (kIOMDDMAUnmap == op) {
2204 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2205 return kIOReturnUnderrun;
2206 }
2207 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
4452a7af 2208
0a7de745 2209 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
99c3a104 2210
0a7de745 2211 return kIOReturnSuccess;
cc8bc92a 2212 }
0c530ab8 2213
0a7de745
A
2214 if (kIOMDAddDMAMapSpec == op) {
2215 if (dataSize < sizeof(IODMAMapSpecification)) {
2216 return kIOReturnUnderrun;
2217 }
99c3a104 2218
0a7de745 2219 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
4452a7af 2220
0a7de745
A
2221 if (!_memoryEntries
2222 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2223 return kIOReturnNoMemory;
2224 }
4452a7af 2225
0a7de745
A
2226 if (_memoryEntries) {
2227 dataP = getDataP(_memoryEntries);
2228 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2229 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2230 }
2231 if (data->alignment > dataP->fDMAMapAlignment) {
2232 dataP->fDMAMapAlignment = data->alignment;
2233 }
2234 }
2235 return kIOReturnSuccess;
0c530ab8 2236 }
4452a7af 2237
0a7de745
A
2238 if (kIOMDGetCharacteristics == op) {
2239 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2240 return kIOReturnUnderrun;
2241 }
4452a7af 2242
0a7de745
A
2243 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2244 data->fLength = _length;
2245 data->fSGCount = _rangesCount;
2246 data->fPages = _pages;
2247 data->fDirection = getDirection();
2248 if (!_wireCount) {
2249 data->fIsPrepared = false;
2250 } else {
2251 data->fIsPrepared = true;
2252 data->fHighestPage = _highestPage;
2253 if (_memoryEntries) {
2254 dataP = getDataP(_memoryEntries);
2255 ioPLBlock *ioplList = getIOPLList(dataP);
2256 UInt count = getNumIOPL(_memoryEntries, dataP);
2257 if (count == 1) {
2258 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2259 }
2260 }
2261 }
4452a7af 2262
0a7de745
A
2263 return kIOReturnSuccess;
2264 } else if (kIOMDDMAActive == op) {
2265 if (params) {
2266 int16_t prior;
2267 prior = OSAddAtomic16(1, &md->_dmaReferences);
2268 if (!prior) {
2269 md->_mapName = NULL;
2270 }
2271 } else {
2272 if (md->_dmaReferences) {
2273 OSAddAtomic16(-1, &md->_dmaReferences);
2274 } else {
2275 panic("_dmaReferences underflow");
2276 }
2277 }
2278 } else if (kIOMDWalkSegments != op) {
2279 return kIOReturnBadArgument;
0c530ab8 2280 }
89b3af67 2281
0a7de745
A
2282 // Get the next segment
2283 struct InternalState {
2284 IOMDDMAWalkSegmentArgs fIO;
2285 UInt fOffset2Index;
2286 UInt fIndex;
2287 UInt fNextOffset;
2288 } *isP;
2289
2290 // Find the next segment
2291 if (dataSize < sizeof(*isP)) {
2292 return kIOReturnUnderrun;
99c3a104 2293 }
4452a7af 2294
0a7de745
A
2295 isP = (InternalState *) vData;
2296 UInt offset = isP->fIO.fOffset;
2297 uint8_t mapped = isP->fIO.fMapped;
2298 uint64_t mappedBase;
4452a7af 2299
0a7de745
A
2300 if (mapped && (kIOMemoryRemote & _flags)) {
2301 return kIOReturnNotAttached;
2302 }
4452a7af 2303
0a7de745
A
2304 if (IOMapper::gSystem && mapped
2305 && (!(kIOMemoryHostOnly & _flags))
2306 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2307// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2308 if (!_memoryEntries
2309 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2310 return kIOReturnNoMemory;
2311 }
4452a7af 2312
0a7de745
A
2313 dataP = getDataP(_memoryEntries);
2314 if (dataP->fMapper) {
2315 IODMAMapSpecification mapSpec;
2316 bzero(&mapSpec, sizeof(mapSpec));
2317 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2318 mapSpec.alignment = dataP->fDMAMapAlignment;
2319 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2320 if (kIOReturnSuccess != err) {
2321 return err;
2322 }
2323 dataP->fMappedBaseValid = true;
2324 }
2325 }
0c530ab8 2326
0a7de745
A
2327 if (kIOMDDMAWalkMappedLocal == mapped) {
2328 mappedBase = isP->fIO.fMappedBase;
2329 } else if (mapped) {
2330 if (IOMapper::gSystem
2331 && (!(kIOMemoryHostOnly & _flags))
2332 && _memoryEntries
2333 && (dataP = getDataP(_memoryEntries))
2334 && dataP->fMappedBaseValid) {
2335 mappedBase = dataP->fMappedBase;
2336 } else {
2337 mapped = 0;
2338 }
2339 }
0c530ab8 2340
0a7de745
A
2341 if (offset >= _length) {
2342 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2343 }
0c530ab8 2344
0a7de745
A
2345 // Validate the previous offset
2346 UInt ind, off2Ind = isP->fOffset2Index;
2347 if (!params
2348 && offset
2349 && (offset == isP->fNextOffset || off2Ind <= offset)) {
2350 ind = isP->fIndex;
2351 } else {
2352 ind = off2Ind = 0; // Start from beginning
0c530ab8 2353 }
0a7de745
A
2354 UInt length;
2355 UInt64 address;
2356
2357 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2358 // Physical address based memory descriptor
2359 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2360
2361 // Find the range after the one that contains the offset
2362 mach_vm_size_t len;
2363 for (len = 0; off2Ind <= offset; ind++) {
2364 len = physP[ind].length;
2365 off2Ind += len;
2366 }
0c530ab8 2367
0a7de745
A
2368 // Calculate length within range and starting address
2369 length = off2Ind - offset;
2370 address = physP[ind - 1].address + len - length;
0c530ab8 2371
0a7de745
A
2372 if (true && mapped) {
2373 address = mappedBase + offset;
2374 } else {
2375 // see how far we can coalesce ranges
2376 while (ind < _rangesCount && address + length == physP[ind].address) {
2377 len = physP[ind].length;
2378 length += len;
2379 off2Ind += len;
2380 ind++;
2381 }
2382 }
0c530ab8 2383
0a7de745
A
2384 // correct contiguous check overshoot
2385 ind--;
2386 off2Ind -= len;
0c530ab8 2387 }
0a7de745
A
2388#ifndef __LP64__
2389 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2390 // Physical address based memory descriptor
2391 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2392
2393 // Find the range after the one that contains the offset
2394 mach_vm_size_t len;
2395 for (len = 0; off2Ind <= offset; ind++) {
2396 len = physP[ind].length;
2397 off2Ind += len;
2398 }
4452a7af 2399
0a7de745
A
2400 // Calculate length within range and starting address
2401 length = off2Ind - offset;
2402 address = physP[ind - 1].address + len - length;
0c530ab8 2403
0a7de745
A
2404 if (true && mapped) {
2405 address = mappedBase + offset;
2406 } else {
2407 // see how far we can coalesce ranges
2408 while (ind < _rangesCount && address + length == physP[ind].address) {
2409 len = physP[ind].length;
2410 length += len;
2411 off2Ind += len;
2412 ind++;
2413 }
2414 }
2415 // correct contiguous check overshoot
2416 ind--;
2417 off2Ind -= len;
6d2010ae 2418 }
0a7de745
A
2419#endif /* !__LP64__ */
2420 else {
2421 do {
2422 if (!_wireCount) {
2423 panic("IOGMD: not wired for the IODMACommand");
2424 }
2425
2426 assert(_memoryEntries);
2427
2428 dataP = getDataP(_memoryEntries);
2429 const ioPLBlock *ioplList = getIOPLList(dataP);
2430 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2431 upl_page_info_t *pageList = getPageList(dataP);
2432
2433 assert(numIOPLs > 0);
2434
2435 // Scan through iopl info blocks looking for block containing offset
2436 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
2437 ind++;
2438 }
2439
2440 // Go back to actual range as search goes past it
2441 ioPLBlock ioplInfo = ioplList[ind - 1];
2442 off2Ind = ioplInfo.fIOMDOffset;
2443
2444 if (ind < numIOPLs) {
2445 length = ioplList[ind].fIOMDOffset;
2446 } else {
2447 length = _length;
2448 }
2449 length -= offset; // Remainder within iopl
2450
2451 // Subtract offset till this iopl in total list
2452 offset -= off2Ind;
2453
2454 // If a mapped address is requested and this is a pre-mapped IOPL
2455 // then just need to compute an offset relative to the mapped base.
2456 if (mapped) {
2457 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2458 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2459 continue; // Done; leave do/while(false) now
2460 }
2461
2462 // The offset is rebased into the current iopl.
2463 // Now add the iopl 1st page offset.
2464 offset += ioplInfo.fPageOffset;
2465
2466 // For external UPLs the fPageInfo field points directly to
2467 // the upl's upl_page_info_t array.
2468 if (ioplInfo.fFlags & kIOPLExternUPL) {
2469 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2470 } else {
2471 pageList = &pageList[ioplInfo.fPageInfo];
2472 }
2473
2474 // Check for direct device non-paged memory
2475 if (ioplInfo.fFlags & kIOPLOnDevice) {
2476 address = ptoa_64(pageList->phys_addr) + offset;
2477 continue; // Done; leave do/while(false) now
2478 }
2479
2480 // Now we need to compute the index into the pageList
2481 UInt pageInd = atop_32(offset);
2482 offset &= PAGE_MASK;
2483
2484 // Compute the starting address of this segment
2485 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2486 if (!pageAddr) {
2487 panic("!pageList phys_addr");
2488 }
2489
2490 address = ptoa_64(pageAddr) + offset;
2491
2492 // length is currently set to the length of the remainder of the iopl.
2493 // We need to check that the remainder of the iopl is contiguous.
2494 // This is indicated by pageList[ind].phys_addr being sequential.
2495 IOByteCount contigLength = PAGE_SIZE - offset;
2496 while (contigLength < length
2497 && ++pageAddr == pageList[++pageInd].phys_addr) {
2498 contigLength += PAGE_SIZE;
2499 }
2500
2501 if (contigLength < length) {
2502 length = contigLength;
2503 }
2504
2505
2506 assert(address);
2507 assert(length);
2508 } while (false);
0c530ab8
A
2509 }
2510
0a7de745
A
2511 // Update return values and state
2512 isP->fIO.fIOVMAddr = address;
2513 isP->fIO.fLength = length;
2514 isP->fIndex = ind;
2515 isP->fOffset2Index = off2Ind;
2516 isP->fNextOffset = isP->fIO.fOffset + length;
0c530ab8 2517
0a7de745 2518 return kIOReturnSuccess;
0c530ab8
A
2519}
2520
2521addr64_t
b0d623f7 2522IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2523{
0a7de745
A
2524 IOReturn ret;
2525 mach_vm_address_t address = 0;
2526 mach_vm_size_t length = 0;
2527 IOMapper * mapper = gIOSystemMapper;
2528 IOOptionBits type = _flags & kIOMemoryTypeMask;
2529
2530 if (lengthOfSegment) {
2531 *lengthOfSegment = 0;
2532 }
b0d623f7 2533
0a7de745
A
2534 if (offset >= _length) {
2535 return 0;
2536 }
b0d623f7 2537
0a7de745
A
2538 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2539 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2540 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2541 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2542
2543 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
2544 unsigned rangesIndex = 0;
2545 Ranges vec = _ranges;
2546 mach_vm_address_t addr;
2547
2548 // Find starting address within the vector of ranges
2549 for (;;) {
2550 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2551 if (offset < length) {
2552 break;
2553 }
2554 offset -= length; // (make offset relative)
2555 rangesIndex++;
2556 }
b0d623f7 2557
0a7de745
A
2558 // Now that we have the starting range,
2559 // lets find the last contiguous range
2560 addr += offset;
2561 length -= offset;
b0d623f7 2562
0a7de745
A
2563 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
2564 mach_vm_address_t newAddr;
2565 mach_vm_size_t newLen;
2566
2567 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2568 if (addr + length != newAddr) {
2569 break;
2570 }
2571 length += newLen;
2572 }
2573 if (addr) {
2574 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2575 }
2576 } else {
2577 IOMDDMAWalkSegmentState _state;
2578 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
b0d623f7 2579
0a7de745
A
2580 state->fOffset = offset;
2581 state->fLength = _length - offset;
2582 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
b0d623f7 2583
0a7de745 2584 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
b0d623f7 2585
0a7de745
A
2586 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
2587 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2588 ret, this, state->fOffset,
2589 state->fIOVMAddr, state->fLength);
2590 }
2591 if (kIOReturnSuccess == ret) {
2592 address = state->fIOVMAddr;
2593 length = state->fLength;
2594 }
2595
2596 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2597 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2598
2599 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
2600 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
2601 addr64_t origAddr = address;
2602 IOByteCount origLen = length;
2603
2604 address = mapper->mapToPhysicalAddress(origAddr);
2605 length = page_size - (address & (page_size - 1));
2606 while ((length < origLen)
2607 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
2608 length += page_size;
2609 }
2610 if (length > origLen) {
2611 length = origLen;
2612 }
2613 }
2614 }
b0d623f7 2615 }
4452a7af 2616
0a7de745
A
2617 if (!address) {
2618 length = 0;
2619 }
b0d623f7 2620
0a7de745
A
2621 if (lengthOfSegment) {
2622 *lengthOfSegment = length;
2623 }
4452a7af 2624
0a7de745 2625 return address;
0c530ab8
A
2626}
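// --- Illustrative sketch, not part of the original file: walking every
// physical segment of a prepared descriptor. Passing kIOMemoryMapperNone asks
// for raw CPU physical addresses rather than system-mapper (IOVM) addresses.
// The helper name is an assumption made only for this example.
static void
ExampleLogSegments(IOMemoryDescriptor * md)
{
	IOByteCount offset = 0;
	IOByteCount segLen = 0;

	while (offset < md->getLength()) {
		addr64_t segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
		if (!segAddr || !segLen) {
			break;
		}
		IOLog("segment @ 0x%qx len 0x%qx\n", segAddr, (uint64_t) segLen);
		offset += segLen;
	}
}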
2627
b0d623f7 2628#ifndef __LP64__
39037602
A
2629#pragma clang diagnostic push
2630#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2631
b0d623f7
A
2632addr64_t
2633IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2634{
0a7de745
A
2635 addr64_t address = 0;
2636
2637 if (options & _kIOMemorySourceSegment) {
2638 address = getSourceSegment(offset, lengthOfSegment);
2639 } else if (options & kIOMemoryMapperNone) {
2640 address = getPhysicalSegment64(offset, lengthOfSegment);
2641 } else {
2642 address = getPhysicalSegment(offset, lengthOfSegment);
2643 }
2644
2645 return address;
b0d623f7 2646}
39037602 2647#pragma clang diagnostic pop
0c530ab8 2648
b0d623f7
A
2649addr64_t
2650IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2651{
0a7de745 2652 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
b0d623f7 2653}
0c530ab8 2654
b0d623f7
A
2655IOPhysicalAddress
2656IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2657{
0a7de745
A
2658 addr64_t address = 0;
2659 IOByteCount length = 0;
0c530ab8 2660
0a7de745 2661 address = getPhysicalSegment(offset, lengthOfSegment, 0);
b0d623f7 2662
0a7de745
A
2663 if (lengthOfSegment) {
2664 length = *lengthOfSegment;
2665 }
0c530ab8 2666
0a7de745
A
2667 if ((address + length) > 0x100000000ULL) {
2668 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 2669 address, (long) length, (getMetaClass())->getClassName());
0a7de745 2670 }
0c530ab8 2671
0a7de745 2672 return (IOPhysicalAddress) address;
55e303ae 2673}
de355530 2674
0c530ab8
A
2675addr64_t
2676IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae 2677{
0a7de745
A
2678 IOPhysicalAddress phys32;
2679 IOByteCount length;
2680 addr64_t phys64;
2681 IOMapper * mapper = 0;
2682
2683 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2684 if (!phys32) {
2685 return 0;
2686 }
0b4e3aa0 2687
0a7de745
A
2688 if (gIOSystemMapper) {
2689 mapper = gIOSystemMapper;
2690 }
0c530ab8 2691
0a7de745
A
2692 if (mapper) {
2693 IOByteCount origLen;
55e303ae 2694
0a7de745
A
2695 phys64 = mapper->mapToPhysicalAddress(phys32);
2696 origLen = *lengthOfSegment;
2697 length = page_size - (phys64 & (page_size - 1));
2698 while ((length < origLen)
2699 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
2700 length += page_size;
2701 }
2702 if (length > origLen) {
2703 length = origLen;
2704 }
55e303ae 2705
0a7de745
A
2706 *lengthOfSegment = length;
2707 } else {
2708 phys64 = (addr64_t) phys32;
2709 }
1c79356b 2710
0a7de745 2711 return phys64;
0b4e3aa0
A
2712}
2713
0c530ab8 2714IOPhysicalAddress
b0d623f7 2715IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 2716{
0a7de745 2717 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
0b4e3aa0
A
2718}
2719
b0d623f7
A
2720IOPhysicalAddress
2721IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2722{
0a7de745 2723 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
b0d623f7 2724}
1c79356b 2725
39037602
A
2726#pragma clang diagnostic push
2727#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2728
0a7de745
A
2729void *
2730IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2731 IOByteCount * lengthOfSegment)
b0d623f7 2732{
0a7de745
A
2733 if (_task == kernel_task) {
2734 return (void *) getSourceSegment(offset, lengthOfSegment);
2735 } else {
2736 panic("IOGMD::getVirtualSegment deprecated");
2737 }
91447636 2738
0a7de745 2739 return 0;
b0d623f7 2740}
39037602 2741#pragma clang diagnostic pop
b0d623f7 2742#endif /* !__LP64__ */
91447636 2743
0a7de745 2744IOReturn
0c530ab8
A
2745IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2746{
0a7de745
A
2747 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2748 DMACommandOps params;
2749 IOReturn err;
2750
2751 params = (op & ~kIOMDDMACommandOperationMask & op);
2752 op &= kIOMDDMACommandOperationMask;
2753
2754 if (kIOMDGetCharacteristics == op) {
2755 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2756 return kIOReturnUnderrun;
2757 }
2758
2759 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2760 data->fLength = getLength();
2761 data->fSGCount = 0;
2762 data->fDirection = getDirection();
2763 data->fIsPrepared = true; // Assume prepared - fails safe
2764 } else if (kIOMDWalkSegments == op) {
2765 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
2766 return kIOReturnUnderrun;
2767 }
2768
2769 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2770 IOByteCount offset = (IOByteCount) data->fOffset;
2771
2772 IOPhysicalLength length;
2773 if (data->fMapped && IOMapper::gSystem) {
2774 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2775 } else {
2776 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2777 }
2778 data->fLength = length;
2779 } else if (kIOMDAddDMAMapSpec == op) {
2780 return kIOReturnUnsupported;
2781 } else if (kIOMDDMAMap == op) {
2782 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2783 return kIOReturnUnderrun;
2784 }
2785 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2786
2787 if (params) {
2788 panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2789 }
2790
2791 data->fMapContig = true;
2792 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2793
2794 return err;
2795 } else if (kIOMDDMAUnmap == op) {
2796 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2797 return kIOReturnUnderrun;
2798 }
2799 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2800
2801 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2802
2803 return kIOReturnSuccess;
2804 } else {
2805 return kIOReturnBadArgument;
2806 }
2807
2808 return kIOReturnSuccess;
0c530ab8
A
2809}
2810
0a7de745 2811IOReturn
b0d623f7 2812IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
0a7de745 2813 IOOptionBits * oldState )
b0d623f7 2814{
0a7de745 2815 IOReturn err = kIOReturnSuccess;
b0d623f7 2816
0a7de745
A
2817 vm_purgable_t control;
2818 int state;
b0d623f7 2819
0a7de745
A
2820 assert(!(kIOMemoryRemote & _flags));
2821 if (kIOMemoryRemote & _flags) {
2822 return kIOReturnNotAttached;
2823 }
2824
2825 if (_memRef) {
2826 err = super::setPurgeable(newState, oldState);
2827 } else {
2828 if (kIOMemoryThreadSafe & _flags) {
2829 LOCK;
2830 }
2831 do{
2832 // Find the appropriate vm_map for the given task
2833 vm_map_t curMap;
2834 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
2835 err = kIOReturnNotReady;
2836 break;
2837 } else if (!_task) {
2838 err = kIOReturnUnsupported;
2839 break;
2840 } else {
2841 curMap = get_task_map(_task);
2842 if (NULL == curMap) {
2843 err = KERN_INVALID_ARGUMENT;
2844 break;
2845 }
2846 }
2847
2848 // can only do one range
2849 Ranges vec = _ranges;
2850 IOOptionBits type = _flags & kIOMemoryTypeMask;
2851 mach_vm_address_t addr;
2852 mach_vm_size_t len;
2853 getAddrLenForInd(addr, len, type, vec, 0);
2854
2855 err = purgeableControlBits(newState, &control, &state);
2856 if (kIOReturnSuccess != err) {
2857 break;
2858 }
2859 err = vm_map_purgable_control(curMap, addr, control, &state);
2860 if (oldState) {
2861 if (kIOReturnSuccess == err) {
2862 err = purgeableStateBits(&state);
2863 *oldState = state;
2864 }
2865 }
2866 }while (false);
2867 if (kIOMemoryThreadSafe & _flags) {
2868 UNLOCK;
b0d623f7 2869 }
b0d623f7 2870 }
fe8ab488 2871
0a7de745 2872 return err;
b0d623f7
A
2873}
2874
0a7de745
A
2875IOReturn
2876IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2877 IOOptionBits * oldState )
91447636 2878{
0a7de745 2879 IOReturn err = kIOReturnNotReady;
b0d623f7 2880
0a7de745
A
2881 if (kIOMemoryThreadSafe & _flags) {
2882 LOCK;
2883 }
2884 if (_memRef) {
2885 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2886 }
2887 if (kIOMemoryThreadSafe & _flags) {
2888 UNLOCK;
2889 }
b0d623f7 2890
0a7de745 2891 return err;
91447636 2892}
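// --- Illustrative sketch, not part of the original file: toggling the
// purgeable state of a descriptor that wraps purgeable memory, and checking
// whether the contents survived while volatile. The helper name is an
// assumption made only for this example.
static bool
ExampleMakeNonVolatile(IOMemoryDescriptor * md)
{
	IOOptionBits oldState = 0;

	// ... earlier, md->setPurgeable(kIOMemoryPurgeableVolatile, NULL) let the
	// VM reclaim the pages under memory pressure; take them back now.
	if (kIOReturnSuccess != md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState)) {
		return false;
	}
	// kIOMemoryPurgeableEmpty means the pages were reclaimed and the
	// contents must be regenerated before use.
	return oldState != kIOMemoryPurgeableEmpty;
}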
0a7de745
A
2893
2894IOReturn
2895IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2896 IOByteCount * dirtyPageCount )
39236c6e 2897{
0a7de745 2898 IOReturn err = kIOReturnNotReady;
39236c6e 2899
0a7de745
A
2900 assert(!(kIOMemoryRemote & _flags));
2901 if (kIOMemoryRemote & _flags) {
2902 return kIOReturnNotAttached;
2903 }
5ba3f43e 2904
0a7de745
A
2905 if (kIOMemoryThreadSafe & _flags) {
2906 LOCK;
3e170ce0 2907 }
0a7de745
A
2908 if (_memRef) {
2909 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2910 } else {
2911 IOMultiMemoryDescriptor * mmd;
2912 IOSubMemoryDescriptor * smd;
2913 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
2914 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2915 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
2916 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2917 }
2918 }
2919 if (kIOMemoryThreadSafe & _flags) {
2920 UNLOCK;
3e170ce0 2921 }
39236c6e 2922
0a7de745 2923 return err;
39236c6e 2924}
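// --- Illustrative sketch, not part of the original file: sampling the
// resident and dirty page counts behind a descriptor, e.g. for accounting or
// debug logging. The helper name is an assumption made only for this example.
static void
ExampleLogPageCounts(IOMemoryDescriptor * md)
{
	IOByteCount resident = 0;
	IOByteCount dirty = 0;

	if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
		IOLog("resident pages %llu, dirty pages %llu\n",
		    (unsigned long long) resident, (unsigned long long) dirty);
	}
}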
0a7de745 2925
39236c6e 2926
5ba3f43e
A
2927#if defined(__arm__) || defined(__arm64__)
2928extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2929extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2930#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
2931extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2932extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
5ba3f43e 2933#endif /* defined(__arm__) || defined(__arm64__) */
91447636 2934
0a7de745
A
2935static void
2936SetEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 2937{
0a7de745
A
2938 ppnum_t page, end;
2939
2940 page = atop_64(round_page_64(pa));
2941 end = atop_64(trunc_page_64(pa + count));
2942 for (; page < end; page++) {
2943 pmap_clear_noencrypt(page);
2944 }
0b4c1975
A
2945}
2946
0a7de745
A
2947static void
2948ClearEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 2949{
0a7de745
A
2950 ppnum_t page, end;
2951
2952 page = atop_64(round_page_64(pa));
2953 end = atop_64(trunc_page_64(pa + count));
2954 for (; page < end; page++) {
2955 pmap_set_noencrypt(page);
2956 }
0b4c1975
A
2957}
2958
0a7de745
A
2959IOReturn
2960IOMemoryDescriptor::performOperation( IOOptionBits options,
2961 IOByteCount offset, IOByteCount length )
91447636 2962{
0a7de745
A
2963 IOByteCount remaining;
2964 unsigned int res;
2965 void (*func)(addr64_t pa, unsigned int count) = 0;
5ba3f43e 2966#if defined(__arm__) || defined(__arm64__)
0a7de745 2967 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
5ba3f43e
A
2968#endif
2969
0a7de745
A
2970 assert(!(kIOMemoryRemote & _flags));
2971 if (kIOMemoryRemote & _flags) {
2972 return kIOReturnNotAttached;
2973 }
91447636 2974
0a7de745
A
2975 switch (options) {
2976 case kIOMemoryIncoherentIOFlush:
5ba3f43e 2977#if defined(__arm__) || defined(__arm64__)
0a7de745 2978 func_ext = &dcache_incoherent_io_flush64;
5ba3f43e 2979#if __ARM_COHERENT_IO__
0a7de745
A
2980 func_ext(0, 0, 0, &res);
2981 return kIOReturnSuccess;
5ba3f43e 2982#else /* __ARM_COHERENT_IO__ */
0a7de745 2983 break;
5ba3f43e
A
2984#endif /* __ARM_COHERENT_IO__ */
2985#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
2986 func = &dcache_incoherent_io_flush64;
2987 break;
5ba3f43e 2988#endif /* defined(__arm__) || defined(__arm64__) */
0a7de745 2989 case kIOMemoryIncoherentIOStore:
5ba3f43e 2990#if defined(__arm__) || defined(__arm64__)
0a7de745 2991 func_ext = &dcache_incoherent_io_store64;
5ba3f43e 2992#if __ARM_COHERENT_IO__
0a7de745
A
2993 func_ext(0, 0, 0, &res);
2994 return kIOReturnSuccess;
5ba3f43e 2995#else /* __ARM_COHERENT_IO__ */
0a7de745 2996 break;
5ba3f43e
A
2997#endif /* __ARM_COHERENT_IO__ */
2998#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
2999 func = &dcache_incoherent_io_store64;
3000 break;
5ba3f43e 3001#endif /* defined(__arm__) || defined(__arm64__) */
0b4c1975 3002
0a7de745
A
3003 case kIOMemorySetEncrypted:
3004 func = &SetEncryptOp;
3005 break;
3006 case kIOMemoryClearEncrypted:
3007 func = &ClearEncryptOp;
3008 break;
3009 }
91447636 3010
5ba3f43e 3011#if defined(__arm__) || defined(__arm64__)
0a7de745
A
3012 if ((func == 0) && (func_ext == 0)) {
3013 return kIOReturnUnsupported;
3014 }
5ba3f43e 3015#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3016 if (!func) {
3017 return kIOReturnUnsupported;
3018 }
5ba3f43e 3019#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3020
0a7de745
A
3021 if (kIOMemoryThreadSafe & _flags) {
3022 LOCK;
3023 }
b0d623f7 3024
0a7de745
A
3025 res = 0x0UL;
3026 remaining = length = min(length, getLength() - offset);
3027 while (remaining) {
3028 // (process another target segment?)
3029 addr64_t dstAddr64;
3030 IOByteCount dstLen;
91447636 3031
0a7de745
A
3032 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3033 if (!dstAddr64) {
3034 break;
3035 }
91447636 3036
0a7de745
A
3037 // Clip segment length to remaining
3038 if (dstLen > remaining) {
3039 dstLen = remaining;
3040 }
91447636 3041
5ba3f43e 3042#if defined(__arm__) || defined(__arm64__)
0a7de745
A
3043 if (func) {
3044 (*func)(dstAddr64, dstLen);
3045 }
3046 if (func_ext) {
3047 (*func_ext)(dstAddr64, dstLen, remaining, &res);
3048 if (res != 0x0UL) {
3049 remaining = 0;
3050 break;
3051 }
3052 }
5ba3f43e 3053#else /* defined(__arm__) || defined(__arm64__) */
0a7de745 3054 (*func)(dstAddr64, dstLen);
5ba3f43e 3055#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3056
0a7de745
A
3057 offset += dstLen;
3058 remaining -= dstLen;
3059 }
91447636 3060
0a7de745
A
3061 if (kIOMemoryThreadSafe & _flags) {
3062 UNLOCK;
3063 }
b0d623f7 3064
0a7de745 3065 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
91447636
A
3066}
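// --- Illustrative sketch, not part of the original file: pushing dirty CPU
// cache lines out to memory before a device reads the buffer on a platform
// without coherent I/O; on coherent configurations the call is effectively a
// no-op (see the __ARM_COHERENT_IO__ early returns above). The helper name is
// an assumption made only for this example.
static void
ExampleFlushForDevice(IOMemoryDescriptor * md)
{
	(void) md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
}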
3067
39037602
A
3068/*
3069 *
3070 */
3071
316670eb 3072#if defined(__i386__) || defined(__x86_64__)
3e170ce0 3073
0a7de745
A
3074#define io_kernel_static_start vm_kernel_stext
3075#define io_kernel_static_end vm_kernel_etext
3e170ce0 3076
5ba3f43e
A
3077#elif defined(__arm__) || defined(__arm64__)
3078
0a7de745 3079extern vm_offset_t static_memory_end;
5ba3f43e
A
3080
3081#if defined(__arm64__)
3082#define io_kernel_static_start vm_kext_base
3083#else /* defined(__arm64__) */
3084#define io_kernel_static_start vm_kernel_stext
3085#endif /* defined(__arm64__) */
3086
0a7de745 3087#define io_kernel_static_end static_memory_end
5ba3f43e 3088
316670eb
A
3089#else
3090#error io_kernel_static_end is undefined for this architecture
3091#endif
55e303ae
A
3092
3093static kern_return_t
3094io_get_kernel_static_upl(
0a7de745
A
3095 vm_map_t /* map */,
3096 uintptr_t offset,
3097 upl_size_t *upl_size,
3098 upl_t *upl,
3099 upl_page_info_array_t page_list,
3100 unsigned int *count,
3101 ppnum_t *highest_page)
1c79356b 3102{
0a7de745
A
3103 unsigned int pageCount, page;
3104 ppnum_t phys;
3105 ppnum_t highestPage = 0;
3106
3107 pageCount = atop_32(*upl_size);
3108 if (pageCount > *count) {
3109 pageCount = *count;
3110 }
3111
3112 *upl = NULL;
3113
3114 for (page = 0; page < pageCount; page++) {
3115 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3116 if (!phys) {
3117 break;
3118 }
3119 page_list[page].phys_addr = phys;
3120 page_list[page].free_when_done = 0;
3121 page_list[page].absent = 0;
3122 page_list[page].dirty = 0;
3123 page_list[page].precious = 0;
3124 page_list[page].device = 0;
3125 if (phys > highestPage) {
3126 highestPage = phys;
3127 }
3128 }
3129
3130 *highest_page = highestPage;
3131
3132 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
55e303ae 3133}
0b4e3aa0 3134
0a7de745
A
3135IOReturn
3136IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
55e303ae 3137{
0a7de745
A
3138 IOOptionBits type = _flags & kIOMemoryTypeMask;
3139 IOReturn error = kIOReturnSuccess;
3140 ioGMDData *dataP;
3141 upl_page_info_array_t pageInfo;
3142 ppnum_t mapBase;
3143 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3144
3145 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3146
3147 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3148 forDirection = (IODirection) (forDirection | getDirection());
3149 }
3150
3151 dataP = getDataP(_memoryEntries);
3152 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3153 switch (kIODirectionOutIn & forDirection) {
5ba3f43e 3154 case kIODirectionOut:
0a7de745
A
3155 // Pages do not need to be marked as dirty on commit
3156 uplFlags = UPL_COPYOUT_FROM;
3157 dataP->fDMAAccess = kIODMAMapReadAccess;
3158 break;
55e303ae 3159
5ba3f43e 3160 case kIODirectionIn:
0a7de745
A
3161 dataP->fDMAAccess = kIODMAMapWriteAccess;
3162 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3163 break;
39037602 3164
5ba3f43e 3165 default:
0a7de745
A
3166 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3167 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3168 break;
3169 }
3170
3171 if (_wireCount) {
3172 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3173 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3174 error = kIOReturnNotWritable;
3175 }
3176 } else {
3177 IOMapper *mapper;
3178
3179 mapper = dataP->fMapper;
3180 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3181
3182 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3183 tag = _kernelTag;
3184 if (VM_KERN_MEMORY_NONE == tag) {
3185 tag = IOMemoryTag(kernel_map);
3186 }
3187
3188 if (kIODirectionPrepareToPhys32 & forDirection) {
3189 if (!mapper) {
3190 uplFlags |= UPL_NEED_32BIT_ADDR;
3191 }
3192 if (dataP->fDMAMapNumAddressBits > 32) {
3193 dataP->fDMAMapNumAddressBits = 32;
3194 }
3195 }
3196 if (kIODirectionPrepareNoFault & forDirection) {
3197 uplFlags |= UPL_REQUEST_NO_FAULT;
3198 }
3199 if (kIODirectionPrepareNoZeroFill & forDirection) {
3200 uplFlags |= UPL_NOZEROFILLIO;
3201 }
3202 if (kIODirectionPrepareNonCoherent & forDirection) {
3203 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3204 }
3205
3206 mapBase = 0;
3207
3208 // Note that appendBytes(NULL) zeros the data up to the desired length
3209 // and the length parameter is an unsigned int
3210 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3211 if (uplPageSize > ((unsigned int)uplPageSize)) {
3212 return kIOReturnNoMemory;
3213 }
3214 if (!_memoryEntries->appendBytes(0, uplPageSize)) {
3215 return kIOReturnNoMemory;
3216 }
3217 dataP = 0;
3218
3219 // Find the appropriate vm_map for the given task
3220 vm_map_t curMap;
3221 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
3222 curMap = 0;
3223 } else {
3224 curMap = get_task_map(_task);
3225 }
3226
3227 // Iterate over the vector of virtual ranges
3228 Ranges vec = _ranges;
3229 unsigned int pageIndex = 0;
3230 IOByteCount mdOffset = 0;
3231 ppnum_t highestPage = 0;
3232
3233 IOMemoryEntry * memRefEntry = 0;
3234 if (_memRef) {
3235 memRefEntry = &_memRef->entries[0];
3236 }
3237
3238 for (UInt range = 0; range < _rangesCount; range++) {
3239 ioPLBlock iopl;
3240 mach_vm_address_t startPage, startPageOffset;
3241 mach_vm_size_t numBytes;
3242 ppnum_t highPage = 0;
3243
3244 // Get the startPage address and length of vec[range]
3245 getAddrLenForInd(startPage, numBytes, type, vec, range);
3246 startPageOffset = startPage & PAGE_MASK;
3247 iopl.fPageOffset = startPageOffset;
3248 numBytes += startPageOffset;
3249 startPage = trunc_page_64(startPage);
3250
3251 if (mapper) {
3252 iopl.fMappedPage = mapBase + pageIndex;
3253 } else {
3254 iopl.fMappedPage = 0;
3255 }
3256
3257 // Iterate over the current range, creating UPLs
3258 while (numBytes) {
3259 vm_address_t kernelStart = (vm_address_t) startPage;
3260 vm_map_t theMap;
3261 if (curMap) {
3262 theMap = curMap;
3263 } else if (_memRef) {
3264 theMap = NULL;
3265 } else {
3266 assert(_task == kernel_task);
3267 theMap = IOPageableMapForAddress(kernelStart);
3268 }
3269
3270 // ioplFlags is an in/out parameter
3271 upl_control_flags_t ioplFlags = uplFlags;
3272 dataP = getDataP(_memoryEntries);
3273 pageInfo = getPageList(dataP);
3274 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3275
3276 mach_vm_size_t _ioplSize = round_page(numBytes);
3277 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3278 unsigned int numPageInfo = atop_32(ioplSize);
3279
3280 if ((theMap == kernel_map)
3281 && (kernelStart >= io_kernel_static_start)
3282 && (kernelStart < io_kernel_static_end)) {
3283 error = io_get_kernel_static_upl(theMap,
3284 kernelStart,
3285 &ioplSize,
3286 &iopl.fIOPL,
3287 baseInfo,
3288 &numPageInfo,
3289 &highPage);
3290 } else if (_memRef) {
3291 memory_object_offset_t entryOffset;
3292
3293 entryOffset = mdOffset;
3294 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3295 if (entryOffset >= memRefEntry->size) {
3296 memRefEntry++;
3297 if (memRefEntry >= &_memRef->entries[_memRef->count]) {
3298 panic("memRefEntry");
3299 }
3300 entryOffset = 0;
3301 }
3302 if (ioplSize > (memRefEntry->size - entryOffset)) {
3303 ioplSize = (memRefEntry->size - entryOffset);
3304 }
3305 error = memory_object_iopl_request(memRefEntry->entry,
3306 entryOffset,
3307 &ioplSize,
3308 &iopl.fIOPL,
3309 baseInfo,
3310 &numPageInfo,
3311 &ioplFlags,
3312 tag);
3313 } else {
3314 assert(theMap);
3315 error = vm_map_create_upl(theMap,
3316 startPage,
3317 (upl_size_t*)&ioplSize,
3318 &iopl.fIOPL,
3319 baseInfo,
3320 &numPageInfo,
3321 &ioplFlags,
3322 tag);
3323 }
3324
3325 if (error != KERN_SUCCESS) {
3326 goto abortExit;
3327 }
3328
3329 assert(ioplSize);
3330
3331 if (iopl.fIOPL) {
3332 highPage = upl_get_highest_page(iopl.fIOPL);
3333 }
3334 if (highPage > highestPage) {
3335 highestPage = highPage;
3336 }
3337
3338 if (baseInfo->device) {
3339 numPageInfo = 1;
3340 iopl.fFlags = kIOPLOnDevice;
3341 } else {
3342 iopl.fFlags = 0;
3343 }
3344
3345 iopl.fIOMDOffset = mdOffset;
3346 iopl.fPageInfo = pageIndex;
3347 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) {
3348 dataP->fDiscontig = true;
3349 }
3350
3351 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3352 // Clean up partially created and unsaved iopl
3353 if (iopl.fIOPL) {
3354 upl_abort(iopl.fIOPL, 0);
3355 upl_deallocate(iopl.fIOPL);
3356 }
3357 goto abortExit;
3358 }
3359 dataP = 0;
3360
3361 // Check for multiple iopls in one virtual range
3362 pageIndex += numPageInfo;
3363 mdOffset -= iopl.fPageOffset;
3364 if (ioplSize < numBytes) {
3365 numBytes -= ioplSize;
3366 startPage += ioplSize;
3367 mdOffset += ioplSize;
3368 iopl.fPageOffset = 0;
3369 if (mapper) {
3370 iopl.fMappedPage = mapBase + pageIndex;
3371 }
3372 } else {
3373 mdOffset += numBytes;
3374 break;
3375 }
3376 }
3377 }
3378
3379 _highestPage = highestPage;
3380
3381 if (UPL_COPYOUT_FROM & uplFlags) {
3382 _flags |= kIOMemoryPreparedReadOnly;
3383 }
3384 }
39236c6e 3385
39037602 3386#if IOTRACKING
0a7de745
A
3387 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
3388 dataP = getDataP(_memoryEntries);
3389 if (!dataP->fWireTracking.link.next) {
3390 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3391 }
5ba3f43e 3392 }
39037602 3393#endif /* IOTRACKING */
3e170ce0 3394
0a7de745 3395 return error;
1c79356b
A
3396
3397abortExit:
55e303ae 3398 {
0a7de745
A
3399 dataP = getDataP(_memoryEntries);
3400 UInt done = getNumIOPL(_memoryEntries, dataP);
3401 ioPLBlock *ioplList = getIOPLList(dataP);
3402
3403 for (UInt range = 0; range < done; range++) {
3404 if (ioplList[range].fIOPL) {
3405 upl_abort(ioplList[range].fIOPL, 0);
3406 upl_deallocate(ioplList[range].fIOPL);
3407 }
3408 }
3409 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
55e303ae 3410 }
1c79356b 3411
0a7de745
A
3412 if (error == KERN_FAILURE) {
3413 error = kIOReturnCannotWire;
3414 } else if (error == KERN_MEMORY_ERROR) {
3415 error = kIOReturnNoResources;
3416 }
2d21ac55 3417
0a7de745 3418 return error;
55e303ae 3419}
d7e50217 3420
0a7de745
A
3421bool
3422IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
99c3a104 3423{
0a7de745
A
3424 ioGMDData * dataP;
3425 unsigned dataSize = size;
3426
3427 if (!_memoryEntries) {
3428 _memoryEntries = OSData::withCapacity(dataSize);
3429 if (!_memoryEntries) {
3430 return false;
3431 }
3432 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
3433 return false;
3434 }
3435
3436 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3437 dataP = getDataP(_memoryEntries);
99c3a104 3438
0a7de745
A
3439 if (mapper == kIOMapperWaitSystem) {
3440 IOMapper::checkForSystemMapper();
3441 mapper = IOMapper::gSystem;
3442 }
3443 dataP->fMapper = mapper;
3444 dataP->fPageCnt = 0;
3445 dataP->fMappedBase = 0;
3446 dataP->fDMAMapNumAddressBits = 64;
3447 dataP->fDMAMapAlignment = 0;
3448 dataP->fPreparationID = kIOPreparationIDUnprepared;
3449 dataP->fDiscontig = false;
3450 dataP->fCompletionError = false;
3451 dataP->fMappedBaseValid = false;
3452
3453 return true;
99c3a104
A
3454}
3455
0a7de745
A
3456IOReturn
3457IOMemoryDescriptor::dmaMap(
3458 IOMapper * mapper,
3459 IODMACommand * command,
3460 const IODMAMapSpecification * mapSpec,
3461 uint64_t offset,
3462 uint64_t length,
3463 uint64_t * mapAddress,
3464 uint64_t * mapLength)
99c3a104 3465{
0a7de745
A
3466 IOReturn err;
3467 uint32_t mapOptions;
99c3a104 3468
0a7de745
A
3469 mapOptions = 0;
3470 mapOptions |= kIODMAMapReadAccess;
3471 if (!(kIOMemoryPreparedReadOnly & _flags)) {
3472 mapOptions |= kIODMAMapWriteAccess;
3473 }
99c3a104 3474
0a7de745
A
3475 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3476 mapSpec, command, NULL, mapAddress, mapLength);
99c3a104 3477
0a7de745
A
3478 if (kIOReturnSuccess == err) {
3479 dmaMapRecord(mapper, command, *mapLength);
3480 }
5ba3f43e 3481
0a7de745 3482 return err;
5ba3f43e
A
3483}
3484
0a7de745
A
3485void
3486IOMemoryDescriptor::dmaMapRecord(
3487 IOMapper * mapper,
3488 IODMACommand * command,
3489 uint64_t mapLength)
5ba3f43e 3490{
0a7de745
A
3491 kern_allocation_name_t alloc;
3492 int16_t prior;
3493
3494 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
3495 kern_allocation_update_size(mapper->fAllocName, mapLength);
3496 }
3497
3498 if (!command) {
3499 return;
3500 }
3501 prior = OSAddAtomic16(1, &_dmaReferences);
3502 if (!prior) {
3503 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3504 _mapName = alloc;
3505 mapLength = _length;
3506 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3507 } else {
3508 _mapName = NULL;
3509 }
5ba3f43e 3510 }
5ba3f43e
A
3511}
3512
0a7de745
A
3513IOReturn
3514IOMemoryDescriptor::dmaUnmap(
3515 IOMapper * mapper,
3516 IODMACommand * command,
3517 uint64_t offset,
3518 uint64_t mapAddress,
3519 uint64_t mapLength)
5ba3f43e 3520{
0a7de745
A
3521 IOReturn ret;
3522 kern_allocation_name_t alloc;
3523 kern_allocation_name_t mapName;
3524 int16_t prior;
3525
3526 mapName = 0;
3527 prior = 0;
3528 if (command) {
3529 mapName = _mapName;
3530 if (_dmaReferences) {
3531 prior = OSAddAtomic16(-1, &_dmaReferences);
3532 } else {
3533 panic("_dmaReferences underflow");
3534 }
3535 }
3536
3537 if (!mapLength) {
3538 return kIOReturnSuccess;
3539 }
3540
3541 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3542
3543 if ((alloc = mapper->fAllocName)) {
3544 kern_allocation_update_size(alloc, -mapLength);
3545 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3546 mapLength = _length;
3547 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3548 }
3549 }
3550
3551 return ret;
99c3a104
A
3552}
3553
0a7de745
A
3554IOReturn
3555IOGeneralMemoryDescriptor::dmaMap(
3556 IOMapper * mapper,
3557 IODMACommand * command,
3558 const IODMAMapSpecification * mapSpec,
3559 uint64_t offset,
3560 uint64_t length,
3561 uint64_t * mapAddress,
3562 uint64_t * mapLength)
99c3a104 3563{
0a7de745
A
3564 IOReturn err = kIOReturnSuccess;
3565 ioGMDData * dataP;
3566 IOOptionBits type = _flags & kIOMemoryTypeMask;
99c3a104 3567
0a7de745
A
3568 *mapAddress = 0;
3569 if (kIOMemoryHostOnly & _flags) {
3570 return kIOReturnSuccess;
3571 }
3572 if (kIOMemoryRemote & _flags) {
3573 return kIOReturnNotAttached;
3e170ce0
A
3574 }
3575
0a7de745
A
3576 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3577 || offset || (length != _length)) {
3578 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3579 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
3580 const ioPLBlock * ioplList = getIOPLList(dataP);
3581 upl_page_info_t * pageList;
3582 uint32_t mapOptions = 0;
3583
3584 IODMAMapSpecification mapSpec;
3585 bzero(&mapSpec, sizeof(mapSpec));
3586 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3587 mapSpec.alignment = dataP->fDMAMapAlignment;
3588
3589 // For external UPLs the fPageInfo field points directly to
3590 // the upl's upl_page_info_t array.
3591 if (ioplList->fFlags & kIOPLExternUPL) {
3592 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3593 mapOptions |= kIODMAMapPagingPath;
3594 } else {
3595 pageList = getPageList(dataP);
3596 }
99c3a104 3597
0a7de745
A
3598 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
3599 mapOptions |= kIODMAMapPageListFullyOccupied;
3600 }
99c3a104 3601
0a7de745
A
3602 assert(dataP->fDMAAccess);
3603 mapOptions |= dataP->fDMAAccess;
5ba3f43e 3604
0a7de745
A
3605 // Check for direct device non-paged memory
3606 if (ioplList->fFlags & kIOPLOnDevice) {
3607 mapOptions |= kIODMAMapPhysicallyContiguous;
3608 }
99c3a104 3609
0a7de745
A
3610 IODMAMapPageList dmaPageList =
3611 {
3612 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3613 .pageListCount = _pages,
3614 .pageList = &pageList[0]
3615 };
3616 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3617 command, &dmaPageList, mapAddress, mapLength);
3618
3619 if (kIOReturnSuccess == err) {
3620 dmaMapRecord(mapper, command, *mapLength);
3621 }
3622 }
3623
3624 return err;
99c3a104
A
3625}
3626
55e303ae
A
3627/*
3628 * prepare
3629 *
3630 * Prepare the memory for an I/O transfer. This involves paging in
3631 * the memory, if necessary, and wiring it down for the duration of
3632 * the transfer. The complete() method completes the processing of
3633 * the memory after the I/O transfer finishes. This method needn't be
3634 * called for non-pageable memory.
3635 */
99c3a104 3636
0a7de745
A
3637IOReturn
3638IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
55e303ae 3639{
0a7de745
A
3640 IOReturn error = kIOReturnSuccess;
3641 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 3642
0a7de745
A
3643 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3644 return kIOReturnSuccess;
3645 }
2d21ac55 3646
0a7de745
A
3647 assert(!(kIOMemoryRemote & _flags));
3648 if (kIOMemoryRemote & _flags) {
3649 return kIOReturnNotAttached;
3650 }
5ba3f43e 3651
0a7de745
A
3652 if (_prepareLock) {
3653 IOLockLock(_prepareLock);
3654 }
2d21ac55 3655
0a7de745
A
3656 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3657 error = wireVirtual(forDirection);
3658 }
de355530 3659
0a7de745
A
3660 if (kIOReturnSuccess == error) {
3661 if (1 == ++_wireCount) {
3662 if (kIOMemoryClearEncrypt & _flags) {
3663 performOperation(kIOMemoryClearEncrypted, 0, _length);
3664 }
3665 }
3666 }
0b4c1975 3667
0a7de745
A
3668 if (_prepareLock) {
3669 IOLockUnlock(_prepareLock);
3670 }
2d21ac55 3671
0a7de745 3672 return error;
1c79356b
A
3673}
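// --- Illustrative sketch, not part of the original file: the usual
// prepare()/complete() pairing around a DMA transfer, per the comment above.
// The helper name and the direction are assumptions made only for this
// example.
static IOReturn
ExampleWireForDMA(IOMemoryDescriptor * md)
{
	IOReturn ret = md->prepare(kIODirectionOut);

	if (kIOReturnSuccess != ret) {
		return ret;
	}
	// ... program the hardware using the now-wired physical segments ...
	md->complete(kIODirectionOut);
	return kIOReturnSuccess;
}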
3674
3675/*
3676 * complete
3677 *
3678 * Complete processing of the memory after an I/O transfer finishes.
3679 * This method should not be called unless a prepare was previously
3680 * issued; the prepare() and complete() must occur in pairs, before
3681 * and after an I/O transfer involving pageable memory.
3682 */
6d2010ae 3683
0a7de745
A
3684IOReturn
3685IOGeneralMemoryDescriptor::complete(IODirection forDirection)
1c79356b 3686{
0a7de745
A
3687 IOOptionBits type = _flags & kIOMemoryTypeMask;
3688 ioGMDData * dataP;
1c79356b 3689
0a7de745
A
3690 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3691 return kIOReturnSuccess;
3692 }
3693
3694 assert(!(kIOMemoryRemote & _flags));
3695 if (kIOMemoryRemote & _flags) {
3696 return kIOReturnNotAttached;
3697 }
3698
3699 if (_prepareLock) {
3700 IOLockLock(_prepareLock);
3701 }
3702 do{
3703 assert(_wireCount);
3704 if (!_wireCount) {
3705 break;
3706 }
3707 dataP = getDataP(_memoryEntries);
3708 if (!dataP) {
3709 break;
3710 }
3711
3712 if (kIODirectionCompleteWithError & forDirection) {
3713 dataP->fCompletionError = true;
3714 }
3715
3716 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
3717 performOperation(kIOMemorySetEncrypted, 0, _length);
3718 }
1c79356b 3719
0a7de745
A
3720 _wireCount--;
3721 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
3722 ioPLBlock *ioplList = getIOPLList(dataP);
3723 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3724
3725 if (_wireCount) {
3726 // kIODirectionCompleteWithDataValid & forDirection
3727 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3728 vm_tag_t tag;
3729 tag = getVMTag(kernel_map);
3730 for (ind = 0; ind < count; ind++) {
3731 if (ioplList[ind].fIOPL) {
3732 iopl_valid_data(ioplList[ind].fIOPL, tag);
3733 }
3734 }
3735 }
3736 } else {
3737 if (_dmaReferences) {
3738 panic("complete() while dma active");
3739 }
3740
3741 if (dataP->fMappedBaseValid) {
3742 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3743 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3744 }
3e170ce0 3745#if IOTRACKING
0a7de745
A
3746 if (dataP->fWireTracking.link.next) {
3747 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3748 }
39037602 3749#endif /* IOTRACKING */
0a7de745
A
3750 // Only complete IOPLs that we created ourselves, i.e. those for the TypeVirtual cases
3751 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3752 for (ind = 0; ind < count; ind++) {
3753 if (ioplList[ind].fIOPL) {
3754 if (dataP->fCompletionError) {
3755 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3756 } else {
3757 upl_commit(ioplList[ind].fIOPL, 0, 0);
3758 }
3759 upl_deallocate(ioplList[ind].fIOPL);
3760 }
3761 }
3762 } else if (kIOMemoryTypeUPL == type) {
3763 upl_set_referenced(ioplList[0].fIOPL, false);
3764 }
3765
3766 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3767
3768 dataP->fPreparationID = kIOPreparationIDUnprepared;
3769 _flags &= ~kIOMemoryPreparedReadOnly;
3770 }
3771 }
3772 }while (false);
3773
3774 if (_prepareLock) {
3775 IOLockUnlock(_prepareLock);
3776 }
3777
3778 return kIOReturnSuccess;
1c79356b
A
3779}
3780
0a7de745
A
3781IOReturn
3782IOGeneralMemoryDescriptor::doMap(
3783 vm_map_t __addressMap,
3784 IOVirtualAddress * __address,
3785 IOOptionBits options,
3786 IOByteCount __offset,
3787 IOByteCount __length )
1c79356b 3788{
b0d623f7 3789#ifndef __LP64__
0a7de745
A
3790 if (!(kIOMap64Bit & options)) {
3791 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3792 }
b0d623f7 3793#endif /* !__LP64__ */
2d21ac55 3794
0a7de745 3795 kern_return_t err;
fe8ab488 3796
0a7de745
A
3797 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3798 mach_vm_size_t offset = mapping->fOffset + __offset;
3799 mach_vm_size_t length = mapping->fLength;
2d21ac55 3800
0a7de745
A
3801 IOOptionBits type = _flags & kIOMemoryTypeMask;
3802 Ranges vec = _ranges;
91447636 3803
0a7de745
A
3804 mach_vm_address_t range0Addr = 0;
3805 mach_vm_size_t range0Len = 0;
060df5ea 3806
0a7de745
A
3807 if ((offset >= _length) || ((offset + length) > _length)) {
3808 return kIOReturnBadArgument;
3809 }
5ba3f43e 3810
0a7de745
A
3811 assert(!(kIOMemoryRemote & _flags));
3812 if (kIOMemoryRemote & _flags) {
3813 return 0;
3814 }
91447636 3815
0a7de745
A
3816 if (vec.v) {
3817 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3818 }
2d21ac55 3819
0a7de745
A
3820 // mapping source == dest? (could be much better)
3821 if (_task
3822 && (mapping->fAddressTask == _task)
3823 && (mapping->fAddressMap == get_task_map(_task))
3824 && (options & kIOMapAnywhere)
3825 && (!(kIOMapUnique & options))
3826 && (1 == _rangesCount)
3827 && (0 == offset)
3828 && range0Addr
3829 && (length <= range0Len)) {
3830 mapping->fAddress = range0Addr;
3831 mapping->fOptions |= kIOMapStatic;
3832
3833 return kIOReturnSuccess;
3834 }
1c79356b 3835
0a7de745
A
3836 if (!_memRef) {
3837 IOOptionBits createOptions = 0;
3838 if (!(kIOMapReadOnly & options)) {
3839 createOptions |= kIOMemoryReferenceWrite;
fe8ab488 3840#if DEVELOPMENT || DEBUG
0a7de745
A
3841 if (kIODirectionOut == (kIODirectionOutIn & _flags)) {
3842 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3843 }
0b4e3aa0 3844#endif
0a7de745
A
3845 }
3846 err = memoryReferenceCreate(createOptions, &_memRef);
3847 if (kIOReturnSuccess != err) {
3848 return err;
3849 }
fe8ab488 3850 }
9bccf70c 3851
0a7de745
A
3852 memory_object_t pager;
3853 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3854
3855 // <upl_transpose //
3856 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
3857 do{
3858 upl_t redirUPL2;
3859 upl_size_t size;
3860 upl_control_flags_t flags;
3861 unsigned int lock_count;
3862
3863 if (!_memRef || (1 != _memRef->count)) {
3864 err = kIOReturnNotReadable;
3865 break;
3866 }
3867
3868 size = round_page(mapping->fLength);
3869 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3870 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3871
3872 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3873 NULL, NULL,
3874 &flags, getVMTag(kernel_map))) {
3875 redirUPL2 = NULL;
3876 }
3877
3878 for (lock_count = 0;
3879 IORecursiveLockHaveLock(gIOMemoryLock);
3880 lock_count++) {
3881 UNLOCK;
3882 }
3883 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3884 for (;
3885 lock_count;
3886 lock_count--) {
3887 LOCK;
3888 }
3889
3890 if (kIOReturnSuccess != err) {
3891 IOLog("upl_transpose(%x)\n", err);
3892 err = kIOReturnSuccess;
3893 }
3894
3895 if (redirUPL2) {
3896 upl_commit(redirUPL2, NULL, 0);
3897 upl_deallocate(redirUPL2);
3898 redirUPL2 = 0;
3899 }
3900 {
3901 // swap the memEntries since they now refer to different vm_objects
3902 IOMemoryReference * me = _memRef;
3903 _memRef = mapping->fMemory->_memRef;
3904 mapping->fMemory->_memRef = me;
3905 }
3906 if (pager) {
3907 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3908 }
3909 }while (false);
39037602 3910 }
0a7de745
A
3911 // upl_transpose> //
3912 else {
3913 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3914#if IOTRACKING
3915 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
3916 // only DRAM maps in the default-on development case
3917 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3918 }
39037602 3919#endif /* IOTRACKING */
0a7de745
A
3920 if ((err == KERN_SUCCESS) && pager) {
3921 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3922
3923 if (err != KERN_SUCCESS) {
3924 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3925 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
3926 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3927 }
3928 }
fe8ab488 3929 }
fe8ab488 3930
0a7de745 3931 return err;
1c79356b
A
3932}
3933
39037602
A
3934#if IOTRACKING
3935IOReturn
3936IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
0a7de745 3937 mach_vm_address_t * address, mach_vm_size_t * size)
39037602
A
3938{
3939#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3940
0a7de745 3941 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
39037602 3942
0a7de745
A
3943 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
3944 return kIOReturnNotReady;
3945 }
39037602 3946
0a7de745
A
3947 *task = map->fAddressTask;
3948 *address = map->fAddress;
3949 *size = map->fLength;
39037602 3950
0a7de745 3951 return kIOReturnSuccess;
39037602
A
3952}
3953#endif /* IOTRACKING */
3954
0a7de745
A
3955IOReturn
3956IOGeneralMemoryDescriptor::doUnmap(
3957 vm_map_t addressMap,
3958 IOVirtualAddress __address,
3959 IOByteCount __length )
1c79356b 3960{
0a7de745 3961 return super::doUnmap(addressMap, __address, __length);
1c79356b
A
3962}
3963
3964/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3965
b0d623f7
A
3966#undef super
3967#define super OSObject
1c79356b 3968
b0d623f7 3969OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 3970
b0d623f7
A
3971OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3972OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3973OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3974OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3975OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3976OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3977OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3978OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 3979
b0d623f7 3980/* ex-inline function implementation */
0a7de745
A
3981IOPhysicalAddress
3982IOMemoryMap::getPhysicalAddress()
3983{
3984 return getPhysicalSegment( 0, 0 );
3985}
1c79356b
A
3986
3987/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3988
0a7de745
A
3989bool
3990IOMemoryMap::init(
3991 task_t intoTask,
3992 mach_vm_address_t toAddress,
3993 IOOptionBits _options,
3994 mach_vm_size_t _offset,
3995 mach_vm_size_t _length )
1c79356b 3996{
0a7de745
A
3997 if (!intoTask) {
3998 return false;
3999 }
1c79356b 4000
0a7de745
A
4001 if (!super::init()) {
4002 return false;
4003 }
1c79356b 4004
0a7de745
A
4005 fAddressMap = get_task_map(intoTask);
4006 if (!fAddressMap) {
4007 return false;
4008 }
4009 vm_map_reference(fAddressMap);
1c79356b 4010
0a7de745
A
4011 fAddressTask = intoTask;
4012 fOptions = _options;
4013 fLength = _length;
4014 fOffset = _offset;
4015 fAddress = toAddress;
1c79356b 4016
0a7de745 4017 return true;
1c79356b
A
4018}
4019
0a7de745
A
4020bool
4021IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 4022{
0a7de745
A
4023 if (!_memory) {
4024 return false;
4025 }
4026
4027 if (!fSuperMap) {
4028 if ((_offset + fLength) > _memory->getLength()) {
4029 return false;
4030 }
4031 fOffset = _offset;
4032 }
4033
4034 _memory->retain();
4035 if (fMemory) {
4036 if (fMemory != _memory) {
4037 fMemory->removeMapping(this);
4038 }
4039 fMemory->release();
4040 }
4041 fMemory = _memory;
4042
4043 return true;
1c79356b
A
4044}
4045
0a7de745
A
4046IOReturn
4047IOMemoryDescriptor::doMap(
4048 vm_map_t __addressMap,
4049 IOVirtualAddress * __address,
4050 IOOptionBits options,
4051 IOByteCount __offset,
4052 IOByteCount __length )
1c79356b 4053{
0a7de745 4054 return kIOReturnUnsupported;
fe8ab488 4055}
1c79356b 4056
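// handleFault: if this descriptor is currently redirected (see redirect()),
// sleep until the redirection is lifted before allowing the fault to proceed.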
0a7de745
A
4057IOReturn
4058IOMemoryDescriptor::handleFault(
4059 void * _pager,
4060 mach_vm_size_t sourceOffset,
4061 mach_vm_size_t length)
fe8ab488 4062{
0a7de745 4063 if (kIOMemoryRedirected & _flags) {
b0d623f7 4064#if DEBUG
0a7de745 4065 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2d21ac55 4066#endif
0a7de745
A
4067 do {
4068 SLEEP;
4069 } while (kIOMemoryRedirected & _flags);
4070 }
4071 return kIOReturnSuccess;
0b4e3aa0
A
4072}
4073
0a7de745
A
4074IOReturn
4075IOMemoryDescriptor::populateDevicePager(
4076 void * _pager,
4077 vm_map_t addressMap,
4078 mach_vm_address_t address,
4079 mach_vm_size_t sourceOffset,
4080 mach_vm_size_t length,
4081 IOOptionBits options )
0b4e3aa0 4082{
0a7de745
A
4083 IOReturn err = kIOReturnSuccess;
4084 memory_object_t pager = (memory_object_t) _pager;
4085 mach_vm_size_t size;
4086 mach_vm_size_t bytes;
4087 mach_vm_size_t page;
4088 mach_vm_size_t pageOffset;
4089 mach_vm_size_t pagerOffset;
4090 IOPhysicalLength segLen, chunk;
4091 addr64_t physAddr;
4092 IOOptionBits type;
4093
4094 type = _flags & kIOMemoryTypeMask;
4095
4096 if (reserved->dp.pagerContig) {
4097 sourceOffset = 0;
4098 pagerOffset = 0;
4099 }
4100
4101 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
4102 assert( physAddr );
4103 pageOffset = physAddr - trunc_page_64( physAddr );
4104 pagerOffset = sourceOffset;
4105
4106 size = length + pageOffset;
4107 physAddr -= pageOffset;
4108
4109 segLen += pageOffset;
4110 bytes = size;
4111 do{
4112 // in the middle of the loop only map whole pages
4113 if (segLen >= bytes) {
4114 segLen = bytes;
4115 } else if (segLen != trunc_page(segLen)) {
4116 err = kIOReturnVMError;
4117 }
4118 if (physAddr != trunc_page_64(physAddr)) {
4119 err = kIOReturnBadArgument;
4120 }
4121
4122 if (kIOReturnSuccess != err) {
4123 break;
4124 }
1c79356b 4125
3e170ce0 4126#if DEBUG || DEVELOPMENT
0a7de745
A
4127 if ((kIOMemoryTypeUPL != type)
4128 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) {
4129 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
4130 }
3e170ce0
A
4131#endif /* DEBUG || DEVELOPMENT */
4132
0a7de745
A
4133 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
4134 for (page = 0;
4135 (page < segLen) && (KERN_SUCCESS == err);
4136 page += chunk) {
4137 err = device_pager_populate_object(pager, pagerOffset,
4138 (ppnum_t)(atop_64(physAddr + page)), chunk);
4139 pagerOffset += chunk;
4140 }
5ba3f43e 4141
0a7de745
A
4142 assert(KERN_SUCCESS == err);
4143 if (err) {
4144 break;
4145 }
4146
4147 // For kernel mappings, this call to vm_fault forces an early
4148 // pmap-level resolution of the mappings created above, since
4149 // faulting them in later can't take place from interrupt level.
4150 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
4151 err = vm_fault(addressMap,
4152 (vm_map_offset_t)trunc_page_64(address),
4153 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
4154 FALSE, VM_KERN_MEMORY_NONE,
4155 THREAD_UNINT, NULL,
4156 (vm_map_offset_t)0);
4157
4158 if (KERN_SUCCESS != err) {
4159 break;
4160 }
4161 }
9bccf70c 4162
0a7de745
A
4163 sourceOffset += segLen - pageOffset;
4164 address += segLen;
4165 bytes -= segLen;
4166 pageOffset = 0;
4167 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 4168
0a7de745
A
4169 if (bytes) {
4170 err = kIOReturnBadArgument;
4171 }
1c79356b 4172
0a7de745 4173 return err;
1c79356b
A
4174}
4175
0a7de745
A
4176IOReturn
4177IOMemoryDescriptor::doUnmap(
4178 vm_map_t addressMap,
4179 IOVirtualAddress __address,
4180 IOByteCount __length )
1c79356b 4181{
0a7de745
A
4182 IOReturn err;
4183 IOMemoryMap * mapping;
4184 mach_vm_address_t address;
4185 mach_vm_size_t length;
4186
4187 if (__length) {
4188 panic("doUnmap");
4189 }
4190
4191 mapping = (IOMemoryMap *) __address;
4192 addressMap = mapping->fAddressMap;
4193 address = mapping->fAddress;
4194 length = mapping->fLength;
4195
4196 if (kIOMapOverwrite & mapping->fOptions) {
4197 err = KERN_SUCCESS;
4198 } else {
4199 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
4200 addressMap = IOPageableMapForAddress( address );
4201 }
b0d623f7 4202#if DEBUG
0a7de745
A
4203 if (kIOLogMapping & gIOKitDebug) {
4204 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4205 addressMap, address, length );
4206 }
1c79356b 4207#endif
0a7de745
A
4208 err = mach_vm_deallocate( addressMap, address, length );
4209 }
1c79356b 4210
3e170ce0 4211#if IOTRACKING
0a7de745 4212 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
39037602 4213#endif /* IOTRACKING */
1c79356b 4214
0a7de745 4215 return err;
1c79356b
A
4216}
4217
0a7de745
A
4218IOReturn
4219IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 4220{
0a7de745
A
4221 IOReturn err = kIOReturnSuccess;
4222 IOMemoryMap * mapping = 0;
4223 OSIterator * iter;
91447636 4224
0a7de745 4225 LOCK;
39236c6e 4226
0a7de745
A
4227 if (doRedirect) {
4228 _flags |= kIOMemoryRedirected;
4229 } else {
4230 _flags &= ~kIOMemoryRedirected;
4231 }
39236c6e 4232
0a7de745
A
4233 do {
4234 if ((iter = OSCollectionIterator::withCollection( _mappings))) {
4235 memory_object_t pager;
4236
4237 if (reserved) {
4238 pager = (memory_object_t) reserved->dp.devicePager;
4239 } else {
4240 pager = MACH_PORT_NULL;
4241 }
4242
4243 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
4244 mapping->redirect( safeTask, doRedirect );
4245 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
4246 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4247 }
4248 }
4249
4250 iter->release();
39236c6e 4251 }
0a7de745 4252 } while (false);
e3027f41 4253
0a7de745
A
4254 if (!doRedirect) {
4255 WAKEUP;
91447636 4256 }
0b4e3aa0 4257
0a7de745 4258 UNLOCK;
e3027f41 4259
b0d623f7 4260#ifndef __LP64__
0a7de745
A
4261 // temporary binary compatibility
4262 IOSubMemoryDescriptor * subMem;
4263 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
4264 err = subMem->redirect( safeTask, doRedirect );
4265 } else {
4266 err = kIOReturnSuccess;
4267 }
b0d623f7 4268#endif /* !__LP64__ */
e3027f41 4269
0a7de745 4270 return err;
e3027f41
A
4271}
4272
0a7de745
A
4273IOReturn
4274IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41 4275{
0a7de745 4276 IOReturn err = kIOReturnSuccess;
e3027f41 4277
0a7de745 4278 if (fSuperMap) {
b0d623f7 4279// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
0a7de745
A
4280 } else {
4281 LOCK;
4282
4283 do{
4284 if (!fAddress) {
4285 break;
4286 }
4287 if (!fAddressMap) {
4288 break;
4289 }
4290
4291 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4292 && (0 == (fOptions & kIOMapStatic))) {
4293 IOUnmapPages( fAddressMap, fAddress, fLength );
4294 err = kIOReturnSuccess;
b0d623f7 4295#if DEBUG
0a7de745 4296 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 4297#endif
0a7de745
A
4298 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
4299 IOOptionBits newMode;
4300 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4301 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4302 }
4303 }while (false);
4304 UNLOCK;
4305 }
e3027f41 4306
0a7de745
A
4307 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4308 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4309 && safeTask
4310 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
4311 fMemory->redirect(safeTask, doRedirect);
4312 }
91447636 4313
0a7de745 4314 return err;
e3027f41
A
4315}
4316
0a7de745
A
4317IOReturn
4318IOMemoryMap::unmap( void )
1c79356b 4319{
0a7de745 4320 IOReturn err;
1c79356b 4321
0a7de745 4322 LOCK;
1c79356b 4323
0a7de745
A
4324 if (fAddress && fAddressMap && (0 == fSuperMap) && fMemory
4325 && (0 == (kIOMapStatic & fOptions))) {
4326 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4327 } else {
4328 err = kIOReturnSuccess;
4329 }
1c79356b 4330
0a7de745
A
4331 if (fAddressMap) {
4332 vm_map_deallocate(fAddressMap);
4333 fAddressMap = 0;
4334 }
2d21ac55 4335
0a7de745 4336 fAddress = 0;
1c79356b 4337
0a7de745 4338 UNLOCK;
1c79356b 4339
0a7de745 4340 return err;
1c79356b
A
4341}
4342
0a7de745
A
4343void
4344IOMemoryMap::taskDied( void )
1c79356b 4345{
0a7de745
A
4346 LOCK;
4347 if (fUserClientUnmap) {
4348 unmap();
4349 }
3e170ce0 4350#if IOTRACKING
0a7de745
A
4351 else {
4352 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4353 }
39037602 4354#endif /* IOTRACKING */
3e170ce0 4355
0a7de745
A
4356 if (fAddressMap) {
4357 vm_map_deallocate(fAddressMap);
4358 fAddressMap = 0;
4359 }
4360 fAddressTask = 0;
4361 fAddress = 0;
4362 UNLOCK;
1c79356b
A
4363}
4364
0a7de745
A
4365IOReturn
4366IOMemoryMap::userClientUnmap( void )
b0d623f7 4367{
0a7de745
A
4368 fUserClientUnmap = true;
4369 return kIOReturnSuccess;
b0d623f7
A
4370}
4371
9bccf70c
A
4372// Overload the release mechanism. Every mapping must be a member
4373// of its memory descriptor's _mappings set. This means that we
4374// always have 2 references on a mapping. When either of these references
4375// is released we need to free ourselves.
0a7de745
A
4376void
4377IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 4378{
0a7de745
A
4379 LOCK;
4380 super::taggedRelease(tag, 2);
4381 UNLOCK;
9bccf70c
A
4382}
4383
0a7de745
A
4384void
4385IOMemoryMap::free()
1c79356b 4386{
0a7de745 4387 unmap();
1c79356b 4388
0a7de745
A
4389 if (fMemory) {
4390 LOCK;
4391 fMemory->removeMapping(this);
4392 UNLOCK;
4393 fMemory->release();
4394 }
1c79356b 4395
0a7de745
A
4396 if (fOwner && (fOwner != fMemory)) {
4397 LOCK;
4398 fOwner->removeMapping(this);
4399 UNLOCK;
4400 }
91447636 4401
0a7de745
A
4402 if (fSuperMap) {
4403 fSuperMap->release();
4404 }
1c79356b 4405
0a7de745
A
4406 if (fRedirUPL) {
4407 upl_commit(fRedirUPL, NULL, 0);
4408 upl_deallocate(fRedirUPL);
4409 }
91447636 4410
0a7de745 4411 super::free();
1c79356b
A
4412}
4413
0a7de745
A
4414IOByteCount
4415IOMemoryMap::getLength()
1c79356b 4416{
0a7de745 4417 return fLength;
1c79356b
A
4418}
4419
0a7de745
A
4420IOVirtualAddress
4421IOMemoryMap::getVirtualAddress()
1c79356b 4422{
b0d623f7 4423#ifndef __LP64__
0a7de745
A
4424 if (fSuperMap) {
4425 fSuperMap->getVirtualAddress();
4426 } else if (fAddressMap
4427 && vm_map_is_64bit(fAddressMap)
4428 && (sizeof(IOVirtualAddress) < 8)) {
4429 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4430 }
b0d623f7 4431#endif /* !__LP64__ */
2d21ac55 4432
0a7de745 4433 return fAddress;
2d21ac55
A
4434}
4435
b0d623f7 4436#ifndef __LP64__
0a7de745
A
4437mach_vm_address_t
4438IOMemoryMap::getAddress()
2d21ac55 4439{
0a7de745 4440 return fAddress;
2d21ac55
A
4441}
4442
0a7de745
A
4443mach_vm_size_t
4444IOMemoryMap::getSize()
2d21ac55 4445{
0a7de745 4446 return fLength;
1c79356b 4447}
b0d623f7 4448#endif /* !__LP64__ */
1c79356b 4449
2d21ac55 4450
0a7de745
A
4451task_t
4452IOMemoryMap::getAddressTask()
1c79356b 4453{
0a7de745
A
4454 if (fSuperMap) {
4455 return fSuperMap->getAddressTask();
4456 } else {
4457 return fAddressTask;
4458 }
1c79356b
A
4459}
4460
0a7de745
A
4461IOOptionBits
4462IOMemoryMap::getMapOptions()
1c79356b 4463{
0a7de745 4464 return fOptions;
1c79356b
A
4465}
4466
0a7de745
A
4467IOMemoryDescriptor *
4468IOMemoryMap::getMemoryDescriptor()
1c79356b 4469{
0a7de745 4470 return fMemory;
1c79356b
A
4471}
4472
0a7de745
A
4473IOMemoryMap *
4474IOMemoryMap::copyCompatible(
4475 IOMemoryMap * newMapping )
1c79356b 4476{
0a7de745
A
4477 task_t task = newMapping->getAddressTask();
4478 mach_vm_address_t toAddress = newMapping->fAddress;
4479 IOOptionBits _options = newMapping->fOptions;
4480 mach_vm_size_t _offset = newMapping->fOffset;
4481 mach_vm_size_t _length = newMapping->fLength;
4482
4483 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
4484 return 0;
4485 }
4486 if ((fOptions ^ _options) & kIOMapReadOnly) {
4487 return 0;
4488 }
4489 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
4490 && ((fOptions ^ _options) & kIOMapCacheMask)) {
4491 return 0;
4492 }
4493
4494 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
4495 return 0;
4496 }
4497
4498 if (_offset < fOffset) {
4499 return 0;
4500 }
4501
4502 _offset -= fOffset;
4503
4504 if ((_offset + _length) > fLength) {
4505 return 0;
4506 }
4507
4508 retain();
4509 if ((fLength == _length) && (!_offset)) {
4510 newMapping = this;
4511 } else {
4512 newMapping->fSuperMap = this;
4513 newMapping->fOffset = fOffset + _offset;
4514 newMapping->fAddress = fAddress + _offset;
4515 }
4516
4517 return newMapping;
1c79356b
A
4518}
4519
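// Wire the pages backing the given sub-range of this mapping into its
// target map (when direction bits are passed in options), or unwire them
// when no direction bits are set.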
0a7de745
A
4520IOReturn
4521IOMemoryMap::wireRange(
4522 uint32_t options,
4523 mach_vm_size_t offset,
4524 mach_vm_size_t length)
99c3a104 4525{
0a7de745
A
4526 IOReturn kr;
4527 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4528 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4529 vm_prot_t prot;
4530
4531 prot = (kIODirectionOutIn & options);
4532 if (prot) {
4533 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4534 } else {
4535 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4536 }
4537
4538 return kr;
99c3a104
A
4539}
4540
4541
0a7de745 4542IOPhysicalAddress
b0d623f7
A
4543#ifdef __LP64__
4544IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4545#else /* !__LP64__ */
4546IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4547#endif /* !__LP64__ */
1c79356b 4548{
0a7de745 4549 IOPhysicalAddress address;
1c79356b 4550
0a7de745 4551 LOCK;
b0d623f7 4552#ifdef __LP64__
0a7de745 4553 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
b0d623f7 4554#else /* !__LP64__ */
0a7de745 4555 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 4556#endif /* !__LP64__ */
0a7de745 4557 UNLOCK;
1c79356b 4558
0a7de745 4559 return address;
1c79356b
A
4560}
4561
4562/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4563
4564#undef super
4565#define super OSObject
4566
4567/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4568
0a7de745
A
4569void
4570IOMemoryDescriptor::initialize( void )
1c79356b 4571{
0a7de745
A
4572 if (0 == gIOMemoryLock) {
4573 gIOMemoryLock = IORecursiveLockAlloc();
4574 }
55e303ae 4575
0a7de745 4576 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
4577}
4578
0a7de745
A
4579void
4580IOMemoryDescriptor::free( void )
1c79356b 4581{
0a7de745
A
4582 if (_mappings) {
4583 _mappings->release();
4584 }
4585
4586 if (reserved) {
4587 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4588 reserved = NULL;
4589 }
4590 super::free();
1c79356b
A
4591}
4592
0a7de745
A
4593IOMemoryMap *
4594IOMemoryDescriptor::setMapping(
4595 task_t intoTask,
4596 IOVirtualAddress mapAddress,
4597 IOOptionBits options )
1c79356b 4598{
0a7de745
A
4599 return createMappingInTask( intoTask, mapAddress,
4600 options | kIOMapStatic,
4601 0, getLength());
1c79356b
A
4602}
4603
0a7de745
A
4604IOMemoryMap *
4605IOMemoryDescriptor::map(
4606 IOOptionBits options )
1c79356b 4607{
0a7de745
A
4608 return createMappingInTask( kernel_task, 0,
4609 options | kIOMapAnywhere,
4610 0, getLength());
1c79356b
A
4611}
4612
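/*
 * Illustrative sketch (an assumption, not from this file): mapping a
 * descriptor "md" into the kernel and reading back the address and length.
 *
 *   IOMemoryMap * map = md->map();                // kernel_task, kIOMapAnywhere
 *   if (map) {
 *       IOVirtualAddress va  = map->getVirtualAddress();
 *       IOByteCount      len = map->getLength();
 *       // ... use [va, va + len) ...
 *       map->release();                           // release when done
 *   }
 */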
b0d623f7 4613#ifndef __LP64__
0a7de745
A
4614IOMemoryMap *
4615IOMemoryDescriptor::map(
4616 task_t intoTask,
4617 IOVirtualAddress atAddress,
4618 IOOptionBits options,
4619 IOByteCount offset,
4620 IOByteCount length )
1c79356b 4621{
0a7de745
A
4622 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
4623 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4624 return 0;
4625 }
4626
4627 return createMappingInTask(intoTask, atAddress,
4628 options, offset, length);
2d21ac55 4629}
b0d623f7 4630#endif /* !__LP64__ */
2d21ac55 4631
0a7de745
A
4632IOMemoryMap *
4633IOMemoryDescriptor::createMappingInTask(
4634 task_t intoTask,
4635 mach_vm_address_t atAddress,
4636 IOOptionBits options,
4637 mach_vm_size_t offset,
4638 mach_vm_size_t length)
2d21ac55 4639{
0a7de745
A
4640 IOMemoryMap * result;
4641 IOMemoryMap * mapping;
2d21ac55 4642
0a7de745
A
4643 if (0 == length) {
4644 length = getLength();
4645 }
1c79356b 4646
0a7de745 4647 mapping = new IOMemoryMap;
2d21ac55 4648
0a7de745
A
4649 if (mapping
4650 && !mapping->init( intoTask, atAddress,
4651 options, offset, length )) {
4652 mapping->release();
4653 mapping = 0;
4654 }
2d21ac55 4655
0a7de745
A
4656 if (mapping) {
4657 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4658 } else {
4659 result = 0;
4660 }
2d21ac55 4661
b0d623f7 4662#if DEBUG
0a7de745
A
4663 if (!result) {
4664 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4665 this, atAddress, (uint32_t) options, offset, length);
4666 }
2d21ac55
A
4667#endif
4668
0a7de745 4669 return result;
1c79356b
A
4670}
4671
b0d623f7 4672#ifndef __LP64__ // there is only a 64 bit version for LP64
0a7de745
A
4673IOReturn
4674IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4675 IOOptionBits options,
4676 IOByteCount offset)
2d21ac55 4677{
0a7de745 4678 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
2d21ac55 4679}
b0d623f7 4680#endif
2d21ac55 4681
0a7de745
A
4682IOReturn
4683IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4684 IOOptionBits options,
4685 mach_vm_size_t offset)
91447636 4686{
0a7de745
A
4687 IOReturn err = kIOReturnSuccess;
4688 IOMemoryDescriptor * physMem = 0;
91447636 4689
0a7de745 4690 LOCK;
91447636 4691
0a7de745
A
4692 if (fAddress && fAddressMap) {
4693 do{
4694 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4695 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4696 physMem = fMemory;
4697 physMem->retain();
4698 }
4699
4700 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
4701 upl_size_t size = round_page(fLength);
4702 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4703 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4704 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4705 NULL, NULL,
4706 &flags, fMemory->getVMTag(kernel_map))) {
4707 fRedirUPL = 0;
4708 }
4709
4710 if (physMem) {
4711 IOUnmapPages( fAddressMap, fAddress, fLength );
4712 if ((false)) {
4713 physMem->redirect(0, true);
4714 }
4715 }
4716 }
4717
4718 if (newBackingMemory) {
4719 if (newBackingMemory != fMemory) {
4720 fOffset = 0;
4721 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4722 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4723 offset, fLength)) {
4724 err = kIOReturnError;
4725 }
4726 }
4727 if (fRedirUPL) {
4728 upl_commit(fRedirUPL, NULL, 0);
4729 upl_deallocate(fRedirUPL);
4730 fRedirUPL = 0;
4731 }
4732 if ((false) && physMem) {
4733 physMem->redirect(0, false);
4734 }
4735 }
4736 }while (false);
91447636 4737 }
91447636 4738
0a7de745 4739 UNLOCK;
91447636 4740
0a7de745
A
4741 if (physMem) {
4742 physMem->release();
4743 }
91447636 4744
0a7de745 4745 return err;
91447636
A
4746}
4747
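// makeMapping: consume the freshly allocated IOMemoryMap passed in __address
// and either adopt it as-is (kIOMapStatic), retarget it at a unique physical
// sub-descriptor (kIOMapUnique on physical memory), reuse a compatible
// existing mapping, or perform the actual map via doMap().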
0a7de745
A
4748IOMemoryMap *
4749IOMemoryDescriptor::makeMapping(
4750 IOMemoryDescriptor * owner,
4751 task_t __intoTask,
4752 IOVirtualAddress __address,
4753 IOOptionBits options,
4754 IOByteCount __offset,
4755 IOByteCount __length )
1c79356b 4756{
b0d623f7 4757#ifndef __LP64__
0a7de745
A
4758 if (!(kIOMap64Bit & options)) {
4759 panic("IOMemoryDescriptor::makeMapping !64bit");
4760 }
b0d623f7 4761#endif /* !__LP64__ */
2d21ac55 4762
0a7de745
A
4763 IOMemoryDescriptor * mapDesc = 0;
4764 __block IOMemoryMap * result = 0;
2d21ac55 4765
0a7de745
A
4766 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4767 mach_vm_size_t offset = mapping->fOffset + __offset;
4768 mach_vm_size_t length = mapping->fLength;
2d21ac55 4769
0a7de745 4770 mapping->fOffset = offset;
1c79356b 4771
0a7de745 4772 LOCK;
1c79356b 4773
0a7de745
A
4774 do{
4775 if (kIOMapStatic & options) {
4776 result = mapping;
4777 addMapping(mapping);
4778 mapping->setMemoryDescriptor(this, 0);
4779 continue;
4780 }
2d21ac55 4781
0a7de745
A
4782 if (kIOMapUnique & options) {
4783 addr64_t phys;
4784 IOByteCount physLen;
1c79356b 4785
2d21ac55 4786// if (owner != this) continue;
1c79356b 4787
0a7de745
A
4788 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4789 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4790 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4791 if (!phys || (physLen < length)) {
4792 continue;
4793 }
4794
4795 mapDesc = IOMemoryDescriptor::withAddressRange(
4796 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4797 if (!mapDesc) {
4798 continue;
4799 }
4800 offset = 0;
4801 mapping->fOffset = offset;
4802 }
4803 } else {
4804 // look for a compatible existing mapping
4805 if (_mappings) {
4806 _mappings->iterateObjects(^(OSObject * object)
4807 {
4808 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
4809 if ((result = lookMapping->copyCompatible(mapping))) {
4810 addMapping(result);
4811 result->setMemoryDescriptor(this, offset);
4812 return true;
4813 }
4814 return false;
4815 });
4816 }
4817 if (result || (options & kIOMapReference)) {
4818 if (result != mapping) {
4819 mapping->release();
4820 mapping = NULL;
4821 }
4822 continue;
4823 }
4824 }
4825
4826 if (!mapDesc) {
4827 mapDesc = this;
4828 mapDesc->retain();
4829 }
4830 IOReturn
4831 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4832 if (kIOReturnSuccess == kr) {
4833 result = mapping;
4834 mapDesc->addMapping(result);
4835 result->setMemoryDescriptor(mapDesc, offset);
4836 } else {
4837 mapping->release();
4838 mapping = NULL;
4839 }
4840 }while (false);
1c79356b 4841
0a7de745 4842 UNLOCK;
1c79356b 4843
0a7de745
A
4844 if (mapDesc) {
4845 mapDesc->release();
4846 }
91447636 4847
0a7de745 4848 return result;
1c79356b
A
4849}
4850
0a7de745
A
4851void
4852IOMemoryDescriptor::addMapping(
1c79356b
A
4853 IOMemoryMap * mapping )
4854{
0a7de745
A
4855 if (mapping) {
4856 if (0 == _mappings) {
4857 _mappings = OSSet::withCapacity(1);
4858 }
4859 if (_mappings) {
4860 _mappings->setObject( mapping );
4861 }
4862 }
1c79356b
A
4863}
4864
0a7de745
A
4865void
4866IOMemoryDescriptor::removeMapping(
1c79356b
A
4867 IOMemoryMap * mapping )
4868{
0a7de745
A
4869 if (_mappings) {
4870 _mappings->removeObject( mapping);
4871 }
1c79356b
A
4872}
4873
b0d623f7
A
4874#ifndef __LP64__
4875// obsolete initializers
0a7de745 4876// - initWithOptions is the designated initializer
1c79356b 4877bool
b0d623f7 4878IOMemoryDescriptor::initWithAddress(void * address,
0a7de745
A
4879 IOByteCount length,
4880 IODirection direction)
1c79356b 4881{
0a7de745 4882 return false;
1c79356b
A
4883}
4884
4885bool
b0d623f7 4886IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
0a7de745
A
4887 IOByteCount length,
4888 IODirection direction,
4889 task_t task)
1c79356b 4890{
0a7de745 4891 return false;
1c79356b
A
4892}
4893
4894bool
b0d623f7 4895IOMemoryDescriptor::initWithPhysicalAddress(
0a7de745
A
4896 IOPhysicalAddress address,
4897 IOByteCount length,
4898 IODirection direction )
1c79356b 4899{
0a7de745 4900 return false;
1c79356b
A
4901}
4902
4903bool
b0d623f7 4904IOMemoryDescriptor::initWithRanges(
0a7de745
A
4905 IOVirtualRange * ranges,
4906 UInt32 withCount,
4907 IODirection direction,
4908 task_t task,
4909 bool asReference)
1c79356b 4910{
0a7de745 4911 return false;
1c79356b
A
4912}
4913
4914bool
0a7de745
A
4915IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4916 UInt32 withCount,
4917 IODirection direction,
4918 bool asReference)
1c79356b 4919{
0a7de745 4920 return false;
1c79356b
A
4921}
4922
0a7de745
A
4923void *
4924IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4925 IOByteCount * lengthOfSegment)
b0d623f7 4926{
0a7de745 4927 return 0;
b0d623f7
A
4928}
4929#endif /* !__LP64__ */
4930
1c79356b
A
4931/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4932
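// Serialize the descriptor's ranges as an OSArray of {address, length}
// dictionaries (the ranges are copied under the lock so the snapshot
// stays consistent).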
0a7de745
A
4933bool
4934IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
9bccf70c 4935{
0a7de745
A
4936 OSSymbol const *keys[2] = {0};
4937 OSObject *values[2] = {0};
4938 OSArray * array;
4939 vm_size_t vcopy_size;
4940
4941 struct SerData {
4942 user_addr_t address;
4943 user_size_t length;
4944 } *vcopy = NULL;
4945 unsigned int index, nRanges;
4946 bool result = false;
4947
4948 IOOptionBits type = _flags & kIOMemoryTypeMask;
4949
4950 if (s == NULL) {
4951 return false;
4952 }
4953
4954 array = OSArray::withCapacity(4);
4955 if (!array) {
4956 return false;
4957 }
4958
4959 nRanges = _rangesCount;
4960 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
4961 result = false;
4962 goto bail;
4963 }
4964 vcopy = (SerData *) IOMalloc(vcopy_size);
4965 if (vcopy == 0) {
4966 result = false;
4967 goto bail;
4968 }
4969
4970 keys[0] = OSSymbol::withCString("address");
4971 keys[1] = OSSymbol::withCString("length");
4972
4973 // Copy the volatile data so we don't have to allocate memory
4974 // while the lock is held.
4975 LOCK;
4976 if (nRanges == _rangesCount) {
4977 Ranges vec = _ranges;
4978 for (index = 0; index < nRanges; index++) {
4979 mach_vm_address_t addr; mach_vm_size_t len;
4980 getAddrLenForInd(addr, len, type, vec, index);
4981 vcopy[index].address = addr;
4982 vcopy[index].length = len;
4983 }
4984 } else {
4985 // The descriptor changed out from under us. Give up.
4986 UNLOCK;
4987 result = false;
4988 goto bail;
4989 }
4990 UNLOCK;
4991
4992 for (index = 0; index < nRanges; index++) {
4993 user_addr_t addr = vcopy[index].address;
4994 IOByteCount len = (IOByteCount) vcopy[index].length;
4995 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4996 if (values[0] == 0) {
4997 result = false;
4998 goto bail;
4999 }
5000 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
5001 if (values[1] == 0) {
5002 result = false;
5003 goto bail;
5004 }
5005 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
5006 if (dict == 0) {
5007 result = false;
5008 goto bail;
5009 }
5010 array->setObject(dict);
5011 dict->release();
5012 values[0]->release();
5013 values[1]->release();
5014 values[0] = values[1] = 0;
5015 }
5016
5017 result = array->serialize(s);
5018
5019bail:
5020 if (array) {
5021 array->release();
5022 }
5023 if (values[0]) {
5024 values[0]->release();
5025 }
5026 if (values[1]) {
5027 values[1]->release();
5028 }
5029 if (keys[0]) {
5030 keys[0]->release();
5031 }
5032 if (keys[1]) {
5033 keys[1]->release();
5034 }
5035 if (vcopy) {
5036 IOFree(vcopy, vcopy_size);
5037 }
5038
5039 return result;
9bccf70c
A
5040}
5041
9bccf70c
A
5042/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5043
0b4e3aa0 5044OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
5045#ifdef __LP64__
5046OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5047OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5048OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5049OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5050OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5051OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5052OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5053#else /* !__LP64__ */
55e303ae
A
5054OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
5055OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
5056OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
5057OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 5058OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
5059OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
5060OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
5061#endif /* !__LP64__ */
1c79356b
A
5062OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5063OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5064OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5065OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5066OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5067OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5068OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5069OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 5070
55e303ae 5071/* ex-inline function implementation */
0a7de745 5072IOPhysicalAddress
0c530ab8 5073IOMemoryDescriptor::getPhysicalAddress()
0a7de745
A
5074{
5075 return getPhysicalSegment( 0, 0 );
5076}