/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/memory_entry.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

__END_DECLS

#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData {
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;   // Pointer to page list or index into it
    uint32_t fIOMDOffset;     // The offset of this iopl in descriptor
    ppnum_t fMappedPage;      // Page number of first page in this iopl
    unsigned int fPageOffset; // Offset within first page of iopl
    unsigned int fFlags;      // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData {
    IOMapper *   fMapper;
    uint64_t     fDMAMapAlignment;
    uint64_t     fMappedBase;
    uint64_t     fMappedLength;
    uint64_t     fPreparationID;
#if IOTRACKING
    IOTracking   fWireTracking;
#endif /* IOTRACKING */
    unsigned int fPageCnt;
    uint8_t      fDMAMapNumAddressBits;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char fMappedBaseValid:1;
    unsigned char _resv:3;
    unsigned char fDMAAccess:2;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
#if __LP64__
    // align fPageList as for ioPLBlock
    __attribute__((aligned(sizeof(upl_t))))
#endif
    ;
    //ioPLBlock fBlocks[1];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u)   \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
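
/*
 * Layout sketch (illustrative, not new behavior): an ioGMDData allocation
 * sized with computeDataSize(p, u) holds, back to back,
 *
 *     [ ioGMDData header | upl_page_info_t fPageList[p] | ioPLBlock[u] ]
 *
 * getPageList() returns the start of the page list, getIOPLList() points just
 * past its fPageCnt entries, and getNumIOPL() derives the ioPLBlock count from
 * the owning OSData's length.
 */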

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )

extern "C" {
kern_return_t
device_data_action(
    uintptr_t device_handle,
    ipc_port_t device_pager,
    vm_prot_t protection,
    vm_object_offset_t offset,
    vm_size_t size)
{
    kern_return_t kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if (memDesc) {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    } else {
        kr = KERN_ABORTED;
    }
    UNLOCK;

    return kr;
}

kern_return_t
device_close(
    uintptr_t device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return kIOReturnSuccess;
}
}; // end extern "C"
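
/*
 * Descriptive note: these two callbacks are invoked by the VM layer against
 * the device pager created in memoryReferenceCreate() below (device_pager_setup
 * passes the IOMemoryDescriptorReserved pointer as the device handle).
 * device_data_action() forwards a fault to the owning descriptor's
 * handleFault(), and device_close() tears down the reserved bookkeeping;
 * drivers are not expected to call either directly.
 */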

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState) {
    case kIOMemoryPurgeableKeepCurrent:
        *control = VM_PURGABLE_GET_STATE;
        break;

    case kIOMemoryPurgeableNonVolatile:
        *state = VM_PURGABLE_NONVOLATILE;
        break;
    case kIOMemoryPurgeableVolatile:
        *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
        break;
    case kIOMemoryPurgeableEmpty:
        *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
        break;
    default:
        err = kIOReturnBadArgument;
        break;
    }

    if (*control == VM_PURGABLE_SET_STATE) {
        // let VM know this call is from the kernel and is allowed to alter
        // the volatility of the memory entry even if it was created with
        // MAP_MEM_PURGABLE_KERNEL_ONLY
        *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
    }

    return err;
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state) {
    case VM_PURGABLE_NONVOLATILE:
        *state = kIOMemoryPurgeableNonVolatile;
        break;
    case VM_PURGABLE_VOLATILE:
        *state = kIOMemoryPurgeableVolatile;
        break;
    case VM_PURGABLE_EMPTY:
        *state = kIOMemoryPurgeableEmpty;
        break;
    default:
        *state = kIOMemoryPurgeableNonVolatile;
        err = kIOReturnNotReady;
        break;
    }
    return err;
}

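/*
 * Illustrative sketch (not a new API): these two helpers translate between
 * the kIOMemoryPurgeable* states passed to IOMemoryDescriptor::setPurgeable()
 * and the VM layer's VM_PURGABLE_* values. A typical caller does
 *
 *     IOOptionBits oldState;
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *
 * and the requested and returned states are converted through
 * purgeableControlBits() and purgeableStateBits() respectively on their way
 * to and from the memory entry purgeable control call below.
 */
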
typedef struct {
    unsigned int wimg;
    unsigned int object_type;
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
    [kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
    [kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
    [kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
    [kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
    [kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
    [kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
    [kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
    [kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
    [kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
    [kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};

static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
    vm_prot_t prot = 0;
    SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
    return prot;
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
    if (cacheMode == kIODefaultCache) {
        return -1U;
    }
    return iomd_mem_types[cacheMode].wimg;
}

static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)
{
    pagerFlags &= VM_WIMG_MASK;
    IOOptionBits cacheMode = kIODefaultCache;
    for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
        if (iomd_mem_types[i].wimg == pagerFlags) {
            cacheMode = i;
            break;
        }
    }
    return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
}

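/*
 * Round-trip sketch (illustrative): the table above maps each IOKit cache
 * mode to its WIMG bits and named-entry object type, so for example
 *
 *     unsigned int wimg = pagerFlagsForCacheMode(kIOWriteThruCache); // VM_WIMG_WTHRU
 *     IOOptionBits mode = cacheModeForPagerFlags(wimg);              // kIOWriteThruCache
 *
 * kIODefaultCache is the one asymmetric case: pagerFlagsForCacheMode() returns
 * -1U for it, and cacheModeForPagerFlags() never returns it (unmatched WIMG
 * bits fall back to kIOCopybackCache).
 */
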
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOMemoryEntry {
    ipc_port_t entry;
    int64_t offset;
    uint64_t size;
};

struct IOMemoryReference {
    volatile SInt32 refCount;
    vm_prot_t prot;
    uint32_t capacity;
    uint32_t count;
    struct IOMemoryReference * mapRef;
    IOMemoryEntry entries[0];
};

enum{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
    kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;

IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
        - sizeof(ref->entries)
        + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref))IOMalloc(newSize);
    if (realloc) {
        oldSize = (sizeof(IOMemoryReference)
            - sizeof(realloc->entries)
            + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) {
            copySize = newSize;
        }
        if (ref) {
            bcopy(realloc, ref, copySize);
        }
        IOFree(realloc, oldSize);
    } else if (ref) {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) {
        return NULL;
    }
    ref->capacity = capacity;
    return ref;
}

void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t size;

    if (ref->mapRef) {
        memoryReferenceFree(ref->mapRef);
        ref->mapRef = NULL;
    }

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
        - sizeof(ref->entries)
        + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}

void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) {
        memoryReferenceFree(ref);
    }
}


IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
    IOOptionBits options,
    IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t err;
    IOMemoryReference * ref;
    IOMemoryEntry * entries;
    IOMemoryEntry * cloneEntries;
    vm_map_t map;
    ipc_port_t entry, cloneEntry;
    vm_prot_t prot;
    memory_object_size_t actualSize;
    uint32_t rangeIdx;
    uint32_t count;
    mach_vm_address_t entryAddr, endAddr, entrySize;
    mach_vm_size_t srcAddr, srcLen;
    mach_vm_size_t nextAddr, nextLen;
    mach_vm_size_t offset, remain;
    IOByteCount physLen;
    IOOptionBits type = (_flags & kIOMemoryTypeMask);
    IOOptionBits cacheMode;
    unsigned int pagerFlags;
    vm_tag_t tag;
    vm_named_entry_kernel_flags_t vmne_kflags;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) {
        return kIOReturnNoMemory;
    }

    tag = getVMTag(kernel_map);
    vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
    entries = &ref->entries[0];
    count = 0;
    err = KERN_SUCCESS;

    offset = 0;
    rangeIdx = 0;
    if (_task) {
        getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    } else {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
            IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode) {
        prot |= VM_PROT_WRITE;
    }
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
        prot |= VM_PROT_WRITE;
    }
    if (kIOMemoryReferenceWrite & options) {
        prot |= VM_PROT_WRITE;
    }
    if (kIOMemoryReferenceCOW & options) {
        prot |= MAP_MEM_VM_COPY;
    }

    if (kIOMemoryUseReserve & _flags) {
        prot |= MAP_MEM_GRAB_SECLUDED;
    }

    if ((kIOMemoryReferenceReuse & options) && _memRef) {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task) {
        // virtual ranges

        if (kIOMemoryBufferPageable & _flags) {
            int ledger_tag, ledger_no_footprint;

            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;

            // default accounting settings:
            //   + "none" ledger tag
            //   + include in footprint
            // can be changed later with ::setOwnership()
            ledger_tag = VM_LEDGER_TAG_NONE;
            ledger_no_footprint = 0;

            if (kIOMemoryBufferPurgeable & _flags) {
                prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
                if (VM_KERN_MEMORY_SKYWALK == tag) {
                    // Skywalk purgeable memory accounting:
                    //   + "network" ledger tag
                    //   + not included in footprint
                    ledger_tag = VM_LEDGER_TAG_NETWORK;
                    ledger_no_footprint = 1;
                } else {
                    // regular purgeable memory accounting:
                    //   + no ledger tag
                    //   + included in footprint
                    ledger_tag = VM_LEDGER_TAG_NONE;
                    ledger_no_footprint = 0;
                }
            }
            vmne_kflags.vmnekf_ledger_tag = ledger_tag;
            vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
            if (kIOMemoryUseReserve & _flags) {
                prot |= MAP_MEM_GRAB_SECLUDED;
            }

            prot |= VM_PROT_WRITE;
            map = NULL;
        } else {
            map = get_task_map(_task);
        }

        remain = _length;
        while (remain) {
            srcAddr = nextAddr;
            srcLen = nextLen;
            nextAddr = 0;
            nextLen = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) {
                    break;
                }
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr = round_page_64(srcAddr + srcLen);
            do{
                entrySize = (endAddr - entryAddr);
                if (!entrySize) {
                    break;
                }
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot) {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) {
                        cloneEntry = cloneEntries->entry;
                    } else {
                        prot &= ~MAP_MEM_NAMED_REUSE;
                    }
                }

                err = mach_make_memory_entry_internal(map,
                    &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);

                if (KERN_SUCCESS != err) {
                    break;
                }
                if (actualSize > entrySize) {
                    panic("mach_make_memory_entry_64 actualSize");
                }

                if (count >= ref->capacity) {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry = entry;
                entries->size = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot) {
                    if ((cloneEntries->entry == entries->entry)
                        && (cloneEntries->size == entries->size)
                        && (cloneEntries->offset == entries->offset)) {
                        cloneEntries++;
                    } else {
                        prot &= ~MAP_MEM_NAMED_REUSE;
                    }
                }
                entries++;
                count++;
            }while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    } else {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t size = ptoa_64(_pages);

        if (!getKernelReserved()) {
            panic("getKernelReserved");
        }

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) {
            panic("phys is kIODefaultCache");
        }
        if (reserved->dp.pagerContig) {
            pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
        }

        pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
            size, pagerFlags);
        assert(pager);
        if (!pager) {
            err = kIOReturnVMError;
        } else {
            srcAddr = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert(KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) {
                device_pager_deallocate(pager);
            } else {
                reserved->dp.devicePager = pager;
                entries->entry = entry;
                entries->size = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot = prot;

    if (_task && (KERN_SUCCESS == err)
        && (kIOMemoryMapCopyOnWrite & _flags)
        && !(kIOMemoryReferenceCOW & options)) {
        err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
    }

    if (KERN_SUCCESS == err) {
        if (MAP_MEM_NAMED_REUSE & prot) {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    } else {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return err;
}

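/*
 * Lifecycle note (descriptive): the IOMemoryReference built here is later
 * consumed by memoryReferenceMap() when the descriptor is mapped, queried by
 * memoryReferenceGetPageCounts()/memoryReferenceSetPurgeable()/
 * memoryReferenceSetOwnership(), and dropped with memoryReferenceRelease()
 * when the descriptor is freed or retargeted.
 */
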
kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn err;
    vm_map_offset_t addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
        (vm_map_offset_t) 0,
        (((ref->options & kIOMapAnywhere)
        ? VM_FLAGS_ANYWHERE
        : VM_FLAGS_FIXED)),
        VM_MAP_KERNEL_FLAGS_NONE,
        ref->tag,
        IPC_PORT_NULL,
        (memory_object_offset_t) 0,
        false, /* copy */
        ref->prot,
        ref->prot,
        VM_INHERIT_NONE);
    if (KERN_SUCCESS == err) {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return err;
}

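/*
 * Descriptive note: memoryReferenceMap() below calls this helper either
 * directly, or through IOIteratePageableMaps() when the target is the kernel
 * map and the descriptor is a pageable buffer, so the allocation then lands
 * in one of the IOKit pageable maps instead.
 */
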
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
    IOMemoryReference * ref,
    vm_map_t map,
    mach_vm_size_t inoffset,
    mach_vm_size_t size,
    IOOptionBits options,
    mach_vm_address_t * inaddr)
{
    IOReturn err;
    int64_t offset = inoffset;
    uint32_t rangeIdx, entryIdx;
    vm_map_offset_t addr, mapAddr;
    vm_map_offset_t pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t nextLen;
    IOByteCount physLen;
    IOMemoryEntry * entry;
    vm_prot_t prot, memEntryCacheMode;
    IOOptionBits type;
    IOOptionBits cacheMode;
    vm_tag_t tag;
    // for the kIOMapPrefault option.
    upl_page_info_t * pageList = NULL;
    UInt currentPageIndex = 0;
    bool didAlloc;

    if (ref->mapRef) {
        err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
        return err;
    }

    type = _flags & kIOMemoryTypeMask;

    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) {
        prot |= VM_PROT_WRITE;
    }
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode) {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    tag = getVMTag(map);

    if (_task) {
        // Find first range for offset
        if (!_rangesCount) {
            return kIOReturnBadArgument;
        }
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) {
                break;
            }
            remain -= nextLen;
        }
    } else {
        rangeIdx = 0;
        remain = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) {
        return kIOReturnBadArgument;
    }

    nextAddr += remain;
    nextLen -= remain;
    pageOffset = (page_mask & nextAddr);
    addr = 0;
    didAlloc = false;

    if (!(options & kIOMapAnywhere)) {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) {
            return kIOReturnNotAligned;
        }
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
        (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
        entryIdx++) {
    }
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options) {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    } else {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map = map;
        ref.tag = tag;
        ref.options = options;
        ref.size = size;
        ref.prot = prot;
        if (options & kIOMapAnywhere) {
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        } else {
            ref.mapped = addr;
        }
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        } else {
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        }
        if (KERN_SUCCESS == err) {
            addr = ref.mapped;
            map = ref.map;
            didAlloc = true;
        }
    }

    /*
     * If the memory is associated with a device pager but doesn't have a UPL,
     * it will be immediately faulted in through the pager via populateDevicePager().
     * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
     * operations.
     */
    if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
        options &= ~kIOMapPrefault;
    }

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault) {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((_wireCount == 0) ||
            (_memoryEntries == NULL)) {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData* dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to step back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
            ioplIndex++;
        }
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo field points directly to the UPL's
         * page_info_t array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL) {
            pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
        } else {
            pageList = &pageList[ioplInfo.fPageInfo];
        }

        // Rebase [offset] into the IOPL in order to look up the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain = size;
    mapAddr = addr;
    addr += pageOffset;

    while (remain && (KERN_SUCCESS == err)) {
        entryOffset = offset - entry->offset;
        if ((page_mask & entryOffset) != pageOffset) {
            err = kIOReturnNotAligned;
            break;
        }

        if (kIODefaultCache != cacheMode) {
            vm_size_t unused = 0;
            err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                memEntryCacheMode, NULL, entry->entry);
            assert(KERN_SUCCESS == err);
        }

        entryOffset -= pageOffset;
        if (entryOffset >= entry->size) {
            panic("entryOffset");
        }
        chunk = entry->size - entryOffset;
        if (chunk) {
            vm_map_kernel_flags_t vmk_flags;

            vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
            vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

            if (chunk > remain) {
                chunk = remain;
            }
            if (options & kIOMapPrefault) {
                UInt nb_pages = round_page(chunk) / PAGE_SIZE;

                err = vm_map_enter_mem_object_prefault(map,
                    &mapAddr,
                    chunk, 0 /* mask */,
                    (VM_FLAGS_FIXED
                    | VM_FLAGS_OVERWRITE),
                    vmk_flags,
                    tag,
                    entry->entry,
                    entryOffset,
                    prot, // cur
                    prot, // max
                    &pageList[currentPageIndex],
                    nb_pages);

                // Compute the next index in the page list.
                currentPageIndex += nb_pages;
                assert(currentPageIndex <= _pages);
            } else {
                err = vm_map_enter_mem_object(map,
                    &mapAddr,
                    chunk, 0 /* mask */,
                    (VM_FLAGS_FIXED
                    | VM_FLAGS_OVERWRITE),
                    vmk_flags,
                    tag,
                    entry->entry,
                    entryOffset,
                    false, // copy
                    prot, // cur
                    prot, // max
                    VM_INHERIT_NONE);
            }
            if (KERN_SUCCESS != err) {
                break;
            }
            remain -= chunk;
            if (!remain) {
                break;
            }
            mapAddr += chunk;
            offset += chunk - pageOffset;
        }
        pageOffset = 0;
        entry++;
        entryIdx++;
        if (entryIdx >= ref->count) {
            err = kIOReturnOverrun;
            break;
        }
    }

    if ((KERN_SUCCESS != err) && didAlloc) {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return err;
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
    IOMemoryReference * ref,
    IOByteCount * residentPageCount,
    IOByteCount * dirtyPageCount)
{
    IOReturn err;
    IOMemoryEntry * entries;
    unsigned int resident, dirty;
    unsigned int totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) {
            break;
        }
        totalResident += resident;
        totalDirty += dirty;
    }

    if (residentPageCount) {
        *residentPageCount = totalResident;
    }
    if (dirtyPageCount) {
        *dirtyPageCount = totalDirty;
    }
    return err;
}

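/*
 * Usage sketch (assumption: callers reach this through the public
 * IOMemoryDescriptor::getPageCounts() wrapper; illustrative only):
 *
 *     IOByteCount resident, dirty;
 *     if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *         // resident/dirty are totals across all entries of the reference
 *     }
 */
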
IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
    IOMemoryReference * ref,
    IOOptionBits newState,
    IOOptionBits * oldState)
{
    IOReturn err;
    IOMemoryEntry * entries;
    vm_purgable_t control;
    int totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) {
            break;
        }
        err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
        if (KERN_SUCCESS != err) {
            break;
        }
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) {
            break;
        }

        if (kIOMemoryPurgeableEmpty == state) {
            totalState = kIOMemoryPurgeableEmpty;
        } else if (kIOMemoryPurgeableEmpty == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == totalState) {
            continue;
        } else if (kIOMemoryPurgeableVolatile == state) {
            totalState = kIOMemoryPurgeableVolatile;
        } else {
            totalState = kIOMemoryPurgeableNonVolatile;
        }
    }

    if (oldState) {
        *oldState = totalState;
    }
    return err;
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
    IOMemoryReference * ref,
    task_t newOwner,
    int newLedgerTag,
    IOOptionBits newLedgerOptions)
{
    IOReturn err, totalErr;
    IOMemoryEntry * entries;

    totalErr = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0]) {
        entries--;

        err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
        if (KERN_SUCCESS != err) {
            totalErr = err;
        }
    }

    return totalErr;
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void * address,
    IOByteCount length,
    IODirection direction)
{
    return IOMemoryDescriptor::
           withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
    IOByteCount length,
    IODirection direction,
    task_t task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithAddress(address, length, direction, task)) {
            return that;
        }

        that->release();
    }
    return NULL;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
    IOPhysicalAddress address,
    IOByteCount length,
    IODirection direction )
{
    return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
    UInt32 withCount,
    IODirection direction,
    task_t task,
    bool asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
            return that;
        }

        that->release();
    }
    return NULL;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
    mach_vm_size_t length,
    IOOptionBits options,
    task_t task)
{
    IOAddressRange range = { address, length };
    return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
    UInt32 rangeCount,
    IOOptionBits options,
    task_t task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (task) {
            options |= kIOMemoryTypeVirtual64;
        } else {
            options |= kIOMemoryTypePhysical64;
        }

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
            return that;
        }

        that->release();
    }

    return NULL;
}

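/*
 * Usage sketch (illustrative, assumes a caller-supplied user buffer and task;
 * not part of this file): a typical driver wraps a client range, wires it,
 * performs the I/O, then completes and releases the descriptor.
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         userAddr, userLen, kIODirectionOutIn, userTask);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... program DMA, or copy with readBytes()/writeBytes() ...
 *         md->complete();
 *     }
 *     if (md) {
 *         md->release();
 *     }
 */
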

/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits opts,
    IOMapper * mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
        self->release();
        return NULL;
    }

    return self;
}

bool
IOMemoryDescriptor::initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper)
{
    return false;
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
    UInt32 withCount,
    IODirection direction,
    bool asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that) {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
            return that;
        }

        that->release();
    }
    return NULL;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
    IOByteCount offset,
    IOByteCount length,
    IODirection direction)
{
    return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD) {
        return IOGeneralMemoryDescriptor::
               withPersistentMemoryDescriptor(origGenMD);
    } else {
        return NULL;
    }
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
        return NULL;
    }

    if (memRef == originalMD->_memRef) {
        originalMD->retain(); // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
        && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
        self->release();
        self = NULL;
    }
    return self;
}

#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void * address,
    IOByteCount withLength,
    IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
    IOByteCount withLength,
    IODirection withDirection,
    task_t withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
    IOPhysicalAddress address,
    IOByteCount withLength,
    IODirection withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
    IOPhysicalRange * ranges,
    UInt32 count,
    IODirection direction,
    bool reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference) {
        mdOpts |= kIOMemoryAsReference;
    }

    return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
    IOVirtualRange * ranges,
    UInt32 count,
    IODirection direction,
    task_t task,
    bool reference)
{
    IOOptionBits mdOpts = direction;

    if (reference) {
        mdOpts |= kIOMemoryAsReference;
    }

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task) {
            mdOpts |= kIOMemoryAutoPrepare;
        }
    } else {
        mdOpts |= kIOMemoryTypePhysical;
    }

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
}
#endif /* !__LP64__ */

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges from a given task, several physical ranges, a UPL from the
 * UBC system, or a uio (possibly 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
    UInt32 count,
    UInt32 offset,
    task_t task,
    IOOptionBits options,
    IOMapper * mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address) {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {
        IOMDPersistentInitData *initData = (typeof(initData))buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
            return false;
        }

        _memRef = initData->fMemRef; // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task) {
            return false;
        }
        break;

    case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false; /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
            while (_wireCount) {
                complete();
            }
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
            if (kIOMemoryTypeUIO == type) {
                uio_free((uio_t) _ranges.v);
            }
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            }
#endif /* !__LP64__ */
            else {
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
            }
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options)) {
            if (_memRef) {
                memoryReferenceRelease(_memRef);
                _memRef = NULL;
            }
            if (_mappings) {
                _mappings->flushCollection();
            }
        }
    } else {
        if (!super::init()) {
            return false;
        }
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOrRemote & options) {
        options |= kIOMemoryMapperNone;
    }
    if (kIOMemoryMapperNone & options) {
        mapper = NULL; // No Mapper
    } else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags = options;
    _task = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    _dmaReferences = 0;
    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options) {
        if (!_prepareLock) {
            _prepareLock = IOLockAlloc();
        }
    } else if (_prepareLock) {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {
        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) {
            return false;
        }
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;
        switch (kIOMemoryDirectionMask & options) {
        case kIODirectionOut:
            dataP->fDMAAccess = kIODMAMapReadAccess;
            break;
        case kIODirectionIn:
            dataP->fDMAAccess = kIODMAMapWriteAccess;
            break;
        case kIODirectionNone:
        case kIODirectionOutIn:
        default:
            panic("bad dir for upl 0x%x\n", (int) options);
            break;
        }
        // _wireCount++;    // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset)) {
            panic("short external upl");
        }

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice, conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    } else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        } else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type) {
            case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
            case kIOMemoryTypeVirtual64:
            case kIOMemoryTypePhysical64:
                if (count == 1
#ifndef __arm__
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
                    ) {
                    if (kIOMemoryTypeVirtual64 == type) {
                        type = kIOMemoryTypeVirtual;
                    } else {
                        type = kIOMemoryTypePhysical;
                    }
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64) {
                    return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
            case kIOMemoryTypeVirtual:
            case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v) {
                        return false;
                    }
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        _rangesCount = count;

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_address_t endAddr;
            mach_vm_size_t len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
                break;
            }
            if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
                break;
            }
            if (os_add_overflow(totalLength, len, &totalLength)) {
                break;
            }
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage) {
                    _highestPage = highPage;
                }
            }
        }
        if ((ind < count)
            || (totalLength != ((IOByteCount) totalLength))) {
            return false; /* overflow */
        }
        _length = totalLength;
        _pages = pages;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed.


        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
            _wireCount++; // Physical MDs are, by definition, wired
        } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize;

            if (_pages > atop_64(max_mem)) {
                return false;
            }

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) {
                return false;
            }
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
                && (VM_KERN_MEMORY_NONE == _kernelTag)) {
                _kernelTag = IOMemoryTag(kernel_map);
                if (_kernelTag == gIOSurfaceTag) {
                    _userTag = VM_MEMORY_IOSURFACE;
                }
            }

            if ((kIOMemoryPersistent & _flags) && !_memRef) {
                IOReturn
                    err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) {
                    return false;
                }
            }

            if ((_flags & kIOMemoryAutoPrepare)
                && prepare() != kIOReturnSuccess) {
                return false;
            }
        }
    }

    return true;
}

/*
 * free
 *
 * Free resources.
 */
void
IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (reserved) {
        LOCK;
        reserved->dp.memory = NULL;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
            dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBaseValid = dataP->fMappedBase = 0;
        }
    } else {
        while (_wireCount) {
            complete();
        }
    }

    if (_memoryEntries) {
        _memoryEntries->release();
    }

    if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
        if (kIOMemoryTypeUIO == type) {
            uio_free((uio_t) _ranges.v);
        }
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        }
#endif /* !__LP64__ */
        else {
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        _ranges.v = NULL;
    }

    if (reserved) {
        cleanKernelReserved(reserved);
        if (reserved->dp.devicePager) {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
        } else {
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        }
        reserved = NULL;
    }

    if (_memRef) {
        memoryReferenceRelease(_memRef);
    }
    if (_prepareLock) {
        IOLockFree(_prepareLock);
    }

    super::free();
}

#ifndef __LP64__
void
IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void
IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
1c79356b
A
1867
1868/*
1869 * getDirection:
1870 *
1871 * Get the direction of the transfer.
1872 */
0a7de745
A
1873IODirection
1874IOMemoryDescriptor::getDirection() const
1c79356b 1875{
b0d623f7 1876#ifndef __LP64__
0a7de745
A
1877 if (_direction) {
1878 return _direction;
1879 }
b0d623f7 1880#endif /* !__LP64__ */
0a7de745 1881 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
1882}
1883
1884/*
1885 * getLength:
1886 *
1887 * Get the length of the transfer (over all ranges).
1888 */
0a7de745
A
1889IOByteCount
1890IOMemoryDescriptor::getLength() const
1c79356b 1891{
0a7de745 1892 return _length;
1c79356b
A
1893}
1894
0a7de745
A
1895void
1896IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b 1897{
0a7de745 1898 _tag = tag;
1c79356b
A
1899}
1900
0a7de745
A
1901IOOptionBits
1902IOMemoryDescriptor::getTag( void )
1c79356b 1903{
0a7de745 1904 return _tag;
1c79356b
A
1905}
1906
0a7de745
A
1907uint64_t
1908IOMemoryDescriptor::getFlags(void)
5ba3f43e 1909{
0a7de745 1910 return _flags;
5ba3f43e
A
1911}
1912
b0d623f7 1913#ifndef __LP64__
39037602
A
1914#pragma clang diagnostic push
1915#pragma clang diagnostic ignored "-Wdeprecated-declarations"
1916
55e303ae 1917// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
0c530ab8
A
1918IOPhysicalAddress
1919IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1920{
0a7de745 1921 addr64_t physAddr = 0;
1c79356b 1922
0a7de745
A
1923 if (prepare() == kIOReturnSuccess) {
1924 physAddr = getPhysicalSegment64( offset, length );
1925 complete();
1926 }
0b4e3aa0 1927
0a7de745 1928 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
0b4e3aa0 1929}
39037602
A
1930
1931#pragma clang diagnostic pop
1932
b0d623f7 1933#endif /* !__LP64__ */
0b4e3aa0 1934
0a7de745
A
1935IOByteCount
1936IOMemoryDescriptor::readBytes
1937(IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1938{
0a7de745
A
1939 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1940 IOByteCount remaining;
1941
1942 // Assert that this entire I/O is within the available range
1943 assert(offset <= _length);
1944 assert(offset + length <= _length);
1945 if ((offset >= _length)
1946 || ((offset + length) > _length)) {
1947 return 0;
1948 }
1c79356b 1949
0a7de745
A
1950 assert(!(kIOMemoryRemote & _flags));
1951 if (kIOMemoryRemote & _flags) {
1952 return 0;
1953 }
5ba3f43e 1954
0a7de745
A
1955 if (kIOMemoryThreadSafe & _flags) {
1956 LOCK;
1957 }
b0d623f7 1958
0a7de745
A
1959 remaining = length = min(length, _length - offset);
1960 while (remaining) { // (process another target segment?)
1961 addr64_t srcAddr64;
1962 IOByteCount srcLen;
1c79356b 1963
0a7de745
A
1964 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1965 if (!srcAddr64) {
1966 break;
1967 }
1c79356b 1968
0a7de745
A
1969 // Clip segment length to remaining
1970 if (srcLen > remaining) {
1971 srcLen = remaining;
1972 }
1c79356b 1973
0a7de745
A
1974 copypv(srcAddr64, dstAddr, srcLen,
1975 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1976
0a7de745
A
1977 dstAddr += srcLen;
1978 offset += srcLen;
1979 remaining -= srcLen;
1980 }
1c79356b 1981
0a7de745
A
1982 if (kIOMemoryThreadSafe & _flags) {
1983 UNLOCK;
1984 }
b0d623f7 1985
0a7de745 1986 assert(!remaining);
1c79356b 1987
0a7de745 1988 return length - remaining;
55e303ae 1989}
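
// A minimal usage sketch (hypothetical caller and buffer, not taken from this
// file): readBytes() copies from the descriptor's physical segments into a
// kernel buffer and returns the number of bytes actually copied, clipped to
// the descriptor's length.
static inline bool
ExampleReadDescriptorHeader(IOMemoryDescriptor * md, void * buf, IOByteCount len)
{
    IOByteCount copied = md->readBytes(/* offset */ 0, buf, len);
    return copied == len;       // false if the requested range exceeded _length
}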
0b4e3aa0 1990
0a7de745
A
1991IOByteCount
1992IOMemoryDescriptor::writeBytes
1993(IOByteCount inoffset, const void *bytes, IOByteCount length)
55e303ae 1994{
0a7de745
A
1995 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1996 IOByteCount remaining;
1997 IOByteCount offset = inoffset;
0b4e3aa0 1998
0a7de745
A
1999 // Assert that this entire I/O is within the available range
2000 assert(offset <= _length);
2001 assert(offset + length <= _length);
0b4e3aa0 2002
0a7de745 2003 assert( !(kIOMemoryPreparedReadOnly & _flags));
0b4e3aa0 2004
0a7de745
A
2005 if ((kIOMemoryPreparedReadOnly & _flags)
2006 || (offset >= _length)
2007 || ((offset + length) > _length)) {
2008 return 0;
2009 }
0b4e3aa0 2010
0a7de745
A
2011 assert(!(kIOMemoryRemote & _flags));
2012 if (kIOMemoryRemote & _flags) {
2013 return 0;
2014 }
5ba3f43e 2015
0a7de745
A
2016 if (kIOMemoryThreadSafe & _flags) {
2017 LOCK;
2018 }
b0d623f7 2019
0a7de745
A
2020 remaining = length = min(length, _length - offset);
2021 while (remaining) { // (process another target segment?)
2022 addr64_t dstAddr64;
2023 IOByteCount dstLen;
0b4e3aa0 2024
0a7de745
A
2025 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2026 if (!dstAddr64) {
2027 break;
2028 }
0b4e3aa0 2029
0a7de745
A
2030 // Clip segment length to remaining
2031 if (dstLen > remaining) {
2032 dstLen = remaining;
2033 }
0b4e3aa0 2034
0a7de745
A
2035 if (!srcAddr) {
2036 bzero_phys(dstAddr64, dstLen);
2037 } else {
2038 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
2039 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2040 srcAddr += dstLen;
2041 }
2042 offset += dstLen;
2043 remaining -= dstLen;
fe8ab488 2044 }
1c79356b 2045
0a7de745
A
2046 if (kIOMemoryThreadSafe & _flags) {
2047 UNLOCK;
2048 }
b0d623f7 2049
0a7de745 2050 assert(!remaining);
55e303ae 2051
d9a64523 2052#if defined(__x86_64__)
0a7de745 2053 // copypv does not cppvFsnk on intel
d9a64523 2054#else
0a7de745
A
2055 if (!srcAddr) {
2056 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2057 }
d9a64523 2058#endif
fe8ab488 2059
0a7de745 2060 return length - remaining;
1c79356b
A
2061}
2062
b0d623f7 2063#ifndef __LP64__
0a7de745
A
2064void
2065IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
b0d623f7 2066{
0a7de745 2067 panic("IOGMD::setPosition deprecated");
b0d623f7
A
2068}
2069#endif /* !__LP64__ */
2070
2071static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2072
2073uint64_t
2074IOGeneralMemoryDescriptor::getPreparationID( void )
2075{
0a7de745
A
2076 ioGMDData *dataP;
2077
2078 if (!_wireCount) {
2079 return kIOPreparationIDUnprepared;
2080 }
2081
2082 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2083 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2084 IOMemoryDescriptor::setPreparationID();
2085 return IOMemoryDescriptor::getPreparationID();
2086 }
2087
2088 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2089 return kIOPreparationIDUnprepared;
2090 }
2091
2092 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
cb323159
A
2093 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2094 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
0a7de745
A
2095 }
2096 return dataP->fPreparationID;
b0d623f7
A
2097}
2098
cb323159
A
2099void
2100IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2101{
2102 if (reserved->creator) {
2103 task_deallocate(reserved->creator);
2104 reserved->creator = NULL;
2105 }
2106}
2107
0a7de745
A
2108IOMemoryDescriptorReserved *
2109IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 2110{
0a7de745 2111 if (!reserved) {
cb323159 2112 reserved = IONewZero(IOMemoryDescriptorReserved, 1);
0a7de745
A
2113 }
2114 return reserved;
316670eb
A
2115}
2116
0a7de745
A
2117void
2118IOMemoryDescriptor::setPreparationID( void )
316670eb 2119{
0a7de745 2120 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
cb323159
A
2121 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2122 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
0a7de745 2123 }
316670eb
A
2124}
2125
0a7de745
A
2126uint64_t
2127IOMemoryDescriptor::getPreparationID( void )
316670eb 2128{
0a7de745
A
2129 if (reserved) {
2130 return reserved->preparationID;
2131 } else {
2132 return kIOPreparationIDUnsupported;
2133 }
b0d623f7 2134}
de355530 2135
0a7de745 2136void
cb323159 2137IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
39037602 2138{
cb323159
A
2139 _kernelTag = (vm_tag_t) kernelTag;
2140 _userTag = (vm_tag_t) userTag;
39037602
A
2141}
2142
cb323159 2143uint32_t
0a7de745 2144IOMemoryDescriptor::getVMTag(vm_map_t map)
39037602 2145{
0a7de745
A
2146 if (vm_kernel_map_is_kernel(map)) {
2147 if (VM_KERN_MEMORY_NONE != _kernelTag) {
cb323159 2148 return (uint32_t) _kernelTag;
0a7de745
A
2149 }
2150 } else {
2151 if (VM_KERN_MEMORY_NONE != _userTag) {
cb323159 2152 return (uint32_t) _userTag;
0a7de745
A
2153 }
2154 }
2155 return IOMemoryTag(map);
39037602
A
2156}
2157
0a7de745
A
2158IOReturn
2159IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 2160{
0a7de745
A
2161 IOReturn err = kIOReturnSuccess;
2162 DMACommandOps params;
2163 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2164 ioGMDData *dataP;
99c3a104 2165
0a7de745
A
2166 params = (op & ~kIOMDDMACommandOperationMask & op);
2167 op &= kIOMDDMACommandOperationMask;
99c3a104 2168
0a7de745
A
2169 if (kIOMDDMAMap == op) {
2170 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2171 return kIOReturnUnderrun;
2172 }
99c3a104 2173
0a7de745 2174 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
99c3a104 2175
0a7de745
A
2176 if (!_memoryEntries
2177 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2178 return kIOReturnNoMemory;
2179 }
99c3a104 2180
0a7de745
A
2181 if (_memoryEntries && data->fMapper) {
2182 bool remap, keepMap;
2183 dataP = getDataP(_memoryEntries);
39236c6e 2184
0a7de745
A
2185 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2186 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2187 }
2188 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2189 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2190 }
39236c6e 2191
0a7de745
A
2192 keepMap = (data->fMapper == gIOSystemMapper);
2193 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3e170ce0 2194
0a7de745
A
2195 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2196 IOLockLock(_prepareLock);
2197 }
a39ff7e2 2198
0a7de745
A
2199 remap = (!keepMap);
2200 remap |= (dataP->fDMAMapNumAddressBits < 64)
2201 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2202 remap |= (dataP->fDMAMapAlignment > page_size);
3e170ce0 2203
0a7de745 2204 if (remap || !dataP->fMappedBaseValid) {
5ba3f43e 2205// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
0a7de745
A
2206 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2207 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2208 dataP->fMappedBase = data->fAlloc;
2209 dataP->fMappedBaseValid = true;
2210 dataP->fMappedLength = data->fAllocLength;
2211 data->fAllocLength = 0; // IOMD owns the alloc now
2212 }
2213 } else {
2214 data->fAlloc = dataP->fMappedBase;
2215 data->fAllocLength = 0; // give out IOMD map
2216 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2217 }
2218 data->fMapContig = !dataP->fDiscontig;
2219
2220 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2221 IOLockUnlock(_prepareLock);
2222 }
2223 }
2224 return err;
99c3a104 2225 }
0a7de745
A
2226 if (kIOMDDMAUnmap == op) {
2227 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2228 return kIOReturnUnderrun;
2229 }
2230 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
4452a7af 2231
0a7de745 2232 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
99c3a104 2233
0a7de745 2234 return kIOReturnSuccess;
cc8bc92a 2235 }
0c530ab8 2236
0a7de745
A
2237 if (kIOMDAddDMAMapSpec == op) {
2238 if (dataSize < sizeof(IODMAMapSpecification)) {
2239 return kIOReturnUnderrun;
2240 }
99c3a104 2241
0a7de745 2242 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
4452a7af 2243
0a7de745
A
2244 if (!_memoryEntries
2245 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2246 return kIOReturnNoMemory;
2247 }
4452a7af 2248
0a7de745
A
2249 if (_memoryEntries) {
2250 dataP = getDataP(_memoryEntries);
2251 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2252 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2253 }
2254 if (data->alignment > dataP->fDMAMapAlignment) {
2255 dataP->fDMAMapAlignment = data->alignment;
2256 }
2257 }
2258 return kIOReturnSuccess;
0c530ab8 2259 }
4452a7af 2260
0a7de745
A
2261 if (kIOMDGetCharacteristics == op) {
2262 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2263 return kIOReturnUnderrun;
2264 }
4452a7af 2265
0a7de745
A
2266 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2267 data->fLength = _length;
2268 data->fSGCount = _rangesCount;
2269 data->fPages = _pages;
2270 data->fDirection = getDirection();
2271 if (!_wireCount) {
2272 data->fIsPrepared = false;
2273 } else {
2274 data->fIsPrepared = true;
2275 data->fHighestPage = _highestPage;
2276 if (_memoryEntries) {
2277 dataP = getDataP(_memoryEntries);
2278 ioPLBlock *ioplList = getIOPLList(dataP);
2279 UInt count = getNumIOPL(_memoryEntries, dataP);
2280 if (count == 1) {
2281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2282 }
2283 }
2284 }
4452a7af 2285
0a7de745
A
2286 return kIOReturnSuccess;
2287 } else if (kIOMDDMAActive == op) {
2288 if (params) {
2289 int16_t prior;
2290 prior = OSAddAtomic16(1, &md->_dmaReferences);
2291 if (!prior) {
2292 md->_mapName = NULL;
2293 }
2294 } else {
2295 if (md->_dmaReferences) {
2296 OSAddAtomic16(-1, &md->_dmaReferences);
2297 } else {
2298 panic("_dmaReferences underflow");
2299 }
2300 }
2301 } else if (kIOMDWalkSegments != op) {
2302 return kIOReturnBadArgument;
0c530ab8 2303 }
89b3af67 2304
0a7de745
A
2305 // Get the next segment
2306 struct InternalState {
2307 IOMDDMAWalkSegmentArgs fIO;
cb323159
A
2308 mach_vm_size_t fOffset2Index;
2309 mach_vm_size_t fNextOffset;
0a7de745 2310 UInt fIndex;
0a7de745
A
2311 } *isP;
2312
2313 // Find the next segment
2314 if (dataSize < sizeof(*isP)) {
2315 return kIOReturnUnderrun;
99c3a104 2316 }
4452a7af 2317
0a7de745 2318 isP = (InternalState *) vData;
cb323159 2319 mach_vm_size_t offset = isP->fIO.fOffset;
0a7de745
A
2320 uint8_t mapped = isP->fIO.fMapped;
2321 uint64_t mappedBase;
4452a7af 2322
0a7de745
A
2323 if (mapped && (kIOMemoryRemote & _flags)) {
2324 return kIOReturnNotAttached;
2325 }
4452a7af 2326
0a7de745
A
2327 if (IOMapper::gSystem && mapped
2328 && (!(kIOMemoryHostOnly & _flags))
2329 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2330// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2331 if (!_memoryEntries
2332 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2333 return kIOReturnNoMemory;
2334 }
4452a7af 2335
0a7de745
A
2336 dataP = getDataP(_memoryEntries);
2337 if (dataP->fMapper) {
2338 IODMAMapSpecification mapSpec;
2339 bzero(&mapSpec, sizeof(mapSpec));
2340 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2341 mapSpec.alignment = dataP->fDMAMapAlignment;
2342 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2343 if (kIOReturnSuccess != err) {
2344 return err;
2345 }
2346 dataP->fMappedBaseValid = true;
2347 }
2348 }
0c530ab8 2349
0a7de745
A
2350 if (kIOMDDMAWalkMappedLocal == mapped) {
2351 mappedBase = isP->fIO.fMappedBase;
2352 } else if (mapped) {
2353 if (IOMapper::gSystem
2354 && (!(kIOMemoryHostOnly & _flags))
2355 && _memoryEntries
2356 && (dataP = getDataP(_memoryEntries))
2357 && dataP->fMappedBaseValid) {
2358 mappedBase = dataP->fMappedBase;
2359 } else {
2360 mapped = 0;
2361 }
2362 }
0c530ab8 2363
0a7de745
A
2364 if (offset >= _length) {
2365 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2366 }
0c530ab8 2367
0a7de745 2368 // Validate the previous offset
cb323159
A
2369 UInt ind;
2370 mach_vm_size_t off2Ind = isP->fOffset2Index;
0a7de745
A
2371 if (!params
2372 && offset
2373 && (offset == isP->fNextOffset || off2Ind <= offset)) {
2374 ind = isP->fIndex;
2375 } else {
2376 ind = off2Ind = 0; // Start from beginning
0c530ab8 2377 }
cb323159 2378 mach_vm_size_t length;
0a7de745
A
2379 UInt64 address;
2380
2381 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2382 // Physical address based memory descriptor
2383 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2384
2385 // Find the range after the one that contains the offset
2386 mach_vm_size_t len;
2387 for (len = 0; off2Ind <= offset; ind++) {
2388 len = physP[ind].length;
2389 off2Ind += len;
2390 }
0c530ab8 2391
0a7de745
A
2392 // Calculate length within range and starting address
2393 length = off2Ind - offset;
2394 address = physP[ind - 1].address + len - length;
0c530ab8 2395
0a7de745
A
2396 if (true && mapped) {
2397 address = mappedBase + offset;
2398 } else {
2399 // see how far we can coalesce ranges
2400 while (ind < _rangesCount && address + length == physP[ind].address) {
2401 len = physP[ind].length;
2402 length += len;
2403 off2Ind += len;
2404 ind++;
2405 }
2406 }
0c530ab8 2407
0a7de745
A
2408 // correct contiguous check overshoot
2409 ind--;
2410 off2Ind -= len;
0c530ab8 2411 }
0a7de745
A
2412#ifndef __LP64__
2413 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2414 // Physical address based memory descriptor
2415 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2416
2417 // Find the range after the one that contains the offset
2418 mach_vm_size_t len;
2419 for (len = 0; off2Ind <= offset; ind++) {
2420 len = physP[ind].length;
2421 off2Ind += len;
2422 }
4452a7af 2423
0a7de745
A
2424 // Calculate length within range and starting address
2425 length = off2Ind - offset;
2426 address = physP[ind - 1].address + len - length;
0c530ab8 2427
0a7de745
A
2428 if (true && mapped) {
2429 address = mappedBase + offset;
2430 } else {
2431 // see how far we can coalesce ranges
2432 while (ind < _rangesCount && address + length == physP[ind].address) {
2433 len = physP[ind].length;
2434 length += len;
2435 off2Ind += len;
2436 ind++;
2437 }
2438 }
2439 // correct contiguous check overshoot
2440 ind--;
2441 off2Ind -= len;
6d2010ae 2442 }
0a7de745
A
2443#endif /* !__LP64__ */
2444 else {
2445 do {
2446 if (!_wireCount) {
2447 panic("IOGMD: not wired for the IODMACommand");
2448 }
2449
2450 assert(_memoryEntries);
2451
2452 dataP = getDataP(_memoryEntries);
2453 const ioPLBlock *ioplList = getIOPLList(dataP);
2454 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2455 upl_page_info_t *pageList = getPageList(dataP);
2456
2457 assert(numIOPLs > 0);
2458
2459 // Scan through iopl info blocks looking for block containing offset
2460 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
2461 ind++;
2462 }
2463
2464 // Go back to actual range as search goes past it
2465 ioPLBlock ioplInfo = ioplList[ind - 1];
2466 off2Ind = ioplInfo.fIOMDOffset;
2467
2468 if (ind < numIOPLs) {
2469 length = ioplList[ind].fIOMDOffset;
2470 } else {
2471 length = _length;
2472 }
2473 length -= offset; // Remainder within iopl
2474
2475 // Subtract offset till this iopl in total list
2476 offset -= off2Ind;
2477
2478 // If a mapped address is requested and this is a pre-mapped IOPL
2479 // then just need to compute an offset relative to the mapped base.
2480 if (mapped) {
2481 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2482 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2483 continue; // Done leave do/while(false) now
2484 }
2485
2486 // The offset is rebased into the current iopl.
2487 // Now add the iopl 1st page offset.
2488 offset += ioplInfo.fPageOffset;
2489
2490 // For external UPLs the fPageInfo field points directly to
2491 // the upl's upl_page_info_t array.
2492 if (ioplInfo.fFlags & kIOPLExternUPL) {
2493 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2494 } else {
2495 pageList = &pageList[ioplInfo.fPageInfo];
2496 }
2497
2498 // Check for direct device non-paged memory
2499 if (ioplInfo.fFlags & kIOPLOnDevice) {
2500 address = ptoa_64(pageList->phys_addr) + offset;
2501 continue; // Done leave do/while(false) now
2502 }
2503
2504 // Now we need to compute the index into the pageList
2505 UInt pageInd = atop_32(offset);
2506 offset &= PAGE_MASK;
2507
2508 // Compute the starting address of this segment
2509 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2510 if (!pageAddr) {
2511 panic("!pageList phys_addr");
2512 }
2513
2514 address = ptoa_64(pageAddr) + offset;
2515
2516 // length is currently set to the length of the remainder of the iopl.
2517 // We need to check that the remainder of the iopl is contiguous.
2518 // This is indicated by pageList[ind].phys_addr being sequential.
2519 IOByteCount contigLength = PAGE_SIZE - offset;
2520 while (contigLength < length
2521 && ++pageAddr == pageList[++pageInd].phys_addr) {
2522 contigLength += PAGE_SIZE;
2523 }
2524
2525 if (contigLength < length) {
2526 length = contigLength;
2527 }
2528
2529
2530 assert(address);
2531 assert(length);
2532 } while (false);
0c530ab8
A
2533 }
2534
0a7de745
A
2535 // Update return values and state
2536 isP->fIO.fIOVMAddr = address;
2537 isP->fIO.fLength = length;
2538 isP->fIndex = ind;
2539 isP->fOffset2Index = off2Ind;
2540 isP->fNextOffset = isP->fIO.fOffset + length;
0c530ab8 2541
0a7de745 2542 return kIOReturnSuccess;
0c530ab8
A
2543}
2544
2545addr64_t
b0d623f7 2546IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2547{
0a7de745
A
2548 IOReturn ret;
2549 mach_vm_address_t address = 0;
2550 mach_vm_size_t length = 0;
2551 IOMapper * mapper = gIOSystemMapper;
2552 IOOptionBits type = _flags & kIOMemoryTypeMask;
2553
2554 if (lengthOfSegment) {
2555 *lengthOfSegment = 0;
2556 }
b0d623f7 2557
0a7de745
A
2558 if (offset >= _length) {
2559 return 0;
2560 }
b0d623f7 2561
0a7de745
A
2562 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2563 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2564 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2565 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2566
2567 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
2568 unsigned rangesIndex = 0;
2569 Ranges vec = _ranges;
2570 mach_vm_address_t addr;
2571
2572 // Find starting address within the vector of ranges
2573 for (;;) {
2574 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2575 if (offset < length) {
2576 break;
2577 }
2578 offset -= length; // (make offset relative)
2579 rangesIndex++;
2580 }
b0d623f7 2581
0a7de745
A
2582 // Now that we have the starting range,
2583 // let's find the last contiguous range
2584 addr += offset;
2585 length -= offset;
b0d623f7 2586
0a7de745
A
2587 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
2588 mach_vm_address_t newAddr;
2589 mach_vm_size_t newLen;
2590
2591 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2592 if (addr + length != newAddr) {
2593 break;
2594 }
2595 length += newLen;
2596 }
2597 if (addr) {
2598 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2599 }
2600 } else {
2601 IOMDDMAWalkSegmentState _state;
2602 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
b0d623f7 2603
0a7de745
A
2604 state->fOffset = offset;
2605 state->fLength = _length - offset;
2606 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
b0d623f7 2607
0a7de745 2608 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
b0d623f7 2609
0a7de745
A
2610 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
2611 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2612 ret, this, state->fOffset,
2613 state->fIOVMAddr, state->fLength);
2614 }
2615 if (kIOReturnSuccess == ret) {
2616 address = state->fIOVMAddr;
2617 length = state->fLength;
2618 }
2619
2620 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2621 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2622
2623 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
2624 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
2625 addr64_t origAddr = address;
2626 IOByteCount origLen = length;
2627
2628 address = mapper->mapToPhysicalAddress(origAddr);
2629 length = page_size - (address & (page_size - 1));
2630 while ((length < origLen)
2631 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
2632 length += page_size;
2633 }
2634 if (length > origLen) {
2635 length = origLen;
2636 }
2637 }
2638 }
b0d623f7 2639 }
4452a7af 2640
0a7de745
A
2641 if (!address) {
2642 length = 0;
2643 }
b0d623f7 2644
0a7de745
A
2645 if (lengthOfSegment) {
2646 *lengthOfSegment = length;
2647 }
4452a7af 2648
0a7de745 2649 return address;
0c530ab8
A
2650}
2651
b0d623f7 2652#ifndef __LP64__
39037602
A
2653#pragma clang diagnostic push
2654#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2655
b0d623f7
A
2656addr64_t
2657IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2658{
0a7de745
A
2659 addr64_t address = 0;
2660
2661 if (options & _kIOMemorySourceSegment) {
2662 address = getSourceSegment(offset, lengthOfSegment);
2663 } else if (options & kIOMemoryMapperNone) {
2664 address = getPhysicalSegment64(offset, lengthOfSegment);
2665 } else {
2666 address = getPhysicalSegment(offset, lengthOfSegment);
2667 }
2668
2669 return address;
b0d623f7 2670}
39037602 2671#pragma clang diagnostic pop
0c530ab8 2672
b0d623f7
A
2673addr64_t
2674IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2675{
0a7de745 2676 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
b0d623f7 2677}
0c530ab8 2678
b0d623f7
A
2679IOPhysicalAddress
2680IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2681{
0a7de745
A
2682 addr64_t address = 0;
2683 IOByteCount length = 0;
0c530ab8 2684
0a7de745 2685 address = getPhysicalSegment(offset, lengthOfSegment, 0);
b0d623f7 2686
0a7de745
A
2687 if (lengthOfSegment) {
2688 length = *lengthOfSegment;
2689 }
0c530ab8 2690
0a7de745
A
2691 if ((address + length) > 0x100000000ULL) {
2692 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 2693 address, (long) length, (getMetaClass())->getClassName());
0a7de745 2694 }
0c530ab8 2695
0a7de745 2696 return (IOPhysicalAddress) address;
55e303ae 2697}
de355530 2698
0c530ab8
A
2699addr64_t
2700IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae 2701{
0a7de745
A
2702 IOPhysicalAddress phys32;
2703 IOByteCount length;
2704 addr64_t phys64;
cb323159 2705 IOMapper * mapper = NULL;
0a7de745
A
2706
2707 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2708 if (!phys32) {
2709 return 0;
2710 }
0b4e3aa0 2711
0a7de745
A
2712 if (gIOSystemMapper) {
2713 mapper = gIOSystemMapper;
2714 }
0c530ab8 2715
0a7de745
A
2716 if (mapper) {
2717 IOByteCount origLen;
55e303ae 2718
0a7de745
A
2719 phys64 = mapper->mapToPhysicalAddress(phys32);
2720 origLen = *lengthOfSegment;
2721 length = page_size - (phys64 & (page_size - 1));
2722 while ((length < origLen)
2723 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
2724 length += page_size;
2725 }
2726 if (length > origLen) {
2727 length = origLen;
2728 }
55e303ae 2729
0a7de745
A
2730 *lengthOfSegment = length;
2731 } else {
2732 phys64 = (addr64_t) phys32;
2733 }
1c79356b 2734
0a7de745 2735 return phys64;
0b4e3aa0
A
2736}
2737
0c530ab8 2738IOPhysicalAddress
b0d623f7 2739IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 2740{
0a7de745 2741 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
0b4e3aa0
A
2742}
2743
b0d623f7
A
2744IOPhysicalAddress
2745IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2746{
0a7de745 2747 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
b0d623f7 2748}
1c79356b 2749
39037602
A
2750#pragma clang diagnostic push
2751#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2752
0a7de745
A
2753void *
2754IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2755 IOByteCount * lengthOfSegment)
b0d623f7 2756{
0a7de745
A
2757 if (_task == kernel_task) {
2758 return (void *) getSourceSegment(offset, lengthOfSegment);
2759 } else {
2760 panic("IOGMD::getVirtualSegment deprecated");
2761 }
91447636 2762
cb323159 2763 return NULL;
b0d623f7 2764}
39037602 2765#pragma clang diagnostic pop
b0d623f7 2766#endif /* !__LP64__ */
91447636 2767
0a7de745 2768IOReturn
0c530ab8
A
2769IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2770{
0a7de745
A
2771 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2772 DMACommandOps params;
2773 IOReturn err;
2774
2775 params = (op & ~kIOMDDMACommandOperationMask & op);
2776 op &= kIOMDDMACommandOperationMask;
2777
2778 if (kIOMDGetCharacteristics == op) {
2779 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2780 return kIOReturnUnderrun;
2781 }
2782
2783 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2784 data->fLength = getLength();
2785 data->fSGCount = 0;
2786 data->fDirection = getDirection();
2787 data->fIsPrepared = true; // Assume prepared - fails safe
2788 } else if (kIOMDWalkSegments == op) {
2789 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
2790 return kIOReturnUnderrun;
2791 }
2792
2793 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2794 IOByteCount offset = (IOByteCount) data->fOffset;
2795
2796 IOPhysicalLength length;
2797 if (data->fMapped && IOMapper::gSystem) {
2798 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2799 } else {
2800 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2801 }
2802 data->fLength = length;
2803 } else if (kIOMDAddDMAMapSpec == op) {
2804 return kIOReturnUnsupported;
2805 } else if (kIOMDDMAMap == op) {
2806 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2807 return kIOReturnUnderrun;
2808 }
2809 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2810
2811 if (params) {
2812 panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2813 }
2814
2815 data->fMapContig = true;
2816 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2817
2818 return err;
2819 } else if (kIOMDDMAUnmap == op) {
2820 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2821 return kIOReturnUnderrun;
2822 }
2823 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2824
2825 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2826
2827 return kIOReturnSuccess;
2828 } else {
2829 return kIOReturnBadArgument;
2830 }
2831
2832 return kIOReturnSuccess;
0c530ab8
A
2833}
2834
0a7de745 2835IOReturn
b0d623f7 2836IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
0a7de745 2837 IOOptionBits * oldState )
b0d623f7 2838{
0a7de745 2839 IOReturn err = kIOReturnSuccess;
b0d623f7 2840
0a7de745
A
2841 vm_purgable_t control;
2842 int state;
b0d623f7 2843
0a7de745
A
2844 assert(!(kIOMemoryRemote & _flags));
2845 if (kIOMemoryRemote & _flags) {
2846 return kIOReturnNotAttached;
2847 }
2848
2849 if (_memRef) {
2850 err = super::setPurgeable(newState, oldState);
2851 } else {
2852 if (kIOMemoryThreadSafe & _flags) {
2853 LOCK;
2854 }
2855 do{
2856 // Find the appropriate vm_map for the given task
2857 vm_map_t curMap;
2858 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
2859 err = kIOReturnNotReady;
2860 break;
2861 } else if (!_task) {
2862 err = kIOReturnUnsupported;
2863 break;
2864 } else {
2865 curMap = get_task_map(_task);
2866 if (NULL == curMap) {
2867 err = KERN_INVALID_ARGUMENT;
2868 break;
2869 }
2870 }
2871
2872 // can only do one range
2873 Ranges vec = _ranges;
2874 IOOptionBits type = _flags & kIOMemoryTypeMask;
2875 mach_vm_address_t addr;
2876 mach_vm_size_t len;
2877 getAddrLenForInd(addr, len, type, vec, 0);
2878
2879 err = purgeableControlBits(newState, &control, &state);
2880 if (kIOReturnSuccess != err) {
2881 break;
2882 }
2883 err = vm_map_purgable_control(curMap, addr, control, &state);
2884 if (oldState) {
2885 if (kIOReturnSuccess == err) {
2886 err = purgeableStateBits(&state);
2887 *oldState = state;
2888 }
2889 }
2890 }while (false);
2891 if (kIOMemoryThreadSafe & _flags) {
2892 UNLOCK;
b0d623f7 2893 }
b0d623f7 2894 }
fe8ab488 2895
0a7de745 2896 return err;
b0d623f7
A
2897}
2898
0a7de745
A
2899IOReturn
2900IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2901 IOOptionBits * oldState )
91447636 2902{
0a7de745 2903 IOReturn err = kIOReturnNotReady;
b0d623f7 2904
0a7de745
A
2905 if (kIOMemoryThreadSafe & _flags) {
2906 LOCK;
2907 }
2908 if (_memRef) {
2909 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2910 }
2911 if (kIOMemoryThreadSafe & _flags) {
2912 UNLOCK;
2913 }
b0d623f7 2914
0a7de745 2915 return err;
91447636 2916}
0a7de745 2917
cb323159
A
2918IOReturn
2919IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
2920 int newLedgerTag,
2921 IOOptionBits newLedgerOptions )
2922{
2923 IOReturn err = kIOReturnSuccess;
2924
2925 assert(!(kIOMemoryRemote & _flags));
2926 if (kIOMemoryRemote & _flags) {
2927 return kIOReturnNotAttached;
2928 }
2929
2930 if (iokit_iomd_setownership_enabled == FALSE) {
2931 return kIOReturnUnsupported;
2932 }
2933
2934 if (_memRef) {
2935 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2936 } else {
2937 err = kIOReturnUnsupported;
2938 }
2939
2940 return err;
2941}
2942
2943IOReturn
2944IOMemoryDescriptor::setOwnership( task_t newOwner,
2945 int newLedgerTag,
2946 IOOptionBits newLedgerOptions )
2947{
2948 IOReturn err = kIOReturnNotReady;
2949
2950 assert(!(kIOMemoryRemote & _flags));
2951 if (kIOMemoryRemote & _flags) {
2952 return kIOReturnNotAttached;
2953 }
2954
2955 if (iokit_iomd_setownership_enabled == FALSE) {
2956 return kIOReturnUnsupported;
2957 }
2958
2959 if (kIOMemoryThreadSafe & _flags) {
2960 LOCK;
2961 }
2962 if (_memRef) {
2963 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
2964 } else {
2965 IOMultiMemoryDescriptor * mmd;
2966 IOSubMemoryDescriptor * smd;
2967 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
2968 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2969 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
2970 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2971 }
2972 }
2973 if (kIOMemoryThreadSafe & _flags) {
2974 UNLOCK;
2975 }
2976
2977 return err;
2978}
2979
0a7de745
A
2980IOReturn
2981IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2982 IOByteCount * dirtyPageCount )
39236c6e 2983{
0a7de745 2984 IOReturn err = kIOReturnNotReady;
39236c6e 2985
0a7de745
A
2986 assert(!(kIOMemoryRemote & _flags));
2987 if (kIOMemoryRemote & _flags) {
2988 return kIOReturnNotAttached;
2989 }
5ba3f43e 2990
0a7de745
A
2991 if (kIOMemoryThreadSafe & _flags) {
2992 LOCK;
3e170ce0 2993 }
0a7de745
A
2994 if (_memRef) {
2995 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2996 } else {
2997 IOMultiMemoryDescriptor * mmd;
2998 IOSubMemoryDescriptor * smd;
2999 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3000 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3001 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3002 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3003 }
3004 }
3005 if (kIOMemoryThreadSafe & _flags) {
3006 UNLOCK;
3e170ce0 3007 }
39236c6e 3008
0a7de745 3009 return err;
39236c6e 3010}
0a7de745 3011
39236c6e 3012
5ba3f43e
A
3013#if defined(__arm__) || defined(__arm64__)
3014extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3015extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3016#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
3017extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3018extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
5ba3f43e 3019#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3020
0a7de745
A
3021static void
3022SetEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 3023{
0a7de745
A
3024 ppnum_t page, end;
3025
3026 page = atop_64(round_page_64(pa));
3027 end = atop_64(trunc_page_64(pa + count));
3028 for (; page < end; page++) {
3029 pmap_clear_noencrypt(page);
3030 }
0b4c1975
A
3031}
3032
0a7de745
A
3033static void
3034ClearEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 3035{
0a7de745
A
3036 ppnum_t page, end;
3037
3038 page = atop_64(round_page_64(pa));
3039 end = atop_64(trunc_page_64(pa + count));
3040 for (; page < end; page++) {
3041 pmap_set_noencrypt(page);
3042 }
0b4c1975
A
3043}
3044
0a7de745
A
3045IOReturn
3046IOMemoryDescriptor::performOperation( IOOptionBits options,
3047 IOByteCount offset, IOByteCount length )
91447636 3048{
0a7de745
A
3049 IOByteCount remaining;
3050 unsigned int res;
cb323159 3051 void (*func)(addr64_t pa, unsigned int count) = NULL;
5ba3f43e 3052#if defined(__arm__) || defined(__arm64__)
cb323159 3053 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
5ba3f43e
A
3054#endif
3055
0a7de745
A
3056 assert(!(kIOMemoryRemote & _flags));
3057 if (kIOMemoryRemote & _flags) {
3058 return kIOReturnNotAttached;
3059 }
91447636 3060
0a7de745
A
3061 switch (options) {
3062 case kIOMemoryIncoherentIOFlush:
5ba3f43e 3063#if defined(__arm__) || defined(__arm64__)
0a7de745 3064 func_ext = &dcache_incoherent_io_flush64;
5ba3f43e 3065#if __ARM_COHERENT_IO__
0a7de745
A
3066 func_ext(0, 0, 0, &res);
3067 return kIOReturnSuccess;
5ba3f43e 3068#else /* __ARM_COHERENT_IO__ */
0a7de745 3069 break;
5ba3f43e
A
3070#endif /* __ARM_COHERENT_IO__ */
3071#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3072 func = &dcache_incoherent_io_flush64;
3073 break;
5ba3f43e 3074#endif /* defined(__arm__) || defined(__arm64__) */
0a7de745 3075 case kIOMemoryIncoherentIOStore:
5ba3f43e 3076#if defined(__arm__) || defined(__arm64__)
0a7de745 3077 func_ext = &dcache_incoherent_io_store64;
5ba3f43e 3078#if __ARM_COHERENT_IO__
0a7de745
A
3079 func_ext(0, 0, 0, &res);
3080 return kIOReturnSuccess;
5ba3f43e 3081#else /* __ARM_COHERENT_IO__ */
0a7de745 3082 break;
5ba3f43e
A
3083#endif /* __ARM_COHERENT_IO__ */
3084#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3085 func = &dcache_incoherent_io_store64;
3086 break;
5ba3f43e 3087#endif /* defined(__arm__) || defined(__arm64__) */
0b4c1975 3088
0a7de745
A
3089 case kIOMemorySetEncrypted:
3090 func = &SetEncryptOp;
3091 break;
3092 case kIOMemoryClearEncrypted:
3093 func = &ClearEncryptOp;
3094 break;
3095 }
91447636 3096
5ba3f43e 3097#if defined(__arm__) || defined(__arm64__)
cb323159 3098 if ((func == NULL) && (func_ext == NULL)) {
0a7de745
A
3099 return kIOReturnUnsupported;
3100 }
5ba3f43e 3101#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3102 if (!func) {
3103 return kIOReturnUnsupported;
3104 }
5ba3f43e 3105#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3106
0a7de745
A
3107 if (kIOMemoryThreadSafe & _flags) {
3108 LOCK;
3109 }
b0d623f7 3110
0a7de745
A
3111 res = 0x0UL;
3112 remaining = length = min(length, getLength() - offset);
3113 while (remaining) {
3114 // (process another target segment?)
3115 addr64_t dstAddr64;
3116 IOByteCount dstLen;
91447636 3117
0a7de745
A
3118 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3119 if (!dstAddr64) {
3120 break;
3121 }
91447636 3122
0a7de745
A
3123 // Clip segment length to remaining
3124 if (dstLen > remaining) {
3125 dstLen = remaining;
3126 }
91447636 3127
5ba3f43e 3128#if defined(__arm__) || defined(__arm64__)
0a7de745
A
3129 if (func) {
3130 (*func)(dstAddr64, dstLen);
3131 }
3132 if (func_ext) {
3133 (*func_ext)(dstAddr64, dstLen, remaining, &res);
3134 if (res != 0x0UL) {
3135 remaining = 0;
3136 break;
3137 }
3138 }
5ba3f43e 3139#else /* defined(__arm__) || defined(__arm64__) */
0a7de745 3140 (*func)(dstAddr64, dstLen);
5ba3f43e 3141#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3142
0a7de745
A
3143 offset += dstLen;
3144 remaining -= dstLen;
3145 }
91447636 3146
0a7de745
A
3147 if (kIOMemoryThreadSafe & _flags) {
3148 UNLOCK;
3149 }
b0d623f7 3150
0a7de745 3151 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
91447636
A
3152}
3153
39037602
A
3154/*
3155 *
3156 */
3157
316670eb 3158#if defined(__i386__) || defined(__x86_64__)
3e170ce0 3159
0a7de745
A
3160#define io_kernel_static_start vm_kernel_stext
3161#define io_kernel_static_end vm_kernel_etext
3e170ce0 3162
5ba3f43e
A
3163#elif defined(__arm__) || defined(__arm64__)
3164
0a7de745 3165extern vm_offset_t static_memory_end;
5ba3f43e
A
3166
3167#if defined(__arm64__)
3168#define io_kernel_static_start vm_kext_base
3169#else /* defined(__arm64__) */
3170#define io_kernel_static_start vm_kernel_stext
3171#endif /* defined(__arm64__) */
3172
0a7de745 3173#define io_kernel_static_end static_memory_end
5ba3f43e 3174
316670eb
A
3175#else
3176#error io_kernel_static_end is undefined for this architecture
3177#endif
55e303ae
A
3178
3179static kern_return_t
3180io_get_kernel_static_upl(
0a7de745
A
3181 vm_map_t /* map */,
3182 uintptr_t offset,
3183 upl_size_t *upl_size,
3184 upl_t *upl,
3185 upl_page_info_array_t page_list,
3186 unsigned int *count,
3187 ppnum_t *highest_page)
1c79356b 3188{
0a7de745
A
3189 unsigned int pageCount, page;
3190 ppnum_t phys;
3191 ppnum_t highestPage = 0;
3192
3193 pageCount = atop_32(*upl_size);
3194 if (pageCount > *count) {
3195 pageCount = *count;
3196 }
3197
3198 *upl = NULL;
3199
3200 for (page = 0; page < pageCount; page++) {
3201 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3202 if (!phys) {
3203 break;
3204 }
3205 page_list[page].phys_addr = phys;
3206 page_list[page].free_when_done = 0;
3207 page_list[page].absent = 0;
3208 page_list[page].dirty = 0;
3209 page_list[page].precious = 0;
3210 page_list[page].device = 0;
3211 if (phys > highestPage) {
3212 highestPage = phys;
3213 }
3214 }
3215
3216 *highest_page = highestPage;
3217
3218 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
55e303ae 3219}
0b4e3aa0 3220
0a7de745
A
3221IOReturn
3222IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
55e303ae 3223{
0a7de745
A
3224 IOOptionBits type = _flags & kIOMemoryTypeMask;
3225 IOReturn error = kIOReturnSuccess;
3226 ioGMDData *dataP;
3227 upl_page_info_array_t pageInfo;
3228 ppnum_t mapBase;
3229 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3230
3231 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3232
3233 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3234 forDirection = (IODirection) (forDirection | getDirection());
3235 }
3236
3237 dataP = getDataP(_memoryEntries);
3238 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3239 switch (kIODirectionOutIn & forDirection) {
5ba3f43e 3240 case kIODirectionOut:
0a7de745
A
3241 // Pages do not need to be marked as dirty on commit
3242 uplFlags = UPL_COPYOUT_FROM;
3243 dataP->fDMAAccess = kIODMAMapReadAccess;
3244 break;
55e303ae 3245
5ba3f43e 3246 case kIODirectionIn:
0a7de745
A
3247 dataP->fDMAAccess = kIODMAMapWriteAccess;
3248 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3249 break;
39037602 3250
5ba3f43e 3251 default:
0a7de745
A
3252 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3253 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3254 break;
3255 }
3256
3257 if (_wireCount) {
3258 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3259 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3260 error = kIOReturnNotWritable;
3261 }
3262 } else {
3263 IOMapper *mapper;
3264
3265 mapper = dataP->fMapper;
3266 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3267
3268 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3269 tag = _kernelTag;
3270 if (VM_KERN_MEMORY_NONE == tag) {
3271 tag = IOMemoryTag(kernel_map);
3272 }
3273
3274 if (kIODirectionPrepareToPhys32 & forDirection) {
3275 if (!mapper) {
3276 uplFlags |= UPL_NEED_32BIT_ADDR;
3277 }
3278 if (dataP->fDMAMapNumAddressBits > 32) {
3279 dataP->fDMAMapNumAddressBits = 32;
3280 }
3281 }
3282 if (kIODirectionPrepareNoFault & forDirection) {
3283 uplFlags |= UPL_REQUEST_NO_FAULT;
3284 }
3285 if (kIODirectionPrepareNoZeroFill & forDirection) {
3286 uplFlags |= UPL_NOZEROFILLIO;
3287 }
3288 if (kIODirectionPrepareNonCoherent & forDirection) {
3289 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3290 }
3291
3292 mapBase = 0;
3293
3294 // Note that appendBytes(NULL) zeros the data up to the desired length
3295 // and the length parameter is an unsigned int
3296 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3297 if (uplPageSize > ((unsigned int)uplPageSize)) {
3298 return kIOReturnNoMemory;
3299 }
cb323159 3300 if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
0a7de745
A
3301 return kIOReturnNoMemory;
3302 }
cb323159 3303 dataP = NULL;
0a7de745
A
3304
3305 // Find the appropriate vm_map for the given task
3306 vm_map_t curMap;
cb323159
A
3307 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
3308 curMap = NULL;
0a7de745
A
3309 } else {
3310 curMap = get_task_map(_task);
3311 }
3312
3313 // Iterate over the vector of virtual ranges
3314 Ranges vec = _ranges;
3315 unsigned int pageIndex = 0;
3316 IOByteCount mdOffset = 0;
3317 ppnum_t highestPage = 0;
3318
cb323159 3319 IOMemoryEntry * memRefEntry = NULL;
0a7de745
A
3320 if (_memRef) {
3321 memRefEntry = &_memRef->entries[0];
3322 }
3323
3324 for (UInt range = 0; range < _rangesCount; range++) {
3325 ioPLBlock iopl;
3326 mach_vm_address_t startPage, startPageOffset;
3327 mach_vm_size_t numBytes;
3328 ppnum_t highPage = 0;
3329
3330 // Get the startPage address and length of vec[range]
3331 getAddrLenForInd(startPage, numBytes, type, vec, range);
3332 startPageOffset = startPage & PAGE_MASK;
3333 iopl.fPageOffset = startPageOffset;
3334 numBytes += startPageOffset;
3335 startPage = trunc_page_64(startPage);
3336
3337 if (mapper) {
3338 iopl.fMappedPage = mapBase + pageIndex;
3339 } else {
3340 iopl.fMappedPage = 0;
3341 }
3342
3343 // Iterate over the current range, creating UPLs
3344 while (numBytes) {
3345 vm_address_t kernelStart = (vm_address_t) startPage;
3346 vm_map_t theMap;
3347 if (curMap) {
3348 theMap = curMap;
3349 } else if (_memRef) {
3350 theMap = NULL;
3351 } else {
3352 assert(_task == kernel_task);
3353 theMap = IOPageableMapForAddress(kernelStart);
3354 }
3355
3356 // ioplFlags is an in/out parameter
3357 upl_control_flags_t ioplFlags = uplFlags;
3358 dataP = getDataP(_memoryEntries);
3359 pageInfo = getPageList(dataP);
3360 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3361
3362 mach_vm_size_t _ioplSize = round_page(numBytes);
3363 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3364 unsigned int numPageInfo = atop_32(ioplSize);
3365
3366 if ((theMap == kernel_map)
3367 && (kernelStart >= io_kernel_static_start)
3368 && (kernelStart < io_kernel_static_end)) {
3369 error = io_get_kernel_static_upl(theMap,
3370 kernelStart,
3371 &ioplSize,
3372 &iopl.fIOPL,
3373 baseInfo,
3374 &numPageInfo,
3375 &highPage);
3376 } else if (_memRef) {
3377 memory_object_offset_t entryOffset;
3378
3379 entryOffset = mdOffset;
3380 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3381 if (entryOffset >= memRefEntry->size) {
3382 memRefEntry++;
3383 if (memRefEntry >= &_memRef->entries[_memRef->count]) {
3384 panic("memRefEntry");
3385 }
3386 entryOffset = 0;
3387 }
3388 if (ioplSize > (memRefEntry->size - entryOffset)) {
3389 ioplSize = (memRefEntry->size - entryOffset);
3390 }
3391 error = memory_object_iopl_request(memRefEntry->entry,
3392 entryOffset,
3393 &ioplSize,
3394 &iopl.fIOPL,
3395 baseInfo,
3396 &numPageInfo,
3397 &ioplFlags,
3398 tag);
3399 } else {
3400 assert(theMap);
3401 error = vm_map_create_upl(theMap,
3402 startPage,
3403 (upl_size_t*)&ioplSize,
3404 &iopl.fIOPL,
3405 baseInfo,
3406 &numPageInfo,
3407 &ioplFlags,
3408 tag);
3409 }
3410
3411 if (error != KERN_SUCCESS) {
3412 goto abortExit;
3413 }
3414
3415 assert(ioplSize);
3416
3417 if (iopl.fIOPL) {
3418 highPage = upl_get_highest_page(iopl.fIOPL);
3419 }
3420 if (highPage > highestPage) {
3421 highestPage = highPage;
3422 }
3423
3424 if (baseInfo->device) {
3425 numPageInfo = 1;
3426 iopl.fFlags = kIOPLOnDevice;
3427 } else {
3428 iopl.fFlags = 0;
3429 }
3430
3431 iopl.fIOMDOffset = mdOffset;
3432 iopl.fPageInfo = pageIndex;
3433 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) {
3434 dataP->fDiscontig = true;
3435 }
3436
3437 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3438 // Clean up partial created and unsaved iopl
3439 if (iopl.fIOPL) {
3440 upl_abort(iopl.fIOPL, 0);
3441 upl_deallocate(iopl.fIOPL);
3442 }
bca245ac 3443 error = kIOReturnNoMemory;
0a7de745
A
3444 goto abortExit;
3445 }
cb323159 3446 dataP = NULL;
0a7de745
A
3447
3448 // Check for multiple iopls in one virtual range
3449 pageIndex += numPageInfo;
3450 mdOffset -= iopl.fPageOffset;
3451 if (ioplSize < numBytes) {
3452 numBytes -= ioplSize;
3453 startPage += ioplSize;
3454 mdOffset += ioplSize;
3455 iopl.fPageOffset = 0;
3456 if (mapper) {
3457 iopl.fMappedPage = mapBase + pageIndex;
3458 }
3459 } else {
3460 mdOffset += numBytes;
3461 break;
3462 }
3463 }
3464 }
3465
3466 _highestPage = highestPage;
3467
3468 if (UPL_COPYOUT_FROM & uplFlags) {
3469 _flags |= kIOMemoryPreparedReadOnly;
3470 }
3471 }
39236c6e 3472
39037602 3473#if IOTRACKING
0a7de745
A
3474 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
3475 dataP = getDataP(_memoryEntries);
3476 if (!dataP->fWireTracking.link.next) {
3477 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3478 }
5ba3f43e 3479 }
39037602 3480#endif /* IOTRACKING */
3e170ce0 3481
0a7de745 3482 return error;
1c79356b
A
3483
3484abortExit:
55e303ae 3485 {
0a7de745
A
3486 dataP = getDataP(_memoryEntries);
3487 UInt done = getNumIOPL(_memoryEntries, dataP);
3488 ioPLBlock *ioplList = getIOPLList(dataP);
3489
3490 for (UInt range = 0; range < done; range++) {
3491 if (ioplList[range].fIOPL) {
3492 upl_abort(ioplList[range].fIOPL, 0);
3493 upl_deallocate(ioplList[range].fIOPL);
3494 }
3495 }
3496 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
55e303ae 3497 }
1c79356b 3498
0a7de745
A
3499 if (error == KERN_FAILURE) {
3500 error = kIOReturnCannotWire;
3501 } else if (error == KERN_MEMORY_ERROR) {
3502 error = kIOReturnNoResources;
3503 }
2d21ac55 3504
0a7de745 3505 return error;
55e303ae 3506}
d7e50217 3507
0a7de745
A
3508bool
3509IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
99c3a104 3510{
0a7de745
A
3511 ioGMDData * dataP;
3512 unsigned dataSize = size;
3513
3514 if (!_memoryEntries) {
3515 _memoryEntries = OSData::withCapacity(dataSize);
3516 if (!_memoryEntries) {
3517 return false;
3518 }
3519 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
3520 return false;
3521 }
3522
cb323159 3523 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
0a7de745 3524 dataP = getDataP(_memoryEntries);
99c3a104 3525
0a7de745
A
3526 if (mapper == kIOMapperWaitSystem) {
3527 IOMapper::checkForSystemMapper();
3528 mapper = IOMapper::gSystem;
3529 }
3530 dataP->fMapper = mapper;
3531 dataP->fPageCnt = 0;
3532 dataP->fMappedBase = 0;
3533 dataP->fDMAMapNumAddressBits = 64;
3534 dataP->fDMAMapAlignment = 0;
3535 dataP->fPreparationID = kIOPreparationIDUnprepared;
3536 dataP->fDiscontig = false;
3537 dataP->fCompletionError = false;
3538 dataP->fMappedBaseValid = false;
3539
3540 return true;
99c3a104
A
3541}
3542
0a7de745
A
3543IOReturn
3544IOMemoryDescriptor::dmaMap(
3545 IOMapper * mapper,
3546 IODMACommand * command,
3547 const IODMAMapSpecification * mapSpec,
3548 uint64_t offset,
3549 uint64_t length,
3550 uint64_t * mapAddress,
3551 uint64_t * mapLength)
99c3a104 3552{
0a7de745
A
3553 IOReturn err;
3554 uint32_t mapOptions;
99c3a104 3555
0a7de745
A
3556 mapOptions = 0;
3557 mapOptions |= kIODMAMapReadAccess;
3558 if (!(kIOMemoryPreparedReadOnly & _flags)) {
3559 mapOptions |= kIODMAMapWriteAccess;
3560 }
99c3a104 3561
0a7de745
A
3562 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3563 mapSpec, command, NULL, mapAddress, mapLength);
99c3a104 3564
0a7de745
A
3565 if (kIOReturnSuccess == err) {
3566 dmaMapRecord(mapper, command, *mapLength);
3567 }
5ba3f43e 3568
0a7de745 3569 return err;
5ba3f43e
A
3570}
3571
0a7de745
A
3572void
3573IOMemoryDescriptor::dmaMapRecord(
3574 IOMapper * mapper,
3575 IODMACommand * command,
3576 uint64_t mapLength)
5ba3f43e 3577{
0a7de745
A
3578 kern_allocation_name_t alloc;
3579 int16_t prior;
3580
3581 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
3582 kern_allocation_update_size(mapper->fAllocName, mapLength);
3583 }
3584
3585 if (!command) {
3586 return;
3587 }
3588 prior = OSAddAtomic16(1, &_dmaReferences);
3589 if (!prior) {
3590 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3591 _mapName = alloc;
3592 mapLength = _length;
3593 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3594 } else {
3595 _mapName = NULL;
3596 }
5ba3f43e 3597 }
5ba3f43e
A
3598}
3599
0a7de745
A
3600IOReturn
3601IOMemoryDescriptor::dmaUnmap(
3602 IOMapper * mapper,
3603 IODMACommand * command,
3604 uint64_t offset,
3605 uint64_t mapAddress,
3606 uint64_t mapLength)
5ba3f43e 3607{
0a7de745
A
3608 IOReturn ret;
3609 kern_allocation_name_t alloc;
3610 kern_allocation_name_t mapName;
3611 int16_t prior;
3612
cb323159 3613 mapName = NULL;
0a7de745
A
3614 prior = 0;
3615 if (command) {
3616 mapName = _mapName;
3617 if (_dmaReferences) {
3618 prior = OSAddAtomic16(-1, &_dmaReferences);
3619 } else {
3620 panic("_dmaReferences underflow");
3621 }
3622 }
3623
3624 if (!mapLength) {
3625 return kIOReturnSuccess;
3626 }
3627
3628 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3629
3630 if ((alloc = mapper->fAllocName)) {
3631 kern_allocation_update_size(alloc, -mapLength);
3632 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3633 mapLength = _length;
3634 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3635 }
3636 }
3637
3638 return ret;
99c3a104
A
3639}
3640
0a7de745
A
3641IOReturn
3642IOGeneralMemoryDescriptor::dmaMap(
3643 IOMapper * mapper,
3644 IODMACommand * command,
3645 const IODMAMapSpecification * mapSpec,
3646 uint64_t offset,
3647 uint64_t length,
3648 uint64_t * mapAddress,
3649 uint64_t * mapLength)
99c3a104 3650{
0a7de745
A
3651 IOReturn err = kIOReturnSuccess;
3652 ioGMDData * dataP;
3653 IOOptionBits type = _flags & kIOMemoryTypeMask;
99c3a104 3654
0a7de745
A
3655 *mapAddress = 0;
3656 if (kIOMemoryHostOnly & _flags) {
3657 return kIOReturnSuccess;
3658 }
3659 if (kIOMemoryRemote & _flags) {
3660 return kIOReturnNotAttached;
3e170ce0
A
3661 }
3662
0a7de745
A
3663 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3664 || offset || (length != _length)) {
3665 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3666 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
3667 const ioPLBlock * ioplList = getIOPLList(dataP);
3668 upl_page_info_t * pageList;
3669 uint32_t mapOptions = 0;
3670
3671 IODMAMapSpecification mapSpec;
3672 bzero(&mapSpec, sizeof(mapSpec));
3673 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3674 mapSpec.alignment = dataP->fDMAMapAlignment;
3675
3676 // For external UPLs the fPageInfo field points directly to
3677 // the upl's upl_page_info_t array.
3678 if (ioplList->fFlags & kIOPLExternUPL) {
3679 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3680 mapOptions |= kIODMAMapPagingPath;
3681 } else {
3682 pageList = getPageList(dataP);
3683 }
99c3a104 3684
0a7de745
A
3685 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
3686 mapOptions |= kIODMAMapPageListFullyOccupied;
3687 }
99c3a104 3688
0a7de745
A
3689 assert(dataP->fDMAAccess);
3690 mapOptions |= dataP->fDMAAccess;
5ba3f43e 3691
0a7de745
A
3692 // Check for direct device non-paged memory
3693 if (ioplList->fFlags & kIOPLOnDevice) {
3694 mapOptions |= kIODMAMapPhysicallyContiguous;
3695 }
99c3a104 3696
0a7de745
A
3697 IODMAMapPageList dmaPageList =
3698 {
3699 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3700 .pageListCount = _pages,
3701 .pageList = &pageList[0]
3702 };
3703 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3704 command, &dmaPageList, mapAddress, mapLength);
3705
3706 if (kIOReturnSuccess == err) {
3707 dmaMapRecord(mapper, command, *mapLength);
3708 }
3709 }
3710
3711 return err;
99c3a104
A
3712}
3713
55e303ae
A
3714/*
3715 * prepare
3716 *
3717 * Prepare the memory for an I/O transfer. This involves paging in
3718 * the memory, if necessary, and wiring it down for the duration of
3719 * the transfer. The complete() method completes the processing of
3720 * the memory after the I/O transfer finishes. This method need not
3721 * be called for non-pageable memory; a usage sketch follows below.
3722 */
99c3a104 3723
0a7de745
A
3724IOReturn
3725IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
55e303ae 3726{
0a7de745
A
3727 IOReturn error = kIOReturnSuccess;
3728 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 3729
0a7de745
A
3730 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3731 return kIOReturnSuccess;
3732 }
2d21ac55 3733
0a7de745
A
3734 assert(!(kIOMemoryRemote & _flags));
3735 if (kIOMemoryRemote & _flags) {
3736 return kIOReturnNotAttached;
3737 }
5ba3f43e 3738
0a7de745
A
3739 if (_prepareLock) {
3740 IOLockLock(_prepareLock);
3741 }
2d21ac55 3742
0a7de745 3743 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
bca245ac
A
3744 if ((forDirection & kIODirectionPrepareAvoidThrottling) && NEED_TO_HARD_THROTTLE_THIS_TASK()) {
3745 error = kIOReturnNotReady;
3746 goto finish;
3747 }
0a7de745
A
3748 error = wireVirtual(forDirection);
3749 }
de355530 3750
0a7de745
A
3751 if (kIOReturnSuccess == error) {
3752 if (1 == ++_wireCount) {
3753 if (kIOMemoryClearEncrypt & _flags) {
3754 performOperation(kIOMemoryClearEncrypted, 0, _length);
3755 }
3756 }
3757 }
0b4c1975 3758
bca245ac
A
3759finish:
3760
0a7de745
A
3761 if (_prepareLock) {
3762 IOLockUnlock(_prepareLock);
3763 }
2d21ac55 3764
0a7de745 3765 return error;
1c79356b
A
3766}
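/*
 * Illustrative sketch only (not part of the original file): a typical
 * driver brackets a DMA transfer with prepare() and complete() so the
 * pageable buffer stays wired while the hardware uses it. The
 * ExampleWiredTransfer name and the transfer details are hypothetical.
 */
static IOReturn
ExampleWiredTransfer(IOMemoryDescriptor * md)
{
	IOReturn ret = md->prepare(kIODirectionOut);
	if (kIOReturnSuccess != ret) {
		return ret;
	}
	// ... program the DMA engine against the now-wired pages ...
	md->complete(kIODirectionOut);
	return kIOReturnSuccess;
}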
3767
3768/*
3769 * complete
3770 *
3771 * Complete processing of the memory after an I/O transfer finishes.
3772 * This method should not be called unless a prepare was previously
3773 * issued; prepare() and complete() must occur in pairs, before and
3774 * after an I/O transfer involving pageable memory (sketch below).
3775 */
6d2010ae 3776
0a7de745
A
3777IOReturn
3778IOGeneralMemoryDescriptor::complete(IODirection forDirection)
1c79356b 3779{
0a7de745
A
3780 IOOptionBits type = _flags & kIOMemoryTypeMask;
3781 ioGMDData * dataP;
1c79356b 3782
0a7de745
A
3783 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3784 return kIOReturnSuccess;
3785 }
3786
3787 assert(!(kIOMemoryRemote & _flags));
3788 if (kIOMemoryRemote & _flags) {
3789 return kIOReturnNotAttached;
3790 }
3791
3792 if (_prepareLock) {
3793 IOLockLock(_prepareLock);
3794 }
3795 do{
3796 assert(_wireCount);
3797 if (!_wireCount) {
3798 break;
3799 }
3800 dataP = getDataP(_memoryEntries);
3801 if (!dataP) {
3802 break;
3803 }
3804
3805 if (kIODirectionCompleteWithError & forDirection) {
3806 dataP->fCompletionError = true;
3807 }
3808
3809 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
3810 performOperation(kIOMemorySetEncrypted, 0, _length);
3811 }
1c79356b 3812
0a7de745
A
3813 _wireCount--;
3814 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
3815 ioPLBlock *ioplList = getIOPLList(dataP);
3816 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3817
3818 if (_wireCount) {
3819 // kIODirectionCompleteWithDataValid & forDirection
3820 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3821 vm_tag_t tag;
3822 tag = getVMTag(kernel_map);
3823 for (ind = 0; ind < count; ind++) {
3824 if (ioplList[ind].fIOPL) {
3825 iopl_valid_data(ioplList[ind].fIOPL, tag);
3826 }
3827 }
3828 }
3829 } else {
3830 if (_dmaReferences) {
3831 panic("complete() while dma active");
3832 }
3833
3834 if (dataP->fMappedBaseValid) {
3835 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3836 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3837 }
3e170ce0 3838#if IOTRACKING
0a7de745
A
3839 if (dataP->fWireTracking.link.next) {
3840 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3841 }
39037602 3842#endif /* IOTRACKING */
0a7de745
A
3843 // Only complete iopls that we created which are for TypeVirtual
3844 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3845 for (ind = 0; ind < count; ind++) {
3846 if (ioplList[ind].fIOPL) {
3847 if (dataP->fCompletionError) {
3848 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3849 } else {
cb323159 3850 upl_commit(ioplList[ind].fIOPL, NULL, 0);
0a7de745
A
3851 }
3852 upl_deallocate(ioplList[ind].fIOPL);
3853 }
3854 }
3855 } else if (kIOMemoryTypeUPL == type) {
3856 upl_set_referenced(ioplList[0].fIOPL, false);
3857 }
3858
3859 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3860
3861 dataP->fPreparationID = kIOPreparationIDUnprepared;
3862 _flags &= ~kIOMemoryPreparedReadOnly;
3863 }
3864 }
3865 }while (false);
3866
3867 if (_prepareLock) {
3868 IOLockUnlock(_prepareLock);
3869 }
3870
3871 return kIOReturnSuccess;
1c79356b
A
3872}
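/*
 * Illustrative sketch only (not part of the original file): a caller that
 * hits an error can complete with kIODirectionCompleteWithError so the
 * wired UPLs are aborted rather than committed. The ExampleFinishTransfer
 * name and the transferFailed flag are hypothetical.
 */
static void
ExampleFinishTransfer(IOMemoryDescriptor * md, bool transferFailed)
{
	IODirection dir = kIODirectionOut;
	if (transferFailed) {
		dir = (IODirection) (dir | kIODirectionCompleteWithError);
	}
	md->complete(dir);
}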
3873
0a7de745
A
3874IOReturn
3875IOGeneralMemoryDescriptor::doMap(
3876 vm_map_t __addressMap,
3877 IOVirtualAddress * __address,
3878 IOOptionBits options,
3879 IOByteCount __offset,
3880 IOByteCount __length )
1c79356b 3881{
b0d623f7 3882#ifndef __LP64__
0a7de745
A
3883 if (!(kIOMap64Bit & options)) {
3884 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3885 }
b0d623f7 3886#endif /* !__LP64__ */
2d21ac55 3887
0a7de745 3888 kern_return_t err;
fe8ab488 3889
0a7de745
A
3890 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3891 mach_vm_size_t offset = mapping->fOffset + __offset;
3892 mach_vm_size_t length = mapping->fLength;
2d21ac55 3893
0a7de745
A
3894 IOOptionBits type = _flags & kIOMemoryTypeMask;
3895 Ranges vec = _ranges;
91447636 3896
0a7de745
A
3897 mach_vm_address_t range0Addr = 0;
3898 mach_vm_size_t range0Len = 0;
060df5ea 3899
0a7de745
A
3900 if ((offset >= _length) || ((offset + length) > _length)) {
3901 return kIOReturnBadArgument;
3902 }
5ba3f43e 3903
0a7de745
A
3904 assert(!(kIOMemoryRemote & _flags));
3905 if (kIOMemoryRemote & _flags) {
3906 return 0;
3907 }
91447636 3908
0a7de745
A
3909 if (vec.v) {
3910 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3911 }
2d21ac55 3912
0a7de745
A
3913 // mapping source == dest? (could be much better)
3914 if (_task
3915 && (mapping->fAddressTask == _task)
3916 && (mapping->fAddressMap == get_task_map(_task))
3917 && (options & kIOMapAnywhere)
3918 && (!(kIOMapUnique & options))
3919 && (1 == _rangesCount)
3920 && (0 == offset)
3921 && range0Addr
3922 && (length <= range0Len)) {
3923 mapping->fAddress = range0Addr;
3924 mapping->fOptions |= kIOMapStatic;
3925
3926 return kIOReturnSuccess;
3927 }
1c79356b 3928
0a7de745
A
3929 if (!_memRef) {
3930 IOOptionBits createOptions = 0;
3931 if (!(kIOMapReadOnly & options)) {
3932 createOptions |= kIOMemoryReferenceWrite;
fe8ab488 3933#if DEVELOPMENT || DEBUG
cb323159
A
3934 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
3935 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
0a7de745
A
3936 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3937 }
0b4e3aa0 3938#endif
0a7de745
A
3939 }
3940 err = memoryReferenceCreate(createOptions, &_memRef);
3941 if (kIOReturnSuccess != err) {
3942 return err;
3943 }
fe8ab488 3944 }
9bccf70c 3945
0a7de745 3946 memory_object_t pager;
cb323159 3947 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
0a7de745
A
3948
3949 // <upl_transpose //
3950 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
3951 do{
3952 upl_t redirUPL2;
3953 upl_size_t size;
3954 upl_control_flags_t flags;
3955 unsigned int lock_count;
3956
3957 if (!_memRef || (1 != _memRef->count)) {
3958 err = kIOReturnNotReadable;
3959 break;
3960 }
3961
3962 size = round_page(mapping->fLength);
3963 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3964 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3965
3966 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3967 NULL, NULL,
3968 &flags, getVMTag(kernel_map))) {
3969 redirUPL2 = NULL;
3970 }
3971
3972 for (lock_count = 0;
3973 IORecursiveLockHaveLock(gIOMemoryLock);
3974 lock_count++) {
3975 UNLOCK;
3976 }
3977 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3978 for (;
3979 lock_count;
3980 lock_count--) {
3981 LOCK;
3982 }
3983
3984 if (kIOReturnSuccess != err) {
3985 IOLog("upl_transpose(%x)\n", err);
3986 err = kIOReturnSuccess;
3987 }
3988
3989 if (redirUPL2) {
3990 upl_commit(redirUPL2, NULL, 0);
3991 upl_deallocate(redirUPL2);
cb323159 3992 redirUPL2 = NULL;
0a7de745
A
3993 }
3994 {
3995 // swap the memEntries since they now refer to different vm_objects
3996 IOMemoryReference * me = _memRef;
3997 _memRef = mapping->fMemory->_memRef;
3998 mapping->fMemory->_memRef = me;
3999 }
4000 if (pager) {
4001 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
4002 }
4003 }while (false);
39037602 4004 }
0a7de745
A
4005 // upl_transpose> //
4006 else {
4007 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4008#if IOTRACKING
4009 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
4010 // only dram maps in the default-on development case
4011 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4012 }
39037602 4013#endif /* IOTRACKING */
0a7de745
A
4014 if ((err == KERN_SUCCESS) && pager) {
4015 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4016
4017 if (err != KERN_SUCCESS) {
4018 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4019 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4020 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4021 }
4022 }
fe8ab488 4023 }
fe8ab488 4024
0a7de745 4025 return err;
1c79356b
A
4026}
4027
39037602
A
4028#if IOTRACKING
4029IOReturn
4030IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
0a7de745 4031 mach_vm_address_t * address, mach_vm_size_t * size)
39037602 4032{
cb323159 4033#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
39037602 4034
0a7de745 4035 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
39037602 4036
0a7de745
A
4037 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4038 return kIOReturnNotReady;
4039 }
39037602 4040
0a7de745
A
4041 *task = map->fAddressTask;
4042 *address = map->fAddress;
4043 *size = map->fLength;
39037602 4044
0a7de745 4045 return kIOReturnSuccess;
39037602
A
4046}
4047#endif /* IOTRACKING */
4048
0a7de745
A
4049IOReturn
4050IOGeneralMemoryDescriptor::doUnmap(
4051 vm_map_t addressMap,
4052 IOVirtualAddress __address,
4053 IOByteCount __length )
1c79356b 4054{
0a7de745 4055 return super::doUnmap(addressMap, __address, __length);
1c79356b
A
4056}
4057
4058/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4059
b0d623f7
A
4060#undef super
4061#define super OSObject
1c79356b 4062
b0d623f7 4063OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 4064
b0d623f7
A
4065OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
4066OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
4067OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
4068OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
4069OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
4070OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
4071OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
4072OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 4073
b0d623f7 4074/* ex-inline function implementation */
0a7de745
A
4075IOPhysicalAddress
4076IOMemoryMap::getPhysicalAddress()
4077{
cb323159 4078 return getPhysicalSegment( 0, NULL );
0a7de745 4079}
1c79356b
A
4080
4081/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4082
0a7de745
A
4083bool
4084IOMemoryMap::init(
4085 task_t intoTask,
4086 mach_vm_address_t toAddress,
4087 IOOptionBits _options,
4088 mach_vm_size_t _offset,
4089 mach_vm_size_t _length )
1c79356b 4090{
0a7de745
A
4091 if (!intoTask) {
4092 return false;
4093 }
1c79356b 4094
0a7de745
A
4095 if (!super::init()) {
4096 return false;
4097 }
1c79356b 4098
0a7de745
A
4099 fAddressMap = get_task_map(intoTask);
4100 if (!fAddressMap) {
4101 return false;
4102 }
4103 vm_map_reference(fAddressMap);
1c79356b 4104
0a7de745
A
4105 fAddressTask = intoTask;
4106 fOptions = _options;
4107 fLength = _length;
4108 fOffset = _offset;
4109 fAddress = toAddress;
1c79356b 4110
0a7de745 4111 return true;
1c79356b
A
4112}
4113
0a7de745
A
4114bool
4115IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 4116{
0a7de745
A
4117 if (!_memory) {
4118 return false;
4119 }
4120
4121 if (!fSuperMap) {
4122 if ((_offset + fLength) > _memory->getLength()) {
4123 return false;
4124 }
4125 fOffset = _offset;
4126 }
4127
4128 _memory->retain();
4129 if (fMemory) {
4130 if (fMemory != _memory) {
4131 fMemory->removeMapping(this);
4132 }
4133 fMemory->release();
4134 }
4135 fMemory = _memory;
4136
4137 return true;
1c79356b
A
4138}
4139
0a7de745
A
4140IOReturn
4141IOMemoryDescriptor::doMap(
4142 vm_map_t __addressMap,
4143 IOVirtualAddress * __address,
4144 IOOptionBits options,
4145 IOByteCount __offset,
4146 IOByteCount __length )
1c79356b 4147{
0a7de745 4148 return kIOReturnUnsupported;
fe8ab488 4149}
1c79356b 4150
0a7de745
A
4151IOReturn
4152IOMemoryDescriptor::handleFault(
4153 void * _pager,
4154 mach_vm_size_t sourceOffset,
4155 mach_vm_size_t length)
fe8ab488 4156{
0a7de745 4157 if (kIOMemoryRedirected & _flags) {
b0d623f7 4158#if DEBUG
0a7de745 4159 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2d21ac55 4160#endif
0a7de745
A
4161 do {
4162 SLEEP;
4163 } while (kIOMemoryRedirected & _flags);
4164 }
4165 return kIOReturnSuccess;
0b4e3aa0
A
4166}
4167
0a7de745
A
4168IOReturn
4169IOMemoryDescriptor::populateDevicePager(
4170 void * _pager,
4171 vm_map_t addressMap,
4172 mach_vm_address_t address,
4173 mach_vm_size_t sourceOffset,
4174 mach_vm_size_t length,
4175 IOOptionBits options )
0b4e3aa0 4176{
0a7de745
A
4177 IOReturn err = kIOReturnSuccess;
4178 memory_object_t pager = (memory_object_t) _pager;
4179 mach_vm_size_t size;
4180 mach_vm_size_t bytes;
4181 mach_vm_size_t page;
4182 mach_vm_size_t pageOffset;
4183 mach_vm_size_t pagerOffset;
4184 IOPhysicalLength segLen, chunk;
4185 addr64_t physAddr;
4186 IOOptionBits type;
4187
4188 type = _flags & kIOMemoryTypeMask;
4189
4190 if (reserved->dp.pagerContig) {
4191 sourceOffset = 0;
4192 pagerOffset = 0;
4193 }
4194
4195 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
4196 assert( physAddr );
4197 pageOffset = physAddr - trunc_page_64( physAddr );
4198 pagerOffset = sourceOffset;
4199
4200 size = length + pageOffset;
4201 physAddr -= pageOffset;
4202
4203 segLen += pageOffset;
4204 bytes = size;
4205 do{
4206 // in the middle of the loop only map whole pages
4207 if (segLen >= bytes) {
4208 segLen = bytes;
cb323159 4209 } else if (segLen != trunc_page_64(segLen)) {
0a7de745
A
4210 err = kIOReturnVMError;
4211 }
4212 if (physAddr != trunc_page_64(physAddr)) {
4213 err = kIOReturnBadArgument;
4214 }
4215
4216 if (kIOReturnSuccess != err) {
4217 break;
4218 }
1c79356b 4219
3e170ce0 4220#if DEBUG || DEVELOPMENT
0a7de745
A
4221 if ((kIOMemoryTypeUPL != type)
4222 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) {
4223 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
4224 }
3e170ce0
A
4225#endif /* DEBUG || DEVELOPMENT */
4226
0a7de745
A
4227 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
4228 for (page = 0;
4229 (page < segLen) && (KERN_SUCCESS == err);
4230 page += chunk) {
4231 err = device_pager_populate_object(pager, pagerOffset,
4232 (ppnum_t)(atop_64(physAddr + page)), chunk);
4233 pagerOffset += chunk;
4234 }
5ba3f43e 4235
0a7de745
A
4236 assert(KERN_SUCCESS == err);
4237 if (err) {
4238 break;
4239 }
4240
4241 // This call to vm_fault causes an early pmap level resolution
4242 // of the mappings created above for kernel mappings, since
4243 // faulting in later can't take place from interrupt level.
4244 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
4245 err = vm_fault(addressMap,
4246 (vm_map_offset_t)trunc_page_64(address),
4247 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
4248 FALSE, VM_KERN_MEMORY_NONE,
4249 THREAD_UNINT, NULL,
4250 (vm_map_offset_t)0);
4251
4252 if (KERN_SUCCESS != err) {
4253 break;
4254 }
4255 }
9bccf70c 4256
0a7de745
A
4257 sourceOffset += segLen - pageOffset;
4258 address += segLen;
4259 bytes -= segLen;
4260 pageOffset = 0;
4261 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 4262
0a7de745
A
4263 if (bytes) {
4264 err = kIOReturnBadArgument;
4265 }
1c79356b 4266
0a7de745 4267 return err;
1c79356b
A
4268}
4269
0a7de745
A
4270IOReturn
4271IOMemoryDescriptor::doUnmap(
4272 vm_map_t addressMap,
4273 IOVirtualAddress __address,
4274 IOByteCount __length )
1c79356b 4275{
0a7de745
A
4276 IOReturn err;
4277 IOMemoryMap * mapping;
4278 mach_vm_address_t address;
4279 mach_vm_size_t length;
4280
4281 if (__length) {
4282 panic("doUnmap");
4283 }
4284
4285 mapping = (IOMemoryMap *) __address;
4286 addressMap = mapping->fAddressMap;
4287 address = mapping->fAddress;
4288 length = mapping->fLength;
4289
4290 if (kIOMapOverwrite & mapping->fOptions) {
4291 err = KERN_SUCCESS;
4292 } else {
4293 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
4294 addressMap = IOPageableMapForAddress( address );
4295 }
b0d623f7 4296#if DEBUG
0a7de745
A
4297 if (kIOLogMapping & gIOKitDebug) {
4298 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4299 addressMap, address, length );
4300 }
1c79356b 4301#endif
0a7de745
A
4302 err = mach_vm_deallocate( addressMap, address, length );
4303 }
1c79356b 4304
3e170ce0 4305#if IOTRACKING
0a7de745 4306 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
39037602 4307#endif /* IOTRACKING */
1c79356b 4308
0a7de745 4309 return err;
1c79356b
A
4310}
4311
0a7de745
A
4312IOReturn
4313IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 4314{
0a7de745 4315 IOReturn err = kIOReturnSuccess;
cb323159 4316 IOMemoryMap * mapping = NULL;
0a7de745 4317 OSIterator * iter;
91447636 4318
0a7de745 4319 LOCK;
39236c6e 4320
0a7de745
A
4321 if (doRedirect) {
4322 _flags |= kIOMemoryRedirected;
4323 } else {
4324 _flags &= ~kIOMemoryRedirected;
4325 }
39236c6e 4326
0a7de745
A
4327 do {
4328 if ((iter = OSCollectionIterator::withCollection( _mappings))) {
4329 memory_object_t pager;
4330
4331 if (reserved) {
4332 pager = (memory_object_t) reserved->dp.devicePager;
4333 } else {
4334 pager = MACH_PORT_NULL;
4335 }
4336
4337 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
4338 mapping->redirect( safeTask, doRedirect );
4339 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
4340 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4341 }
4342 }
4343
4344 iter->release();
39236c6e 4345 }
0a7de745 4346 } while (false);
e3027f41 4347
0a7de745
A
4348 if (!doRedirect) {
4349 WAKEUP;
91447636 4350 }
0b4e3aa0 4351
0a7de745 4352 UNLOCK;
e3027f41 4353
b0d623f7 4354#ifndef __LP64__
0a7de745
A
4355 // temporary binary compatibility
4356 IOSubMemoryDescriptor * subMem;
4357 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
4358 err = subMem->redirect( safeTask, doRedirect );
4359 } else {
4360 err = kIOReturnSuccess;
4361 }
b0d623f7 4362#endif /* !__LP64__ */
e3027f41 4363
0a7de745 4364 return err;
e3027f41
A
4365}
4366
0a7de745
A
4367IOReturn
4368IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41 4369{
0a7de745 4370 IOReturn err = kIOReturnSuccess;
e3027f41 4371
0a7de745 4372 if (fSuperMap) {
b0d623f7 4373// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
0a7de745
A
4374 } else {
4375 LOCK;
4376
4377 do{
4378 if (!fAddress) {
4379 break;
4380 }
4381 if (!fAddressMap) {
4382 break;
4383 }
4384
4385 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4386 && (0 == (fOptions & kIOMapStatic))) {
4387 IOUnmapPages( fAddressMap, fAddress, fLength );
4388 err = kIOReturnSuccess;
b0d623f7 4389#if DEBUG
0a7de745 4390 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 4391#endif
0a7de745
A
4392 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
4393 IOOptionBits newMode;
4394 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4395 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4396 }
4397 }while (false);
4398 UNLOCK;
4399 }
e3027f41 4400
0a7de745
A
4401 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4402 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4403 && safeTask
4404 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
4405 fMemory->redirect(safeTask, doRedirect);
4406 }
91447636 4407
0a7de745 4408 return err;
e3027f41
A
4409}
4410
0a7de745
A
4411IOReturn
4412IOMemoryMap::unmap( void )
1c79356b 4413{
0a7de745 4414 IOReturn err;
1c79356b 4415
0a7de745 4416 LOCK;
1c79356b 4417
cb323159 4418 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
0a7de745
A
4419 && (0 == (kIOMapStatic & fOptions))) {
4420 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4421 } else {
4422 err = kIOReturnSuccess;
4423 }
1c79356b 4424
0a7de745
A
4425 if (fAddressMap) {
4426 vm_map_deallocate(fAddressMap);
cb323159 4427 fAddressMap = NULL;
0a7de745 4428 }
2d21ac55 4429
0a7de745 4430 fAddress = 0;
1c79356b 4431
0a7de745 4432 UNLOCK;
1c79356b 4433
0a7de745 4434 return err;
1c79356b
A
4435}
4436
0a7de745
A
4437void
4438IOMemoryMap::taskDied( void )
1c79356b 4439{
0a7de745
A
4440 LOCK;
4441 if (fUserClientUnmap) {
4442 unmap();
4443 }
3e170ce0 4444#if IOTRACKING
0a7de745
A
4445 else {
4446 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4447 }
39037602 4448#endif /* IOTRACKING */
3e170ce0 4449
0a7de745
A
4450 if (fAddressMap) {
4451 vm_map_deallocate(fAddressMap);
cb323159 4452 fAddressMap = NULL;
0a7de745 4453 }
cb323159 4454 fAddressTask = NULL;
0a7de745
A
4455 fAddress = 0;
4456 UNLOCK;
1c79356b
A
4457}
4458
0a7de745
A
4459IOReturn
4460IOMemoryMap::userClientUnmap( void )
b0d623f7 4461{
0a7de745
A
4462 fUserClientUnmap = true;
4463 return kIOReturnSuccess;
b0d623f7
A
4464}
4465
9bccf70c
A
4466// Overload the release mechanism. Every mapping must be a member
4467// of its memory descriptor's _mappings set, which means that we
4468// always have 2 references on a mapping. When either of these
4469// references is released we need to free ourselves.
0a7de745
A
4470void
4471IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 4472{
0a7de745
A
4473 LOCK;
4474 super::taggedRelease(tag, 2);
4475 UNLOCK;
9bccf70c
A
4476}
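/*
 * Illustrative sketch only (not part of the original file): because of the
 * overloaded taggedRelease() above, client code holds what looks like a
 * single reference, and one release() from the client is enough to trigger
 * free(), which unmaps and detaches from the descriptor. The names below
 * are hypothetical.
 */
static void
ExampleMapAndRelease(IOMemoryDescriptor * md)
{
	IOMemoryMap * map = md->map();  // referenced by the client and by md's _mappings set
	if (map) {
		// ... use map->getAddress() / map->getLength() ...
		map->release();         // drops below the freeWhen threshold; free() unmaps
	}
}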
4477
0a7de745
A
4478void
4479IOMemoryMap::free()
1c79356b 4480{
0a7de745 4481 unmap();
1c79356b 4482
0a7de745
A
4483 if (fMemory) {
4484 LOCK;
4485 fMemory->removeMapping(this);
4486 UNLOCK;
4487 fMemory->release();
4488 }
1c79356b 4489
0a7de745
A
4490 if (fOwner && (fOwner != fMemory)) {
4491 LOCK;
4492 fOwner->removeMapping(this);
4493 UNLOCK;
4494 }
91447636 4495
0a7de745
A
4496 if (fSuperMap) {
4497 fSuperMap->release();
4498 }
1c79356b 4499
0a7de745
A
4500 if (fRedirUPL) {
4501 upl_commit(fRedirUPL, NULL, 0);
4502 upl_deallocate(fRedirUPL);
4503 }
91447636 4504
0a7de745 4505 super::free();
1c79356b
A
4506}
4507
0a7de745
A
4508IOByteCount
4509IOMemoryMap::getLength()
1c79356b 4510{
0a7de745 4511 return fLength;
1c79356b
A
4512}
4513
0a7de745
A
4514IOVirtualAddress
4515IOMemoryMap::getVirtualAddress()
1c79356b 4516{
b0d623f7 4517#ifndef __LP64__
0a7de745
A
4518 if (fSuperMap) {
4519 fSuperMap->getVirtualAddress();
4520 } else if (fAddressMap
4521 && vm_map_is_64bit(fAddressMap)
4522 && (sizeof(IOVirtualAddress) < 8)) {
4523 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4524 }
b0d623f7 4525#endif /* !__LP64__ */
2d21ac55 4526
0a7de745 4527 return fAddress;
2d21ac55
A
4528}
4529
b0d623f7 4530#ifndef __LP64__
0a7de745
A
4531mach_vm_address_t
4532IOMemoryMap::getAddress()
2d21ac55 4533{
0a7de745 4534 return fAddress;
2d21ac55
A
4535}
4536
0a7de745
A
4537mach_vm_size_t
4538IOMemoryMap::getSize()
2d21ac55 4539{
0a7de745 4540 return fLength;
1c79356b 4541}
b0d623f7 4542#endif /* !__LP64__ */
1c79356b 4543
2d21ac55 4544
0a7de745
A
4545task_t
4546IOMemoryMap::getAddressTask()
1c79356b 4547{
0a7de745
A
4548 if (fSuperMap) {
4549 return fSuperMap->getAddressTask();
4550 } else {
4551 return fAddressTask;
4552 }
1c79356b
A
4553}
4554
0a7de745
A
4555IOOptionBits
4556IOMemoryMap::getMapOptions()
1c79356b 4557{
0a7de745 4558 return fOptions;
1c79356b
A
4559}
4560
0a7de745
A
4561IOMemoryDescriptor *
4562IOMemoryMap::getMemoryDescriptor()
1c79356b 4563{
0a7de745 4564 return fMemory;
1c79356b
A
4565}
4566
0a7de745
A
4567IOMemoryMap *
4568IOMemoryMap::copyCompatible(
4569 IOMemoryMap * newMapping )
1c79356b 4570{
0a7de745
A
4571 task_t task = newMapping->getAddressTask();
4572 mach_vm_address_t toAddress = newMapping->fAddress;
4573 IOOptionBits _options = newMapping->fOptions;
4574 mach_vm_size_t _offset = newMapping->fOffset;
4575 mach_vm_size_t _length = newMapping->fLength;
4576
4577 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
cb323159 4578 return NULL;
0a7de745
A
4579 }
4580 if ((fOptions ^ _options) & kIOMapReadOnly) {
cb323159 4581 return NULL;
0a7de745
A
4582 }
4583 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
4584 && ((fOptions ^ _options) & kIOMapCacheMask)) {
cb323159 4585 return NULL;
0a7de745
A
4586 }
4587
4588 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
cb323159 4589 return NULL;
0a7de745
A
4590 }
4591
4592 if (_offset < fOffset) {
cb323159 4593 return NULL;
0a7de745
A
4594 }
4595
4596 _offset -= fOffset;
4597
4598 if ((_offset + _length) > fLength) {
cb323159 4599 return NULL;
0a7de745
A
4600 }
4601
4602 retain();
4603 if ((fLength == _length) && (!_offset)) {
4604 newMapping = this;
4605 } else {
4606 newMapping->fSuperMap = this;
4607 newMapping->fOffset = fOffset + _offset;
4608 newMapping->fAddress = fAddress + _offset;
4609 }
4610
4611 return newMapping;
1c79356b
A
4612}
4613
0a7de745
A
4614IOReturn
4615IOMemoryMap::wireRange(
4616 uint32_t options,
4617 mach_vm_size_t offset,
4618 mach_vm_size_t length)
99c3a104 4619{
0a7de745
A
4620 IOReturn kr;
4621 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4622 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4623 vm_prot_t prot;
4624
4625 prot = (kIODirectionOutIn & options);
4626 if (prot) {
4627 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4628 } else {
4629 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4630 }
4631
4632 return kr;
99c3a104
A
4633}
4634
4635
0a7de745 4636IOPhysicalAddress
b0d623f7
A
4637#ifdef __LP64__
4638IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4639#else /* !__LP64__ */
4640IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4641#endif /* !__LP64__ */
1c79356b 4642{
0a7de745 4643 IOPhysicalAddress address;
1c79356b 4644
0a7de745 4645 LOCK;
b0d623f7 4646#ifdef __LP64__
0a7de745 4647 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
b0d623f7 4648#else /* !__LP64__ */
0a7de745 4649 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 4650#endif /* !__LP64__ */
0a7de745 4651 UNLOCK;
1c79356b 4652
0a7de745 4653 return address;
1c79356b
A
4654}
4655
4656/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4657
4658#undef super
4659#define super OSObject
4660
4661/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4662
0a7de745
A
4663void
4664IOMemoryDescriptor::initialize( void )
1c79356b 4665{
cb323159 4666 if (NULL == gIOMemoryLock) {
0a7de745
A
4667 gIOMemoryLock = IORecursiveLockAlloc();
4668 }
55e303ae 4669
0a7de745 4670 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
4671}
4672
0a7de745
A
4673void
4674IOMemoryDescriptor::free( void )
1c79356b 4675{
0a7de745
A
4676 if (_mappings) {
4677 _mappings->release();
4678 }
4679
4680 if (reserved) {
cb323159 4681 cleanKernelReserved(reserved);
0a7de745
A
4682 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4683 reserved = NULL;
4684 }
4685 super::free();
1c79356b
A
4686}
4687
0a7de745
A
4688IOMemoryMap *
4689IOMemoryDescriptor::setMapping(
4690 task_t intoTask,
4691 IOVirtualAddress mapAddress,
4692 IOOptionBits options )
1c79356b 4693{
0a7de745
A
4694 return createMappingInTask( intoTask, mapAddress,
4695 options | kIOMapStatic,
4696 0, getLength());
1c79356b
A
4697}
4698
0a7de745
A
4699IOMemoryMap *
4700IOMemoryDescriptor::map(
4701 IOOptionBits options )
1c79356b 4702{
0a7de745
A
4703 return createMappingInTask( kernel_task, 0,
4704 options | kIOMapAnywhere,
4705 0, getLength());
1c79356b
A
4706}
4707
b0d623f7 4708#ifndef __LP64__
0a7de745
A
4709IOMemoryMap *
4710IOMemoryDescriptor::map(
4711 task_t intoTask,
4712 IOVirtualAddress atAddress,
4713 IOOptionBits options,
4714 IOByteCount offset,
4715 IOByteCount length )
1c79356b 4716{
0a7de745
A
4717 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
4718 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
cb323159 4719 return NULL;
0a7de745
A
4720 }
4721
4722 return createMappingInTask(intoTask, atAddress,
4723 options, offset, length);
2d21ac55 4724}
b0d623f7 4725#endif /* !__LP64__ */
2d21ac55 4726
0a7de745
A
4727IOMemoryMap *
4728IOMemoryDescriptor::createMappingInTask(
4729 task_t intoTask,
4730 mach_vm_address_t atAddress,
4731 IOOptionBits options,
4732 mach_vm_size_t offset,
4733 mach_vm_size_t length)
2d21ac55 4734{
0a7de745
A
4735 IOMemoryMap * result;
4736 IOMemoryMap * mapping;
2d21ac55 4737
0a7de745
A
4738 if (0 == length) {
4739 length = getLength();
4740 }
1c79356b 4741
0a7de745 4742 mapping = new IOMemoryMap;
2d21ac55 4743
0a7de745
A
4744 if (mapping
4745 && !mapping->init( intoTask, atAddress,
4746 options, offset, length )) {
4747 mapping->release();
cb323159 4748 mapping = NULL;
0a7de745 4749 }
2d21ac55 4750
0a7de745
A
4751 if (mapping) {
4752 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4753 } else {
cb323159 4754 result = NULL;
0a7de745 4755 }
2d21ac55 4756
b0d623f7 4757#if DEBUG
0a7de745
A
4758 if (!result) {
4759 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4760 this, atAddress, (uint32_t) options, offset, length);
4761 }
2d21ac55
A
4762#endif
4763
0a7de745 4764 return result;
1c79356b
A
4765}
4766
b0d623f7 4767#ifndef __LP64__ // there is only a 64 bit version for LP64
0a7de745
A
4768IOReturn
4769IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4770 IOOptionBits options,
4771 IOByteCount offset)
2d21ac55 4772{
0a7de745 4773 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
2d21ac55 4774}
b0d623f7 4775#endif
2d21ac55 4776
0a7de745
A
4777IOReturn
4778IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4779 IOOptionBits options,
4780 mach_vm_size_t offset)
91447636 4781{
0a7de745 4782 IOReturn err = kIOReturnSuccess;
cb323159 4783 IOMemoryDescriptor * physMem = NULL;
91447636 4784
0a7de745 4785 LOCK;
91447636 4786
0a7de745
A
4787 if (fAddress && fAddressMap) {
4788 do{
4789 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4790 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4791 physMem = fMemory;
4792 physMem->retain();
4793 }
4794
4795 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
4796 upl_size_t size = round_page(fLength);
4797 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4798 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4799 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4800 NULL, NULL,
4801 &flags, fMemory->getVMTag(kernel_map))) {
cb323159 4802 fRedirUPL = NULL;
0a7de745
A
4803 }
4804
4805 if (physMem) {
4806 IOUnmapPages( fAddressMap, fAddress, fLength );
4807 if ((false)) {
cb323159 4808 physMem->redirect(NULL, true);
0a7de745
A
4809 }
4810 }
4811 }
4812
4813 if (newBackingMemory) {
4814 if (newBackingMemory != fMemory) {
4815 fOffset = 0;
4816 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4817 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4818 offset, fLength)) {
4819 err = kIOReturnError;
4820 }
4821 }
4822 if (fRedirUPL) {
4823 upl_commit(fRedirUPL, NULL, 0);
4824 upl_deallocate(fRedirUPL);
cb323159 4825 fRedirUPL = NULL;
0a7de745
A
4826 }
4827 if ((false) && physMem) {
cb323159 4828 physMem->redirect(NULL, false);
0a7de745
A
4829 }
4830 }
4831 }while (false);
91447636 4832 }
91447636 4833
0a7de745 4834 UNLOCK;
91447636 4835
0a7de745
A
4836 if (physMem) {
4837 physMem->release();
4838 }
91447636 4839
0a7de745 4840 return err;
91447636
A
4841}
4842
0a7de745
A
4843IOMemoryMap *
4844IOMemoryDescriptor::makeMapping(
4845 IOMemoryDescriptor * owner,
4846 task_t __intoTask,
4847 IOVirtualAddress __address,
4848 IOOptionBits options,
4849 IOByteCount __offset,
4850 IOByteCount __length )
1c79356b 4851{
b0d623f7 4852#ifndef __LP64__
0a7de745
A
4853 if (!(kIOMap64Bit & options)) {
4854 panic("IOMemoryDescriptor::makeMapping !64bit");
4855 }
b0d623f7 4856#endif /* !__LP64__ */
2d21ac55 4857
cb323159
A
4858 IOMemoryDescriptor * mapDesc = NULL;
4859 __block IOMemoryMap * result = NULL;
2d21ac55 4860
0a7de745
A
4861 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4862 mach_vm_size_t offset = mapping->fOffset + __offset;
4863 mach_vm_size_t length = mapping->fLength;
2d21ac55 4864
0a7de745 4865 mapping->fOffset = offset;
1c79356b 4866
0a7de745 4867 LOCK;
1c79356b 4868
0a7de745
A
4869 do{
4870 if (kIOMapStatic & options) {
4871 result = mapping;
4872 addMapping(mapping);
4873 mapping->setMemoryDescriptor(this, 0);
4874 continue;
4875 }
2d21ac55 4876
0a7de745
A
4877 if (kIOMapUnique & options) {
4878 addr64_t phys;
4879 IOByteCount physLen;
1c79356b 4880
2d21ac55 4881// if (owner != this) continue;
1c79356b 4882
0a7de745
A
4883 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4884 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4885 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4886 if (!phys || (physLen < length)) {
4887 continue;
4888 }
4889
4890 mapDesc = IOMemoryDescriptor::withAddressRange(
4891 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4892 if (!mapDesc) {
4893 continue;
4894 }
4895 offset = 0;
4896 mapping->fOffset = offset;
4897 }
4898 } else {
4899 // look for a compatible existing mapping
4900 if (_mappings) {
4901 _mappings->iterateObjects(^(OSObject * object)
4902 {
4903 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
4904 if ((result = lookMapping->copyCompatible(mapping))) {
4905 addMapping(result);
4906 result->setMemoryDescriptor(this, offset);
4907 return true;
4908 }
4909 return false;
4910 });
4911 }
4912 if (result || (options & kIOMapReference)) {
4913 if (result != mapping) {
4914 mapping->release();
4915 mapping = NULL;
4916 }
4917 continue;
4918 }
4919 }
4920
4921 if (!mapDesc) {
4922 mapDesc = this;
4923 mapDesc->retain();
4924 }
4925 IOReturn
cb323159 4926 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
0a7de745
A
4927 if (kIOReturnSuccess == kr) {
4928 result = mapping;
4929 mapDesc->addMapping(result);
4930 result->setMemoryDescriptor(mapDesc, offset);
4931 } else {
4932 mapping->release();
4933 mapping = NULL;
4934 }
4935 }while (false);
1c79356b 4936
0a7de745 4937 UNLOCK;
1c79356b 4938
0a7de745
A
4939 if (mapDesc) {
4940 mapDesc->release();
4941 }
91447636 4942
0a7de745 4943 return result;
1c79356b
A
4944}
4945
0a7de745
A
4946void
4947IOMemoryDescriptor::addMapping(
1c79356b
A
4948 IOMemoryMap * mapping )
4949{
0a7de745 4950 if (mapping) {
cb323159 4951 if (NULL == _mappings) {
0a7de745
A
4952 _mappings = OSSet::withCapacity(1);
4953 }
4954 if (_mappings) {
4955 _mappings->setObject( mapping );
4956 }
4957 }
1c79356b
A
4958}
4959
0a7de745
A
4960void
4961IOMemoryDescriptor::removeMapping(
1c79356b
A
4962 IOMemoryMap * mapping )
4963{
0a7de745
A
4964 if (_mappings) {
4965 _mappings->removeObject( mapping);
4966 }
1c79356b
A
4967}
4968
b0d623f7
A
4969#ifndef __LP64__
4970// obsolete initializers
0a7de745 4971// - initWithOptions is the designated initializer
1c79356b 4972bool
b0d623f7 4973IOMemoryDescriptor::initWithAddress(void * address,
0a7de745
A
4974 IOByteCount length,
4975 IODirection direction)
1c79356b 4976{
0a7de745 4977 return false;
1c79356b
A
4978}
4979
4980bool
b0d623f7 4981IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
0a7de745
A
4982 IOByteCount length,
4983 IODirection direction,
4984 task_t task)
1c79356b 4985{
0a7de745 4986 return false;
1c79356b
A
4987}
4988
4989bool
b0d623f7 4990IOMemoryDescriptor::initWithPhysicalAddress(
0a7de745
A
4991 IOPhysicalAddress address,
4992 IOByteCount length,
4993 IODirection direction )
1c79356b 4994{
0a7de745 4995 return false;
1c79356b
A
4996}
4997
4998bool
b0d623f7 4999IOMemoryDescriptor::initWithRanges(
0a7de745
A
5000 IOVirtualRange * ranges,
5001 UInt32 withCount,
5002 IODirection direction,
5003 task_t task,
5004 bool asReference)
1c79356b 5005{
0a7de745 5006 return false;
1c79356b
A
5007}
5008
5009bool
0a7de745
A
5010IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
5011 UInt32 withCount,
5012 IODirection direction,
5013 bool asReference)
1c79356b 5014{
0a7de745 5015 return false;
1c79356b
A
5016}
5017
0a7de745
A
5018void *
5019IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5020 IOByteCount * lengthOfSegment)
b0d623f7 5021{
cb323159 5022 return NULL;
b0d623f7
A
5023}
5024#endif /* !__LP64__ */
5025
1c79356b
A
5026/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5027
0a7de745
A
5028bool
5029IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
9bccf70c 5030{
cb323159
A
5031 OSSymbol const *keys[2] = {NULL};
5032 OSObject *values[2] = {NULL};
0a7de745
A
5033 OSArray * array;
5034 vm_size_t vcopy_size;
5035
5036 struct SerData {
5037 user_addr_t address;
5038 user_size_t length;
5039 } *vcopy = NULL;
5040 unsigned int index, nRanges;
5041 bool result = false;
5042
5043 IOOptionBits type = _flags & kIOMemoryTypeMask;
5044
5045 if (s == NULL) {
5046 return false;
5047 }
5048
5049 array = OSArray::withCapacity(4);
5050 if (!array) {
5051 return false;
5052 }
5053
5054 nRanges = _rangesCount;
5055 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
5056 result = false;
5057 goto bail;
5058 }
5059 vcopy = (SerData *) IOMalloc(vcopy_size);
cb323159 5060 if (vcopy == NULL) {
0a7de745
A
5061 result = false;
5062 goto bail;
5063 }
5064
5065 keys[0] = OSSymbol::withCString("address");
5066 keys[1] = OSSymbol::withCString("length");
5067
5068 // Copy the volatile data so we don't have to allocate memory
5069 // while the lock is held.
5070 LOCK;
5071 if (nRanges == _rangesCount) {
5072 Ranges vec = _ranges;
5073 for (index = 0; index < nRanges; index++) {
5074 mach_vm_address_t addr; mach_vm_size_t len;
5075 getAddrLenForInd(addr, len, type, vec, index);
5076 vcopy[index].address = addr;
5077 vcopy[index].length = len;
5078 }
5079 } else {
5080 // The descriptor changed out from under us. Give up.
5081 UNLOCK;
5082 result = false;
5083 goto bail;
5084 }
5085 UNLOCK;
5086
5087 for (index = 0; index < nRanges; index++) {
5088 user_addr_t addr = vcopy[index].address;
5089 IOByteCount len = (IOByteCount) vcopy[index].length;
5090 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
cb323159 5091 if (values[0] == NULL) {
0a7de745
A
5092 result = false;
5093 goto bail;
5094 }
5095 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
cb323159 5096 if (values[1] == NULL) {
0a7de745
A
5097 result = false;
5098 goto bail;
5099 }
5100 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
cb323159 5101 if (dict == NULL) {
0a7de745
A
5102 result = false;
5103 goto bail;
5104 }
5105 array->setObject(dict);
5106 dict->release();
5107 values[0]->release();
5108 values[1]->release();
cb323159 5109 values[0] = values[1] = NULL;
0a7de745
A
5110 }
5111
5112 result = array->serialize(s);
5113
5114bail:
5115 if (array) {
5116 array->release();
5117 }
5118 if (values[0]) {
5119 values[0]->release();
5120 }
5121 if (values[1]) {
5122 values[1]->release();
5123 }
5124 if (keys[0]) {
5125 keys[0]->release();
5126 }
5127 if (keys[1]) {
5128 keys[1]->release();
5129 }
5130 if (vcopy) {
5131 IOFree(vcopy, vcopy_size);
5132 }
5133
5134 return result;
9bccf70c
A
5135}
5136
9bccf70c
A
5137/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5138
0b4e3aa0 5139OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
5140#ifdef __LP64__
5141OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5142OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5143OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5144OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5145OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5146OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5147OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5148#else /* !__LP64__ */
55e303ae
A
5149OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
5150OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
5151OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
5152OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 5153OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
5154OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
5155OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
5156#endif /* !__LP64__ */
1c79356b
A
5157OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5158OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5159OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5160OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5161OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5162OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5163OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5164OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 5165
55e303ae 5166/* ex-inline function implementation */
0a7de745 5167IOPhysicalAddress
0c530ab8 5168IOMemoryDescriptor::getPhysicalAddress()
0a7de745 5169{
cb323159 5170 return getPhysicalSegment( 0, NULL );
0a7de745 5171}