/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>
#include <libkern/OSKextLibPrivate.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <mach/memory_entry.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

__END_DECLS

#define kIOMapperWaitSystem	((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
	IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
	kIOPLOnDevice  = 0x00000001,
	kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData {
	const IOGeneralMemoryDescriptor * fMD;
	IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
	upl_t fIOPL;
	vm_address_t fPageInfo;   // Pointer to page list or index into it
	uint32_t fIOMDOffset;     // The offset of this iopl in descriptor
	ppnum_t fMappedPage;      // Page number of first page in this iopl
	unsigned int fPageOffset; // Offset within first page of iopl
	unsigned int fFlags;      // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData {
	IOMapper * fMapper;
	uint64_t fDMAMapAlignment;
	uint64_t fMappedBase;
	uint64_t fMappedLength;
	uint64_t fPreparationID;
#if IOTRACKING
	IOTracking fWireTracking;
#endif /* IOTRACKING */
	unsigned int fPageCnt;
	uint8_t fDMAMapNumAddressBits;
	unsigned char fDiscontig:1;
	unsigned char fCompletionError:1;
	unsigned char fMappedBaseValid:1;
	unsigned char _resv:3;
	unsigned char fDMAAccess:2;

	/* variable length arrays */
	upl_page_info_t fPageList[1]
#if __LP64__
	// align fPageList as for ioPLBlock
	__attribute__((aligned(sizeof(upl_t))))
#endif
	;
	//ioPLBlock fBlocks[1];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
	(((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
	(offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

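// Illustrative note: the OSData backing a descriptor's _memoryEntries is laid
// out as the ioGMDData header, then fPageCnt upl_page_info_t entries, then the
// ioPLBlock array.  For example, a descriptor tracking 4 pages with a single
// UPL block would be sized as computeDataSize(4, 1), i.e.
//
//     offsetof(ioGMDData, fPageList) + 4 * sizeof(upl_page_info_t) + 1 * sizeof(ioPLBlock)
//
// and getIOPLList() returns the address just past fPageList[fPageCnt].
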
enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )

extern "C" {
kern_return_t
device_data_action(
	uintptr_t device_handle,
	ipc_port_t device_pager,
	vm_prot_t protection,
	vm_object_offset_t offset,
	vm_size_t size)
{
	kern_return_t kr;
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
	IOMemoryDescriptor * memDesc;

	LOCK;
	memDesc = ref->dp.memory;
	if (memDesc) {
		memDesc->retain();
		kr = memDesc->handleFault(device_pager, offset, size);
		memDesc->release();
	} else {
		kr = KERN_ABORTED;
	}
	UNLOCK;

	return kr;
}

kern_return_t
device_close(
	uintptr_t device_handle)
{
	IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

	IODelete( ref, IOMemoryDescriptorReserved, 1 );

	return kIOReturnSuccess;
}
}; // end extern "C"

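/*
 * Rough flow (illustrative only): when a mapping backed by the device pager
 * takes a fault, the VM system issues a data request that arrives at
 * device_data_action() above; it looks up the owning descriptor through the
 * IOMemoryDescriptorReserved handle and calls its handleFault() to populate
 * the faulting range.  device_close() runs when the pager is torn down and
 * frees that same reserved bookkeeping.
 */
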
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
    UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
	assert(kIOMemoryTypeUIO == type
	    || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
	    || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
	if (kIOMemoryTypeUIO == type) {
		user_size_t us;
		user_addr_t ad;
		uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
	}
#ifndef __LP64__
	else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
		IOAddressRange cur = r.v64[ind];
		addr = cur.address;
		len = cur.length;
	}
#endif /* !__LP64__ */
	else {
		IOVirtualRange cur = r.v[ind];
		addr = cur.address;
		len = cur.length;
	}
}

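/*
 * Example (illustrative): this is how the rest of this file walks a
 * descriptor's ranges without caring whether they came from a uio, 32-bit or
 * 64-bit virtual ranges, or physical ranges:
 *
 *     mach_vm_address_t addr;
 *     mach_vm_size_t    len;
 *     for (UInt32 ind = 0; ind < _rangesCount; ind++) {
 *         getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, ind);
 *         // ... use addr/len ...
 *     }
 */
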
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
	IOReturn err = kIOReturnSuccess;

	*control = VM_PURGABLE_SET_STATE;

	enum { kIOMemoryPurgeableControlMask = 15 };

	switch (kIOMemoryPurgeableControlMask & newState) {
	case kIOMemoryPurgeableKeepCurrent:
		*control = VM_PURGABLE_GET_STATE;
		break;

	case kIOMemoryPurgeableNonVolatile:
		*state = VM_PURGABLE_NONVOLATILE;
		break;
	case kIOMemoryPurgeableVolatile:
		*state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	case kIOMemoryPurgeableEmpty:
		*state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
		break;
	default:
		err = kIOReturnBadArgument;
		break;
	}

	if (*control == VM_PURGABLE_SET_STATE) {
		// let VM know this call is from the kernel and is allowed to alter
		// the volatility of the memory entry even if it was created with
		// MAP_MEM_PURGABLE_KERNEL_ONLY
		*control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
	}

	return err;
}

static IOReturn
purgeableStateBits(int * state)
{
	IOReturn err = kIOReturnSuccess;

	switch (VM_PURGABLE_STATE_MASK & *state) {
	case VM_PURGABLE_NONVOLATILE:
		*state = kIOMemoryPurgeableNonVolatile;
		break;
	case VM_PURGABLE_VOLATILE:
		*state = kIOMemoryPurgeableVolatile;
		break;
	case VM_PURGABLE_EMPTY:
		*state = kIOMemoryPurgeableEmpty;
		break;
	default:
		*state = kIOMemoryPurgeableNonVolatile;
		err = kIOReturnNotReady;
		break;
	}
	return err;
}

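/*
 * Example (illustrative): setPurgeable() style callers use the two helpers
 * above to translate between IOKit and VM constants, roughly:
 *
 *     vm_purgable_t control;
 *     int           state;
 *     purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state);
 *     // control == VM_PURGABLE_SET_STATE_FROM_KERNEL, state == VM_PURGABLE_VOLATILE
 *     ...
 *     purgeableStateBits(&state);  // maps VM_PURGABLE_* back to kIOMemoryPurgeable*
 */
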
typedef struct {
	unsigned int wimg;
	unsigned int object_type;
} iokit_memtype_entry;

static const iokit_memtype_entry iomd_mem_types[] = {
	[kIODefaultCache] = {VM_WIMG_DEFAULT, MAP_MEM_NOOP},
	[kIOInhibitCache] = {VM_WIMG_IO, MAP_MEM_IO},
	[kIOWriteThruCache] = {VM_WIMG_WTHRU, MAP_MEM_WTHRU},
	[kIOWriteCombineCache] = {VM_WIMG_WCOMB, MAP_MEM_WCOMB},
	[kIOCopybackCache] = {VM_WIMG_COPYBACK, MAP_MEM_COPYBACK},
	[kIOCopybackInnerCache] = {VM_WIMG_INNERWBACK, MAP_MEM_INNERWBACK},
	[kIOPostedWrite] = {VM_WIMG_POSTED, MAP_MEM_POSTED},
	[kIORealTimeCache] = {VM_WIMG_RT, MAP_MEM_RT},
	[kIOPostedReordered] = {VM_WIMG_POSTED_REORDERED, MAP_MEM_POSTED_REORDERED},
	[kIOPostedCombinedReordered] = {VM_WIMG_POSTED_COMBINED_REORDERED, MAP_MEM_POSTED_COMBINED_REORDERED},
};

static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	vm_prot_t prot = 0;
	SET_MAP_MEM(iomd_mem_types[cacheMode].object_type, prot);
	return prot;
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
	assert(cacheMode < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])));
	if (cacheMode == kIODefaultCache) {
		return -1U;
	}
	return iomd_mem_types[cacheMode].wimg;
}

static IOOptionBits
cacheModeForPagerFlags(unsigned int pagerFlags)
{
	pagerFlags &= VM_WIMG_MASK;
	IOOptionBits cacheMode = kIODefaultCache;
	for (IOOptionBits i = 0; i < (sizeof(iomd_mem_types) / sizeof(iomd_mem_types[0])); ++i) {
		if (iomd_mem_types[i].wimg == pagerFlags) {
			cacheMode = i;
			break;
		}
	}
	return (cacheMode == kIODefaultCache) ? kIOCopybackCache : cacheMode;
}

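/*
 * Example (illustrative): iomd_mem_types drives both directions of the
 * cache-mode translation, e.g.
 *
 *     pagerFlagsForCacheMode(kIOInhibitCache)   // -> VM_WIMG_IO
 *     cacheModeForPagerFlags(VM_WIMG_IO)        // -> kIOInhibitCache
 *     vmProtForCacheMode(kIOInhibitCache)       // -> prot bits encoding MAP_MEM_IO
 *
 * kIODefaultCache is special-cased: pagerFlagsForCacheMode() returns -1U for
 * it, and cacheModeForPagerFlags() never returns it (it falls back to
 * kIOCopybackCache instead).
 */
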
356/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
357/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
358
0a7de745
A
359struct IOMemoryEntry {
360 ipc_port_t entry;
361 int64_t offset;
362 uint64_t size;
fe8ab488
A
363};
364
0a7de745
A
365struct IOMemoryReference {
366 volatile SInt32 refCount;
367 vm_prot_t prot;
368 uint32_t capacity;
369 uint32_t count;
370 struct IOMemoryReference * mapRef;
371 IOMemoryEntry entries[0];
fe8ab488
A
372};
373
0a7de745
A
374enum{
375 kIOMemoryReferenceReuse = 0x00000001,
376 kIOMemoryReferenceWrite = 0x00000002,
377 kIOMemoryReferenceCOW = 0x00000004,
fe8ab488
A
378};
379
380SInt32 gIOMemoryReferenceCount;
381
382IOMemoryReference *
383IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
384{
0a7de745
A
385 IOMemoryReference * ref;
386 size_t newSize, oldSize, copySize;
387
388 newSize = (sizeof(IOMemoryReference)
389 - sizeof(ref->entries)
390 + capacity * sizeof(ref->entries[0]));
391 ref = (typeof(ref))IOMalloc(newSize);
392 if (realloc) {
393 oldSize = (sizeof(IOMemoryReference)
394 - sizeof(realloc->entries)
395 + realloc->capacity * sizeof(realloc->entries[0]));
396 copySize = oldSize;
397 if (copySize > newSize) {
398 copySize = newSize;
399 }
400 if (ref) {
401 bcopy(realloc, ref, copySize);
402 }
403 IOFree(realloc, oldSize);
404 } else if (ref) {
405 bzero(ref, sizeof(*ref));
406 ref->refCount = 1;
407 OSIncrementAtomic(&gIOMemoryReferenceCount);
408 }
409 if (!ref) {
cb323159 410 return NULL;
0a7de745
A
411 }
412 ref->capacity = capacity;
413 return ref;
fe8ab488
A
414}
415
0a7de745 416void
fe8ab488
A
417IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
418{
0a7de745
A
419 IOMemoryEntry * entries;
420 size_t size;
421
422 if (ref->mapRef) {
423 memoryReferenceFree(ref->mapRef);
cb323159 424 ref->mapRef = NULL;
0a7de745
A
425 }
426
427 entries = ref->entries + ref->count;
428 while (entries > &ref->entries[0]) {
429 entries--;
430 ipc_port_release_send(entries->entry);
431 }
432 size = (sizeof(IOMemoryReference)
433 - sizeof(ref->entries)
434 + ref->capacity * sizeof(ref->entries[0]));
435 IOFree(ref, size);
436
437 OSDecrementAtomic(&gIOMemoryReferenceCount);
fe8ab488
A
438}
439
0a7de745 440void
fe8ab488
A
441IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
442{
0a7de745
A
443 if (1 == OSDecrementAtomic(&ref->refCount)) {
444 memoryReferenceFree(ref);
445 }
fe8ab488
A
446}
447
448
449IOReturn
450IOGeneralMemoryDescriptor::memoryReferenceCreate(
0a7de745
A
451 IOOptionBits options,
452 IOMemoryReference ** reference)
fe8ab488 453{
0a7de745
A
454 enum { kCapacity = 4, kCapacityInc = 4 };
455
456 kern_return_t err;
457 IOMemoryReference * ref;
458 IOMemoryEntry * entries;
459 IOMemoryEntry * cloneEntries;
460 vm_map_t map;
461 ipc_port_t entry, cloneEntry;
462 vm_prot_t prot;
463 memory_object_size_t actualSize;
464 uint32_t rangeIdx;
465 uint32_t count;
466 mach_vm_address_t entryAddr, endAddr, entrySize;
467 mach_vm_size_t srcAddr, srcLen;
468 mach_vm_size_t nextAddr, nextLen;
469 mach_vm_size_t offset, remain;
470 IOByteCount physLen;
471 IOOptionBits type = (_flags & kIOMemoryTypeMask);
472 IOOptionBits cacheMode;
473 unsigned int pagerFlags;
474 vm_tag_t tag;
cb323159 475 vm_named_entry_kernel_flags_t vmne_kflags;
0a7de745
A
476
477 ref = memoryReferenceAlloc(kCapacity, NULL);
478 if (!ref) {
479 return kIOReturnNoMemory;
480 }
481
482 tag = getVMTag(kernel_map);
cb323159 483 vmne_kflags = VM_NAMED_ENTRY_KERNEL_FLAGS_NONE;
0a7de745
A
484 entries = &ref->entries[0];
485 count = 0;
486 err = KERN_SUCCESS;
487
488 offset = 0;
489 rangeIdx = 0;
490 if (_task) {
491 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
492 } else {
493 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
494 nextLen = physLen;
495
496 // default cache mode for physical
497 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift)) {
cb323159 498 IOOptionBits mode = cacheModeForPagerFlags(IODefaultCacheBits(nextAddr));
0a7de745 499 _flags |= (mode << kIOMemoryBufferCacheShift);
9d749ea3 500 }
0a7de745
A
501 }
502
503 // cache mode & vm_prot
504 prot = VM_PROT_READ;
505 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
506 prot |= vmProtForCacheMode(cacheMode);
507 // VM system requires write access to change cache mode
508 if (kIODefaultCache != cacheMode) {
509 prot |= VM_PROT_WRITE;
510 }
511 if (kIODirectionOut != (kIODirectionOutIn & _flags)) {
512 prot |= VM_PROT_WRITE;
513 }
514 if (kIOMemoryReferenceWrite & options) {
515 prot |= VM_PROT_WRITE;
516 }
517 if (kIOMemoryReferenceCOW & options) {
518 prot |= MAP_MEM_VM_COPY;
519 }
39037602 520
cb323159
A
521 if (kIOMemoryUseReserve & _flags) {
522 prot |= MAP_MEM_GRAB_SECLUDED;
523 }
524
0a7de745
A
525 if ((kIOMemoryReferenceReuse & options) && _memRef) {
526 cloneEntries = &_memRef->entries[0];
527 prot |= MAP_MEM_NAMED_REUSE;
fe8ab488 528 }
fe8ab488 529
0a7de745
A
530 if (_task) {
531 // virtual ranges
532
533 if (kIOMemoryBufferPageable & _flags) {
cb323159
A
534 int ledger_tag, ledger_no_footprint;
535
0a7de745
A
536 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
537 prot |= MAP_MEM_NAMED_CREATE;
cb323159
A
538
539 // default accounting settings:
540 // + "none" ledger tag
541 // + include in footprint
542 // can be changed later with ::setOwnership()
543 ledger_tag = VM_LEDGER_TAG_NONE;
544 ledger_no_footprint = 0;
545
0a7de745
A
546 if (kIOMemoryBufferPurgeable & _flags) {
547 prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
548 if (VM_KERN_MEMORY_SKYWALK == tag) {
cb323159
A
549 // Skywalk purgeable memory accounting:
550 // + "network" ledger tag
551 // + not included in footprint
552 ledger_tag = VM_LEDGER_TAG_NETWORK;
553 ledger_no_footprint = 1;
554 } else {
555 // regular purgeable memory accounting:
556 // + no ledger tag
557 // + included in footprint
558 ledger_tag = VM_LEDGER_TAG_NONE;
559 ledger_no_footprint = 0;
0a7de745
A
560 }
561 }
cb323159
A
562 vmne_kflags.vmnekf_ledger_tag = ledger_tag;
563 vmne_kflags.vmnekf_ledger_no_footprint = ledger_no_footprint;
0a7de745
A
564 if (kIOMemoryUseReserve & _flags) {
565 prot |= MAP_MEM_GRAB_SECLUDED;
566 }
567
568 prot |= VM_PROT_WRITE;
569 map = NULL;
570 } else {
571 map = get_task_map(_task);
572 }
573
574 remain = _length;
575 while (remain) {
576 srcAddr = nextAddr;
577 srcLen = nextLen;
578 nextAddr = 0;
579 nextLen = 0;
580 // coalesce addr range
581 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++) {
582 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
583 if ((srcAddr + srcLen) != nextAddr) {
584 break;
585 }
586 srcLen += nextLen;
587 }
588 entryAddr = trunc_page_64(srcAddr);
589 endAddr = round_page_64(srcAddr + srcLen);
590 do{
591 entrySize = (endAddr - entryAddr);
592 if (!entrySize) {
593 break;
594 }
595 actualSize = entrySize;
596
597 cloneEntry = MACH_PORT_NULL;
598 if (MAP_MEM_NAMED_REUSE & prot) {
599 if (cloneEntries < &_memRef->entries[_memRef->count]) {
600 cloneEntry = cloneEntries->entry;
601 } else {
602 prot &= ~MAP_MEM_NAMED_REUSE;
603 }
604 }
605
606 err = mach_make_memory_entry_internal(map,
cb323159 607 &actualSize, entryAddr, prot, vmne_kflags, &entry, cloneEntry);
0a7de745
A
608
609 if (KERN_SUCCESS != err) {
610 break;
611 }
612 if (actualSize > entrySize) {
613 panic("mach_make_memory_entry_64 actualSize");
614 }
615
616 if (count >= ref->capacity) {
617 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
618 entries = &ref->entries[count];
619 }
620 entries->entry = entry;
621 entries->size = actualSize;
622 entries->offset = offset + (entryAddr - srcAddr);
623 entryAddr += actualSize;
624 if (MAP_MEM_NAMED_REUSE & prot) {
625 if ((cloneEntries->entry == entries->entry)
626 && (cloneEntries->size == entries->size)
627 && (cloneEntries->offset == entries->offset)) {
628 cloneEntries++;
629 } else {
630 prot &= ~MAP_MEM_NAMED_REUSE;
631 }
632 }
633 entries++;
634 count++;
635 }while (true);
636 offset += srcLen;
637 remain -= srcLen;
fe8ab488 638 }
0a7de745
A
639 } else {
640 // _task == 0, physical or kIOMemoryTypeUPL
641 memory_object_t pager;
cb323159 642 vm_size_t size = ptoa_64(_pages);
fe8ab488 643
0a7de745
A
644 if (!getKernelReserved()) {
645 panic("getKernelReserved");
646 }
fe8ab488 647
0a7de745
A
648 reserved->dp.pagerContig = (1 == _rangesCount);
649 reserved->dp.memory = this;
fe8ab488 650
0a7de745
A
651 pagerFlags = pagerFlagsForCacheMode(cacheMode);
652 if (-1U == pagerFlags) {
653 panic("phys is kIODefaultCache");
654 }
655 if (reserved->dp.pagerContig) {
656 pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
657 }
fe8ab488 658
cb323159 659 pager = device_pager_setup((memory_object_t) NULL, (uintptr_t) reserved,
0a7de745
A
660 size, pagerFlags);
661 assert(pager);
662 if (!pager) {
663 err = kIOReturnVMError;
664 } else {
665 srcAddr = nextAddr;
666 entryAddr = trunc_page_64(srcAddr);
667 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
668 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
669 assert(KERN_SUCCESS == err);
670 if (KERN_SUCCESS != err) {
671 device_pager_deallocate(pager);
672 } else {
673 reserved->dp.devicePager = pager;
674 entries->entry = entry;
675 entries->size = size;
676 entries->offset = offset + (entryAddr - srcAddr);
677 entries++;
678 count++;
679 }
680 }
681 }
fe8ab488 682
0a7de745
A
683 ref->count = count;
684 ref->prot = prot;
fe8ab488 685
0a7de745
A
686 if (_task && (KERN_SUCCESS == err)
687 && (kIOMemoryMapCopyOnWrite & _flags)
688 && !(kIOMemoryReferenceCOW & options)) {
689 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
690 }
fe8ab488 691
0a7de745
A
692 if (KERN_SUCCESS == err) {
693 if (MAP_MEM_NAMED_REUSE & prot) {
694 memoryReferenceFree(ref);
695 OSIncrementAtomic(&_memRef->refCount);
696 ref = _memRef;
697 }
698 } else {
699 memoryReferenceFree(ref);
700 ref = NULL;
fe8ab488 701 }
fe8ab488 702
0a7de745 703 *reference = ref;
fe8ab488 704
0a7de745 705 return err;
fe8ab488
A
706}
707
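/*
 * Illustrative summary: on success, *reference holds one IOMemoryEntry per
 * named entry created above (each records the entry's send right plus the
 * byte offset and size it covers within the descriptor), and ref->prot keeps
 * the VM protection and MAP_MEM_* options used to create them, which
 * memoryReferenceMap() below consults when entering mappings.
 */
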
0a7de745 708kern_return_t
fe8ab488
A
709IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
710{
0a7de745
A
711 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
712 IOReturn err;
713 vm_map_offset_t addr;
714
715 addr = ref->mapped;
716
717 err = vm_map_enter_mem_object(map, &addr, ref->size,
718 (vm_map_offset_t) 0,
719 (((ref->options & kIOMapAnywhere)
720 ? VM_FLAGS_ANYWHERE
721 : VM_FLAGS_FIXED)),
722 VM_MAP_KERNEL_FLAGS_NONE,
723 ref->tag,
724 IPC_PORT_NULL,
725 (memory_object_offset_t) 0,
726 false, /* copy */
727 ref->prot,
728 ref->prot,
729 VM_INHERIT_NONE);
730 if (KERN_SUCCESS == err) {
731 ref->mapped = (mach_vm_address_t) addr;
732 ref->map = map;
733 }
734
735 return err;
fe8ab488
A
736}
737
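/*
 * Illustrative note: this helper is used directly below and also as the
 * per-map callback for IOIteratePageableMaps() in memoryReferenceMap(); on
 * success it records the chosen address and map back into the
 * IOMemoryDescriptorMapAllocRef so the caller can continue with the mapping.
 */
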
0a7de745 738IOReturn
fe8ab488 739IOGeneralMemoryDescriptor::memoryReferenceMap(
0a7de745
A
740 IOMemoryReference * ref,
741 vm_map_t map,
742 mach_vm_size_t inoffset,
743 mach_vm_size_t size,
744 IOOptionBits options,
745 mach_vm_address_t * inaddr)
fe8ab488 746{
0a7de745
A
747 IOReturn err;
748 int64_t offset = inoffset;
749 uint32_t rangeIdx, entryIdx;
750 vm_map_offset_t addr, mapAddr;
751 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
752
753 mach_vm_address_t nextAddr;
754 mach_vm_size_t nextLen;
755 IOByteCount physLen;
756 IOMemoryEntry * entry;
757 vm_prot_t prot, memEntryCacheMode;
758 IOOptionBits type;
759 IOOptionBits cacheMode;
760 vm_tag_t tag;
761 // for the kIOMapPrefault option.
762 upl_page_info_t * pageList = NULL;
763 UInt currentPageIndex = 0;
764 bool didAlloc;
765
766 if (ref->mapRef) {
767 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
768 return err;
769 }
770
771 type = _flags & kIOMemoryTypeMask;
772
773 prot = VM_PROT_READ;
774 if (!(kIOMapReadOnly & options)) {
775 prot |= VM_PROT_WRITE;
776 }
777 prot &= ref->prot;
778
779 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
780 if (kIODefaultCache != cacheMode) {
781 // VM system requires write access to update named entry cache mode
782 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
783 }
784
785 tag = getVMTag(map);
786
787 if (_task) {
788 // Find first range for offset
789 if (!_rangesCount) {
790 return kIOReturnBadArgument;
791 }
792 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++) {
793 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
794 if (remain < nextLen) {
795 break;
796 }
797 remain -= nextLen;
798 }
799 } else {
800 rangeIdx = 0;
801 remain = 0;
802 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
803 nextLen = size;
804 }
805
806 assert(remain < nextLen);
807 if (remain >= nextLen) {
808 return kIOReturnBadArgument;
809 }
810
811 nextAddr += remain;
812 nextLen -= remain;
813 pageOffset = (page_mask & nextAddr);
814 addr = 0;
815 didAlloc = false;
816
817 if (!(options & kIOMapAnywhere)) {
818 addr = *inaddr;
819 if (pageOffset != (page_mask & addr)) {
820 return kIOReturnNotAligned;
821 }
822 addr -= pageOffset;
823 }
824
825 // find first entry for offset
826 for (entryIdx = 0;
827 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
828 entryIdx++) {
829 }
830 entryIdx--;
831 entry = &ref->entries[entryIdx];
832
833 // allocate VM
834 size = round_page_64(size + pageOffset);
835 if (kIOMapOverwrite & options) {
836 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
837 map = IOPageableMapForAddress(addr);
838 }
839 err = KERN_SUCCESS;
840 } else {
841 IOMemoryDescriptorMapAllocRef ref;
842 ref.map = map;
843 ref.tag = tag;
844 ref.options = options;
845 ref.size = size;
846 ref.prot = prot;
847 if (options & kIOMapAnywhere) {
848 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
849 ref.mapped = 0;
850 } else {
851 ref.mapped = addr;
852 }
853 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
854 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
855 } else {
856 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
857 }
858 if (KERN_SUCCESS == err) {
859 addr = ref.mapped;
860 map = ref.map;
861 didAlloc = true;
862 }
863 }
864
865 /*
866 * If the memory is associated with a device pager but doesn't have a UPL,
867 * it will be immediately faulted in through the pager via populateDevicePager().
868 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
869 * operations.
870 */
871 if ((reserved != NULL) && (reserved->dp.devicePager) && (_wireCount != 0)) {
872 options &= ~kIOMapPrefault;
873 }
874
875 /*
876 * Prefaulting is only possible if we wired the memory earlier. Check the
877 * memory type, and the underlying data.
878 */
879 if (options & kIOMapPrefault) {
880 /*
881 * The memory must have been wired by calling ::prepare(), otherwise
882 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
883 */
884 assert(_wireCount != 0);
885 assert(_memoryEntries != NULL);
886 if ((_wireCount == 0) ||
887 (_memoryEntries == NULL)) {
888 return kIOReturnBadArgument;
889 }
890
891 // Get the page list.
892 ioGMDData* dataP = getDataP(_memoryEntries);
893 ioPLBlock const* ioplList = getIOPLList(dataP);
894 pageList = getPageList(dataP);
895
896 // Get the number of IOPLs.
897 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
898
		/*
		 * Scan through the IOPL Info Blocks, looking for the first block containing
		 * the offset. The search will go one block past it, so we'll need to step
		 * back to the right block at the end.
		 */
904 UInt ioplIndex = 0;
905 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset) {
906 ioplIndex++;
907 }
908 ioplIndex--;
909
910 // Retrieve the IOPL info block.
911 ioPLBlock ioplInfo = ioplList[ioplIndex];
912
913 /*
914 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
915 * array.
916 */
917 if (ioplInfo.fFlags & kIOPLExternUPL) {
918 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
919 } else {
920 pageList = &pageList[ioplInfo.fPageInfo];
921 }
922
		// Rebase [offset] into the IOPL in order to look up the first page index.
924 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
925
926 // Retrieve the index of the first page corresponding to the offset.
927 currentPageIndex = atop_32(offsetInIOPL);
928 }
929
930 // enter mappings
931 remain = size;
932 mapAddr = addr;
933 addr += pageOffset;
934
935 while (remain && (KERN_SUCCESS == err)) {
936 entryOffset = offset - entry->offset;
937 if ((page_mask & entryOffset) != pageOffset) {
938 err = kIOReturnNotAligned;
939 break;
940 }
941
942 if (kIODefaultCache != cacheMode) {
943 vm_size_t unused = 0;
944 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
945 memEntryCacheMode, NULL, entry->entry);
946 assert(KERN_SUCCESS == err);
947 }
948
949 entryOffset -= pageOffset;
950 if (entryOffset >= entry->size) {
951 panic("entryOffset");
952 }
953 chunk = entry->size - entryOffset;
954 if (chunk) {
955 vm_map_kernel_flags_t vmk_flags;
956
957 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
958 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
959
960 if (chunk > remain) {
961 chunk = remain;
962 }
963 if (options & kIOMapPrefault) {
964 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
965
966 err = vm_map_enter_mem_object_prefault(map,
967 &mapAddr,
968 chunk, 0 /* mask */,
969 (VM_FLAGS_FIXED
970 | VM_FLAGS_OVERWRITE),
971 vmk_flags,
972 tag,
973 entry->entry,
974 entryOffset,
975 prot, // cur
976 prot, // max
977 &pageList[currentPageIndex],
978 nb_pages);
979
980 // Compute the next index in the page list.
981 currentPageIndex += nb_pages;
982 assert(currentPageIndex <= _pages);
983 } else {
984 err = vm_map_enter_mem_object(map,
985 &mapAddr,
986 chunk, 0 /* mask */,
987 (VM_FLAGS_FIXED
988 | VM_FLAGS_OVERWRITE),
989 vmk_flags,
990 tag,
991 entry->entry,
992 entryOffset,
993 false, // copy
994 prot, // cur
995 prot, // max
996 VM_INHERIT_NONE);
997 }
998 if (KERN_SUCCESS != err) {
999 break;
1000 }
1001 remain -= chunk;
1002 if (!remain) {
1003 break;
1004 }
1005 mapAddr += chunk;
1006 offset += chunk - pageOffset;
1007 }
1008 pageOffset = 0;
1009 entry++;
1010 entryIdx++;
1011 if (entryIdx >= ref->count) {
1012 err = kIOReturnOverrun;
1013 break;
1014 }
1015 }
1016
1017 if ((KERN_SUCCESS != err) && didAlloc) {
1018 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1019 addr = 0;
1020 }
1021 *inaddr = addr;
1022
1023 return err;
fe8ab488
A
1024}
1025
0a7de745 1026IOReturn
fe8ab488 1027IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
0a7de745
A
1028 IOMemoryReference * ref,
1029 IOByteCount * residentPageCount,
1030 IOByteCount * dirtyPageCount)
fe8ab488 1031{
0a7de745
A
1032 IOReturn err;
1033 IOMemoryEntry * entries;
1034 unsigned int resident, dirty;
1035 unsigned int totalResident, totalDirty;
1036
1037 totalResident = totalDirty = 0;
1038 err = kIOReturnSuccess;
1039 entries = ref->entries + ref->count;
1040 while (entries > &ref->entries[0]) {
1041 entries--;
1042 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1043 if (KERN_SUCCESS != err) {
1044 break;
1045 }
1046 totalResident += resident;
1047 totalDirty += dirty;
1048 }
1049
1050 if (residentPageCount) {
1051 *residentPageCount = totalResident;
1052 }
1053 if (dirtyPageCount) {
1054 *dirtyPageCount = totalDirty;
1055 }
1056 return err;
fe8ab488
A
1057}
1058
1059IOReturn
1060IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
0a7de745
A
1061 IOMemoryReference * ref,
1062 IOOptionBits newState,
1063 IOOptionBits * oldState)
fe8ab488 1064{
0a7de745
A
1065 IOReturn err;
1066 IOMemoryEntry * entries;
1067 vm_purgable_t control;
1068 int totalState, state;
1069
1070 totalState = kIOMemoryPurgeableNonVolatile;
1071 err = kIOReturnSuccess;
1072 entries = ref->entries + ref->count;
1073 while (entries > &ref->entries[0]) {
1074 entries--;
1075
1076 err = purgeableControlBits(newState, &control, &state);
1077 if (KERN_SUCCESS != err) {
1078 break;
1079 }
1080 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1081 if (KERN_SUCCESS != err) {
1082 break;
1083 }
1084 err = purgeableStateBits(&state);
1085 if (KERN_SUCCESS != err) {
1086 break;
1087 }
1088
1089 if (kIOMemoryPurgeableEmpty == state) {
1090 totalState = kIOMemoryPurgeableEmpty;
1091 } else if (kIOMemoryPurgeableEmpty == totalState) {
1092 continue;
1093 } else if (kIOMemoryPurgeableVolatile == totalState) {
1094 continue;
1095 } else if (kIOMemoryPurgeableVolatile == state) {
1096 totalState = kIOMemoryPurgeableVolatile;
1097 } else {
1098 totalState = kIOMemoryPurgeableNonVolatile;
1099 }
1100 }
1101
1102 if (oldState) {
1103 *oldState = totalState;
1104 }
1105 return err;
fe8ab488
A
1106}
1107
cb323159
A
1108IOReturn
1109IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(
1110 IOMemoryReference * ref,
1111 task_t newOwner,
1112 int newLedgerTag,
1113 IOOptionBits newLedgerOptions)
1114{
1115 IOReturn err, totalErr;
1116 IOMemoryEntry * entries;
1117
1118 totalErr = kIOReturnSuccess;
1119 entries = ref->entries + ref->count;
1120 while (entries > &ref->entries[0]) {
1121 entries--;
1122
1123 err = mach_memory_entry_ownership(entries->entry, newOwner, newLedgerTag, newLedgerOptions);
1124 if (KERN_SUCCESS != err) {
1125 totalErr = err;
1126 }
1127 }
1128
1129 return totalErr;
1130}
1131
fe8ab488
A
1132/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1133
1c79356b
A
1134IOMemoryDescriptor *
1135IOMemoryDescriptor::withAddress(void * address,
0a7de745
A
1136 IOByteCount length,
1137 IODirection direction)
55e303ae 1138{
0a7de745
A
1139 return IOMemoryDescriptor::
1140 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
55e303ae
A
1141}
1142
b0d623f7 1143#ifndef __LP64__
55e303ae 1144IOMemoryDescriptor *
b0d623f7 1145IOMemoryDescriptor::withAddress(IOVirtualAddress address,
0a7de745
A
1146 IOByteCount length,
1147 IODirection direction,
1148 task_t task)
1c79356b 1149{
0a7de745
A
1150 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1151 if (that) {
1152 if (that->initWithAddress(address, length, direction, task)) {
1153 return that;
1154 }
1155
1156 that->release();
1157 }
cb323159 1158 return NULL;
1c79356b 1159}
b0d623f7 1160#endif /* !__LP64__ */
1c79356b
A
1161
1162IOMemoryDescriptor *
55e303ae 1163IOMemoryDescriptor::withPhysicalAddress(
0a7de745
A
1164 IOPhysicalAddress address,
1165 IOByteCount length,
1166 IODirection direction )
55e303ae 1167{
0a7de745 1168 return IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL);
55e303ae
A
1169}
1170
b0d623f7 1171#ifndef __LP64__
55e303ae 1172IOMemoryDescriptor *
0a7de745
A
1173IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1174 UInt32 withCount,
1175 IODirection direction,
1176 task_t task,
1177 bool asReference)
1c79356b 1178{
0a7de745
A
1179 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1180 if (that) {
1181 if (that->initWithRanges(ranges, withCount, direction, task, asReference)) {
1182 return that;
1183 }
1184
1185 that->release();
1186 }
cb323159 1187 return NULL;
1c79356b 1188}
b0d623f7 1189#endif /* !__LP64__ */
1c79356b 1190
0c530ab8
A
1191IOMemoryDescriptor *
1192IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
0a7de745
A
1193 mach_vm_size_t length,
1194 IOOptionBits options,
1195 task_t task)
0c530ab8 1196{
0a7de745
A
1197 IOAddressRange range = { address, length };
1198 return IOMemoryDescriptor::withAddressRanges(&range, 1, options, task);
0c530ab8
A
1199}
1200
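/*
 * Example (illustrative): a typical driver use of this factory to describe a
 * client buffer, wire it, and walk its physical segments (userAddr, userLen
 * and userTask stand in for the caller's buffer):
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddressRange(userAddr, userLen,
 *                                              kIODirectionOut, userTask);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         IOByteCount offset = 0, segLen;
 *         addr64_t    seg;
 *         while ((seg = md->getPhysicalSegment(offset, &segLen, 0))) {
 *             // program DMA with (seg, segLen) ...
 *             offset += segLen;
 *         }
 *         md->complete();
 *     }
 *     if (md) {
 *         md->release();
 *     }
 */
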
1201IOMemoryDescriptor *
1202IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
0a7de745
A
1203 UInt32 rangeCount,
1204 IOOptionBits options,
1205 task_t task)
0c530ab8 1206{
0a7de745
A
1207 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1208 if (that) {
1209 if (task) {
1210 options |= kIOMemoryTypeVirtual64;
1211 } else {
1212 options |= kIOMemoryTypePhysical64;
1213 }
0c530ab8 1214
cb323159 1215 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ NULL)) {
0a7de745
A
1216 return that;
1217 }
0c530ab8 1218
0a7de745
A
1219 that->release();
1220 }
0c530ab8 1221
cb323159 1222 return NULL;
0c530ab8
A
1223}
1224
1c79356b
A
1225
1226/*
b0d623f7 1227 * withOptions:
1c79356b
A
1228 *
1229 * Create a new IOMemoryDescriptor. The buffer is made up of several
1230 * virtual address ranges, from a given task.
1231 *
1232 * Passing the ranges as a reference will avoid an extra allocation.
1233 */
1234IOMemoryDescriptor *
0a7de745
A
1235IOMemoryDescriptor::withOptions(void * buffers,
1236 UInt32 count,
1237 UInt32 offset,
1238 task_t task,
1239 IOOptionBits opts,
1240 IOMapper * mapper)
1c79356b 1241{
0a7de745 1242 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 1243
0a7de745
A
1244 if (self
1245 && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) {
1246 self->release();
cb323159 1247 return NULL;
0a7de745 1248 }
55e303ae 1249
0a7de745 1250 return self;
55e303ae
A
1251}
1252
0a7de745
A
1253bool
1254IOMemoryDescriptor::initWithOptions(void * buffers,
1255 UInt32 count,
1256 UInt32 offset,
1257 task_t task,
1258 IOOptionBits options,
1259 IOMapper * mapper)
55e303ae 1260{
0a7de745 1261 return false;
1c79356b
A
1262}
1263
b0d623f7 1264#ifndef __LP64__
1c79356b 1265IOMemoryDescriptor *
0a7de745
A
1266IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1267 UInt32 withCount,
1268 IODirection direction,
1269 bool asReference)
1c79356b 1270{
0a7de745
A
1271 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1272 if (that) {
1273 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) {
1274 return that;
1275 }
1276
1277 that->release();
1278 }
cb323159 1279 return NULL;
1c79356b
A
1280}
1281
1282IOMemoryDescriptor *
0a7de745
A
1283IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1284 IOByteCount offset,
1285 IOByteCount length,
1286 IODirection direction)
1c79356b 1287{
0a7de745 1288 return IOSubMemoryDescriptor::withSubRange(of, offset, length, direction);
1c79356b 1289}
b0d623f7 1290#endif /* !__LP64__ */
1c79356b 1291
0c530ab8
A
1292IOMemoryDescriptor *
1293IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
91447636 1294{
0a7de745
A
1295 IOGeneralMemoryDescriptor *origGenMD =
1296 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1297
1298 if (origGenMD) {
1299 return IOGeneralMemoryDescriptor::
1300 withPersistentMemoryDescriptor(origGenMD);
1301 } else {
cb323159 1302 return NULL;
0a7de745 1303 }
91447636
A
1304}
1305
0c530ab8
A
1306IOMemoryDescriptor *
1307IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
91447636 1308{
0a7de745
A
1309 IOMemoryReference * memRef;
1310
1311 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) {
cb323159 1312 return NULL;
0a7de745
A
1313 }
1314
1315 if (memRef == originalMD->_memRef) {
1316 originalMD->retain(); // Add a new reference to ourselves
1317 originalMD->memoryReferenceRelease(memRef);
1318 return originalMD;
1319 }
1320
1321 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1322 IOMDPersistentInitData initData = { originalMD, memRef };
1323
1324 if (self
cb323159 1325 && !self->initWithOptions(&initData, 1, 0, NULL, kIOMemoryTypePersistentMD, NULL)) {
0a7de745 1326 self->release();
cb323159 1327 self = NULL;
0a7de745
A
1328 }
1329 return self;
91447636
A
1330}
1331
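/*
 * Illustrative note: withPersistentMemoryDescriptor() lets callers share the
 * named entries behind a kIOMemoryPersistent descriptor.  When the freshly
 * created memory reference turns out to be the original's own _memRef, the
 * original descriptor is retained and returned rather than constructing a
 * second IOGeneralMemoryDescriptor over the same memory.
 */
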
b0d623f7 1332#ifndef __LP64__
1c79356b
A
1333bool
1334IOGeneralMemoryDescriptor::initWithAddress(void * address,
0a7de745
A
1335 IOByteCount withLength,
1336 IODirection withDirection)
1c79356b 1337{
0a7de745
A
1338 _singleRange.v.address = (vm_offset_t) address;
1339 _singleRange.v.length = withLength;
1c79356b 1340
0a7de745 1341 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1c79356b
A
1342}
1343
1344bool
b0d623f7 1345IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
0a7de745
A
1346 IOByteCount withLength,
1347 IODirection withDirection,
1348 task_t withTask)
1c79356b 1349{
0a7de745
A
1350 _singleRange.v.address = address;
1351 _singleRange.v.length = withLength;
1c79356b 1352
0a7de745 1353 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1c79356b
A
1354}
1355
1356bool
1357IOGeneralMemoryDescriptor::initWithPhysicalAddress(
0a7de745
A
1358 IOPhysicalAddress address,
1359 IOByteCount withLength,
1360 IODirection withDirection )
1c79356b 1361{
0a7de745
A
1362 _singleRange.p.address = address;
1363 _singleRange.p.length = withLength;
1c79356b 1364
0a7de745 1365 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1c79356b
A
1366}
1367
55e303ae
A
1368bool
1369IOGeneralMemoryDescriptor::initWithPhysicalRanges(
0a7de745
A
1370 IOPhysicalRange * ranges,
1371 UInt32 count,
1372 IODirection direction,
1373 bool reference)
55e303ae 1374{
0a7de745 1375 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
55e303ae 1376
0a7de745
A
1377 if (reference) {
1378 mdOpts |= kIOMemoryAsReference;
1379 }
55e303ae 1380
cb323159 1381 return initWithOptions(ranges, count, 0, NULL, mdOpts, /* mapper */ NULL);
55e303ae
A
1382}
1383
1384bool
1385IOGeneralMemoryDescriptor::initWithRanges(
0a7de745
A
1386 IOVirtualRange * ranges,
1387 UInt32 count,
1388 IODirection direction,
1389 task_t task,
1390 bool reference)
55e303ae 1391{
0a7de745
A
1392 IOOptionBits mdOpts = direction;
1393
1394 if (reference) {
1395 mdOpts |= kIOMemoryAsReference;
1396 }
1397
1398 if (task) {
1399 mdOpts |= kIOMemoryTypeVirtual;
1400
1401 // Auto-prepare if this is a kernel memory descriptor as very few
1402 // clients bother to prepare() kernel memory.
1403 // But it was not enforced so what are you going to do?
1404 if (task == kernel_task) {
1405 mdOpts |= kIOMemoryAutoPrepare;
1406 }
1407 } else {
1408 mdOpts |= kIOMemoryTypePhysical;
1409 }
1410
cb323159 1411 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ NULL);
55e303ae 1412}
b0d623f7 1413#endif /* !__LP64__ */
55e303ae 1414
1c79356b 1415/*
55e303ae 1416 * initWithOptions:
1c79356b 1417 *
55e303ae 1418 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
91447636
A
 * from a given task, several physical ranges, a UPL from the ubc
 * system, or a uio (may be 64-bit) from the BSD subsystem.
1c79356b
A
1421 *
1422 * Passing the ranges as a reference will avoid an extra allocation.
1423 *
55e303ae
A
1424 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1425 * existing instance -- note this behavior is not commonly supported in other
1426 * I/O Kit classes, although it is supported here.
1c79356b 1427 */
55e303ae 1428
1c79356b 1429bool
0a7de745
A
1430IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1431 UInt32 count,
1432 UInt32 offset,
1433 task_t task,
1434 IOOptionBits options,
1435 IOMapper * mapper)
55e303ae 1436{
0a7de745 1437 IOOptionBits type = options & kIOMemoryTypeMask;
91447636 1438
6d2010ae 1439#ifndef __LP64__
0a7de745
A
1440 if (task
1441 && (kIOMemoryTypeVirtual == type)
1442 && vm_map_is_64bit(get_task_map(task))
1443 && ((IOVirtualRange *) buffers)->address) {
1444 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1445 return false;
1446 }
6d2010ae
A
1447#endif /* !__LP64__ */
1448
0a7de745
A
	// Grab the original MD's configuration data to initialise the
	// arguments to this function.
1451 if (kIOMemoryTypePersistentMD == type) {
1452 IOMDPersistentInitData *initData = (typeof(initData))buffers;
1453 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1454 ioGMDData *dataP = getDataP(orig->_memoryEntries);
91447636 1455
0a7de745
A
1456 // Only accept persistent memory descriptors with valid dataP data.
1457 assert(orig->_rangesCount == 1);
1458 if (!(orig->_flags & kIOMemoryPersistent) || !dataP) {
1459 return false;
1460 }
91447636 1461
0a7de745
A
1462 _memRef = initData->fMemRef; // Grab the new named entry
1463 options = orig->_flags & ~kIOMemoryAsReference;
1464 type = options & kIOMemoryTypeMask;
1465 buffers = orig->_ranges.v;
1466 count = orig->_rangesCount;
55e303ae 1467
0a7de745
A
1468 // Now grab the original task and whatever mapper was previously used
1469 task = orig->_task;
1470 mapper = dataP->fMapper;
91447636 1471
0a7de745
A
1472 // We are ready to go through the original initialisation now
1473 }
91447636 1474
0a7de745
A
1475 switch (type) {
1476 case kIOMemoryTypeUIO:
1477 case kIOMemoryTypeVirtual:
b0d623f7 1478#ifndef __LP64__
0a7de745 1479 case kIOMemoryTypeVirtual64:
b0d623f7 1480#endif /* !__LP64__ */
0a7de745
A
1481 assert(task);
1482 if (!task) {
1483 return false;
1484 }
1485 break;
55e303ae 1486
0a7de745 1487 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
b0d623f7 1488#ifndef __LP64__
0a7de745 1489 case kIOMemoryTypePhysical64:
b0d623f7 1490#endif /* !__LP64__ */
0a7de745
A
1491 case kIOMemoryTypeUPL:
1492 assert(!task);
1493 break;
1494 default:
1495 return false; /* bad argument */
2d21ac55 1496 }
0a7de745
A
1497
1498 assert(buffers);
1499 assert(count);
1500
1501 /*
1502 * We can check the _initialized instance variable before having ever set
1503 * it to an initial value because I/O Kit guarantees that all our instance
1504 * variables are zeroed on an object's allocation.
1505 */
1506
1507 if (_initialized) {
1508 /*
1509 * An existing memory descriptor is being retargeted to point to
1510 * somewhere else. Clean up our present state.
1511 */
1512 IOOptionBits type = _flags & kIOMemoryTypeMask;
1513 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type)) {
1514 while (_wireCount) {
1515 complete();
1516 }
1517 }
1518 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1519 if (kIOMemoryTypeUIO == type) {
1520 uio_free((uio_t) _ranges.v);
1521 }
b0d623f7 1522#ifndef __LP64__
0a7de745
A
1523 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1524 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1525 }
b0d623f7 1526#endif /* !__LP64__ */
0a7de745
A
1527 else {
1528 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1529 }
1530 }
1531
1532 options |= (kIOMemoryRedirected & _flags);
1533 if (!(kIOMemoryRedirected & options)) {
1534 if (_memRef) {
1535 memoryReferenceRelease(_memRef);
cb323159 1536 _memRef = NULL;
0a7de745
A
1537 }
1538 if (_mappings) {
1539 _mappings->flushCollection();
1540 }
1541 }
1542 } else {
1543 if (!super::init()) {
1544 return false;
1545 }
1546 _initialized = true;
0c530ab8 1547 }
2d21ac55 1548
0a7de745
A
1549 // Grab the appropriate mapper
1550 if (kIOMemoryHostOrRemote & options) {
1551 options |= kIOMemoryMapperNone;
1552 }
1553 if (kIOMemoryMapperNone & options) {
cb323159 1554 mapper = NULL; // No Mapper
0a7de745
A
1555 } else if (mapper == kIOMapperSystem) {
1556 IOMapper::checkForSystemMapper();
1557 gIOSystemMapper = mapper = IOMapper::gSystem;
1558 }
55e303ae 1559
0a7de745
A
1560 // Remove the dynamic internal use flags from the initial setting
1561 options &= ~(kIOMemoryPreparedReadOnly);
1562 _flags = options;
1563 _task = task;
0c530ab8 1564
b0d623f7 1565#ifndef __LP64__
0a7de745 1566 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
b0d623f7 1567#endif /* !__LP64__ */
91447636 1568
0a7de745
A
1569 _dmaReferences = 0;
1570 __iomd_reservedA = 0;
1571 __iomd_reservedB = 0;
1572 _highestPage = 0;
1573
1574 if (kIOMemoryThreadSafe & options) {
1575 if (!_prepareLock) {
1576 _prepareLock = IOLockAlloc();
1577 }
1578 } else if (_prepareLock) {
1579 IOLockFree(_prepareLock);
1580 _prepareLock = NULL;
91447636 1581 }
0c530ab8 1582
0a7de745
A
1583 if (kIOMemoryTypeUPL == type) {
1584 ioGMDData *dataP;
1585 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1586
1587 if (!initMemoryEntries(dataSize, mapper)) {
1588 return false;
1589 }
1590 dataP = getDataP(_memoryEntries);
1591 dataP->fPageCnt = 0;
1592 switch (kIOMemoryDirectionMask & options) {
1593 case kIODirectionOut:
1594 dataP->fDMAAccess = kIODMAMapReadAccess;
1595 break;
1596 case kIODirectionIn:
1597 dataP->fDMAAccess = kIODMAMapWriteAccess;
1598 break;
1599 case kIODirectionNone:
1600 case kIODirectionOutIn:
1601 default:
1602 panic("bad dir for upl 0x%x\n", (int) options);
1603 break;
1604 }
1605 // _wireCount++; // UPLs start out life wired
1606
1607 _length = count;
1608 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1609
1610 ioPLBlock iopl;
1611 iopl.fIOPL = (upl_t) buffers;
1612 upl_set_referenced(iopl.fIOPL, true);
1613 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1614
1615 if (upl_get_size(iopl.fIOPL) < (count + offset)) {
1616 panic("short external upl");
1617 }
1618
1619 _highestPage = upl_get_highest_page(iopl.fIOPL);
1620
		// Set the flag kIOPLOnDevice, conveniently equal to 1
1622 iopl.fFlags = pageList->device | kIOPLExternUPL;
1623 if (!pageList->device) {
1624 // Pre-compute the offset into the UPL's page list
1625 pageList = &pageList[atop_32(offset)];
1626 offset &= PAGE_MASK;
1627 }
1628 iopl.fIOMDOffset = 0;
1629 iopl.fMappedPage = 0;
1630 iopl.fPageInfo = (vm_address_t) pageList;
1631 iopl.fPageOffset = offset;
1632 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1633 } else {
1634 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1635 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1636
1637 // Initialize the memory descriptor
1638 if (options & kIOMemoryAsReference) {
1639#ifndef __LP64__
1640 _rangesIsAllocated = false;
b0d623f7 1641#endif /* !__LP64__ */
0a7de745
A
1642
1643 // Hack assignment to get the buffer arg into _ranges.
1644 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1645 // work, C++ sigh.
1646 // This also initialises the uio & physical ranges.
1647 _ranges.v = (IOVirtualRange *) buffers;
b0d623f7 1648 } else {
0a7de745
A
1649#ifndef __LP64__
1650 _rangesIsAllocated = true;
1651#endif /* !__LP64__ */
1652 switch (type) {
1653 case kIOMemoryTypeUIO:
1654 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1655 break;
1656
1657#ifndef __LP64__
1658 case kIOMemoryTypeVirtual64:
1659 case kIOMemoryTypePhysical64:
1660 if (count == 1
1661#ifndef __arm__
1662 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1663#endif
1664 ) {
1665 if (kIOMemoryTypeVirtual64 == type) {
1666 type = kIOMemoryTypeVirtual;
1667 } else {
1668 type = kIOMemoryTypePhysical;
1669 }
1670 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1671 _rangesIsAllocated = false;
1672 _ranges.v = &_singleRange.v;
1673 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1674 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1675 break;
1676 }
1677 _ranges.v64 = IONew(IOAddressRange, count);
1678 if (!_ranges.v64) {
1679 return false;
1680 }
1681 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1682 break;
1683#endif /* !__LP64__ */
1684 case kIOMemoryTypeVirtual:
1685 case kIOMemoryTypePhysical:
1686 if (count == 1) {
1687 _flags |= kIOMemoryAsReference;
1688#ifndef __LP64__
1689 _rangesIsAllocated = false;
1690#endif /* !__LP64__ */
1691 _ranges.v = &_singleRange.v;
1692 } else {
1693 _ranges.v = IONew(IOVirtualRange, count);
1694 if (!_ranges.v) {
1695 return false;
1696 }
1697 }
1698 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1699 break;
1700 }
b0d623f7 1701 }
0a7de745
A
1702 _rangesCount = count;
1703
1704 // Find starting address within the vector of ranges
1705 Ranges vec = _ranges;
1706 mach_vm_size_t totalLength = 0;
1707 unsigned int ind, pages = 0;
1708 for (ind = 0; ind < count; ind++) {
1709 mach_vm_address_t addr;
1710 mach_vm_address_t endAddr;
1711 mach_vm_size_t len;
1712
1713 // addr & len are returned by this function
1714 getAddrLenForInd(addr, len, type, vec, ind);
1715 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) {
1716 break;
1717 }
1718 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) {
1719 break;
1720 }
1721 if (os_add_overflow(totalLength, len, &totalLength)) {
1722 break;
1723 }
1724 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1725 ppnum_t highPage = atop_64(addr + len - 1);
1726 if (highPage > _highestPage) {
1727 _highestPage = highPage;
1728 }
1729 }
1730 }
1731 if ((ind < count)
1732 || (totalLength != ((IOByteCount) totalLength))) {
1733 return false; /* overflow */
1734 }
1735 _length = totalLength;
1736 _pages = pages;
1737
1738 // Auto-prepare memory at creation time.
		// Implied completion when the descriptor is freed
1740
1741
1742 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1743 _wireCount++; // Physical MDs are, by definition, wired
1744 } else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1745 ioGMDData *dataP;
1746 unsigned dataSize;
1747
1748 if (_pages > atop_64(max_mem)) {
1749 return false;
1750 }
1751
1752 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1753 if (!initMemoryEntries(dataSize, mapper)) {
1754 return false;
1755 }
1756 dataP = getDataP(_memoryEntries);
1757 dataP->fPageCnt = _pages;
1758
1759 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1760 && (VM_KERN_MEMORY_NONE == _kernelTag)) {
1761 _kernelTag = IOMemoryTag(kernel_map);
1762 if (_kernelTag == gIOSurfaceTag) {
1763 _userTag = VM_MEMORY_IOSURFACE;
1764 }
1765 }
1766
1767 if ((kIOMemoryPersistent & _flags) && !_memRef) {
1768 IOReturn
1769 err = memoryReferenceCreate(0, &_memRef);
1770 if (kIOReturnSuccess != err) {
1771 return false;
1772 }
1773 }
1774
1775 if ((_flags & kIOMemoryAutoPrepare)
1776 && prepare() != kIOReturnSuccess) {
1777 return false;
1778 }
1779 }
1780 }
91447636 1781
0a7de745 1782 return true;
de355530
A
1783}
1784
1c79356b
A
1785/*
1786 * free
1787 *
1788 * Free resources.
1789 */
0a7de745
A
1790void
1791IOGeneralMemoryDescriptor::free()
1c79356b 1792{
0a7de745 1793 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 1794
0a7de745
A
1795 if (reserved) {
1796 LOCK;
cb323159 1797 reserved->dp.memory = NULL;
0a7de745
A
1798 UNLOCK;
1799 }
1800 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
1801 ioGMDData * dataP;
1802 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid) {
1803 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1804 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1805 }
1806 } else {
1807 while (_wireCount) {
1808 complete();
1809 }
bd504ef0 1810 }
bd504ef0 1811
0a7de745
A
1812 if (_memoryEntries) {
1813 _memoryEntries->release();
1814 }
55e303ae 1815
0a7de745
A
1816 if (_ranges.v && !(kIOMemoryAsReference & _flags)) {
1817 if (kIOMemoryTypeUIO == type) {
1818 uio_free((uio_t) _ranges.v);
1819 }
b0d623f7 1820#ifndef __LP64__
0a7de745
A
1821 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
1822 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1823 }
b0d623f7 1824#endif /* !__LP64__ */
0a7de745
A
1825 else {
1826 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1827 }
1828
1829 _ranges.v = NULL;
1830 }
1831
1832 if (reserved) {
cb323159 1833 cleanKernelReserved(reserved);
0a7de745
A
1834 if (reserved->dp.devicePager) {
1835 // memEntry holds a ref on the device pager which owns reserved
1836 // (IOMemoryDescriptorReserved) so no reserved access after this point
1837 device_pager_deallocate((memory_object_t) reserved->dp.devicePager );
1838 } else {
1839 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1840 }
1841 reserved = NULL;
1842 }
1843
1844 if (_memRef) {
1845 memoryReferenceRelease(_memRef);
1846 }
1847 if (_prepareLock) {
1848 IOLockFree(_prepareLock);
1849 }
1850
1851 super::free();
1c79356b
A
1852}
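// Note: clients do not call free() directly. As with any OSObject it runs
// automatically when the last reference is dropped. A minimal sketch of the
// usual lifetime, assuming addr/len describe a valid buffer in `task`:
#if 0
	IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
		addr, len, kIODirectionOutIn, task);
	if (md) {
		// ... use the descriptor ...
		md->release();  // retain count reaches zero -> ::free() runs
	}
#endif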
1853
b0d623f7 1854#ifndef __LP64__
0a7de745
A
1855void
1856IOGeneralMemoryDescriptor::unmapFromKernel()
b0d623f7 1857{
0a7de745 1858 panic("IOGMD::unmapFromKernel deprecated");
b0d623f7
A
1859}
1860
0a7de745
A
1861void
1862IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
b0d623f7 1863{
0a7de745 1864 panic("IOGMD::mapIntoKernel deprecated");
b0d623f7
A
1865}
1866#endif /* !__LP64__ */
1c79356b
A
1867
1868/*
1869 * getDirection:
1870 *
1871 * Get the direction of the transfer.
1872 */
0a7de745
A
1873IODirection
1874IOMemoryDescriptor::getDirection() const
1c79356b 1875{
b0d623f7 1876#ifndef __LP64__
0a7de745
A
1877 if (_direction) {
1878 return _direction;
1879 }
b0d623f7 1880#endif /* !__LP64__ */
0a7de745 1881 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
1882}
1883
1884/*
1885 * getLength:
1886 *
1887 * Get the length of the transfer (over all ranges).
1888 */
0a7de745
A
1889IOByteCount
1890IOMemoryDescriptor::getLength() const
1c79356b 1891{
0a7de745 1892 return _length;
1c79356b
A
1893}
1894
0a7de745
A
1895void
1896IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b 1897{
0a7de745 1898 _tag = tag;
1c79356b
A
1899}
1900
0a7de745
A
1901IOOptionBits
1902IOMemoryDescriptor::getTag( void )
1c79356b 1903{
0a7de745 1904 return _tag;
1c79356b
A
1905}
1906
0a7de745
A
1907uint64_t
1908IOMemoryDescriptor::getFlags(void)
5ba3f43e 1909{
0a7de745 1910 return _flags;
5ba3f43e
A
1911}
1912
b0d623f7 1913#ifndef __LP64__
39037602
A
1914#pragma clang diagnostic push
1915#pragma clang diagnostic ignored "-Wdeprecated-declarations"
1916
55e303ae 1917// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
0c530ab8
A
1918IOPhysicalAddress
1919IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1920{
0a7de745 1921 addr64_t physAddr = 0;
1c79356b 1922
0a7de745
A
1923 if (prepare() == kIOReturnSuccess) {
1924 physAddr = getPhysicalSegment64( offset, length );
1925 complete();
1926 }
0b4e3aa0 1927
0a7de745 1928 return (IOPhysicalAddress) physAddr; // truncated but only page offset is used
0b4e3aa0 1929}
39037602
A
1930
1931#pragma clang diagnostic pop
1932
b0d623f7 1933#endif /* !__LP64__ */
0b4e3aa0 1934
0a7de745
A
1935IOByteCount
1936IOMemoryDescriptor::readBytes
1937(IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1938{
0a7de745
A
1939 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1940 IOByteCount remaining;
1941
1942 // Assert that this entire I/O is within the available range
1943 assert(offset <= _length);
1944 assert(offset + length <= _length);
1945 if ((offset >= _length)
1946 || ((offset + length) > _length)) {
1947 return 0;
1948 }
1c79356b 1949
0a7de745
A
1950 assert(!(kIOMemoryRemote & _flags));
1951 if (kIOMemoryRemote & _flags) {
1952 return 0;
1953 }
5ba3f43e 1954
0a7de745
A
1955 if (kIOMemoryThreadSafe & _flags) {
1956 LOCK;
1957 }
b0d623f7 1958
0a7de745
A
1959 remaining = length = min(length, _length - offset);
1960 while (remaining) { // (process another target segment?)
1961 addr64_t srcAddr64;
1962 IOByteCount srcLen;
1c79356b 1963
0a7de745
A
1964 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1965 if (!srcAddr64) {
1966 break;
1967 }
1c79356b 1968
0a7de745
A
1969 // Clip segment length to remaining
1970 if (srcLen > remaining) {
1971 srcLen = remaining;
1972 }
1c79356b 1973
0a7de745
A
1974 copypv(srcAddr64, dstAddr, srcLen,
1975 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1976
0a7de745
A
1977 dstAddr += srcLen;
1978 offset += srcLen;
1979 remaining -= srcLen;
1980 }
1c79356b 1981
0a7de745
A
1982 if (kIOMemoryThreadSafe & _flags) {
1983 UNLOCK;
1984 }
b0d623f7 1985
0a7de745 1986 assert(!remaining);
1c79356b 1987
0a7de745 1988 return length - remaining;
55e303ae 1989}
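// readBytes() copies out of the descriptor's backing store with physical
// copies (copypv), so the memory does not have to be mapped into the kernel.
// A minimal sketch, assuming `md` is a prepared descriptor at least 64 bytes
// long and the caller wants its first bytes in a local buffer:
#if 0
	char        header[64];
	IOByteCount got = md->readBytes(0, header, sizeof(header));
	if (got != sizeof(header)) {
		// short read: the request ran past the end of the descriptor
	}
#endif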
0b4e3aa0 1990
0a7de745
A
1991IOByteCount
1992IOMemoryDescriptor::writeBytes
1993(IOByteCount inoffset, const void *bytes, IOByteCount length)
55e303ae 1994{
0a7de745
A
1995 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1996 IOByteCount remaining;
1997 IOByteCount offset = inoffset;
0b4e3aa0 1998
0a7de745
A
1999 // Assert that this entire I/O is within the available range
2000 assert(offset <= _length);
2001 assert(offset + length <= _length);
0b4e3aa0 2002
0a7de745 2003 assert( !(kIOMemoryPreparedReadOnly & _flags));
0b4e3aa0 2004
0a7de745
A
2005 if ((kIOMemoryPreparedReadOnly & _flags)
2006 || (offset >= _length)
2007 || ((offset + length) > _length)) {
2008 return 0;
2009 }
0b4e3aa0 2010
0a7de745
A
2011 assert(!(kIOMemoryRemote & _flags));
2012 if (kIOMemoryRemote & _flags) {
2013 return 0;
2014 }
5ba3f43e 2015
0a7de745
A
2016 if (kIOMemoryThreadSafe & _flags) {
2017 LOCK;
2018 }
b0d623f7 2019
0a7de745
A
2020 remaining = length = min(length, _length - offset);
2021 while (remaining) { // (process another target segment?)
2022 addr64_t dstAddr64;
2023 IOByteCount dstLen;
0b4e3aa0 2024
0a7de745
A
2025 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2026 if (!dstAddr64) {
2027 break;
2028 }
0b4e3aa0 2029
0a7de745
A
2030 // Clip segment length to remaining
2031 if (dstLen > remaining) {
2032 dstLen = remaining;
2033 }
0b4e3aa0 2034
0a7de745
A
2035 if (!srcAddr) {
2036 bzero_phys(dstAddr64, dstLen);
2037 } else {
2038 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
2039 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
2040 srcAddr += dstLen;
2041 }
2042 offset += dstLen;
2043 remaining -= dstLen;
fe8ab488 2044 }
1c79356b 2045
0a7de745
A
2046 if (kIOMemoryThreadSafe & _flags) {
2047 UNLOCK;
2048 }
b0d623f7 2049
0a7de745 2050 assert(!remaining);
55e303ae 2051
d9a64523 2052#if defined(__x86_64__)
0a7de745 2053 // copypv does not cppvFsnk on intel
d9a64523 2054#else
0a7de745
A
2055 if (!srcAddr) {
2056 performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
2057 }
d9a64523 2058#endif
fe8ab488 2059
0a7de745 2060 return length - remaining;
1c79356b
A
2061}
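// writeBytes() mirrors readBytes() for the inbound direction; it refuses
// descriptors prepared read-only, and a NULL source pointer zero-fills the
// range via bzero_phys(). A minimal sketch, assuming `md` is prepared,
// writable and at least page_size bytes long:
#if 0
	IOByteCount done = md->writeBytes(0, NULL, page_size);
	if (done != page_size) {
		// descriptor shorter than a page, or prepared read-only
	}
#endif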
2062
b0d623f7 2063#ifndef __LP64__
0a7de745
A
2064void
2065IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
b0d623f7 2066{
0a7de745 2067 panic("IOGMD::setPosition deprecated");
b0d623f7
A
2068}
2069#endif /* !__LP64__ */
2070
2071static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
2072
2073uint64_t
2074IOGeneralMemoryDescriptor::getPreparationID( void )
2075{
0a7de745
A
2076 ioGMDData *dataP;
2077
2078 if (!_wireCount) {
2079 return kIOPreparationIDUnprepared;
2080 }
2081
2082 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
2083 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64)) {
2084 IOMemoryDescriptor::setPreparationID();
2085 return IOMemoryDescriptor::getPreparationID();
2086 }
2087
2088 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries))) {
2089 return kIOPreparationIDUnprepared;
2090 }
2091
2092 if (kIOPreparationIDUnprepared == dataP->fPreparationID) {
cb323159
A
2093 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2094 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &dataP->fPreparationID);
0a7de745
A
2095 }
2096 return dataP->fPreparationID;
b0d623f7
A
2097}
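// The preparation ID is a 64-bit value drawn from a global counter the first
// time it is asked for after the descriptor has been wired; while unprepared,
// kIOPreparationIDUnprepared is returned. DMA layers can use it to notice
// that cached state was built against an earlier prepare(). A sketch, where
// `cachedID` and rebuildSegmentCache() are hypothetical consumer-side names:
#if 0
	if (cachedID != md->getPreparationID()) {
		rebuildSegmentCache(md);           // hypothetical helper
		cachedID = md->getPreparationID();
	}
#endif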
2098
cb323159
A
2099void
2100IOMemoryDescriptor::cleanKernelReserved( IOMemoryDescriptorReserved * reserved )
2101{
2102 if (reserved->creator) {
2103 task_deallocate(reserved->creator);
2104 reserved->creator = NULL;
2105 }
2106}
2107
0a7de745
A
2108IOMemoryDescriptorReserved *
2109IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 2110{
0a7de745 2111 if (!reserved) {
cb323159 2112 reserved = IONewZero(IOMemoryDescriptorReserved, 1);
0a7de745
A
2113 }
2114 return reserved;
316670eb
A
2115}
2116
0a7de745
A
2117void
2118IOMemoryDescriptor::setPreparationID( void )
316670eb 2119{
0a7de745 2120 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID)) {
cb323159
A
2121 SInt64 newID = OSIncrementAtomic64(&gIOMDPreparationID);
2122 OSCompareAndSwap64(kIOPreparationIDUnprepared, newID, &reserved->preparationID);
0a7de745 2123 }
316670eb
A
2124}
2125
0a7de745
A
2126uint64_t
2127IOMemoryDescriptor::getPreparationID( void )
316670eb 2128{
0a7de745
A
2129 if (reserved) {
2130 return reserved->preparationID;
2131 } else {
2132 return kIOPreparationIDUnsupported;
2133 }
b0d623f7 2134}
de355530 2135
0a7de745 2136void
cb323159 2137IOMemoryDescriptor::setVMTags(uint32_t kernelTag, uint32_t userTag)
39037602 2138{
cb323159
A
2139 _kernelTag = (vm_tag_t) kernelTag;
2140 _userTag = (vm_tag_t) userTag;
39037602
A
2141}
2142
cb323159 2143uint32_t
0a7de745 2144IOMemoryDescriptor::getVMTag(vm_map_t map)
39037602 2145{
0a7de745
A
2146 if (vm_kernel_map_is_kernel(map)) {
2147 if (VM_KERN_MEMORY_NONE != _kernelTag) {
cb323159 2148 return (uint32_t) _kernelTag;
0a7de745
A
2149 }
2150 } else {
2151 if (VM_KERN_MEMORY_NONE != _userTag) {
cb323159 2152 return (uint32_t) _userTag;
0a7de745
A
2153 }
2154 }
2155 return IOMemoryTag(map);
39037602
A
2156}
2157
0a7de745
A
2158IOReturn
2159IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 2160{
0a7de745
A
2161 IOReturn err = kIOReturnSuccess;
2162 DMACommandOps params;
2163 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2164 ioGMDData *dataP;
99c3a104 2165
0a7de745
A
2166 params = (op & ~kIOMDDMACommandOperationMask & op);
2167 op &= kIOMDDMACommandOperationMask;
99c3a104 2168
0a7de745
A
2169 if (kIOMDDMAMap == op) {
2170 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2171 return kIOReturnUnderrun;
2172 }
99c3a104 2173
0a7de745 2174 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
99c3a104 2175
0a7de745
A
2176 if (!_memoryEntries
2177 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2178 return kIOReturnNoMemory;
2179 }
99c3a104 2180
0a7de745
A
2181 if (_memoryEntries && data->fMapper) {
2182 bool remap, keepMap;
2183 dataP = getDataP(_memoryEntries);
39236c6e 2184
0a7de745
A
2185 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) {
2186 dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2187 }
2188 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) {
2189 dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2190 }
39236c6e 2191
0a7de745
A
2192 keepMap = (data->fMapper == gIOSystemMapper);
2193 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
3e170ce0 2194
0a7de745
A
2195 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2196 IOLockLock(_prepareLock);
2197 }
a39ff7e2 2198
0a7de745
A
2199 remap = (!keepMap);
2200 remap |= (dataP->fDMAMapNumAddressBits < 64)
2201 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2202 remap |= (dataP->fDMAMapAlignment > page_size);
3e170ce0 2203
0a7de745 2204 if (remap || !dataP->fMappedBaseValid) {
5ba3f43e 2205// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
0a7de745
A
2206 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2207 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid) {
2208 dataP->fMappedBase = data->fAlloc;
2209 dataP->fMappedBaseValid = true;
2210 dataP->fMappedLength = data->fAllocLength;
2211 data->fAllocLength = 0; // IOMD owns the alloc now
2212 }
2213 } else {
2214 data->fAlloc = dataP->fMappedBase;
2215 data->fAllocLength = 0; // give out IOMD map
2216 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2217 }
2218 data->fMapContig = !dataP->fDiscontig;
2219
2220 if ((data->fMapper == gIOSystemMapper) && _prepareLock) {
2221 IOLockUnlock(_prepareLock);
2222 }
2223 }
2224 return err;
99c3a104 2225 }
0a7de745
A
2226 if (kIOMDDMAUnmap == op) {
2227 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2228 return kIOReturnUnderrun;
2229 }
2230 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
4452a7af 2231
0a7de745 2232 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
99c3a104 2233
0a7de745 2234 return kIOReturnSuccess;
cc8bc92a 2235 }
0c530ab8 2236
0a7de745
A
2237 if (kIOMDAddDMAMapSpec == op) {
2238 if (dataSize < sizeof(IODMAMapSpecification)) {
2239 return kIOReturnUnderrun;
2240 }
99c3a104 2241
0a7de745 2242 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
4452a7af 2243
0a7de745
A
2244 if (!_memoryEntries
2245 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2246 return kIOReturnNoMemory;
2247 }
4452a7af 2248
0a7de745
A
2249 if (_memoryEntries) {
2250 dataP = getDataP(_memoryEntries);
2251 if (data->numAddressBits < dataP->fDMAMapNumAddressBits) {
2252 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2253 }
2254 if (data->alignment > dataP->fDMAMapAlignment) {
2255 dataP->fDMAMapAlignment = data->alignment;
2256 }
2257 }
2258 return kIOReturnSuccess;
0c530ab8 2259 }
4452a7af 2260
0a7de745
A
2261 if (kIOMDGetCharacteristics == op) {
2262 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2263 return kIOReturnUnderrun;
2264 }
4452a7af 2265
0a7de745
A
2266 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2267 data->fLength = _length;
2268 data->fSGCount = _rangesCount;
2269 data->fPages = _pages;
2270 data->fDirection = getDirection();
2271 if (!_wireCount) {
2272 data->fIsPrepared = false;
2273 } else {
2274 data->fIsPrepared = true;
2275 data->fHighestPage = _highestPage;
2276 if (_memoryEntries) {
2277 dataP = getDataP(_memoryEntries);
2278 ioPLBlock *ioplList = getIOPLList(dataP);
2279 UInt count = getNumIOPL(_memoryEntries, dataP);
2280 if (count == 1) {
2281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2282 }
2283 }
2284 }
4452a7af 2285
0a7de745
A
2286 return kIOReturnSuccess;
2287 } else if (kIOMDDMAActive == op) {
2288 if (params) {
2289 int16_t prior;
2290 prior = OSAddAtomic16(1, &md->_dmaReferences);
2291 if (!prior) {
2292 md->_mapName = NULL;
2293 }
2294 } else {
2295 if (md->_dmaReferences) {
2296 OSAddAtomic16(-1, &md->_dmaReferences);
2297 } else {
2298 panic("_dmaReferences underflow");
2299 }
2300 }
2301 } else if (kIOMDWalkSegments != op) {
2302 return kIOReturnBadArgument;
0c530ab8 2303 }
89b3af67 2304
0a7de745
A
2305 // Get the next segment
2306 struct InternalState {
2307 IOMDDMAWalkSegmentArgs fIO;
cb323159
A
2308 mach_vm_size_t fOffset2Index;
2309 mach_vm_size_t fNextOffset;
0a7de745 2310 UInt fIndex;
0a7de745
A
2311 } *isP;
2312
2313 // Find the next segment
2314 if (dataSize < sizeof(*isP)) {
2315 return kIOReturnUnderrun;
99c3a104 2316 }
4452a7af 2317
0a7de745 2318 isP = (InternalState *) vData;
cb323159 2319 mach_vm_size_t offset = isP->fIO.fOffset;
0a7de745
A
2320 uint8_t mapped = isP->fIO.fMapped;
2321 uint64_t mappedBase;
4452a7af 2322
0a7de745
A
2323 if (mapped && (kIOMemoryRemote & _flags)) {
2324 return kIOReturnNotAttached;
2325 }
4452a7af 2326
0a7de745
A
2327 if (IOMapper::gSystem && mapped
2328 && (!(kIOMemoryHostOnly & _flags))
2329 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid)) {
2330// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2331 if (!_memoryEntries
2332 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) {
2333 return kIOReturnNoMemory;
2334 }
4452a7af 2335
0a7de745
A
2336 dataP = getDataP(_memoryEntries);
2337 if (dataP->fMapper) {
2338 IODMAMapSpecification mapSpec;
2339 bzero(&mapSpec, sizeof(mapSpec));
2340 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2341 mapSpec.alignment = dataP->fDMAMapAlignment;
2342 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2343 if (kIOReturnSuccess != err) {
2344 return err;
2345 }
2346 dataP->fMappedBaseValid = true;
2347 }
2348 }
0c530ab8 2349
0a7de745
A
2350 if (kIOMDDMAWalkMappedLocal == mapped) {
2351 mappedBase = isP->fIO.fMappedBase;
2352 } else if (mapped) {
2353 if (IOMapper::gSystem
2354 && (!(kIOMemoryHostOnly & _flags))
2355 && _memoryEntries
2356 && (dataP = getDataP(_memoryEntries))
2357 && dataP->fMappedBaseValid) {
2358 mappedBase = dataP->fMappedBase;
2359 } else {
2360 mapped = 0;
2361 }
2362 }
0c530ab8 2363
0a7de745
A
2364 if (offset >= _length) {
2365 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2366 }
0c530ab8 2367
0a7de745 2368 // Validate the previous offset
cb323159
A
2369 UInt ind;
2370 mach_vm_size_t off2Ind = isP->fOffset2Index;
0a7de745
A
2371 if (!params
2372 && offset
2373 && (offset == isP->fNextOffset || off2Ind <= offset)) {
2374 ind = isP->fIndex;
2375 } else {
2376 ind = off2Ind = 0; // Start from beginning
0c530ab8 2377 }
cb323159 2378 mach_vm_size_t length;
0a7de745
A
2379 UInt64 address;
2380
2381 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2382 // Physical address based memory descriptor
2383 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2384
2385 // Find the range after the one that contains the offset
2386 mach_vm_size_t len;
2387 for (len = 0; off2Ind <= offset; ind++) {
2388 len = physP[ind].length;
2389 off2Ind += len;
2390 }
0c530ab8 2391
0a7de745
A
2392 // Calculate length within range and starting address
2393 length = off2Ind - offset;
2394 address = physP[ind - 1].address + len - length;
0c530ab8 2395
0a7de745
A
2396 if (mapped) {
2397 address = mappedBase + offset;
2398 } else {
2399 // see how far we can coalesce ranges
2400 while (ind < _rangesCount && address + length == physP[ind].address) {
2401 len = physP[ind].length;
2402 length += len;
2403 off2Ind += len;
2404 ind++;
2405 }
2406 }
0c530ab8 2407
0a7de745
A
2408 // correct contiguous check overshoot
2409 ind--;
2410 off2Ind -= len;
0c530ab8 2411 }
0a7de745
A
2412#ifndef __LP64__
2413 else if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2414 // Physical address based memory descriptor
2415 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2416
2417 // Find the range after the one that contains the offset
2418 mach_vm_size_t len;
2419 for (len = 0; off2Ind <= offset; ind++) {
2420 len = physP[ind].length;
2421 off2Ind += len;
2422 }
4452a7af 2423
0a7de745
A
2424 // Calculate length within range and starting address
2425 length = off2Ind - offset;
2426 address = physP[ind - 1].address + len - length;
0c530ab8 2427
0a7de745
A
2428 if (mapped) {
2429 address = mappedBase + offset;
2430 } else {
2431 // see how far we can coalesce ranges
2432 while (ind < _rangesCount && address + length == physP[ind].address) {
2433 len = physP[ind].length;
2434 length += len;
2435 off2Ind += len;
2436 ind++;
2437 }
2438 }
2439 // correct contiguous check overshoot
2440 ind--;
2441 off2Ind -= len;
6d2010ae 2442 }
0a7de745
A
2443#endif /* !__LP64__ */
2444 else {
2445 do {
2446 if (!_wireCount) {
2447 panic("IOGMD: not wired for the IODMACommand");
2448 }
2449
2450 assert(_memoryEntries);
2451
2452 dataP = getDataP(_memoryEntries);
2453 const ioPLBlock *ioplList = getIOPLList(dataP);
2454 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2455 upl_page_info_t *pageList = getPageList(dataP);
2456
2457 assert(numIOPLs > 0);
2458
2459 // Scan through iopl info blocks looking for block containing offset
2460 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset) {
2461 ind++;
2462 }
2463
2464 // Go back to actual range as search goes past it
2465 ioPLBlock ioplInfo = ioplList[ind - 1];
2466 off2Ind = ioplInfo.fIOMDOffset;
2467
2468 if (ind < numIOPLs) {
2469 length = ioplList[ind].fIOMDOffset;
2470 } else {
2471 length = _length;
2472 }
2473 length -= offset; // Remainder within iopl
2474
2475 // Subtract offset till this iopl in total list
2476 offset -= off2Ind;
2477
2478 // If a mapped address is requested and this is a pre-mapped IOPL
2479 // then we just need to compute an offset relative to the mapped base.
2480 if (mapped) {
2481 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2482 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2483 continue; // Done; leave the do/while(false) now
2484 }
2485
2486 // The offset is rebased into the current iopl.
2487 // Now add the iopl 1st page offset.
2488 offset += ioplInfo.fPageOffset;
2489
2490 // For external UPLs the fPageInfo field points directly to
2491 // the upl's upl_page_info_t array.
2492 if (ioplInfo.fFlags & kIOPLExternUPL) {
2493 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2494 } else {
2495 pageList = &pageList[ioplInfo.fPageInfo];
2496 }
2497
2498 // Check for direct device non-paged memory
2499 if (ioplInfo.fFlags & kIOPLOnDevice) {
2500 address = ptoa_64(pageList->phys_addr) + offset;
2501 continue; // Done; leave the do/while(false) now
2502 }
2503
2504 // Now we need to compute the index into the pageList
2505 UInt pageInd = atop_32(offset);
2506 offset &= PAGE_MASK;
2507
2508 // Compute the starting address of this segment
2509 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2510 if (!pageAddr) {
2511 panic("!pageList phys_addr");
2512 }
2513
2514 address = ptoa_64(pageAddr) + offset;
2515
2516 // length is currently set to the length of the remainder of the iopl.
2517 // We need to check that the remainder of the iopl is contiguous.
2518 // This is indicated by pageList[pageInd].phys_addr being sequential.
2519 IOByteCount contigLength = PAGE_SIZE - offset;
2520 while (contigLength < length
2521 && ++pageAddr == pageList[++pageInd].phys_addr) {
2522 contigLength += PAGE_SIZE;
2523 }
2524
2525 if (contigLength < length) {
2526 length = contigLength;
2527 }
2528
2529
2530 assert(address);
2531 assert(length);
2532 } while (false);
0c530ab8
A
2533 }
2534
0a7de745
A
2535 // Update return values and state
2536 isP->fIO.fIOVMAddr = address;
2537 isP->fIO.fLength = length;
2538 isP->fIndex = ind;
2539 isP->fOffset2Index = off2Ind;
2540 isP->fNextOffset = isP->fIO.fOffset + length;
0c530ab8 2541
0a7de745 2542 return kIOReturnSuccess;
0c530ab8
A
2543}
2544
2545addr64_t
b0d623f7 2546IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2547{
0a7de745
A
2548 IOReturn ret;
2549 mach_vm_address_t address = 0;
2550 mach_vm_size_t length = 0;
2551 IOMapper * mapper = gIOSystemMapper;
2552 IOOptionBits type = _flags & kIOMemoryTypeMask;
2553
2554 if (lengthOfSegment) {
2555 *lengthOfSegment = 0;
2556 }
b0d623f7 2557
0a7de745
A
2558 if (offset >= _length) {
2559 return 0;
2560 }
b0d623f7 2561
0a7de745
A
2562 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2563 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2564 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2565 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2566
2567 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type)) {
2568 unsigned rangesIndex = 0;
2569 Ranges vec = _ranges;
2570 mach_vm_address_t addr;
2571
2572 // Find starting address within the vector of ranges
2573 for (;;) {
2574 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2575 if (offset < length) {
2576 break;
2577 }
2578 offset -= length; // (make offset relative)
2579 rangesIndex++;
2580 }
b0d623f7 2581
0a7de745
A
2582 // Now that we have the starting range,
2583 // lets find the last contiguous range
2584 addr += offset;
2585 length -= offset;
b0d623f7 2586
0a7de745
A
2587 for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
2588 mach_vm_address_t newAddr;
2589 mach_vm_size_t newLen;
2590
2591 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2592 if (addr + length != newAddr) {
2593 break;
2594 }
2595 length += newLen;
2596 }
2597 if (addr) {
2598 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2599 }
2600 } else {
2601 IOMDDMAWalkSegmentState _state;
2602 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
b0d623f7 2603
0a7de745
A
2604 state->fOffset = offset;
2605 state->fLength = _length - offset;
2606 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
b0d623f7 2607
0a7de745 2608 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
b0d623f7 2609
0a7de745
A
2610 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret)) {
2611 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2612 ret, this, state->fOffset,
2613 state->fIOVMAddr, state->fLength);
2614 }
2615 if (kIOReturnSuccess == ret) {
2616 address = state->fIOVMAddr;
2617 length = state->fLength;
2618 }
2619
2620 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2621 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2622
2623 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))) {
2624 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone)) {
2625 addr64_t origAddr = address;
2626 IOByteCount origLen = length;
2627
2628 address = mapper->mapToPhysicalAddress(origAddr);
2629 length = page_size - (address & (page_size - 1));
2630 while ((length < origLen)
2631 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length))) {
2632 length += page_size;
2633 }
2634 if (length > origLen) {
2635 length = origLen;
2636 }
2637 }
2638 }
b0d623f7 2639 }
4452a7af 2640
0a7de745
A
2641 if (!address) {
2642 length = 0;
2643 }
b0d623f7 2644
0a7de745
A
2645 if (lengthOfSegment) {
2646 *lengthOfSegment = length;
2647 }
4452a7af 2648
0a7de745 2649 return address;
0c530ab8
A
2650}
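// The common way to enumerate a prepared descriptor's physical extents is to
// walk it with getPhysicalSegment() until the accumulated length reaches
// getLength(); kIOMemoryMapperNone asks for CPU-physical rather than
// mapper (IOVM) addresses. A minimal sketch, assuming `md` is prepared:
#if 0
	IOByteCount offset = 0;
	while (offset < md->getLength()) {
		IOByteCount segLen  = 0;
		addr64_t    segPhys = md->getPhysicalSegment(offset, &segLen,
		    kIOMemoryMapperNone);
		if (!segPhys || !segLen) {
			break;
		}
		IOLog("segment 0x%qx, length %qu\n", segPhys, (uint64_t) segLen);
		offset += segLen;
	}
#endif
// New driver code generally lets IODMACommand::genIOVMSegments() do this walk,
// since it also handles mappers, address-width limits and bouncing.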
2651
b0d623f7 2652#ifndef __LP64__
39037602
A
2653#pragma clang diagnostic push
2654#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2655
b0d623f7
A
2656addr64_t
2657IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2658{
0a7de745
A
2659 addr64_t address = 0;
2660
2661 if (options & _kIOMemorySourceSegment) {
2662 address = getSourceSegment(offset, lengthOfSegment);
2663 } else if (options & kIOMemoryMapperNone) {
2664 address = getPhysicalSegment64(offset, lengthOfSegment);
2665 } else {
2666 address = getPhysicalSegment(offset, lengthOfSegment);
2667 }
2668
2669 return address;
b0d623f7 2670}
39037602 2671#pragma clang diagnostic pop
0c530ab8 2672
b0d623f7
A
2673addr64_t
2674IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2675{
0a7de745 2676 return getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone);
b0d623f7 2677}
0c530ab8 2678
b0d623f7
A
2679IOPhysicalAddress
2680IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2681{
0a7de745
A
2682 addr64_t address = 0;
2683 IOByteCount length = 0;
0c530ab8 2684
0a7de745 2685 address = getPhysicalSegment(offset, lengthOfSegment, 0);
b0d623f7 2686
0a7de745
A
2687 if (lengthOfSegment) {
2688 length = *lengthOfSegment;
2689 }
0c530ab8 2690
0a7de745
A
2691 if ((address + length) > 0x100000000ULL) {
2692 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 2693 address, (long) length, (getMetaClass())->getClassName());
0a7de745 2694 }
0c530ab8 2695
0a7de745 2696 return (IOPhysicalAddress) address;
55e303ae 2697}
de355530 2698
0c530ab8
A
2699addr64_t
2700IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae 2701{
0a7de745
A
2702 IOPhysicalAddress phys32;
2703 IOByteCount length;
2704 addr64_t phys64;
cb323159 2705 IOMapper * mapper = NULL;
0a7de745
A
2706
2707 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2708 if (!phys32) {
2709 return 0;
2710 }
0b4e3aa0 2711
0a7de745
A
2712 if (gIOSystemMapper) {
2713 mapper = gIOSystemMapper;
2714 }
0c530ab8 2715
0a7de745
A
2716 if (mapper) {
2717 IOByteCount origLen;
55e303ae 2718
0a7de745
A
2719 phys64 = mapper->mapToPhysicalAddress(phys32);
2720 origLen = *lengthOfSegment;
2721 length = page_size - (phys64 & (page_size - 1));
2722 while ((length < origLen)
2723 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length))) {
2724 length += page_size;
2725 }
2726 if (length > origLen) {
2727 length = origLen;
2728 }
55e303ae 2729
0a7de745
A
2730 *lengthOfSegment = length;
2731 } else {
2732 phys64 = (addr64_t) phys32;
2733 }
1c79356b 2734
0a7de745 2735 return phys64;
0b4e3aa0
A
2736}
2737
0c530ab8 2738IOPhysicalAddress
b0d623f7 2739IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 2740{
0a7de745 2741 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0);
0b4e3aa0
A
2742}
2743
b0d623f7
A
2744IOPhysicalAddress
2745IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2746{
0a7de745 2747 return (IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment);
b0d623f7 2748}
1c79356b 2749
39037602
A
2750#pragma clang diagnostic push
2751#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2752
0a7de745
A
2753void *
2754IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2755 IOByteCount * lengthOfSegment)
b0d623f7 2756{
0a7de745
A
2757 if (_task == kernel_task) {
2758 return (void *) getSourceSegment(offset, lengthOfSegment);
2759 } else {
2760 panic("IOGMD::getVirtualSegment deprecated");
2761 }
91447636 2762
cb323159 2763 return NULL;
b0d623f7 2764}
39037602 2765#pragma clang diagnostic pop
b0d623f7 2766#endif /* !__LP64__ */
91447636 2767
0a7de745 2768IOReturn
0c530ab8
A
2769IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2770{
0a7de745
A
2771 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2772 DMACommandOps params;
2773 IOReturn err;
2774
2775 params = (op & ~kIOMDDMACommandOperationMask & op);
2776 op &= kIOMDDMACommandOperationMask;
2777
2778 if (kIOMDGetCharacteristics == op) {
2779 if (dataSize < sizeof(IOMDDMACharacteristics)) {
2780 return kIOReturnUnderrun;
2781 }
2782
2783 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2784 data->fLength = getLength();
2785 data->fSGCount = 0;
2786 data->fDirection = getDirection();
2787 data->fIsPrepared = true; // Assume prepared - fails safe
2788 } else if (kIOMDWalkSegments == op) {
2789 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs)) {
2790 return kIOReturnUnderrun;
2791 }
2792
2793 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2794 IOByteCount offset = (IOByteCount) data->fOffset;
2795
2796 IOPhysicalLength length;
2797 if (data->fMapped && IOMapper::gSystem) {
2798 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2799 } else {
2800 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2801 }
2802 data->fLength = length;
2803 } else if (kIOMDAddDMAMapSpec == op) {
2804 return kIOReturnUnsupported;
2805 } else if (kIOMDDMAMap == op) {
2806 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2807 return kIOReturnUnderrun;
2808 }
2809 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2810
2811 if (params) {
2812 panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2813 }
2814
2815 data->fMapContig = true;
2816 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2817
2818 return err;
2819 } else if (kIOMDDMAUnmap == op) {
2820 if (dataSize < sizeof(IOMDDMAMapArgs)) {
2821 return kIOReturnUnderrun;
2822 }
2823 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2824
2825 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2826
2827 return kIOReturnSuccess;
2828 } else {
2829 return kIOReturnBadArgument;
2830 }
2831
2832 return kIOReturnSuccess;
0c530ab8
A
2833}
2834
0a7de745 2835IOReturn
b0d623f7 2836IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
0a7de745 2837 IOOptionBits * oldState )
b0d623f7 2838{
0a7de745 2839 IOReturn err = kIOReturnSuccess;
b0d623f7 2840
0a7de745
A
2841 vm_purgable_t control;
2842 int state;
b0d623f7 2843
0a7de745
A
2844 assert(!(kIOMemoryRemote & _flags));
2845 if (kIOMemoryRemote & _flags) {
2846 return kIOReturnNotAttached;
2847 }
2848
2849 if (_memRef) {
2850 err = super::setPurgeable(newState, oldState);
2851 } else {
2852 if (kIOMemoryThreadSafe & _flags) {
2853 LOCK;
2854 }
2855 do{
2856 // Find the appropriate vm_map for the given task
2857 vm_map_t curMap;
2858 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) {
2859 err = kIOReturnNotReady;
2860 break;
2861 } else if (!_task) {
2862 err = kIOReturnUnsupported;
2863 break;
2864 } else {
2865 curMap = get_task_map(_task);
2866 if (NULL == curMap) {
2867 err = KERN_INVALID_ARGUMENT;
2868 break;
2869 }
2870 }
2871
2872 // can only do one range
2873 Ranges vec = _ranges;
2874 IOOptionBits type = _flags & kIOMemoryTypeMask;
2875 mach_vm_address_t addr;
2876 mach_vm_size_t len;
2877 getAddrLenForInd(addr, len, type, vec, 0);
2878
2879 err = purgeableControlBits(newState, &control, &state);
2880 if (kIOReturnSuccess != err) {
2881 break;
2882 }
2883 err = vm_map_purgable_control(curMap, addr, control, &state);
2884 if (oldState) {
2885 if (kIOReturnSuccess == err) {
2886 err = purgeableStateBits(&state);
2887 *oldState = state;
2888 }
2889 }
2890 }while (false);
2891 if (kIOMemoryThreadSafe & _flags) {
2892 UNLOCK;
b0d623f7 2893 }
b0d623f7 2894 }
fe8ab488 2895
0a7de745 2896 return err;
b0d623f7
A
2897}
2898
0a7de745
A
2899IOReturn
2900IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2901 IOOptionBits * oldState )
91447636 2902{
0a7de745 2903 IOReturn err = kIOReturnNotReady;
b0d623f7 2904
0a7de745
A
2905 if (kIOMemoryThreadSafe & _flags) {
2906 LOCK;
2907 }
2908 if (_memRef) {
2909 err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2910 }
2911 if (kIOMemoryThreadSafe & _flags) {
2912 UNLOCK;
2913 }
b0d623f7 2914
0a7de745 2915 return err;
91447636 2916}
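// Typical purgeable usage: a cache owner marks its backing memory volatile
// while idle so the VM may reclaim it under pressure, then pins it again
// before reuse and checks whether the contents survived. A minimal sketch,
// assuming `md` was created over purgeable memory (e.g. an
// IOBufferMemoryDescriptor allocated with kIOMemoryPurgeable):
#if 0
	IOOptionBits oldState = 0;

	md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);     // going idle

	// ... later, before reuse ...
	md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
	if (kIOMemoryPurgeableEmpty == oldState) {
		// pages were reclaimed; regenerate the cached contents
	}
#endif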
0a7de745 2917
cb323159
A
2918IOReturn
2919IOGeneralMemoryDescriptor::setOwnership( task_t newOwner,
2920 int newLedgerTag,
2921 IOOptionBits newLedgerOptions )
2922{
2923 IOReturn err = kIOReturnSuccess;
2924
2925 assert(!(kIOMemoryRemote & _flags));
2926 if (kIOMemoryRemote & _flags) {
2927 return kIOReturnNotAttached;
2928 }
2929
2930 if (iokit_iomd_setownership_enabled == FALSE) {
2931 return kIOReturnUnsupported;
2932 }
2933
2934 if (_memRef) {
2935 err = super::setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2936 } else {
2937 err = kIOReturnUnsupported;
2938 }
2939
2940 return err;
2941}
2942
2943IOReturn
2944IOMemoryDescriptor::setOwnership( task_t newOwner,
2945 int newLedgerTag,
2946 IOOptionBits newLedgerOptions )
2947{
2948 IOReturn err = kIOReturnNotReady;
2949
2950 assert(!(kIOMemoryRemote & _flags));
2951 if (kIOMemoryRemote & _flags) {
2952 return kIOReturnNotAttached;
2953 }
2954
2955 if (iokit_iomd_setownership_enabled == FALSE) {
2956 return kIOReturnUnsupported;
2957 }
2958
2959 if (kIOMemoryThreadSafe & _flags) {
2960 LOCK;
2961 }
2962 if (_memRef) {
2963 err = IOGeneralMemoryDescriptor::memoryReferenceSetOwnership(_memRef, newOwner, newLedgerTag, newLedgerOptions);
2964 } else {
2965 IOMultiMemoryDescriptor * mmd;
2966 IOSubMemoryDescriptor * smd;
2967 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
2968 err = smd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2969 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
2970 err = mmd->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
2971 }
2972 }
2973 if (kIOMemoryThreadSafe & _flags) {
2974 UNLOCK;
2975 }
2976
2977 return err;
2978}
2979
0a7de745
A
2980IOReturn
2981IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2982 IOByteCount * dirtyPageCount )
39236c6e 2983{
0a7de745 2984 IOReturn err = kIOReturnNotReady;
39236c6e 2985
0a7de745
A
2986 assert(!(kIOMemoryRemote & _flags));
2987 if (kIOMemoryRemote & _flags) {
2988 return kIOReturnNotAttached;
2989 }
5ba3f43e 2990
0a7de745
A
2991 if (kIOMemoryThreadSafe & _flags) {
2992 LOCK;
3e170ce0 2993 }
0a7de745
A
2994 if (_memRef) {
2995 err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2996 } else {
2997 IOMultiMemoryDescriptor * mmd;
2998 IOSubMemoryDescriptor * smd;
2999 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this))) {
3000 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
3001 } else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this))) {
3002 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
3003 }
3004 }
3005 if (kIOMemoryThreadSafe & _flags) {
3006 UNLOCK;
3e170ce0 3007 }
39236c6e 3008
0a7de745 3009 return err;
39236c6e 3010}
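// getPageCounts() is mainly a diagnostics/accounting hook: it reports how many
// of the descriptor's pages are currently resident and how many are dirty.
// A minimal sketch, assuming `md` references pageable memory:
#if 0
	IOByteCount resident = 0, dirty = 0;
	if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
		IOLog("buffer: %qu resident, %qu dirty pages\n",
		    (uint64_t) resident, (uint64_t) dirty);
	}
#endif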
0a7de745 3011
39236c6e 3012
5ba3f43e
A
3013#if defined(__arm__) || defined(__arm64__)
3014extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3015extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
3016#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
3017extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
3018extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
5ba3f43e 3019#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3020
0a7de745
A
3021static void
3022SetEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 3023{
0a7de745
A
3024 ppnum_t page, end;
3025
3026 page = atop_64(round_page_64(pa));
3027 end = atop_64(trunc_page_64(pa + count));
3028 for (; page < end; page++) {
3029 pmap_clear_noencrypt(page);
3030 }
0b4c1975
A
3031}
3032
0a7de745
A
3033static void
3034ClearEncryptOp(addr64_t pa, unsigned int count)
0b4c1975 3035{
0a7de745
A
3036 ppnum_t page, end;
3037
3038 page = atop_64(round_page_64(pa));
3039 end = atop_64(trunc_page_64(pa + count));
3040 for (; page < end; page++) {
3041 pmap_set_noencrypt(page);
3042 }
0b4c1975
A
3043}
3044
0a7de745
A
3045IOReturn
3046IOMemoryDescriptor::performOperation( IOOptionBits options,
3047 IOByteCount offset, IOByteCount length )
91447636 3048{
0a7de745
A
3049 IOByteCount remaining;
3050 unsigned int res;
cb323159 3051 void (*func)(addr64_t pa, unsigned int count) = NULL;
5ba3f43e 3052#if defined(__arm__) || defined(__arm64__)
cb323159 3053 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = NULL;
5ba3f43e
A
3054#endif
3055
0a7de745
A
3056 assert(!(kIOMemoryRemote & _flags));
3057 if (kIOMemoryRemote & _flags) {
3058 return kIOReturnNotAttached;
3059 }
91447636 3060
0a7de745
A
3061 switch (options) {
3062 case kIOMemoryIncoherentIOFlush:
5ba3f43e 3063#if defined(__arm__) || defined(__arm64__)
0a7de745 3064 func_ext = &dcache_incoherent_io_flush64;
5ba3f43e 3065#if __ARM_COHERENT_IO__
0a7de745
A
3066 func_ext(0, 0, 0, &res);
3067 return kIOReturnSuccess;
5ba3f43e 3068#else /* __ARM_COHERENT_IO__ */
0a7de745 3069 break;
5ba3f43e
A
3070#endif /* __ARM_COHERENT_IO__ */
3071#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3072 func = &dcache_incoherent_io_flush64;
3073 break;
5ba3f43e 3074#endif /* defined(__arm__) || defined(__arm64__) */
0a7de745 3075 case kIOMemoryIncoherentIOStore:
5ba3f43e 3076#if defined(__arm__) || defined(__arm64__)
0a7de745 3077 func_ext = &dcache_incoherent_io_store64;
5ba3f43e 3078#if __ARM_COHERENT_IO__
0a7de745
A
3079 func_ext(0, 0, 0, &res);
3080 return kIOReturnSuccess;
5ba3f43e 3081#else /* __ARM_COHERENT_IO__ */
0a7de745 3082 break;
5ba3f43e
A
3083#endif /* __ARM_COHERENT_IO__ */
3084#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3085 func = &dcache_incoherent_io_store64;
3086 break;
5ba3f43e 3087#endif /* defined(__arm__) || defined(__arm64__) */
0b4c1975 3088
0a7de745
A
3089 case kIOMemorySetEncrypted:
3090 func = &SetEncryptOp;
3091 break;
3092 case kIOMemoryClearEncrypted:
3093 func = &ClearEncryptOp;
3094 break;
3095 }
91447636 3096
5ba3f43e 3097#if defined(__arm__) || defined(__arm64__)
cb323159 3098 if ((func == NULL) && (func_ext == NULL)) {
0a7de745
A
3099 return kIOReturnUnsupported;
3100 }
5ba3f43e 3101#else /* defined(__arm__) || defined(__arm64__) */
0a7de745
A
3102 if (!func) {
3103 return kIOReturnUnsupported;
3104 }
5ba3f43e 3105#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3106
0a7de745
A
3107 if (kIOMemoryThreadSafe & _flags) {
3108 LOCK;
3109 }
b0d623f7 3110
0a7de745
A
3111 res = 0x0UL;
3112 remaining = length = min(length, getLength() - offset);
3113 while (remaining) {
3114 // (process another target segment?)
3115 addr64_t dstAddr64;
3116 IOByteCount dstLen;
91447636 3117
0a7de745
A
3118 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
3119 if (!dstAddr64) {
3120 break;
3121 }
91447636 3122
0a7de745
A
3123 // Clip segment length to remaining
3124 if (dstLen > remaining) {
3125 dstLen = remaining;
3126 }
91447636 3127
5ba3f43e 3128#if defined(__arm__) || defined(__arm64__)
0a7de745
A
3129 if (func) {
3130 (*func)(dstAddr64, dstLen);
3131 }
3132 if (func_ext) {
3133 (*func_ext)(dstAddr64, dstLen, remaining, &res);
3134 if (res != 0x0UL) {
3135 remaining = 0;
3136 break;
3137 }
3138 }
5ba3f43e 3139#else /* defined(__arm__) || defined(__arm64__) */
0a7de745 3140 (*func)(dstAddr64, dstLen);
5ba3f43e 3141#endif /* defined(__arm__) || defined(__arm64__) */
91447636 3142
0a7de745
A
3143 offset += dstLen;
3144 remaining -= dstLen;
3145 }
91447636 3146
0a7de745
A
3147 if (kIOMemoryThreadSafe & _flags) {
3148 UNLOCK;
3149 }
b0d623f7 3150
0a7de745 3151 return remaining ? kIOReturnUnderrun : kIOReturnSuccess;
91447636
A
3152}
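// performOperation() walks the descriptor's physical segments and applies a
// per-range operation: the kIOMemoryIncoherentIO* options drive the data
// cache on platforms without coherent I/O (and return immediately on
// __ARM_COHERENT_IO__ configurations), while the encryption options adjust
// the per-page hibernation no-encrypt hint. A minimal sketch, assuming a
// device has just DMA'd into `md` on a non-coherent platform and the CPU is
// about to read the data:
#if 0
	md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
#endif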
3153
39037602
A
3154/*
3155 *
3156 */
3157
316670eb 3158#if defined(__i386__) || defined(__x86_64__)
3e170ce0 3159
0a7de745
A
3160#define io_kernel_static_start vm_kernel_stext
3161#define io_kernel_static_end vm_kernel_etext
3e170ce0 3162
5ba3f43e
A
3163#elif defined(__arm__) || defined(__arm64__)
3164
0a7de745 3165extern vm_offset_t static_memory_end;
5ba3f43e
A
3166
3167#if defined(__arm64__)
3168#define io_kernel_static_start vm_kext_base
3169#else /* defined(__arm64__) */
3170#define io_kernel_static_start vm_kernel_stext
3171#endif /* defined(__arm64__) */
3172
0a7de745 3173#define io_kernel_static_end static_memory_end
5ba3f43e 3174
316670eb
A
3175#else
3176#error io_kernel_static_end is undefined for this architecture
3177#endif
55e303ae
A
3178
3179static kern_return_t
3180io_get_kernel_static_upl(
0a7de745
A
3181 vm_map_t /* map */,
3182 uintptr_t offset,
3183 upl_size_t *upl_size,
3184 upl_t *upl,
3185 upl_page_info_array_t page_list,
3186 unsigned int *count,
3187 ppnum_t *highest_page)
1c79356b 3188{
0a7de745
A
3189 unsigned int pageCount, page;
3190 ppnum_t phys;
3191 ppnum_t highestPage = 0;
3192
3193 pageCount = atop_32(*upl_size);
3194 if (pageCount > *count) {
3195 pageCount = *count;
3196 }
3197
3198 *upl = NULL;
3199
3200 for (page = 0; page < pageCount; page++) {
3201 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3202 if (!phys) {
3203 break;
3204 }
3205 page_list[page].phys_addr = phys;
3206 page_list[page].free_when_done = 0;
3207 page_list[page].absent = 0;
3208 page_list[page].dirty = 0;
3209 page_list[page].precious = 0;
3210 page_list[page].device = 0;
3211 if (phys > highestPage) {
3212 highestPage = phys;
3213 }
3214 }
3215
3216 *highest_page = highestPage;
3217
3218 return (page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError;
55e303ae 3219}
0b4e3aa0 3220
0a7de745
A
3221IOReturn
3222IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
55e303ae 3223{
0a7de745
A
3224 IOOptionBits type = _flags & kIOMemoryTypeMask;
3225 IOReturn error = kIOReturnSuccess;
3226 ioGMDData *dataP;
3227 upl_page_info_array_t pageInfo;
3228 ppnum_t mapBase;
3229 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3230
3231 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3232
3233 if ((kIODirectionOutIn & forDirection) == kIODirectionNone) {
3234 forDirection = (IODirection) (forDirection | getDirection());
3235 }
3236
3237 dataP = getDataP(_memoryEntries);
3238 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3239 switch (kIODirectionOutIn & forDirection) {
5ba3f43e 3240 case kIODirectionOut:
0a7de745
A
3241 // Pages do not need to be marked as dirty on commit
3242 uplFlags = UPL_COPYOUT_FROM;
3243 dataP->fDMAAccess = kIODMAMapReadAccess;
3244 break;
55e303ae 3245
5ba3f43e 3246 case kIODirectionIn:
0a7de745
A
3247 dataP->fDMAAccess = kIODMAMapWriteAccess;
3248 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3249 break;
39037602 3250
5ba3f43e 3251 default:
0a7de745
A
3252 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3253 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3254 break;
3255 }
3256
3257 if (_wireCount) {
3258 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags)) {
3259 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3260 error = kIOReturnNotWritable;
3261 }
3262 } else {
3263 IOMapper *mapper;
3264
3265 mapper = dataP->fMapper;
3266 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3267
3268 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3269 tag = _kernelTag;
3270 if (VM_KERN_MEMORY_NONE == tag) {
3271 tag = IOMemoryTag(kernel_map);
3272 }
3273
3274 if (kIODirectionPrepareToPhys32 & forDirection) {
3275 if (!mapper) {
3276 uplFlags |= UPL_NEED_32BIT_ADDR;
3277 }
3278 if (dataP->fDMAMapNumAddressBits > 32) {
3279 dataP->fDMAMapNumAddressBits = 32;
3280 }
3281 }
3282 if (kIODirectionPrepareNoFault & forDirection) {
3283 uplFlags |= UPL_REQUEST_NO_FAULT;
3284 }
3285 if (kIODirectionPrepareNoZeroFill & forDirection) {
3286 uplFlags |= UPL_NOZEROFILLIO;
3287 }
3288 if (kIODirectionPrepareNonCoherent & forDirection) {
3289 uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3290 }
3291
3292 mapBase = 0;
3293
3294 // Note that appendBytes(NULL) zeros the data up to the desired length
3295 // and the length parameter is an unsigned int
3296 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3297 if (uplPageSize > ((unsigned int)uplPageSize)) {
3298 return kIOReturnNoMemory;
3299 }
cb323159 3300 if (!_memoryEntries->appendBytes(NULL, uplPageSize)) {
0a7de745
A
3301 return kIOReturnNoMemory;
3302 }
cb323159 3303 dataP = NULL;
0a7de745
A
3304
3305 // Find the appropriate vm_map for the given task
3306 vm_map_t curMap;
cb323159
A
3307 if ((NULL != _memRef) || ((_task == kernel_task && (kIOMemoryBufferPageable & _flags)))) {
3308 curMap = NULL;
0a7de745
A
3309 } else {
3310 curMap = get_task_map(_task);
3311 }
3312
3313 // Iterate over the vector of virtual ranges
3314 Ranges vec = _ranges;
3315 unsigned int pageIndex = 0;
3316 IOByteCount mdOffset = 0;
3317 ppnum_t highestPage = 0;
3318
cb323159 3319 IOMemoryEntry * memRefEntry = NULL;
0a7de745
A
3320 if (_memRef) {
3321 memRefEntry = &_memRef->entries[0];
3322 }
3323
3324 for (UInt range = 0; range < _rangesCount; range++) {
3325 ioPLBlock iopl;
3326 mach_vm_address_t startPage, startPageOffset;
3327 mach_vm_size_t numBytes;
3328 ppnum_t highPage = 0;
3329
3330 // Get the startPage address and length of vec[range]
3331 getAddrLenForInd(startPage, numBytes, type, vec, range);
3332 startPageOffset = startPage & PAGE_MASK;
3333 iopl.fPageOffset = startPageOffset;
3334 numBytes += startPageOffset;
3335 startPage = trunc_page_64(startPage);
3336
3337 if (mapper) {
3338 iopl.fMappedPage = mapBase + pageIndex;
3339 } else {
3340 iopl.fMappedPage = 0;
3341 }
3342
3343 // Iterate over the current range, creating UPLs
3344 while (numBytes) {
3345 vm_address_t kernelStart = (vm_address_t) startPage;
3346 vm_map_t theMap;
3347 if (curMap) {
3348 theMap = curMap;
3349 } else if (_memRef) {
3350 theMap = NULL;
3351 } else {
3352 assert(_task == kernel_task);
3353 theMap = IOPageableMapForAddress(kernelStart);
3354 }
3355
3356 // ioplFlags is an in/out parameter
3357 upl_control_flags_t ioplFlags = uplFlags;
3358 dataP = getDataP(_memoryEntries);
3359 pageInfo = getPageList(dataP);
3360 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3361
3362 mach_vm_size_t _ioplSize = round_page(numBytes);
3363 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3364 unsigned int numPageInfo = atop_32(ioplSize);
3365
3366 if ((theMap == kernel_map)
3367 && (kernelStart >= io_kernel_static_start)
3368 && (kernelStart < io_kernel_static_end)) {
3369 error = io_get_kernel_static_upl(theMap,
3370 kernelStart,
3371 &ioplSize,
3372 &iopl.fIOPL,
3373 baseInfo,
3374 &numPageInfo,
3375 &highPage);
3376 } else if (_memRef) {
3377 memory_object_offset_t entryOffset;
3378
3379 entryOffset = mdOffset;
3380 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3381 if (entryOffset >= memRefEntry->size) {
3382 memRefEntry++;
3383 if (memRefEntry >= &_memRef->entries[_memRef->count]) {
3384 panic("memRefEntry");
3385 }
3386 entryOffset = 0;
3387 }
3388 if (ioplSize > (memRefEntry->size - entryOffset)) {
3389 ioplSize = (memRefEntry->size - entryOffset);
3390 }
3391 error = memory_object_iopl_request(memRefEntry->entry,
3392 entryOffset,
3393 &ioplSize,
3394 &iopl.fIOPL,
3395 baseInfo,
3396 &numPageInfo,
3397 &ioplFlags,
3398 tag);
3399 } else {
3400 assert(theMap);
3401 error = vm_map_create_upl(theMap,
3402 startPage,
3403 (upl_size_t*)&ioplSize,
3404 &iopl.fIOPL,
3405 baseInfo,
3406 &numPageInfo,
3407 &ioplFlags,
3408 tag);
3409 }
3410
3411 if (error != KERN_SUCCESS) {
3412 goto abortExit;
3413 }
3414
3415 assert(ioplSize);
3416
3417 if (iopl.fIOPL) {
3418 highPage = upl_get_highest_page(iopl.fIOPL);
3419 }
3420 if (highPage > highestPage) {
3421 highestPage = highPage;
3422 }
3423
3424 if (baseInfo->device) {
3425 numPageInfo = 1;
3426 iopl.fFlags = kIOPLOnDevice;
3427 } else {
3428 iopl.fFlags = 0;
3429 }
3430
3431 iopl.fIOMDOffset = mdOffset;
3432 iopl.fPageInfo = pageIndex;
3433 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) {
3434 dataP->fDiscontig = true;
3435 }
3436
3437 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3438 // Clean up partial created and unsaved iopl
3439 if (iopl.fIOPL) {
3440 upl_abort(iopl.fIOPL, 0);
3441 upl_deallocate(iopl.fIOPL);
3442 }
3443 goto abortExit;
3444 }
cb323159 3445 dataP = NULL;
0a7de745
A
3446
3447 // Check for multiple iopls in one virtual range
3448 pageIndex += numPageInfo;
3449 mdOffset -= iopl.fPageOffset;
3450 if (ioplSize < numBytes) {
3451 numBytes -= ioplSize;
3452 startPage += ioplSize;
3453 mdOffset += ioplSize;
3454 iopl.fPageOffset = 0;
3455 if (mapper) {
3456 iopl.fMappedPage = mapBase + pageIndex;
3457 }
3458 } else {
3459 mdOffset += numBytes;
3460 break;
3461 }
3462 }
3463 }
3464
3465 _highestPage = highestPage;
3466
3467 if (UPL_COPYOUT_FROM & uplFlags) {
3468 _flags |= kIOMemoryPreparedReadOnly;
3469 }
3470 }
39236c6e 3471
39037602 3472#if IOTRACKING
0a7de745
A
3473 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error)) {
3474 dataP = getDataP(_memoryEntries);
3475 if (!dataP->fWireTracking.link.next) {
3476 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3477 }
5ba3f43e 3478 }
39037602 3479#endif /* IOTRACKING */
3e170ce0 3480
0a7de745 3481 return error;
1c79356b
A
3482
3483abortExit:
55e303ae 3484 {
0a7de745
A
3485 dataP = getDataP(_memoryEntries);
3486 UInt done = getNumIOPL(_memoryEntries, dataP);
3487 ioPLBlock *ioplList = getIOPLList(dataP);
3488
3489 for (UInt range = 0; range < done; range++) {
3490 if (ioplList[range].fIOPL) {
3491 upl_abort(ioplList[range].fIOPL, 0);
3492 upl_deallocate(ioplList[range].fIOPL);
3493 }
3494 }
3495 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
55e303ae 3496 }
1c79356b 3497
0a7de745
A
3498 if (error == KERN_FAILURE) {
3499 error = kIOReturnCannotWire;
3500 } else if (error == KERN_MEMORY_ERROR) {
3501 error = kIOReturnNoResources;
3502 }
2d21ac55 3503
0a7de745 3504 return error;
55e303ae 3505}
d7e50217 3506
0a7de745
A
3507bool
3508IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
99c3a104 3509{
0a7de745
A
3510 ioGMDData * dataP;
3511 unsigned dataSize = size;
3512
3513 if (!_memoryEntries) {
3514 _memoryEntries = OSData::withCapacity(dataSize);
3515 if (!_memoryEntries) {
3516 return false;
3517 }
3518 } else if (!_memoryEntries->initWithCapacity(dataSize)) {
3519 return false;
3520 }
3521
cb323159 3522 _memoryEntries->appendBytes(NULL, computeDataSize(0, 0));
0a7de745 3523 dataP = getDataP(_memoryEntries);
99c3a104 3524
0a7de745
A
3525 if (mapper == kIOMapperWaitSystem) {
3526 IOMapper::checkForSystemMapper();
3527 mapper = IOMapper::gSystem;
3528 }
3529 dataP->fMapper = mapper;
3530 dataP->fPageCnt = 0;
3531 dataP->fMappedBase = 0;
3532 dataP->fDMAMapNumAddressBits = 64;
3533 dataP->fDMAMapAlignment = 0;
3534 dataP->fPreparationID = kIOPreparationIDUnprepared;
3535 dataP->fDiscontig = false;
3536 dataP->fCompletionError = false;
3537 dataP->fMappedBaseValid = false;
3538
3539 return true;
99c3a104
A
3540}
3541
0a7de745
A
3542IOReturn
3543IOMemoryDescriptor::dmaMap(
3544 IOMapper * mapper,
3545 IODMACommand * command,
3546 const IODMAMapSpecification * mapSpec,
3547 uint64_t offset,
3548 uint64_t length,
3549 uint64_t * mapAddress,
3550 uint64_t * mapLength)
99c3a104 3551{
0a7de745
A
3552 IOReturn err;
3553 uint32_t mapOptions;
99c3a104 3554
0a7de745
A
3555 mapOptions = 0;
3556 mapOptions |= kIODMAMapReadAccess;
3557 if (!(kIOMemoryPreparedReadOnly & _flags)) {
3558 mapOptions |= kIODMAMapWriteAccess;
3559 }
99c3a104 3560
0a7de745
A
3561 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3562 mapSpec, command, NULL, mapAddress, mapLength);
99c3a104 3563
0a7de745
A
3564 if (kIOReturnSuccess == err) {
3565 dmaMapRecord(mapper, command, *mapLength);
3566 }
5ba3f43e 3567
0a7de745 3568 return err;
5ba3f43e
A
3569}
3570
0a7de745
A
3571void
3572IOMemoryDescriptor::dmaMapRecord(
3573 IOMapper * mapper,
3574 IODMACommand * command,
3575 uint64_t mapLength)
5ba3f43e 3576{
0a7de745
A
3577 kern_allocation_name_t alloc;
3578 int16_t prior;
3579
3580 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */) {
3581 kern_allocation_update_size(mapper->fAllocName, mapLength);
3582 }
3583
3584 if (!command) {
3585 return;
3586 }
3587 prior = OSAddAtomic16(1, &_dmaReferences);
3588 if (!prior) {
3589 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3590 _mapName = alloc;
3591 mapLength = _length;
3592 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3593 } else {
3594 _mapName = NULL;
3595 }
5ba3f43e 3596 }
5ba3f43e
A
3597}
3598
0a7de745
A
3599IOReturn
3600IOMemoryDescriptor::dmaUnmap(
3601 IOMapper * mapper,
3602 IODMACommand * command,
3603 uint64_t offset,
3604 uint64_t mapAddress,
3605 uint64_t mapLength)
5ba3f43e 3606{
0a7de745
A
3607 IOReturn ret;
3608 kern_allocation_name_t alloc;
3609 kern_allocation_name_t mapName;
3610 int16_t prior;
3611
cb323159 3612 mapName = NULL;
0a7de745
A
3613 prior = 0;
3614 if (command) {
3615 mapName = _mapName;
3616 if (_dmaReferences) {
3617 prior = OSAddAtomic16(-1, &_dmaReferences);
3618 } else {
3619 panic("_dmaReferences underflow");
3620 }
3621 }
3622
3623 if (!mapLength) {
3624 return kIOReturnSuccess;
3625 }
3626
3627 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3628
3629 if ((alloc = mapper->fAllocName)) {
3630 kern_allocation_update_size(alloc, -mapLength);
3631 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag)) {
3632 mapLength = _length;
3633 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3634 }
3635 }
3636
3637 return ret;
99c3a104
A
3638}
3639
0a7de745
A
3640IOReturn
3641IOGeneralMemoryDescriptor::dmaMap(
3642 IOMapper * mapper,
3643 IODMACommand * command,
3644 const IODMAMapSpecification * mapSpec,
3645 uint64_t offset,
3646 uint64_t length,
3647 uint64_t * mapAddress,
3648 uint64_t * mapLength)
99c3a104 3649{
0a7de745
A
3650 IOReturn err = kIOReturnSuccess;
3651 ioGMDData * dataP;
3652 IOOptionBits type = _flags & kIOMemoryTypeMask;
99c3a104 3653
0a7de745
A
3654 *mapAddress = 0;
3655 if (kIOMemoryHostOnly & _flags) {
3656 return kIOReturnSuccess;
3657 }
3658 if (kIOMemoryRemote & _flags) {
3659 return kIOReturnNotAttached;
3e170ce0
A
3660 }
3661
0a7de745
A
3662 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3663 || offset || (length != _length)) {
3664 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3665 } else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries))) {
3666 const ioPLBlock * ioplList = getIOPLList(dataP);
3667 upl_page_info_t * pageList;
3668 uint32_t mapOptions = 0;
3669
3670 IODMAMapSpecification mapSpec;
3671 bzero(&mapSpec, sizeof(mapSpec));
3672 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3673 mapSpec.alignment = dataP->fDMAMapAlignment;
3674
3675 // For external UPLs the fPageInfo field points directly to
3676 // the upl's upl_page_info_t array.
3677 if (ioplList->fFlags & kIOPLExternUPL) {
3678 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3679 mapOptions |= kIODMAMapPagingPath;
3680 } else {
3681 pageList = getPageList(dataP);
3682 }
99c3a104 3683
0a7de745
A
3684 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset)) {
3685 mapOptions |= kIODMAMapPageListFullyOccupied;
3686 }
99c3a104 3687
0a7de745
A
3688 assert(dataP->fDMAAccess);
3689 mapOptions |= dataP->fDMAAccess;
5ba3f43e 3690
0a7de745
A
3691 // Check for direct device non-paged memory
3692 if (ioplList->fFlags & kIOPLOnDevice) {
3693 mapOptions |= kIODMAMapPhysicallyContiguous;
3694 }
99c3a104 3695
0a7de745
A
3696 IODMAMapPageList dmaPageList =
3697 {
3698 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3699 .pageListCount = _pages,
3700 .pageList = &pageList[0]
3701 };
3702 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3703 command, &dmaPageList, mapAddress, mapLength);
3704
3705 if (kIOReturnSuccess == err) {
3706 dmaMapRecord(mapper, command, *mapLength);
3707 }
3708 }
3709
3710 return err;
99c3a104
A
3711}
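/*
 * Illustrative sketch, not part of this file: dmaMap()/dmaUnmap() above are
 * normally exercised indirectly through IODMACommand rather than called by
 * drivers. A minimal, hypothetical flow is shown below; the helper name and
 * the option values are assumptions, and 'md' is an assumed, already-created
 * IOMemoryDescriptor (see <IOKit/IODMACommand.h>).
 */
static IOReturn
ExampleGenerateSegments(IOMemoryDescriptor * md)      // hypothetical helper
{
	IOReturn       ret = kIOReturnNoMemory;
	IODMACommand * cmd = IODMACommand::withSpecification(
		kIODMACommandOutputHost64,                 // segment output function
		64,                                        // address bits the device can drive
		0);                                        // no maximum segment size
	if (cmd) {
		ret = cmd->setMemoryDescriptor(md);        // typically ends up in dmaMap()
		if (kIOReturnSuccess == ret) {
			UInt64                  offset = 0;
			IODMACommand::Segment64 seg;
			UInt32                  numSeg = 1;
			ret = cmd->genIOVMSegments(&offset, &seg, &numSeg);
			// ... program the hardware with seg.fIOVMAddr / seg.fLength ...
			cmd->clearMemoryDescriptor();          // typically tears down via dmaUnmap()
		}
		cmd->release();
	}
	return ret;
}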
3712
55e303ae
A
3713/*
3714 * prepare
3715 *
3716 * Prepare the memory for an I/O transfer. This involves paging in
3717 * the memory, if necessary, and wiring it down for the duration of
3718 * the transfer. The complete() method completes the processing of
 3719 * the memory after the I/O transfer finishes. This method need not
 3720 * be called for non-pageable memory.
3721 */
99c3a104 3722
0a7de745
A
3723IOReturn
3724IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
55e303ae 3725{
0a7de745
A
3726 IOReturn error = kIOReturnSuccess;
3727 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 3728
0a7de745
A
3729 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3730 return kIOReturnSuccess;
3731 }
2d21ac55 3732
0a7de745
A
3733 assert(!(kIOMemoryRemote & _flags));
3734 if (kIOMemoryRemote & _flags) {
3735 return kIOReturnNotAttached;
3736 }
5ba3f43e 3737
0a7de745
A
3738 if (_prepareLock) {
3739 IOLockLock(_prepareLock);
3740 }
2d21ac55 3741
0a7de745
A
3742 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3743 error = wireVirtual(forDirection);
3744 }
de355530 3745
0a7de745
A
3746 if (kIOReturnSuccess == error) {
3747 if (1 == ++_wireCount) {
3748 if (kIOMemoryClearEncrypt & _flags) {
3749 performOperation(kIOMemoryClearEncrypted, 0, _length);
3750 }
3751 }
3752 }
0b4c1975 3753
0a7de745
A
3754 if (_prepareLock) {
3755 IOLockUnlock(_prepareLock);
3756 }
2d21ac55 3757
0a7de745 3758 return error;
1c79356b
A
3759}
3760
3761/*
3762 * complete
3763 *
3764 * Complete processing of the memory after an I/O transfer finishes.
3765 * This method should not be called unless a prepare was previously
 3766 * issued; the prepare() and complete() calls must occur in pairs,
 3767 * before and after an I/O transfer involving pageable memory.
3768 */
6d2010ae 3769
0a7de745
A
3770IOReturn
3771IOGeneralMemoryDescriptor::complete(IODirection forDirection)
1c79356b 3772{
0a7de745
A
3773 IOOptionBits type = _flags & kIOMemoryTypeMask;
3774 ioGMDData * dataP;
1c79356b 3775
0a7de745
A
3776 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
3777 return kIOReturnSuccess;
3778 }
3779
3780 assert(!(kIOMemoryRemote & _flags));
3781 if (kIOMemoryRemote & _flags) {
3782 return kIOReturnNotAttached;
3783 }
3784
3785 if (_prepareLock) {
3786 IOLockLock(_prepareLock);
3787 }
3788 do{
3789 assert(_wireCount);
3790 if (!_wireCount) {
3791 break;
3792 }
3793 dataP = getDataP(_memoryEntries);
3794 if (!dataP) {
3795 break;
3796 }
3797
3798 if (kIODirectionCompleteWithError & forDirection) {
3799 dataP->fCompletionError = true;
3800 }
3801
3802 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount)) {
3803 performOperation(kIOMemorySetEncrypted, 0, _length);
3804 }
1c79356b 3805
0a7de745
A
3806 _wireCount--;
3807 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection)) {
3808 ioPLBlock *ioplList = getIOPLList(dataP);
3809 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3810
3811 if (_wireCount) {
3812 // kIODirectionCompleteWithDataValid & forDirection
3813 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3814 vm_tag_t tag;
3815 tag = getVMTag(kernel_map);
3816 for (ind = 0; ind < count; ind++) {
3817 if (ioplList[ind].fIOPL) {
3818 iopl_valid_data(ioplList[ind].fIOPL, tag);
3819 }
3820 }
3821 }
3822 } else {
3823 if (_dmaReferences) {
3824 panic("complete() while dma active");
3825 }
3826
3827 if (dataP->fMappedBaseValid) {
3828 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3829 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3830 }
3e170ce0 3831#if IOTRACKING
0a7de745
A
3832 if (dataP->fWireTracking.link.next) {
3833 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3834 }
39037602 3835#endif /* IOTRACKING */
0a7de745
A
 3836 // Only complete IOPLs that we created, which are for TypeVirtual
3837 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3838 for (ind = 0; ind < count; ind++) {
3839 if (ioplList[ind].fIOPL) {
3840 if (dataP->fCompletionError) {
3841 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3842 } else {
cb323159 3843 upl_commit(ioplList[ind].fIOPL, NULL, 0);
0a7de745
A
3844 }
3845 upl_deallocate(ioplList[ind].fIOPL);
3846 }
3847 }
3848 } else if (kIOMemoryTypeUPL == type) {
3849 upl_set_referenced(ioplList[0].fIOPL, false);
3850 }
3851
3852 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3853
3854 dataP->fPreparationID = kIOPreparationIDUnprepared;
3855 _flags &= ~kIOMemoryPreparedReadOnly;
3856 }
3857 }
3858 }while (false);
3859
3860 if (_prepareLock) {
3861 IOLockUnlock(_prepareLock);
3862 }
3863
3864 return kIOReturnSuccess;
1c79356b
A
3865}
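/*
 * Illustrative sketch, not part of this file: the prepare()/complete() pairing
 * described in the comments above, as a driver might use it. The helper name,
 * the buffer parameters, and the placeholder transfer are assumptions.
 */
static IOReturn
ExampleWiredTransfer(task_t task, mach_vm_address_t buffer, mach_vm_size_t length)  // hypothetical helper
{
	IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
		buffer, length, kIODirectionOut, task);
	if (!md) {
		return kIOReturnNoMemory;
	}
	IOReturn ret = md->prepare(kIODirectionOut);       // page in and wire the buffer
	if (kIOReturnSuccess == ret) {
		// ... perform the transfer, e.g. hand 'md' to an IODMACommand ...
		md->complete(kIODirectionOut);              // must pair with the prepare() above
	}
	md->release();
	return ret;
}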
3866
0a7de745
A
3867IOReturn
3868IOGeneralMemoryDescriptor::doMap(
3869 vm_map_t __addressMap,
3870 IOVirtualAddress * __address,
3871 IOOptionBits options,
3872 IOByteCount __offset,
3873 IOByteCount __length )
1c79356b 3874{
b0d623f7 3875#ifndef __LP64__
0a7de745
A
3876 if (!(kIOMap64Bit & options)) {
3877 panic("IOGeneralMemoryDescriptor::doMap !64bit");
3878 }
b0d623f7 3879#endif /* !__LP64__ */
2d21ac55 3880
0a7de745 3881 kern_return_t err;
fe8ab488 3882
0a7de745
A
3883 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3884 mach_vm_size_t offset = mapping->fOffset + __offset;
3885 mach_vm_size_t length = mapping->fLength;
2d21ac55 3886
0a7de745
A
3887 IOOptionBits type = _flags & kIOMemoryTypeMask;
3888 Ranges vec = _ranges;
91447636 3889
0a7de745
A
3890 mach_vm_address_t range0Addr = 0;
3891 mach_vm_size_t range0Len = 0;
060df5ea 3892
0a7de745
A
3893 if ((offset >= _length) || ((offset + length) > _length)) {
3894 return kIOReturnBadArgument;
3895 }
5ba3f43e 3896
0a7de745
A
3897 assert(!(kIOMemoryRemote & _flags));
3898 if (kIOMemoryRemote & _flags) {
3899 return 0;
3900 }
91447636 3901
0a7de745
A
3902 if (vec.v) {
3903 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3904 }
2d21ac55 3905
0a7de745
A
3906 // mapping source == dest? (could be much better)
3907 if (_task
3908 && (mapping->fAddressTask == _task)
3909 && (mapping->fAddressMap == get_task_map(_task))
3910 && (options & kIOMapAnywhere)
3911 && (!(kIOMapUnique & options))
3912 && (1 == _rangesCount)
3913 && (0 == offset)
3914 && range0Addr
3915 && (length <= range0Len)) {
3916 mapping->fAddress = range0Addr;
3917 mapping->fOptions |= kIOMapStatic;
3918
3919 return kIOReturnSuccess;
3920 }
1c79356b 3921
0a7de745
A
3922 if (!_memRef) {
3923 IOOptionBits createOptions = 0;
3924 if (!(kIOMapReadOnly & options)) {
3925 createOptions |= kIOMemoryReferenceWrite;
fe8ab488 3926#if DEVELOPMENT || DEBUG
cb323159
A
3927 if ((kIODirectionOut == (kIODirectionOutIn & _flags))
3928 && (!reserved || (reserved->creator != mapping->fAddressTask))) {
0a7de745
A
3929 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3930 }
0b4e3aa0 3931#endif
0a7de745
A
3932 }
3933 err = memoryReferenceCreate(createOptions, &_memRef);
3934 if (kIOReturnSuccess != err) {
3935 return err;
3936 }
fe8ab488 3937 }
9bccf70c 3938
0a7de745 3939 memory_object_t pager;
cb323159 3940 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : NULL);
0a7de745
A
3941
3942 // <upl_transpose //
3943 if ((kIOMapReference | kIOMapUnique) == ((kIOMapReference | kIOMapUnique) & options)) {
3944 do{
3945 upl_t redirUPL2;
3946 upl_size_t size;
3947 upl_control_flags_t flags;
3948 unsigned int lock_count;
3949
3950 if (!_memRef || (1 != _memRef->count)) {
3951 err = kIOReturnNotReadable;
3952 break;
3953 }
3954
3955 size = round_page(mapping->fLength);
3956 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3957 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3958
3959 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3960 NULL, NULL,
3961 &flags, getVMTag(kernel_map))) {
3962 redirUPL2 = NULL;
3963 }
3964
3965 for (lock_count = 0;
3966 IORecursiveLockHaveLock(gIOMemoryLock);
3967 lock_count++) {
3968 UNLOCK;
3969 }
3970 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3971 for (;
3972 lock_count;
3973 lock_count--) {
3974 LOCK;
3975 }
3976
3977 if (kIOReturnSuccess != err) {
3978 IOLog("upl_transpose(%x)\n", err);
3979 err = kIOReturnSuccess;
3980 }
3981
3982 if (redirUPL2) {
3983 upl_commit(redirUPL2, NULL, 0);
3984 upl_deallocate(redirUPL2);
cb323159 3985 redirUPL2 = NULL;
0a7de745
A
3986 }
3987 {
3988 // swap the memEntries since they now refer to different vm_objects
3989 IOMemoryReference * me = _memRef;
3990 _memRef = mapping->fMemory->_memRef;
3991 mapping->fMemory->_memRef = me;
3992 }
3993 if (pager) {
3994 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3995 }
3996 }while (false);
39037602 3997 }
0a7de745
A
3998 // upl_transpose> //
3999 else {
4000 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
4001#if IOTRACKING
4002 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task)) {
 4003 // only dram maps in the default on development case
4004 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
4005 }
39037602 4006#endif /* IOTRACKING */
0a7de745
A
4007 if ((err == KERN_SUCCESS) && pager) {
4008 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
4009
4010 if (err != KERN_SUCCESS) {
4011 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
4012 } else if (kIOMapDefaultCache == (options & kIOMapCacheMask)) {
4013 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
4014 }
4015 }
fe8ab488 4016 }
fe8ab488 4017
0a7de745 4018 return err;
1c79356b
A
4019}
4020
39037602
A
4021#if IOTRACKING
4022IOReturn
4023IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
0a7de745 4024 mach_vm_address_t * address, mach_vm_size_t * size)
39037602 4025{
cb323159 4026#define iomap_offsetof(type, field) ((size_t)(&((type *)NULL)->field))
39037602 4027
0a7de745 4028 IOMemoryMap * map = (typeof(map))(((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
39037602 4029
0a7de745
A
4030 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) {
4031 return kIOReturnNotReady;
4032 }
39037602 4033
0a7de745
A
4034 *task = map->fAddressTask;
4035 *address = map->fAddress;
4036 *size = map->fLength;
39037602 4037
0a7de745 4038 return kIOReturnSuccess;
39037602
A
4039}
4040#endif /* IOTRACKING */
4041
0a7de745
A
4042IOReturn
4043IOGeneralMemoryDescriptor::doUnmap(
4044 vm_map_t addressMap,
4045 IOVirtualAddress __address,
4046 IOByteCount __length )
1c79356b 4047{
0a7de745 4048 return super::doUnmap(addressMap, __address, __length);
1c79356b
A
4049}
4050
4051/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4052
b0d623f7
A
4053#undef super
4054#define super OSObject
1c79356b 4055
b0d623f7 4056OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 4057
b0d623f7
A
4058OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
4059OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
4060OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
4061OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
4062OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
4063OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
4064OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
4065OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 4066
b0d623f7 4067/* ex-inline function implementation */
0a7de745
A
4068IOPhysicalAddress
4069IOMemoryMap::getPhysicalAddress()
4070{
cb323159 4071 return getPhysicalSegment( 0, NULL );
0a7de745 4072}
1c79356b
A
4073
4074/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4075
0a7de745
A
4076bool
4077IOMemoryMap::init(
4078 task_t intoTask,
4079 mach_vm_address_t toAddress,
4080 IOOptionBits _options,
4081 mach_vm_size_t _offset,
4082 mach_vm_size_t _length )
1c79356b 4083{
0a7de745
A
4084 if (!intoTask) {
4085 return false;
4086 }
1c79356b 4087
0a7de745
A
4088 if (!super::init()) {
4089 return false;
4090 }
1c79356b 4091
0a7de745
A
4092 fAddressMap = get_task_map(intoTask);
4093 if (!fAddressMap) {
4094 return false;
4095 }
4096 vm_map_reference(fAddressMap);
1c79356b 4097
0a7de745
A
4098 fAddressTask = intoTask;
4099 fOptions = _options;
4100 fLength = _length;
4101 fOffset = _offset;
4102 fAddress = toAddress;
1c79356b 4103
0a7de745 4104 return true;
1c79356b
A
4105}
4106
0a7de745
A
4107bool
4108IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 4109{
0a7de745
A
4110 if (!_memory) {
4111 return false;
4112 }
4113
4114 if (!fSuperMap) {
4115 if ((_offset + fLength) > _memory->getLength()) {
4116 return false;
4117 }
4118 fOffset = _offset;
4119 }
4120
4121 _memory->retain();
4122 if (fMemory) {
4123 if (fMemory != _memory) {
4124 fMemory->removeMapping(this);
4125 }
4126 fMemory->release();
4127 }
4128 fMemory = _memory;
4129
4130 return true;
1c79356b
A
4131}
4132
0a7de745
A
4133IOReturn
4134IOMemoryDescriptor::doMap(
4135 vm_map_t __addressMap,
4136 IOVirtualAddress * __address,
4137 IOOptionBits options,
4138 IOByteCount __offset,
4139 IOByteCount __length )
1c79356b 4140{
0a7de745 4141 return kIOReturnUnsupported;
fe8ab488 4142}
1c79356b 4143
0a7de745
A
4144IOReturn
4145IOMemoryDescriptor::handleFault(
4146 void * _pager,
4147 mach_vm_size_t sourceOffset,
4148 mach_vm_size_t length)
fe8ab488 4149{
0a7de745 4150 if (kIOMemoryRedirected & _flags) {
b0d623f7 4151#if DEBUG
0a7de745 4152 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2d21ac55 4153#endif
0a7de745
A
4154 do {
4155 SLEEP;
4156 } while (kIOMemoryRedirected & _flags);
4157 }
4158 return kIOReturnSuccess;
0b4e3aa0
A
4159}
4160
0a7de745
A
4161IOReturn
4162IOMemoryDescriptor::populateDevicePager(
4163 void * _pager,
4164 vm_map_t addressMap,
4165 mach_vm_address_t address,
4166 mach_vm_size_t sourceOffset,
4167 mach_vm_size_t length,
4168 IOOptionBits options )
0b4e3aa0 4169{
0a7de745
A
4170 IOReturn err = kIOReturnSuccess;
4171 memory_object_t pager = (memory_object_t) _pager;
4172 mach_vm_size_t size;
4173 mach_vm_size_t bytes;
4174 mach_vm_size_t page;
4175 mach_vm_size_t pageOffset;
4176 mach_vm_size_t pagerOffset;
4177 IOPhysicalLength segLen, chunk;
4178 addr64_t physAddr;
4179 IOOptionBits type;
4180
4181 type = _flags & kIOMemoryTypeMask;
4182
4183 if (reserved->dp.pagerContig) {
4184 sourceOffset = 0;
4185 pagerOffset = 0;
4186 }
4187
4188 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
4189 assert( physAddr );
4190 pageOffset = physAddr - trunc_page_64( physAddr );
4191 pagerOffset = sourceOffset;
4192
4193 size = length + pageOffset;
4194 physAddr -= pageOffset;
4195
4196 segLen += pageOffset;
4197 bytes = size;
4198 do{
4199 // in the middle of the loop only map whole pages
4200 if (segLen >= bytes) {
4201 segLen = bytes;
cb323159 4202 } else if (segLen != trunc_page_64(segLen)) {
0a7de745
A
4203 err = kIOReturnVMError;
4204 }
4205 if (physAddr != trunc_page_64(physAddr)) {
4206 err = kIOReturnBadArgument;
4207 }
4208
4209 if (kIOReturnSuccess != err) {
4210 break;
4211 }
1c79356b 4212
3e170ce0 4213#if DEBUG || DEVELOPMENT
0a7de745
A
4214 if ((kIOMemoryTypeUPL != type)
4215 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1))) {
4216 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
4217 }
3e170ce0
A
4218#endif /* DEBUG || DEVELOPMENT */
4219
0a7de745
A
4220 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
4221 for (page = 0;
4222 (page < segLen) && (KERN_SUCCESS == err);
4223 page += chunk) {
4224 err = device_pager_populate_object(pager, pagerOffset,
4225 (ppnum_t)(atop_64(physAddr + page)), chunk);
4226 pagerOffset += chunk;
4227 }
5ba3f43e 4228
0a7de745
A
4229 assert(KERN_SUCCESS == err);
4230 if (err) {
4231 break;
4232 }
4233
4234 // This call to vm_fault causes an early pmap level resolution
4235 // of the mappings created above for kernel mappings, since
4236 // faulting in later can't take place from interrupt level.
4237 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags)) {
4238 err = vm_fault(addressMap,
4239 (vm_map_offset_t)trunc_page_64(address),
4240 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ | VM_PROT_WRITE,
4241 FALSE, VM_KERN_MEMORY_NONE,
4242 THREAD_UNINT, NULL,
4243 (vm_map_offset_t)0);
4244
4245 if (KERN_SUCCESS != err) {
4246 break;
4247 }
4248 }
9bccf70c 4249
0a7de745
A
4250 sourceOffset += segLen - pageOffset;
4251 address += segLen;
4252 bytes -= segLen;
4253 pageOffset = 0;
4254 }while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 4255
0a7de745
A
4256 if (bytes) {
4257 err = kIOReturnBadArgument;
4258 }
1c79356b 4259
0a7de745 4260 return err;
1c79356b
A
4261}
4262
0a7de745
A
4263IOReturn
4264IOMemoryDescriptor::doUnmap(
4265 vm_map_t addressMap,
4266 IOVirtualAddress __address,
4267 IOByteCount __length )
1c79356b 4268{
0a7de745
A
4269 IOReturn err;
4270 IOMemoryMap * mapping;
4271 mach_vm_address_t address;
4272 mach_vm_size_t length;
4273
4274 if (__length) {
4275 panic("doUnmap");
4276 }
4277
4278 mapping = (IOMemoryMap *) __address;
4279 addressMap = mapping->fAddressMap;
4280 address = mapping->fAddress;
4281 length = mapping->fLength;
4282
4283 if (kIOMapOverwrite & mapping->fOptions) {
4284 err = KERN_SUCCESS;
4285 } else {
4286 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
4287 addressMap = IOPageableMapForAddress( address );
4288 }
b0d623f7 4289#if DEBUG
0a7de745
A
4290 if (kIOLogMapping & gIOKitDebug) {
4291 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4292 addressMap, address, length );
4293 }
1c79356b 4294#endif
0a7de745
A
4295 err = mach_vm_deallocate( addressMap, address, length );
4296 }
1c79356b 4297
3e170ce0 4298#if IOTRACKING
0a7de745 4299 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
39037602 4300#endif /* IOTRACKING */
1c79356b 4301
0a7de745 4302 return err;
1c79356b
A
4303}
4304
0a7de745
A
4305IOReturn
4306IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 4307{
0a7de745 4308 IOReturn err = kIOReturnSuccess;
cb323159 4309 IOMemoryMap * mapping = NULL;
0a7de745 4310 OSIterator * iter;
91447636 4311
0a7de745 4312 LOCK;
39236c6e 4313
0a7de745
A
4314 if (doRedirect) {
4315 _flags |= kIOMemoryRedirected;
4316 } else {
4317 _flags &= ~kIOMemoryRedirected;
4318 }
39236c6e 4319
0a7de745
A
4320 do {
4321 if ((iter = OSCollectionIterator::withCollection( _mappings))) {
4322 memory_object_t pager;
4323
4324 if (reserved) {
4325 pager = (memory_object_t) reserved->dp.devicePager;
4326 } else {
4327 pager = MACH_PORT_NULL;
4328 }
4329
4330 while ((mapping = (IOMemoryMap *) iter->getNextObject())) {
4331 mapping->redirect( safeTask, doRedirect );
4332 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap)) {
4333 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4334 }
4335 }
4336
4337 iter->release();
39236c6e 4338 }
0a7de745 4339 } while (false);
e3027f41 4340
0a7de745
A
4341 if (!doRedirect) {
4342 WAKEUP;
91447636 4343 }
0b4e3aa0 4344
0a7de745 4345 UNLOCK;
e3027f41 4346
b0d623f7 4347#ifndef __LP64__
0a7de745
A
4348 // temporary binary compatibility
4349 IOSubMemoryDescriptor * subMem;
4350 if ((subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) {
4351 err = subMem->redirect( safeTask, doRedirect );
4352 } else {
4353 err = kIOReturnSuccess;
4354 }
b0d623f7 4355#endif /* !__LP64__ */
e3027f41 4356
0a7de745 4357 return err;
e3027f41
A
4358}
4359
0a7de745
A
4360IOReturn
4361IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41 4362{
0a7de745 4363 IOReturn err = kIOReturnSuccess;
e3027f41 4364
0a7de745 4365 if (fSuperMap) {
b0d623f7 4366// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
0a7de745
A
4367 } else {
4368 LOCK;
4369
4370 do{
4371 if (!fAddress) {
4372 break;
4373 }
4374 if (!fAddressMap) {
4375 break;
4376 }
4377
4378 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4379 && (0 == (fOptions & kIOMapStatic))) {
4380 IOUnmapPages( fAddressMap, fAddress, fLength );
4381 err = kIOReturnSuccess;
b0d623f7 4382#if DEBUG
0a7de745 4383 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 4384#endif
0a7de745
A
4385 } else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) {
4386 IOOptionBits newMode;
4387 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4388 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4389 }
4390 }while (false);
4391 UNLOCK;
4392 }
e3027f41 4393
0a7de745
A
4394 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4395 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4396 && safeTask
4397 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) {
4398 fMemory->redirect(safeTask, doRedirect);
4399 }
91447636 4400
0a7de745 4401 return err;
e3027f41
A
4402}
4403
0a7de745
A
4404IOReturn
4405IOMemoryMap::unmap( void )
1c79356b 4406{
0a7de745 4407 IOReturn err;
1c79356b 4408
0a7de745 4409 LOCK;
1c79356b 4410
cb323159 4411 if (fAddress && fAddressMap && (NULL == fSuperMap) && fMemory
0a7de745
A
4412 && (0 == (kIOMapStatic & fOptions))) {
4413 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4414 } else {
4415 err = kIOReturnSuccess;
4416 }
1c79356b 4417
0a7de745
A
4418 if (fAddressMap) {
4419 vm_map_deallocate(fAddressMap);
cb323159 4420 fAddressMap = NULL;
0a7de745 4421 }
2d21ac55 4422
0a7de745 4423 fAddress = 0;
1c79356b 4424
0a7de745 4425 UNLOCK;
1c79356b 4426
0a7de745 4427 return err;
1c79356b
A
4428}
4429
0a7de745
A
4430void
4431IOMemoryMap::taskDied( void )
1c79356b 4432{
0a7de745
A
4433 LOCK;
4434 if (fUserClientUnmap) {
4435 unmap();
4436 }
3e170ce0 4437#if IOTRACKING
0a7de745
A
4438 else {
4439 IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4440 }
39037602 4441#endif /* IOTRACKING */
3e170ce0 4442
0a7de745
A
4443 if (fAddressMap) {
4444 vm_map_deallocate(fAddressMap);
cb323159 4445 fAddressMap = NULL;
0a7de745 4446 }
cb323159 4447 fAddressTask = NULL;
0a7de745
A
4448 fAddress = 0;
4449 UNLOCK;
1c79356b
A
4450}
4451
0a7de745
A
4452IOReturn
4453IOMemoryMap::userClientUnmap( void )
b0d623f7 4454{
0a7de745
A
4455 fUserClientUnmap = true;
4456 return kIOReturnSuccess;
b0d623f7
A
4457}
4458
9bccf70c
A
 4459// Overload the release mechanism. All mappings must be members
 4460// of a memory descriptor's _mappings set. This means that we
 4461// always have 2 references on a mapping. When either of these
 4462// references is released we need to free ourselves.
0a7de745
A
4463void
4464IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 4465{
0a7de745
A
4466 LOCK;
4467 super::taggedRelease(tag, 2);
4468 UNLOCK;
9bccf70c
A
4469}
4470
0a7de745
A
4471void
4472IOMemoryMap::free()
1c79356b 4473{
0a7de745 4474 unmap();
1c79356b 4475
0a7de745
A
4476 if (fMemory) {
4477 LOCK;
4478 fMemory->removeMapping(this);
4479 UNLOCK;
4480 fMemory->release();
4481 }
1c79356b 4482
0a7de745
A
4483 if (fOwner && (fOwner != fMemory)) {
4484 LOCK;
4485 fOwner->removeMapping(this);
4486 UNLOCK;
4487 }
91447636 4488
0a7de745
A
4489 if (fSuperMap) {
4490 fSuperMap->release();
4491 }
1c79356b 4492
0a7de745
A
4493 if (fRedirUPL) {
4494 upl_commit(fRedirUPL, NULL, 0);
4495 upl_deallocate(fRedirUPL);
4496 }
91447636 4497
0a7de745 4498 super::free();
1c79356b
A
4499}
4500
0a7de745
A
4501IOByteCount
4502IOMemoryMap::getLength()
1c79356b 4503{
0a7de745 4504 return fLength;
1c79356b
A
4505}
4506
0a7de745
A
4507IOVirtualAddress
4508IOMemoryMap::getVirtualAddress()
1c79356b 4509{
b0d623f7 4510#ifndef __LP64__
0a7de745
A
4511 if (fSuperMap) {
4512 fSuperMap->getVirtualAddress();
4513 } else if (fAddressMap
4514 && vm_map_is_64bit(fAddressMap)
4515 && (sizeof(IOVirtualAddress) < 8)) {
4516 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4517 }
b0d623f7 4518#endif /* !__LP64__ */
2d21ac55 4519
0a7de745 4520 return fAddress;
2d21ac55
A
4521}
4522
b0d623f7 4523#ifndef __LP64__
0a7de745
A
4524mach_vm_address_t
4525IOMemoryMap::getAddress()
2d21ac55 4526{
0a7de745 4527 return fAddress;
2d21ac55
A
4528}
4529
0a7de745
A
4530mach_vm_size_t
4531IOMemoryMap::getSize()
2d21ac55 4532{
0a7de745 4533 return fLength;
1c79356b 4534}
b0d623f7 4535#endif /* !__LP64__ */
1c79356b 4536
2d21ac55 4537
0a7de745
A
4538task_t
4539IOMemoryMap::getAddressTask()
1c79356b 4540{
0a7de745
A
4541 if (fSuperMap) {
4542 return fSuperMap->getAddressTask();
4543 } else {
4544 return fAddressTask;
4545 }
1c79356b
A
4546}
4547
0a7de745
A
4548IOOptionBits
4549IOMemoryMap::getMapOptions()
1c79356b 4550{
0a7de745 4551 return fOptions;
1c79356b
A
4552}
4553
0a7de745
A
4554IOMemoryDescriptor *
4555IOMemoryMap::getMemoryDescriptor()
1c79356b 4556{
0a7de745 4557 return fMemory;
1c79356b
A
4558}
4559
0a7de745
A
4560IOMemoryMap *
4561IOMemoryMap::copyCompatible(
4562 IOMemoryMap * newMapping )
1c79356b 4563{
0a7de745
A
4564 task_t task = newMapping->getAddressTask();
4565 mach_vm_address_t toAddress = newMapping->fAddress;
4566 IOOptionBits _options = newMapping->fOptions;
4567 mach_vm_size_t _offset = newMapping->fOffset;
4568 mach_vm_size_t _length = newMapping->fLength;
4569
4570 if ((!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) {
cb323159 4571 return NULL;
0a7de745
A
4572 }
4573 if ((fOptions ^ _options) & kIOMapReadOnly) {
cb323159 4574 return NULL;
0a7de745
A
4575 }
4576 if ((kIOMapDefaultCache != (_options & kIOMapCacheMask))
4577 && ((fOptions ^ _options) & kIOMapCacheMask)) {
cb323159 4578 return NULL;
0a7de745
A
4579 }
4580
4581 if ((0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) {
cb323159 4582 return NULL;
0a7de745
A
4583 }
4584
4585 if (_offset < fOffset) {
cb323159 4586 return NULL;
0a7de745
A
4587 }
4588
4589 _offset -= fOffset;
4590
4591 if ((_offset + _length) > fLength) {
cb323159 4592 return NULL;
0a7de745
A
4593 }
4594
4595 retain();
4596 if ((fLength == _length) && (!_offset)) {
4597 newMapping = this;
4598 } else {
4599 newMapping->fSuperMap = this;
4600 newMapping->fOffset = fOffset + _offset;
4601 newMapping->fAddress = fAddress + _offset;
4602 }
4603
4604 return newMapping;
1c79356b
A
4605}
4606
0a7de745
A
4607IOReturn
4608IOMemoryMap::wireRange(
4609 uint32_t options,
4610 mach_vm_size_t offset,
4611 mach_vm_size_t length)
99c3a104 4612{
0a7de745
A
4613 IOReturn kr;
4614 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4615 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4616 vm_prot_t prot;
4617
4618 prot = (kIODirectionOutIn & options);
4619 if (prot) {
4620 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4621 } else {
4622 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4623 }
4624
4625 return kr;
99c3a104
A
4626}
4627
4628
0a7de745 4629IOPhysicalAddress
b0d623f7
A
4630#ifdef __LP64__
4631IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4632#else /* !__LP64__ */
4633IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4634#endif /* !__LP64__ */
1c79356b 4635{
0a7de745 4636 IOPhysicalAddress address;
1c79356b 4637
0a7de745 4638 LOCK;
b0d623f7 4639#ifdef __LP64__
0a7de745 4640 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
b0d623f7 4641#else /* !__LP64__ */
0a7de745 4642 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 4643#endif /* !__LP64__ */
0a7de745 4644 UNLOCK;
1c79356b 4645
0a7de745 4646 return address;
1c79356b
A
4647}
4648
4649/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4650
4651#undef super
4652#define super OSObject
4653
4654/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4655
0a7de745
A
4656void
4657IOMemoryDescriptor::initialize( void )
1c79356b 4658{
cb323159 4659 if (NULL == gIOMemoryLock) {
0a7de745
A
4660 gIOMemoryLock = IORecursiveLockAlloc();
4661 }
55e303ae 4662
0a7de745 4663 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
4664}
4665
0a7de745
A
4666void
4667IOMemoryDescriptor::free( void )
1c79356b 4668{
0a7de745
A
4669 if (_mappings) {
4670 _mappings->release();
4671 }
4672
4673 if (reserved) {
cb323159 4674 cleanKernelReserved(reserved);
0a7de745
A
4675 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4676 reserved = NULL;
4677 }
4678 super::free();
1c79356b
A
4679}
4680
0a7de745
A
4681IOMemoryMap *
4682IOMemoryDescriptor::setMapping(
4683 task_t intoTask,
4684 IOVirtualAddress mapAddress,
4685 IOOptionBits options )
1c79356b 4686{
0a7de745
A
4687 return createMappingInTask( intoTask, mapAddress,
4688 options | kIOMapStatic,
4689 0, getLength());
1c79356b
A
4690}
4691
0a7de745
A
4692IOMemoryMap *
4693IOMemoryDescriptor::map(
4694 IOOptionBits options )
1c79356b 4695{
0a7de745
A
4696 return createMappingInTask( kernel_task, 0,
4697 options | kIOMapAnywhere,
4698 0, getLength());
1c79356b
A
4699}
4700
b0d623f7 4701#ifndef __LP64__
0a7de745
A
4702IOMemoryMap *
4703IOMemoryDescriptor::map(
4704 task_t intoTask,
4705 IOVirtualAddress atAddress,
4706 IOOptionBits options,
4707 IOByteCount offset,
4708 IOByteCount length )
1c79356b 4709{
0a7de745
A
4710 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) {
4711 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
cb323159 4712 return NULL;
0a7de745
A
4713 }
4714
4715 return createMappingInTask(intoTask, atAddress,
4716 options, offset, length);
2d21ac55 4717}
b0d623f7 4718#endif /* !__LP64__ */
2d21ac55 4719
0a7de745
A
4720IOMemoryMap *
4721IOMemoryDescriptor::createMappingInTask(
4722 task_t intoTask,
4723 mach_vm_address_t atAddress,
4724 IOOptionBits options,
4725 mach_vm_size_t offset,
4726 mach_vm_size_t length)
2d21ac55 4727{
0a7de745
A
4728 IOMemoryMap * result;
4729 IOMemoryMap * mapping;
2d21ac55 4730
0a7de745
A
4731 if (0 == length) {
4732 length = getLength();
4733 }
1c79356b 4734
0a7de745 4735 mapping = new IOMemoryMap;
2d21ac55 4736
0a7de745
A
4737 if (mapping
4738 && !mapping->init( intoTask, atAddress,
4739 options, offset, length )) {
4740 mapping->release();
cb323159 4741 mapping = NULL;
0a7de745 4742 }
2d21ac55 4743
0a7de745
A
4744 if (mapping) {
4745 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4746 } else {
cb323159 4747 result = NULL;
0a7de745 4748 }
2d21ac55 4749
b0d623f7 4750#if DEBUG
0a7de745
A
4751 if (!result) {
4752 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4753 this, atAddress, (uint32_t) options, offset, length);
4754 }
2d21ac55
A
4755#endif
4756
0a7de745 4757 return result;
1c79356b
A
4758}
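/*
 * Illustrative sketch, not part of this file: mapping a descriptor into the
 * kernel task with createMappingInTask() and reading back the result. The
 * helper name is an assumption; 'md' is an assumed, already-created
 * IOMemoryDescriptor. A zero length maps the whole descriptor, as above.
 */
static IOReturn
ExampleMapIntoKernel(IOMemoryDescriptor * md)         // hypothetical helper
{
	IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
	if (!map) {
		return kIOReturnVMError;
	}
	IOVirtualAddress addr = map->getVirtualAddress();
	IOByteCount      size = map->getLength();
	// ... use [addr, addr + size) ...
	map->release();                                   // dropping the caller's reference unmaps
	return kIOReturnSuccess;
}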
4759
b0d623f7 4760#ifndef __LP64__ // there is only a 64 bit version for LP64
0a7de745
A
4761IOReturn
4762IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4763 IOOptionBits options,
4764 IOByteCount offset)
2d21ac55 4765{
0a7de745 4766 return redirect(newBackingMemory, options, (mach_vm_size_t)offset);
2d21ac55 4767}
b0d623f7 4768#endif
2d21ac55 4769
0a7de745
A
4770IOReturn
4771IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4772 IOOptionBits options,
4773 mach_vm_size_t offset)
91447636 4774{
0a7de745 4775 IOReturn err = kIOReturnSuccess;
cb323159 4776 IOMemoryDescriptor * physMem = NULL;
91447636 4777
0a7de745 4778 LOCK;
91447636 4779
0a7de745
A
4780 if (fAddress && fAddressMap) {
4781 do{
4782 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4783 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4784 physMem = fMemory;
4785 physMem->retain();
4786 }
4787
4788 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count)) {
4789 upl_size_t size = round_page(fLength);
4790 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4791 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4792 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4793 NULL, NULL,
4794 &flags, fMemory->getVMTag(kernel_map))) {
cb323159 4795 fRedirUPL = NULL;
0a7de745
A
4796 }
4797
4798 if (physMem) {
4799 IOUnmapPages( fAddressMap, fAddress, fLength );
4800 if ((false)) {
cb323159 4801 physMem->redirect(NULL, true);
0a7de745
A
4802 }
4803 }
4804 }
4805
4806 if (newBackingMemory) {
4807 if (newBackingMemory != fMemory) {
4808 fOffset = 0;
4809 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4810 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4811 offset, fLength)) {
4812 err = kIOReturnError;
4813 }
4814 }
4815 if (fRedirUPL) {
4816 upl_commit(fRedirUPL, NULL, 0);
4817 upl_deallocate(fRedirUPL);
cb323159 4818 fRedirUPL = NULL;
0a7de745
A
4819 }
4820 if ((false) && physMem) {
cb323159 4821 physMem->redirect(NULL, false);
0a7de745
A
4822 }
4823 }
4824 }while (false);
91447636 4825 }
91447636 4826
0a7de745 4827 UNLOCK;
91447636 4828
0a7de745
A
4829 if (physMem) {
4830 physMem->release();
4831 }
91447636 4832
0a7de745 4833 return err;
91447636
A
4834}
4835
0a7de745
A
4836IOMemoryMap *
4837IOMemoryDescriptor::makeMapping(
4838 IOMemoryDescriptor * owner,
4839 task_t __intoTask,
4840 IOVirtualAddress __address,
4841 IOOptionBits options,
4842 IOByteCount __offset,
4843 IOByteCount __length )
1c79356b 4844{
b0d623f7 4845#ifndef __LP64__
0a7de745
A
4846 if (!(kIOMap64Bit & options)) {
4847 panic("IOMemoryDescriptor::makeMapping !64bit");
4848 }
b0d623f7 4849#endif /* !__LP64__ */
2d21ac55 4850
cb323159
A
4851 IOMemoryDescriptor * mapDesc = NULL;
4852 __block IOMemoryMap * result = NULL;
2d21ac55 4853
0a7de745
A
4854 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4855 mach_vm_size_t offset = mapping->fOffset + __offset;
4856 mach_vm_size_t length = mapping->fLength;
2d21ac55 4857
0a7de745 4858 mapping->fOffset = offset;
1c79356b 4859
0a7de745 4860 LOCK;
1c79356b 4861
0a7de745
A
4862 do{
4863 if (kIOMapStatic & options) {
4864 result = mapping;
4865 addMapping(mapping);
4866 mapping->setMemoryDescriptor(this, 0);
4867 continue;
4868 }
2d21ac55 4869
0a7de745
A
4870 if (kIOMapUnique & options) {
4871 addr64_t phys;
4872 IOByteCount physLen;
1c79356b 4873
2d21ac55 4874// if (owner != this) continue;
1c79356b 4875
0a7de745
A
4876 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4877 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) {
4878 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4879 if (!phys || (physLen < length)) {
4880 continue;
4881 }
4882
4883 mapDesc = IOMemoryDescriptor::withAddressRange(
4884 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4885 if (!mapDesc) {
4886 continue;
4887 }
4888 offset = 0;
4889 mapping->fOffset = offset;
4890 }
4891 } else {
4892 // look for a compatible existing mapping
4893 if (_mappings) {
4894 _mappings->iterateObjects(^(OSObject * object)
4895 {
4896 IOMemoryMap * lookMapping = (IOMemoryMap *) object;
4897 if ((result = lookMapping->copyCompatible(mapping))) {
4898 addMapping(result);
4899 result->setMemoryDescriptor(this, offset);
4900 return true;
4901 }
4902 return false;
4903 });
4904 }
4905 if (result || (options & kIOMapReference)) {
4906 if (result != mapping) {
4907 mapping->release();
4908 mapping = NULL;
4909 }
4910 continue;
4911 }
4912 }
4913
4914 if (!mapDesc) {
4915 mapDesc = this;
4916 mapDesc->retain();
4917 }
4918 IOReturn
cb323159 4919 kr = mapDesc->doMap( NULL, (IOVirtualAddress *) &mapping, options, 0, 0 );
0a7de745
A
4920 if (kIOReturnSuccess == kr) {
4921 result = mapping;
4922 mapDesc->addMapping(result);
4923 result->setMemoryDescriptor(mapDesc, offset);
4924 } else {
4925 mapping->release();
4926 mapping = NULL;
4927 }
4928 }while (false);
1c79356b 4929
0a7de745 4930 UNLOCK;
1c79356b 4931
0a7de745
A
4932 if (mapDesc) {
4933 mapDesc->release();
4934 }
91447636 4935
0a7de745 4936 return result;
1c79356b
A
4937}
4938
0a7de745
A
4939void
4940IOMemoryDescriptor::addMapping(
1c79356b
A
4941 IOMemoryMap * mapping )
4942{
0a7de745 4943 if (mapping) {
cb323159 4944 if (NULL == _mappings) {
0a7de745
A
4945 _mappings = OSSet::withCapacity(1);
4946 }
4947 if (_mappings) {
4948 _mappings->setObject( mapping );
4949 }
4950 }
1c79356b
A
4951}
4952
0a7de745
A
4953void
4954IOMemoryDescriptor::removeMapping(
1c79356b
A
4955 IOMemoryMap * mapping )
4956{
0a7de745
A
4957 if (_mappings) {
4958 _mappings->removeObject( mapping);
4959 }
1c79356b
A
4960}
4961
b0d623f7
A
4962#ifndef __LP64__
4963// obsolete initializers
0a7de745 4964// - initWithOptions is the designated initializer
1c79356b 4965bool
b0d623f7 4966IOMemoryDescriptor::initWithAddress(void * address,
0a7de745
A
4967 IOByteCount length,
4968 IODirection direction)
1c79356b 4969{
0a7de745 4970 return false;
1c79356b
A
4971}
4972
4973bool
b0d623f7 4974IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
0a7de745
A
4975 IOByteCount length,
4976 IODirection direction,
4977 task_t task)
1c79356b 4978{
0a7de745 4979 return false;
1c79356b
A
4980}
4981
4982bool
b0d623f7 4983IOMemoryDescriptor::initWithPhysicalAddress(
0a7de745
A
4984 IOPhysicalAddress address,
4985 IOByteCount length,
4986 IODirection direction )
1c79356b 4987{
0a7de745 4988 return false;
1c79356b
A
4989}
4990
4991bool
b0d623f7 4992IOMemoryDescriptor::initWithRanges(
0a7de745
A
4993 IOVirtualRange * ranges,
4994 UInt32 withCount,
4995 IODirection direction,
4996 task_t task,
4997 bool asReference)
1c79356b 4998{
0a7de745 4999 return false;
1c79356b
A
5000}
5001
5002bool
0a7de745
A
5003IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
5004 UInt32 withCount,
5005 IODirection direction,
5006 bool asReference)
1c79356b 5007{
0a7de745 5008 return false;
1c79356b
A
5009}
5010
0a7de745
A
5011void *
5012IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
5013 IOByteCount * lengthOfSegment)
b0d623f7 5014{
cb323159 5015 return NULL;
b0d623f7
A
5016}
5017#endif /* !__LP64__ */
5018
1c79356b
A
5019/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5020
0a7de745
A
5021bool
5022IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
9bccf70c 5023{
cb323159
A
5024 OSSymbol const *keys[2] = {NULL};
5025 OSObject *values[2] = {NULL};
0a7de745
A
5026 OSArray * array;
5027 vm_size_t vcopy_size;
5028
5029 struct SerData {
5030 user_addr_t address;
5031 user_size_t length;
5032 } *vcopy = NULL;
5033 unsigned int index, nRanges;
5034 bool result = false;
5035
5036 IOOptionBits type = _flags & kIOMemoryTypeMask;
5037
5038 if (s == NULL) {
5039 return false;
5040 }
5041
5042 array = OSArray::withCapacity(4);
5043 if (!array) {
5044 return false;
5045 }
5046
5047 nRanges = _rangesCount;
5048 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
5049 result = false;
5050 goto bail;
5051 }
5052 vcopy = (SerData *) IOMalloc(vcopy_size);
cb323159 5053 if (vcopy == NULL) {
0a7de745
A
5054 result = false;
5055 goto bail;
5056 }
5057
5058 keys[0] = OSSymbol::withCString("address");
5059 keys[1] = OSSymbol::withCString("length");
5060
5061 // Copy the volatile data so we don't have to allocate memory
5062 // while the lock is held.
5063 LOCK;
5064 if (nRanges == _rangesCount) {
5065 Ranges vec = _ranges;
5066 for (index = 0; index < nRanges; index++) {
5067 mach_vm_address_t addr; mach_vm_size_t len;
5068 getAddrLenForInd(addr, len, type, vec, index);
5069 vcopy[index].address = addr;
5070 vcopy[index].length = len;
5071 }
5072 } else {
5073 // The descriptor changed out from under us. Give up.
5074 UNLOCK;
5075 result = false;
5076 goto bail;
5077 }
5078 UNLOCK;
5079
5080 for (index = 0; index < nRanges; index++) {
5081 user_addr_t addr = vcopy[index].address;
5082 IOByteCount len = (IOByteCount) vcopy[index].length;
5083 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
cb323159 5084 if (values[0] == NULL) {
0a7de745
A
5085 result = false;
5086 goto bail;
5087 }
5088 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
cb323159 5089 if (values[1] == NULL) {
0a7de745
A
5090 result = false;
5091 goto bail;
5092 }
5093 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
cb323159 5094 if (dict == NULL) {
0a7de745
A
5095 result = false;
5096 goto bail;
5097 }
5098 array->setObject(dict);
5099 dict->release();
5100 values[0]->release();
5101 values[1]->release();
cb323159 5102 values[0] = values[1] = NULL;
0a7de745
A
5103 }
5104
5105 result = array->serialize(s);
5106
5107bail:
5108 if (array) {
5109 array->release();
5110 }
5111 if (values[0]) {
5112 values[0]->release();
5113 }
5114 if (values[1]) {
5115 values[1]->release();
5116 }
5117 if (keys[0]) {
5118 keys[0]->release();
5119 }
5120 if (keys[1]) {
5121 keys[1]->release();
5122 }
5123 if (vcopy) {
5124 IOFree(vcopy, vcopy_size);
5125 }
5126
5127 return result;
9bccf70c
A
5128}
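/*
 * Illustrative sketch, not part of this file: serialize() above produces an
 * array of { "address", "length" } dictionaries, one per range. A caller might
 * capture that as plist text roughly like this; the helper name is an
 * assumption and 'md' is an assumed descriptor.
 */
static void
ExampleDumpRanges(IOMemoryDescriptor * md)            // hypothetical helper
{
	OSSerialize * s = OSSerialize::withCapacity(4096);
	if (s && md->serialize(s)) {
		IOLog("ranges: %s\n", s->text());          // XML plist text of the range array
	}
	if (s) {
		s->release();
	}
}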
5129
9bccf70c
A
5130/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
5131
0b4e3aa0 5132OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
5133#ifdef __LP64__
5134OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
5135OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
5136OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
5137OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
5138OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
5139OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
5140OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
5141#else /* !__LP64__ */
55e303ae
A
5142OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
5143OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
5144OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
5145OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 5146OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
5147OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
5148OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
5149#endif /* !__LP64__ */
1c79356b
A
5150OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
5151OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
5152OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
5153OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
5154OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
5155OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
5156OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
5157OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 5158
55e303ae 5159/* ex-inline function implementation */
0a7de745 5160IOPhysicalAddress
0c530ab8 5161IOMemoryDescriptor::getPhysicalAddress()
0a7de745 5162{
cb323159 5163 return getPhysicalSegment( 0, NULL );
0a7de745 5164}