/*
 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <os/overflow.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

__END_DECLS

#define kIOMapperWaitSystem ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK   IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP  IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct IOMDPersistentInitData
{
    const IOGeneralMemoryDescriptor * fMD;
    IOMemoryReference               * fMemRef;
};

struct ioPLBlock {
    upl_t        fIOPL;
    vm_address_t fPageInfo;   // Pointer to page list or index into it
    uint32_t     fIOMDOffset; // The offset of this iopl in descriptor
    ppnum_t      fMappedPage; // Page number of first page in this iopl
    unsigned int fPageOffset; // Offset within first page of iopl
    unsigned int fFlags;      // Flags
};

enum { kMaxWireTags = 6 };

struct ioGMDData
{
    IOMapper * fMapper;
    uint64_t   fDMAMapAlignment;
    uint64_t   fMappedBase;
    uint64_t   fMappedLength;
    uint64_t   fPreparationID;
#if IOTRACKING
    IOTracking fWireTracking;
#endif /* IOTRACKING */
    unsigned int  fPageCnt;
    uint8_t       fDMAMapNumAddressBits;
    unsigned char fDiscontig:1;
    unsigned char fCompletionError:1;
    unsigned char fMappedBaseValid:1;
    unsigned char _resv:3;
    unsigned char fDMAAccess:2;

    /* variable length arrays */
    upl_page_info_t fPageList[1]
#if __LP64__
    // align fPageList as for ioPLBlock
    __attribute__((aligned(sizeof(upl_t))))
#endif
    ;
    ioPLBlock fBlocks[1];
};

#define getDataP(osd)    ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)   ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d) \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)   (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )

extern "C" {

kern_return_t device_data_action(
    uintptr_t          device_handle,
    ipc_port_t         device_pager,
    vm_prot_t          protection,
    vm_object_offset_t offset,
    vm_size_t          size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault(device_pager, offset, size);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
    uintptr_t device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
}; // end extern "C"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
                 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        user_addr_t ad;
        uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;

    enum { kIOMemoryPurgeableControlMask = 15 };

    switch (kIOMemoryPurgeableControlMask & newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }

    if (*control == VM_PURGABLE_SET_STATE) {
        // let VM know this call is from the kernel and is allowed to alter
        // the volatility of the memory entry even if it was created with
        // MAP_MEM_PURGABLE_KERNEL_ONLY
        *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
    }

    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (VM_PURGABLE_STATE_MASK & *state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}


static vm_prot_t
vmProtForCacheMode(IOOptionBits cacheMode)
{
    vm_prot_t prot = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            SET_MAP_MEM(MAP_MEM_IO, prot);
            break;

        case kIOWriteThruCache:
            SET_MAP_MEM(MAP_MEM_WTHRU, prot);
            break;

        case kIOWriteCombineCache:
            SET_MAP_MEM(MAP_MEM_WCOMB, prot);
            break;

        case kIOCopybackCache:
            SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
            break;

        case kIOCopybackInnerCache:
            SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
            break;

        case kIOPostedWrite:
            SET_MAP_MEM(MAP_MEM_POSTED, prot);
            break;

        case kIODefaultCache:
        default:
            SET_MAP_MEM(MAP_MEM_NOOP, prot);
            break;
    }

    return (prot);
}

static unsigned int
pagerFlagsForCacheMode(IOOptionBits cacheMode)
{
    unsigned int pagerFlags = 0;
    switch (cacheMode)
    {
        case kIOInhibitCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteThruCache:
            pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
            break;

        case kIOWriteCombineCache:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOCopybackInnerCache:
            pagerFlags = DEVICE_PAGER_COHERENT;
            break;

        case kIOPostedWrite:
            pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
            break;

        case kIODefaultCache:
        default:
            pagerFlags = -1U;
            break;
    }
    return (pagerFlags);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

struct IOMemoryEntry
{
    ipc_port_t entry;
    int64_t    offset;
    uint64_t   size;
};

struct IOMemoryReference
{
    volatile SInt32            refCount;
    vm_prot_t                  prot;
    uint32_t                   capacity;
    uint32_t                   count;
    struct IOMemoryReference * mapRef;
    IOMemoryEntry              entries[0];
};

enum
{
    kIOMemoryReferenceReuse = 0x00000001,
    kIOMemoryReferenceWrite = 0x00000002,
    kIOMemoryReferenceCOW   = 0x00000004,
};

SInt32 gIOMemoryReferenceCount;

IOMemoryReference *
IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
{
    IOMemoryReference * ref;
    size_t              newSize, oldSize, copySize;

    newSize = (sizeof(IOMemoryReference)
               - sizeof(ref->entries)
               + capacity * sizeof(ref->entries[0]));
    ref = (typeof(ref)) IOMalloc(newSize);
    if (realloc)
    {
        oldSize = (sizeof(IOMemoryReference)
                   - sizeof(realloc->entries)
                   + realloc->capacity * sizeof(realloc->entries[0]));
        copySize = oldSize;
        if (copySize > newSize) copySize = newSize;
        if (ref) bcopy(realloc, ref, copySize);
        IOFree(realloc, oldSize);
    }
    else if (ref)
    {
        bzero(ref, sizeof(*ref));
        ref->refCount = 1;
        OSIncrementAtomic(&gIOMemoryReferenceCount);
    }
    if (!ref) return (0);
    ref->capacity = capacity;
    return (ref);
}

void
IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
{
    IOMemoryEntry * entries;
    size_t          size;

    if (ref->mapRef)
    {
        memoryReferenceFree(ref->mapRef);
        ref->mapRef = 0;
    }

    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        ipc_port_release_send(entries->entry);
    }
    size = (sizeof(IOMemoryReference)
            - sizeof(ref->entries)
            + ref->capacity * sizeof(ref->entries[0]));
    IOFree(ref, size);

    OSDecrementAtomic(&gIOMemoryReferenceCount);
}

void
IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
{
    if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
}


IOReturn
IOGeneralMemoryDescriptor::memoryReferenceCreate(
    IOOptionBits         options,
    IOMemoryReference ** reference)
{
    enum { kCapacity = 4, kCapacityInc = 4 };

    kern_return_t        err;
    IOMemoryReference *  ref;
    IOMemoryEntry *      entries;
    IOMemoryEntry *      cloneEntries;
    vm_map_t             map;
    ipc_port_t           entry, cloneEntry;
    vm_prot_t            prot;
    memory_object_size_t actualSize;
    uint32_t             rangeIdx;
    uint32_t             count;
    mach_vm_address_t    entryAddr, endAddr, entrySize;
    mach_vm_size_t       srcAddr, srcLen;
    mach_vm_size_t       nextAddr, nextLen;
    mach_vm_size_t       offset, remain;
    IOByteCount          physLen;
    IOOptionBits         type = (_flags & kIOMemoryTypeMask);
    IOOptionBits         cacheMode;
    unsigned int         pagerFlags;
    vm_tag_t             tag;

    ref = memoryReferenceAlloc(kCapacity, NULL);
    if (!ref) return (kIOReturnNoMemory);

    tag = getVMTag(kernel_map);
    entries = &ref->entries[0];
    count = 0;
    err = KERN_SUCCESS;

    offset = 0;
    rangeIdx = 0;
    if (_task)
    {
        getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
    }
    else
    {
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen = physLen;

        // default cache mode for physical
        if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
        {
            IOOptionBits mode;
            pagerFlags = IODefaultCacheBits(nextAddr);
            if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
            {
                if (DEVICE_PAGER_EARLY_ACK & pagerFlags)
                    mode = kIOPostedWrite;
                else if (DEVICE_PAGER_GUARDED & pagerFlags)
                    mode = kIOInhibitCache;
                else
                    mode = kIOWriteCombineCache;
            }
            else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
                mode = kIOWriteThruCache;
            else
                mode = kIOCopybackCache;
            _flags |= (mode << kIOMemoryBufferCacheShift);
        }
    }

    // cache mode & vm_prot
    prot = VM_PROT_READ;
    cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
    prot |= vmProtForCacheMode(cacheMode);
    // VM system requires write access to change cache mode
    if (kIODefaultCache != cacheMode)                    prot |= VM_PROT_WRITE;
    if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceWrite & options)               prot |= VM_PROT_WRITE;
    if (kIOMemoryReferenceCOW & options)                 prot |= MAP_MEM_VM_COPY;

    if ((kIOMemoryReferenceReuse & options) && _memRef)
    {
        cloneEntries = &_memRef->entries[0];
        prot |= MAP_MEM_NAMED_REUSE;
    }

    if (_task)
    {
        // virtual ranges

        if (kIOMemoryBufferPageable & _flags)
        {
            // IOBufferMemoryDescriptor alloc - set flags for entry + object create
            prot |= MAP_MEM_NAMED_CREATE;
            if (kIOMemoryBufferPurgeable & _flags)
            {
                prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
                if (VM_KERN_MEMORY_SKYWALK == tag)
                {
                    prot |= MAP_MEM_LEDGER_TAG_NETWORK;
                }
            }
            if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;

            prot |= VM_PROT_WRITE;
            map = NULL;
        }
        else map = get_task_map(_task);

        remain = _length;
        while (remain)
        {
            srcAddr  = nextAddr;
            srcLen   = nextLen;
            nextAddr = 0;
            nextLen  = 0;
            // coalesce addr range
            for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
            {
                getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
                if ((srcAddr + srcLen) != nextAddr) break;
                srcLen += nextLen;
            }
            entryAddr = trunc_page_64(srcAddr);
            endAddr   = round_page_64(srcAddr + srcLen);
            do
            {
                entrySize = (endAddr - entryAddr);
                if (!entrySize) break;
                actualSize = entrySize;

                cloneEntry = MACH_PORT_NULL;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
                    else prot &= ~MAP_MEM_NAMED_REUSE;
                }

                err = mach_make_memory_entry_internal(map,
                        &actualSize, entryAddr, prot, &entry, cloneEntry);

                if (KERN_SUCCESS != err) break;
                if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");

                if (count >= ref->capacity)
                {
                    ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
                    entries = &ref->entries[count];
                }
                entries->entry  = entry;
                entries->size   = actualSize;
                entries->offset = offset + (entryAddr - srcAddr);
                entryAddr += actualSize;
                if (MAP_MEM_NAMED_REUSE & prot)
                {
                    if ((cloneEntries->entry == entries->entry)
                        && (cloneEntries->size == entries->size)
                        && (cloneEntries->offset == entries->offset)) cloneEntries++;
                    else prot &= ~MAP_MEM_NAMED_REUSE;
                }
                entries++;
                count++;
            }
            while (true);
            offset += srcLen;
            remain -= srcLen;
        }
    }
    else
    {
        // _task == 0, physical or kIOMemoryTypeUPL
        memory_object_t pager;
        vm_size_t       size = ptoa_32(_pages);

        if (!getKernelReserved()) panic("getKernelReserved");

        reserved->dp.pagerContig = (1 == _rangesCount);
        reserved->dp.memory      = this;

        pagerFlags = pagerFlagsForCacheMode(cacheMode);
        if (-1U == pagerFlags) panic("phys is kIODefaultCache");
        if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;

        pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
                                   size, pagerFlags);
        assert (pager);
        if (!pager) err = kIOReturnVMError;
        else
        {
            srcAddr   = nextAddr;
            entryAddr = trunc_page_64(srcAddr);
            err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
                    size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
            assert (KERN_SUCCESS == err);
            if (KERN_SUCCESS != err) device_pager_deallocate(pager);
            else
            {
                reserved->dp.devicePager = pager;
                entries->entry  = entry;
                entries->size   = size;
                entries->offset = offset + (entryAddr - srcAddr);
                entries++;
                count++;
            }
        }
    }

    ref->count = count;
    ref->prot  = prot;

    if (_task && (KERN_SUCCESS == err)
        && (kIOMemoryMapCopyOnWrite & _flags)
        && !(kIOMemoryReferenceCOW & options))
    {
        err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
    }

    if (KERN_SUCCESS == err)
    {
        if (MAP_MEM_NAMED_REUSE & prot)
        {
            memoryReferenceFree(ref);
            OSIncrementAtomic(&_memRef->refCount);
            ref = _memRef;
        }
    }
    else
    {
        memoryReferenceFree(ref);
        ref = NULL;
    }

    *reference = ref;

    return (err);
}

kern_return_t
IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
    IOReturn                        err;
    vm_map_offset_t                 addr;

    addr = ref->mapped;

    err = vm_map_enter_mem_object(map, &addr, ref->size,
                                  (vm_map_offset_t) 0,
                                  (((ref->options & kIOMapAnywhere)
                                    ? VM_FLAGS_ANYWHERE
                                    : VM_FLAGS_FIXED)),
                                  VM_MAP_KERNEL_FLAGS_NONE,
                                  ref->tag,
                                  IPC_PORT_NULL,
                                  (memory_object_offset_t) 0,
                                  false, /* copy */
                                  ref->prot,
                                  ref->prot,
                                  VM_INHERIT_NONE);
    if (KERN_SUCCESS == err)
    {
        ref->mapped = (mach_vm_address_t) addr;
        ref->map = map;
    }

    return( err );
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceMap(
    IOMemoryReference * ref,
    vm_map_t            map,
    mach_vm_size_t      inoffset,
    mach_vm_size_t      size,
    IOOptionBits        options,
    mach_vm_address_t * inaddr)
{
    IOReturn        err;
    int64_t         offset = inoffset;
    uint32_t        rangeIdx, entryIdx;
    vm_map_offset_t addr, mapAddr;
    vm_map_offset_t pageOffset, entryOffset, remain, chunk;

    mach_vm_address_t nextAddr;
    mach_vm_size_t    nextLen;
    IOByteCount       physLen;
    IOMemoryEntry *   entry;
    vm_prot_t         prot, memEntryCacheMode;
    IOOptionBits      type;
    IOOptionBits      cacheMode;
    vm_tag_t          tag;
    // for the kIOMapPrefault option.
    upl_page_info_t * pageList = NULL;
    UInt              currentPageIndex = 0;
    bool              didAlloc;

    if (ref->mapRef)
    {
        err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
        return (err);
    }

    type = _flags & kIOMemoryTypeMask;

    prot = VM_PROT_READ;
    if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
    prot &= ref->prot;

    cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
    if (kIODefaultCache != cacheMode)
    {
        // VM system requires write access to update named entry cache mode
        memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
    }

    tag = getVMTag(map);

    if (_task)
    {
        // Find first range for offset
        if (!_rangesCount) return (kIOReturnBadArgument);
        for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
        {
            getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
            if (remain < nextLen) break;
            remain -= nextLen;
        }
    }
    else
    {
        rangeIdx = 0;
        remain   = 0;
        nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
        nextLen  = size;
    }

    assert(remain < nextLen);
    if (remain >= nextLen) return (kIOReturnBadArgument);

    nextAddr  += remain;
    nextLen   -= remain;
    pageOffset = (page_mask & nextAddr);
    addr       = 0;
    didAlloc   = false;

    if (!(options & kIOMapAnywhere))
    {
        addr = *inaddr;
        if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
        addr -= pageOffset;
    }

    // find first entry for offset
    for (entryIdx = 0;
         (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
         entryIdx++) {}
    entryIdx--;
    entry = &ref->entries[entryIdx];

    // allocate VM
    size = round_page_64(size + pageOffset);
    if (kIOMapOverwrite & options)
    {
        if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
        {
            map = IOPageableMapForAddress(addr);
        }
        err = KERN_SUCCESS;
    }
    else
    {
        IOMemoryDescriptorMapAllocRef ref;
        ref.map     = map;
        ref.tag     = tag;
        ref.options = options;
        ref.size    = size;
        ref.prot    = prot;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = addr;
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
        if (KERN_SUCCESS == err)
        {
            addr     = ref.mapped;
            map      = ref.map;
            didAlloc = true;
        }
    }

    /*
     * If the memory is associated with a device pager but doesn't have a UPL,
     * it will be immediately faulted in through the pager via populateDevicePager().
     * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
     * operations.
     */
    if ((reserved != NULL) && (reserved->dp.devicePager) && (_memoryEntries == NULL) && (_wireCount != 0))
        options &= ~kIOMapPrefault;

    /*
     * Prefaulting is only possible if we wired the memory earlier. Check the
     * memory type, and the underlying data.
     */
    if (options & kIOMapPrefault)
    {
        /*
         * The memory must have been wired by calling ::prepare(), otherwise
         * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
         */
        assert(_wireCount != 0);
        assert(_memoryEntries != NULL);
        if ((_wireCount == 0) ||
            (_memoryEntries == NULL))
        {
            return kIOReturnBadArgument;
        }

        // Get the page list.
        ioGMDData* dataP = getDataP(_memoryEntries);
        ioPLBlock const* ioplList = getIOPLList(dataP);
        pageList = getPageList(dataP);

        // Get the number of IOPLs.
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);

        /*
         * Scan through the IOPL Info Blocks, looking for the first block containing
         * the offset. The search will go past it, so we'll need to step back to the
         * right range at the end.
         */
        UInt ioplIndex = 0;
        while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
            ioplIndex++;
        ioplIndex--;

        // Retrieve the IOPL info block.
        ioPLBlock ioplInfo = ioplList[ioplIndex];

        /*
         * For external UPLs, the fPageInfo field points directly to the UPL's
         * page_info_t array.
         */
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Rebase [offset] into the IOPL in order to look for the first page index.
        mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;

        // Retrieve the index of the first page corresponding to the offset.
        currentPageIndex = atop_32(offsetInIOPL);
    }

    // enter mappings
    remain  = size;
    mapAddr = addr;
    addr   += pageOffset;

    while (remain && (KERN_SUCCESS == err))
    {
        entryOffset = offset - entry->offset;
        if ((page_mask & entryOffset) != pageOffset)
        {
            err = kIOReturnNotAligned;
            break;
        }

        if (kIODefaultCache != cacheMode)
        {
            vm_size_t unused = 0;
            err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
                                         memEntryCacheMode, NULL, entry->entry);
            assert (KERN_SUCCESS == err);
        }

        entryOffset -= pageOffset;
        if (entryOffset >= entry->size) panic("entryOffset");
        chunk = entry->size - entryOffset;
        if (chunk)
        {
            vm_map_kernel_flags_t vmk_flags;

            vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
            vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */

            if (chunk > remain) chunk = remain;
            if (options & kIOMapPrefault)
            {
                UInt nb_pages = round_page(chunk) / PAGE_SIZE;

                err = vm_map_enter_mem_object_prefault(map,
                                                       &mapAddr,
                                                       chunk, 0 /* mask */,
                                                       (VM_FLAGS_FIXED
                                                        | VM_FLAGS_OVERWRITE),
                                                       vmk_flags,
                                                       tag,
                                                       entry->entry,
                                                       entryOffset,
                                                       prot, // cur
                                                       prot, // max
                                                       &pageList[currentPageIndex],
                                                       nb_pages);

                // Compute the next index in the page list.
                currentPageIndex += nb_pages;
                assert(currentPageIndex <= _pages);
            }
            else
            {
                err = vm_map_enter_mem_object(map,
                                              &mapAddr,
                                              chunk, 0 /* mask */,
                                              (VM_FLAGS_FIXED
                                               | VM_FLAGS_OVERWRITE),
                                              vmk_flags,
                                              tag,
                                              entry->entry,
                                              entryOffset,
                                              false, // copy
                                              prot, // cur
                                              prot, // max
                                              VM_INHERIT_NONE);
            }
            if (KERN_SUCCESS != err) break;
            remain -= chunk;
            if (!remain) break;
            mapAddr += chunk;
            offset  += chunk - pageOffset;
        }
        pageOffset = 0;
        entry++;
        entryIdx++;
        if (entryIdx >= ref->count)
        {
            err = kIOReturnOverrun;
            break;
        }
    }

    if ((KERN_SUCCESS != err) && didAlloc)
    {
        (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
        addr = 0;
    }
    *inaddr = addr;

    return (err);
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
    IOMemoryReference * ref,
    IOByteCount       * residentPageCount,
    IOByteCount       * dirtyPageCount)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    unsigned int    resident, dirty;
    unsigned int    totalResident, totalDirty;

    totalResident = totalDirty = 0;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;
        err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
        if (KERN_SUCCESS != err) break;
        totalResident += resident;
        totalDirty    += dirty;
    }

    if (residentPageCount) *residentPageCount = totalResident;
    if (dirtyPageCount)    *dirtyPageCount    = totalDirty;
    return (err);
}

IOReturn
IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
    IOMemoryReference * ref,
    IOOptionBits        newState,
    IOOptionBits      * oldState)
{
    IOReturn        err;
    IOMemoryEntry * entries;
    vm_purgable_t   control;
    int             totalState, state;

    totalState = kIOMemoryPurgeableNonVolatile;
    err = kIOReturnSuccess;
    entries = ref->entries + ref->count;
    while (entries > &ref->entries[0])
    {
        entries--;

        err = purgeableControlBits(newState, &control, &state);
        if (KERN_SUCCESS != err) break;
        err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
        if (KERN_SUCCESS != err) break;
        err = purgeableStateBits(&state);
        if (KERN_SUCCESS != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }

    if (oldState) *oldState = totalState;
    return (err);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                            IOPhysicalAddress address,
                            IOByteCount       length,
                            IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}


/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
        && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}
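
/*
 * Usage sketch (illustrative only; "userBuffer", "userLength" and "userTask"
 * are hypothetical names, not part of this file): a client typically creates
 * a descriptor over a task's buffer with one of the factory methods above,
 * wires it with prepare(), and balances with complete() and release().
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userBuffer, userLength, kIODirectionOutIn, userTask);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... perform I/O against the wired pages ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */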

bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    IOMemoryReference * memRef;

    if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);

    if (memRef == originalMD->_memRef)
    {
        originalMD->retain();               // Add a new reference to ourselves
        originalMD->memoryReferenceRelease(memRef);
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    IOMDPersistentInitData initData = { originalMD, memRef };

    if (self
        && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges, from a given task, several physical ranges, a UPL from the
 * ubc system or a uio (may be 64bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        IOMDPersistentInitData *initData = (typeof(initData)) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memRef = initData->fMemRef;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type    = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count   = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task   = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memRef)
            {
                memoryReferenceRelease(_memRef);
                _memRef = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0; // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags = options;
    _task  = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    _dmaReferences = 0;
    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;
        switch (kIOMemoryDirectionMask & options)
        {
            case kIODirectionOut:
                dataP->fDMAAccess = kIODMAMapReadAccess;
                break;
            case kIODirectionIn:
                dataP->fDMAAccess = kIODMAMapWriteAccess;
                break;
            case kIODirectionNone:
            case kIODirectionOutIn:
            default:
                panic("bad dir for upl 0x%x\n", (int) options);
                break;
        }
        // _wireCount++;    // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
#ifndef __arm__
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
#endif
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }
        _rangesCount = count;

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        mach_vm_size_t totalLength = 0;
        unsigned int ind, pages = 0;
        for (ind = 0; ind < count; ind++) {
            mach_vm_address_t addr;
            mach_vm_address_t endAddr;
            mach_vm_size_t    len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
            if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
            if (os_add_overflow(totalLength, len, &totalLength)) break;
            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        if ((ind < count)
            || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */

        _length = totalLength;
        _pages  = pages;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed.


        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;   // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize;

            if (_pages > atop_64(max_mem)) return false;

            dataSize = computeDataSize(_pages, /* upls */ count * 2);
            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
                && (VM_KERN_MEMORY_NONE == _kernelTag))
            {
                _kernelTag = IOMemoryTag(kernel_map);
            }

            if ( (kIOMemoryPersistent & _flags) && !_memRef)
            {
                IOReturn
                err = memoryReferenceCreate(0, &_memRef);
                if (kIOReturnSuccess != err) return false;
            }

            if ((_flags & kIOMemoryAutoPrepare)
                && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
        {
            dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
            dataP->fMappedBaseValid = dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memRef)      memoryReferenceRelease(_memRef);
    if (_prepareLock) IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */
1c79356b
A
1800
1801/*
1802 * getDirection:
1803 *
1804 * Get the direction of the transfer.
1805 */
1806IODirection IOMemoryDescriptor::getDirection() const
1807{
b0d623f7
A
1808#ifndef __LP64__
1809 if (_direction)
1810 return _direction;
1811#endif /* !__LP64__ */
1812 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
1813}
1814
1815/*
1816 * getLength:
1817 *
1818 * Get the length of the transfer (over all ranges).
1819 */
1820IOByteCount IOMemoryDescriptor::getLength() const
1821{
1822 return _length;
1823}
1824
55e303ae 1825void IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b
A
1826{
1827 _tag = tag;
1828}
1829
1830IOOptionBits IOMemoryDescriptor::getTag( void )
1831{
1832 return( _tag);
1833}
1834
5ba3f43e
A
1835uint64_t IOMemoryDescriptor::getFlags(void)
1836{
1837 return (_flags);
1838}
1839
b0d623f7 1840#ifndef __LP64__
39037602
A
1841#pragma clang diagnostic push
1842#pragma clang diagnostic ignored "-Wdeprecated-declarations"
1843
55e303ae 1844// @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
0c530ab8
A
1845IOPhysicalAddress
1846IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1847{
0c530ab8 1848 addr64_t physAddr = 0;
1c79356b 1849
9bccf70c 1850 if( prepare() == kIOReturnSuccess) {
0c530ab8 1851 physAddr = getPhysicalSegment64( offset, length );
9bccf70c
A
1852 complete();
1853 }
0b4e3aa0 1854
0c530ab8 1855 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
0b4e3aa0 1856}
39037602
A
1857
1858#pragma clang diagnostic pop
1859
b0d623f7 1860#endif /* !__LP64__ */
0b4e3aa0 1861
55e303ae
A
1862IOByteCount IOMemoryDescriptor::readBytes
1863 (IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1864{
b0d623f7 1865 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1866 IOByteCount remaining;
1c79356b 1867
55e303ae 1868 // Assert that this entire I/O is within the available range
fe8ab488 1869 assert(offset <= _length);
55e303ae 1870 assert(offset + length <= _length);
c7d2c2c6
A
1871 if ((offset >= _length)
1872 || ((offset + length) > _length)) {
55e303ae
A
1873 return 0;
1874 }
1c79356b 1875
5ba3f43e
A
1876 assert (!(kIOMemoryRemote & _flags));
1877 if (kIOMemoryRemote & _flags) return (0);
1878
b0d623f7
A
1879 if (kIOMemoryThreadSafe & _flags)
1880 LOCK;
1881
55e303ae
A
1882 remaining = length = min(length, _length - offset);
1883 while (remaining) { // (process another target segment?)
1884 addr64_t srcAddr64;
1885 IOByteCount srcLen;
1c79356b 1886
b0d623f7 1887 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
55e303ae
A
1888 if (!srcAddr64)
1889 break;
1c79356b 1890
55e303ae
A
1891 // Clip segment length to remaining
1892 if (srcLen > remaining)
1893 srcLen = remaining;
1c79356b 1894
55e303ae
A
1895 copypv(srcAddr64, dstAddr, srcLen,
1896 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1897
55e303ae
A
1898 dstAddr += srcLen;
1899 offset += srcLen;
1900 remaining -= srcLen;
1901 }
1c79356b 1902
b0d623f7
A
1903 if (kIOMemoryThreadSafe & _flags)
1904 UNLOCK;
1905
55e303ae 1906 assert(!remaining);
1c79356b 1907
55e303ae
A
1908 return length - remaining;
1909}
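/*
 * Usage sketch (illustrative only, not part of the original source): copy the
 * leading bytes of a descriptor into a caller-supplied buffer. readBytes()
 * returns 0 if the request runs past getLength(), otherwise the number of
 * bytes copied. The helper name is an assumption for the example.
 */
#if 0
static IOByteCount
ExampleCopyHeader(IOMemoryDescriptor * md, void * dst, IOByteCount dstLen)
{
    IOByteCount copied = 0;

    if (kIOReturnSuccess == md->prepare())
    {
        // offset 0; the descriptor must be wired before its physical
        // segments can be walked by readBytes()
        copied = md->readBytes(0, dst, dstLen);
        md->complete();
    }
    return (copied);
}
#endif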
0b4e3aa0 1910
55e303ae 1911IOByteCount IOMemoryDescriptor::writeBytes
fe8ab488 1912 (IOByteCount inoffset, const void *bytes, IOByteCount length)
55e303ae 1913{
b0d623f7 1914 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1915 IOByteCount remaining;
fe8ab488 1916 IOByteCount offset = inoffset;
0b4e3aa0 1917
55e303ae 1918 // Assert that this entire I/O is within the available range
fe8ab488 1919 assert(offset <= _length);
55e303ae 1920 assert(offset + length <= _length);
0b4e3aa0 1921
55e303ae 1922 assert( !(kIOMemoryPreparedReadOnly & _flags) );
0b4e3aa0 1923
c7d2c2c6
A
1924 if ( (kIOMemoryPreparedReadOnly & _flags)
1925 || (offset >= _length)
1926 || ((offset + length) > _length)) {
55e303ae
A
1927 return 0;
1928 }
0b4e3aa0 1929
5ba3f43e
A
1930 assert (!(kIOMemoryRemote & _flags));
1931 if (kIOMemoryRemote & _flags) return (0);
1932
b0d623f7
A
1933 if (kIOMemoryThreadSafe & _flags)
1934 LOCK;
1935
55e303ae
A
1936 remaining = length = min(length, _length - offset);
1937 while (remaining) { // (process another target segment?)
1938 addr64_t dstAddr64;
1939 IOByteCount dstLen;
0b4e3aa0 1940
b0d623f7 1941 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
55e303ae
A
1942 if (!dstAddr64)
1943 break;
0b4e3aa0 1944
55e303ae
A
1945 // Clip segment length to remaining
1946 if (dstLen > remaining)
1947 dstLen = remaining;
0b4e3aa0 1948
fe8ab488
A
1949 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1950 else
1951 {
1952 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1953 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1954 srcAddr += dstLen;
1955 }
55e303ae
A
1956 offset += dstLen;
1957 remaining -= dstLen;
1c79356b 1958 }
1c79356b 1959
b0d623f7
A
1960 if (kIOMemoryThreadSafe & _flags)
1961 UNLOCK;
1962
55e303ae
A
1963 assert(!remaining);
1964
fe8ab488
A
1965 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1966
55e303ae 1967 return length - remaining;
1c79356b
A
1968}
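/*
 * Usage sketch (illustrative only, not part of the original source): calling
 * writeBytes() with a NULL source pointer takes the bzero_phys() path above
 * and finishes with an incoherent-IO flush, so it can be used to zero a
 * sub-range of the descriptor's memory. The helper name is an assumption.
 */
#if 0
static IOByteCount
ExampleZeroRange(IOMemoryDescriptor * md, IOByteCount offset, IOByteCount len)
{
    IOByteCount done = 0;

    if (kIOReturnSuccess == md->prepare())
    {
        done = md->writeBytes(offset, NULL, len);   // NULL source == zero fill
        md->complete();
    }
    return (done);          // 0 if the range was invalid or prepared read-only
}
#endif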
1969
b0d623f7
A
1970#ifndef __LP64__
1971void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1972{
1973 panic("IOGMD::setPosition deprecated");
1974}
1975#endif /* !__LP64__ */
1976
1977static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1978
1979uint64_t
1980IOGeneralMemoryDescriptor::getPreparationID( void )
1981{
1982 ioGMDData *dataP;
7e4a7d39
A
1983
1984 if (!_wireCount)
b0d623f7 1985 return (kIOPreparationIDUnprepared);
7e4a7d39 1986
99c3a104
A
1987 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1988 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
316670eb
A
1989 {
1990 IOMemoryDescriptor::setPreparationID();
1991 return (IOMemoryDescriptor::getPreparationID());
1992 }
7e4a7d39
A
1993
1994 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1995 return (kIOPreparationIDUnprepared);
1996
b0d623f7
A
1997 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1998 {
b0d623f7 1999 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
b0d623f7
A
2000 }
2001 return (dataP->fPreparationID);
2002}
2003
316670eb 2004IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 2005{
316670eb
A
2006 if (!reserved)
2007 {
2008 reserved = IONew(IOMemoryDescriptorReserved, 1);
2009 if (reserved)
2010 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
2011 }
2012 return (reserved);
2013}
2014
2015void IOMemoryDescriptor::setPreparationID( void )
2016{
2017 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
2018 {
316670eb 2019 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
316670eb
A
2020 }
2021}
2022
2023uint64_t IOMemoryDescriptor::getPreparationID( void )
2024{
2025 if (reserved)
2026 return (reserved->preparationID);
2027 else
2028 return (kIOPreparationIDUnsupported);
b0d623f7 2029}
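/*
 * Usage sketch (illustrative only): the preparation ID stays stable while a
 * descriptor remains wired and changes across a complete()/prepare() cycle,
 * so driver-side state derived from the descriptor can use it as a validity
 * key. The cache structure below is a hypothetical example, not IOKit API.
 */
#if 0
struct ExampleCacheEntry
{
    uint64_t preparationID;
    // ... cached, descriptor-derived state ...
};

static bool
ExampleCacheIsCurrent(IOMemoryDescriptor * md, const ExampleCacheEntry * entry)
{
    uint64_t id = md->getPreparationID();
    return ((kIOPreparationIDUnprepared != id) && (id == entry->preparationID));
}
#endif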
de355530 2030
39037602
A
2031void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
2032{
5ba3f43e
A
2033 _kernelTag = kernelTag;
2034 _userTag = userTag;
39037602
A
2035}
2036
2037vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
2038{
5ba3f43e 2039 if (vm_kernel_map_is_kernel(map))
39037602 2040 {
5ba3f43e 2041 if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag);
39037602 2042 }
5ba3f43e
A
2043 else
2044 {
2045 if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag);
2046 }
2047 return (IOMemoryTag(map));
39037602
A
2048}
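/*
 * Usage sketch (illustrative only): a driver may label a descriptor before
 * preparing it so the wired pages are charged to a chosen VM tag instead of
 * whatever IOMemoryTag() derives from the map. VM_KERN_MEMORY_IOKIT and
 * VM_MEMORY_IOKIT are existing tag values used here only as placeholders;
 * the call site is an assumption.
 */
#if 0
static void
ExampleTagForAccounting(IOMemoryDescriptor * md)
{
    // kernel-side wirings accounted to the kernel tag,
    // user mappings to the user tag
    md->setVMTags(VM_KERN_MEMORY_IOKIT, VM_MEMORY_IOKIT);
}
#endif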
2049
0c530ab8 2050IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 2051{
99c3a104
A
2052 IOReturn err = kIOReturnSuccess;
2053 DMACommandOps params;
2054 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2055 ioGMDData *dataP;
2056
2057 params = (op & ~kIOMDDMACommandOperationMask & op);
2058 op &= kIOMDDMACommandOperationMask;
2059
2060 if (kIOMDDMAMap == op)
2061 {
2062 if (dataSize < sizeof(IOMDDMAMapArgs))
2063 return kIOReturnUnderrun;
2064
2065 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2066
2067 if (!_memoryEntries
2068 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2069
2070 if (_memoryEntries && data->fMapper)
2071 {
3e170ce0 2072 bool remap, keepMap;
99c3a104 2073 dataP = getDataP(_memoryEntries);
39236c6e
A
2074
2075 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2076 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2077
3e170ce0
A
2078 keepMap = (data->fMapper == gIOSystemMapper);
2079 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2080
a39ff7e2
A
2081 if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock);
2082
3e170ce0
A
2083 remap = (!keepMap);
2084 remap |= (dataP->fDMAMapNumAddressBits < 64)
2085 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
39236c6e 2086 remap |= (dataP->fDMAMapAlignment > page_size);
3e170ce0 2087
5ba3f43e 2088 if (remap || !dataP->fMappedBaseValid)
99c3a104 2089 {
5ba3f43e 2090// if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
3e170ce0 2091 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
5ba3f43e 2092 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid)
99c3a104 2093 {
5ba3f43e
A
2094 dataP->fMappedBase = data->fAlloc;
2095 dataP->fMappedBaseValid = true;
2096 dataP->fMappedLength = data->fAllocLength;
2097 data->fAllocLength = 0; // IOMD owns the alloc now
99c3a104
A
2098 }
2099 }
2100 else
2101 {
2102 data->fAlloc = dataP->fMappedBase;
3e170ce0 2103 data->fAllocLength = 0; // give out IOMD map
5ba3f43e 2104 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
99c3a104 2105 }
39236c6e 2106 data->fMapContig = !dataP->fDiscontig;
a39ff7e2
A
2107
2108 if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock);
99c3a104 2109 }
99c3a104
A
2110 return (err);
2111 }
5ba3f43e
A
2112 if (kIOMDDMAUnmap == op)
2113 {
2114 if (dataSize < sizeof(IOMDDMAMapArgs))
2115 return kIOReturnUnderrun;
2116 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2117
2118 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2119
2120 return kIOReturnSuccess;
2121 }
99c3a104
A
2122
2123 if (kIOMDAddDMAMapSpec == op)
2124 {
2125 if (dataSize < sizeof(IODMAMapSpecification))
2126 return kIOReturnUnderrun;
2127
2128 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2129
2130 if (!_memoryEntries
2131 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2132
2133 if (_memoryEntries)
2134 {
2135 dataP = getDataP(_memoryEntries);
2136 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2137 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2138 if (data->alignment > dataP->fDMAMapAlignment)
2139 dataP->fDMAMapAlignment = data->alignment;
2140 }
2141 return kIOReturnSuccess;
2142 }
2143
0c530ab8 2144 if (kIOMDGetCharacteristics == op) {
4452a7af 2145
0c530ab8
A
2146 if (dataSize < sizeof(IOMDDMACharacteristics))
2147 return kIOReturnUnderrun;
4452a7af 2148
0c530ab8
A
2149 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2150 data->fLength = _length;
2151 data->fSGCount = _rangesCount;
2152 data->fPages = _pages;
b0d623f7 2153 data->fDirection = getDirection();
0c530ab8
A
2154 if (!_wireCount)
2155 data->fIsPrepared = false;
2156 else {
2157 data->fIsPrepared = true;
2158 data->fHighestPage = _highestPage;
99c3a104
A
2159 if (_memoryEntries)
2160 {
2161 dataP = getDataP(_memoryEntries);
2162 ioPLBlock *ioplList = getIOPLList(dataP);
2163 UInt count = getNumIOPL(_memoryEntries, dataP);
0c530ab8
A
2164 if (count == 1)
2165 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2166 }
0c530ab8 2167 }
4452a7af 2168
5ba3f43e
A
2169 return kIOReturnSuccess;
2170 }
b0d623f7 2171
5ba3f43e
A
2172 else if (kIOMDDMAActive == op)
2173 {
2174 if (params)
2175 {
2176 int16_t prior;
2177 prior = OSAddAtomic16(1, &md->_dmaReferences);
2178 if (!prior) md->_mapName = NULL;
2179 }
2180 else
2181 {
2182 if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences);
2183 else panic("_dmaReferences underflow");
2184 }
2185 }
2186 else if (kIOMDWalkSegments != op)
0c530ab8
A
2187 return kIOReturnBadArgument;
2188
2189 // Get the next segment
2190 struct InternalState {
2191 IOMDDMAWalkSegmentArgs fIO;
2192 UInt fOffset2Index;
2193 UInt fIndex;
2194 UInt fNextOffset;
2195 } *isP;
2196
2197 // Find the next segment
2198 if (dataSize < sizeof(*isP))
2199 return kIOReturnUnderrun;
2200
2201 isP = (InternalState *) vData;
2202 UInt offset = isP->fIO.fOffset;
cc8bc92a
A
2203 uint8_t mapped = isP->fIO.fMapped;
2204 uint64_t mappedBase;
0c530ab8 2205
5ba3f43e
A
2206 if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached);
2207
99c3a104
A
2208 if (IOMapper::gSystem && mapped
2209 && (!(kIOMemoryHostOnly & _flags))
5ba3f43e
A
2210 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid))
2211// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
99c3a104
A
2212 {
2213 if (!_memoryEntries
2214 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2215
2216 dataP = getDataP(_memoryEntries);
2217 if (dataP->fMapper)
2218 {
2219 IODMAMapSpecification mapSpec;
2220 bzero(&mapSpec, sizeof(mapSpec));
2221 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2222 mapSpec.alignment = dataP->fDMAMapAlignment;
3e170ce0 2223 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
99c3a104 2224 if (kIOReturnSuccess != err) return (err);
5ba3f43e 2225 dataP->fMappedBaseValid = true;
99c3a104
A
2226 }
2227 }
2228
cc8bc92a
A
2229 if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase;
2230 else if (mapped)
2231 {
2232 if (IOMapper::gSystem
2233 && (!(kIOMemoryHostOnly & _flags))
2234 && _memoryEntries
2235 && (dataP = getDataP(_memoryEntries))
2236 && dataP->fMappedBaseValid)
2237 {
2238 mappedBase = dataP->fMappedBase;
2239 }
2240 else mapped = 0;
2241 }
2242
0c530ab8
A
2243 if (offset >= _length)
2244 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2245
2246 // Validate the previous offset
2247 UInt ind, off2Ind = isP->fOffset2Index;
99c3a104 2248 if (!params
0c530ab8
A
2249 && offset
2250 && (offset == isP->fNextOffset || off2Ind <= offset))
2251 ind = isP->fIndex;
2252 else
2253 ind = off2Ind = 0; // Start from beginning
4452a7af 2254
0c530ab8
A
2255 UInt length;
2256 UInt64 address;
99c3a104 2257
0c530ab8 2258 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
4452a7af 2259
0c530ab8
A
2260 // Physical address based memory descriptor
2261 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
4452a7af 2262
0c530ab8 2263 // Find the range after the one that contains the offset
b0d623f7 2264 mach_vm_size_t len;
0c530ab8
A
2265 for (len = 0; off2Ind <= offset; ind++) {
2266 len = physP[ind].length;
2267 off2Ind += len;
2268 }
4452a7af 2269
0c530ab8
A
2270 // Calculate length within range and starting address
2271 length = off2Ind - offset;
2272 address = physP[ind - 1].address + len - length;
89b3af67 2273
cc8bc92a 2274 if (true && mapped)
99c3a104 2275 {
cc8bc92a 2276 address = mappedBase + offset;
99c3a104
A
2277 }
2278 else
2279 {
2280 // see how far we can coalesce ranges
2281 while (ind < _rangesCount && address + length == physP[ind].address) {
2282 len = physP[ind].length;
2283 length += len;
2284 off2Ind += len;
2285 ind++;
2286 }
0c530ab8 2287 }
4452a7af 2288
0c530ab8
A
2289 // correct contiguous check overshoot
2290 ind--;
2291 off2Ind -= len;
2292 }
b0d623f7 2293#ifndef __LP64__
0c530ab8 2294 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
4452a7af 2295
0c530ab8
A
2296 // Physical address based memory descriptor
2297 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
4452a7af 2298
0c530ab8
A
2299 // Find the range after the one that contains the offset
2300 mach_vm_size_t len;
2301 for (len = 0; off2Ind <= offset; ind++) {
2302 len = physP[ind].length;
2303 off2Ind += len;
2304 }
89b3af67 2305
0c530ab8
A
2306 // Calculate length within range and starting address
2307 length = off2Ind - offset;
2308 address = physP[ind - 1].address + len - length;
89b3af67 2309
cc8bc92a 2310 if (true && mapped)
99c3a104 2311 {
cc8bc92a 2312 address = mappedBase + offset;
99c3a104
A
2313 }
2314 else
2315 {
2316 // see how far we can coalesce ranges
2317 while (ind < _rangesCount && address + length == physP[ind].address) {
2318 len = physP[ind].length;
2319 length += len;
2320 off2Ind += len;
2321 ind++;
2322 }
0c530ab8 2323 }
0c530ab8
A
2324 // correct contiguous check overshoot
2325 ind--;
2326 off2Ind -= len;
99c3a104 2327 }
b0d623f7 2328#endif /* !__LP64__ */
0c530ab8
A
2329 else do {
2330 if (!_wireCount)
2331 panic("IOGMD: not wired for the IODMACommand");
4452a7af 2332
0c530ab8 2333 assert(_memoryEntries);
4452a7af 2334
99c3a104 2335 dataP = getDataP(_memoryEntries);
0c530ab8
A
2336 const ioPLBlock *ioplList = getIOPLList(dataP);
2337 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2338 upl_page_info_t *pageList = getPageList(dataP);
4452a7af 2339
0c530ab8 2340 assert(numIOPLs > 0);
4452a7af 2341
0c530ab8
A
2342 // Scan through iopl info blocks looking for block containing offset
2343 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2344 ind++;
4452a7af 2345
0c530ab8
A
2346 // Go back to actual range as search goes past it
2347 ioPLBlock ioplInfo = ioplList[ind - 1];
2348 off2Ind = ioplInfo.fIOMDOffset;
2349
2350 if (ind < numIOPLs)
2351 length = ioplList[ind].fIOMDOffset;
2352 else
2353 length = _length;
2354 length -= offset; // Remainder within iopl
2355
2356 // Subtract offset till this iopl in total list
2357 offset -= off2Ind;
2358
2359 // If a mapped address is requested and this is a pre-mapped IOPL
2360 // then just need to compute an offset relative to the mapped base.
cc8bc92a 2361 if (mapped) {
0c530ab8 2362 offset += (ioplInfo.fPageOffset & PAGE_MASK);
cc8bc92a 2363 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
0c530ab8
A
2364 continue; // Done; leave the do/while(false) now
2365 }
2366
2367 // The offset is rebased into the current iopl.
2368 // Now add the iopl 1st page offset.
2369 offset += ioplInfo.fPageOffset;
2370
2371 // For external UPLs the fPageInfo field points directly to
2372 // the upl's upl_page_info_t array.
2373 if (ioplInfo.fFlags & kIOPLExternUPL)
2374 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2375 else
2376 pageList = &pageList[ioplInfo.fPageInfo];
2377
2378 // Check for direct device non-paged memory
2379 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2380 address = ptoa_64(pageList->phys_addr) + offset;
2381 continue; // Done; leave the do/while(false) now
2382 }
4452a7af 2383
0c530ab8
A
2384 // Now we need to compute the index into the pageList
2385 UInt pageInd = atop_32(offset);
2386 offset &= PAGE_MASK;
2387
2388 // Compute the starting address of this segment
2389 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
b0d623f7
A
2390 if (!pageAddr) {
2391 panic("!pageList phys_addr");
6d2010ae 2392 }
b0d623f7 2393
0c530ab8
A
2394 address = ptoa_64(pageAddr) + offset;
2395
2396 // length is currently set to the length of the remainder of the iopl.
2397 // We need to check that the remainder of the iopl is contiguous.
2398 // This is indicated by pageList[ind].phys_addr being sequential.
2399 IOByteCount contigLength = PAGE_SIZE - offset;
2400 while (contigLength < length
2401 && ++pageAddr == pageList[++pageInd].phys_addr)
2402 {
2403 contigLength += PAGE_SIZE;
2404 }
2405
2406 if (contigLength < length)
2407 length = contigLength;
2408
2409
2410 assert(address);
2411 assert(length);
2412
2413 } while (false);
2414
2415 // Update return values and state
2416 isP->fIO.fIOVMAddr = address;
2417 isP->fIO.fLength = length;
2418 isP->fIndex = ind;
2419 isP->fOffset2Index = off2Ind;
2420 isP->fNextOffset = isP->fIO.fOffset + length;
2421
2422 return kIOReturnSuccess;
2423}
2424
2425addr64_t
b0d623f7 2426IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2427{
fe8ab488
A
2428 IOReturn ret;
2429 mach_vm_address_t address = 0;
2430 mach_vm_size_t length = 0;
2431 IOMapper * mapper = gIOSystemMapper;
2432 IOOptionBits type = _flags & kIOMemoryTypeMask;
b0d623f7
A
2433
2434 if (lengthOfSegment)
2435 *lengthOfSegment = 0;
2436
2437 if (offset >= _length)
2438 return 0;
4452a7af 2439
b0d623f7
A
2440 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2441 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2442 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2443 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2d21ac55 2444
b0d623f7
A
2445 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2446 {
2447 unsigned rangesIndex = 0;
2448 Ranges vec = _ranges;
fe8ab488 2449 mach_vm_address_t addr;
b0d623f7
A
2450
2451 // Find starting address within the vector of ranges
2452 for (;;) {
2453 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2454 if (offset < length)
2455 break;
2456 offset -= length; // (make offset relative)
2457 rangesIndex++;
2458 }
2459
2460 // Now that we have the starting range,
2461 // lets find the last contiguous range
2462 addr += offset;
2463 length -= offset;
2464
2465 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
fe8ab488
A
2466 mach_vm_address_t newAddr;
2467 mach_vm_size_t newLen;
b0d623f7
A
2468
2469 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2470 if (addr + length != newAddr)
2471 break;
2472 length += newLen;
2473 }
2474 if (addr)
2475 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2476 }
2477 else
0c530ab8
A
2478 {
2479 IOMDDMAWalkSegmentState _state;
99c3a104 2480 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
0c530ab8
A
2481
2482 state->fOffset = offset;
2483 state->fLength = _length - offset;
5ba3f43e 2484 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
0c530ab8
A
2485
2486 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2487
2488 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
b0d623f7 2489 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
0c530ab8
A
2490 ret, this, state->fOffset,
2491 state->fIOVMAddr, state->fLength);
2492 if (kIOReturnSuccess == ret)
2493 {
2494 address = state->fIOVMAddr;
2495 length = state->fLength;
2496 }
b0d623f7
A
2497
2498 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2499 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2500
2501 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2502 {
2503 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2504 {
2505 addr64_t origAddr = address;
2506 IOByteCount origLen = length;
2507
3e170ce0 2508 address = mapper->mapToPhysicalAddress(origAddr);
b0d623f7
A
2509 length = page_size - (address & (page_size - 1));
2510 while ((length < origLen)
3e170ce0 2511 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
b0d623f7
A
2512 length += page_size;
2513 if (length > origLen)
2514 length = origLen;
2515 }
b0d623f7 2516 }
4452a7af
A
2517 }
2518
b0d623f7
A
2519 if (!address)
2520 length = 0;
2521
4452a7af
A
2522 if (lengthOfSegment)
2523 *lengthOfSegment = length;
2524
0c530ab8
A
2525 return (address);
2526}
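/*
 * Usage sketch (illustrative only): walking the CPU-physical segments of a
 * prepared descriptor. kIOMemoryMapperNone requests untranslated physical
 * addresses rather than system-mapper (IOMMU) addresses; for real DMA,
 * IODMACommand is the supported interface. The helper name is an assumption.
 */
#if 0
static void
ExampleDumpSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;

    while (offset < md->getLength())
    {
        IOByteCount segLen  = 0;
        addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!segAddr || !segLen) break;
        IOLog("segment @ 0x%qx len 0x%qx\n", segAddr, (uint64_t) segLen);
        offset += segLen;
    }
}
#endif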
2527
b0d623f7 2528#ifndef __LP64__
39037602
A
2529#pragma clang diagnostic push
2530#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2531
b0d623f7
A
2532addr64_t
2533IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2534{
b0d623f7 2535 addr64_t address = 0;
0c530ab8 2536
b0d623f7 2537 if (options & _kIOMemorySourceSegment)
0c530ab8 2538 {
b0d623f7
A
2539 address = getSourceSegment(offset, lengthOfSegment);
2540 }
2541 else if (options & kIOMemoryMapperNone)
2542 {
2543 address = getPhysicalSegment64(offset, lengthOfSegment);
2544 }
2545 else
2546 {
2547 address = getPhysicalSegment(offset, lengthOfSegment);
2548 }
0c530ab8 2549
b0d623f7
A
2550 return (address);
2551}
39037602 2552#pragma clang diagnostic pop
0c530ab8 2553
b0d623f7
A
2554addr64_t
2555IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2556{
2557 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2558}
0c530ab8 2559
b0d623f7
A
2560IOPhysicalAddress
2561IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2562{
2563 addr64_t address = 0;
2564 IOByteCount length = 0;
0c530ab8 2565
b0d623f7
A
2566 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2567
2568 if (lengthOfSegment)
2569 length = *lengthOfSegment;
0c530ab8
A
2570
2571 if ((address + length) > 0x100000000ULL)
2572 {
2d21ac55 2573 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 2574 address, (long) length, (getMetaClass())->getClassName());
0c530ab8
A
2575 }
2576
0c530ab8 2577 return ((IOPhysicalAddress) address);
55e303ae 2578}
de355530 2579
0c530ab8
A
2580addr64_t
2581IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae
A
2582{
2583 IOPhysicalAddress phys32;
2584 IOByteCount length;
2585 addr64_t phys64;
0c530ab8 2586 IOMapper * mapper = 0;
0b4e3aa0 2587
55e303ae
A
2588 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2589 if (!phys32)
2590 return 0;
0b4e3aa0 2591
55e303ae 2592 if (gIOSystemMapper)
0c530ab8
A
2593 mapper = gIOSystemMapper;
2594
2595 if (mapper)
1c79356b 2596 {
55e303ae
A
2597 IOByteCount origLen;
2598
3e170ce0 2599 phys64 = mapper->mapToPhysicalAddress(phys32);
55e303ae
A
2600 origLen = *lengthOfSegment;
2601 length = page_size - (phys64 & (page_size - 1));
2602 while ((length < origLen)
3e170ce0 2603 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
55e303ae
A
2604 length += page_size;
2605 if (length > origLen)
2606 length = origLen;
2607
2608 *lengthOfSegment = length;
0b4e3aa0 2609 }
55e303ae
A
2610 else
2611 phys64 = (addr64_t) phys32;
1c79356b 2612
55e303ae 2613 return phys64;
0b4e3aa0
A
2614}
2615
0c530ab8 2616IOPhysicalAddress
b0d623f7 2617IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 2618{
b0d623f7 2619 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
0b4e3aa0
A
2620}
2621
b0d623f7
A
2622IOPhysicalAddress
2623IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2624{
2625 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2626}
1c79356b 2627
39037602
A
2628#pragma clang diagnostic push
2629#pragma clang diagnostic ignored "-Wdeprecated-declarations"
2630
b0d623f7
A
2631void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2632 IOByteCount * lengthOfSegment)
2633{
2634 if (_task == kernel_task)
2635 return (void *) getSourceSegment(offset, lengthOfSegment);
2636 else
2637 panic("IOGMD::getVirtualSegment deprecated");
91447636 2638
b0d623f7
A
2639 return 0;
2640}
39037602 2641#pragma clang diagnostic pop
b0d623f7 2642#endif /* !__LP64__ */
91447636 2643
0c530ab8
A
2644IOReturn
2645IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2646{
99c3a104
A
2647 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2648 DMACommandOps params;
2649 IOReturn err;
2650
2651 params = (op & ~kIOMDDMACommandOperationMask & op);
2652 op &= kIOMDDMACommandOperationMask;
2653
0c530ab8
A
2654 if (kIOMDGetCharacteristics == op) {
2655 if (dataSize < sizeof(IOMDDMACharacteristics))
2656 return kIOReturnUnderrun;
2657
2658 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2659 data->fLength = getLength();
2660 data->fSGCount = 0;
b0d623f7 2661 data->fDirection = getDirection();
0c530ab8
A
2662 data->fIsPrepared = true; // Assume prepared - fails safe
2663 }
99c3a104 2664 else if (kIOMDWalkSegments == op) {
0c530ab8
A
2665 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2666 return kIOReturnUnderrun;
2667
2668 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2669 IOByteCount offset = (IOByteCount) data->fOffset;
2670
2671 IOPhysicalLength length;
0c530ab8 2672 if (data->fMapped && IOMapper::gSystem)
99c3a104 2673 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
0c530ab8 2674 else
99c3a104 2675 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
0c530ab8
A
2676 data->fLength = length;
2677 }
99c3a104
A
2678 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2679 else if (kIOMDDMAMap == op)
2680 {
2681 if (dataSize < sizeof(IOMDDMAMapArgs))
2682 return kIOReturnUnderrun;
2683 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2684
2685 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2686
39236c6e 2687 data->fMapContig = true;
3e170ce0 2688 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
5ba3f43e 2689
99c3a104
A
2690 return (err);
2691 }
5ba3f43e
A
2692 else if (kIOMDDMAUnmap == op)
2693 {
2694 if (dataSize < sizeof(IOMDDMAMapArgs))
2695 return kIOReturnUnderrun;
2696 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2697
2698 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2699
2700 return (kIOReturnSuccess);
2701 }
99c3a104 2702 else return kIOReturnBadArgument;
0c530ab8
A
2703
2704 return kIOReturnSuccess;
2705}
2706
b0d623f7
A
2707IOReturn
2708IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2709 IOOptionBits * oldState )
2710{
2711 IOReturn err = kIOReturnSuccess;
fe8ab488 2712
b0d623f7
A
2713 vm_purgable_t control;
2714 int state;
2715
5ba3f43e
A
2716 assert (!(kIOMemoryRemote & _flags));
2717 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2718
fe8ab488 2719 if (_memRef)
b0d623f7
A
2720 {
2721 err = super::setPurgeable(newState, oldState);
2722 }
2723 else
2724 {
2725 if (kIOMemoryThreadSafe & _flags)
2726 LOCK;
2727 do
2728 {
2729 // Find the appropriate vm_map for the given task
2730 vm_map_t curMap;
2731 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2732 {
2733 err = kIOReturnNotReady;
2734 break;
2735 }
39236c6e
A
2736 else if (!_task)
2737 {
2738 err = kIOReturnUnsupported;
2739 break;
2740 }
b0d623f7 2741 else
5ba3f43e 2742 {
b0d623f7 2743 curMap = get_task_map(_task);
5ba3f43e
A
2744 if (NULL == curMap)
2745 {
2746 err = KERN_INVALID_ARGUMENT;
2747 break;
2748 }
2749 }
b0d623f7
A
2750
2751 // can only do one range
2752 Ranges vec = _ranges;
2753 IOOptionBits type = _flags & kIOMemoryTypeMask;
fe8ab488
A
2754 mach_vm_address_t addr;
2755 mach_vm_size_t len;
b0d623f7
A
2756 getAddrLenForInd(addr, len, type, vec, 0);
2757
2758 err = purgeableControlBits(newState, &control, &state);
2759 if (kIOReturnSuccess != err)
2760 break;
5ba3f43e 2761 err = vm_map_purgable_control(curMap, addr, control, &state);
b0d623f7
A
2762 if (oldState)
2763 {
2764 if (kIOReturnSuccess == err)
2765 {
2766 err = purgeableStateBits(&state);
2767 *oldState = state;
2768 }
2769 }
2770 }
2771 while (false);
2772 if (kIOMemoryThreadSafe & _flags)
2773 UNLOCK;
2774 }
fe8ab488 2775
b0d623f7
A
2776 return (err);
2777}
2778
91447636
A
2779IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2780 IOOptionBits * oldState )
2781{
fe8ab488 2782 IOReturn err = kIOReturnNotReady;
b0d623f7 2783
fe8ab488
A
2784 if (kIOMemoryThreadSafe & _flags) LOCK;
2785 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2786 if (kIOMemoryThreadSafe & _flags) UNLOCK;
b0d623f7 2787
91447636
A
2788 return (err);
2789}
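/*
 * Usage sketch (illustrative only): toggling the purgeable state of memory
 * that was created purgeable. Only the setPurgeable() calls exercise the code
 * above; the assumption that the backing buffer was allocated with the
 * purgeable option, and the helper name, are illustrative.
 */
#if 0
static void
ExamplePurgeableCycle(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;

    // allow the VM to reclaim the pages while the driver is idle
    md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);

    // ... later, before reuse: pin the contents again and check whether
    // the pages were emptied in the meantime
    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    if (kIOMemoryPurgeableEmpty == oldState)
    {
        // contents were discarded; regenerate them
    }
}
#endif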
39236c6e
A
2790
2791IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2792 IOByteCount * dirtyPageCount )
2793{
3e170ce0 2794 IOReturn err = kIOReturnNotReady;
39236c6e 2795
5ba3f43e
A
2796 assert (!(kIOMemoryRemote & _flags));
2797 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2798
fe8ab488
A
2799 if (kIOMemoryThreadSafe & _flags) LOCK;
2800 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
3e170ce0
A
2801 else
2802 {
2803 IOMultiMemoryDescriptor * mmd;
2804 IOSubMemoryDescriptor * smd;
2805 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2806 {
2807 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2808 }
2809 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2810 {
2811 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2812 }
2813 }
39236c6e
A
2814 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2815
2816 return (err);
2817}
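/*
 * Usage sketch (illustrative only): sampling residency and dirtiness for a
 * descriptor, e.g. for diagnostics before a large transfer. The helper name
 * and the logging are assumptions for the example.
 */
#if 0
static void
ExampleLogResidency(IOMemoryDescriptor * md)
{
    IOByteCount resident = 0;
    IOByteCount dirty    = 0;

    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty))
    {
        IOLog("descriptor: %ld resident, %ld dirty pages\n",
              (long) resident, (long) dirty);
    }
}
#endif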
2818
2819
5ba3f43e
A
2820#if defined(__arm__) || defined(__arm64__)
2821extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2822extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2823#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
2824extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2825extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
5ba3f43e 2826#endif /* defined(__arm__) || defined(__arm64__) */
91447636 2827
0b4c1975
A
2828static void SetEncryptOp(addr64_t pa, unsigned int count)
2829{
2830 ppnum_t page, end;
2831
2832 page = atop_64(round_page_64(pa));
2833 end = atop_64(trunc_page_64(pa + count));
2834 for (; page < end; page++)
2835 {
2836 pmap_clear_noencrypt(page);
2837 }
2838}
2839
2840static void ClearEncryptOp(addr64_t pa, unsigned int count)
2841{
2842 ppnum_t page, end;
2843
2844 page = atop_64(round_page_64(pa));
2845 end = atop_64(trunc_page_64(pa + count));
2846 for (; page < end; page++)
2847 {
2848 pmap_set_noencrypt(page);
2849 }
2850}
2851
91447636
A
2852IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2853 IOByteCount offset, IOByteCount length )
2854{
2855 IOByteCount remaining;
316670eb 2856 unsigned int res;
91447636 2857 void (*func)(addr64_t pa, unsigned int count) = 0;
5ba3f43e
A
2858#if defined(__arm__) || defined(__arm64__)
2859 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
2860#endif
2861
2862 assert (!(kIOMemoryRemote & _flags));
2863 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
91447636
A
2864
2865 switch (options)
2866 {
2867 case kIOMemoryIncoherentIOFlush:
5ba3f43e
A
2868#if defined(__arm__) || defined(__arm64__)
2869 func_ext = &dcache_incoherent_io_flush64;
2870#if __ARM_COHERENT_IO__
2871 func_ext(0, 0, 0, &res);
2872 return kIOReturnSuccess;
2873#else /* __ARM_COHERENT_IO__ */
2874 break;
2875#endif /* __ARM_COHERENT_IO__ */
2876#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
2877 func = &dcache_incoherent_io_flush64;
2878 break;
5ba3f43e 2879#endif /* defined(__arm__) || defined(__arm64__) */
91447636 2880 case kIOMemoryIncoherentIOStore:
5ba3f43e
A
2881#if defined(__arm__) || defined(__arm64__)
2882 func_ext = &dcache_incoherent_io_store64;
2883#if __ARM_COHERENT_IO__
2884 func_ext(0, 0, 0, &res);
2885 return kIOReturnSuccess;
2886#else /* __ARM_COHERENT_IO__ */
2887 break;
2888#endif /* __ARM_COHERENT_IO__ */
2889#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
2890 func = &dcache_incoherent_io_store64;
2891 break;
5ba3f43e 2892#endif /* defined(__arm__) || defined(__arm64__) */
0b4c1975
A
2893
2894 case kIOMemorySetEncrypted:
2895 func = &SetEncryptOp;
2896 break;
2897 case kIOMemoryClearEncrypted:
2898 func = &ClearEncryptOp;
2899 break;
91447636
A
2900 }
2901
5ba3f43e
A
2902#if defined(__arm__) || defined(__arm64__)
2903 if ((func == 0) && (func_ext == 0))
2904 return (kIOReturnUnsupported);
2905#else /* defined(__arm__) || defined(__arm64__) */
91447636
A
2906 if (!func)
2907 return (kIOReturnUnsupported);
5ba3f43e 2908#endif /* defined(__arm__) || defined(__arm64__) */
91447636 2909
b0d623f7
A
2910 if (kIOMemoryThreadSafe & _flags)
2911 LOCK;
2912
316670eb 2913 res = 0x0UL;
91447636
A
2914 remaining = length = min(length, getLength() - offset);
2915 while (remaining)
2916 // (process another target segment?)
2917 {
2918 addr64_t dstAddr64;
2919 IOByteCount dstLen;
2920
b0d623f7 2921 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
91447636
A
2922 if (!dstAddr64)
2923 break;
2924
2925 // Clip segment length to remaining
2926 if (dstLen > remaining)
2927 dstLen = remaining;
2928
5ba3f43e
A
2929#if defined(__arm__) || defined(__arm64__)
2930 if (func)
2931 (*func)(dstAddr64, dstLen);
2932 if (func_ext) {
2933 (*func_ext)(dstAddr64, dstLen, remaining, &res);
2934 if (res != 0x0UL) {
2935 remaining = 0;
2936 break;
2937 }
2938 }
2939#else /* defined(__arm__) || defined(__arm64__) */
91447636 2940 (*func)(dstAddr64, dstLen);
5ba3f43e 2941#endif /* defined(__arm__) || defined(__arm64__) */
91447636
A
2942
2943 offset += dstLen;
2944 remaining -= dstLen;
2945 }
2946
b0d623f7
A
2947 if (kIOMemoryThreadSafe & _flags)
2948 UNLOCK;
2949
91447636
A
2950 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2951}
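/*
 * Usage sketch (illustrative only): on hardware without coherent DMA a driver
 * pushes dirty cache lines to memory before the device reads a buffer, and
 * flushes before the CPU reads data the device wrote. The helper names are
 * assumptions; the option constants are the ones handled above.
 */
#if 0
static void
ExampleSyncForDevice(IOMemoryDescriptor * md)
{
    // write back CPU caches so the device sees the latest data
    md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
}

static void
ExampleSyncForCPU(IOMemoryDescriptor * md)
{
    // flush so the CPU does not read stale cached data
    md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
}
#endif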
2952
39037602
A
2953/*
2954 *
2955 */
2956
316670eb 2957#if defined(__i386__) || defined(__x86_64__)
3e170ce0
A
2958
2959#define io_kernel_static_start vm_kernel_stext
2960#define io_kernel_static_end vm_kernel_etext
2961
5ba3f43e
A
2962#elif defined(__arm__) || defined(__arm64__)
2963
2964extern vm_offset_t static_memory_end;
2965
2966#if defined(__arm64__)
2967#define io_kernel_static_start vm_kext_base
2968#else /* defined(__arm64__) */
2969#define io_kernel_static_start vm_kernel_stext
2970#endif /* defined(__arm64__) */
2971
2972#define io_kernel_static_end static_memory_end
2973
316670eb
A
2974#else
2975#error io_kernel_static_end is undefined for this architecture
2976#endif
55e303ae
A
2977
2978static kern_return_t
2979io_get_kernel_static_upl(
91447636 2980 vm_map_t /* map */,
b0d623f7 2981 uintptr_t offset,
3e170ce0 2982 upl_size_t *upl_size,
55e303ae
A
2983 upl_t *upl,
2984 upl_page_info_array_t page_list,
0c530ab8
A
2985 unsigned int *count,
2986 ppnum_t *highest_page)
1c79356b 2987{
55e303ae
A
2988 unsigned int pageCount, page;
2989 ppnum_t phys;
0c530ab8 2990 ppnum_t highestPage = 0;
1c79356b 2991
55e303ae
A
2992 pageCount = atop_32(*upl_size);
2993 if (pageCount > *count)
2994 pageCount = *count;
1c79356b 2995
55e303ae 2996 *upl = NULL;
1c79356b 2997
55e303ae
A
2998 for (page = 0; page < pageCount; page++)
2999 {
3000 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
3001 if (!phys)
3002 break;
3003 page_list[page].phys_addr = phys;
39037602 3004 page_list[page].free_when_done = 0;
55e303ae
A
3005 page_list[page].absent = 0;
3006 page_list[page].dirty = 0;
3007 page_list[page].precious = 0;
3008 page_list[page].device = 0;
0c530ab8 3009 if (phys > highestPage)
b0d623f7 3010 highestPage = phys;
55e303ae 3011 }
0b4e3aa0 3012
0c530ab8
A
3013 *highest_page = highestPage;
3014
55e303ae
A
3015 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
3016}
0b4e3aa0 3017
55e303ae
A
3018IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3019{
91447636 3020 IOOptionBits type = _flags & kIOMemoryTypeMask;
39037602 3021 IOReturn error = kIOReturnSuccess;
55e303ae 3022 ioGMDData *dataP;
99c3a104 3023 upl_page_info_array_t pageInfo;
39236c6e 3024 ppnum_t mapBase;
5ba3f43e 3025 vm_tag_t tag = VM_KERN_MEMORY_NONE;
1c79356b 3026
0c530ab8 3027 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 3028
39236c6e
A
3029 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
3030 forDirection = (IODirection) (forDirection | getDirection());
55e303ae 3031
5ba3f43e 3032 dataP = getDataP(_memoryEntries);
3e170ce0 3033 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 3034 switch (kIODirectionOutIn & forDirection)
55e303ae 3035 {
5ba3f43e
A
3036 case kIODirectionOut:
3037 // Pages do not need to be marked as dirty on commit
3038 uplFlags = UPL_COPYOUT_FROM;
3039 dataP->fDMAAccess = kIODMAMapReadAccess;
3040 break;
55e303ae 3041
5ba3f43e
A
3042 case kIODirectionIn:
3043 dataP->fDMAAccess = kIODMAMapWriteAccess;
3044 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3045 break;
39037602 3046
5ba3f43e
A
3047 default:
3048 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3049 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3050 break;
3051 }
55e303ae 3052
39236c6e
A
3053 if (_wireCount)
3054 {
3055 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
3056 {
3057 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3058 error = kIOReturnNotWritable;
3059 }
39236c6e 3060 }
39037602 3061 else
99c3a104 3062 {
39037602 3063 IOMapper *mapper;
5ba3f43e 3064
39037602 3065 mapper = dataP->fMapper;
5ba3f43e 3066 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
de355530 3067
39037602 3068 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
5ba3f43e
A
3069 tag = _kernelTag;
3070 if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map);
55e303ae 3071
39037602
A
3072 if (kIODirectionPrepareToPhys32 & forDirection)
3073 {
3074 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
3075 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
3076 }
3077 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
3078 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
3079 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3080
3081 mapBase = 0;
3082
3083 // Note that appendBytes(NULL) zeros the data up to the desired length
3084 // and the length parameter is an unsigned int
3085 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3086 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
3087 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
3088 dataP = 0;
3089
3090 // Find the appropriate vm_map for the given task
3091 vm_map_t curMap;
3092 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
3093 else curMap = get_task_map(_task);
3094
3095 // Iterate over the vector of virtual ranges
3096 Ranges vec = _ranges;
3097 unsigned int pageIndex = 0;
3098 IOByteCount mdOffset = 0;
3099 ppnum_t highestPage = 0;
3100
3101 IOMemoryEntry * memRefEntry = 0;
3102 if (_memRef) memRefEntry = &_memRef->entries[0];
3103
3104 for (UInt range = 0; range < _rangesCount; range++) {
3105 ioPLBlock iopl;
cc8bc92a 3106 mach_vm_address_t startPage, startPageOffset;
39037602
A
3107 mach_vm_size_t numBytes;
3108 ppnum_t highPage = 0;
3109
3110 // Get the startPage address and length of vec[range]
3111 getAddrLenForInd(startPage, numBytes, type, vec, range);
cc8bc92a
A
3112 startPageOffset = startPage & PAGE_MASK;
3113 iopl.fPageOffset = startPageOffset;
3114 numBytes += startPageOffset;
39037602
A
3115 startPage = trunc_page_64(startPage);
3116
3117 if (mapper)
3118 iopl.fMappedPage = mapBase + pageIndex;
3119 else
3120 iopl.fMappedPage = 0;
3121
3122 // Iterate over the current range, creating UPLs
3123 while (numBytes) {
3124 vm_address_t kernelStart = (vm_address_t) startPage;
3125 vm_map_t theMap;
3126 if (curMap) theMap = curMap;
3127 else if (_memRef)
3128 {
3129 theMap = NULL;
3130 }
3131 else
3132 {
3133 assert(_task == kernel_task);
3134 theMap = IOPageableMapForAddress(kernelStart);
3135 }
ecc0ceb4 3136
39037602
A
3137 // ioplFlags is an in/out parameter
3138 upl_control_flags_t ioplFlags = uplFlags;
3139 dataP = getDataP(_memoryEntries);
3140 pageInfo = getPageList(dataP);
3141 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3142
3143 mach_vm_size_t _ioplSize = round_page(numBytes);
3144 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3145 unsigned int numPageInfo = atop_32(ioplSize);
3146
3147 if ((theMap == kernel_map)
3148 && (kernelStart >= io_kernel_static_start)
3149 && (kernelStart < io_kernel_static_end)) {
3150 error = io_get_kernel_static_upl(theMap,
3151 kernelStart,
3152 &ioplSize,
3153 &iopl.fIOPL,
3154 baseInfo,
3155 &numPageInfo,
3156 &highPage);
3157 }
3158 else if (_memRef) {
3159 memory_object_offset_t entryOffset;
3160
3161 entryOffset = mdOffset;
3162 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3163 if (entryOffset >= memRefEntry->size) {
3164 memRefEntry++;
3165 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
3166 entryOffset = 0;
3167 }
3168 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
3169 error = memory_object_iopl_request(memRefEntry->entry,
3170 entryOffset,
3171 &ioplSize,
3172 &iopl.fIOPL,
3173 baseInfo,
3174 &numPageInfo,
5ba3f43e
A
3175 &ioplFlags,
3176 tag);
39037602
A
3177 }
3178 else {
3179 assert(theMap);
3180 error = vm_map_create_upl(theMap,
3181 startPage,
3182 (upl_size_t*)&ioplSize,
3183 &iopl.fIOPL,
3184 baseInfo,
3185 &numPageInfo,
5ba3f43e
A
3186 &ioplFlags,
3187 tag);
39037602 3188 }
0c530ab8 3189
39037602 3190 if (error != KERN_SUCCESS) goto abortExit;
55e303ae 3191
39037602 3192 assert(ioplSize);
55e303ae 3193
39037602
A
3194 if (iopl.fIOPL)
3195 highPage = upl_get_highest_page(iopl.fIOPL);
3196 if (highPage > highestPage)
3197 highestPage = highPage;
55e303ae 3198
39037602
A
3199 if (baseInfo->device) {
3200 numPageInfo = 1;
3201 iopl.fFlags = kIOPLOnDevice;
3202 }
3203 else {
3204 iopl.fFlags = 0;
3205 }
55e303ae 3206
39037602
A
3207 iopl.fIOMDOffset = mdOffset;
3208 iopl.fPageInfo = pageIndex;
cc8bc92a 3209 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true;
39037602
A
3210
3211 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3212 // Clean up partial created and unsaved iopl
3213 if (iopl.fIOPL) {
3214 upl_abort(iopl.fIOPL, 0);
3215 upl_deallocate(iopl.fIOPL);
3216 }
3217 goto abortExit;
3218 }
3219 dataP = 0;
3220
3221 // Check for multiple iopls in one virtual range
3222 pageIndex += numPageInfo;
3223 mdOffset -= iopl.fPageOffset;
3224 if (ioplSize < numBytes) {
3225 numBytes -= ioplSize;
3226 startPage += ioplSize;
3227 mdOffset += ioplSize;
3228 iopl.fPageOffset = 0;
3229 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
3230 }
3231 else {
3232 mdOffset += numBytes;
3233 break;
91447636 3234 }
55e303ae 3235 }
1c79356b 3236 }
55e303ae 3237
39037602 3238 _highestPage = highestPage;
0c530ab8 3239
39037602
A
3240 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
3241 }
39236c6e 3242
39037602 3243#if IOTRACKING
5ba3f43e 3244 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error))
3e170ce0 3245 {
5ba3f43e
A
3246 dataP = getDataP(_memoryEntries);
3247 if (!dataP->fWireTracking.link.next)
3248 {
3249 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3250 }
3e170ce0 3251 }
39037602 3252#endif /* IOTRACKING */
3e170ce0 3253
39037602 3254 return (error);
1c79356b
A
3255
3256abortExit:
55e303ae
A
3257 {
3258 dataP = getDataP(_memoryEntries);
91447636 3259 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
3260 ioPLBlock *ioplList = getIOPLList(dataP);
3261
3262 for (UInt range = 0; range < done; range++)
3263 {
91447636
A
3264 if (ioplList[range].fIOPL) {
3265 upl_abort(ioplList[range].fIOPL, 0);
3266 upl_deallocate(ioplList[range].fIOPL);
3267 }
55e303ae 3268 }
6d2010ae 3269 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
1c79356b
A
3270 }
3271
2d21ac55
A
3272 if (error == KERN_FAILURE)
3273 error = kIOReturnCannotWire;
39236c6e
A
3274 else if (error == KERN_MEMORY_ERROR)
3275 error = kIOReturnNoResources;
2d21ac55 3276
55e303ae
A
3277 return error;
3278}
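/*
 * Behavioural sketch (illustrative only): the first wiring fixes the access
 * direction. A descriptor first prepared kIODirectionOut is marked
 * prepared-read-only, so a nested prepare that needs writable pages is
 * refused via the kIOReturnNotWritable path above. The call sequence is an
 * assumption for the example.
 */
#if 0
static void
ExampleNestedPrepare(IOMemoryDescriptor * md)
{
    if (kIOReturnSuccess == md->prepare(kIODirectionOut))      // copy-out only wire
    {
        IOReturn err = md->prepare(kIODirectionIn);            // wants writable pages
        // err is kIOReturnNotWritable here; the first wiring was read-only
        if (kIOReturnSuccess == err) md->complete(kIODirectionIn);
        md->complete(kIODirectionOut);
    }
}
#endif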
d7e50217 3279
99c3a104
A
3280bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3281{
3282 ioGMDData * dataP;
3283 unsigned dataSize = size;
3284
3285 if (!_memoryEntries) {
3286 _memoryEntries = OSData::withCapacity(dataSize);
3287 if (!_memoryEntries)
3288 return false;
3289 }
3290 else if (!_memoryEntries->initWithCapacity(dataSize))
3291 return false;
3292
3293 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3294 dataP = getDataP(_memoryEntries);
3295
3296 if (mapper == kIOMapperWaitSystem) {
3297 IOMapper::checkForSystemMapper();
3298 mapper = IOMapper::gSystem;
3299 }
3300 dataP->fMapper = mapper;
3301 dataP->fPageCnt = 0;
3302 dataP->fMappedBase = 0;
3303 dataP->fDMAMapNumAddressBits = 64;
3304 dataP->fDMAMapAlignment = 0;
3305 dataP->fPreparationID = kIOPreparationIDUnprepared;
39236c6e 3306 dataP->fDiscontig = false;
fe8ab488 3307 dataP->fCompletionError = false;
5ba3f43e 3308 dataP->fMappedBaseValid = false;
99c3a104
A
3309
3310 return (true);
3311}
3312
3313IOReturn IOMemoryDescriptor::dmaMap(
3314 IOMapper * mapper,
3e170ce0 3315 IODMACommand * command,
99c3a104
A
3316 const IODMAMapSpecification * mapSpec,
3317 uint64_t offset,
3318 uint64_t length,
3e170ce0
A
3319 uint64_t * mapAddress,
3320 uint64_t * mapLength)
99c3a104 3321{
5ba3f43e 3322 IOReturn err;
3e170ce0 3323 uint32_t mapOptions;
99c3a104 3324
3e170ce0
A
3325 mapOptions = 0;
3326 mapOptions |= kIODMAMapReadAccess;
99c3a104
A
3327 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3328
5ba3f43e 3329 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3e170ce0 3330 mapSpec, command, NULL, mapAddress, mapLength);
99c3a104 3331
5ba3f43e
A
3332 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
3333
3334 return (err);
3335}
3336
3337void IOMemoryDescriptor::dmaMapRecord(
3338 IOMapper * mapper,
3339 IODMACommand * command,
3340 uint64_t mapLength)
3341{
3342 kern_allocation_name_t alloc;
3343 int16_t prior;
3344
3345 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */)
3346 {
3347 kern_allocation_update_size(mapper->fAllocName, mapLength);
3348 }
3349
3350 if (!command) return;
3351 prior = OSAddAtomic16(1, &_dmaReferences);
3352 if (!prior)
3353 {
3354 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag))
3355 {
3356 _mapName = alloc;
3357 mapLength = _length;
3358 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3359 }
3360 else _mapName = NULL;
3361 }
3362}
3363
3364IOReturn IOMemoryDescriptor::dmaUnmap(
3365 IOMapper * mapper,
3366 IODMACommand * command,
3367 uint64_t offset,
3368 uint64_t mapAddress,
3369 uint64_t mapLength)
3370{
3371 IOReturn ret;
3372 kern_allocation_name_t alloc;
3373 kern_allocation_name_t mapName;
3374 int16_t prior;
3375
3376 mapName = 0;
3377 prior = 0;
3378 if (command)
3379 {
3380 mapName = _mapName;
3381 if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences);
3382 else panic("_dmaReferences underflow");
3383 }
3384
3385 if (!mapLength) return (kIOReturnSuccess);
3386
3387 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3388
3389 if ((alloc = mapper->fAllocName))
3390 {
3391 kern_allocation_update_size(alloc, -mapLength);
3392 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag))
3393 {
3394 mapLength = _length;
3395 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3396 }
3397 }
3398
3e170ce0 3399 return (ret);
99c3a104
A
3400}
3401
3402IOReturn IOGeneralMemoryDescriptor::dmaMap(
3403 IOMapper * mapper,
3e170ce0 3404 IODMACommand * command,
99c3a104
A
3405 const IODMAMapSpecification * mapSpec,
3406 uint64_t offset,
3407 uint64_t length,
3e170ce0
A
3408 uint64_t * mapAddress,
3409 uint64_t * mapLength)
99c3a104
A
3410{
3411 IOReturn err = kIOReturnSuccess;
3412 ioGMDData * dataP;
3413 IOOptionBits type = _flags & kIOMemoryTypeMask;
3414
3e170ce0 3415 *mapAddress = 0;
99c3a104 3416 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
5ba3f43e 3417 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
99c3a104
A
3418
3419 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3420 || offset || (length != _length))
3421 {
3e170ce0 3422 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
99c3a104
A
3423 }
3424 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3425 {
3426 const ioPLBlock * ioplList = getIOPLList(dataP);
3427 upl_page_info_t * pageList;
3428 uint32_t mapOptions = 0;
99c3a104
A
3429
3430 IODMAMapSpecification mapSpec;
3431 bzero(&mapSpec, sizeof(mapSpec));
3432 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3433 mapSpec.alignment = dataP->fDMAMapAlignment;
3434
3435 // For external UPLs the fPageInfo field points directly to
3436 // the upl's upl_page_info_t array.
3437 if (ioplList->fFlags & kIOPLExternUPL)
3438 {
3439 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3440 mapOptions |= kIODMAMapPagingPath;
3441 }
3e170ce0 3442 else pageList = getPageList(dataP);
99c3a104 3443
3e170ce0
A
3444 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3445 {
3446 mapOptions |= kIODMAMapPageListFullyOccupied;
3447 }
3448
5ba3f43e
A
3449 assert(dataP->fDMAAccess);
3450 mapOptions |= dataP->fDMAAccess;
99c3a104
A
3451
3452 // Check for direct device non-paged memory
3453 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3454
3e170ce0
A
3455 IODMAMapPageList dmaPageList =
3456 {
39037602 3457 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3e170ce0
A
3458 .pageListCount = _pages,
3459 .pageList = &pageList[0]
3460 };
3461 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3462 command, &dmaPageList, mapAddress, mapLength);
5ba3f43e
A
3463
3464 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
99c3a104
A
3465 }
3466
3467 return (err);
3468}
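/*
 * Usage sketch (illustrative only): drivers normally reach dmaMap()/dmaUnmap()
 * indirectly through IODMACommand, which supplies the mapper, the map
 * specification and the segment walk. The specification values below (32
 * address bits, 0 taken as "no segment size limit") and the helper name are
 * assumptions for the example.
 */
#if 0
static IOReturn
ExampleGenerateSegments(IOMemoryDescriptor * md)
{
    IOReturn        err;
    IODMACommand *  cmd;

    cmd = IODMACommand::withSpecification(IODMACommand::OutputHost64,
                                          32,       // device address bits
                                          0);       // segment size limit (0 assumed unlimited)
    if (!cmd) return (kIOReturnNoMemory);

    err = cmd->setMemoryDescriptor(md);             // prepares and maps the memory
    if (kIOReturnSuccess == err)
    {
        UInt64                   offset = 0;
        IODMACommand::Segment64  seg;
        UInt32                   count  = 1;

        while ((kIOReturnSuccess == cmd->gen64IOVMSegments(&offset, &seg, &count)) && count)
        {
            // seg.fIOVMAddr / seg.fLength are the device-visible addresses
            count = 1;
        }
        cmd->clearMemoryDescriptor();               // completes and unmaps
    }
    cmd->release();
    return (err);
}
#endif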
3469
55e303ae
A
3470/*
3471 * prepare
3472 *
3473 * Prepare the memory for an I/O transfer. This involves paging in
3474 * the memory, if necessary, and wiring it down for the duration of
3475 * the transfer. The complete() method completes the processing of
3476 * the memory after the I/O transfer finishes. This method needn't
3477 * be called for non-pageable memory.
3478 */
99c3a104 3479
55e303ae
A
3480IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3481{
39037602 3482 IOReturn error = kIOReturnSuccess;
91447636 3483 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 3484
2d21ac55
A
3485 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3486 return kIOReturnSuccess;
3487
5ba3f43e
A
3488 assert (!(kIOMemoryRemote & _flags));
3489 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3490
39037602 3491 if (_prepareLock) IOLockLock(_prepareLock);
2d21ac55 3492
39236c6e
A
3493 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3494 {
39037602 3495 error = wireVirtual(forDirection);
de355530
A
3496 }
3497
5ba3f43e 3498 if (kIOReturnSuccess == error)
0b4c1975 3499 {
39037602
A
3500 if (1 == ++_wireCount)
3501 {
3502 if (kIOMemoryClearEncrypt & _flags)
3503 {
3504 performOperation(kIOMemoryClearEncrypted, 0, _length);
3505 }
3506 }
0b4c1975
A
3507 }
3508
39037602 3509 if (_prepareLock) IOLockUnlock(_prepareLock);
2d21ac55
A
3510
3511 return error;
1c79356b
A
3512}
3513
3514/*
3515 * complete
3516 *
3517 * Complete processing of the memory after an I/O transfer finishes.
3518 * This method should not be called unless a prepare was previously
3519 * issued; the prepare() and complete() must occur in pairs,
3520 * before and after an I/O transfer involving pageable memory.
3521 */
6d2010ae 3522
fe8ab488 3523IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
1c79356b 3524{
2d21ac55 3525 IOOptionBits type = _flags & kIOMemoryTypeMask;
39037602 3526 ioGMDData * dataP;
1c79356b 3527
2d21ac55
A
3528 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3529 return kIOReturnSuccess;
1c79356b 3530
5ba3f43e
A
3531 assert (!(kIOMemoryRemote & _flags));
3532 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3533
39037602
A
3534 if (_prepareLock) IOLockLock(_prepareLock);
3535 do
3536 {
3537 assert(_wireCount);
3538 if (!_wireCount) break;
3539 dataP = getDataP(_memoryEntries);
3540 if (!dataP) break;
91447636 3541
39037602 3542 if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;
fe8ab488 3543
0b4c1975
A
3544 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3545 {
3546 performOperation(kIOMemorySetEncrypted, 0, _length);
3547 }
3548
39037602
A
3549 _wireCount--;
3550 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3551 {
3552 ioPLBlock *ioplList = getIOPLList(dataP);
3553 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
55e303ae 3554
39037602
A
3555 if (_wireCount)
3556 {
3557 // kIODirectionCompleteWithDataValid & forDirection
3558 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3559 {
5ba3f43e
A
3560 vm_tag_t tag;
3561 tag = getVMTag(kernel_map);
39037602
A
3562 for (ind = 0; ind < count; ind++)
3563 {
5ba3f43e 3564 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag);
39037602
A
3565 }
3566 }
3567 }
3568 else
3569 {
5ba3f43e 3570 if (_dmaReferences) panic("complete() while dma active");
b0d623f7 3571
5ba3f43e
A
3572 if (dataP->fMappedBaseValid) {
3573 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3574 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
39037602 3575 }
3e170ce0 3576#if IOTRACKING
5ba3f43e 3577 if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
39037602 3578#endif /* IOTRACKING */
5ba3f43e
A
3579 // Only complete IOPLs that we created, which are those for TypeVirtual
3580 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3581 {
39037602
A
3582 for (ind = 0; ind < count; ind++)
3583 if (ioplList[ind].fIOPL) {
3584 if (dataP->fCompletionError)
3585 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3586 else
3587 upl_commit(ioplList[ind].fIOPL, 0, 0);
3588 upl_deallocate(ioplList[ind].fIOPL);
3589 }
3590 } else if (kIOMemoryTypeUPL == type) {
3591 upl_set_referenced(ioplList[0].fIOPL, false);
3592 }
6d2010ae 3593
39037602 3594 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
b0d623f7 3595
39037602 3596 dataP->fPreparationID = kIOPreparationIDUnprepared;
5ba3f43e 3597 _flags &= ~kIOMemoryPreparedReadOnly;
39037602
A
3598 }
3599 }
1c79356b 3600 }
39037602 3601 while (false);
2d21ac55 3602
39037602 3603 if (_prepareLock) IOLockUnlock(_prepareLock);
2d21ac55 3604
1c79356b
A
3605 return kIOReturnSuccess;
3606}
3607
3608IOReturn IOGeneralMemoryDescriptor::doMap(
2d21ac55
A
3609 vm_map_t __addressMap,
3610 IOVirtualAddress * __address,
1c79356b 3611 IOOptionBits options,
2d21ac55
A
3612 IOByteCount __offset,
3613 IOByteCount __length )
1c79356b 3614{
b0d623f7 3615#ifndef __LP64__
2d21ac55 3616 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
b0d623f7 3617#endif /* !__LP64__ */
2d21ac55 3618
fe8ab488
A
3619 kern_return_t err;
3620
b0d623f7 3621 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
3622 mach_vm_size_t offset = mapping->fOffset + __offset;
3623 mach_vm_size_t length = mapping->fLength;
3624
91447636
A
3625 IOOptionBits type = _flags & kIOMemoryTypeMask;
3626 Ranges vec = _ranges;
3627
fe8ab488
A
3628 mach_vm_address_t range0Addr = 0;
3629 mach_vm_size_t range0Len = 0;
91447636 3630
060df5ea
A
3631 if ((offset >= _length) || ((offset + length) > _length))
3632 return( kIOReturnBadArgument );
3633
5ba3f43e
A
3634 assert (!(kIOMemoryRemote & _flags));
3635 if (kIOMemoryRemote & _flags) return (0);
3636
91447636
A
3637 if (vec.v)
3638 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3639
1c79356b 3640 // mapping source == dest? (could be much better)
fe8ab488
A
3641 if (_task
3642 && (mapping->fAddressTask == _task)
3643 && (mapping->fAddressMap == get_task_map(_task))
3644 && (options & kIOMapAnywhere)
3645 && (1 == _rangesCount)
3646 && (0 == offset)
3647 && range0Addr
3648 && (length <= range0Len))
2d21ac55
A
3649 {
3650 mapping->fAddress = range0Addr;
3651 mapping->fOptions |= kIOMapStatic;
3652
3653 return( kIOReturnSuccess );
1c79356b
A
3654 }
3655
fe8ab488
A
3656 if (!_memRef)
3657 {
3658 IOOptionBits createOptions = 0;
3659 if (!(kIOMapReadOnly & options))
3660 {
3661 createOptions |= kIOMemoryReferenceWrite;
3662#if DEVELOPMENT || DEBUG
3663 if (kIODirectionOut == (kIODirectionOutIn & _flags))
060df5ea 3664 {
fe8ab488
A
3665 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3666 }
0b4e3aa0 3667#endif
fe8ab488
A
3668 }
3669 err = memoryReferenceCreate(createOptions, &_memRef);
3670 if (kIOReturnSuccess != err) return (err);
3671 }
9bccf70c 3672
fe8ab488
A
3673 memory_object_t pager;
3674 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
55e303ae 3675
fe8ab488
A
3676 // <upl_transpose //
3677 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3678 {
3679 do
3680 {
3e170ce0
A
3681 upl_t redirUPL2;
3682 upl_size_t size;
3683 upl_control_flags_t flags;
3684 unsigned int lock_count;
9bccf70c 3685
fe8ab488
A
3686 if (!_memRef || (1 != _memRef->count))
3687 {
3688 err = kIOReturnNotReadable;
3689 break;
3690 }
0b4e3aa0 3691
fe8ab488
A
3692 size = round_page(mapping->fLength);
3693 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5ba3f43e 3694 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
0b4e3aa0 3695
fe8ab488
A
3696 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3697 NULL, NULL,
5ba3f43e 3698 &flags, getVMTag(kernel_map)))
fe8ab488 3699 redirUPL2 = NULL;
1c79356b 3700
fe8ab488
A
3701 for (lock_count = 0;
3702 IORecursiveLockHaveLock(gIOMemoryLock);
3703 lock_count++) {
3704 UNLOCK;
3705 }
3706 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3707 for (;
3708 lock_count;
3709 lock_count--) {
3710 LOCK;
3711 }
1c79356b 3712
fe8ab488
A
3713 if (kIOReturnSuccess != err)
3714 {
3715 IOLog("upl_transpose(%x)\n", err);
3716 err = kIOReturnSuccess;
3717 }
0b4e3aa0 3718
fe8ab488
A
3719 if (redirUPL2)
3720 {
3721 upl_commit(redirUPL2, NULL, 0);
3722 upl_deallocate(redirUPL2);
3723 redirUPL2 = 0;
3724 }
3725 {
3726 // swap the memEntries since they now refer to different vm_objects
3727 IOMemoryReference * me = _memRef;
3728 _memRef = mapping->fMemory->_memRef;
3729 mapping->fMemory->_memRef = me;
3730 }
3731 if (pager)
3732 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3733 }
3734 while (false);
3735 }
3736 // upl_transpose> //
9bccf70c 3737 else
fe8ab488 3738 {
d190cdc3 3739 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3e170ce0 3740#if IOTRACKING
39037602
A
3741 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
3742 {
3743 // only dram maps in the default-on development case
3744 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3745 }
3746#endif /* IOTRACKING */
fe8ab488
A
3747 if ((err == KERN_SUCCESS) && pager)
3748 {
3749 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3e170ce0
A
3750
3751 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
fe8ab488
A
3752 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3753 {
3754 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3755 }
3756 }
3757 }
3758
3759 return (err);
1c79356b
A
3760}
3761
39037602
A
3762#if IOTRACKING
3763IOReturn
3764IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
3765 mach_vm_address_t * address, mach_vm_size_t * size)
3766{
3767#define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3768
3769 IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
3770
3771 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);
3772
3773 *task = map->fAddressTask;
3774 *address = map->fAddress;
3775 *size = map->fLength;
3776
3777 return (kIOReturnSuccess);
3778}
3779#endif /* IOTRACKING */
3780
1c79356b
A
3781IOReturn IOGeneralMemoryDescriptor::doUnmap(
3782 vm_map_t addressMap,
2d21ac55
A
3783 IOVirtualAddress __address,
3784 IOByteCount __length )
1c79356b 3785{
2d21ac55 3786 return (super::doUnmap(addressMap, __address, __length));
1c79356b
A
3787}
3788
3789/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3790
b0d623f7
A
3791#undef super
3792#define super OSObject
1c79356b 3793
b0d623f7 3794OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 3795
b0d623f7
A
3796OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3797OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3798OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3799OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3800OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3801OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3802OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3803OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 3804
b0d623f7
A
3805/* ex-inline function implementation */
3806IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3807 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
3808
3809/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3810
b0d623f7 3811bool IOMemoryMap::init(
2d21ac55
A
3812 task_t intoTask,
3813 mach_vm_address_t toAddress,
3814 IOOptionBits _options,
3815 mach_vm_size_t _offset,
3816 mach_vm_size_t _length )
1c79356b 3817{
2d21ac55 3818 if (!intoTask)
1c79356b
A
3819 return( false);
3820
2d21ac55
A
3821 if (!super::init())
3822 return(false);
1c79356b 3823
2d21ac55
A
3824 fAddressMap = get_task_map(intoTask);
3825 if (!fAddressMap)
3826 return(false);
3827 vm_map_reference(fAddressMap);
1c79356b 3828
2d21ac55
A
3829 fAddressTask = intoTask;
3830 fOptions = _options;
3831 fLength = _length;
3832 fOffset = _offset;
3833 fAddress = toAddress;
1c79356b 3834
2d21ac55 3835 return (true);
1c79356b
A
3836}
3837
b0d623f7 3838bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 3839{
2d21ac55
A
3840 if (!_memory)
3841 return(false);
1c79356b 3842
2d21ac55 3843 if (!fSuperMap)
91447636 3844 {
2d21ac55 3845 if( (_offset + fLength) > _memory->getLength())
91447636 3846 return( false);
2d21ac55 3847 fOffset = _offset;
91447636 3848 }
1c79356b
A
3849
3850 _memory->retain();
2d21ac55 3851 if (fMemory)
91447636 3852 {
2d21ac55
A
3853 if (fMemory != _memory)
3854 fMemory->removeMapping(this);
3855 fMemory->release();
1c79356b 3856 }
2d21ac55 3857 fMemory = _memory;
91447636 3858
2d21ac55 3859 return( true );
1c79356b
A
3860}
3861
3862IOReturn IOMemoryDescriptor::doMap(
2d21ac55
A
3863 vm_map_t __addressMap,
3864 IOVirtualAddress * __address,
1c79356b 3865 IOOptionBits options,
2d21ac55
A
3866 IOByteCount __offset,
3867 IOByteCount __length )
1c79356b 3868{
fe8ab488
A
3869 return (kIOReturnUnsupported);
3870}
1c79356b 3871
fe8ab488
A
3872IOReturn IOMemoryDescriptor::handleFault(
3873 void * _pager,
3874 mach_vm_size_t sourceOffset,
3875 mach_vm_size_t length)
3876{
3877 if( kIOMemoryRedirected & _flags)
2d21ac55 3878 {
b0d623f7 3879#if DEBUG
fe8ab488 3880 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2d21ac55 3881#endif
fe8ab488
A
3882 do {
3883 SLEEP;
3884 } while( kIOMemoryRedirected & _flags );
2d21ac55 3885 }
fe8ab488 3886 return (kIOReturnSuccess);
0b4e3aa0
A
3887}
3888
fe8ab488 3889IOReturn IOMemoryDescriptor::populateDevicePager(
0b4e3aa0
A
3890 void * _pager,
3891 vm_map_t addressMap,
2d21ac55
A
3892 mach_vm_address_t address,
3893 mach_vm_size_t sourceOffset,
3894 mach_vm_size_t length,
0b4e3aa0
A
3895 IOOptionBits options )
3896{
3897 IOReturn err = kIOReturnSuccess;
3898 memory_object_t pager = (memory_object_t) _pager;
2d21ac55
A
3899 mach_vm_size_t size;
3900 mach_vm_size_t bytes;
3901 mach_vm_size_t page;
3902 mach_vm_size_t pageOffset;
3903 mach_vm_size_t pagerOffset;
3e170ce0 3904 IOPhysicalLength segLen, chunk;
55e303ae 3905 addr64_t physAddr;
3e170ce0
A
3906 IOOptionBits type;
3907
3908 type = _flags & kIOMemoryTypeMask;
3909
3910 if (reserved->dp.pagerContig)
3911 {
3912 sourceOffset = 0;
3913 pagerOffset = 0;
3914 }
0b4e3aa0 3915
b0d623f7 3916 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 3917 assert( physAddr );
55e303ae
A
3918 pageOffset = physAddr - trunc_page_64( physAddr );
3919 pagerOffset = sourceOffset;
0b4e3aa0
A
3920
3921 size = length + pageOffset;
3922 physAddr -= pageOffset;
1c79356b
A
3923
3924 segLen += pageOffset;
0b4e3aa0 3925 bytes = size;
2d21ac55
A
3926 do
3927 {
1c79356b 3928 // in the middle of the loop only map whole pages
fe8ab488
A
3929 if( segLen >= bytes) segLen = bytes;
3930 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3931 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
1c79356b 3932
fe8ab488 3933 if (kIOReturnSuccess != err) break;
1c79356b 3934
3e170ce0
A
3935#if DEBUG || DEVELOPMENT
3936 if ((kIOMemoryTypeUPL != type)
3937 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
fe8ab488 3938 {
3e170ce0
A
3939 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3940 }
3941#endif /* DEBUG || DEVELOPMENT */
3942
3943 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3944 for (page = 0;
3945 (page < segLen) && (KERN_SUCCESS == err);
3946 page += chunk)
3947 {
3948 err = device_pager_populate_object(pager, pagerOffset,
3949 (ppnum_t)(atop_64(physAddr + page)), chunk);
3950 pagerOffset += chunk;
3951 }
2d21ac55 3952
fe8ab488
A
3953 assert (KERN_SUCCESS == err);
3954 if (err) break;
0c530ab8 3955
2d21ac55
A
3956 // For kernel mappings, this call to vm_fault causes an early pmap-level
3957 // resolution of the mappings created above, since faulting them in
3958 // later cannot take place from interrupt level.
2d21ac55
A
3959 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3960 {
5ba3f43e
A
3961 err = vm_fault(addressMap,
3962 (vm_map_offset_t)trunc_page_64(address),
3963 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE,
3964 FALSE, VM_KERN_MEMORY_NONE,
3965 THREAD_UNINT, NULL,
3966 (vm_map_offset_t)0);
3967
3968 if (KERN_SUCCESS != err) break;
9bccf70c
A
3969 }
3970
1c79356b 3971 sourceOffset += segLen - pageOffset;
0b4e3aa0 3972 address += segLen;
1c79356b
A
3973 bytes -= segLen;
3974 pageOffset = 0;
2d21ac55 3975 }
b0d623f7 3976 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 3977
2d21ac55 3978 if (bytes)
1c79356b 3979 err = kIOReturnBadArgument;
1c79356b 3980
2d21ac55 3981 return (err);
1c79356b
A
3982}
3983
3984IOReturn IOMemoryDescriptor::doUnmap(
3985 vm_map_t addressMap,
2d21ac55
A
3986 IOVirtualAddress __address,
3987 IOByteCount __length )
1c79356b 3988{
2d21ac55 3989 IOReturn err;
3e170ce0 3990 IOMemoryMap * mapping;
2d21ac55
A
3991 mach_vm_address_t address;
3992 mach_vm_size_t length;
3993
3e170ce0 3994 if (__length) panic("doUnmap");
2d21ac55 3995
3e170ce0
A
3996 mapping = (IOMemoryMap *) __address;
3997 addressMap = mapping->fAddressMap;
3998 address = mapping->fAddress;
3999 length = mapping->fLength;
1c79356b 4000
3e170ce0
A
4001 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
4002 else
4003 {
4004 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
4005 addressMap = IOPageableMapForAddress( address );
b0d623f7 4006#if DEBUG
3e170ce0
A
4007 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4008 addressMap, address, length );
1c79356b 4009#endif
3e170ce0
A
4010 err = mach_vm_deallocate( addressMap, address, length );
4011 }
1c79356b 4012
3e170ce0 4013#if IOTRACKING
39037602
A
4014 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
4015#endif /* IOTRACKING */
1c79356b 4016
2d21ac55 4017 return (err);
1c79356b
A
4018}
4019
91447636 4020IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 4021{
91447636 4022 IOReturn err = kIOReturnSuccess;
b0d623f7 4023 IOMemoryMap * mapping = 0;
e3027f41
A
4024 OSIterator * iter;
4025
4026 LOCK;
4027
91447636
A
4028 if( doRedirect)
4029 _flags |= kIOMemoryRedirected;
4030 else
4031 _flags &= ~kIOMemoryRedirected;
4032
e3027f41
A
4033 do {
4034 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
39236c6e
A
4035
4036 memory_object_t pager;
4037
4038 if( reserved)
4039 pager = (memory_object_t) reserved->dp.devicePager;
4040 else
4041 pager = MACH_PORT_NULL;
4042
b0d623f7 4043 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
39236c6e 4044 {
91447636 4045 mapping->redirect( safeTask, doRedirect );
39236c6e
A
4046 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
4047 {
fe8ab488 4048 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
39236c6e
A
4049 }
4050 }
e3027f41 4051
91447636
A
4052 iter->release();
4053 }
e3027f41
A
4054 } while( false );
4055
91447636
A
4056 if (!doRedirect)
4057 {
9bccf70c 4058 WAKEUP;
0b4e3aa0
A
4059 }
4060
e3027f41
A
4061 UNLOCK;
4062
b0d623f7 4063#ifndef __LP64__
e3027f41
A
4064 // temporary binary compatibility
4065 IOSubMemoryDescriptor * subMem;
4066 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
91447636 4067 err = subMem->redirect( safeTask, doRedirect );
e3027f41 4068 else
91447636 4069 err = kIOReturnSuccess;
b0d623f7 4070#endif /* !__LP64__ */
e3027f41
A
4071
4072 return( err );
4073}
4074
b0d623f7 4075IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41
A
4076{
4077 IOReturn err = kIOReturnSuccess;
4078
2d21ac55 4079 if( fSuperMap) {
b0d623f7 4080// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
e3027f41
A
4081 } else {
4082
4083 LOCK;
0c530ab8
A
4084
4085 do
91447636 4086 {
2d21ac55 4087 if (!fAddress)
0c530ab8 4088 break;
2d21ac55 4089 if (!fAddressMap)
0c530ab8
A
4090 break;
4091
2d21ac55
A
4092 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4093 && (0 == (fOptions & kIOMapStatic)))
0c530ab8 4094 {
2d21ac55 4095 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
4096 err = kIOReturnSuccess;
4097#if DEBUG
2d21ac55 4098 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 4099#endif
0c530ab8 4100 }
2d21ac55 4101 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
0c530ab8
A
4102 {
4103 IOOptionBits newMode;
2d21ac55
A
4104 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4105 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
0c530ab8
A
4106 }
4107 }
4108 while (false);
0c530ab8 4109 UNLOCK;
e3027f41
A
4110 }
4111
2d21ac55
A
4112 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4113 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 4114 && safeTask
2d21ac55
A
4115 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
4116 fMemory->redirect(safeTask, doRedirect);
91447636 4117
e3027f41
A
4118 return( err );
4119}
4120
b0d623f7 4121IOReturn IOMemoryMap::unmap( void )
1c79356b
A
4122{
4123 IOReturn err;
4124
4125 LOCK;
4126
2d21ac55 4127 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3e170ce0 4128 && (0 == (kIOMapStatic & fOptions))) {
1c79356b 4129
2d21ac55 4130 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
1c79356b
A
4131
4132 } else
4133 err = kIOReturnSuccess;
4134
2d21ac55
A
4135 if (fAddressMap)
4136 {
4137 vm_map_deallocate(fAddressMap);
4138 fAddressMap = 0;
4139 }
4140
4141 fAddress = 0;
1c79356b
A
4142
4143 UNLOCK;
4144
4145 return( err );
4146}
4147
b0d623f7 4148void IOMemoryMap::taskDied( void )
1c79356b
A
4149{
4150 LOCK;
3e170ce0
A
4151 if (fUserClientUnmap) unmap();
4152#if IOTRACKING
39037602
A
4153 else IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4154#endif /* IOTRACKING */
3e170ce0 4155
2d21ac55
A
4156 if( fAddressMap) {
4157 vm_map_deallocate(fAddressMap);
4158 fAddressMap = 0;
1c79356b 4159 }
2d21ac55
A
4160 fAddressTask = 0;
4161 fAddress = 0;
1c79356b
A
4162 UNLOCK;
4163}
4164
b0d623f7
A
4165IOReturn IOMemoryMap::userClientUnmap( void )
4166{
4167 fUserClientUnmap = true;
4168 return (kIOReturnSuccess);
4169}
4170
9bccf70c
A
4171// Overload the release mechanism. Every mapping must be a member
4172// of a memory descriptor's _mappings set. This means that we
4173// always have 2 references on a mapping. When either of these references
4174// is released we need to free ourselves.
b0d623f7 4175void IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 4176{
55e303ae 4177 LOCK;
9bccf70c 4178 super::taggedRelease(tag, 2);
55e303ae 4179 UNLOCK;
9bccf70c
A
4180}
4181
b0d623f7 4182void IOMemoryMap::free()
1c79356b
A
4183{
4184 unmap();
4185
2d21ac55
A
4186 if (fMemory)
4187 {
1c79356b 4188 LOCK;
2d21ac55 4189 fMemory->removeMapping(this);
1c79356b 4190 UNLOCK;
2d21ac55 4191 fMemory->release();
1c79356b
A
4192 }
4193
2d21ac55 4194 if (fOwner && (fOwner != fMemory))
91447636
A
4195 {
4196 LOCK;
2d21ac55 4197 fOwner->removeMapping(this);
91447636
A
4198 UNLOCK;
4199 }
4200
2d21ac55
A
4201 if (fSuperMap)
4202 fSuperMap->release();
1c79356b 4203
2d21ac55
A
4204 if (fRedirUPL) {
4205 upl_commit(fRedirUPL, NULL, 0);
4206 upl_deallocate(fRedirUPL);
91447636
A
4207 }
4208
1c79356b
A
4209 super::free();
4210}
4211
b0d623f7 4212IOByteCount IOMemoryMap::getLength()
1c79356b 4213{
2d21ac55 4214 return( fLength );
1c79356b
A
4215}
4216
b0d623f7 4217IOVirtualAddress IOMemoryMap::getVirtualAddress()
1c79356b 4218{
b0d623f7 4219#ifndef __LP64__
2d21ac55
A
4220 if (fSuperMap)
4221 fSuperMap->getVirtualAddress();
b0d623f7
A
4222 else if (fAddressMap
4223 && vm_map_is_64bit(fAddressMap)
4224 && (sizeof(IOVirtualAddress) < 8))
2d21ac55
A
4225 {
4226 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4227 }
b0d623f7 4228#endif /* !__LP64__ */
2d21ac55
A
4229
4230 return (fAddress);
4231}
4232
b0d623f7
A
4233#ifndef __LP64__
4234mach_vm_address_t IOMemoryMap::getAddress()
2d21ac55
A
4235{
4236 return( fAddress);
4237}
4238
b0d623f7 4239mach_vm_size_t IOMemoryMap::getSize()
2d21ac55
A
4240{
4241 return( fLength );
1c79356b 4242}
b0d623f7 4243#endif /* !__LP64__ */
1c79356b 4244
2d21ac55 4245
b0d623f7 4246task_t IOMemoryMap::getAddressTask()
1c79356b 4247{
2d21ac55
A
4248 if( fSuperMap)
4249 return( fSuperMap->getAddressTask());
1c79356b 4250 else
2d21ac55 4251 return( fAddressTask);
1c79356b
A
4252}
4253
b0d623f7 4254IOOptionBits IOMemoryMap::getMapOptions()
1c79356b 4255{
2d21ac55 4256 return( fOptions);
1c79356b
A
4257}
4258
b0d623f7 4259IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
1c79356b 4260{
2d21ac55 4261 return( fMemory );
1c79356b
A
4262}
4263
b0d623f7
A
4264IOMemoryMap * IOMemoryMap::copyCompatible(
4265 IOMemoryMap * newMapping )
1c79356b 4266{
2d21ac55
A
4267 task_t task = newMapping->getAddressTask();
4268 mach_vm_address_t toAddress = newMapping->fAddress;
4269 IOOptionBits _options = newMapping->fOptions;
4270 mach_vm_size_t _offset = newMapping->fOffset;
4271 mach_vm_size_t _length = newMapping->fLength;
1c79356b 4272
2d21ac55 4273 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
1c79356b 4274 return( 0 );
2d21ac55 4275 if( (fOptions ^ _options) & kIOMapReadOnly)
9bccf70c
A
4276 return( 0 );
4277 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2d21ac55 4278 && ((fOptions ^ _options) & kIOMapCacheMask))
1c79356b
A
4279 return( 0 );
4280
2d21ac55 4281 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
1c79356b
A
4282 return( 0 );
4283
2d21ac55 4284 if( _offset < fOffset)
1c79356b
A
4285 return( 0 );
4286
2d21ac55 4287 _offset -= fOffset;
1c79356b 4288
2d21ac55 4289 if( (_offset + _length) > fLength)
1c79356b
A
4290 return( 0 );
4291
2d21ac55
A
4292 retain();
4293 if( (fLength == _length) && (!_offset))
4294 {
2d21ac55
A
4295 newMapping = this;
4296 }
4297 else
4298 {
4299 newMapping->fSuperMap = this;
6d2010ae 4300 newMapping->fOffset = fOffset + _offset;
2d21ac55 4301 newMapping->fAddress = fAddress + _offset;
1c79356b
A
4302 }
4303
2d21ac55 4304 return( newMapping );
1c79356b
A
4305}
4306
99c3a104
A
4307IOReturn IOMemoryMap::wireRange(
4308 uint32_t options,
4309 mach_vm_size_t offset,
4310 mach_vm_size_t length)
4311{
4312 IOReturn kr;
4313 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4314 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3e170ce0
A
4315 vm_prot_t prot;
4316
4317 prot = (kIODirectionOutIn & options);
4318 if (prot)
99c3a104 4319 {
5ba3f43e 4320 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
99c3a104
A
4321 }
4322 else
4323 {
4324 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4325 }
4326
4327 return (kr);
4328}
4329
4330
0c530ab8 4331IOPhysicalAddress
b0d623f7
A
4332#ifdef __LP64__
4333IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4334#else /* !__LP64__ */
4335IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4336#endif /* !__LP64__ */
1c79356b
A
4337{
4338 IOPhysicalAddress address;
4339
4340 LOCK;
b0d623f7
A
4341#ifdef __LP64__
4342 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4343#else /* !__LP64__ */
2d21ac55 4344 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 4345#endif /* !__LP64__ */
1c79356b
A
4346 UNLOCK;
4347
4348 return( address );
4349}
4350
4351/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4352
4353#undef super
4354#define super OSObject
4355
4356/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4357
4358void IOMemoryDescriptor::initialize( void )
4359{
4360 if( 0 == gIOMemoryLock)
4361 gIOMemoryLock = IORecursiveLockAlloc();
55e303ae 4362
0c530ab8 4363 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
4364}
4365
4366void IOMemoryDescriptor::free( void )
4367{
3e170ce0 4368 if( _mappings) _mappings->release();
1c79356b 4369
3e170ce0
A
4370 if (reserved)
4371 {
4372 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4373 reserved = NULL;
4374 }
1c79356b
A
4375 super::free();
4376}
4377
4378IOMemoryMap * IOMemoryDescriptor::setMapping(
4379 task_t intoTask,
4380 IOVirtualAddress mapAddress,
55e303ae 4381 IOOptionBits options )
1c79356b 4382{
2d21ac55
A
4383 return (createMappingInTask( intoTask, mapAddress,
4384 options | kIOMapStatic,
4385 0, getLength() ));
1c79356b
A
4386}
4387
4388IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 4389 IOOptionBits options )
1c79356b 4390{
2d21ac55
A
4391 return (createMappingInTask( kernel_task, 0,
4392 options | kIOMapAnywhere,
4393 0, getLength() ));
1c79356b
A
4394}
4395
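/*
 * Usage sketch (illustration only; the helper name is hypothetical, kept out
 * of the build with #if 0): map() with no arguments gives a kernel-virtual
 * view of the descriptor. The returned IOMemoryMap keeps the mapping alive;
 * releasing it undoes the map.
 */
#if 0
static void *
ExampleKernelMapping(IOMemoryDescriptor * md, IOMemoryMap ** outMap)
{
    IOMemoryMap * map = md->map();                 // kernel_task, kIOMapAnywhere
    if (!map) return (NULL);
    *outMap = map;                                 // caller releases when finished
    return ((void *) map->getVirtualAddress());
}
#endif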
b0d623f7 4396#ifndef __LP64__
2d21ac55
A
4397IOMemoryMap * IOMemoryDescriptor::map(
4398 task_t intoTask,
4399 IOVirtualAddress atAddress,
1c79356b 4400 IOOptionBits options,
55e303ae
A
4401 IOByteCount offset,
4402 IOByteCount length )
1c79356b 4403{
2d21ac55
A
4404 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4405 {
4406 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4407 return (0);
4408 }
4409
4410 return (createMappingInTask(intoTask, atAddress,
4411 options, offset, length));
4412}
b0d623f7 4413#endif /* !__LP64__ */
2d21ac55
A
4414
4415IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4416 task_t intoTask,
4417 mach_vm_address_t atAddress,
4418 IOOptionBits options,
4419 mach_vm_size_t offset,
4420 mach_vm_size_t length)
4421{
b0d623f7
A
4422 IOMemoryMap * result;
4423 IOMemoryMap * mapping;
2d21ac55
A
4424
4425 if (0 == length)
1c79356b
A
4426 length = getLength();
4427
b0d623f7 4428 mapping = new IOMemoryMap;
2d21ac55
A
4429
4430 if( mapping
4431 && !mapping->init( intoTask, atAddress,
4432 options, offset, length )) {
4433 mapping->release();
4434 mapping = 0;
4435 }
4436
4437 if (mapping)
4438 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4439 else
4440 result = 0;
4441
b0d623f7 4442#if DEBUG
2d21ac55 4443 if (!result)
316670eb
A
4444 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4445 this, atAddress, (uint32_t) options, offset, length);
2d21ac55
A
4446#endif
4447
4448 return (result);
1c79356b
A
4449}
4450
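/*
 * Usage sketch (illustration only; the task and descriptor come from a
 * hypothetical caller, kept out of the build with #if 0): createMappingInTask()
 * with kIOMapAnywhere lets the VM choose the user address; releasing the
 * returned IOMemoryMap unmaps it.
 */
#if 0
static IOMemoryMap *
ExampleUserMapping(IOMemoryDescriptor * md, task_t userTask)
{
    IOMemoryMap * map = md->createMappingInTask(userTask,
                                                0,                 // any address
                                                kIOMapAnywhere | kIOMapReadOnly);
    if (map)
    {
        mach_vm_address_t userAddr = map->getAddress();   // address in userTask
        (void) userAddr;
    }
    return (map);
}
#endif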
b0d623f7
A
4451#ifndef __LP64__ // there is only a 64 bit version for LP64
4452IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
91447636
A
4453 IOOptionBits options,
4454 IOByteCount offset)
2d21ac55
A
4455{
4456 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4457}
b0d623f7 4458#endif
2d21ac55 4459
b0d623f7 4460IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2d21ac55
A
4461 IOOptionBits options,
4462 mach_vm_size_t offset)
91447636
A
4463{
4464 IOReturn err = kIOReturnSuccess;
4465 IOMemoryDescriptor * physMem = 0;
4466
4467 LOCK;
4468
2d21ac55 4469 if (fAddress && fAddressMap) do
91447636 4470 {
2d21ac55
A
4471 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4472 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 4473 {
2d21ac55 4474 physMem = fMemory;
91447636
A
4475 physMem->retain();
4476 }
4477
fe8ab488 4478 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
91447636 4479 {
3e170ce0
A
4480 upl_size_t size = round_page(fLength);
4481 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
5ba3f43e 4482 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
fe8ab488 4483 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
91447636 4484 NULL, NULL,
5ba3f43e 4485 &flags, fMemory->getVMTag(kernel_map)))
2d21ac55 4486 fRedirUPL = 0;
91447636
A
4487
4488 if (physMem)
4489 {
2d21ac55 4490 IOUnmapPages( fAddressMap, fAddress, fLength );
fe8ab488 4491 if ((false))
b0d623f7 4492 physMem->redirect(0, true);
91447636
A
4493 }
4494 }
4495
4496 if (newBackingMemory)
4497 {
2d21ac55 4498 if (newBackingMemory != fMemory)
91447636 4499 {
2d21ac55
A
4500 fOffset = 0;
4501 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4502 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4503 offset, fLength))
91447636
A
4504 err = kIOReturnError;
4505 }
2d21ac55 4506 if (fRedirUPL)
91447636 4507 {
2d21ac55
A
4508 upl_commit(fRedirUPL, NULL, 0);
4509 upl_deallocate(fRedirUPL);
4510 fRedirUPL = 0;
91447636 4511 }
fe8ab488 4512 if ((false) && physMem)
91447636
A
4513 physMem->redirect(0, false);
4514 }
4515 }
4516 while (false);
4517
4518 UNLOCK;
4519
4520 if (physMem)
4521 physMem->release();
4522
4523 return (err);
4524}
4525
1c79356b
A
4526IOMemoryMap * IOMemoryDescriptor::makeMapping(
4527 IOMemoryDescriptor * owner,
2d21ac55
A
4528 task_t __intoTask,
4529 IOVirtualAddress __address,
1c79356b 4530 IOOptionBits options,
2d21ac55
A
4531 IOByteCount __offset,
4532 IOByteCount __length )
1c79356b 4533{
b0d623f7 4534#ifndef __LP64__
2d21ac55 4535 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
b0d623f7 4536#endif /* !__LP64__ */
2d21ac55 4537
91447636 4538 IOMemoryDescriptor * mapDesc = 0;
b0d623f7 4539 IOMemoryMap * result = 0;
2d21ac55
A
4540 OSIterator * iter;
4541
b0d623f7 4542 IOMemoryMap * mapping = (IOMemoryMap *) __address;
2d21ac55
A
4543 mach_vm_size_t offset = mapping->fOffset + __offset;
4544 mach_vm_size_t length = mapping->fLength;
4545
4546 mapping->fOffset = offset;
1c79356b
A
4547
4548 LOCK;
4549
91447636
A
4550 do
4551 {
2d21ac55
A
4552 if (kIOMapStatic & options)
4553 {
4554 result = mapping;
4555 addMapping(mapping);
4556 mapping->setMemoryDescriptor(this, 0);
4557 continue;
4558 }
4559
91447636
A
4560 if (kIOMapUnique & options)
4561 {
060df5ea 4562 addr64_t phys;
91447636 4563 IOByteCount physLen;
1c79356b 4564
2d21ac55 4565// if (owner != this) continue;
1c79356b 4566
0c530ab8
A
4567 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4568 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 4569 {
b0d623f7 4570 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
91447636
A
4571 if (!phys || (physLen < length))
4572 continue;
4573
b0d623f7
A
4574 mapDesc = IOMemoryDescriptor::withAddressRange(
4575 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
91447636
A
4576 if (!mapDesc)
4577 continue;
4578 offset = 0;
2d21ac55 4579 mapping->fOffset = offset;
91447636
A
4580 }
4581 }
4582 else
4583 {
2d21ac55
A
4584 // look for a compatible existing mapping
4585 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4586 {
b0d623f7
A
4587 IOMemoryMap * lookMapping;
4588 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
2d21ac55
A
4589 {
4590 if ((result = lookMapping->copyCompatible(mapping)))
4591 {
4592 addMapping(result);
4593 result->setMemoryDescriptor(this, offset);
91447636 4594 break;
2d21ac55 4595 }
91447636
A
4596 }
4597 iter->release();
4598 }
2d21ac55 4599 if (result || (options & kIOMapReference))
6d2010ae
A
4600 {
4601 if (result != mapping)
4602 {
4603 mapping->release();
4604 mapping = NULL;
4605 }
91447636 4606 continue;
6d2010ae 4607 }
2d21ac55 4608 }
91447636 4609
2d21ac55
A
4610 if (!mapDesc)
4611 {
4612 mapDesc = this;
91447636
A
4613 mapDesc->retain();
4614 }
2d21ac55
A
4615 IOReturn
4616 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4617 if (kIOReturnSuccess == kr)
4618 {
4619 result = mapping;
4620 mapDesc->addMapping(result);
4621 result->setMemoryDescriptor(mapDesc, offset);
4622 }
4623 else
4624 {
1c79356b 4625 mapping->release();
2d21ac55 4626 mapping = NULL;
1c79356b 4627 }
91447636 4628 }
2d21ac55 4629 while( false );
1c79356b
A
4630
4631 UNLOCK;
4632
91447636
A
4633 if (mapDesc)
4634 mapDesc->release();
4635
2d21ac55 4636 return (result);
1c79356b
A
4637}
4638
4639void IOMemoryDescriptor::addMapping(
4640 IOMemoryMap * mapping )
4641{
2d21ac55
A
4642 if( mapping)
4643 {
1c79356b
A
4644 if( 0 == _mappings)
4645 _mappings = OSSet::withCapacity(1);
9bccf70c
A
4646 if( _mappings )
4647 _mappings->setObject( mapping );
1c79356b
A
4648 }
4649}
4650
4651void IOMemoryDescriptor::removeMapping(
4652 IOMemoryMap * mapping )
4653{
9bccf70c 4654 if( _mappings)
1c79356b 4655 _mappings->removeObject( mapping);
1c79356b
A
4656}
4657
b0d623f7
A
4658#ifndef __LP64__
4659// obsolete initializers
4660// - initWithOptions is the designated initializer
1c79356b 4661bool
b0d623f7 4662IOMemoryDescriptor::initWithAddress(void * address,
55e303ae
A
4663 IOByteCount length,
4664 IODirection direction)
1c79356b
A
4665{
4666 return( false );
4667}
4668
4669bool
b0d623f7 4670IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
55e303ae
A
4671 IOByteCount length,
4672 IODirection direction,
4673 task_t task)
1c79356b
A
4674{
4675 return( false );
4676}
4677
4678bool
b0d623f7 4679IOMemoryDescriptor::initWithPhysicalAddress(
1c79356b 4680 IOPhysicalAddress address,
55e303ae
A
4681 IOByteCount length,
4682 IODirection direction )
1c79356b
A
4683{
4684 return( false );
4685}
4686
4687bool
b0d623f7 4688IOMemoryDescriptor::initWithRanges(
1c79356b
A
4689 IOVirtualRange * ranges,
4690 UInt32 withCount,
55e303ae
A
4691 IODirection direction,
4692 task_t task,
4693 bool asReference)
1c79356b
A
4694{
4695 return( false );
4696}
4697
4698bool
b0d623f7 4699IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1c79356b 4700 UInt32 withCount,
55e303ae
A
4701 IODirection direction,
4702 bool asReference)
1c79356b
A
4703{
4704 return( false );
4705}
4706
b0d623f7
A
4707void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4708 IOByteCount * lengthOfSegment)
4709{
4710 return( 0 );
4711}
4712#endif /* !__LP64__ */
4713
1c79356b
A
4714/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4715
9bccf70c
A
4716bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4717{
a39ff7e2
A
4718 OSSymbol const *keys[2] = {0};
4719 OSObject *values[2] = {0};
fe8ab488 4720 OSArray * array;
a39ff7e2 4721 vm_size_t vcopy_size;
fe8ab488 4722
91447636
A
4723 struct SerData {
4724 user_addr_t address;
4725 user_size_t length;
a39ff7e2 4726 } *vcopy = NULL;
9bccf70c 4727 unsigned int index, nRanges;
a39ff7e2 4728 bool result = false;
9bccf70c 4729
91447636
A
4730 IOOptionBits type = _flags & kIOMemoryTypeMask;
4731
9bccf70c 4732 if (s == NULL) return false;
9bccf70c 4733
fe8ab488
A
4734 array = OSArray::withCapacity(4);
4735 if (!array) return (false);
9bccf70c
A
4736
4737 nRanges = _rangesCount;
a39ff7e2
A
4738 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
4739 result = false;
4740 goto bail;
4741 }
4742 vcopy = (SerData *) IOMalloc(vcopy_size);
4743 if (vcopy == 0) {
4744 result = false;
4745 goto bail;
4746 }
9bccf70c
A
4747
4748 keys[0] = OSSymbol::withCString("address");
4749 keys[1] = OSSymbol::withCString("length");
4750
9bccf70c
A
4751 // Copy the volatile data so we don't have to allocate memory
4752 // while the lock is held.
4753 LOCK;
4754 if (nRanges == _rangesCount) {
91447636 4755 Ranges vec = _ranges;
9bccf70c 4756 for (index = 0; index < nRanges; index++) {
fe8ab488 4757 mach_vm_address_t addr; mach_vm_size_t len;
91447636
A
4758 getAddrLenForInd(addr, len, type, vec, index);
4759 vcopy[index].address = addr;
4760 vcopy[index].length = len;
9bccf70c
A
4761 }
4762 } else {
4763 // The descriptor changed out from under us. Give up.
4764 UNLOCK;
4765 result = false;
4766 goto bail;
4767 }
4768 UNLOCK;
4769
4770 for (index = 0; index < nRanges; index++)
4771 {
91447636
A
4772 user_addr_t addr = vcopy[index].address;
4773 IOByteCount len = (IOByteCount) vcopy[index].length;
fe8ab488 4774 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
9bccf70c
A
4775 if (values[0] == 0) {
4776 result = false;
4777 goto bail;
4778 }
91447636 4779 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
9bccf70c
A
4780 if (values[1] == 0) {
4781 result = false;
4782 goto bail;
4783 }
4784 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4785 if (dict == 0) {
4786 result = false;
4787 goto bail;
4788 }
fe8ab488
A
4789 array->setObject(dict);
4790 dict->release();
9bccf70c
A
4791 values[0]->release();
4792 values[1]->release();
4793 values[0] = values[1] = 0;
9bccf70c 4794 }
fe8ab488
A
4795
4796 result = array->serialize(s);
9bccf70c
A
4797
4798 bail:
fe8ab488
A
4799 if (array)
4800 array->release();
9bccf70c
A
4801 if (values[0])
4802 values[0]->release();
4803 if (values[1])
4804 values[1]->release();
4805 if (keys[0])
4806 keys[0]->release();
4807 if (keys[1])
4808 keys[1]->release();
4809 if (vcopy)
a39ff7e2 4810 IOFree(vcopy, vcopy_size);
fe8ab488 4811
9bccf70c
A
4812 return result;
4813}
4814
9bccf70c
A
4815/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4816
0b4e3aa0 4817OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
4818#ifdef __LP64__
4819OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4820OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4821OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4822OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4823OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4824OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4825OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4826#else /* !__LP64__ */
55e303ae
A
4827OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4828OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
4829OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4830OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 4831OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
4832OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4833OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4834#endif /* !__LP64__ */
1c79356b
A
4835OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4836OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4837OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4838OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4839OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4840OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4841OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4842OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 4843
55e303ae 4844/* ex-inline function implementation */
0c530ab8
A
4845IOPhysicalAddress
4846IOMemoryDescriptor::getPhysicalAddress()
9bccf70c 4847 { return( getPhysicalSegment( 0, 0 )); }