apple/xnu (xnu-2782.1.97): iokit/Kernel/IOMemoryDescriptor.cpp
1c79356b 1/*
2d21ac55 2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
55e303ae 36#include <sys/cdefs.h>
37
38#include <IOKit/assert.h>
39#include <IOKit/system.h>
40#include <IOKit/IOLib.h>
41#include <IOKit/IOMemoryDescriptor.h>
55e303ae 42#include <IOKit/IOMapper.h>
99c3a104 43#include <IOKit/IODMACommand.h>
55e303ae 44#include <IOKit/IOKitKeysPrivate.h>
1c79356b 45
46#ifndef __LP64__
47#include <IOKit/IOSubMemoryDescriptor.h>
48#endif /* !__LP64__ */
49
1c79356b 50#include <IOKit/IOKitDebug.h>
2d21ac55 51#include <libkern/OSDebug.h>
1c79356b 52
53#include "IOKitKernelInternal.h"
54
1c79356b 55#include <libkern/c++/OSContainers.h>
56#include <libkern/c++/OSDictionary.h>
57#include <libkern/c++/OSArray.h>
58#include <libkern/c++/OSSymbol.h>
59#include <libkern/c++/OSNumber.h>
60
61#include <sys/uio.h>
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
91447636 65#include <vm/vm_pageout.h>
55e303ae 66#include <mach/memory_object_types.h>
0b4e3aa0 67#include <device/device_port.h>
55e303ae 68
91447636 69#include <mach/vm_prot.h>
2d21ac55 70#include <mach/mach_vm.h>
91447636 71#include <vm/vm_fault.h>
2d21ac55 72#include <vm/vm_protos.h>
91447636 73
55e303ae 74extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75extern void ipc_port_release_send(ipc_port_t port);
76
77kern_return_t
78memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
0b4e3aa0 86
87// osfmk/device/iokit_rpc.c
88unsigned int IODefaultCacheBits(addr64_t pa);
55e303ae 89unsigned int IOTranslateCacheBits(struct phys_entry *pp);
1c79356b 90
55e303ae 91__END_DECLS
1c79356b 92
93#define kIOMapperWaitSystem ((IOMapper *) 1)
94
95static IOMapper * gIOSystemMapper = NULL;
96
97ppnum_t gIOLastPage;
98
55e303ae 99/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
de355530 100
55e303ae 101OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
de355530 102
55e303ae 103#define super IOMemoryDescriptor
de355530 104
55e303ae 105OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
de355530 106
107/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108
109static IORecursiveLock * gIOMemoryLock;
110
111#define LOCK IORecursiveLockLock( gIOMemoryLock)
112#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
113#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
114#define WAKEUP \
115 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
116
117#if 0
118#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
119#else
120#define DEBG(fmt, args...) {}
121#endif
122
b0d623f7 123#define IOMD_DEBUG_DMAACTIVE 1
124
125/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // Some data structures and accessor macros used by the initWithOptions
128 // function.
129
130enum ioPLBlockFlags {
131 kIOPLOnDevice = 0x00000001,
132 kIOPLExternUPL = 0x00000002,
133};
134
fe8ab488 135struct IOMDPersistentInitData
91447636 136{
137 const IOGeneralMemoryDescriptor * fMD;
138 IOMemoryReference * fMemRef;
139};
140
141struct ioPLBlock {
142 upl_t fIOPL;
143 vm_address_t fPageInfo; // Pointer to page list or index into it
144 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
99c3a104 145 ppnum_t fMappedPage; // Page number of first page in this iopl
146 unsigned int fPageOffset; // Offset within first page of iopl
147 unsigned int fFlags; // Flags
148};
149
150struct ioGMDData {
151 IOMapper * fMapper;
152 uint8_t fDMAMapNumAddressBits;
153 uint64_t fDMAMapAlignment;
154 addr64_t fMappedBase;
b0d623f7 155 uint64_t fPreparationID;
91447636 156 unsigned int fPageCnt;
157 unsigned char fDiscontig:1;
158 unsigned char fCompletionError:1;
159 unsigned char _resv:6;
160#if __LP64__
161 // align arrays to 8 bytes so following macros work
39236c6e 162 unsigned char fPad[3];
b0d623f7 163#endif
164 upl_page_info_t fPageList[1]; /* variable length */
165 ioPLBlock fBlocks[1]; /* variable length */
166};
167
168#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
99c3a104 169#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
170#define getNumIOPL(osd, d) \
171 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
172#define getPageList(d) (&(d->fPageList[0]))
173#define computeDataSize(p, u) \
6d2010ae 174 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
91447636 175
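// Editor's illustrative note (not part of the original source): the OSData
// buffer sized by computeDataSize() holds one ioGMDData header, then the
// variable-length page list, then the ioPLBlock array. For example, for a
// descriptor covering 3 pages with 1 UPL:
//
//   computeDataSize(3, 1) == offsetof(ioGMDData, fPageList)
//                            + 3 * sizeof(upl_page_info_t)   // fPageList[]
//                            + 1 * sizeof(ioPLBlock);        // blocks appended after the page list
//
// getIOPLList() then recovers the block array as &fPageList[fPageCnt], and
// getNumIOPL() divides the remaining buffer bytes by sizeof(ioPLBlock).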
176/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
b0d623f7 178#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
0b4e3aa0 179
180extern "C" {
181
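// Editor's descriptive note (addition, not in the original source):
// device_data_action() is the callback the device pager invokes on a data
// request; it resolves the backing IOMemoryDescriptor through the
// IOMemoryDescriptorReserved handle and asks it to handle the fault, while
// device_close() tears that handle down.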
182kern_return_t device_data_action(
b0d623f7 183 uintptr_t device_handle,
184 ipc_port_t device_pager,
185 vm_prot_t protection,
186 vm_object_offset_t offset,
187 vm_size_t size)
188{
9bccf70c 189 kern_return_t kr;
316670eb 190 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
9bccf70c 191 IOMemoryDescriptor * memDesc;
0b4e3aa0 192
9bccf70c 193 LOCK;
316670eb 194 memDesc = ref->dp.memory;
9bccf70c 195 if( memDesc)
196 {
197 memDesc->retain();
fe8ab488 198 kr = memDesc->handleFault(device_pager, offset, size);
199 memDesc->release();
200 }
201 else
202 kr = KERN_ABORTED;
203 UNLOCK;
0b4e3aa0 204
9bccf70c 205 return( kr );
206}
207
208kern_return_t device_close(
b0d623f7 209 uintptr_t device_handle)
0b4e3aa0 210{
316670eb 211 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
0b4e3aa0 212
316670eb 213 IODelete( ref, IOMemoryDescriptorReserved, 1 );
214
215 return( kIOReturnSuccess );
216}
91447636 217}; // end extern "C"
0b4e3aa0 218
219/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
221 // Note this inline function uses C++ reference arguments to return values.
222 // This means that pointers are not passed and NULLs don't have to be
223 // checked for, as a NULL reference is illegal.
224static inline void
fe8ab488 225getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
226 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227{
228 assert(kIOMemoryTypeUIO == type
229 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
230 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
231 if (kIOMemoryTypeUIO == type) {
232 user_size_t us;
233 user_addr_t ad;
234 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
91447636 235 }
b0d623f7 236#ifndef __LP64__
237 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
238 IOAddressRange cur = r.v64[ind];
239 addr = cur.address;
240 len = cur.length;
241 }
b0d623f7 242#endif /* !__LP64__ */
243 else {
244 IOVirtualRange cur = r.v[ind];
245 addr = cur.address;
246 len = cur.length;
247 }
248}
249
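// Editor's illustrative usage of getAddrLenForInd() (assumption, not in the
// original source); addr and len are written back through the references:
//
//   mach_vm_address_t addr;
//   mach_vm_size_t    len;
//   getAddrLenForInd(addr, len, type, _ranges, ind);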
250/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251
252static IOReturn
253purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
254{
255 IOReturn err = kIOReturnSuccess;
256
257 *control = VM_PURGABLE_SET_STATE;
258
259 enum { kIOMemoryPurgeableControlMask = 15 };
260
261 switch (kIOMemoryPurgeableControlMask & newState)
262 {
263 case kIOMemoryPurgeableKeepCurrent:
264 *control = VM_PURGABLE_GET_STATE;
265 break;
266
267 case kIOMemoryPurgeableNonVolatile:
268 *state = VM_PURGABLE_NONVOLATILE;
269 break;
270 case kIOMemoryPurgeableVolatile:
271 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
272 break;
273 case kIOMemoryPurgeableEmpty:
274 *state = VM_PURGABLE_EMPTY;
275 break;
276 default:
277 err = kIOReturnBadArgument;
278 break;
279 }
280 return (err);
281}
282
283static IOReturn
284purgeableStateBits(int * state)
285{
286 IOReturn err = kIOReturnSuccess;
287
288 switch (VM_PURGABLE_STATE_MASK & *state)
289 {
290 case VM_PURGABLE_NONVOLATILE:
291 *state = kIOMemoryPurgeableNonVolatile;
292 break;
293 case VM_PURGABLE_VOLATILE:
294 *state = kIOMemoryPurgeableVolatile;
295 break;
296 case VM_PURGABLE_EMPTY:
297 *state = kIOMemoryPurgeableEmpty;
298 break;
299 default:
300 *state = kIOMemoryPurgeableNonVolatile;
301 err = kIOReturnNotReady;
302 break;
303 }
304 return (err);
305}
306
307
308static vm_prot_t
309vmProtForCacheMode(IOOptionBits cacheMode)
310{
311 vm_prot_t prot = 0;
312 switch (cacheMode)
313 {
314 case kIOInhibitCache:
315 SET_MAP_MEM(MAP_MEM_IO, prot);
316 break;
317
318 case kIOWriteThruCache:
319 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
320 break;
321
322 case kIOWriteCombineCache:
323 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
324 break;
325
326 case kIOCopybackCache:
327 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
328 break;
329
330 case kIOCopybackInnerCache:
331 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
332 break;
333
334 case kIODefaultCache:
335 default:
336 SET_MAP_MEM(MAP_MEM_NOOP, prot);
337 break;
338 }
339
340 return (prot);
341}
342
343static unsigned int
344pagerFlagsForCacheMode(IOOptionBits cacheMode)
345{
346 unsigned int pagerFlags = 0;
347 switch (cacheMode)
348 {
349 case kIOInhibitCache:
350 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
351 break;
352
353 case kIOWriteThruCache:
354 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
355 break;
356
357 case kIOWriteCombineCache:
358 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
359 break;
360
361 case kIOCopybackCache:
362 pagerFlags = DEVICE_PAGER_COHERENT;
363 break;
364
365 case kIOCopybackInnerCache:
366 pagerFlags = DEVICE_PAGER_COHERENT;
367 break;
368
369 case kIODefaultCache:
370 default:
371 pagerFlags = -1U;
372 break;
373 }
374 return (pagerFlags);
375}
376
377/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
378/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
379
380struct IOMemoryEntry
381{
382 ipc_port_t entry;
383 int64_t offset;
384 uint64_t size;
385};
386
387struct IOMemoryReference
388{
389 volatile SInt32 refCount;
390 vm_prot_t prot;
391 uint32_t capacity;
392 uint32_t count;
393 IOMemoryEntry entries[0];
394};
395
396enum
397{
398 kIOMemoryReferenceReuse = 0x00000001,
399 kIOMemoryReferenceWrite = 0x00000002,
400};
401
402SInt32 gIOMemoryReferenceCount;
403
404IOMemoryReference *
405IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
406{
407 IOMemoryReference * ref;
408 size_t newSize, oldSize, copySize;
409
410 newSize = (sizeof(IOMemoryReference)
411 - sizeof(ref->entries)
412 + capacity * sizeof(ref->entries[0]));
413 ref = (typeof(ref)) IOMalloc(newSize);
414 if (realloc)
415 {
416 oldSize = (sizeof(IOMemoryReference)
417 - sizeof(realloc->entries)
418 + realloc->capacity * sizeof(realloc->entries[0]));
419 copySize = oldSize;
420 if (copySize > newSize) copySize = newSize;
421 if (ref) bcopy(realloc, ref, copySize);
422 IOFree(realloc, oldSize);
423 }
424 else if (ref)
425 {
426 bzero(ref, sizeof(*ref));
427 ref->refCount = 1;
428 OSIncrementAtomic(&gIOMemoryReferenceCount);
429 }
430 if (!ref) return (0);
431 ref->capacity = capacity;
432 return (ref);
433}
434
435void
436IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
437{
438 IOMemoryEntry * entries;
439 size_t size;
440
441 entries = ref->entries + ref->count;
442 while (entries > &ref->entries[0])
443 {
444 entries--;
445 ipc_port_release_send(entries->entry);
446 }
447 size = (sizeof(IOMemoryReference)
448 - sizeof(ref->entries)
449 + ref->capacity * sizeof(ref->entries[0]));
450 IOFree(ref, size);
451
452 OSDecrementAtomic(&gIOMemoryReferenceCount);
453}
454
455void
456IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
457{
458 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
459}
460
461
462IOReturn
463IOGeneralMemoryDescriptor::memoryReferenceCreate(
464 IOOptionBits options,
465 IOMemoryReference ** reference)
466{
467 enum { kCapacity = 4, kCapacityInc = 4 };
468
469 kern_return_t err;
470 IOMemoryReference * ref;
471 IOMemoryEntry * entries;
472 IOMemoryEntry * cloneEntries;
473 vm_map_t map;
474 ipc_port_t entry, cloneEntry;
475 vm_prot_t prot;
476 memory_object_size_t actualSize;
477 uint32_t rangeIdx;
478 uint32_t count;
479 mach_vm_address_t entryAddr, endAddr, entrySize;
480 mach_vm_size_t srcAddr, srcLen;
481 mach_vm_size_t nextAddr, nextLen;
482 mach_vm_size_t offset, remain;
483 IOByteCount physLen;
484 IOOptionBits type = (_flags & kIOMemoryTypeMask);
485 IOOptionBits cacheMode;
486 unsigned int pagerFlags;
487
488 ref = memoryReferenceAlloc(kCapacity, NULL);
489 if (!ref) return (kIOReturnNoMemory);
490 entries = &ref->entries[0];
491 count = 0;
492
493 offset = 0;
494 rangeIdx = 0;
495 if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
496 else
497 {
498 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
499 nextLen = physLen;
500 // default cache mode for physical
501 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
502 {
503 IOOptionBits mode;
504 pagerFlags = IODefaultCacheBits(nextAddr);
505 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
506 {
507 if (DEVICE_PAGER_GUARDED & pagerFlags)
508 mode = kIOInhibitCache;
509 else
510 mode = kIOWriteCombineCache;
511 }
512 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
513 mode = kIOWriteThruCache;
514 else
515 mode = kIOCopybackCache;
516 _flags |= (mode << kIOMemoryBufferCacheShift);
517 }
518 }
519
520 // cache mode & vm_prot
521 prot = VM_PROT_READ;
522 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
523 prot |= vmProtForCacheMode(cacheMode);
524 // VM system requires write access to change cache mode
525 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
526 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
527 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
528
529 if ((kIOMemoryReferenceReuse & options) && _memRef)
530 {
531 cloneEntries = &_memRef->entries[0];
532 prot |= MAP_MEM_NAMED_REUSE;
533 }
534
535 if (_task)
536 {
537 // virtual ranges
538
539 if (kIOMemoryBufferPageable & _flags)
540 {
541 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
542 prot |= MAP_MEM_NAMED_CREATE;
543 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
544 prot |= VM_PROT_WRITE;
545 map = NULL;
546 }
547 else map = get_task_map(_task);
548
549 remain = _length;
550 while (remain)
551 {
552 srcAddr = nextAddr;
553 srcLen = nextLen;
554 nextAddr = 0;
555 nextLen = 0;
556 // coalesce addr range
557 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
558 {
559 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
560 if ((srcAddr + srcLen) != nextAddr) break;
561 srcLen += nextLen;
562 }
563 entryAddr = trunc_page_64(srcAddr);
564 endAddr = round_page_64(srcAddr + srcLen);
565 do
566 {
567 entrySize = (endAddr - entryAddr);
568 if (!entrySize) break;
569 actualSize = entrySize;
570
571 cloneEntry = MACH_PORT_NULL;
572 if (MAP_MEM_NAMED_REUSE & prot)
573 {
574 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
575 else prot &= ~MAP_MEM_NAMED_REUSE;
576 }
577
578 err = mach_make_memory_entry_64(map,
579 &actualSize, entryAddr, prot, &entry, cloneEntry);
580
581 if (KERN_SUCCESS != err) break;
582 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
583
584 if (count >= ref->capacity)
585 {
586 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
587 entries = &ref->entries[count];
588 }
589 entries->entry = entry;
590 entries->size = actualSize;
591 entries->offset = offset + (entryAddr - srcAddr);
592 entryAddr += actualSize;
593 if (MAP_MEM_NAMED_REUSE & prot)
594 {
595 if ((cloneEntries->entry == entries->entry)
596 && (cloneEntries->size == entries->size)
597 && (cloneEntries->offset == entries->offset)) cloneEntries++;
598 else prot &= ~MAP_MEM_NAMED_REUSE;
599 }
600 entries++;
601 count++;
602 }
603 while (true);
604 offset += srcLen;
605 remain -= srcLen;
606 }
607 }
608 else
609 {
610 // _task == 0, physical
611 memory_object_t pager;
612 vm_size_t size = ptoa_32(_pages);
613
614 if (!getKernelReserved()) panic("getKernelReserved");
615
616 reserved->dp.pagerContig = (1 == _rangesCount);
617 reserved->dp.memory = this;
618
619 pagerFlags = pagerFlagsForCacheMode(cacheMode);
620 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
621 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
622
623 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
624 size, pagerFlags);
625 assert (pager);
626 if (!pager) err = kIOReturnVMError;
627 else
628 {
629 srcAddr = nextAddr;
630 entryAddr = trunc_page_64(srcAddr);
631 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
632 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
633 assert (KERN_SUCCESS == err);
634 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
635 else
636 {
637 reserved->dp.devicePager = pager;
638 entries->entry = entry;
639 entries->size = size;
640 entries->offset = offset + (entryAddr - srcAddr);
641 entries++;
642 count++;
643 }
644 }
645 }
646
647 ref->count = count;
648 ref->prot = prot;
649
650 if (KERN_SUCCESS == err)
651 {
652 if (MAP_MEM_NAMED_REUSE & prot)
653 {
654 memoryReferenceFree(ref);
655 OSIncrementAtomic(&_memRef->refCount);
656 ref = _memRef;
657 }
658 }
659 else
660 {
661 memoryReferenceFree(ref);
662 ref = NULL;
663 }
664
665 *reference = ref;
666
667 return (err);
668}
669
670struct IOMemoryDescriptorMapAllocRef
671{
672 vm_map_t map;
673 mach_vm_address_t mapped;
674 mach_vm_size_t size;
675 vm_prot_t prot;
676 IOOptionBits options;
677};
678
679static kern_return_t
680IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
681{
682 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
683 IOReturn err;
684 vm_map_offset_t addr;
685
686 addr = ref->mapped;
687 err = vm_map_enter_mem_object(map, &addr, ref->size,
688 (vm_map_offset_t) 0,
689 (((ref->options & kIOMapAnywhere)
690 ? VM_FLAGS_ANYWHERE
691 : VM_FLAGS_FIXED)
692 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
693 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
694 IPC_PORT_NULL,
695 (memory_object_offset_t) 0,
696 false, /* copy */
697 ref->prot,
698 ref->prot,
699 VM_INHERIT_NONE);
700 if (KERN_SUCCESS == err)
701 {
702 ref->mapped = (mach_vm_address_t) addr;
703 ref->map = map;
704 }
705
706 return( err );
707}
708
709IOReturn
710IOGeneralMemoryDescriptor::memoryReferenceMap(
711 IOMemoryReference * ref,
712 vm_map_t map,
713 mach_vm_size_t inoffset,
714 mach_vm_size_t size,
715 IOOptionBits options,
716 mach_vm_address_t * inaddr)
717{
718 IOReturn err;
719 int64_t offset = inoffset;
720 uint32_t rangeIdx, entryIdx;
721 vm_map_offset_t addr, mapAddr;
722 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
723
724 mach_vm_address_t srcAddr, nextAddr;
725 mach_vm_size_t srcLen, nextLen;
726 IOByteCount physLen;
727 IOMemoryEntry * entry;
728 vm_prot_t prot, memEntryCacheMode;
729 IOOptionBits type;
730 IOOptionBits cacheMode;
731
732 /*
733 * For the kIOMapPrefault option.
734 */
735 upl_page_info_t *pageList = NULL;
736 UInt currentPageIndex = 0;
737
738 type = _flags & kIOMemoryTypeMask;
739 prot = VM_PROT_READ;
740 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
741 prot &= ref->prot;
742
743 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
744 if (kIODefaultCache != cacheMode)
745 {
746 // VM system requires write access to change cache mode
747 prot |= VM_PROT_WRITE;
748 // update named entries cache mode
749 memEntryCacheMode = (MAP_MEM_ONLY | prot | vmProtForCacheMode(cacheMode));
750 }
751
752 if (_task)
753 {
754 // Find first range for offset
755 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
756 {
757 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
758 if (remain < nextLen) break;
759 remain -= nextLen;
760 }
761 }
762 else
763 {
764 rangeIdx = 0;
765 remain = 0;
766 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
767 nextLen = size;
768 }
769
770 assert(remain < nextLen);
771 if (remain >= nextLen) return (kIOReturnBadArgument);
772
773 nextAddr += remain;
774 nextLen -= remain;
775 pageOffset = (page_mask & nextAddr);
776 addr = 0;
777 if (!(options & kIOMapAnywhere))
778 {
779 addr = *inaddr;
780 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
781 addr -= pageOffset;
782 }
783
784 // find first entry for offset
785 for (entryIdx = 0;
786 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
787 entryIdx++) {}
788 entryIdx--;
789 entry = &ref->entries[entryIdx];
790
791 // allocate VM
792 size = round_page_64(size + pageOffset);
793 {
794 IOMemoryDescriptorMapAllocRef ref;
795 ref.map = map;
796 ref.options = options;
797 ref.size = size;
798 ref.prot = prot;
799 if (options & kIOMapAnywhere)
800 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
801 ref.mapped = 0;
802 else
803 ref.mapped = addr;
804
805 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
806 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
807 else
808 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
809 if (KERN_SUCCESS == err)
810 {
811 addr = ref.mapped;
812 map = ref.map;
813 }
814 }
815
816 /*
817 * Prefaulting is only possible if we wired the memory earlier. Check the
818 * memory type, and the underlying data.
819 */
820 if (options & kIOMapPrefault) {
821 /*
822 * The memory must have been wired by calling ::prepare(), otherwise
823 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
824 */
825 assert(map != kernel_map);
826 assert(_wireCount != 0);
827 assert(_memoryEntries != NULL);
828 if ((map == kernel_map) ||
829 (_wireCount == 0) ||
830 (_memoryEntries == NULL))
831 {
832 return kIOReturnBadArgument;
833 }
834
835 // Get the page list.
836 ioGMDData* dataP = getDataP(_memoryEntries);
837 ioPLBlock const* ioplList = getIOPLList(dataP);
838 pageList = getPageList(dataP);
839
840 // Get the number of IOPLs.
841 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
842
843 /*
844 * Scan through the IOPL Info Blocks, looking for the first block containing
845 * the offset. The search will go past it, so we'll need to go back to the
846 * right range at the end.
847 */
848 UInt ioplIndex = 0;
849 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
850 ioplIndex++;
851 ioplIndex--;
852
853 // Retrieve the IOPL info block.
854 ioPLBlock ioplInfo = ioplList[ioplIndex];
855
856 /*
857 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
858 * array.
859 */
860 if (ioplInfo.fFlags & kIOPLExternUPL)
861 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
862 else
863 pageList = &pageList[ioplInfo.fPageInfo];
864
865 // Rebase [offset] into the IOPL in order to look for the first page index.
866 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
867
868 // Retrieve the index of the first page corresponding to the offset.
869 currentPageIndex = atop_32(offsetInIOPL);
870 }
871
872 // enter mappings
873 remain = size;
874 mapAddr = addr;
875 addr += pageOffset;
876 while (remain && nextLen && (KERN_SUCCESS == err))
877 {
878 srcAddr = nextAddr;
879 srcLen = nextLen;
880 nextAddr = 0;
881 nextLen = 0;
882 // coalesce addr range
883 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
884 {
885 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
886 if ((srcAddr + srcLen) != nextAddr) break;
887 srcLen += nextLen;
888 }
889
890 while (srcLen && (KERN_SUCCESS == err))
891 {
892 entryOffset = offset - entry->offset;
893 if ((page_mask & entryOffset) != pageOffset)
894 {
895 err = kIOReturnNotAligned;
896 break;
897 }
898
899 if (kIODefaultCache != cacheMode)
900 {
901 vm_size_t unused = 0;
902 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
903 memEntryCacheMode, NULL, entry->entry);
904 assert (KERN_SUCCESS == err);
905 }
906
907 entryOffset -= pageOffset;
908 if (entryOffset >= entry->size) panic("entryOffset");
909 chunk = entry->size - entryOffset;
910 if (chunk)
911 {
912 if (chunk > remain) chunk = remain;
913
914 if (options & kIOMapPrefault) {
915 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
916 err = vm_map_enter_mem_object_prefault(map,
917 &mapAddr,
918 chunk, 0 /* mask */,
919 (VM_FLAGS_FIXED
920 | VM_FLAGS_OVERWRITE
921 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
922 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
923 entry->entry,
924 entryOffset,
925 prot, // cur
926 prot, // max
927 &pageList[currentPageIndex],
928 nb_pages);
929
930 // Compute the next index in the page list.
931 currentPageIndex += nb_pages;
932 assert(currentPageIndex <= _pages);
933 } else {
934 err = vm_map_enter_mem_object(map,
935 &mapAddr,
936 chunk, 0 /* mask */,
937 (VM_FLAGS_FIXED
938 | VM_FLAGS_OVERWRITE
939 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
940 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
941 entry->entry,
942 entryOffset,
943 false, // copy
944 prot, // cur
945 prot, // max
946 VM_INHERIT_NONE);
947 }
948
949 if (KERN_SUCCESS != err) break;
950 remain -= chunk;
951 if (!remain) break;
952 mapAddr += chunk;
953 offset += chunk - pageOffset;
954 }
955 pageOffset = 0;
956 entry++;
957 entryIdx++;
958 if (entryIdx >= ref->count)
959 {
960 err = kIOReturnOverrun;
961 break;
962 }
963 }
964 }
965
966 if ((KERN_SUCCESS != err) && addr)
967 {
968 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
969 addr = 0;
970 }
971 *inaddr = addr;
972
973 return (err);
974}
975
976IOReturn
977IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
978 IOMemoryReference * ref,
979 IOByteCount * residentPageCount,
980 IOByteCount * dirtyPageCount)
981{
982 IOReturn err;
983 IOMemoryEntry * entries;
984 unsigned int resident, dirty;
985 unsigned int totalResident, totalDirty;
986
987 totalResident = totalDirty = 0;
988 entries = ref->entries + ref->count;
989 while (entries > &ref->entries[0])
990 {
991 entries--;
992 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
993 if (KERN_SUCCESS != err) break;
994 totalResident += resident;
995 totalDirty += dirty;
996 }
997
998 if (residentPageCount) *residentPageCount = totalResident;
999 if (dirtyPageCount) *dirtyPageCount = totalDirty;
1000 return (err);
1001}
1002
1003IOReturn
1004IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1005 IOMemoryReference * ref,
1006 IOOptionBits newState,
1007 IOOptionBits * oldState)
1008{
1009 IOReturn err;
1010 IOMemoryEntry * entries;
1011 vm_purgable_t control;
1012 int totalState, state;
1013
1014 entries = ref->entries + ref->count;
1015 totalState = kIOMemoryPurgeableNonVolatile;
1016 while (entries > &ref->entries[0])
1017 {
1018 entries--;
1019
1020 err = purgeableControlBits(newState, &control, &state);
1021 if (KERN_SUCCESS != err) break;
1022 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1023 if (KERN_SUCCESS != err) break;
1024 err = purgeableStateBits(&state);
1025 if (KERN_SUCCESS != err) break;
1026
1027 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1028 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1029 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1030 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1031 else totalState = kIOMemoryPurgeableNonVolatile;
1032 }
1033
1034 if (oldState) *oldState = totalState;
1035 return (err);
1036}
1037
1038/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1039
1040IOMemoryDescriptor *
1041IOMemoryDescriptor::withAddress(void * address,
1042 IOByteCount length,
1043 IODirection direction)
1044{
1045 return IOMemoryDescriptor::
b0d623f7 1046 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1047}
1048
b0d623f7 1049#ifndef __LP64__
55e303ae 1050IOMemoryDescriptor *
b0d623f7 1051IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1052 IOByteCount length,
1053 IODirection direction,
1054 task_t task)
1055{
1056 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1057 if (that)
1058 {
55e303ae 1059 if (that->initWithAddress(address, length, direction, task))
1060 return that;
1061
1062 that->release();
1063 }
1064 return 0;
1065}
b0d623f7 1066#endif /* !__LP64__ */
1067
1068IOMemoryDescriptor *
1069IOMemoryDescriptor::withPhysicalAddress(
1070 IOPhysicalAddress address,
1071 IOByteCount length,
1072 IODirection direction )
1073{
b0d623f7 1074 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1075}
1076
b0d623f7 1077#ifndef __LP64__
1078IOMemoryDescriptor *
1079IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1080 UInt32 withCount,
1081 IODirection direction,
1082 task_t task,
1083 bool asReference)
1084{
1085 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1086 if (that)
1087 {
55e303ae 1088 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1089 return that;
1090
1091 that->release();
1092 }
1093 return 0;
1094}
b0d623f7 1095#endif /* !__LP64__ */
1c79356b 1096
1097IOMemoryDescriptor *
1098IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1099 mach_vm_size_t length,
1100 IOOptionBits options,
1101 task_t task)
1102{
1103 IOAddressRange range = { address, length };
1104 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1105}
1106
1107IOMemoryDescriptor *
1108IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1109 UInt32 rangeCount,
1110 IOOptionBits options,
1111 task_t task)
1112{
1113 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1114 if (that)
1115 {
1116 if (task)
1117 options |= kIOMemoryTypeVirtual64;
1118 else
1119 options |= kIOMemoryTypePhysical64;
1120
1121 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1122 return that;
0c530ab8 1123
2d21ac55 1124 that->release();
1125 }
1126
1127 return 0;
1128}
1129
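// Editor's illustrative caller sketch (assumption, not part of this file):
// a typical client wraps a task's buffer with one of the factory methods
// above, prepares it for I/O, and releases it when done. userAddr, userLen
// and task are hypothetical placeholders.
//
//   IOMemoryDescriptor * md =
//       IOMemoryDescriptor::withAddressRange(userAddr, userLen,
//                                            kIODirectionOutIn, task);
//   if (md && (kIOReturnSuccess == md->prepare()))
//   {
//       // ... perform DMA, or copy via readBytes()/writeBytes() ...
//       md->complete();
//   }
//   if (md) md->release();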
1130
1131/*
b0d623f7 1132 * withOptions:
1133 *
1134 * Create a new IOMemoryDescriptor. The buffer is made up of several
1135 * virtual address ranges, from a given task.
1136 *
1137 * Passing the ranges as a reference will avoid an extra allocation.
1138 */
1139IOMemoryDescriptor *
1140IOMemoryDescriptor::withOptions(void * buffers,
1141 UInt32 count,
1142 UInt32 offset,
1143 task_t task,
1144 IOOptionBits opts,
1145 IOMapper * mapper)
1c79356b 1146{
55e303ae 1147 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 1148
1149 if (self
1150 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1151 {
1152 self->release();
1153 return 0;
de355530 1154 }
1155
1156 return self;
1157}
1158
1159bool IOMemoryDescriptor::initWithOptions(void * buffers,
1160 UInt32 count,
1161 UInt32 offset,
1162 task_t task,
1163 IOOptionBits options,
1164 IOMapper * mapper)
1165{
b0d623f7 1166 return( false );
1167}
1168
b0d623f7 1169#ifndef __LP64__
1170IOMemoryDescriptor *
1171IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1172 UInt32 withCount,
1173 IODirection direction,
1174 bool asReference)
1175{
1176 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1177 if (that)
1178 {
55e303ae 1179 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1180 return that;
1181
1182 that->release();
1183 }
1184 return 0;
1185}
1186
1187IOMemoryDescriptor *
1188IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1189 IOByteCount offset,
1190 IOByteCount length,
55e303ae 1191 IODirection direction)
1c79356b 1192{
b0d623f7 1193 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
1c79356b 1194}
b0d623f7 1195#endif /* !__LP64__ */
1c79356b 1196
1197IOMemoryDescriptor *
1198IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1199{
1200 IOGeneralMemoryDescriptor *origGenMD =
1201 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1202
1203 if (origGenMD)
1204 return IOGeneralMemoryDescriptor::
1205 withPersistentMemoryDescriptor(origGenMD);
1206 else
1207 return 0;
1208}
1209
1210IOMemoryDescriptor *
1211IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
91447636 1212{
1213 IOMemoryReference * memRef;
1214
1215 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
91447636 1216
1217 if (memRef == originalMD->_memRef)
1218 {
91447636 1219 originalMD->retain(); // Add a new reference to ourselves
fe8ab488 1220 originalMD->memoryReferenceRelease(memRef);
1221 return originalMD;
1222 }
1223
1224 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
fe8ab488 1225 IOMDPersistentInitData initData = { originalMD, memRef };
1226
1227 if (self
1228 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1229 self->release();
1230 self = 0;
1231 }
1232 return self;
1233}
1234
b0d623f7 1235#ifndef __LP64__
1236bool
1237IOGeneralMemoryDescriptor::initWithAddress(void * address,
1238 IOByteCount withLength,
1239 IODirection withDirection)
1240{
b0d623f7 1241 _singleRange.v.address = (vm_offset_t) address;
1242 _singleRange.v.length = withLength;
1243
1244 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1245}
1246
1247bool
b0d623f7 1248IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1249 IOByteCount withLength,
1250 IODirection withDirection,
1251 task_t withTask)
1252{
1253 _singleRange.v.address = address;
1254 _singleRange.v.length = withLength;
1255
1256 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1257}
1258
1259bool
1260IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1261 IOPhysicalAddress address,
1262 IOByteCount withLength,
1263 IODirection withDirection )
1264{
1265 _singleRange.p.address = address;
1266 _singleRange.p.length = withLength;
1267
1268 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1269}
1270
1271bool
1272IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1273 IOPhysicalRange * ranges,
1274 UInt32 count,
1275 IODirection direction,
1276 bool reference)
1277{
1278 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1279
1280 if (reference)
1281 mdOpts |= kIOMemoryAsReference;
1282
1283 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1284}
1285
1286bool
1287IOGeneralMemoryDescriptor::initWithRanges(
1288 IOVirtualRange * ranges,
1289 UInt32 count,
1290 IODirection direction,
1291 task_t task,
1292 bool reference)
1293{
1294 IOOptionBits mdOpts = direction;
1295
1296 if (reference)
1297 mdOpts |= kIOMemoryAsReference;
1298
1299 if (task) {
1300 mdOpts |= kIOMemoryTypeVirtual;
1301
1302 // Auto-prepare if this is a kernel memory descriptor as very few
1303 // clients bother to prepare() kernel memory.
2d21ac55 1304 // But it was not enforced so what are you going to do?
1305 if (task == kernel_task)
1306 mdOpts |= kIOMemoryAutoPrepare;
1307 }
1308 else
1309 mdOpts |= kIOMemoryTypePhysical;
1310
1311 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1312}
b0d623f7 1313#endif /* !__LP64__ */
55e303ae 1314
1c79356b 1315/*
55e303ae 1316 * initWithOptions:
1c79356b 1317 *
55e303ae 1318 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
1319 * from a given task, several physical ranges, a UPL from the ubc
1320 * system or a uio (may be 64bit) from the BSD subsystem.
1c79356b
A
1321 *
1322 * Passing the ranges as a reference will avoid an extra allocation.
1323 *
55e303ae
A
1324 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1325 * existing instance -- note this behavior is not commonly supported in other
1326 * I/O Kit classes, although it is supported here.
1c79356b 1327 */
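// Editor's illustrative sketch of the re-use behaviour described above
// (assumption, not part of the original source); md is an existing
// IOGeneralMemoryDescriptor, and newAddr/newLen are hypothetical placeholders:
//
//   IOAddressRange range = { newAddr, newLen };
//   bool ok = md->initWithOptions(&range, 1, 0, task,
//                                 kIOMemoryTypeVirtual64 | kIODirectionOutIn,
//                                 /* mapper */ 0);   // retargets the existing descriptor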
55e303ae 1328
1c79356b 1329bool
1330IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1331 UInt32 count,
1332 UInt32 offset,
1333 task_t task,
1334 IOOptionBits options,
1335 IOMapper * mapper)
1336{
1337 IOOptionBits type = options & kIOMemoryTypeMask;
1338
1339#ifndef __LP64__
1340 if (task
1341 && (kIOMemoryTypeVirtual == type)
1342 && vm_map_is_64bit(get_task_map(task))
1343 && ((IOVirtualRange *) buffers)->address)
1344 {
1345 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1346 return false;
1347 }
1348#endif /* !__LP64__ */
1349
1350 // Grab the original MD's configuration data to initialise the
1351 // arguments to this function.
1352 if (kIOMemoryTypePersistentMD == type) {
1353
fe8ab488 1354 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1355 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1356 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1357
1358 // Only accept persistent memory descriptors with valid dataP data.
1359 assert(orig->_rangesCount == 1);
1360 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1361 return false;
1362
fe8ab488 1363 _memRef = initData->fMemRef; // Grab the new named entry
1364 options = orig->_flags & ~kIOMemoryAsReference;
1365 type = options & kIOMemoryTypeMask;
1366 buffers = orig->_ranges.v;
1367 count = orig->_rangesCount;
55e303ae 1368
1369 // Now grab the original task and whatever mapper was previously used
1370 task = orig->_task;
1371 mapper = dataP->fMapper;
1372
1373 // We are ready to go through the original initialisation now
1374 }
1375
1376 switch (type) {
1377 case kIOMemoryTypeUIO:
55e303ae 1378 case kIOMemoryTypeVirtual:
b0d623f7 1379#ifndef __LP64__
0c530ab8 1380 case kIOMemoryTypeVirtual64:
b0d623f7 1381#endif /* !__LP64__ */
1382 assert(task);
1383 if (!task)
1384 return false;
2d21ac55 1385 break;
1386
1387 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
b0d623f7 1388#ifndef __LP64__
0c530ab8 1389 case kIOMemoryTypePhysical64:
b0d623f7 1390#endif /* !__LP64__ */
1391 case kIOMemoryTypeUPL:
1392 assert(!task);
1393 break;
1394 default:
1395 return false; /* bad argument */
1396 }
1397
1398 assert(buffers);
1399 assert(count);
1400
1401 /*
1402 * We can check the _initialized instance variable before having ever set
1403 * it to an initial value because I/O Kit guarantees that all our instance
1404 * variables are zeroed on an object's allocation.
1405 */
1406
55e303ae 1407 if (_initialized) {
1408 /*
1409 * An existing memory descriptor is being retargeted to point to
1410 * somewhere else. Clean up our present state.
1411 */
1412 IOOptionBits type = _flags & kIOMemoryTypeMask;
1413 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1414 {
1415 while (_wireCount)
1416 complete();
1417 }
b0d623f7 1418 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1419 {
1420 if (kIOMemoryTypeUIO == type)
1421 uio_free((uio_t) _ranges.v);
b0d623f7 1422#ifndef __LP64__
1423 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1424 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 1425#endif /* !__LP64__ */
1426 else
1427 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1428 }
2d21ac55 1429
1430 options |= (kIOMemoryRedirected & _flags);
1431 if (!(kIOMemoryRedirected & options))
6d2010ae 1432 {
fe8ab488 1433 if (_memRef)
39236c6e 1434 {
1435 memoryReferenceRelease(_memRef);
1436 _memRef = 0;
1437 }
1438 if (_mappings)
1439 _mappings->flushCollection();
6d2010ae 1440 }
1c79356b 1441 }
1442 else {
1443 if (!super::init())
1444 return false;
1445 _initialized = true;
1446 }
d7e50217 1447
55e303ae 1448 // Grab the appropriate mapper
99c3a104 1449 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
b0d623f7 1450 if (kIOMemoryMapperNone & options)
55e303ae 1451 mapper = 0; // No Mapper
0c530ab8 1452 else if (mapper == kIOMapperSystem) {
1453 IOMapper::checkForSystemMapper();
1454 gIOSystemMapper = mapper = IOMapper::gSystem;
1455 }
1c79356b 1456
1457 // Temp binary compatibility for kIOMemoryThreadSafe
1458 if (kIOMemoryReserved6156215 & options)
1459 {
1460 options &= ~kIOMemoryReserved6156215;
1461 options |= kIOMemoryThreadSafe;
1462 }
1463 // Remove the dynamic internal use flags from the initial setting
1464 options &= ~(kIOMemoryPreparedReadOnly);
1465 _flags = options;
1466 _task = task;
1467
b0d623f7 1468#ifndef __LP64__
55e303ae 1469 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
b0d623f7 1470#endif /* !__LP64__ */
1471
1472 __iomd_reservedA = 0;
1473 __iomd_reservedB = 0;
0c530ab8 1474 _highestPage = 0;
1c79356b 1475
1476 if (kIOMemoryThreadSafe & options)
1477 {
1478 if (!_prepareLock)
1479 _prepareLock = IOLockAlloc();
1480 }
1481 else if (_prepareLock)
1482 {
1483 IOLockFree(_prepareLock);
1484 _prepareLock = NULL;
1485 }
1486
91447636 1487 if (kIOMemoryTypeUPL == type) {
1c79356b 1488
1489 ioGMDData *dataP;
1490 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
d7e50217 1491
99c3a104 1492 if (!initMemoryEntries(dataSize, mapper)) return (false);
55e303ae 1493 dataP = getDataP(_memoryEntries);
1494 dataP->fPageCnt = 0;
1495
0c530ab8 1496 // _wireCount++; // UPLs start out life wired
1497
1498 _length = count;
1499 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1500
1501 ioPLBlock iopl;
55e303ae 1502 iopl.fIOPL = (upl_t) buffers;
6d2010ae 1503 upl_set_referenced(iopl.fIOPL, true);
1504 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1505
1506 if (upl_get_size(iopl.fIOPL) < (count + offset))
1507 panic("short external upl");
1508
1509 _highestPage = upl_get_highest_page(iopl.fIOPL);
1510
1511 // Set the flag kIOPLOnDevice conveniently equal to 1
1512 iopl.fFlags = pageList->device | kIOPLExternUPL;
55e303ae 1513 if (!pageList->device) {
1514 // Pre-compute the offset into the UPL's page list
1515 pageList = &pageList[atop_32(offset)];
1516 offset &= PAGE_MASK;
55e303ae 1517 }
1518 iopl.fIOMDOffset = 0;
1519 iopl.fMappedPage = 0;
1520 iopl.fPageInfo = (vm_address_t) pageList;
1521 iopl.fPageOffset = offset;
55e303ae 1522 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
d7e50217 1523 }
91447636 1524 else {
1525 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1526 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1527
1528 // Initialize the memory descriptor
1529 if (options & kIOMemoryAsReference) {
b0d623f7 1530#ifndef __LP64__
91447636 1531 _rangesIsAllocated = false;
b0d623f7 1532#endif /* !__LP64__ */
1533
1534 // Hack assignment to get the buffer arg into _ranges.
1535 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1536 // work, C++ sigh.
1537 // This also initialises the uio & physical ranges.
1538 _ranges.v = (IOVirtualRange *) buffers;
1539 }
1540 else {
b0d623f7 1541#ifndef __LP64__
6601e61a 1542 _rangesIsAllocated = true;
1543#endif /* !__LP64__ */
1544 switch (type)
1545 {
1546 case kIOMemoryTypeUIO:
1547 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1548 break;
1549
b0d623f7 1550#ifndef __LP64__
1551 case kIOMemoryTypeVirtual64:
1552 case kIOMemoryTypePhysical64:
b0d623f7 1553 if (count == 1
1554 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1555 ) {
1556 if (kIOMemoryTypeVirtual64 == type)
1557 type = kIOMemoryTypeVirtual;
1558 else
1559 type = kIOMemoryTypePhysical;
1560 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1561 _rangesIsAllocated = false;
1562 _ranges.v = &_singleRange.v;
1563 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1564 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1565 break;
1566 }
1567 _ranges.v64 = IONew(IOAddressRange, count);
1568 if (!_ranges.v64)
1569 return false;
1570 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1571 break;
b0d623f7 1572#endif /* !__LP64__ */
0c530ab8 1573 case kIOMemoryTypeVirtual:
2d21ac55 1574 case kIOMemoryTypePhysical:
1575 if (count == 1) {
1576 _flags |= kIOMemoryAsReference;
1577#ifndef __LP64__
1578 _rangesIsAllocated = false;
1579#endif /* !__LP64__ */
1580 _ranges.v = &_singleRange.v;
1581 } else {
1582 _ranges.v = IONew(IOVirtualRange, count);
1583 if (!_ranges.v)
1584 return false;
1585 }
1586 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1587 break;
1588 }
1589 }
1590
1591 // Find starting address within the vector of ranges
1592 Ranges vec = _ranges;
1593 UInt32 length = 0;
1594 UInt32 pages = 0;
1595 for (unsigned ind = 0; ind < count; ind++) {
1596 mach_vm_address_t addr;
1597 mach_vm_size_t len;
1598
1599 // addr & len are returned by this function
1600 getAddrLenForInd(addr, len, type, vec, ind);
1601 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
1602 len += length;
0c530ab8 1603 assert(len >= length); // Check for 32 bit wrap around
91447636 1604 length = len;
1605
1606 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1607 {
1608 ppnum_t highPage = atop_64(addr + len - 1);
1609 if (highPage > _highestPage)
1610 _highestPage = highPage;
1611 }
1612 }
1613 _length = length;
1614 _pages = pages;
1615 _rangesCount = count;
1616
1617 // Auto-prepare memory at creation time.
1618 // Implied completion when descriptor is freed
0c530ab8 1619 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
91447636 1620 _wireCount++; // Physical MDs are, by definition, wired
0c530ab8 1621 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
55e303ae 1622 ioGMDData *dataP;
91447636 1623 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
55e303ae 1624
99c3a104 1625 if (!initMemoryEntries(dataSize, mapper)) return false;
55e303ae 1626 dataP = getDataP(_memoryEntries);
1627 dataP->fPageCnt = _pages;
1628
1629 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1630 {
1631 IOReturn
1632 err = memoryReferenceCreate(0, &_memRef);
1633 if (kIOReturnSuccess != err) return false;
1634 }
1635
1636 if ((_flags & kIOMemoryAutoPrepare)
1637 && prepare() != kIOReturnSuccess)
1638 return false;
1639 }
1640 }
1641
1642 return true;
1643}
1644
1645/*
1646 * free
1647 *
1648 * Free resources.
1649 */
1650void IOGeneralMemoryDescriptor::free()
1651{
1652 IOOptionBits type = _flags & kIOMemoryTypeMask;
1653
9bccf70c 1654 if( reserved)
1655 {
1656 LOCK;
316670eb 1657 reserved->dp.memory = 0;
1658 UNLOCK;
1659 }
bd504ef0 1660 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2d21ac55 1661 {
1662 ioGMDData * dataP;
1663 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1664 {
1665 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
1666 dataP->fMappedBase = 0;
1667 }
2d21ac55 1668 }
1669 else
1670 {
1671 while (_wireCount) complete();
1672 }
1673
1674 if (_memoryEntries) _memoryEntries->release();
55e303ae 1675
b0d623f7 1676 if (_ranges.v && !(kIOMemoryAsReference & _flags))
0c530ab8 1677 {
1678 if (kIOMemoryTypeUIO == type)
1679 uio_free((uio_t) _ranges.v);
b0d623f7 1680#ifndef __LP64__
1681 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1682 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 1683#endif /* !__LP64__ */
1684 else
1685 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1686
1687 _ranges.v = NULL;
0c530ab8 1688 }
9bccf70c 1689
1690 if (reserved)
1691 {
1692 if (reserved->dp.devicePager)
1693 {
1694 // memEntry holds a ref on the device pager which owns reserved
1695 // (IOMemoryDescriptorReserved) so no reserved access after this point
1696 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1697 }
1698 else
1699 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1700 reserved = NULL;
1701 }
9bccf70c 1702
1703 if (_memRef) memoryReferenceRelease(_memRef);
1704 if (_prepareLock) IOLockFree(_prepareLock);
2d21ac55 1705
1706 super::free();
1707}
1708
1709#ifndef __LP64__
1710void IOGeneralMemoryDescriptor::unmapFromKernel()
1711{
1712 panic("IOGMD::unmapFromKernel deprecated");
1713}
1714
1715void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1716{
1717 panic("IOGMD::mapIntoKernel deprecated");
1718}
1719#endif /* !__LP64__ */
1720
1721/*
1722 * getDirection:
1723 *
1724 * Get the direction of the transfer.
1725 */
1726IODirection IOMemoryDescriptor::getDirection() const
1727{
1728#ifndef __LP64__
1729 if (_direction)
1730 return _direction;
1731#endif /* !__LP64__ */
1732 return (IODirection) (_flags & kIOMemoryDirectionMask);
1733}
1734
1735/*
1736 * getLength:
1737 *
1738 * Get the length of the transfer (over all ranges).
1739 */
1740IOByteCount IOMemoryDescriptor::getLength() const
1741{
1742 return _length;
1743}
1744
55e303ae 1745void IOMemoryDescriptor::setTag( IOOptionBits tag )
1746{
1747 _tag = tag;
1748}
1749
1750IOOptionBits IOMemoryDescriptor::getTag( void )
1751{
1752 return( _tag);
1753}
1754
b0d623f7 1755#ifndef __LP64__
55e303ae 1756// @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
1757IOPhysicalAddress
1758IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1759{
0c530ab8 1760 addr64_t physAddr = 0;
1c79356b 1761
9bccf70c 1762 if( prepare() == kIOReturnSuccess) {
0c530ab8 1763 physAddr = getPhysicalSegment64( offset, length );
1764 complete();
1765 }
0b4e3aa0 1766
0c530ab8 1767 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
0b4e3aa0 1768}
b0d623f7 1769#endif /* !__LP64__ */
0b4e3aa0 1770
1771IOByteCount IOMemoryDescriptor::readBytes
1772 (IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1773{
b0d623f7 1774 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1775 IOByteCount remaining;
1c79356b 1776
55e303ae 1777 // Assert that this entire I/O is within the available range
fe8ab488 1778 assert(offset <= _length);
1779 assert(offset + length <= _length);
1780 if (offset >= _length) {
1781 return 0;
1782 }
1c79356b 1783
1784 if (kIOMemoryThreadSafe & _flags)
1785 LOCK;
1786
1787 remaining = length = min(length, _length - offset);
1788 while (remaining) { // (process another target segment?)
1789 addr64_t srcAddr64;
1790 IOByteCount srcLen;
1c79356b 1791
b0d623f7 1792 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1793 if (!srcAddr64)
1794 break;
1c79356b 1795
1796 // Clip segment length to remaining
1797 if (srcLen > remaining)
1798 srcLen = remaining;
1c79356b 1799
1800 copypv(srcAddr64, dstAddr, srcLen,
1801 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1802
1803 dstAddr += srcLen;
1804 offset += srcLen;
1805 remaining -= srcLen;
1806 }
1c79356b 1807
1808 if (kIOMemoryThreadSafe & _flags)
1809 UNLOCK;
1810
55e303ae 1811 assert(!remaining);
1c79356b 1812
1813 return length - remaining;
1814}
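// Editor's illustrative usage of readBytes() (assumption, not in the original
// source): copy the first bytes described by a prepared descriptor into a
// kernel buffer; the return value is the number of bytes actually copied.
//
//   uint8_t buf[512];
//   IOByteCount copied = md->readBytes(0, buf, sizeof(buf));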
0b4e3aa0 1815
55e303ae 1816IOByteCount IOMemoryDescriptor::writeBytes
fe8ab488 1817 (IOByteCount inoffset, const void *bytes, IOByteCount length)
55e303ae 1818{
b0d623f7 1819 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1820 IOByteCount remaining;
fe8ab488 1821 IOByteCount offset = inoffset;
0b4e3aa0 1822
55e303ae 1823 // Assert that this entire I/O is withing the available range
fe8ab488 1824 assert(offset <= _length);
55e303ae 1825 assert(offset + length <= _length);
0b4e3aa0 1826
55e303ae 1827 assert( !(kIOMemoryPreparedReadOnly & _flags) );
0b4e3aa0 1828
55e303ae 1829 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1830 return 0;
1831 }
0b4e3aa0 1832
1833 if (kIOMemoryThreadSafe & _flags)
1834 LOCK;
1835
55e303ae
A
1836 remaining = length = min(length, _length - offset);
1837 while (remaining) { // (process another target segment?)
1838 addr64_t dstAddr64;
1839 IOByteCount dstLen;
0b4e3aa0 1840
b0d623f7 1841 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1842 if (!dstAddr64)
1843 break;
0b4e3aa0 1844
1845 // Clip segment length to remaining
1846 if (dstLen > remaining)
1847 dstLen = remaining;
0b4e3aa0 1848
1849 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1850 else
1851 {
1852 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1853 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1854 srcAddr += dstLen;
1855 }
1856 offset += dstLen;
1857 remaining -= dstLen;
1c79356b 1858 }
1c79356b 1859
1860 if (kIOMemoryThreadSafe & _flags)
1861 UNLOCK;
1862
1863 assert(!remaining);
1864
1865 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1866
55e303ae 1867 return length - remaining;
1868}
1869
1870#ifndef __LP64__
1871void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1872{
1873 panic("IOGMD::setPosition deprecated");
1874}
1875#endif /* !__LP64__ */
1876
1877static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1878
1879uint64_t
1880IOGeneralMemoryDescriptor::getPreparationID( void )
1881{
1882 ioGMDData *dataP;
1883
1884 if (!_wireCount)
b0d623f7 1885 return (kIOPreparationIDUnprepared);
7e4a7d39 1886
99c3a104
A
1887 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1888 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
316670eb
A
1889 {
1890 IOMemoryDescriptor::setPreparationID();
1891 return (IOMemoryDescriptor::getPreparationID());
1892 }
7e4a7d39
A
1893
1894 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1895 return (kIOPreparationIDUnprepared);
1896
b0d623f7
A
1897 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1898 {
b0d623f7 1899 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
b0d623f7
A
1900 }
1901 return (dataP->fPreparationID);
1902}
1903
316670eb 1904IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 1905{
316670eb
A
1906 if (!reserved)
1907 {
1908 reserved = IONew(IOMemoryDescriptorReserved, 1);
1909 if (reserved)
1910 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1911 }
1912 return (reserved);
1913}
1914
1915void IOMemoryDescriptor::setPreparationID( void )
1916{
1917 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1918 {
1919#if defined(__ppc__ )
1920 reserved->preparationID = gIOMDPreparationID++;
1921#else
1922 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1923#endif
1924 }
1925}
1926
1927uint64_t IOMemoryDescriptor::getPreparationID( void )
1928{
1929 if (reserved)
1930 return (reserved->preparationID);
1931 else
1932 return (kIOPreparationIDUnsupported);
b0d623f7 1933}
de355530 1934
0c530ab8 1935IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 1936{
99c3a104
A
1937 IOReturn err = kIOReturnSuccess;
1938 DMACommandOps params;
1939 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1940 ioGMDData *dataP;
1941
1942 params = (op & ~kIOMDDMACommandOperationMask & op);
1943 op &= kIOMDDMACommandOperationMask;
1944
1945 if (kIOMDDMAMap == op)
1946 {
1947 if (dataSize < sizeof(IOMDDMAMapArgs))
1948 return kIOReturnUnderrun;
1949
1950 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1951
1952 if (!_memoryEntries
1953 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1954
1955 if (_memoryEntries && data->fMapper)
1956 {
39236c6e 1957 bool remap;
99c3a104
A
1958 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1959 dataP = getDataP(_memoryEntries);
39236c6e
A
1960
1961 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1962 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1963
1964 remap = (dataP->fDMAMapNumAddressBits < 64)
1965 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1966 remap |= (dataP->fDMAMapAlignment > page_size);
99c3a104
A
1967 remap |= (!whole);
1968 if (remap || !dataP->fMappedBase)
1969 {
1970// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1971 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1972 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1973 {
1974 dataP->fMappedBase = data->fAlloc;
1975 data->fAllocCount = 0; // IOMD owns the alloc now
1976 }
1977 }
1978 else
1979 {
1980 data->fAlloc = dataP->fMappedBase;
1981 data->fAllocCount = 0; // IOMD owns the alloc
1982 }
39236c6e 1983 data->fMapContig = !dataP->fDiscontig;
99c3a104
A
1984 }
1985
1986 return (err);
1987 }
1988
1989 if (kIOMDAddDMAMapSpec == op)
1990 {
1991 if (dataSize < sizeof(IODMAMapSpecification))
1992 return kIOReturnUnderrun;
1993
1994 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1995
1996 if (!_memoryEntries
1997 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1998
1999 if (_memoryEntries)
2000 {
2001 dataP = getDataP(_memoryEntries);
2002 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2003 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2004 if (data->alignment > dataP->fDMAMapAlignment)
2005 dataP->fDMAMapAlignment = data->alignment;
2006 }
2007 return kIOReturnSuccess;
2008 }
2009
0c530ab8 2010 if (kIOMDGetCharacteristics == op) {
4452a7af 2011
0c530ab8
A
2012 if (dataSize < sizeof(IOMDDMACharacteristics))
2013 return kIOReturnUnderrun;
4452a7af 2014
0c530ab8
A
2015 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2016 data->fLength = _length;
2017 data->fSGCount = _rangesCount;
2018 data->fPages = _pages;
b0d623f7 2019 data->fDirection = getDirection();
0c530ab8
A
2020 if (!_wireCount)
2021 data->fIsPrepared = false;
2022 else {
2023 data->fIsPrepared = true;
2024 data->fHighestPage = _highestPage;
99c3a104
A
2025 if (_memoryEntries)
2026 {
2027 dataP = getDataP(_memoryEntries);
2028 ioPLBlock *ioplList = getIOPLList(dataP);
2029 UInt count = getNumIOPL(_memoryEntries, dataP);
0c530ab8
A
2030 if (count == 1)
2031 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2032 }
0c530ab8 2033 }
4452a7af 2034
0c530ab8 2035 return kIOReturnSuccess;
b0d623f7
A
2036
2037#if IOMD_DEBUG_DMAACTIVE
99c3a104
A
2038 } else if (kIOMDDMAActive == op) {
2039 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
2040 else {
2041 if (md->__iomd_reservedA)
2042 OSDecrementAtomic(&md->__iomd_reservedA);
2043 else
2044 panic("kIOMDSetDMAInactive");
2045 }
b0d623f7
A
2046#endif /* IOMD_DEBUG_DMAACTIVE */
2047
99c3a104 2048 } else if (kIOMDWalkSegments != op)
0c530ab8
A
2049 return kIOReturnBadArgument;
2050
2051 // Get the next segment
2052 struct InternalState {
2053 IOMDDMAWalkSegmentArgs fIO;
2054 UInt fOffset2Index;
2055 UInt fIndex;
2056 UInt fNextOffset;
2057 } *isP;
2058
2059 // Find the next segment
2060 if (dataSize < sizeof(*isP))
2061 return kIOReturnUnderrun;
2062
2063 isP = (InternalState *) vData;
2064 UInt offset = isP->fIO.fOffset;
2065 bool mapped = isP->fIO.fMapped;
2066
99c3a104
A
2067 if (IOMapper::gSystem && mapped
2068 && (!(kIOMemoryHostOnly & _flags))
2069 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2070// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2071 {
2072 if (!_memoryEntries
2073 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2074
2075 dataP = getDataP(_memoryEntries);
2076 if (dataP->fMapper)
2077 {
2078 IODMAMapSpecification mapSpec;
2079 bzero(&mapSpec, sizeof(mapSpec));
2080 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2081 mapSpec.alignment = dataP->fDMAMapAlignment;
2082 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
2083 if (kIOReturnSuccess != err) return (err);
2084 }
2085 }
2086
0c530ab8
A
2087 if (offset >= _length)
2088 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2089
2090 // Validate the previous offset
2091 UInt ind, off2Ind = isP->fOffset2Index;
99c3a104 2092 if (!params
0c530ab8
A
2093 && offset
2094 && (offset == isP->fNextOffset || off2Ind <= offset))
2095 ind = isP->fIndex;
2096 else
2097 ind = off2Ind = 0; // Start from beginning
4452a7af 2098
0c530ab8
A
2099 UInt length;
2100 UInt64 address;
99c3a104
A
2101
2102
0c530ab8 2103 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
4452a7af 2104
0c530ab8
A
2105 // Physical address based memory descriptor
2106 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
4452a7af 2107
0c530ab8 2108 // Find the range after the one that contains the offset
b0d623f7 2109 mach_vm_size_t len;
0c530ab8
A
2110 for (len = 0; off2Ind <= offset; ind++) {
2111 len = physP[ind].length;
2112 off2Ind += len;
2113 }
4452a7af 2114
0c530ab8
A
2115 // Calculate length within range and starting address
2116 length = off2Ind - offset;
2117 address = physP[ind - 1].address + len - length;
89b3af67 2118
99c3a104
A
2119 if (true && mapped && _memoryEntries
2120 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2121 {
2122 address = dataP->fMappedBase + offset;
2123 }
2124 else
2125 {
2126 // see how far we can coalesce ranges
2127 while (ind < _rangesCount && address + length == physP[ind].address) {
2128 len = physP[ind].length;
2129 length += len;
2130 off2Ind += len;
2131 ind++;
2132 }
0c530ab8 2133 }
4452a7af 2134
0c530ab8
A
2135 // correct contiguous check overshoot
2136 ind--;
2137 off2Ind -= len;
2138 }
b0d623f7 2139#ifndef __LP64__
0c530ab8 2140 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
4452a7af 2141
0c530ab8
A
2142 // Physical address based memory descriptor
2143 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
4452a7af 2144
0c530ab8
A
2145 // Find the range after the one that contains the offset
2146 mach_vm_size_t len;
2147 for (len = 0; off2Ind <= offset; ind++) {
2148 len = physP[ind].length;
2149 off2Ind += len;
2150 }
89b3af67 2151
0c530ab8
A
2152 // Calculate length within range and starting address
2153 length = off2Ind - offset;
2154 address = physP[ind - 1].address + len - length;
89b3af67 2155
99c3a104
A
2156 if (true && mapped && _memoryEntries
2157 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2158 {
2159 address = dataP->fMappedBase + offset;
2160 }
2161 else
2162 {
2163 // see how far we can coalesce ranges
2164 while (ind < _rangesCount && address + length == physP[ind].address) {
2165 len = physP[ind].length;
2166 length += len;
2167 off2Ind += len;
2168 ind++;
2169 }
0c530ab8 2170 }
0c530ab8
A
2171 // correct contiguous check overshoot
2172 ind--;
2173 off2Ind -= len;
99c3a104 2174 }
b0d623f7 2175#endif /* !__LP64__ */
0c530ab8
A
2176 else do {
2177 if (!_wireCount)
2178 panic("IOGMD: not wired for the IODMACommand");
4452a7af 2179
0c530ab8 2180 assert(_memoryEntries);
4452a7af 2181
99c3a104 2182 dataP = getDataP(_memoryEntries);
0c530ab8
A
2183 const ioPLBlock *ioplList = getIOPLList(dataP);
2184 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2185 upl_page_info_t *pageList = getPageList(dataP);
4452a7af 2186
0c530ab8 2187 assert(numIOPLs > 0);
4452a7af 2188
0c530ab8
A
2189 // Scan through iopl info blocks looking for block containing offset
2190 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2191 ind++;
4452a7af 2192
0c530ab8
A
2193 // Go back to actual range as search goes past it
2194 ioPLBlock ioplInfo = ioplList[ind - 1];
2195 off2Ind = ioplInfo.fIOMDOffset;
2196
2197 if (ind < numIOPLs)
2198 length = ioplList[ind].fIOMDOffset;
2199 else
2200 length = _length;
2201 length -= offset; // Remainder within iopl
2202
 2203 // Subtract this iopl's starting offset within the total list
2204 offset -= off2Ind;
2205
2206 // If a mapped address is requested and this is a pre-mapped IOPL
 2207 // then we just need to compute an offset relative to the mapped base.
99c3a104 2208 if (mapped && dataP->fMappedBase) {
0c530ab8 2209 offset += (ioplInfo.fPageOffset & PAGE_MASK);
99c3a104 2210 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
0c530ab8
A
 2211 continue; // Done; leave the do/while(false) now
2212 }
2213
2214 // The offset is rebased into the current iopl.
2215 // Now add the iopl 1st page offset.
2216 offset += ioplInfo.fPageOffset;
2217
2218 // For external UPLs the fPageInfo field points directly to
2219 // the upl's upl_page_info_t array.
2220 if (ioplInfo.fFlags & kIOPLExternUPL)
2221 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2222 else
2223 pageList = &pageList[ioplInfo.fPageInfo];
2224
2225 // Check for direct device non-paged memory
2226 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2227 address = ptoa_64(pageList->phys_addr) + offset;
 2228 continue; // Done; leave the do/while(false) now
2229 }
4452a7af 2230
0c530ab8
A
 2231 // Now we need to compute the index into the pageList
2232 UInt pageInd = atop_32(offset);
2233 offset &= PAGE_MASK;
2234
2235 // Compute the starting address of this segment
2236 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
b0d623f7
A
2237 if (!pageAddr) {
2238 panic("!pageList phys_addr");
6d2010ae 2239 }
b0d623f7 2240
0c530ab8
A
2241 address = ptoa_64(pageAddr) + offset;
2242
 2243 // length is currently set to the length of the remainder of the iopl.
2244 // We need to check that the remainder of the iopl is contiguous.
2245 // This is indicated by pageList[ind].phys_addr being sequential.
2246 IOByteCount contigLength = PAGE_SIZE - offset;
2247 while (contigLength < length
2248 && ++pageAddr == pageList[++pageInd].phys_addr)
2249 {
2250 contigLength += PAGE_SIZE;
2251 }
2252
2253 if (contigLength < length)
2254 length = contigLength;
2255
2256
2257 assert(address);
2258 assert(length);
2259
2260 } while (false);
2261
2262 // Update return values and state
2263 isP->fIO.fIOVMAddr = address;
2264 isP->fIO.fLength = length;
2265 isP->fIndex = ind;
2266 isP->fOffset2Index = off2Ind;
2267 isP->fNextOffset = isP->fIO.fOffset + length;
2268
2269 return kIOReturnSuccess;
2270}
2271
2272addr64_t
b0d623f7 2273IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2274{
fe8ab488
A
2275 IOReturn ret;
2276 mach_vm_address_t address = 0;
2277 mach_vm_size_t length = 0;
2278 IOMapper * mapper = gIOSystemMapper;
2279 IOOptionBits type = _flags & kIOMemoryTypeMask;
b0d623f7
A
2280
2281 if (lengthOfSegment)
2282 *lengthOfSegment = 0;
2283
2284 if (offset >= _length)
2285 return 0;
4452a7af 2286
b0d623f7
A
2287 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2288 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2289 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2290 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2d21ac55 2291
b0d623f7
A
2292 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2293 {
2294 unsigned rangesIndex = 0;
2295 Ranges vec = _ranges;
fe8ab488 2296 mach_vm_address_t addr;
b0d623f7
A
2297
2298 // Find starting address within the vector of ranges
2299 for (;;) {
2300 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2301 if (offset < length)
2302 break;
2303 offset -= length; // (make offset relative)
2304 rangesIndex++;
2305 }
2306
2307 // Now that we have the starting range,
 2308 // let's find the last contiguous range
2309 addr += offset;
2310 length -= offset;
2311
2312 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
fe8ab488
A
2313 mach_vm_address_t newAddr;
2314 mach_vm_size_t newLen;
b0d623f7
A
2315
2316 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2317 if (addr + length != newAddr)
2318 break;
2319 length += newLen;
2320 }
2321 if (addr)
2322 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2323 }
2324 else
0c530ab8
A
2325 {
2326 IOMDDMAWalkSegmentState _state;
99c3a104 2327 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
0c530ab8
A
2328
2329 state->fOffset = offset;
2330 state->fLength = _length - offset;
99c3a104 2331 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
0c530ab8
A
2332
2333 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2334
2335 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
b0d623f7 2336 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
0c530ab8
A
2337 ret, this, state->fOffset,
2338 state->fIOVMAddr, state->fLength);
2339 if (kIOReturnSuccess == ret)
2340 {
2341 address = state->fIOVMAddr;
2342 length = state->fLength;
2343 }
b0d623f7
A
2344
2345 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2346 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2347
2348 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2349 {
2350 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2351 {
2352 addr64_t origAddr = address;
2353 IOByteCount origLen = length;
2354
2355 address = mapper->mapAddr(origAddr);
2356 length = page_size - (address & (page_size - 1));
2357 while ((length < origLen)
2358 && ((address + length) == mapper->mapAddr(origAddr + length)))
2359 length += page_size;
2360 if (length > origLen)
2361 length = origLen;
2362 }
b0d623f7 2363 }
4452a7af
A
2364 }
2365
b0d623f7
A
2366 if (!address)
2367 length = 0;
2368
4452a7af
A
2369 if (lengthOfSegment)
2370 *lengthOfSegment = length;
2371
0c530ab8
A
2372 return (address);
2373}
2374
b0d623f7
A
2375#ifndef __LP64__
2376addr64_t
2377IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 2378{
b0d623f7 2379 addr64_t address = 0;
0c530ab8 2380
b0d623f7 2381 if (options & _kIOMemorySourceSegment)
0c530ab8 2382 {
b0d623f7
A
2383 address = getSourceSegment(offset, lengthOfSegment);
2384 }
2385 else if (options & kIOMemoryMapperNone)
2386 {
2387 address = getPhysicalSegment64(offset, lengthOfSegment);
2388 }
2389 else
2390 {
2391 address = getPhysicalSegment(offset, lengthOfSegment);
2392 }
0c530ab8 2393
b0d623f7
A
2394 return (address);
2395}
0c530ab8 2396
b0d623f7
A
2397addr64_t
2398IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2399{
2400 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2401}
0c530ab8 2402
b0d623f7
A
2403IOPhysicalAddress
2404IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2405{
2406 addr64_t address = 0;
2407 IOByteCount length = 0;
0c530ab8 2408
b0d623f7
A
2409 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2410
2411 if (lengthOfSegment)
2412 length = *lengthOfSegment;
0c530ab8
A
2413
2414 if ((address + length) > 0x100000000ULL)
2415 {
2d21ac55 2416 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 2417 address, (long) length, (getMetaClass())->getClassName());
0c530ab8
A
2418 }
2419
0c530ab8 2420 return ((IOPhysicalAddress) address);
55e303ae 2421}
de355530 2422
0c530ab8
A
2423addr64_t
2424IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae
A
2425{
2426 IOPhysicalAddress phys32;
2427 IOByteCount length;
2428 addr64_t phys64;
0c530ab8 2429 IOMapper * mapper = 0;
0b4e3aa0 2430
55e303ae
A
2431 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2432 if (!phys32)
2433 return 0;
0b4e3aa0 2434
55e303ae 2435 if (gIOSystemMapper)
0c530ab8
A
2436 mapper = gIOSystemMapper;
2437
2438 if (mapper)
1c79356b 2439 {
55e303ae
A
2440 IOByteCount origLen;
2441
0c530ab8 2442 phys64 = mapper->mapAddr(phys32);
55e303ae
A
2443 origLen = *lengthOfSegment;
2444 length = page_size - (phys64 & (page_size - 1));
2445 while ((length < origLen)
0c530ab8 2446 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
55e303ae
A
2447 length += page_size;
2448 if (length > origLen)
2449 length = origLen;
2450
2451 *lengthOfSegment = length;
0b4e3aa0 2452 }
55e303ae
A
2453 else
2454 phys64 = (addr64_t) phys32;
1c79356b 2455
55e303ae 2456 return phys64;
0b4e3aa0
A
2457}
2458
0c530ab8 2459IOPhysicalAddress
b0d623f7 2460IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 2461{
b0d623f7 2462 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
0b4e3aa0
A
2463}
2464
b0d623f7
A
2465IOPhysicalAddress
2466IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2467{
2468 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2469}
1c79356b 2470
b0d623f7
A
2471void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2472 IOByteCount * lengthOfSegment)
2473{
2474 if (_task == kernel_task)
2475 return (void *) getSourceSegment(offset, lengthOfSegment);
2476 else
2477 panic("IOGMD::getVirtualSegment deprecated");
91447636 2478
b0d623f7
A
2479 return 0;
2480}
2481#endif /* !__LP64__ */
91447636 2482
0c530ab8
A
2483IOReturn
2484IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2485{
99c3a104
A
2486 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2487 DMACommandOps params;
2488 IOReturn err;
2489
2490 params = (op & ~kIOMDDMACommandOperationMask & op);
2491 op &= kIOMDDMACommandOperationMask;
2492
0c530ab8
A
2493 if (kIOMDGetCharacteristics == op) {
2494 if (dataSize < sizeof(IOMDDMACharacteristics))
2495 return kIOReturnUnderrun;
2496
2497 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2498 data->fLength = getLength();
2499 data->fSGCount = 0;
b0d623f7 2500 data->fDirection = getDirection();
0c530ab8
A
2501 data->fIsPrepared = true; // Assume prepared - fails safe
2502 }
99c3a104 2503 else if (kIOMDWalkSegments == op) {
0c530ab8
A
2504 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2505 return kIOReturnUnderrun;
2506
2507 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2508 IOByteCount offset = (IOByteCount) data->fOffset;
2509
2510 IOPhysicalLength length;
0c530ab8 2511 if (data->fMapped && IOMapper::gSystem)
99c3a104 2512 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
0c530ab8 2513 else
99c3a104 2514 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
0c530ab8
A
2515 data->fLength = length;
2516 }
99c3a104
A
2517 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2518 else if (kIOMDDMAMap == op)
2519 {
2520 if (dataSize < sizeof(IOMDDMAMapArgs))
2521 return kIOReturnUnderrun;
2522 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2523
2524 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2525
39236c6e 2526 data->fMapContig = true;
99c3a104
A
2527 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
2528 return (err);
2529 }
2530 else return kIOReturnBadArgument;
0c530ab8
A
2531
2532 return kIOReturnSuccess;
2533}
2534
b0d623f7
A
2535IOReturn
2536IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2537 IOOptionBits * oldState )
2538{
2539 IOReturn err = kIOReturnSuccess;
fe8ab488 2540
b0d623f7
A
2541 vm_purgable_t control;
2542 int state;
2543
fe8ab488 2544 if (_memRef)
b0d623f7
A
2545 {
2546 err = super::setPurgeable(newState, oldState);
2547 }
2548 else
2549 {
2550 if (kIOMemoryThreadSafe & _flags)
2551 LOCK;
2552 do
2553 {
2554 // Find the appropriate vm_map for the given task
2555 vm_map_t curMap;
2556 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2557 {
2558 err = kIOReturnNotReady;
2559 break;
2560 }
39236c6e
A
2561 else if (!_task)
2562 {
2563 err = kIOReturnUnsupported;
2564 break;
2565 }
b0d623f7
A
2566 else
2567 curMap = get_task_map(_task);
2568
2569 // can only do one range
2570 Ranges vec = _ranges;
2571 IOOptionBits type = _flags & kIOMemoryTypeMask;
fe8ab488
A
2572 mach_vm_address_t addr;
2573 mach_vm_size_t len;
b0d623f7
A
2574 getAddrLenForInd(addr, len, type, vec, 0);
2575
2576 err = purgeableControlBits(newState, &control, &state);
2577 if (kIOReturnSuccess != err)
2578 break;
2579 err = mach_vm_purgable_control(curMap, addr, control, &state);
2580 if (oldState)
2581 {
2582 if (kIOReturnSuccess == err)
2583 {
2584 err = purgeableStateBits(&state);
2585 *oldState = state;
2586 }
2587 }
2588 }
2589 while (false);
2590 if (kIOMemoryThreadSafe & _flags)
2591 UNLOCK;
2592 }
fe8ab488 2593
b0d623f7
A
2594 return (err);
2595}
2596
91447636
A
2597IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2598 IOOptionBits * oldState )
2599{
fe8ab488 2600 IOReturn err = kIOReturnNotReady;
b0d623f7 2601
fe8ab488
A
2602 if (kIOMemoryThreadSafe & _flags) LOCK;
2603 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2604 if (kIOMemoryThreadSafe & _flags) UNLOCK;
b0d623f7 2605
91447636
A
2606 return (err);
2607}
39236c6e
A
2608
2609IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2610 IOByteCount * dirtyPageCount )
2611{
fe8ab488 2612 IOReturn err = kIOReturnNotReady;
39236c6e 2613
fe8ab488
A
2614 if (kIOMemoryThreadSafe & _flags) LOCK;
2615 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
39236c6e
A
2616 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2617
2618 return (err);
2619}
2620
2621
91447636
A
2622extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2623extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2624
0b4c1975
A
2625static void SetEncryptOp(addr64_t pa, unsigned int count)
2626{
2627 ppnum_t page, end;
2628
2629 page = atop_64(round_page_64(pa));
2630 end = atop_64(trunc_page_64(pa + count));
2631 for (; page < end; page++)
2632 {
2633 pmap_clear_noencrypt(page);
2634 }
2635}
2636
2637static void ClearEncryptOp(addr64_t pa, unsigned int count)
2638{
2639 ppnum_t page, end;
2640
2641 page = atop_64(round_page_64(pa));
2642 end = atop_64(trunc_page_64(pa + count));
2643 for (; page < end; page++)
2644 {
2645 pmap_set_noencrypt(page);
2646 }
2647}
2648
91447636
A
2649IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2650 IOByteCount offset, IOByteCount length )
2651{
2652 IOByteCount remaining;
316670eb 2653 unsigned int res;
91447636
A
2654 void (*func)(addr64_t pa, unsigned int count) = 0;
2655
2656 switch (options)
2657 {
2658 case kIOMemoryIncoherentIOFlush:
2659 func = &dcache_incoherent_io_flush64;
2660 break;
2661 case kIOMemoryIncoherentIOStore:
2662 func = &dcache_incoherent_io_store64;
2663 break;
0b4c1975
A
2664
2665 case kIOMemorySetEncrypted:
2666 func = &SetEncryptOp;
2667 break;
2668 case kIOMemoryClearEncrypted:
2669 func = &ClearEncryptOp;
2670 break;
91447636
A
2671 }
2672
2673 if (!func)
2674 return (kIOReturnUnsupported);
2675
b0d623f7
A
2676 if (kIOMemoryThreadSafe & _flags)
2677 LOCK;
2678
316670eb 2679 res = 0x0UL;
91447636
A
2680 remaining = length = min(length, getLength() - offset);
2681 while (remaining)
2682 // (process another target segment?)
2683 {
2684 addr64_t dstAddr64;
2685 IOByteCount dstLen;
2686
b0d623f7 2687 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
91447636
A
2688 if (!dstAddr64)
2689 break;
2690
2691 // Clip segment length to remaining
2692 if (dstLen > remaining)
2693 dstLen = remaining;
2694
2695 (*func)(dstAddr64, dstLen);
2696
2697 offset += dstLen;
2698 remaining -= dstLen;
2699 }
2700
b0d623f7
A
2701 if (kIOMemoryThreadSafe & _flags)
2702 UNLOCK;
2703
91447636
A
2704 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2705}
2706
316670eb 2707#if defined(__i386__) || defined(__x86_64__)
55e303ae
A
2708extern vm_offset_t first_avail;
2709#define io_kernel_static_end first_avail
316670eb
A
2710#else
2711#error io_kernel_static_end is undefined for this architecture
2712#endif
55e303ae
A
2713
2714static kern_return_t
2715io_get_kernel_static_upl(
91447636 2716 vm_map_t /* map */,
b0d623f7 2717 uintptr_t offset,
55e303ae
A
2718 vm_size_t *upl_size,
2719 upl_t *upl,
2720 upl_page_info_array_t page_list,
0c530ab8
A
2721 unsigned int *count,
2722 ppnum_t *highest_page)
1c79356b 2723{
55e303ae
A
2724 unsigned int pageCount, page;
2725 ppnum_t phys;
0c530ab8 2726 ppnum_t highestPage = 0;
1c79356b 2727
55e303ae
A
2728 pageCount = atop_32(*upl_size);
2729 if (pageCount > *count)
2730 pageCount = *count;
1c79356b 2731
55e303ae 2732 *upl = NULL;
1c79356b 2733
55e303ae
A
2734 for (page = 0; page < pageCount; page++)
2735 {
2736 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2737 if (!phys)
2738 break;
2739 page_list[page].phys_addr = phys;
2740 page_list[page].pageout = 0;
2741 page_list[page].absent = 0;
2742 page_list[page].dirty = 0;
2743 page_list[page].precious = 0;
2744 page_list[page].device = 0;
0c530ab8 2745 if (phys > highestPage)
b0d623f7 2746 highestPage = phys;
55e303ae 2747 }
0b4e3aa0 2748
0c530ab8
A
2749 *highest_page = highestPage;
2750
55e303ae
A
2751 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2752}
0b4e3aa0 2753
55e303ae
A
2754IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2755{
91447636 2756 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 2757 IOReturn error = kIOReturnCannotWire;
55e303ae 2758 ioGMDData *dataP;
99c3a104 2759 upl_page_info_array_t pageInfo;
39236c6e 2760 ppnum_t mapBase;
1c79356b 2761
0c530ab8 2762 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 2763
39236c6e
A
2764 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2765 forDirection = (IODirection) (forDirection | getDirection());
55e303ae
A
2766
2767 int uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 2768 switch (kIODirectionOutIn & forDirection)
55e303ae
A
2769 {
2770 case kIODirectionOut:
2771 // Pages do not need to be marked as dirty on commit
2772 uplFlags = UPL_COPYOUT_FROM;
55e303ae
A
2773 break;
2774
2775 case kIODirectionIn:
2776 default:
2777 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2778 break;
2779 }
55e303ae 2780
39236c6e
A
2781 if (_wireCount)
2782 {
2783 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2784 {
2785 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2786 error = kIOReturnNotWritable;
2787 }
2788 else error = kIOReturnSuccess;
2789 return (error);
2790 }
2791
2792 dataP = getDataP(_memoryEntries);
2793 IOMapper *mapper;
2794 mapper = dataP->fMapper;
2795 dataP->fMappedBase = 0;
2796
2797 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
0c530ab8 2798 if (kIODirectionPrepareToPhys32 & forDirection)
99c3a104
A
2799 {
2800 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2801 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2802 }
fe8ab488
A
2803 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2804 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
15129b1c 2805 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
39236c6e
A
2806
2807 mapBase = 0;
0c530ab8 2808
fe8ab488
A
2809 // Note that appendBytes(NULL) zeros the data up to the desired length
2810 // and the length parameter is an unsigned int
2811 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2812 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2813 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
99c3a104
A
2814 dataP = 0;
2815
91447636 2816 // Find the appropriate vm_map for the given task
55e303ae 2817 vm_map_t curMap;
fe8ab488
A
2818 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2819 else curMap = get_task_map(_task);
55e303ae 2820
91447636
A
2821 // Iterate over the vector of virtual ranges
2822 Ranges vec = _ranges;
39236c6e
A
2823 unsigned int pageIndex = 0;
2824 IOByteCount mdOffset = 0;
2825 ppnum_t highestPage = 0;
99c3a104 2826
fe8ab488
A
2827 IOMemoryEntry * memRefEntry = 0;
2828 if (_memRef) memRefEntry = &_memRef->entries[0];
2829
55e303ae
A
2830 for (UInt range = 0; range < _rangesCount; range++) {
2831 ioPLBlock iopl;
fe8ab488
A
2832 mach_vm_address_t startPage;
2833 mach_vm_size_t numBytes;
0c530ab8 2834 ppnum_t highPage = 0;
55e303ae 2835
91447636
A
2836 // Get the startPage address and length of vec[range]
2837 getAddrLenForInd(startPage, numBytes, type, vec, range);
b0d623f7 2838 iopl.fPageOffset = startPage & PAGE_MASK;
91447636
A
2839 numBytes += iopl.fPageOffset;
2840 startPage = trunc_page_64(startPage);
2841
55e303ae 2842 if (mapper)
99c3a104 2843 iopl.fMappedPage = mapBase + pageIndex;
55e303ae 2844 else
99c3a104 2845 iopl.fMappedPage = 0;
55e303ae 2846
91447636 2847 // Iterate over the current range, creating UPLs
55e303ae 2848 while (numBytes) {
91447636
A
2849 vm_address_t kernelStart = (vm_address_t) startPage;
2850 vm_map_t theMap;
fe8ab488
A
2851 if (curMap) theMap = curMap;
2852 else if (_memRef)
2853 {
2854 theMap = NULL;
2855 }
2856 else
2857 {
91447636
A
2858 assert(_task == kernel_task);
2859 theMap = IOPageableMapForAddress(kernelStart);
2860 }
91447636 2861
55e303ae 2862 int ioplFlags = uplFlags;
99c3a104
A
2863 dataP = getDataP(_memoryEntries);
2864 pageInfo = getPageList(dataP);
55e303ae
A
2865 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2866
b0d623f7 2867 vm_size_t ioplSize = round_page(numBytes);
55e303ae
A
2868 unsigned int numPageInfo = atop_32(ioplSize);
2869
fe8ab488 2870 if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) {
55e303ae 2871 error = io_get_kernel_static_upl(theMap,
91447636
A
2872 kernelStart,
2873 &ioplSize,
2874 &iopl.fIOPL,
2875 baseInfo,
0c530ab8
A
2876 &numPageInfo,
2877 &highPage);
91447636 2878 }
fe8ab488
A
2879 else if (_memRef) {
2880 memory_object_offset_t entryOffset;
2881
2882 entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset);
2883 if (entryOffset >= memRefEntry->size) {
2884 memRefEntry++;
2885 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2886 entryOffset = 0;
2887 }
2888 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2889 error = memory_object_iopl_request(memRefEntry->entry,
2890 entryOffset,
2891 &ioplSize,
2892 &iopl.fIOPL,
2893 baseInfo,
2894 &numPageInfo,
2895 &ioplFlags);
91447636
A
2896 }
2897 else {
2898 assert(theMap);
2899 error = vm_map_create_upl(theMap,
2900 startPage,
b0d623f7 2901 (upl_size_t*)&ioplSize,
91447636
A
2902 &iopl.fIOPL,
2903 baseInfo,
2904 &numPageInfo,
2905 &ioplFlags);
de355530
A
2906 }
2907
55e303ae
A
2908 assert(ioplSize);
2909 if (error != KERN_SUCCESS)
2910 goto abortExit;
2911
0c530ab8
A
2912 if (iopl.fIOPL)
2913 highPage = upl_get_highest_page(iopl.fIOPL);
2914 if (highPage > highestPage)
2915 highestPage = highPage;
2916
2d21ac55 2917 error = kIOReturnCannotWire;
55e303ae
A
2918
2919 if (baseInfo->device) {
2920 numPageInfo = 1;
39236c6e 2921 iopl.fFlags = kIOPLOnDevice;
55e303ae
A
2922 }
2923 else {
2924 iopl.fFlags = 0;
55e303ae
A
2925 }
2926
2927 iopl.fIOMDOffset = mdOffset;
2928 iopl.fPageInfo = pageIndex;
39236c6e 2929 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
55e303ae 2930
6d2010ae
A
2931#if 0
2932 // used to remove the upl for auto prepares here, for some errant code
 2933 // that freed memory before releasing the descriptor pointing at it
55e303ae
A
2934 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2935 {
91447636
A
2936 upl_commit(iopl.fIOPL, 0, 0);
2937 upl_deallocate(iopl.fIOPL);
55e303ae 2938 iopl.fIOPL = 0;
de355530 2939 }
6d2010ae 2940#endif
55e303ae
A
2941
2942 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2943 // Clean up partial created and unsaved iopl
91447636
A
2944 if (iopl.fIOPL) {
2945 upl_abort(iopl.fIOPL, 0);
2946 upl_deallocate(iopl.fIOPL);
2947 }
55e303ae
A
2948 goto abortExit;
2949 }
99c3a104 2950 dataP = 0;
55e303ae
A
2951
 2952 // Check for multiple iopls in one virtual range
2953 pageIndex += numPageInfo;
2954 mdOffset -= iopl.fPageOffset;
2955 if (ioplSize < numBytes) {
2956 numBytes -= ioplSize;
2957 startPage += ioplSize;
2958 mdOffset += ioplSize;
2959 iopl.fPageOffset = 0;
99c3a104 2960 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
55e303ae
A
2961 }
2962 else {
2963 mdOffset += numBytes;
2964 break;
2965 }
1c79356b
A
2966 }
2967 }
55e303ae 2968
0c530ab8
A
2969 _highestPage = highestPage;
2970
39236c6e
A
2971 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2972
1c79356b
A
2973 return kIOReturnSuccess;
2974
2975abortExit:
55e303ae
A
2976 {
2977 dataP = getDataP(_memoryEntries);
91447636 2978 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
2979 ioPLBlock *ioplList = getIOPLList(dataP);
2980
2981 for (UInt range = 0; range < done; range++)
2982 {
91447636
A
2983 if (ioplList[range].fIOPL) {
2984 upl_abort(ioplList[range].fIOPL, 0);
2985 upl_deallocate(ioplList[range].fIOPL);
2986 }
55e303ae 2987 }
6d2010ae 2988 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
1c79356b
A
2989 }
2990
2d21ac55
A
2991 if (error == KERN_FAILURE)
2992 error = kIOReturnCannotWire;
39236c6e
A
2993 else if (error == KERN_MEMORY_ERROR)
2994 error = kIOReturnNoResources;
2d21ac55 2995
55e303ae
A
2996 return error;
2997}
d7e50217 2998
99c3a104
A
2999bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3000{
3001 ioGMDData * dataP;
3002 unsigned dataSize = size;
3003
3004 if (!_memoryEntries) {
3005 _memoryEntries = OSData::withCapacity(dataSize);
3006 if (!_memoryEntries)
3007 return false;
3008 }
3009 else if (!_memoryEntries->initWithCapacity(dataSize))
3010 return false;
3011
3012 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3013 dataP = getDataP(_memoryEntries);
3014
3015 if (mapper == kIOMapperWaitSystem) {
3016 IOMapper::checkForSystemMapper();
3017 mapper = IOMapper::gSystem;
3018 }
3019 dataP->fMapper = mapper;
3020 dataP->fPageCnt = 0;
3021 dataP->fMappedBase = 0;
3022 dataP->fDMAMapNumAddressBits = 64;
3023 dataP->fDMAMapAlignment = 0;
3024 dataP->fPreparationID = kIOPreparationIDUnprepared;
39236c6e 3025 dataP->fDiscontig = false;
fe8ab488 3026 dataP->fCompletionError = false;
99c3a104
A
3027
3028 return (true);
3029}
3030
3031IOReturn IOMemoryDescriptor::dmaMap(
3032 IOMapper * mapper,
3033 const IODMAMapSpecification * mapSpec,
3034 uint64_t offset,
3035 uint64_t length,
3036 uint64_t * address,
3037 ppnum_t * mapPages)
3038{
3039 IOMDDMAWalkSegmentState walkState;
3040 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
3041 IOOptionBits mdOp;
3042 IOReturn ret;
3043 IOPhysicalLength segLen;
3044 addr64_t phys, align, pageOffset;
3045 ppnum_t base, pageIndex, pageCount;
3046 uint64_t index;
3047 uint32_t mapOptions = 0;
3048
3049 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3050
3051 walkArgs->fMapped = false;
3052 mdOp = kIOMDFirstSegment;
3053 pageCount = 0;
3054 for (index = 0; index < length; )
3055 {
3056 if (index && (page_mask & (index + pageOffset))) break;
3057
3058 walkArgs->fOffset = offset + index;
3059 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3060 mdOp = kIOMDWalkSegments;
3061 if (ret != kIOReturnSuccess) break;
3062 phys = walkArgs->fIOVMAddr;
3063 segLen = walkArgs->fLength;
3064
3065 align = (phys & page_mask);
3066 if (!index) pageOffset = align;
3067 else if (align) break;
3068 pageCount += atop_64(round_page_64(align + segLen));
3069 index += segLen;
3070 }
3071
3072 if (index < length) return (kIOReturnVMError);
3073
3074 base = mapper->iovmMapMemory(this, offset, pageCount,
3075 mapOptions, NULL, mapSpec);
3076
3077 if (!base) return (kIOReturnNoResources);
3078
3079 mdOp = kIOMDFirstSegment;
3080 for (pageIndex = 0, index = 0; index < length; )
3081 {
3082 walkArgs->fOffset = offset + index;
3083 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3084 mdOp = kIOMDWalkSegments;
3085 if (ret != kIOReturnSuccess) break;
3086 phys = walkArgs->fIOVMAddr;
3087 segLen = walkArgs->fLength;
3088
3089 ppnum_t page = atop_64(phys);
3090 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
3091 while (count--)
3092 {
3093 mapper->iovmInsert(base, pageIndex, page);
3094 page++;
3095 pageIndex++;
3096 }
3097 index += segLen;
3098 }
3099 if (pageIndex != pageCount) panic("pageIndex");
3100
3101 *address = ptoa_64(base) + pageOffset;
3102 if (mapPages) *mapPages = pageCount;
3103
3104 return (kIOReturnSuccess);
3105}
3106
3107IOReturn IOGeneralMemoryDescriptor::dmaMap(
3108 IOMapper * mapper,
3109 const IODMAMapSpecification * mapSpec,
3110 uint64_t offset,
3111 uint64_t length,
3112 uint64_t * address,
3113 ppnum_t * mapPages)
3114{
3115 IOReturn err = kIOReturnSuccess;
3116 ioGMDData * dataP;
3117 IOOptionBits type = _flags & kIOMemoryTypeMask;
3118
3119 *address = 0;
3120 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3121
3122 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3123 || offset || (length != _length))
3124 {
3125 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
3126 }
3127 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3128 {
3129 const ioPLBlock * ioplList = getIOPLList(dataP);
3130 upl_page_info_t * pageList;
3131 uint32_t mapOptions = 0;
3132 ppnum_t base;
3133
3134 IODMAMapSpecification mapSpec;
3135 bzero(&mapSpec, sizeof(mapSpec));
3136 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3137 mapSpec.alignment = dataP->fDMAMapAlignment;
3138
3139 // For external UPLs the fPageInfo field points directly to
3140 // the upl's upl_page_info_t array.
3141 if (ioplList->fFlags & kIOPLExternUPL)
3142 {
3143 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3144 mapOptions |= kIODMAMapPagingPath;
3145 }
3146 else
3147 pageList = getPageList(dataP);
3148
3149 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3150
3151 // Check for direct device non-paged memory
3152 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3153
3154 base = mapper->iovmMapMemory(
3155 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
3156 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
3157 if (mapPages) *mapPages = _pages;
3158 }
3159
3160 return (err);
3161}
3162
55e303ae
A
3163/*
3164 * prepare
3165 *
3166 * Prepare the memory for an I/O transfer. This involves paging in
3167 * the memory, if necessary, and wiring it down for the duration of
3168 * the transfer. The complete() method completes the processing of
3169 * the memory after the I/O transfer finishes. This method needn't
 3170 * be called for non-pageable memory.
3171 */
99c3a104 3172
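/*
 * Illustrative usage sketch (not part of the original source): a client
 * typically brackets its transfer with prepare()/complete(). The buffer
 * address, length, and task below are hypothetical placeholders.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                 bufferAddr, bufferLen, kIODirectionIn, clientTask);
 *   if (md && (kIOReturnSuccess == md->prepare()))
 *   {
 *       // ... program the device / run the I/O against the wired pages ...
 *       md->complete();        // must balance the earlier prepare()
 *   }
 *   if (md) md->release();
 */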
55e303ae
A
3173IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3174{
91447636
A
3175 IOReturn error = kIOReturnSuccess;
3176 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 3177
2d21ac55
A
3178 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3179 return kIOReturnSuccess;
3180
3181 if (_prepareLock)
3182 IOLockLock(_prepareLock);
3183
39236c6e
A
3184 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3185 {
3186 error = wireVirtual(forDirection);
de355530
A
3187 }
3188
2d21ac55 3189 if (kIOReturnSuccess == error)
0b4c1975 3190 {
99c3a104
A
3191 if (1 == ++_wireCount)
3192 {
3193 if (kIOMemoryClearEncrypt & _flags)
3194 {
3195 performOperation(kIOMemoryClearEncrypted, 0, _length);
3196 }
3197 }
0b4c1975
A
3198 }
3199
2d21ac55
A
3200 if (_prepareLock)
3201 IOLockUnlock(_prepareLock);
3202
3203 return error;
1c79356b
A
3204}
3205
3206/*
3207 * complete
3208 *
3209 * Complete processing of the memory after an I/O transfer finishes.
3210 * This method should not be called unless a prepare was previously
 3211 * issued; the prepare() and complete() must occur in pairs,
3212 * before and after an I/O transfer involving pageable memory.
3213 */
6d2010ae 3214
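/*
 * Illustrative sketch (not part of the original source): a caller whose
 * transfer failed can complete with kIODirectionCompleteWithError so the
 * wired UPLs are aborted rather than committed (see fCompletionError in
 * the implementation below). The hwStatus value is a hypothetical placeholder.
 *
 *   if (kIOReturnSuccess == hwStatus)
 *       md->complete();
 *   else
 *       md->complete(kIODirectionCompleteWithError);
 */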
fe8ab488 3215IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
1c79356b 3216{
2d21ac55 3217 IOOptionBits type = _flags & kIOMemoryTypeMask;
fe8ab488 3218 ioGMDData * dataP;
1c79356b 3219
2d21ac55
A
3220 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3221 return kIOReturnSuccess;
1c79356b 3222
2d21ac55
A
3223 if (_prepareLock)
3224 IOLockLock(_prepareLock);
91447636 3225
2d21ac55
A
3226 assert(_wireCount);
3227
fe8ab488
A
3228 if ((kIODirectionCompleteWithError & forDirection)
3229 && (dataP = getDataP(_memoryEntries)))
3230 dataP->fCompletionError = true;
3231
2d21ac55
A
3232 if (_wireCount)
3233 {
0b4c1975
A
3234 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3235 {
3236 performOperation(kIOMemorySetEncrypted, 0, _length);
3237 }
3238
2d21ac55 3239 _wireCount--;
fe8ab488 3240 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
2d21ac55
A
3241 {
3242 IOOptionBits type = _flags & kIOMemoryTypeMask;
fe8ab488 3243 dataP = getDataP(_memoryEntries);
2d21ac55 3244 ioPLBlock *ioplList = getIOPLList(dataP);
fe8ab488 3245 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
55e303ae 3246
fe8ab488
A
3247 if (_wireCount)
3248 {
3249 // kIODirectionCompleteWithDataValid & forDirection
3250 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3251 {
3252 for (ind = 0; ind < count; ind++)
3253 {
3254 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3255 }
3256 }
3257 }
3258 else
3259 {
b0d623f7 3260#if IOMD_DEBUG_DMAACTIVE
fe8ab488 3261 if (__iomd_reservedA) panic("complete() while dma active");
b0d623f7
A
3262#endif /* IOMD_DEBUG_DMAACTIVE */
3263
fe8ab488
A
3264 if (dataP->fMappedBase) {
3265 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
3266 dataP->fMappedBase = 0;
3267 }
3268 // Only complete iopls that we created which are for TypeVirtual
3269 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3270 for (ind = 0; ind < count; ind++)
3271 if (ioplList[ind].fIOPL) {
3272 if (dataP->fCompletionError)
3273 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3274 else
3275 upl_commit(ioplList[ind].fIOPL, 0, 0);
3276 upl_deallocate(ioplList[ind].fIOPL);
3277 }
3278 } else if (kIOMemoryTypeUPL == type) {
3279 upl_set_referenced(ioplList[0].fIOPL, false);
3280 }
6d2010ae 3281
fe8ab488 3282 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
b0d623f7 3283
fe8ab488
A
3284 dataP->fPreparationID = kIOPreparationIDUnprepared;
3285 }
2d21ac55 3286 }
1c79356b 3287 }
2d21ac55
A
3288
3289 if (_prepareLock)
3290 IOLockUnlock(_prepareLock);
3291
1c79356b
A
3292 return kIOReturnSuccess;
3293}
3294
3295IOReturn IOGeneralMemoryDescriptor::doMap(
2d21ac55
A
3296 vm_map_t __addressMap,
3297 IOVirtualAddress * __address,
1c79356b 3298 IOOptionBits options,
2d21ac55
A
3299 IOByteCount __offset,
3300 IOByteCount __length )
3301
1c79356b 3302{
b0d623f7 3303#ifndef __LP64__
2d21ac55 3304 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
b0d623f7 3305#endif /* !__LP64__ */
2d21ac55 3306
fe8ab488
A
3307 kern_return_t err;
3308
b0d623f7 3309 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
3310 mach_vm_size_t offset = mapping->fOffset + __offset;
3311 mach_vm_size_t length = mapping->fLength;
3312
91447636
A
3313 IOOptionBits type = _flags & kIOMemoryTypeMask;
3314 Ranges vec = _ranges;
3315
fe8ab488
A
3316 mach_vm_address_t range0Addr = 0;
3317 mach_vm_size_t range0Len = 0;
91447636 3318
060df5ea
A
3319 if ((offset >= _length) || ((offset + length) > _length))
3320 return( kIOReturnBadArgument );
3321
91447636
A
3322 if (vec.v)
3323 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3324
1c79356b 3325 // mapping source == dest? (could be much better)
fe8ab488
A
3326 if (_task
3327 && (mapping->fAddressTask == _task)
3328 && (mapping->fAddressMap == get_task_map(_task))
3329 && (options & kIOMapAnywhere)
3330 && (1 == _rangesCount)
3331 && (0 == offset)
3332 && range0Addr
3333 && (length <= range0Len))
2d21ac55
A
3334 {
3335 mapping->fAddress = range0Addr;
3336 mapping->fOptions |= kIOMapStatic;
3337
3338 return( kIOReturnSuccess );
1c79356b
A
3339 }
3340
fe8ab488
A
3341 if (!_memRef)
3342 {
3343 IOOptionBits createOptions = 0;
3344 if (!(kIOMapReadOnly & options))
3345 {
3346 createOptions |= kIOMemoryReferenceWrite;
3347#if DEVELOPMENT || DEBUG
3348 if (kIODirectionOut == (kIODirectionOutIn & _flags))
060df5ea 3349 {
fe8ab488
A
3350 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3351 }
0b4e3aa0 3352#endif
fe8ab488
A
3353 }
3354 err = memoryReferenceCreate(createOptions, &_memRef);
3355 if (kIOReturnSuccess != err) return (err);
3356 }
9bccf70c 3357
fe8ab488
A
3358 memory_object_t pager;
3359 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
55e303ae 3360
fe8ab488
A
3361 // <upl_transpose //
3362 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3363 {
3364 do
3365 {
3366 upl_t redirUPL2;
3367 vm_size_t size;
3368 int flags;
3369 unsigned int lock_count;
9bccf70c 3370
fe8ab488
A
3371 if (!_memRef || (1 != _memRef->count))
3372 {
3373 err = kIOReturnNotReadable;
3374 break;
3375 }
0b4e3aa0 3376
fe8ab488
A
3377 size = round_page(mapping->fLength);
3378 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3379 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
0b4e3aa0 3380
fe8ab488
A
3381 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3382 NULL, NULL,
3383 &flags))
3384 redirUPL2 = NULL;
1c79356b 3385
fe8ab488
A
3386 for (lock_count = 0;
3387 IORecursiveLockHaveLock(gIOMemoryLock);
3388 lock_count++) {
3389 UNLOCK;
3390 }
3391 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3392 for (;
3393 lock_count;
3394 lock_count--) {
3395 LOCK;
3396 }
1c79356b 3397
fe8ab488
A
3398 if (kIOReturnSuccess != err)
3399 {
3400 IOLog("upl_transpose(%x)\n", err);
3401 err = kIOReturnSuccess;
3402 }
0b4e3aa0 3403
fe8ab488
A
3404 if (redirUPL2)
3405 {
3406 upl_commit(redirUPL2, NULL, 0);
3407 upl_deallocate(redirUPL2);
3408 redirUPL2 = 0;
3409 }
3410 {
3411 // swap the memEntries since they now refer to different vm_objects
3412 IOMemoryReference * me = _memRef;
3413 _memRef = mapping->fMemory->_memRef;
3414 mapping->fMemory->_memRef = me;
3415 }
3416 if (pager)
3417 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3418 }
3419 while (false);
3420 }
3421 // upl_transpose> //
9bccf70c 3422 else
fe8ab488
A
3423 {
3424 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
0b4e3aa0 3425
fe8ab488
A
3426 if ((err == KERN_SUCCESS) && pager)
3427 {
3428 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3429 if (err != KERN_SUCCESS)
3430 {
3431 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3432 }
3433 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3434 {
3435 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3436 }
3437 }
3438 }
3439
3440 return (err);
1c79356b
A
3441}
3442
3443IOReturn IOGeneralMemoryDescriptor::doUnmap(
3444 vm_map_t addressMap,
2d21ac55
A
3445 IOVirtualAddress __address,
3446 IOByteCount __length )
1c79356b 3447{
2d21ac55 3448 return (super::doUnmap(addressMap, __address, __length));
1c79356b
A
3449}
3450
3451/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3452
b0d623f7
A
3453#undef super
3454#define super OSObject
1c79356b 3455
b0d623f7 3456OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 3457
b0d623f7
A
3458OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3459OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3460OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3461OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3462OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3463OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3464OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3465OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 3466
b0d623f7
A
3467/* ex-inline function implementation */
3468IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3469 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
3470
3471/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3472
b0d623f7 3473bool IOMemoryMap::init(
2d21ac55
A
3474 task_t intoTask,
3475 mach_vm_address_t toAddress,
3476 IOOptionBits _options,
3477 mach_vm_size_t _offset,
3478 mach_vm_size_t _length )
1c79356b 3479{
2d21ac55 3480 if (!intoTask)
1c79356b
A
3481 return( false);
3482
2d21ac55
A
3483 if (!super::init())
3484 return(false);
1c79356b 3485
2d21ac55
A
3486 fAddressMap = get_task_map(intoTask);
3487 if (!fAddressMap)
3488 return(false);
3489 vm_map_reference(fAddressMap);
1c79356b 3490
2d21ac55
A
3491 fAddressTask = intoTask;
3492 fOptions = _options;
3493 fLength = _length;
3494 fOffset = _offset;
3495 fAddress = toAddress;
1c79356b 3496
2d21ac55 3497 return (true);
1c79356b
A
3498}
3499
b0d623f7 3500bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 3501{
2d21ac55
A
3502 if (!_memory)
3503 return(false);
1c79356b 3504
2d21ac55 3505 if (!fSuperMap)
91447636 3506 {
2d21ac55 3507 if( (_offset + fLength) > _memory->getLength())
91447636 3508 return( false);
2d21ac55 3509 fOffset = _offset;
91447636 3510 }
1c79356b
A
3511
3512 _memory->retain();
2d21ac55 3513 if (fMemory)
91447636 3514 {
2d21ac55
A
3515 if (fMemory != _memory)
3516 fMemory->removeMapping(this);
3517 fMemory->release();
1c79356b 3518 }
2d21ac55 3519 fMemory = _memory;
91447636 3520
2d21ac55 3521 return( true );
1c79356b
A
3522}
3523
3524IOReturn IOMemoryDescriptor::doMap(
2d21ac55
A
3525 vm_map_t __addressMap,
3526 IOVirtualAddress * __address,
1c79356b 3527 IOOptionBits options,
2d21ac55
A
3528 IOByteCount __offset,
3529 IOByteCount __length )
1c79356b 3530{
fe8ab488
A
3531 return (kIOReturnUnsupported);
3532}
1c79356b 3533
fe8ab488
A
3534IOReturn IOMemoryDescriptor::handleFault(
3535 void * _pager,
3536 mach_vm_size_t sourceOffset,
3537 mach_vm_size_t length)
3538{
3539 if( kIOMemoryRedirected & _flags)
2d21ac55 3540 {
b0d623f7 3541#if DEBUG
fe8ab488 3542 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2d21ac55 3543#endif
fe8ab488
A
3544 do {
3545 SLEEP;
3546 } while( kIOMemoryRedirected & _flags );
2d21ac55 3547 }
fe8ab488 3548 return (kIOReturnSuccess);
0b4e3aa0
A
3549}
3550
fe8ab488 3551IOReturn IOMemoryDescriptor::populateDevicePager(
0b4e3aa0
A
3552 void * _pager,
3553 vm_map_t addressMap,
2d21ac55
A
3554 mach_vm_address_t address,
3555 mach_vm_size_t sourceOffset,
3556 mach_vm_size_t length,
0b4e3aa0
A
3557 IOOptionBits options )
3558{
3559 IOReturn err = kIOReturnSuccess;
3560 memory_object_t pager = (memory_object_t) _pager;
2d21ac55
A
3561 mach_vm_size_t size;
3562 mach_vm_size_t bytes;
3563 mach_vm_size_t page;
3564 mach_vm_size_t pageOffset;
3565 mach_vm_size_t pagerOffset;
0b4e3aa0 3566 IOPhysicalLength segLen;
55e303ae 3567 addr64_t physAddr;
0b4e3aa0 3568
b0d623f7 3569 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 3570 assert( physAddr );
55e303ae
A
3571 pageOffset = physAddr - trunc_page_64( physAddr );
3572 pagerOffset = sourceOffset;
0b4e3aa0
A
3573
3574 size = length + pageOffset;
3575 physAddr -= pageOffset;
1c79356b
A
3576
3577 segLen += pageOffset;
0b4e3aa0 3578 bytes = size;
2d21ac55
A
3579 do
3580 {
1c79356b 3581 // in the middle of the loop only map whole pages
fe8ab488
A
3582 if( segLen >= bytes) segLen = bytes;
3583 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3584 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
1c79356b 3585
fe8ab488 3586 if (kIOReturnSuccess != err) break;
1c79356b 3587
fe8ab488
A
3588 if (reserved && reserved->dp.pagerContig)
3589 {
3590 IOPhysicalLength allLen;
3591 addr64_t allPhys;
2d21ac55 3592
fe8ab488
A
3593 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3594 assert( allPhys );
3595 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3596 }
3597 else
3598 {
3599 for( page = 0;
3600 (page < segLen) && (KERN_SUCCESS == err);
3601 page += page_size)
2d21ac55 3602 {
fe8ab488
A
3603 err = device_pager_populate_object(pager, pagerOffset,
3604 (ppnum_t)(atop_64(physAddr + page)), page_size);
3605 pagerOffset += page_size;
3606 }
3607 }
3608 assert (KERN_SUCCESS == err);
3609 if (err) break;
0c530ab8 3610
2d21ac55
A
3611 // This call to vm_fault causes an early pmap level resolution
3612 // of the mappings created above for kernel mappings, since
3613 // faulting in later can't take place from interrupt level.
2d21ac55
A
3614 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3615 {
fe8ab488
A
3616 vm_fault(addressMap,
3617 (vm_map_offset_t)trunc_page_64(address),
3618 VM_PROT_READ|VM_PROT_WRITE,
3619 FALSE, THREAD_UNINT, NULL,
3620 (vm_map_offset_t)0);
9bccf70c
A
3621 }
3622
1c79356b 3623 sourceOffset += segLen - pageOffset;
0b4e3aa0 3624 address += segLen;
1c79356b
A
3625 bytes -= segLen;
3626 pageOffset = 0;
2d21ac55 3627 }
b0d623f7 3628 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 3629
2d21ac55 3630 if (bytes)
1c79356b 3631 err = kIOReturnBadArgument;
1c79356b 3632
2d21ac55 3633 return (err);
1c79356b
A
3634}
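// Note on populateDevicePager() above: the descriptor's physical pages are
// pushed into the device pager either as one contiguous run (when
// reserved->dp.pagerContig is set) or page by page, and kernel mappings are
// then pre-faulted with vm_fault() because a fault cannot be taken later from
// interrupt level.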
3635
3636IOReturn IOMemoryDescriptor::doUnmap(
3637 vm_map_t addressMap,
2d21ac55
A
3638 IOVirtualAddress __address,
3639 IOByteCount __length )
1c79356b 3640{
2d21ac55
A
3641 IOReturn err;
3642 mach_vm_address_t address;
3643 mach_vm_size_t length;
3644
3645 if (__length)
3646 {
3647 address = __address;
3648 length = __length;
3649 }
3650 else
3651 {
b0d623f7
A
3652 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3653 address = ((IOMemoryMap *) __address)->fAddress;
3654 length = ((IOMemoryMap *) __address)->fLength;
2d21ac55
A
3655 }
3656
fe8ab488 3657 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2d21ac55 3658 addressMap = IOPageableMapForAddress( address );
1c79356b 3659
b0d623f7 3660#if DEBUG
1c79356b 3661 if( kIOLogMapping & gIOKitDebug)
2d21ac55
A
3662 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3663 addressMap, address, length );
1c79356b
A
3664#endif
3665
2d21ac55 3666 err = mach_vm_deallocate( addressMap, address, length );
1c79356b 3667
2d21ac55 3668 return (err);
1c79356b
A
3669}
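// Note on doUnmap() above: when __length is zero, __address is not a virtual
// address at all but an IOMemoryMap *, from which the target map, address and
// length are recovered before mach_vm_deallocate().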
3670
91447636 3671IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 3672{
91447636 3673 IOReturn err = kIOReturnSuccess;
b0d623f7 3674 IOMemoryMap * mapping = 0;
e3027f41
A
3675 OSIterator * iter;
3676
3677 LOCK;
3678
91447636
A
3679 if( doRedirect)
3680 _flags |= kIOMemoryRedirected;
3681 else
3682 _flags &= ~kIOMemoryRedirected;
3683
e3027f41
A
3684 do {
3685 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
39236c6e
A
3686
3687 memory_object_t pager;
3688
3689 if( reserved)
3690 pager = (memory_object_t) reserved->dp.devicePager;
3691 else
3692 pager = MACH_PORT_NULL;
3693
b0d623f7 3694 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
39236c6e 3695 {
91447636 3696 mapping->redirect( safeTask, doRedirect );
39236c6e
A
3697 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3698 {
fe8ab488 3699 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
39236c6e
A
3700 }
3701 }
e3027f41 3702
91447636
A
3703 iter->release();
3704 }
e3027f41
A
3705 } while( false );
3706
91447636
A
3707 if (!doRedirect)
3708 {
9bccf70c 3709 WAKEUP;
0b4e3aa0
A
3710 }
3711
e3027f41
A
3712 UNLOCK;
3713
b0d623f7 3714#ifndef __LP64__
e3027f41
A
3715 // temporary binary compatibility
3716 IOSubMemoryDescriptor * subMem;
3717 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
91447636 3718 err = subMem->redirect( safeTask, doRedirect );
e3027f41 3719 else
91447636 3720 err = kIOReturnSuccess;
b0d623f7 3721#endif /* !__LP64__ */
e3027f41
A
3722
3723 return( err );
3724}
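// Illustrative sketch (an assumption, not code from this file): a driver that
// must revoke client access while it reprograms hardware could bracket the
// work with redirect(); "md" and "clientTask" are hypothetical names.
//
//     md->redirect(clientTask, true);     // unmap client mappings, block faulting threads
//     /* ... reconfigure the device safely ... */
//     md->redirect(clientTask, false);    // restore mappings, wake sleepers in handleFault()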
3725
b0d623f7 3726IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41
A
3727{
3728 IOReturn err = kIOReturnSuccess;
3729
2d21ac55 3730 if( fSuperMap) {
b0d623f7 3731// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
e3027f41
A
3732 } else {
3733
3734 LOCK;
0c530ab8
A
3735
3736 do
91447636 3737 {
2d21ac55 3738 if (!fAddress)
0c530ab8 3739 break;
2d21ac55 3740 if (!fAddressMap)
0c530ab8
A
3741 break;
3742
2d21ac55
A
3743 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3744 && (0 == (fOptions & kIOMapStatic)))
0c530ab8 3745 {
2d21ac55 3746 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3747 err = kIOReturnSuccess;
3748#if DEBUG
2d21ac55 3749 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 3750#endif
0c530ab8 3751 }
2d21ac55 3752 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
0c530ab8
A
3753 {
3754 IOOptionBits newMode;
2d21ac55
A
3755 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3756 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
0c530ab8
A
3757 }
3758 }
3759 while (false);
0c530ab8 3760 UNLOCK;
e3027f41
A
3761 }
3762
2d21ac55
A
3763 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3764 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3765 && safeTask
2d21ac55
A
3766 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3767 fMemory->redirect(safeTask, doRedirect);
91447636 3768
e3027f41
A
3769 return( err );
3770}
3771
b0d623f7 3772IOReturn IOMemoryMap::unmap( void )
1c79356b
A
3773{
3774 IOReturn err;
3775
3776 LOCK;
3777
2d21ac55
A
3778 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3779 && (0 == (fOptions & kIOMapStatic))) {
1c79356b 3780
2d21ac55 3781 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
1c79356b
A
3782
3783 } else
3784 err = kIOReturnSuccess;
3785
2d21ac55
A
3786 if (fAddressMap)
3787 {
3788 vm_map_deallocate(fAddressMap);
3789 fAddressMap = 0;
3790 }
3791
3792 fAddress = 0;
1c79356b
A
3793
3794 UNLOCK;
3795
3796 return( err );
3797}
3798
b0d623f7 3799void IOMemoryMap::taskDied( void )
1c79356b
A
3800{
3801 LOCK;
b0d623f7
A
3802 if (fUserClientUnmap)
3803 unmap();
2d21ac55
A
3804 if( fAddressMap) {
3805 vm_map_deallocate(fAddressMap);
3806 fAddressMap = 0;
1c79356b 3807 }
2d21ac55
A
3808 fAddressTask = 0;
3809 fAddress = 0;
1c79356b
A
3810 UNLOCK;
3811}
3812
b0d623f7
A
3813IOReturn IOMemoryMap::userClientUnmap( void )
3814{
3815 fUserClientUnmap = true;
3816 return (kIOReturnSuccess);
3817}
3818
9bccf70c
A
3819// Overload the release mechanism. Every mapping must be a member
3820// of its memory descriptor's _mappings set, so there are always
3821// 2 references on a mapping. When either of these references
3822// is released we need to free ourselves.
b0d623f7 3823void IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 3824{
55e303ae 3825 LOCK;
9bccf70c 3826 super::taggedRelease(tag, 2);
55e303ae 3827 UNLOCK;
9bccf70c
A
3828}
3829
b0d623f7 3830void IOMemoryMap::free()
1c79356b
A
3831{
3832 unmap();
3833
2d21ac55
A
3834 if (fMemory)
3835 {
1c79356b 3836 LOCK;
2d21ac55 3837 fMemory->removeMapping(this);
1c79356b 3838 UNLOCK;
2d21ac55 3839 fMemory->release();
1c79356b
A
3840 }
3841
2d21ac55 3842 if (fOwner && (fOwner != fMemory))
91447636
A
3843 {
3844 LOCK;
2d21ac55 3845 fOwner->removeMapping(this);
91447636
A
3846 UNLOCK;
3847 }
3848
2d21ac55
A
3849 if (fSuperMap)
3850 fSuperMap->release();
1c79356b 3851
2d21ac55
A
3852 if (fRedirUPL) {
3853 upl_commit(fRedirUPL, NULL, 0);
3854 upl_deallocate(fRedirUPL);
91447636
A
3855 }
3856
1c79356b
A
3857 super::free();
3858}
3859
b0d623f7 3860IOByteCount IOMemoryMap::getLength()
1c79356b 3861{
2d21ac55 3862 return( fLength );
1c79356b
A
3863}
3864
b0d623f7 3865IOVirtualAddress IOMemoryMap::getVirtualAddress()
1c79356b 3866{
b0d623f7 3867#ifndef __LP64__
2d21ac55
A
3868 if (fSuperMap)
3869 fSuperMap->getVirtualAddress();
b0d623f7
A
3870 else if (fAddressMap
3871 && vm_map_is_64bit(fAddressMap)
3872 && (sizeof(IOVirtualAddress) < 8))
2d21ac55
A
3873 {
3874 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3875 }
b0d623f7 3876#endif /* !__LP64__ */
2d21ac55
A
3877
3878 return (fAddress);
3879}
3880
b0d623f7
A
3881#ifndef __LP64__
3882mach_vm_address_t IOMemoryMap::getAddress()
2d21ac55
A
3883{
3884 return( fAddress);
3885}
3886
b0d623f7 3887mach_vm_size_t IOMemoryMap::getSize()
2d21ac55
A
3888{
3889 return( fLength );
1c79356b 3890}
b0d623f7 3891#endif /* !__LP64__ */
1c79356b 3892
2d21ac55 3893
b0d623f7 3894task_t IOMemoryMap::getAddressTask()
1c79356b 3895{
2d21ac55
A
3896 if( fSuperMap)
3897 return( fSuperMap->getAddressTask());
1c79356b 3898 else
2d21ac55 3899 return( fAddressTask);
1c79356b
A
3900}
3901
b0d623f7 3902IOOptionBits IOMemoryMap::getMapOptions()
1c79356b 3903{
2d21ac55 3904 return( fOptions);
1c79356b
A
3905}
3906
b0d623f7 3907IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
1c79356b 3908{
2d21ac55 3909 return( fMemory );
1c79356b
A
3910}
3911
b0d623f7
A
3912IOMemoryMap * IOMemoryMap::copyCompatible(
3913 IOMemoryMap * newMapping )
1c79356b 3914{
2d21ac55
A
3915 task_t task = newMapping->getAddressTask();
3916 mach_vm_address_t toAddress = newMapping->fAddress;
3917 IOOptionBits _options = newMapping->fOptions;
3918 mach_vm_size_t _offset = newMapping->fOffset;
3919 mach_vm_size_t _length = newMapping->fLength;
1c79356b 3920
2d21ac55 3921 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
1c79356b 3922 return( 0 );
2d21ac55 3923 if( (fOptions ^ _options) & kIOMapReadOnly)
9bccf70c
A
3924 return( 0 );
3925 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2d21ac55 3926 && ((fOptions ^ _options) & kIOMapCacheMask))
1c79356b
A
3927 return( 0 );
3928
2d21ac55 3929 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
1c79356b
A
3930 return( 0 );
3931
2d21ac55 3932 if( _offset < fOffset)
1c79356b
A
3933 return( 0 );
3934
2d21ac55 3935 _offset -= fOffset;
1c79356b 3936
2d21ac55 3937 if( (_offset + _length) > fLength)
1c79356b
A
3938 return( 0 );
3939
2d21ac55
A
3940 retain();
3941 if( (fLength == _length) && (!_offset))
3942 {
2d21ac55
A
3943 newMapping = this;
3944 }
3945 else
3946 {
3947 newMapping->fSuperMap = this;
6d2010ae 3948 newMapping->fOffset = fOffset + _offset;
2d21ac55 3949 newMapping->fAddress = fAddress + _offset;
1c79356b
A
3950 }
3951
2d21ac55 3952 return( newMapping );
1c79356b
A
3953}
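// Note on copyCompatible() above: an existing mapping is reused only when the
// new request targets the same task map, agrees on kIOMapReadOnly and on the
// cache mode (unless the request uses kIOMapDefaultCache), matches the fixed
// address when one was asked for, and lies entirely within this mapping's
// offset/length range.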
3954
99c3a104
A
3955IOReturn IOMemoryMap::wireRange(
3956 uint32_t options,
3957 mach_vm_size_t offset,
3958 mach_vm_size_t length)
3959{
3960 IOReturn kr;
3961 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3962 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3963
3964 if (kIODirectionOutIn & options)
3965 {
3966 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3967 }
3968 else
3969 {
3970 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3971 }
3972
3973 return (kr);
3974}
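// Illustrative sketch (an assumption, not code from this file): wiring the
// pages behind a mapping before they are touched from a context that cannot
// fault, then unwiring them afterwards; "map" is a hypothetical IOMemoryMap.
//
//     map->wireRange(kIODirectionOutIn, 0, map->getLength());   // wire
//     /* ... access that cannot tolerate a fault ... */
//     map->wireRange(0, 0, map->getLength());                   // unwire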
3975
3976
0c530ab8 3977IOPhysicalAddress
b0d623f7
A
3978#ifdef __LP64__
3979IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3980#else /* !__LP64__ */
3981IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3982#endif /* !__LP64__ */
1c79356b
A
3983{
3984 IOPhysicalAddress address;
3985
3986 LOCK;
b0d623f7
A
3987#ifdef __LP64__
3988 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3989#else /* !__LP64__ */
2d21ac55 3990 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 3991#endif /* !__LP64__ */
1c79356b
A
3992 UNLOCK;
3993
3994 return( address );
3995}
3996
3997/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3998
3999#undef super
4000#define super OSObject
4001
4002/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4003
4004void IOMemoryDescriptor::initialize( void )
4005{
4006 if( 0 == gIOMemoryLock)
4007 gIOMemoryLock = IORecursiveLockAlloc();
55e303ae 4008
0c530ab8 4009 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
4010}
4011
4012void IOMemoryDescriptor::free( void )
4013{
4014 if( _mappings)
4015 _mappings->release();
4016
4017 super::free();
4018}
4019
4020IOMemoryMap * IOMemoryDescriptor::setMapping(
4021 task_t intoTask,
4022 IOVirtualAddress mapAddress,
55e303ae 4023 IOOptionBits options )
1c79356b 4024{
2d21ac55
A
4025 return (createMappingInTask( intoTask, mapAddress,
4026 options | kIOMapStatic,
4027 0, getLength() ));
1c79356b
A
4028}
4029
4030IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 4031 IOOptionBits options )
1c79356b 4032{
2d21ac55
A
4033 return (createMappingInTask( kernel_task, 0,
4034 options | kIOMapAnywhere,
4035 0, getLength() ));
1c79356b
A
4036}
4037
b0d623f7 4038#ifndef __LP64__
2d21ac55
A
4039IOMemoryMap * IOMemoryDescriptor::map(
4040 task_t intoTask,
4041 IOVirtualAddress atAddress,
1c79356b 4042 IOOptionBits options,
55e303ae
A
4043 IOByteCount offset,
4044 IOByteCount length )
1c79356b 4045{
2d21ac55
A
4046 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4047 {
4048 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4049 return (0);
4050 }
4051
4052 return (createMappingInTask(intoTask, atAddress,
4053 options, offset, length));
4054}
b0d623f7 4055#endif /* !__LP64__ */
2d21ac55
A
4056
4057IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4058 task_t intoTask,
4059 mach_vm_address_t atAddress,
4060 IOOptionBits options,
4061 mach_vm_size_t offset,
4062 mach_vm_size_t length)
4063{
b0d623f7
A
4064 IOMemoryMap * result;
4065 IOMemoryMap * mapping;
2d21ac55
A
4066
4067 if (0 == length)
1c79356b
A
4068 length = getLength();
4069
b0d623f7 4070 mapping = new IOMemoryMap;
2d21ac55
A
4071
4072 if( mapping
4073 && !mapping->init( intoTask, atAddress,
4074 options, offset, length )) {
4075 mapping->release();
4076 mapping = 0;
4077 }
4078
4079 if (mapping)
4080 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4081 else
4082 result = 0;
4083
b0d623f7 4084#if DEBUG
2d21ac55 4085 if (!result)
316670eb
A
4086 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4087 this, atAddress, (uint32_t) options, offset, length);
2d21ac55
A
4088#endif
4089
4090 return (result);
1c79356b
A
4091}
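// Illustrative sketch (an assumption, not code from this file): mapping a
// whole descriptor into the kernel task and releasing the mapping when done;
// "md" is a hypothetical IOMemoryDescriptor. A zero length maps getLength().
//
//     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
//                                                 kIOMapAnywhere, 0, 0);
//     if (map)
//     {
//         mach_vm_address_t va = map->getAddress();
//         /* ... use va ... */
//         map->release();   // free() then unmaps and removes it from _mappings
//     }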
4092
b0d623f7
A
4093#ifndef __LP64__ // there is only a 64 bit version for LP64
4094IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
91447636
A
4095 IOOptionBits options,
4096 IOByteCount offset)
2d21ac55
A
4097{
4098 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4099}
b0d623f7 4100#endif
2d21ac55 4101
b0d623f7 4102IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2d21ac55
A
4103 IOOptionBits options,
4104 mach_vm_size_t offset)
91447636
A
4105{
4106 IOReturn err = kIOReturnSuccess;
4107 IOMemoryDescriptor * physMem = 0;
4108
4109 LOCK;
4110
2d21ac55 4111 if (fAddress && fAddressMap) do
91447636 4112 {
2d21ac55
A
4113 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4114 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 4115 {
2d21ac55 4116 physMem = fMemory;
91447636
A
4117 physMem->retain();
4118 }
4119
fe8ab488 4120 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
91447636 4121 {
b0d623f7 4122 vm_size_t size = round_page(fLength);
91447636
A
4123 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4124 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
fe8ab488 4125 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
91447636
A
4126 NULL, NULL,
4127 &flags))
2d21ac55 4128 fRedirUPL = 0;
91447636
A
4129
4130 if (physMem)
4131 {
2d21ac55 4132 IOUnmapPages( fAddressMap, fAddress, fLength );
fe8ab488 4133 if ((false))
b0d623f7 4134 physMem->redirect(0, true);
91447636
A
4135 }
4136 }
4137
4138 if (newBackingMemory)
4139 {
2d21ac55 4140 if (newBackingMemory != fMemory)
91447636 4141 {
2d21ac55
A
4142 fOffset = 0;
4143 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4144 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4145 offset, fLength))
91447636
A
4146 err = kIOReturnError;
4147 }
2d21ac55 4148 if (fRedirUPL)
91447636 4149 {
2d21ac55
A
4150 upl_commit(fRedirUPL, NULL, 0);
4151 upl_deallocate(fRedirUPL);
4152 fRedirUPL = 0;
91447636 4153 }
fe8ab488 4154 if ((false) && physMem)
91447636
A
4155 physMem->redirect(0, false);
4156 }
4157 }
4158 while (false);
4159
4160 UNLOCK;
4161
4162 if (physMem)
4163 physMem->release();
4164
4165 return (err);
4166}
4167
1c79356b
A
4168IOMemoryMap * IOMemoryDescriptor::makeMapping(
4169 IOMemoryDescriptor * owner,
2d21ac55
A
4170 task_t __intoTask,
4171 IOVirtualAddress __address,
1c79356b 4172 IOOptionBits options,
2d21ac55
A
4173 IOByteCount __offset,
4174 IOByteCount __length )
1c79356b 4175{
b0d623f7 4176#ifndef __LP64__
2d21ac55 4177 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
b0d623f7 4178#endif /* !__LP64__ */
2d21ac55 4179
91447636 4180 IOMemoryDescriptor * mapDesc = 0;
b0d623f7 4181 IOMemoryMap * result = 0;
2d21ac55
A
4182 OSIterator * iter;
4183
b0d623f7 4184 IOMemoryMap * mapping = (IOMemoryMap *) __address;
2d21ac55
A
4185 mach_vm_size_t offset = mapping->fOffset + __offset;
4186 mach_vm_size_t length = mapping->fLength;
4187
4188 mapping->fOffset = offset;
1c79356b
A
4189
4190 LOCK;
4191
91447636
A
4192 do
4193 {
2d21ac55
A
4194 if (kIOMapStatic & options)
4195 {
4196 result = mapping;
4197 addMapping(mapping);
4198 mapping->setMemoryDescriptor(this, 0);
4199 continue;
4200 }
4201
91447636
A
4202 if (kIOMapUnique & options)
4203 {
060df5ea 4204 addr64_t phys;
91447636 4205 IOByteCount physLen;
1c79356b 4206
2d21ac55 4207// if (owner != this) continue;
1c79356b 4208
0c530ab8
A
4209 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4210 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 4211 {
b0d623f7 4212 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
91447636
A
4213 if (!phys || (physLen < length))
4214 continue;
4215
b0d623f7
A
4216 mapDesc = IOMemoryDescriptor::withAddressRange(
4217 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
91447636
A
4218 if (!mapDesc)
4219 continue;
4220 offset = 0;
2d21ac55 4221 mapping->fOffset = offset;
91447636
A
4222 }
4223 }
4224 else
4225 {
2d21ac55
A
4226 // look for a compatible existing mapping
4227 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4228 {
b0d623f7
A
4229 IOMemoryMap * lookMapping;
4230 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
2d21ac55
A
4231 {
4232 if ((result = lookMapping->copyCompatible(mapping)))
4233 {
4234 addMapping(result);
4235 result->setMemoryDescriptor(this, offset);
91447636 4236 break;
2d21ac55 4237 }
91447636
A
4238 }
4239 iter->release();
4240 }
2d21ac55 4241 if (result || (options & kIOMapReference))
6d2010ae
A
4242 {
4243 if (result != mapping)
4244 {
4245 mapping->release();
4246 mapping = NULL;
4247 }
91447636 4248 continue;
6d2010ae 4249 }
2d21ac55 4250 }
91447636 4251
2d21ac55
A
4252 if (!mapDesc)
4253 {
4254 mapDesc = this;
91447636
A
4255 mapDesc->retain();
4256 }
2d21ac55
A
4257 IOReturn
4258 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4259 if (kIOReturnSuccess == kr)
4260 {
4261 result = mapping;
4262 mapDesc->addMapping(result);
4263 result->setMemoryDescriptor(mapDesc, offset);
4264 }
4265 else
4266 {
1c79356b 4267 mapping->release();
2d21ac55 4268 mapping = NULL;
1c79356b 4269 }
91447636 4270 }
2d21ac55 4271 while( false );
1c79356b
A
4272
4273 UNLOCK;
4274
91447636
A
4275 if (mapDesc)
4276 mapDesc->release();
4277
2d21ac55 4278 return (result);
1c79356b
A
4279}
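// Note on makeMapping() above: __address always carries an IOMemoryMap *
// (kIOMap64Bit). kIOMapStatic adopts the mapping as-is, kIOMapUnique may remap
// through a freshly created physical-range descriptor, and otherwise an
// existing compatible mapping is reused via copyCompatible() before falling
// back to doMap().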
4280
4281void IOMemoryDescriptor::addMapping(
4282 IOMemoryMap * mapping )
4283{
2d21ac55
A
4284 if( mapping)
4285 {
1c79356b
A
4286 if( 0 == _mappings)
4287 _mappings = OSSet::withCapacity(1);
9bccf70c
A
4288 if( _mappings )
4289 _mappings->setObject( mapping );
1c79356b
A
4290 }
4291}
4292
4293void IOMemoryDescriptor::removeMapping(
4294 IOMemoryMap * mapping )
4295{
9bccf70c 4296 if( _mappings)
1c79356b 4297 _mappings->removeObject( mapping);
1c79356b
A
4298}
4299
b0d623f7
A
4300#ifndef __LP64__
4301// obsolete initializers
4302// - initWithOptions is the designated initializer
1c79356b 4303bool
b0d623f7 4304IOMemoryDescriptor::initWithAddress(void * address,
55e303ae
A
4305 IOByteCount length,
4306 IODirection direction)
1c79356b
A
4307{
4308 return( false );
4309}
4310
4311bool
b0d623f7 4312IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
55e303ae
A
4313 IOByteCount length,
4314 IODirection direction,
4315 task_t task)
1c79356b
A
4316{
4317 return( false );
4318}
4319
4320bool
b0d623f7 4321IOMemoryDescriptor::initWithPhysicalAddress(
1c79356b 4322 IOPhysicalAddress address,
55e303ae
A
4323 IOByteCount length,
4324 IODirection direction )
1c79356b
A
4325{
4326 return( false );
4327}
4328
4329bool
b0d623f7 4330IOMemoryDescriptor::initWithRanges(
1c79356b
A
4331 IOVirtualRange * ranges,
4332 UInt32 withCount,
55e303ae
A
4333 IODirection direction,
4334 task_t task,
4335 bool asReference)
1c79356b
A
4336{
4337 return( false );
4338}
4339
4340bool
b0d623f7 4341IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1c79356b 4342 UInt32 withCount,
55e303ae
A
4343 IODirection direction,
4344 bool asReference)
1c79356b
A
4345{
4346 return( false );
4347}
4348
b0d623f7
A
4349void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4350 IOByteCount * lengthOfSegment)
4351{
4352 return( 0 );
4353}
4354#endif /* !__LP64__ */
4355
1c79356b
A
4356/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4357
9bccf70c
A
4358bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4359{
4360 OSSymbol const *keys[2];
4361 OSObject *values[2];
fe8ab488
A
4362 OSArray * array;
4363
91447636
A
4364 struct SerData {
4365 user_addr_t address;
4366 user_size_t length;
4367 } *vcopy;
9bccf70c
A
4368 unsigned int index, nRanges;
4369 bool result;
4370
91447636
A
4371 IOOptionBits type = _flags & kIOMemoryTypeMask;
4372
9bccf70c 4373 if (s == NULL) return false;
9bccf70c 4374
fe8ab488
A
4375 array = OSArray::withCapacity(4);
4376 if (!array) return (false);
9bccf70c
A
4377
4378 nRanges = _rangesCount;
91447636 4379 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
9bccf70c
A
4380 if (vcopy == 0) return false;
4381
4382 keys[0] = OSSymbol::withCString("address");
4383 keys[1] = OSSymbol::withCString("length");
4384
4385 result = false;
4386 values[0] = values[1] = 0;
4387
4388 // From this point on, any failure path can goto bail.
4389
4390 // Copy the volatile data so we don't have to allocate memory
4391 // while the lock is held.
4392 LOCK;
4393 if (nRanges == _rangesCount) {
91447636 4394 Ranges vec = _ranges;
9bccf70c 4395 for (index = 0; index < nRanges; index++) {
fe8ab488 4396 mach_vm_address_t addr; mach_vm_size_t len;
91447636
A
4397 getAddrLenForInd(addr, len, type, vec, index);
4398 vcopy[index].address = addr;
4399 vcopy[index].length = len;
9bccf70c
A
4400 }
4401 } else {
4402 // The descriptor changed out from under us. Give up.
4403 UNLOCK;
4404 result = false;
4405 goto bail;
4406 }
4407 UNLOCK;
4408
4409 for (index = 0; index < nRanges; index++)
4410 {
91447636
A
4411 user_addr_t addr = vcopy[index].address;
4412 IOByteCount len = (IOByteCount) vcopy[index].length;
fe8ab488 4413 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
9bccf70c
A
4414 if (values[0] == 0) {
4415 result = false;
4416 goto bail;
4417 }
91447636 4418 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
9bccf70c
A
4419 if (values[1] == 0) {
4420 result = false;
4421 goto bail;
4422 }
4423 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4424 if (dict == 0) {
4425 result = false;
4426 goto bail;
4427 }
fe8ab488
A
4428 array->setObject(dict);
4429 dict->release();
9bccf70c
A
4430 values[0]->release();
4431 values[1]->release();
4432 values[0] = values[1] = 0;
9bccf70c 4433 }
fe8ab488
A
4434
4435 result = array->serialize(s);
9bccf70c
A
4436
4437 bail:
fe8ab488
A
4438 if (array)
4439 array->release();
9bccf70c
A
4440 if (values[0])
4441 values[0]->release();
4442 if (values[1])
4443 values[1]->release();
4444 if (keys[0])
4445 keys[0]->release();
4446 if (keys[1])
4447 keys[1]->release();
4448 if (vcopy)
2d21ac55 4449 IOFree(vcopy, sizeof(SerData) * nRanges);
fe8ab488 4450
9bccf70c
A
4451 return result;
4452}
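// Illustrative sketch (an assumption, not code from this file): serialize()
// above emits one { "address", "length" } dictionary per range; e.g. dumping a
// hypothetical descriptor "md" for debugging:
//
//     OSSerialize * s = OSSerialize::withCapacity(4096);
//     if (s && md->serialize(s))
//         IOLog("%s\n", s->text());
//     if (s) s->release();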
4453
9bccf70c
A
4454/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4455
fe8ab488
A
4456#if DEVELOPMENT || DEBUG
4457
4458extern "C" void IOMemoryDescriptorTest(int x)
4459{
4460 IOGeneralMemoryDescriptor * md;
4461
4462 vm_offset_t data[2];
4463 vm_size_t bsize = 16*1024*1024;
4464
4465 vm_size_t srcsize, srcoffset, mapoffset, size;
4466
4467 kern_return_t kr;
4468
4469 kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
4470 vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE);
4471 vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE);
4472
4473 kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);
4474
4475 uint32_t idx, offidx;
4476 for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
4477 {
4478 ((uint32_t*)data[0])[idx] = idx;
4479 }
4480
4481 for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c))
4482 {
4483 for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc))
4484 {
4485 IOAddressRange ranges[3];
4486 uint32_t rangeCount = 1;
4487
4488 bzero(&ranges[0], sizeof(ranges));
4489 ranges[0].address = data[0] + srcoffset;
4490 ranges[0].length = srcsize;
4491
4492 if (srcsize > 5*page_size)
4493 {
4494 ranges[0].length = 7634;
4495 ranges[1].length = 9870;
4496 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
4497 ranges[1].address = ranges[0].address + ranges[0].length;
4498 ranges[2].address = ranges[1].address + ranges[1].length;
4499 rangeCount = 3;
4500 }
4501 else if ((srcsize > 2*page_size) && !(page_mask & srcoffset))
4502 {
4503 ranges[0].length = 4096;
4504 ranges[1].length = 4096;
4505 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
4506 ranges[0].address = data[0] + srcoffset + 4096;
4507 ranges[1].address = data[0] + srcoffset;
4508 ranges[2].address = ranges[0].address + ranges[0].length;
4509 rangeCount = 3;
4510 }
4511
4512 md = OSDynamicCast(IOGeneralMemoryDescriptor,
4513 IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
4514 assert(md);
4515
4516 kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
4517 (long) srcsize, (long) srcoffset,
4518 (long long) ranges[0].address - data[0], (long long) ranges[0].length,
4519 (long long) ranges[1].address - data[0], (long long) ranges[1].length,
4520 (long long) ranges[2].address - data[0], (long long) ranges[2].length);
4521
4522 if (kIOReturnSuccess == kr)
4523 {
4524 for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
4525 {
4526 for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20))
4527 {
4528 IOMemoryMap * map;
4529 mach_vm_address_t addr = 0;
4530 uint32_t data;
4531
4532 kprintf("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
4533
4534 map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
4535 if (map) addr = map->getAddress();
4536 else kr = kIOReturnError;
4537
4538 kprintf(">mapRef 0x%x %llx\n", kr, addr);
4539
4540 if (kIOReturnSuccess != kr) break;
4541 kr = md->prepare();
4542 if (kIOReturnSuccess != kr)
4543 {
4544 kprintf("prepare() fail 0x%x\n", kr);
4545 break;
4546 }
4547 for (idx = 0; idx < size; idx += sizeof(uint32_t))
4548 {
4549 offidx = (idx + mapoffset + srcoffset);
4550 if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset))
4551 {
4552 if (offidx < 8192) offidx ^= 0x1000;
4553 }
4554 offidx /= sizeof(uint32_t);
4555
4556 if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
4557 {
4558 kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4559 kr = kIOReturnBadMedia;
4560 }
4561 else
4562 {
4563 if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
4564 if (offidx != data)
4565 {
4566 kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4567 kr = kIOReturnBadMedia;
4568 }
4569 }
4570 }
4571 md->complete();
4572 map->release();
4573 kprintf("unmapRef %llx\n", addr);
4574 }
4575 if (kIOReturnSuccess != kr) break;
4576 }
4577 }
4578 if (kIOReturnSuccess != kr) break;
4579 }
4580 if (kIOReturnSuccess != kr) break;
4581 }
4582
4583 if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
4584 (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
4585
4586 vm_deallocate(kernel_map, data[0], bsize);
4587// vm_deallocate(kernel_map, data[1], size);
4588}
4589
4590#endif /* DEVELOPMENT || DEBUG */
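// Note on IOMemoryDescriptorTest() above (DEVELOPMENT || DEBUG only): it walks
// a 16 MB kernel buffer, builds one- and three-range descriptors at varying
// offsets and sizes, maps sub-ranges back into the kernel task, and checks the
// mapped contents against readBytes() to catch VM/physical mismatches.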
4591
4592/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4593
0b4e3aa0 4594OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
4595#ifdef __LP64__
4596OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4597OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4598OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4599OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4600OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4601OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4602OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4603#else /* !__LP64__ */
55e303ae
A
4604OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4605OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
4606OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4607OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 4608OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
4609OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4610OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4611#endif /* !__LP64__ */
1c79356b
A
4612OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4613OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4614OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4615OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4616OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4617OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4618OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4619OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 4620
55e303ae 4621/* ex-inline function implementation */
0c530ab8
A
4622IOPhysicalAddress
4623IOMemoryDescriptor::getPhysicalAddress()
9bccf70c 4624 { return( getPhysicalSegment( 0, 0 )); }