1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53 #include <os/overflow.h>
54
55 #include <sys/uio.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #include <mach/vm_prot.h>
64 #include <mach/mach_vm.h>
65 #include <vm/vm_fault.h>
66 #include <vm/vm_protos.h>
67
68 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
69 extern void ipc_port_release_send(ipc_port_t port);
70
71 __END_DECLS
72
73 #define kIOMapperWaitSystem ((IOMapper *) 1)
74
75 static IOMapper * gIOSystemMapper = NULL;
76
77 ppnum_t gIOLastPage;
78
79 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
80
81 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
82
83 #define super IOMemoryDescriptor
84
85 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
86
87 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
88
89 static IORecursiveLock * gIOMemoryLock;
90
91 #define LOCK IORecursiveLockLock( gIOMemoryLock)
92 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
93 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
94 #define WAKEUP \
95 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
96
97 #if 0
98 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
99 #else
100 #define DEBG(fmt, args...) {}
101 #endif
102
103 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
104
105 // Some data structures and accessor macros used by the initWithOptions()
106 // function
107
108 enum ioPLBlockFlags {
109 kIOPLOnDevice = 0x00000001,
110 kIOPLExternUPL = 0x00000002,
111 };
112
113 struct IOMDPersistentInitData
114 {
115 const IOGeneralMemoryDescriptor * fMD;
116 IOMemoryReference * fMemRef;
117 };
118
119 struct ioPLBlock {
120 upl_t fIOPL;
121 vm_address_t fPageInfo; // Pointer to page list or index into it
122 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
123 ppnum_t fMappedPage; // Page number of first page in this iopl
124 unsigned int fPageOffset; // Offset within first page of iopl
125 unsigned int fFlags; // Flags
126 };
127
128 enum { kMaxWireTags = 6 };
129
130 struct ioGMDData
131 {
132 IOMapper * fMapper;
133 uint64_t fDMAMapAlignment;
134 uint64_t fMappedBase;
135 uint64_t fMappedLength;
136 uint64_t fPreparationID;
137 #if IOTRACKING
138 IOTracking fWireTracking;
139 #endif /* IOTRACKING */
140 unsigned int fPageCnt;
141 uint8_t fDMAMapNumAddressBits;
142 unsigned char fDiscontig:1;
143 unsigned char fCompletionError:1;
144 unsigned char fMappedBaseValid:1;
145 unsigned char _resv:3;
146 unsigned char fDMAAccess:2;
147
148 /* variable length arrays */
149 upl_page_info_t fPageList[1]
150 #if __LP64__
151 // align fPageList as for ioPLBlock
152 __attribute__((aligned(sizeof(upl_t))))
153 #endif
154 ;
155 ioPLBlock fBlocks[1];
156 };
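// Layout note: the OSData buffer behind _memoryEntries holds a single
// ioGMDData header, then fPageCnt upl_page_info_t entries (fPageList),
// then one ioPLBlock per IOPL. The accessor macros below index into that
// one allocation; computeDataSize() gives the bytes needed for a given
// page and IOPL count.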
157
158 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
159 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
160 #define getNumIOPL(osd, d) \
161 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
162 #define getPageList(d) (&(d->fPageList[0]))
163 #define computeDataSize(p, u) \
164 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
165
166 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
167
168 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
169
170 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
171
172 extern "C" {
173
174 kern_return_t device_data_action(
175 uintptr_t device_handle,
176 ipc_port_t device_pager,
177 vm_prot_t protection,
178 vm_object_offset_t offset,
179 vm_size_t size)
180 {
181 kern_return_t kr;
182 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
183 IOMemoryDescriptor * memDesc;
184
185 LOCK;
186 memDesc = ref->dp.memory;
187 if( memDesc)
188 {
189 memDesc->retain();
190 kr = memDesc->handleFault(device_pager, offset, size);
191 memDesc->release();
192 }
193 else
194 kr = KERN_ABORTED;
195 UNLOCK;
196
197 return( kr );
198 }
199
200 kern_return_t device_close(
201 uintptr_t device_handle)
202 {
203 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
204
205 IODelete( ref, IOMemoryDescriptorReserved, 1 );
206
207 return( kIOReturnSuccess );
208 }
209 }; // end extern "C"
210
211 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
212
213 // Note this inline function uses C++ reference arguments to return values.
214 // This means that pointers are not passed and NULL does not have to be
215 // checked for, since a NULL reference is illegal.
216 static inline void
217 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
218 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
219 {
220 assert(kIOMemoryTypeUIO == type
221 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
222 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
223 if (kIOMemoryTypeUIO == type) {
224 user_size_t us;
225 user_addr_t ad;
226 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
227 }
228 #ifndef __LP64__
229 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
230 IOAddressRange cur = r.v64[ind];
231 addr = cur.address;
232 len = cur.length;
233 }
234 #endif /* !__LP64__ */
235 else {
236 IOVirtualRange cur = r.v[ind];
237 addr = cur.address;
238 len = cur.length;
239 }
240 }
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 static IOReturn
245 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
246 {
247 IOReturn err = kIOReturnSuccess;
248
249 *control = VM_PURGABLE_SET_STATE;
250
251 enum { kIOMemoryPurgeableControlMask = 15 };
252
253 switch (kIOMemoryPurgeableControlMask & newState)
254 {
255 case kIOMemoryPurgeableKeepCurrent:
256 *control = VM_PURGABLE_GET_STATE;
257 break;
258
259 case kIOMemoryPurgeableNonVolatile:
260 *state = VM_PURGABLE_NONVOLATILE;
261 break;
262 case kIOMemoryPurgeableVolatile:
263 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
264 break;
265 case kIOMemoryPurgeableEmpty:
266 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
267 break;
268 default:
269 err = kIOReturnBadArgument;
270 break;
271 }
272
273 if (*control == VM_PURGABLE_SET_STATE) {
274 // let VM know this call is from the kernel and is allowed to alter
275 // the volatility of the memory entry even if it was created with
276 // MAP_MEM_PURGABLE_KERNEL_ONLY
277 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
278 }
279
280 return (err);
281 }
282
283 static IOReturn
284 purgeableStateBits(int * state)
285 {
286 IOReturn err = kIOReturnSuccess;
287
288 switch (VM_PURGABLE_STATE_MASK & *state)
289 {
290 case VM_PURGABLE_NONVOLATILE:
291 *state = kIOMemoryPurgeableNonVolatile;
292 break;
293 case VM_PURGABLE_VOLATILE:
294 *state = kIOMemoryPurgeableVolatile;
295 break;
296 case VM_PURGABLE_EMPTY:
297 *state = kIOMemoryPurgeableEmpty;
298 break;
299 default:
300 *state = kIOMemoryPurgeableNonVolatile;
301 err = kIOReturnNotReady;
302 break;
303 }
304 return (err);
305 }
306
307
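// The two helpers below translate an IOKit cache mode (kIOInhibitCache,
// kIOWriteThruCache, ...) into MAP_MEM_* bits carried in a vm_prot_t for
// named-entry creation, and into DEVICE_PAGER_* flags for the device pager.
// kIODefaultCache maps to MAP_MEM_NOOP and to an invalid (-1U) pager flag.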
308 static vm_prot_t
309 vmProtForCacheMode(IOOptionBits cacheMode)
310 {
311 vm_prot_t prot = 0;
312 switch (cacheMode)
313 {
314 case kIOInhibitCache:
315 SET_MAP_MEM(MAP_MEM_IO, prot);
316 break;
317
318 case kIOWriteThruCache:
319 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
320 break;
321
322 case kIOWriteCombineCache:
323 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
324 break;
325
326 case kIOCopybackCache:
327 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
328 break;
329
330 case kIOCopybackInnerCache:
331 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
332 break;
333
334 case kIOPostedWrite:
335 SET_MAP_MEM(MAP_MEM_POSTED, prot);
336 break;
337
338 case kIODefaultCache:
339 default:
340 SET_MAP_MEM(MAP_MEM_NOOP, prot);
341 break;
342 }
343
344 return (prot);
345 }
346
347 static unsigned int
348 pagerFlagsForCacheMode(IOOptionBits cacheMode)
349 {
350 unsigned int pagerFlags = 0;
351 switch (cacheMode)
352 {
353 case kIOInhibitCache:
354 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
355 break;
356
357 case kIOWriteThruCache:
358 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
359 break;
360
361 case kIOWriteCombineCache:
362 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
363 break;
364
365 case kIOCopybackCache:
366 pagerFlags = DEVICE_PAGER_COHERENT;
367 break;
368
369 case kIOCopybackInnerCache:
370 pagerFlags = DEVICE_PAGER_COHERENT;
371 break;
372
373 case kIOPostedWrite:
374 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
375 break;
376
377 case kIODefaultCache:
378 default:
379 pagerFlags = -1U;
380 break;
381 }
382 return (pagerFlags);
383 }
384
385 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
386 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
387
388 struct IOMemoryEntry
389 {
390 ipc_port_t entry;
391 int64_t offset;
392 uint64_t size;
393 };
394
395 struct IOMemoryReference
396 {
397 volatile SInt32 refCount;
398 vm_prot_t prot;
399 uint32_t capacity;
400 uint32_t count;
401 struct IOMemoryReference * mapRef;
402 IOMemoryEntry entries[0];
403 };
404
405 enum
406 {
407 kIOMemoryReferenceReuse = 0x00000001,
408 kIOMemoryReferenceWrite = 0x00000002,
409 kIOMemoryReferenceCOW = 0x00000004,
410 };
411
412 SInt32 gIOMemoryReferenceCount;
413
414 IOMemoryReference *
415 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
416 {
417 IOMemoryReference * ref;
418 size_t newSize, oldSize, copySize;
419
420 newSize = (sizeof(IOMemoryReference)
421 - sizeof(ref->entries)
422 + capacity * sizeof(ref->entries[0]));
423 ref = (typeof(ref)) IOMalloc(newSize);
424 if (realloc)
425 {
426 oldSize = (sizeof(IOMemoryReference)
427 - sizeof(realloc->entries)
428 + realloc->capacity * sizeof(realloc->entries[0]));
429 copySize = oldSize;
430 if (copySize > newSize) copySize = newSize;
431 if (ref) bcopy(realloc, ref, copySize);
432 IOFree(realloc, oldSize);
433 }
434 else if (ref)
435 {
436 bzero(ref, sizeof(*ref));
437 ref->refCount = 1;
438 OSIncrementAtomic(&gIOMemoryReferenceCount);
439 }
440 if (!ref) return (0);
441 ref->capacity = capacity;
442 return (ref);
443 }
444
445 void
446 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
447 {
448 IOMemoryEntry * entries;
449 size_t size;
450
451 if (ref->mapRef)
452 {
453 memoryReferenceFree(ref->mapRef);
454 ref->mapRef = 0;
455 }
456
457 entries = ref->entries + ref->count;
458 while (entries > &ref->entries[0])
459 {
460 entries--;
461 ipc_port_release_send(entries->entry);
462 }
463 size = (sizeof(IOMemoryReference)
464 - sizeof(ref->entries)
465 + ref->capacity * sizeof(ref->entries[0]));
466 IOFree(ref, size);
467
468 OSDecrementAtomic(&gIOMemoryReferenceCount);
469 }
470
471 void
472 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
473 {
474 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
475 }
476
477
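/*
 * memoryReferenceCreate
 *
 * Build an IOMemoryReference: a refcounted array of mach named entries,
 * one per coalesced page-aligned run of the descriptor's ranges, used
 * later by memoryReferenceMap() to back mappings. Cache mode and access
 * protection are folded into the vm_prot_t handed to
 * mach_make_memory_entry_64(); task-less (physical/UPL) descriptors get a
 * single entry backed by a device pager instead.
 */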
478 IOReturn
479 IOGeneralMemoryDescriptor::memoryReferenceCreate(
480 IOOptionBits options,
481 IOMemoryReference ** reference)
482 {
483 enum { kCapacity = 4, kCapacityInc = 4 };
484
485 kern_return_t err;
486 IOMemoryReference * ref;
487 IOMemoryEntry * entries;
488 IOMemoryEntry * cloneEntries;
489 vm_map_t map;
490 ipc_port_t entry, cloneEntry;
491 vm_prot_t prot;
492 memory_object_size_t actualSize;
493 uint32_t rangeIdx;
494 uint32_t count;
495 mach_vm_address_t entryAddr, endAddr, entrySize;
496 mach_vm_size_t srcAddr, srcLen;
497 mach_vm_size_t nextAddr, nextLen;
498 mach_vm_size_t offset, remain;
499 IOByteCount physLen;
500 IOOptionBits type = (_flags & kIOMemoryTypeMask);
501 IOOptionBits cacheMode;
502 unsigned int pagerFlags;
503 vm_tag_t tag;
504
505 ref = memoryReferenceAlloc(kCapacity, NULL);
506 if (!ref) return (kIOReturnNoMemory);
507
508 tag = getVMTag(kernel_map);
509 entries = &ref->entries[0];
510 count = 0;
511 err = KERN_SUCCESS;
512
513 offset = 0;
514 rangeIdx = 0;
515 if (_task)
516 {
517 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
518 }
519 else
520 {
521 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
522 nextLen = physLen;
523
524 // default cache mode for physical
525 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
526 {
527 IOOptionBits mode;
528 pagerFlags = IODefaultCacheBits(nextAddr);
529 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
530 {
531 if (DEVICE_PAGER_EARLY_ACK & pagerFlags)
532 mode = kIOPostedWrite;
533 else if (DEVICE_PAGER_GUARDED & pagerFlags)
534 mode = kIOInhibitCache;
535 else
536 mode = kIOWriteCombineCache;
537 }
538 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
539 mode = kIOWriteThruCache;
540 else
541 mode = kIOCopybackCache;
542 _flags |= (mode << kIOMemoryBufferCacheShift);
543 }
544 }
545
546 // cache mode & vm_prot
547 prot = VM_PROT_READ;
548 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
549 prot |= vmProtForCacheMode(cacheMode);
550 // VM system requires write access to change cache mode
551 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
552 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
553 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
554 if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY;
555
556 if ((kIOMemoryReferenceReuse & options) && _memRef)
557 {
558 cloneEntries = &_memRef->entries[0];
559 prot |= MAP_MEM_NAMED_REUSE;
560 }
561
562 if (_task)
563 {
564 // virtual ranges
565
566 if (kIOMemoryBufferPageable & _flags)
567 {
568 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
569 prot |= MAP_MEM_NAMED_CREATE;
570 if (kIOMemoryBufferPurgeable & _flags) prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
571 if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;
572
573 prot |= VM_PROT_WRITE;
574 map = NULL;
575 }
576 else map = get_task_map(_task);
577
578 remain = _length;
579 while (remain)
580 {
581 srcAddr = nextAddr;
582 srcLen = nextLen;
583 nextAddr = 0;
584 nextLen = 0;
585 // coalesce addr range
586 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
587 {
588 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
589 if ((srcAddr + srcLen) != nextAddr) break;
590 srcLen += nextLen;
591 }
592 entryAddr = trunc_page_64(srcAddr);
593 endAddr = round_page_64(srcAddr + srcLen);
594 do
595 {
596 entrySize = (endAddr - entryAddr);
597 if (!entrySize) break;
598 actualSize = entrySize;
599
600 cloneEntry = MACH_PORT_NULL;
601 if (MAP_MEM_NAMED_REUSE & prot)
602 {
603 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
604 else prot &= ~MAP_MEM_NAMED_REUSE;
605 }
606
607 err = mach_make_memory_entry_64(map,
608 &actualSize, entryAddr, prot, &entry, cloneEntry);
609
610 if (KERN_SUCCESS != err) break;
611 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
612
613 if (count >= ref->capacity)
614 {
615 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
616 entries = &ref->entries[count];
617 }
618 entries->entry = entry;
619 entries->size = actualSize;
620 entries->offset = offset + (entryAddr - srcAddr);
621 entryAddr += actualSize;
622 if (MAP_MEM_NAMED_REUSE & prot)
623 {
624 if ((cloneEntries->entry == entries->entry)
625 && (cloneEntries->size == entries->size)
626 && (cloneEntries->offset == entries->offset)) cloneEntries++;
627 else prot &= ~MAP_MEM_NAMED_REUSE;
628 }
629 entries++;
630 count++;
631 }
632 while (true);
633 offset += srcLen;
634 remain -= srcLen;
635 }
636 }
637 else
638 {
639 // _task == 0, physical or kIOMemoryTypeUPL
640 memory_object_t pager;
641 vm_size_t size = ptoa_32(_pages);
642
643 if (!getKernelReserved()) panic("getKernelReserved");
644
645 reserved->dp.pagerContig = (1 == _rangesCount);
646 reserved->dp.memory = this;
647
648 pagerFlags = pagerFlagsForCacheMode(cacheMode);
649 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
650 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
651
652 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
653 size, pagerFlags);
654 assert (pager);
655 if (!pager) err = kIOReturnVMError;
656 else
657 {
658 srcAddr = nextAddr;
659 entryAddr = trunc_page_64(srcAddr);
660 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
661 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
662 assert (KERN_SUCCESS == err);
663 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
664 else
665 {
666 reserved->dp.devicePager = pager;
667 entries->entry = entry;
668 entries->size = size;
669 entries->offset = offset + (entryAddr - srcAddr);
670 entries++;
671 count++;
672 }
673 }
674 }
675
676 ref->count = count;
677 ref->prot = prot;
678
679 if (_task && (KERN_SUCCESS == err)
680 && (kIOMemoryMapCopyOnWrite & _flags)
681 && !(kIOMemoryReferenceCOW & options))
682 {
683 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
684 }
685
686 if (KERN_SUCCESS == err)
687 {
688 if (MAP_MEM_NAMED_REUSE & prot)
689 {
690 memoryReferenceFree(ref);
691 OSIncrementAtomic(&_memRef->refCount);
692 ref = _memRef;
693 }
694 }
695 else
696 {
697 memoryReferenceFree(ref);
698 ref = NULL;
699 }
700
701 *reference = ref;
702
703 return (err);
704 }
705
706 kern_return_t
707 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
708 {
709 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
710 IOReturn err;
711 vm_map_offset_t addr;
712
713 addr = ref->mapped;
714
715 err = vm_map_enter_mem_object(map, &addr, ref->size,
716 (vm_map_offset_t) 0,
717 (((ref->options & kIOMapAnywhere)
718 ? VM_FLAGS_ANYWHERE
719 : VM_FLAGS_FIXED)),
720 VM_MAP_KERNEL_FLAGS_NONE,
721 ref->tag,
722 IPC_PORT_NULL,
723 (memory_object_offset_t) 0,
724 false, /* copy */
725 ref->prot,
726 ref->prot,
727 VM_INHERIT_NONE);
728 if (KERN_SUCCESS == err)
729 {
730 ref->mapped = (mach_vm_address_t) addr;
731 ref->map = map;
732 }
733
734 return( err );
735 }
736
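/*
 * memoryReferenceMap
 *
 * Enter the named entries of an IOMemoryReference into the target vm_map,
 * covering [inoffset, inoffset + size) of the descriptor. VM space is
 * allocated unless kIOMapOverwrite is set. With kIOMapPrefault (and memory
 * that was wired via prepare()), pages are entered up front from the
 * descriptor's UPL page lists rather than demand-faulted later.
 */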
737 IOReturn
738 IOGeneralMemoryDescriptor::memoryReferenceMap(
739 IOMemoryReference * ref,
740 vm_map_t map,
741 mach_vm_size_t inoffset,
742 mach_vm_size_t size,
743 IOOptionBits options,
744 mach_vm_address_t * inaddr)
745 {
746 IOReturn err;
747 int64_t offset = inoffset;
748 uint32_t rangeIdx, entryIdx;
749 vm_map_offset_t addr, mapAddr;
750 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
751
752 mach_vm_address_t nextAddr;
753 mach_vm_size_t nextLen;
754 IOByteCount physLen;
755 IOMemoryEntry * entry;
756 vm_prot_t prot, memEntryCacheMode;
757 IOOptionBits type;
758 IOOptionBits cacheMode;
759 vm_tag_t tag;
760 // for the kIOMapPrefault option.
761 upl_page_info_t * pageList = NULL;
762 UInt currentPageIndex = 0;
763 bool didAlloc;
764
765 if (ref->mapRef)
766 {
767 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
768 return (err);
769 }
770
771 type = _flags & kIOMemoryTypeMask;
772
773 prot = VM_PROT_READ;
774 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
775 prot &= ref->prot;
776
777 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
778 if (kIODefaultCache != cacheMode)
779 {
780 // VM system requires write access to update named entry cache mode
781 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
782 }
783
784 tag = getVMTag(map);
785
786 if (_task)
787 {
788 // Find first range for offset
789 if (!_rangesCount) return (kIOReturnBadArgument);
790 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
791 {
792 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
793 if (remain < nextLen) break;
794 remain -= nextLen;
795 }
796 }
797 else
798 {
799 rangeIdx = 0;
800 remain = 0;
801 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
802 nextLen = size;
803 }
804
805 assert(remain < nextLen);
806 if (remain >= nextLen) return (kIOReturnBadArgument);
807
808 nextAddr += remain;
809 nextLen -= remain;
810 pageOffset = (page_mask & nextAddr);
811 addr = 0;
812 didAlloc = false;
813
814 if (!(options & kIOMapAnywhere))
815 {
816 addr = *inaddr;
817 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
818 addr -= pageOffset;
819 }
820
821 // find first entry for offset
822 for (entryIdx = 0;
823 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
824 entryIdx++) {}
825 entryIdx--;
826 entry = &ref->entries[entryIdx];
827
828 // allocate VM
829 size = round_page_64(size + pageOffset);
830 if (kIOMapOverwrite & options)
831 {
832 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
833 {
834 map = IOPageableMapForAddress(addr);
835 }
836 err = KERN_SUCCESS;
837 }
838 else
839 {
840 IOMemoryDescriptorMapAllocRef ref;
841 ref.map = map;
842 ref.tag = tag;
843 ref.options = options;
844 ref.size = size;
845 ref.prot = prot;
846 if (options & kIOMapAnywhere)
847 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
848 ref.mapped = 0;
849 else
850 ref.mapped = addr;
851 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
852 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
853 else
854 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
855 if (KERN_SUCCESS == err)
856 {
857 addr = ref.mapped;
858 map = ref.map;
859 didAlloc = true;
860 }
861 }
862
863 /*
864 * If the memory is associated with a device pager but doesn't have a UPL,
865 * it will be immediately faulted in through the pager via populateDevicePager().
866 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
867 * operations.
868 */
869 if ((reserved != NULL) && (reserved->dp.devicePager) && (_memoryEntries == NULL) && (_wireCount != 0))
870 options &= ~kIOMapPrefault;
871
872 /*
873 * Prefaulting is only possible if we wired the memory earlier. Check the
874 * memory type, and the underlying data.
875 */
876 if (options & kIOMapPrefault)
877 {
878 /*
879 * The memory must have been wired by calling ::prepare(); otherwise
880 * we don't have the UPL. Without a UPL, pages cannot be pre-faulted.
881 */
882 assert(_wireCount != 0);
883 assert(_memoryEntries != NULL);
884 if ((_wireCount == 0) ||
885 (_memoryEntries == NULL))
886 {
887 return kIOReturnBadArgument;
888 }
889
890 // Get the page list.
891 ioGMDData* dataP = getDataP(_memoryEntries);
892 ioPLBlock const* ioplList = getIOPLList(dataP);
893 pageList = getPageList(dataP);
894
895 // Get the number of IOPLs.
896 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
897
898 /*
899 * Scan through the IOPL Info Blocks, looking for the first block containing
900 * the offset. The scan will run one block past it, so we need to step back
901 * to the right block at the end.
902 */
903 UInt ioplIndex = 0;
904 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
905 ioplIndex++;
906 ioplIndex--;
907
908 // Retrieve the IOPL info block.
909 ioPLBlock ioplInfo = ioplList[ioplIndex];
910
911 /*
912 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
913 * array.
914 */
915 if (ioplInfo.fFlags & kIOPLExternUPL)
916 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
917 else
918 pageList = &pageList[ioplInfo.fPageInfo];
919
920 // Rebase [offset] into the IOPL in order to look up the first page index.
921 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
922
923 // Retrieve the index of the first page corresponding to the offset.
924 currentPageIndex = atop_32(offsetInIOPL);
925 }
926
927 // enter mappings
928 remain = size;
929 mapAddr = addr;
930 addr += pageOffset;
931
932 while (remain && (KERN_SUCCESS == err))
933 {
934 entryOffset = offset - entry->offset;
935 if ((page_mask & entryOffset) != pageOffset)
936 {
937 err = kIOReturnNotAligned;
938 break;
939 }
940
941 if (kIODefaultCache != cacheMode)
942 {
943 vm_size_t unused = 0;
944 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
945 memEntryCacheMode, NULL, entry->entry);
946 assert (KERN_SUCCESS == err);
947 }
948
949 entryOffset -= pageOffset;
950 if (entryOffset >= entry->size) panic("entryOffset");
951 chunk = entry->size - entryOffset;
952 if (chunk)
953 {
954 vm_map_kernel_flags_t vmk_flags;
955
956 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
957 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
958
959 if (chunk > remain) chunk = remain;
960 if (options & kIOMapPrefault)
961 {
962 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
963
964 err = vm_map_enter_mem_object_prefault(map,
965 &mapAddr,
966 chunk, 0 /* mask */,
967 (VM_FLAGS_FIXED
968 | VM_FLAGS_OVERWRITE),
969 vmk_flags,
970 tag,
971 entry->entry,
972 entryOffset,
973 prot, // cur
974 prot, // max
975 &pageList[currentPageIndex],
976 nb_pages);
977
978 // Compute the next index in the page list.
979 currentPageIndex += nb_pages;
980 assert(currentPageIndex <= _pages);
981 }
982 else
983 {
984 err = vm_map_enter_mem_object(map,
985 &mapAddr,
986 chunk, 0 /* mask */,
987 (VM_FLAGS_FIXED
988 | VM_FLAGS_OVERWRITE),
989 vmk_flags,
990 tag,
991 entry->entry,
992 entryOffset,
993 false, // copy
994 prot, // cur
995 prot, // max
996 VM_INHERIT_NONE);
997 }
998 if (KERN_SUCCESS != err) break;
999 remain -= chunk;
1000 if (!remain) break;
1001 mapAddr += chunk;
1002 offset += chunk - pageOffset;
1003 }
1004 pageOffset = 0;
1005 entry++;
1006 entryIdx++;
1007 if (entryIdx >= ref->count)
1008 {
1009 err = kIOReturnOverrun;
1010 break;
1011 }
1012 }
1013
1014 if ((KERN_SUCCESS != err) && didAlloc)
1015 {
1016 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1017 addr = 0;
1018 }
1019 *inaddr = addr;
1020
1021 return (err);
1022 }
1023
1024 IOReturn
1025 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1026 IOMemoryReference * ref,
1027 IOByteCount * residentPageCount,
1028 IOByteCount * dirtyPageCount)
1029 {
1030 IOReturn err;
1031 IOMemoryEntry * entries;
1032 unsigned int resident, dirty;
1033 unsigned int totalResident, totalDirty;
1034
1035 totalResident = totalDirty = 0;
1036 err = kIOReturnSuccess;
1037 entries = ref->entries + ref->count;
1038 while (entries > &ref->entries[0])
1039 {
1040 entries--;
1041 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1042 if (KERN_SUCCESS != err) break;
1043 totalResident += resident;
1044 totalDirty += dirty;
1045 }
1046
1047 if (residentPageCount) *residentPageCount = totalResident;
1048 if (dirtyPageCount) *dirtyPageCount = totalDirty;
1049 return (err);
1050 }
1051
1052 IOReturn
1053 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1054 IOMemoryReference * ref,
1055 IOOptionBits newState,
1056 IOOptionBits * oldState)
1057 {
1058 IOReturn err;
1059 IOMemoryEntry * entries;
1060 vm_purgable_t control;
1061 int totalState, state;
1062
1063 totalState = kIOMemoryPurgeableNonVolatile;
1064 err = kIOReturnSuccess;
1065 entries = ref->entries + ref->count;
1066 while (entries > &ref->entries[0])
1067 {
1068 entries--;
1069
1070 err = purgeableControlBits(newState, &control, &state);
1071 if (KERN_SUCCESS != err) break;
1072 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1073 if (KERN_SUCCESS != err) break;
1074 err = purgeableStateBits(&state);
1075 if (KERN_SUCCESS != err) break;
1076
1077 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1078 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1079 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1080 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1081 else totalState = kIOMemoryPurgeableNonVolatile;
1082 }
1083
1084 if (oldState) *oldState = totalState;
1085 return (err);
1086 }
1087
1088 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1089
1090 IOMemoryDescriptor *
1091 IOMemoryDescriptor::withAddress(void * address,
1092 IOByteCount length,
1093 IODirection direction)
1094 {
1095 return IOMemoryDescriptor::
1096 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1097 }
1098
1099 #ifndef __LP64__
1100 IOMemoryDescriptor *
1101 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1102 IOByteCount length,
1103 IODirection direction,
1104 task_t task)
1105 {
1106 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1107 if (that)
1108 {
1109 if (that->initWithAddress(address, length, direction, task))
1110 return that;
1111
1112 that->release();
1113 }
1114 return 0;
1115 }
1116 #endif /* !__LP64__ */
1117
1118 IOMemoryDescriptor *
1119 IOMemoryDescriptor::withPhysicalAddress(
1120 IOPhysicalAddress address,
1121 IOByteCount length,
1122 IODirection direction )
1123 {
1124 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1125 }
1126
1127 #ifndef __LP64__
1128 IOMemoryDescriptor *
1129 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1130 UInt32 withCount,
1131 IODirection direction,
1132 task_t task,
1133 bool asReference)
1134 {
1135 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1136 if (that)
1137 {
1138 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1139 return that;
1140
1141 that->release();
1142 }
1143 return 0;
1144 }
1145 #endif /* !__LP64__ */
1146
1147 IOMemoryDescriptor *
1148 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1149 mach_vm_size_t length,
1150 IOOptionBits options,
1151 task_t task)
1152 {
1153 IOAddressRange range = { address, length };
1154 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1155 }
1156
1157 IOMemoryDescriptor *
1158 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1159 UInt32 rangeCount,
1160 IOOptionBits options,
1161 task_t task)
1162 {
1163 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1164 if (that)
1165 {
1166 if (task)
1167 options |= kIOMemoryTypeVirtual64;
1168 else
1169 options |= kIOMemoryTypePhysical64;
1170
1171 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1172 return that;
1173
1174 that->release();
1175 }
1176
1177 return 0;
1178 }
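/*
 * Illustrative sketch (not part of this file's build): a typical client
 * wraps a task's buffer with one of the factories above, wires it with
 * prepare(), and balances with complete()/release(). The buffer, length
 * and task values here are hypothetical.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userBuffer, userLength, kIODirectionOutIn, clientTask);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // memory is wired; safe to look up physical segments / run DMA
 *       md->complete();
 *   }
 *   if (md) md->release();
 */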
1179
1180
1181 /*
1182 * withOptions:
1183 *
1184 * Create a new IOMemoryDescriptor. The buffer is made up of several
1185 * virtual address ranges, from a given task.
1186 *
1187 * Passing the ranges as a reference will avoid an extra allocation.
1188 */
1189 IOMemoryDescriptor *
1190 IOMemoryDescriptor::withOptions(void * buffers,
1191 UInt32 count,
1192 UInt32 offset,
1193 task_t task,
1194 IOOptionBits opts,
1195 IOMapper * mapper)
1196 {
1197 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1198
1199 if (self
1200 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1201 {
1202 self->release();
1203 return 0;
1204 }
1205
1206 return self;
1207 }
1208
1209 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1210 UInt32 count,
1211 UInt32 offset,
1212 task_t task,
1213 IOOptionBits options,
1214 IOMapper * mapper)
1215 {
1216 return( false );
1217 }
1218
1219 #ifndef __LP64__
1220 IOMemoryDescriptor *
1221 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1222 UInt32 withCount,
1223 IODirection direction,
1224 bool asReference)
1225 {
1226 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1227 if (that)
1228 {
1229 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1230 return that;
1231
1232 that->release();
1233 }
1234 return 0;
1235 }
1236
1237 IOMemoryDescriptor *
1238 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1239 IOByteCount offset,
1240 IOByteCount length,
1241 IODirection direction)
1242 {
1243 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1244 }
1245 #endif /* !__LP64__ */
1246
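// Create a descriptor that shares originalMD's named-entry reference
// (built with kIOMemoryReferenceReuse) so both track the same underlying
// memory. If the new reference turns out identical to the original's,
// originalMD itself is retained and returned instead of a new object.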
1247 IOMemoryDescriptor *
1248 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1249 {
1250 IOGeneralMemoryDescriptor *origGenMD =
1251 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1252
1253 if (origGenMD)
1254 return IOGeneralMemoryDescriptor::
1255 withPersistentMemoryDescriptor(origGenMD);
1256 else
1257 return 0;
1258 }
1259
1260 IOMemoryDescriptor *
1261 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1262 {
1263 IOMemoryReference * memRef;
1264
1265 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1266
1267 if (memRef == originalMD->_memRef)
1268 {
1269 originalMD->retain(); // Add a new reference to ourselves
1270 originalMD->memoryReferenceRelease(memRef);
1271 return originalMD;
1272 }
1273
1274 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1275 IOMDPersistentInitData initData = { originalMD, memRef };
1276
1277 if (self
1278 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1279 self->release();
1280 self = 0;
1281 }
1282 return self;
1283 }
1284
1285 #ifndef __LP64__
1286 bool
1287 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1288 IOByteCount withLength,
1289 IODirection withDirection)
1290 {
1291 _singleRange.v.address = (vm_offset_t) address;
1292 _singleRange.v.length = withLength;
1293
1294 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1295 }
1296
1297 bool
1298 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1299 IOByteCount withLength,
1300 IODirection withDirection,
1301 task_t withTask)
1302 {
1303 _singleRange.v.address = address;
1304 _singleRange.v.length = withLength;
1305
1306 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1307 }
1308
1309 bool
1310 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1311 IOPhysicalAddress address,
1312 IOByteCount withLength,
1313 IODirection withDirection )
1314 {
1315 _singleRange.p.address = address;
1316 _singleRange.p.length = withLength;
1317
1318 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1319 }
1320
1321 bool
1322 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1323 IOPhysicalRange * ranges,
1324 UInt32 count,
1325 IODirection direction,
1326 bool reference)
1327 {
1328 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1329
1330 if (reference)
1331 mdOpts |= kIOMemoryAsReference;
1332
1333 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1334 }
1335
1336 bool
1337 IOGeneralMemoryDescriptor::initWithRanges(
1338 IOVirtualRange * ranges,
1339 UInt32 count,
1340 IODirection direction,
1341 task_t task,
1342 bool reference)
1343 {
1344 IOOptionBits mdOpts = direction;
1345
1346 if (reference)
1347 mdOpts |= kIOMemoryAsReference;
1348
1349 if (task) {
1350 mdOpts |= kIOMemoryTypeVirtual;
1351
1352 // Auto-prepare if this is a kernel memory descriptor, as very few
1353 // clients bother to prepare() kernel memory.
1354 // That requirement was never enforced, so auto-preparing is the safe default.
1355 if (task == kernel_task)
1356 mdOpts |= kIOMemoryAutoPrepare;
1357 }
1358 else
1359 mdOpts |= kIOMemoryTypePhysical;
1360
1361 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1362 }
1363 #endif /* !__LP64__ */
1364
1365 /*
1366 * initWithOptions:
1367 *
1368 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1369 * address ranges from a given task, several physical ranges, a UPL from the
1370 * ubc system, or a uio (possibly 64-bit) from the BSD subsystem.
1371 *
1372 * Passing the ranges as a reference will avoid an extra allocation.
1373 *
1374 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1375 * existing instance -- note this behavior is not commonly supported in other
1376 * I/O Kit classes, although it is supported here.
1377 */
1378
1379 bool
1380 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1381 UInt32 count,
1382 UInt32 offset,
1383 task_t task,
1384 IOOptionBits options,
1385 IOMapper * mapper)
1386 {
1387 IOOptionBits type = options & kIOMemoryTypeMask;
1388
1389 #ifndef __LP64__
1390 if (task
1391 && (kIOMemoryTypeVirtual == type)
1392 && vm_map_is_64bit(get_task_map(task))
1393 && ((IOVirtualRange *) buffers)->address)
1394 {
1395 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1396 return false;
1397 }
1398 #endif /* !__LP64__ */
1399
1400 // Grab the original MD's configuration data to initialise the
1401 // arguments to this function.
1402 if (kIOMemoryTypePersistentMD == type) {
1403
1404 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1405 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1406 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1407
1408 // Only accept persistent memory descriptors with valid dataP data.
1409 assert(orig->_rangesCount == 1);
1410 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1411 return false;
1412
1413 _memRef = initData->fMemRef; // Grab the new named entry
1414 options = orig->_flags & ~kIOMemoryAsReference;
1415 type = options & kIOMemoryTypeMask;
1416 buffers = orig->_ranges.v;
1417 count = orig->_rangesCount;
1418
1419 // Now grab the original task and whatever mapper was previously used
1420 task = orig->_task;
1421 mapper = dataP->fMapper;
1422
1423 // We are ready to go through the original initialisation now
1424 }
1425
1426 switch (type) {
1427 case kIOMemoryTypeUIO:
1428 case kIOMemoryTypeVirtual:
1429 #ifndef __LP64__
1430 case kIOMemoryTypeVirtual64:
1431 #endif /* !__LP64__ */
1432 assert(task);
1433 if (!task)
1434 return false;
1435 break;
1436
1437 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1438 #ifndef __LP64__
1439 case kIOMemoryTypePhysical64:
1440 #endif /* !__LP64__ */
1441 case kIOMemoryTypeUPL:
1442 assert(!task);
1443 break;
1444 default:
1445 return false; /* bad argument */
1446 }
1447
1448 assert(buffers);
1449 assert(count);
1450
1451 /*
1452 * We can check the _initialized instance variable before having ever set
1453 * it to an initial value because I/O Kit guarantees that all our instance
1454 * variables are zeroed on an object's allocation.
1455 */
1456
1457 if (_initialized) {
1458 /*
1459 * An existing memory descriptor is being retargeted to point to
1460 * somewhere else. Clean up our present state.
1461 */
1462 IOOptionBits type = _flags & kIOMemoryTypeMask;
1463 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1464 {
1465 while (_wireCount)
1466 complete();
1467 }
1468 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1469 {
1470 if (kIOMemoryTypeUIO == type)
1471 uio_free((uio_t) _ranges.v);
1472 #ifndef __LP64__
1473 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1474 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1475 #endif /* !__LP64__ */
1476 else
1477 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1478 }
1479
1480 options |= (kIOMemoryRedirected & _flags);
1481 if (!(kIOMemoryRedirected & options))
1482 {
1483 if (_memRef)
1484 {
1485 memoryReferenceRelease(_memRef);
1486 _memRef = 0;
1487 }
1488 if (_mappings)
1489 _mappings->flushCollection();
1490 }
1491 }
1492 else {
1493 if (!super::init())
1494 return false;
1495 _initialized = true;
1496 }
1497
1498 // Grab the appropriate mapper
1499 if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone;
1500 if (kIOMemoryMapperNone & options)
1501 mapper = 0; // No Mapper
1502 else if (mapper == kIOMapperSystem) {
1503 IOMapper::checkForSystemMapper();
1504 gIOSystemMapper = mapper = IOMapper::gSystem;
1505 }
1506
1507 // Remove the dynamic internal use flags from the initial setting
1508 options &= ~(kIOMemoryPreparedReadOnly);
1509 _flags = options;
1510 _task = task;
1511
1512 #ifndef __LP64__
1513 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1514 #endif /* !__LP64__ */
1515
1516 _dmaReferences = 0;
1517 __iomd_reservedA = 0;
1518 __iomd_reservedB = 0;
1519 _highestPage = 0;
1520
1521 if (kIOMemoryThreadSafe & options)
1522 {
1523 if (!_prepareLock)
1524 _prepareLock = IOLockAlloc();
1525 }
1526 else if (_prepareLock)
1527 {
1528 IOLockFree(_prepareLock);
1529 _prepareLock = NULL;
1530 }
1531
1532 if (kIOMemoryTypeUPL == type) {
1533
1534 ioGMDData *dataP;
1535 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1536
1537 if (!initMemoryEntries(dataSize, mapper)) return (false);
1538 dataP = getDataP(_memoryEntries);
1539 dataP->fPageCnt = 0;
1540 switch (kIOMemoryDirectionMask & options)
1541 {
1542 case kIODirectionOut:
1543 dataP->fDMAAccess = kIODMAMapReadAccess;
1544 break;
1545 case kIODirectionIn:
1546 dataP->fDMAAccess = kIODMAMapWriteAccess;
1547 break;
1548 case kIODirectionNone:
1549 case kIODirectionOutIn:
1550 default:
1551 panic("bad dir for upl 0x%x\n", (int) options);
1552 break;
1553 }
1554 // _wireCount++; // UPLs start out life wired
1555
1556 _length = count;
1557 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1558
1559 ioPLBlock iopl;
1560 iopl.fIOPL = (upl_t) buffers;
1561 upl_set_referenced(iopl.fIOPL, true);
1562 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1563
1564 if (upl_get_size(iopl.fIOPL) < (count + offset))
1565 panic("short external upl");
1566
1567 _highestPage = upl_get_highest_page(iopl.fIOPL);
1568
1569 // Set the flag kIOPLOnDevice, conveniently equal to 1
1570 iopl.fFlags = pageList->device | kIOPLExternUPL;
1571 if (!pageList->device) {
1572 // Pre-compute the offset into the UPL's page list
1573 pageList = &pageList[atop_32(offset)];
1574 offset &= PAGE_MASK;
1575 }
1576 iopl.fIOMDOffset = 0;
1577 iopl.fMappedPage = 0;
1578 iopl.fPageInfo = (vm_address_t) pageList;
1579 iopl.fPageOffset = offset;
1580 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1581 }
1582 else {
1583 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1584 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1585
1586 // Initialize the memory descriptor
1587 if (options & kIOMemoryAsReference) {
1588 #ifndef __LP64__
1589 _rangesIsAllocated = false;
1590 #endif /* !__LP64__ */
1591
1592 // Hack assignment to get the buffer arg into _ranges.
1593 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1594 // work, C++ sigh.
1595 // This also initialises the uio & physical ranges.
1596 _ranges.v = (IOVirtualRange *) buffers;
1597 }
1598 else {
1599 #ifndef __LP64__
1600 _rangesIsAllocated = true;
1601 #endif /* !__LP64__ */
1602 switch (type)
1603 {
1604 case kIOMemoryTypeUIO:
1605 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1606 break;
1607
1608 #ifndef __LP64__
1609 case kIOMemoryTypeVirtual64:
1610 case kIOMemoryTypePhysical64:
1611 if (count == 1
1612 #ifndef __arm__
1613 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1614 #endif
1615 ) {
1616 if (kIOMemoryTypeVirtual64 == type)
1617 type = kIOMemoryTypeVirtual;
1618 else
1619 type = kIOMemoryTypePhysical;
1620 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1621 _rangesIsAllocated = false;
1622 _ranges.v = &_singleRange.v;
1623 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1624 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1625 break;
1626 }
1627 _ranges.v64 = IONew(IOAddressRange, count);
1628 if (!_ranges.v64)
1629 return false;
1630 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1631 break;
1632 #endif /* !__LP64__ */
1633 case kIOMemoryTypeVirtual:
1634 case kIOMemoryTypePhysical:
1635 if (count == 1) {
1636 _flags |= kIOMemoryAsReference;
1637 #ifndef __LP64__
1638 _rangesIsAllocated = false;
1639 #endif /* !__LP64__ */
1640 _ranges.v = &_singleRange.v;
1641 } else {
1642 _ranges.v = IONew(IOVirtualRange, count);
1643 if (!_ranges.v)
1644 return false;
1645 }
1646 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1647 break;
1648 }
1649 }
1650 _rangesCount = count;
1651
1652 // Find starting address within the vector of ranges
1653 Ranges vec = _ranges;
1654 mach_vm_size_t totalLength = 0;
1655 unsigned int ind, pages = 0;
1656 for (ind = 0; ind < count; ind++) {
1657 mach_vm_address_t addr;
1658 mach_vm_address_t endAddr;
1659 mach_vm_size_t len;
1660
1661 // addr & len are returned by this function
1662 getAddrLenForInd(addr, len, type, vec, ind);
1663 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
1664 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
1665 if (os_add_overflow(totalLength, len, &totalLength)) break;
1666 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1667 {
1668 ppnum_t highPage = atop_64(addr + len - 1);
1669 if (highPage > _highestPage)
1670 _highestPage = highPage;
1671 }
1672 }
1673 if ((ind < count)
1674 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1675
1676 _length = totalLength;
1677 _pages = pages;
1678
1679 // Auto-prepare memory at creation time.
1680 // Implied completion when the descriptor is freed
1681
1682
1683 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1684 _wireCount++; // Physical MDs are, by definition, wired
1685 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1686 ioGMDData *dataP;
1687 unsigned dataSize;
1688
1689 if (_pages > atop_64(max_mem)) return false;
1690
1691 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1692 if (!initMemoryEntries(dataSize, mapper)) return false;
1693 dataP = getDataP(_memoryEntries);
1694 dataP->fPageCnt = _pages;
1695
1696 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1697 && (VM_KERN_MEMORY_NONE == _kernelTag))
1698 {
1699 _kernelTag = IOMemoryTag(kernel_map);
1700 }
1701
1702 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1703 {
1704 IOReturn
1705 err = memoryReferenceCreate(0, &_memRef);
1706 if (kIOReturnSuccess != err) return false;
1707 }
1708
1709 if ((_flags & kIOMemoryAutoPrepare)
1710 && prepare() != kIOReturnSuccess)
1711 return false;
1712 }
1713 }
1714
1715 return true;
1716 }
1717
1718 /*
1719 * free
1720 *
1721 * Free resources.
1722 */
1723 void IOGeneralMemoryDescriptor::free()
1724 {
1725 IOOptionBits type = _flags & kIOMemoryTypeMask;
1726
1727 if( reserved)
1728 {
1729 LOCK;
1730 reserved->dp.memory = 0;
1731 UNLOCK;
1732 }
1733 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1734 {
1735 ioGMDData * dataP;
1736 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
1737 {
1738 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1739 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1740 }
1741 }
1742 else
1743 {
1744 while (_wireCount) complete();
1745 }
1746
1747 if (_memoryEntries) _memoryEntries->release();
1748
1749 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1750 {
1751 if (kIOMemoryTypeUIO == type)
1752 uio_free((uio_t) _ranges.v);
1753 #ifndef __LP64__
1754 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1755 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1756 #endif /* !__LP64__ */
1757 else
1758 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1759
1760 _ranges.v = NULL;
1761 }
1762
1763 if (reserved)
1764 {
1765 if (reserved->dp.devicePager)
1766 {
1767 // The memory entry holds a reference on the device pager, which owns
1768 // reserved (IOMemoryDescriptorReserved), so do not touch reserved after this point
1769 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1770 }
1771 else
1772 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1773 reserved = NULL;
1774 }
1775
1776 if (_memRef) memoryReferenceRelease(_memRef);
1777 if (_prepareLock) IOLockFree(_prepareLock);
1778
1779 super::free();
1780 }
1781
1782 #ifndef __LP64__
1783 void IOGeneralMemoryDescriptor::unmapFromKernel()
1784 {
1785 panic("IOGMD::unmapFromKernel deprecated");
1786 }
1787
1788 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1789 {
1790 panic("IOGMD::mapIntoKernel deprecated");
1791 }
1792 #endif /* !__LP64__ */
1793
1794 /*
1795 * getDirection:
1796 *
1797 * Get the direction of the transfer.
1798 */
1799 IODirection IOMemoryDescriptor::getDirection() const
1800 {
1801 #ifndef __LP64__
1802 if (_direction)
1803 return _direction;
1804 #endif /* !__LP64__ */
1805 return (IODirection) (_flags & kIOMemoryDirectionMask);
1806 }
1807
1808 /*
1809 * getLength:
1810 *
1811 * Get the length of the transfer (over all ranges).
1812 */
1813 IOByteCount IOMemoryDescriptor::getLength() const
1814 {
1815 return _length;
1816 }
1817
1818 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1819 {
1820 _tag = tag;
1821 }
1822
1823 IOOptionBits IOMemoryDescriptor::getTag( void )
1824 {
1825 return( _tag);
1826 }
1827
1828 uint64_t IOMemoryDescriptor::getFlags(void)
1829 {
1830 return (_flags);
1831 }
1832
1833 #ifndef __LP64__
1834 #pragma clang diagnostic push
1835 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1836
1837 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1838 IOPhysicalAddress
1839 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1840 {
1841 addr64_t physAddr = 0;
1842
1843 if( prepare() == kIOReturnSuccess) {
1844 physAddr = getPhysicalSegment64( offset, length );
1845 complete();
1846 }
1847
1848 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1849 }
1850
1851 #pragma clang diagnostic pop
1852
1853 #endif /* !__LP64__ */
1854
1855 IOByteCount IOMemoryDescriptor::readBytes
1856 (IOByteCount offset, void *bytes, IOByteCount length)
1857 {
1858 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1859 IOByteCount remaining;
1860
1861 // Assert that this entire I/O is within the available range
1862 assert(offset <= _length);
1863 assert(offset + length <= _length);
1864 if ((offset >= _length)
1865 || ((offset + length) > _length)) {
1866 return 0;
1867 }
1868
1869 assert (!(kIOMemoryRemote & _flags));
1870 if (kIOMemoryRemote & _flags) return (0);
1871
1872 if (kIOMemoryThreadSafe & _flags)
1873 LOCK;
1874
1875 remaining = length = min(length, _length - offset);
1876 while (remaining) { // (process another target segment?)
1877 addr64_t srcAddr64;
1878 IOByteCount srcLen;
1879
1880 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1881 if (!srcAddr64)
1882 break;
1883
1884 // Clip segment length to remaining
1885 if (srcLen > remaining)
1886 srcLen = remaining;
1887
1888 copypv(srcAddr64, dstAddr, srcLen,
1889 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1890
1891 dstAddr += srcLen;
1892 offset += srcLen;
1893 remaining -= srcLen;
1894 }
1895
1896 if (kIOMemoryThreadSafe & _flags)
1897 UNLOCK;
1898
1899 assert(!remaining);
1900
1901 return length - remaining;
1902 }
1903
1904 IOByteCount IOMemoryDescriptor::writeBytes
1905 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1906 {
1907 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1908 IOByteCount remaining;
1909 IOByteCount offset = inoffset;
1910
1911 // Assert that this entire I/O is within the available range
1912 assert(offset <= _length);
1913 assert(offset + length <= _length);
1914
1915 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1916
1917 if ( (kIOMemoryPreparedReadOnly & _flags)
1918 || (offset >= _length)
1919 || ((offset + length) > _length)) {
1920 return 0;
1921 }
1922
1923 assert (!(kIOMemoryRemote & _flags));
1924 if (kIOMemoryRemote & _flags) return (0);
1925
1926 if (kIOMemoryThreadSafe & _flags)
1927 LOCK;
1928
1929 remaining = length = min(length, _length - offset);
1930 while (remaining) { // (process another target segment?)
1931 addr64_t dstAddr64;
1932 IOByteCount dstLen;
1933
1934 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1935 if (!dstAddr64)
1936 break;
1937
1938 // Clip segment length to remaining
1939 if (dstLen > remaining)
1940 dstLen = remaining;
1941
1942 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1943 else
1944 {
1945 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1946 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1947 srcAddr += dstLen;
1948 }
1949 offset += dstLen;
1950 remaining -= dstLen;
1951 }
1952
1953 if (kIOMemoryThreadSafe & _flags)
1954 UNLOCK;
1955
1956 assert(!remaining);
1957
1958 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1959
1960 return length - remaining;
1961 }
1962
1963 #ifndef __LP64__
1964 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1965 {
1966 panic("IOGMD::setPosition deprecated");
1967 }
1968 #endif /* !__LP64__ */
1969
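// Preparation IDs come from a global 64-bit counter (seeded at 1ULL << 32)
// and are assigned lazily the first time getPreparationID() is called on a
// prepared descriptor; kIOPreparationIDUnprepared is returned while the
// descriptor is not wired. Clients can compare IDs to detect re-preparation.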
1970 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1971
1972 uint64_t
1973 IOGeneralMemoryDescriptor::getPreparationID( void )
1974 {
1975 ioGMDData *dataP;
1976
1977 if (!_wireCount)
1978 return (kIOPreparationIDUnprepared);
1979
1980 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1981 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1982 {
1983 IOMemoryDescriptor::setPreparationID();
1984 return (IOMemoryDescriptor::getPreparationID());
1985 }
1986
1987 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1988 return (kIOPreparationIDUnprepared);
1989
1990 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1991 {
1992 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1993 }
1994 return (dataP->fPreparationID);
1995 }
1996
1997 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1998 {
1999 if (!reserved)
2000 {
2001 reserved = IONew(IOMemoryDescriptorReserved, 1);
2002 if (reserved)
2003 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
2004 }
2005 return (reserved);
2006 }
2007
2008 void IOMemoryDescriptor::setPreparationID( void )
2009 {
2010 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
2011 {
2012 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
2013 }
2014 }
2015
2016 uint64_t IOMemoryDescriptor::getPreparationID( void )
2017 {
2018 if (reserved)
2019 return (reserved->preparationID);
2020 else
2021 return (kIOPreparationIDUnsupported);
2022 }
2023
2024 void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
2025 {
2026 _kernelTag = kernelTag;
2027 _userTag = userTag;
2028 }
2029
2030 vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
2031 {
2032 if (vm_kernel_map_is_kernel(map))
2033 {
2034 if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag);
2035 }
2036 else
2037 {
2038 if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag);
2039 }
2040 return (IOMemoryTag(map));
2041 }
2042
2043 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2044 {
2045 IOReturn err = kIOReturnSuccess;
2046 DMACommandOps params;
2047 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2048 ioGMDData *dataP;
2049
2050 params = (op & ~kIOMDDMACommandOperationMask & op);
2051 op &= kIOMDDMACommandOperationMask;
2052
2053 if (kIOMDDMAMap == op)
2054 {
2055 if (dataSize < sizeof(IOMDDMAMapArgs))
2056 return kIOReturnUnderrun;
2057
2058 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2059
2060 if (!_memoryEntries
2061 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2062
2063 if (_memoryEntries && data->fMapper)
2064 {
2065 bool remap, keepMap;
2066 dataP = getDataP(_memoryEntries);
2067
2068 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2069 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2070
2071 keepMap = (data->fMapper == gIOSystemMapper);
2072 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2073
2074 if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockLock(_prepareLock);
2075
2076 remap = (!keepMap);
2077 remap |= (dataP->fDMAMapNumAddressBits < 64)
2078 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2079 remap |= (dataP->fDMAMapAlignment > page_size);
2080
2081 if (remap || !dataP->fMappedBaseValid)
2082 {
2083 // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2084 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2085 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid)
2086 {
2087 dataP->fMappedBase = data->fAlloc;
2088 dataP->fMappedBaseValid = true;
2089 dataP->fMappedLength = data->fAllocLength;
2090 data->fAllocLength = 0; // IOMD owns the alloc now
2091 }
2092 }
2093 else
2094 {
2095 data->fAlloc = dataP->fMappedBase;
2096 data->fAllocLength = 0; // give out IOMD map
2097 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2098 }
2099 data->fMapContig = !dataP->fDiscontig;
2100
2101 if ((data->fMapper == gIOSystemMapper) && _prepareLock) IOLockUnlock(_prepareLock);
2102 }
2103 return (err);
2104 }
2105 if (kIOMDDMAUnmap == op)
2106 {
2107 if (dataSize < sizeof(IOMDDMAMapArgs))
2108 return kIOReturnUnderrun;
2109 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2110
2111 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2112
2113 return kIOReturnSuccess;
2114 }
2115
2116 if (kIOMDAddDMAMapSpec == op)
2117 {
2118 if (dataSize < sizeof(IODMAMapSpecification))
2119 return kIOReturnUnderrun;
2120
2121 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2122
2123 if (!_memoryEntries
2124 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2125
2126 if (_memoryEntries)
2127 {
2128 dataP = getDataP(_memoryEntries);
2129 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2130 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2131 if (data->alignment > dataP->fDMAMapAlignment)
2132 dataP->fDMAMapAlignment = data->alignment;
2133 }
2134 return kIOReturnSuccess;
2135 }
2136
2137 if (kIOMDGetCharacteristics == op) {
2138
2139 if (dataSize < sizeof(IOMDDMACharacteristics))
2140 return kIOReturnUnderrun;
2141
2142 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2143 data->fLength = _length;
2144 data->fSGCount = _rangesCount;
2145 data->fPages = _pages;
2146 data->fDirection = getDirection();
2147 if (!_wireCount)
2148 data->fIsPrepared = false;
2149 else {
2150 data->fIsPrepared = true;
2151 data->fHighestPage = _highestPage;
2152 if (_memoryEntries)
2153 {
2154 dataP = getDataP(_memoryEntries);
2155 ioPLBlock *ioplList = getIOPLList(dataP);
2156 UInt count = getNumIOPL(_memoryEntries, dataP);
2157 if (count == 1)
2158 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2159 }
2160 }
2161
2162 return kIOReturnSuccess;
2163 }
2164
2165 else if (kIOMDDMAActive == op)
2166 {
2167 if (params)
2168 {
2169 int16_t prior;
2170 prior = OSAddAtomic16(1, &md->_dmaReferences);
2171 if (!prior) md->_mapName = NULL;
2172 }
2173 else
2174 {
2175 if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences);
2176 else panic("_dmaReferences underflow");
2177 }
2178 }
2179 else if (kIOMDWalkSegments != op)
2180 return kIOReturnBadArgument;
2181
2182 // Get the next segment
2183 struct InternalState {
2184 IOMDDMAWalkSegmentArgs fIO;
2185 UInt fOffset2Index;
2186 UInt fIndex;
2187 UInt fNextOffset;
2188 } *isP;
2189
2190 // Find the next segment
2191 if (dataSize < sizeof(*isP))
2192 return kIOReturnUnderrun;
2193
2194 isP = (InternalState *) vData;
2195 UInt offset = isP->fIO.fOffset;
2196 uint8_t mapped = isP->fIO.fMapped;
2197 uint64_t mappedBase;
2198
2199 if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached);
2200
2201 if (IOMapper::gSystem && mapped
2202 && (!(kIOMemoryHostOnly & _flags))
2203 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid))
2204 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2205 {
2206 if (!_memoryEntries
2207 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2208
2209 dataP = getDataP(_memoryEntries);
2210 if (dataP->fMapper)
2211 {
2212 IODMAMapSpecification mapSpec;
2213 bzero(&mapSpec, sizeof(mapSpec));
2214 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2215 mapSpec.alignment = dataP->fDMAMapAlignment;
2216 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2217 if (kIOReturnSuccess != err) return (err);
2218 dataP->fMappedBaseValid = true;
2219 }
2220 }
2221
2222 if (kIOMDDMAWalkMappedLocal == mapped) mappedBase = isP->fIO.fMappedBase;
2223 else if (mapped)
2224 {
2225 if (IOMapper::gSystem
2226 && (!(kIOMemoryHostOnly & _flags))
2227 && _memoryEntries
2228 && (dataP = getDataP(_memoryEntries))
2229 && dataP->fMappedBaseValid)
2230 {
2231 mappedBase = dataP->fMappedBase;
2232 }
2233 else mapped = 0;
2234 }
2235
2236 if (offset >= _length)
2237 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2238
2239 // Validate the previous offset
2240 UInt ind, off2Ind = isP->fOffset2Index;
2241 if (!params
2242 && offset
2243 && (offset == isP->fNextOffset || off2Ind <= offset))
2244 ind = isP->fIndex;
2245 else
2246 ind = off2Ind = 0; // Start from beginning
2247
2248 UInt length;
2249 UInt64 address;
2250
2251 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2252
2253 // Physical address based memory descriptor
2254 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2255
2256 // Find the range after the one that contains the offset
2257 mach_vm_size_t len;
2258 for (len = 0; off2Ind <= offset; ind++) {
2259 len = physP[ind].length;
2260 off2Ind += len;
2261 }
2262
2263 // Calculate length within range and starting address
2264 length = off2Ind - offset;
2265 address = physP[ind - 1].address + len - length;
2266
2267 if (true && mapped)
2268 {
2269 address = mappedBase + offset;
2270 }
2271 else
2272 {
2273 // see how far we can coalesce ranges
2274 while (ind < _rangesCount && address + length == physP[ind].address) {
2275 len = physP[ind].length;
2276 length += len;
2277 off2Ind += len;
2278 ind++;
2279 }
2280 }
2281
2282 // correct contiguous check overshoot
2283 ind--;
2284 off2Ind -= len;
2285 }
2286 #ifndef __LP64__
2287 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2288
2289 // Physical address based memory descriptor
2290 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2291
2292 // Find the range after the one that contains the offset
2293 mach_vm_size_t len;
2294 for (len = 0; off2Ind <= offset; ind++) {
2295 len = physP[ind].length;
2296 off2Ind += len;
2297 }
2298
2299 // Calculate length within range and starting address
2300 length = off2Ind - offset;
2301 address = physP[ind - 1].address + len - length;
2302
2303 if (true && mapped)
2304 {
2305 address = mappedBase + offset;
2306 }
2307 else
2308 {
2309 // see how far we can coalesce ranges
2310 while (ind < _rangesCount && address + length == physP[ind].address) {
2311 len = physP[ind].length;
2312 length += len;
2313 off2Ind += len;
2314 ind++;
2315 }
2316 }
2317 // correct contiguous check overshoot
2318 ind--;
2319 off2Ind -= len;
2320 }
2321 #endif /* !__LP64__ */
2322 else do {
2323 if (!_wireCount)
2324 panic("IOGMD: not wired for the IODMACommand");
2325
2326 assert(_memoryEntries);
2327
2328 dataP = getDataP(_memoryEntries);
2329 const ioPLBlock *ioplList = getIOPLList(dataP);
2330 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2331 upl_page_info_t *pageList = getPageList(dataP);
2332
2333 assert(numIOPLs > 0);
2334
2335 // Scan through iopl info blocks looking for block containing offset
2336 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2337 ind++;
2338
2339 // Go back to actual range as search goes past it
2340 ioPLBlock ioplInfo = ioplList[ind - 1];
2341 off2Ind = ioplInfo.fIOMDOffset;
2342
2343 if (ind < numIOPLs)
2344 length = ioplList[ind].fIOMDOffset;
2345 else
2346 length = _length;
2347 length -= offset; // Remainder within iopl
2348
2349 // Make offset relative to the start of this iopl within the total list
2350 offset -= off2Ind;
2351
2352 // If a mapped address is requested and this is a pre-mapped IOPL
2353 // then we just need to compute an offset relative to the mapped base.
2354 if (mapped) {
2355 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2356 address = trunc_page_64(mappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2357 continue; // Done; leave the do/while(false) now
2358 }
2359
2360 // The offset is rebased into the current iopl.
2361 // Now add the iopl 1st page offset.
2362 offset += ioplInfo.fPageOffset;
2363
2364 // For external UPLs the fPageInfo field points directly to
2365 // the upl's upl_page_info_t array.
2366 if (ioplInfo.fFlags & kIOPLExternUPL)
2367 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2368 else
2369 pageList = &pageList[ioplInfo.fPageInfo];
2370
2371 // Check for direct device non-paged memory
2372 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2373 address = ptoa_64(pageList->phys_addr) + offset;
2374 continue; // Done; leave the do/while(false) now
2375 }
2376
2377 // Now we need to compute the index into the pageList
2378 UInt pageInd = atop_32(offset);
2379 offset &= PAGE_MASK;
2380
2381 // Compute the starting address of this segment
2382 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2383 if (!pageAddr) {
2384 panic("!pageList phys_addr");
2385 }
2386
2387 address = ptoa_64(pageAddr) + offset;
2388
2389 // length is currently set to the length of the remainder of the iopl.
2390 // We need to check that the remainder of the iopl is contiguous.
2391 // This is indicated by sequential pageList[pageInd].phys_addr values.
2392 IOByteCount contigLength = PAGE_SIZE - offset;
2393 while (contigLength < length
2394 && ++pageAddr == pageList[++pageInd].phys_addr)
2395 {
2396 contigLength += PAGE_SIZE;
2397 }
2398
2399 if (contigLength < length)
2400 length = contigLength;
2401
2402
2403 assert(address);
2404 assert(length);
2405
2406 } while (false);
2407
2408 // Update return values and state
2409 isP->fIO.fIOVMAddr = address;
2410 isP->fIO.fLength = length;
2411 isP->fIndex = ind;
2412 isP->fOffset2Index = off2Ind;
2413 isP->fNextOffset = isP->fIO.fOffset + length;
2414
2415 return kIOReturnSuccess;
2416 }
2417
2418 addr64_t
2419 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2420 {
2421 IOReturn ret;
2422 mach_vm_address_t address = 0;
2423 mach_vm_size_t length = 0;
2424 IOMapper * mapper = gIOSystemMapper;
2425 IOOptionBits type = _flags & kIOMemoryTypeMask;
2426
2427 if (lengthOfSegment)
2428 *lengthOfSegment = 0;
2429
2430 if (offset >= _length)
2431 return 0;
2432
2433 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2434 // support the unwired memory case in IOGeneralMemoryDescriptor. hibernate_write_image() cannot use
2435 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation
2436 // an IOMemoryMap would incur. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
2437
2438 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2439 {
2440 unsigned rangesIndex = 0;
2441 Ranges vec = _ranges;
2442 mach_vm_address_t addr;
2443
2444 // Find starting address within the vector of ranges
2445 for (;;) {
2446 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2447 if (offset < length)
2448 break;
2449 offset -= length; // (make offset relative)
2450 rangesIndex++;
2451 }
2452
2453 // Now that we have the starting range,
2454 // let's find the last contiguous range
2455 addr += offset;
2456 length -= offset;
2457
2458 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2459 mach_vm_address_t newAddr;
2460 mach_vm_size_t newLen;
2461
2462 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2463 if (addr + length != newAddr)
2464 break;
2465 length += newLen;
2466 }
2467 if (addr)
2468 address = (IOPhysicalAddress) addr; // Truncate address to 32-bit
2469 }
2470 else
2471 {
2472 IOMDDMAWalkSegmentState _state;
2473 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2474
2475 state->fOffset = offset;
2476 state->fLength = _length - offset;
2477 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
2478
2479 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2480
2481 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2482 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2483 ret, this, state->fOffset,
2484 state->fIOVMAddr, state->fLength);
2485 if (kIOReturnSuccess == ret)
2486 {
2487 address = state->fIOVMAddr;
2488 length = state->fLength;
2489 }
2490
2491 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2492 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2493
2494 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2495 {
2496 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2497 {
2498 addr64_t origAddr = address;
2499 IOByteCount origLen = length;
2500
2501 address = mapper->mapToPhysicalAddress(origAddr);
2502 length = page_size - (address & (page_size - 1));
2503 while ((length < origLen)
2504 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2505 length += page_size;
2506 if (length > origLen)
2507 length = origLen;
2508 }
2509 }
2510 }
2511
2512 if (!address)
2513 length = 0;
2514
2515 if (lengthOfSegment)
2516 *lengthOfSegment = length;
2517
2518 return (address);
2519 }
2520
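/*
 * Illustrative sketch: walking a prepared descriptor's physical segments
 * with getPhysicalSegment(). kIOMemoryMapperNone asks for CPU-physical
 * addresses rather than system-mapper (IOMMU) addresses. `md` is a
 * hypothetical name.
 *
 *   IOByteCount offset = 0;
 *   while (offset < md->getLength()) {
 *       IOByteCount segLen;
 *       addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
 *       if (!segAddr || !segLen) break;
 *       // ... emit one scatter/gather element for (segAddr, segLen) ...
 *       offset += segLen;
 *   }
 */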
2521 #ifndef __LP64__
2522 #pragma clang diagnostic push
2523 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2524
2525 addr64_t
2526 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2527 {
2528 addr64_t address = 0;
2529
2530 if (options & _kIOMemorySourceSegment)
2531 {
2532 address = getSourceSegment(offset, lengthOfSegment);
2533 }
2534 else if (options & kIOMemoryMapperNone)
2535 {
2536 address = getPhysicalSegment64(offset, lengthOfSegment);
2537 }
2538 else
2539 {
2540 address = getPhysicalSegment(offset, lengthOfSegment);
2541 }
2542
2543 return (address);
2544 }
2545 #pragma clang diagnostic pop
2546
2547 addr64_t
2548 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2549 {
2550 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2551 }
2552
2553 IOPhysicalAddress
2554 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2555 {
2556 addr64_t address = 0;
2557 IOByteCount length = 0;
2558
2559 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2560
2561 if (lengthOfSegment)
2562 length = *lengthOfSegment;
2563
2564 if ((address + length) > 0x100000000ULL)
2565 {
2566 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2567 address, (long) length, (getMetaClass())->getClassName());
2568 }
2569
2570 return ((IOPhysicalAddress) address);
2571 }
2572
2573 addr64_t
2574 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2575 {
2576 IOPhysicalAddress phys32;
2577 IOByteCount length;
2578 addr64_t phys64;
2579 IOMapper * mapper = 0;
2580
2581 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2582 if (!phys32)
2583 return 0;
2584
2585 if (gIOSystemMapper)
2586 mapper = gIOSystemMapper;
2587
2588 if (mapper)
2589 {
2590 IOByteCount origLen;
2591
2592 phys64 = mapper->mapToPhysicalAddress(phys32);
2593 origLen = *lengthOfSegment;
2594 length = page_size - (phys64 & (page_size - 1));
2595 while ((length < origLen)
2596 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2597 length += page_size;
2598 if (length > origLen)
2599 length = origLen;
2600
2601 *lengthOfSegment = length;
2602 }
2603 else
2604 phys64 = (addr64_t) phys32;
2605
2606 return phys64;
2607 }
2608
2609 IOPhysicalAddress
2610 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2611 {
2612 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2613 }
2614
2615 IOPhysicalAddress
2616 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2617 {
2618 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2619 }
2620
2621 #pragma clang diagnostic push
2622 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2623
2624 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2625 IOByteCount * lengthOfSegment)
2626 {
2627 if (_task == kernel_task)
2628 return (void *) getSourceSegment(offset, lengthOfSegment);
2629 else
2630 panic("IOGMD::getVirtualSegment deprecated");
2631
2632 return 0;
2633 }
2634 #pragma clang diagnostic pop
2635 #endif /* !__LP64__ */
2636
2637 IOReturn
2638 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2639 {
2640 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2641 DMACommandOps params;
2642 IOReturn err;
2643
2644 params = (op & ~kIOMDDMACommandOperationMask & op);
2645 op &= kIOMDDMACommandOperationMask;
2646
2647 if (kIOMDGetCharacteristics == op) {
2648 if (dataSize < sizeof(IOMDDMACharacteristics))
2649 return kIOReturnUnderrun;
2650
2651 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2652 data->fLength = getLength();
2653 data->fSGCount = 0;
2654 data->fDirection = getDirection();
2655 data->fIsPrepared = true; // Assume prepared - fails safe
2656 }
2657 else if (kIOMDWalkSegments == op) {
2658 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2659 return kIOReturnUnderrun;
2660
2661 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2662 IOByteCount offset = (IOByteCount) data->fOffset;
2663
2664 IOPhysicalLength length;
2665 if (data->fMapped && IOMapper::gSystem)
2666 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2667 else
2668 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2669 data->fLength = length;
2670 }
2671 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2672 else if (kIOMDDMAMap == op)
2673 {
2674 if (dataSize < sizeof(IOMDDMAMapArgs))
2675 return kIOReturnUnderrun;
2676 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2677
2678 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2679
2680 data->fMapContig = true;
2681 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2682
2683 return (err);
2684 }
2685 else if (kIOMDDMAUnmap == op)
2686 {
2687 if (dataSize < sizeof(IOMDDMAMapArgs))
2688 return kIOReturnUnderrun;
2689 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2690
2691 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2692
2693 return (kIOReturnSuccess);
2694 }
2695 else return kIOReturnBadArgument;
2696
2697 return kIOReturnSuccess;
2698 }
2699
2700 IOReturn
2701 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2702 IOOptionBits * oldState )
2703 {
2704 IOReturn err = kIOReturnSuccess;
2705
2706 vm_purgable_t control;
2707 int state;
2708
2709 assert (!(kIOMemoryRemote & _flags));
2710 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2711
2712 if (_memRef)
2713 {
2714 err = super::setPurgeable(newState, oldState);
2715 }
2716 else
2717 {
2718 if (kIOMemoryThreadSafe & _flags)
2719 LOCK;
2720 do
2721 {
2722 // Find the appropriate vm_map for the given task
2723 vm_map_t curMap;
2724 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2725 {
2726 err = kIOReturnNotReady;
2727 break;
2728 }
2729 else if (!_task)
2730 {
2731 err = kIOReturnUnsupported;
2732 break;
2733 }
2734 else
2735 {
2736 curMap = get_task_map(_task);
2737 if (NULL == curMap)
2738 {
2739 err = KERN_INVALID_ARGUMENT;
2740 break;
2741 }
2742 }
2743
2744 // can only do one range
2745 Ranges vec = _ranges;
2746 IOOptionBits type = _flags & kIOMemoryTypeMask;
2747 mach_vm_address_t addr;
2748 mach_vm_size_t len;
2749 getAddrLenForInd(addr, len, type, vec, 0);
2750
2751 err = purgeableControlBits(newState, &control, &state);
2752 if (kIOReturnSuccess != err)
2753 break;
2754 err = vm_map_purgable_control(curMap, addr, control, &state);
2755 if (oldState)
2756 {
2757 if (kIOReturnSuccess == err)
2758 {
2759 err = purgeableStateBits(&state);
2760 *oldState = state;
2761 }
2762 }
2763 }
2764 while (false);
2765 if (kIOMemoryThreadSafe & _flags)
2766 UNLOCK;
2767 }
2768
2769 return (err);
2770 }
2771
2772 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2773 IOOptionBits * oldState )
2774 {
2775 IOReturn err = kIOReturnNotReady;
2776
2777 if (kIOMemoryThreadSafe & _flags) LOCK;
2778 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2779 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2780
2781 return (err);
2782 }
2783
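/*
 * Illustrative sketch: the usual purgeable life cycle for a cache-like
 * buffer, using the public state constants from IOMemoryDescriptor.h.
 * `md` is a hypothetical name.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);    // eligible for reclaim
 *   // ... buffer idle ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState); // pin it again
 *   if (kIOMemoryPurgeableEmpty == oldState) {
 *       // contents were reclaimed while volatile; regenerate them
 *   }
 */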
2784 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2785 IOByteCount * dirtyPageCount )
2786 {
2787 IOReturn err = kIOReturnNotReady;
2788
2789 assert (!(kIOMemoryRemote & _flags));
2790 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2791
2792 if (kIOMemoryThreadSafe & _flags) LOCK;
2793 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2794 else
2795 {
2796 IOMultiMemoryDescriptor * mmd;
2797 IOSubMemoryDescriptor * smd;
2798 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2799 {
2800 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2801 }
2802 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2803 {
2804 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2805 }
2806 }
2807 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2808
2809 return (err);
2810 }
2811
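/*
 * Illustrative sketch: querying residency for accounting or debugging.
 * Despite the IOByteCount type, both out-parameters are page counts; either
 * may be NULL. `md` is a hypothetical name.
 *
 *   IOByteCount resident, dirty;
 *   if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *       // resident/dirty pages backing the descriptor's memory entry
 *   }
 */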
2812
2813 #if defined(__arm__) || defined(__arm64__)
2814 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2815 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2816 #else /* defined(__arm__) || defined(__arm64__) */
2817 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2818 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2819 #endif /* defined(__arm__) || defined(__arm64__) */
2820
2821 static void SetEncryptOp(addr64_t pa, unsigned int count)
2822 {
2823 ppnum_t page, end;
2824
2825 page = atop_64(round_page_64(pa));
2826 end = atop_64(trunc_page_64(pa + count));
2827 for (; page < end; page++)
2828 {
2829 pmap_clear_noencrypt(page);
2830 }
2831 }
2832
2833 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2834 {
2835 ppnum_t page, end;
2836
2837 page = atop_64(round_page_64(pa));
2838 end = atop_64(trunc_page_64(pa + count));
2839 for (; page < end; page++)
2840 {
2841 pmap_set_noencrypt(page);
2842 }
2843 }
2844
2845 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2846 IOByteCount offset, IOByteCount length )
2847 {
2848 IOByteCount remaining;
2849 unsigned int res;
2850 void (*func)(addr64_t pa, unsigned int count) = 0;
2851 #if defined(__arm__) || defined(__arm64__)
2852 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
2853 #endif
2854
2855 assert (!(kIOMemoryRemote & _flags));
2856 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2857
2858 switch (options)
2859 {
2860 case kIOMemoryIncoherentIOFlush:
2861 #if defined(__arm__) || defined(__arm64__)
2862 func_ext = &dcache_incoherent_io_flush64;
2863 #if __ARM_COHERENT_IO__
2864 func_ext(0, 0, 0, &res);
2865 return kIOReturnSuccess;
2866 #else /* __ARM_COHERENT_IO__ */
2867 break;
2868 #endif /* __ARM_COHERENT_IO__ */
2869 #else /* defined(__arm__) || defined(__arm64__) */
2870 func = &dcache_incoherent_io_flush64;
2871 break;
2872 #endif /* defined(__arm__) || defined(__arm64__) */
2873 case kIOMemoryIncoherentIOStore:
2874 #if defined(__arm__) || defined(__arm64__)
2875 func_ext = &dcache_incoherent_io_store64;
2876 #if __ARM_COHERENT_IO__
2877 func_ext(0, 0, 0, &res);
2878 return kIOReturnSuccess;
2879 #else /* __ARM_COHERENT_IO__ */
2880 break;
2881 #endif /* __ARM_COHERENT_IO__ */
2882 #else /* defined(__arm__) || defined(__arm64__) */
2883 func = &dcache_incoherent_io_store64;
2884 break;
2885 #endif /* defined(__arm__) || defined(__arm64__) */
2886
2887 case kIOMemorySetEncrypted:
2888 func = &SetEncryptOp;
2889 break;
2890 case kIOMemoryClearEncrypted:
2891 func = &ClearEncryptOp;
2892 break;
2893 }
2894
2895 #if defined(__arm__) || defined(__arm64__)
2896 if ((func == 0) && (func_ext == 0))
2897 return (kIOReturnUnsupported);
2898 #else /* defined(__arm__) || defined(__arm64__) */
2899 if (!func)
2900 return (kIOReturnUnsupported);
2901 #endif /* defined(__arm__) || defined(__arm64__) */
2902
2903 if (kIOMemoryThreadSafe & _flags)
2904 LOCK;
2905
2906 res = 0x0UL;
2907 remaining = length = min(length, getLength() - offset);
2908 while (remaining)
2909 // (process another target segment?)
2910 {
2911 addr64_t dstAddr64;
2912 IOByteCount dstLen;
2913
2914 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2915 if (!dstAddr64)
2916 break;
2917
2918 // Clip segment length to remaining
2919 if (dstLen > remaining)
2920 dstLen = remaining;
2921
2922 #if defined(__arm__) || defined(__arm64__)
2923 if (func)
2924 (*func)(dstAddr64, dstLen);
2925 if (func_ext) {
2926 (*func_ext)(dstAddr64, dstLen, remaining, &res);
2927 if (res != 0x0UL) {
2928 remaining = 0;
2929 break;
2930 }
2931 }
2932 #else /* defined(__arm__) || defined(__arm64__) */
2933 (*func)(dstAddr64, dstLen);
2934 #endif /* defined(__arm__) || defined(__arm64__) */
2935
2936 offset += dstLen;
2937 remaining -= dstLen;
2938 }
2939
2940 if (kIOMemoryThreadSafe & _flags)
2941 UNLOCK;
2942
2943 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2944 }
2945
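/*
 * Illustrative sketch (only meaningful on non-coherent configurations; the
 * __ARM_COHERENT_IO__ paths above turn these into no-ops): keeping the CPU
 * cache and DMA'd data consistent around a transfer. `md` is a hypothetical
 * name.
 *
 *   // device has DMA'd into the buffer; make the data visible to the CPU
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 *
 *   // CPU has written the buffer; push dirty lines out before the device reads
 *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 */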
2946 /*
2947 *
2948 */
2949
2950 #if defined(__i386__) || defined(__x86_64__)
2951
2952 #define io_kernel_static_start vm_kernel_stext
2953 #define io_kernel_static_end vm_kernel_etext
2954
2955 #elif defined(__arm__) || defined(__arm64__)
2956
2957 extern vm_offset_t static_memory_end;
2958
2959 #if defined(__arm64__)
2960 #define io_kernel_static_start vm_kext_base
2961 #else /* defined(__arm64__) */
2962 #define io_kernel_static_start vm_kernel_stext
2963 #endif /* defined(__arm64__) */
2964
2965 #define io_kernel_static_end static_memory_end
2966
2967 #else
2968 #error io_kernel_static_end is undefined for this architecture
2969 #endif
2970
2971 static kern_return_t
2972 io_get_kernel_static_upl(
2973 vm_map_t /* map */,
2974 uintptr_t offset,
2975 upl_size_t *upl_size,
2976 upl_t *upl,
2977 upl_page_info_array_t page_list,
2978 unsigned int *count,
2979 ppnum_t *highest_page)
2980 {
2981 unsigned int pageCount, page;
2982 ppnum_t phys;
2983 ppnum_t highestPage = 0;
2984
2985 pageCount = atop_32(*upl_size);
2986 if (pageCount > *count)
2987 pageCount = *count;
2988
2989 *upl = NULL;
2990
2991 for (page = 0; page < pageCount; page++)
2992 {
2993 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2994 if (!phys)
2995 break;
2996 page_list[page].phys_addr = phys;
2997 page_list[page].free_when_done = 0;
2998 page_list[page].absent = 0;
2999 page_list[page].dirty = 0;
3000 page_list[page].precious = 0;
3001 page_list[page].device = 0;
3002 if (phys > highestPage)
3003 highestPage = phys;
3004 }
3005
3006 *highest_page = highestPage;
3007
3008 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
3009 }
3010
3011 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3012 {
3013 IOOptionBits type = _flags & kIOMemoryTypeMask;
3014 IOReturn error = kIOReturnSuccess;
3015 ioGMDData *dataP;
3016 upl_page_info_array_t pageInfo;
3017 ppnum_t mapBase;
3018 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3019
3020 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3021
3022 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
3023 forDirection = (IODirection) (forDirection | getDirection());
3024
3025 dataP = getDataP(_memoryEntries);
3026 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3027 switch (kIODirectionOutIn & forDirection)
3028 {
3029 case kIODirectionOut:
3030 // Pages do not need to be marked as dirty on commit
3031 uplFlags = UPL_COPYOUT_FROM;
3032 dataP->fDMAAccess = kIODMAMapReadAccess;
3033 break;
3034
3035 case kIODirectionIn:
3036 dataP->fDMAAccess = kIODMAMapWriteAccess;
3037 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3038 break;
3039
3040 default:
3041 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3042 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3043 break;
3044 }
3045
3046 if (_wireCount)
3047 {
3048 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
3049 {
3050 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3051 error = kIOReturnNotWritable;
3052 }
3053 }
3054 else
3055 {
3056 IOMapper *mapper;
3057
3058 mapper = dataP->fMapper;
3059 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3060
3061 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3062 tag = _kernelTag;
3063 if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map);
3064
3065 if (kIODirectionPrepareToPhys32 & forDirection)
3066 {
3067 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
3068 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
3069 }
3070 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
3071 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
3072 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3073
3074 mapBase = 0;
3075
3076 // Note that appendBytes(NULL) zeros the data up to the desired length
3077 // and the length parameter is an unsigned int
3078 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3079 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
3080 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
3081 dataP = 0;
3082
3083 // Find the appropriate vm_map for the given task
3084 vm_map_t curMap;
3085 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
3086 else curMap = get_task_map(_task);
3087
3088 // Iterate over the vector of virtual ranges
3089 Ranges vec = _ranges;
3090 unsigned int pageIndex = 0;
3091 IOByteCount mdOffset = 0;
3092 ppnum_t highestPage = 0;
3093
3094 IOMemoryEntry * memRefEntry = 0;
3095 if (_memRef) memRefEntry = &_memRef->entries[0];
3096
3097 for (UInt range = 0; range < _rangesCount; range++) {
3098 ioPLBlock iopl;
3099 mach_vm_address_t startPage, startPageOffset;
3100 mach_vm_size_t numBytes;
3101 ppnum_t highPage = 0;
3102
3103 // Get the startPage address and length of vec[range]
3104 getAddrLenForInd(startPage, numBytes, type, vec, range);
3105 startPageOffset = startPage & PAGE_MASK;
3106 iopl.fPageOffset = startPageOffset;
3107 numBytes += startPageOffset;
3108 startPage = trunc_page_64(startPage);
3109
3110 if (mapper)
3111 iopl.fMappedPage = mapBase + pageIndex;
3112 else
3113 iopl.fMappedPage = 0;
3114
3115 // Iterate over the current range, creating UPLs
3116 while (numBytes) {
3117 vm_address_t kernelStart = (vm_address_t) startPage;
3118 vm_map_t theMap;
3119 if (curMap) theMap = curMap;
3120 else if (_memRef)
3121 {
3122 theMap = NULL;
3123 }
3124 else
3125 {
3126 assert(_task == kernel_task);
3127 theMap = IOPageableMapForAddress(kernelStart);
3128 }
3129
3130 // ioplFlags is an in/out parameter
3131 upl_control_flags_t ioplFlags = uplFlags;
3132 dataP = getDataP(_memoryEntries);
3133 pageInfo = getPageList(dataP);
3134 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3135
3136 mach_vm_size_t _ioplSize = round_page(numBytes);
3137 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3138 unsigned int numPageInfo = atop_32(ioplSize);
3139
3140 if ((theMap == kernel_map)
3141 && (kernelStart >= io_kernel_static_start)
3142 && (kernelStart < io_kernel_static_end)) {
3143 error = io_get_kernel_static_upl(theMap,
3144 kernelStart,
3145 &ioplSize,
3146 &iopl.fIOPL,
3147 baseInfo,
3148 &numPageInfo,
3149 &highPage);
3150 }
3151 else if (_memRef) {
3152 memory_object_offset_t entryOffset;
3153
3154 entryOffset = mdOffset;
3155 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3156 if (entryOffset >= memRefEntry->size) {
3157 memRefEntry++;
3158 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
3159 entryOffset = 0;
3160 }
3161 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
3162 error = memory_object_iopl_request(memRefEntry->entry,
3163 entryOffset,
3164 &ioplSize,
3165 &iopl.fIOPL,
3166 baseInfo,
3167 &numPageInfo,
3168 &ioplFlags,
3169 tag);
3170 }
3171 else {
3172 assert(theMap);
3173 error = vm_map_create_upl(theMap,
3174 startPage,
3175 (upl_size_t*)&ioplSize,
3176 &iopl.fIOPL,
3177 baseInfo,
3178 &numPageInfo,
3179 &ioplFlags,
3180 tag);
3181 }
3182
3183 if (error != KERN_SUCCESS) goto abortExit;
3184
3185 assert(ioplSize);
3186
3187 if (iopl.fIOPL)
3188 highPage = upl_get_highest_page(iopl.fIOPL);
3189 if (highPage > highestPage)
3190 highestPage = highPage;
3191
3192 if (baseInfo->device) {
3193 numPageInfo = 1;
3194 iopl.fFlags = kIOPLOnDevice;
3195 }
3196 else {
3197 iopl.fFlags = 0;
3198 }
3199
3200 iopl.fIOMDOffset = mdOffset;
3201 iopl.fPageInfo = pageIndex;
3202 if (mapper && pageIndex && (page_mask & (mdOffset + startPageOffset))) dataP->fDiscontig = true;
3203
3204 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3205 // Clean up partially created and unsaved iopl
3206 if (iopl.fIOPL) {
3207 upl_abort(iopl.fIOPL, 0);
3208 upl_deallocate(iopl.fIOPL);
3209 }
3210 goto abortExit;
3211 }
3212 dataP = 0;
3213
3214 // Check for multiple iopls in one virtual range
3215 pageIndex += numPageInfo;
3216 mdOffset -= iopl.fPageOffset;
3217 if (ioplSize < numBytes) {
3218 numBytes -= ioplSize;
3219 startPage += ioplSize;
3220 mdOffset += ioplSize;
3221 iopl.fPageOffset = 0;
3222 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
3223 }
3224 else {
3225 mdOffset += numBytes;
3226 break;
3227 }
3228 }
3229 }
3230
3231 _highestPage = highestPage;
3232
3233 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
3234 }
3235
3236 #if IOTRACKING
3237 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error))
3238 {
3239 dataP = getDataP(_memoryEntries);
3240 if (!dataP->fWireTracking.link.next)
3241 {
3242 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3243 }
3244 }
3245 #endif /* IOTRACKING */
3246
3247 return (error);
3248
3249 abortExit:
3250 {
3251 dataP = getDataP(_memoryEntries);
3252 UInt done = getNumIOPL(_memoryEntries, dataP);
3253 ioPLBlock *ioplList = getIOPLList(dataP);
3254
3255 for (UInt range = 0; range < done; range++)
3256 {
3257 if (ioplList[range].fIOPL) {
3258 upl_abort(ioplList[range].fIOPL, 0);
3259 upl_deallocate(ioplList[range].fIOPL);
3260 }
3261 }
3262 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3263 }
3264
3265 if (error == KERN_FAILURE)
3266 error = kIOReturnCannotWire;
3267 else if (error == KERN_MEMORY_ERROR)
3268 error = kIOReturnNoResources;
3269
3270 return error;
3271 }
3272
3273 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3274 {
3275 ioGMDData * dataP;
3276 unsigned dataSize = size;
3277
3278 if (!_memoryEntries) {
3279 _memoryEntries = OSData::withCapacity(dataSize);
3280 if (!_memoryEntries)
3281 return false;
3282 }
3283 else if (!_memoryEntries->initWithCapacity(dataSize))
3284 return false;
3285
3286 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3287 dataP = getDataP(_memoryEntries);
3288
3289 if (mapper == kIOMapperWaitSystem) {
3290 IOMapper::checkForSystemMapper();
3291 mapper = IOMapper::gSystem;
3292 }
3293 dataP->fMapper = mapper;
3294 dataP->fPageCnt = 0;
3295 dataP->fMappedBase = 0;
3296 dataP->fDMAMapNumAddressBits = 64;
3297 dataP->fDMAMapAlignment = 0;
3298 dataP->fPreparationID = kIOPreparationIDUnprepared;
3299 dataP->fDiscontig = false;
3300 dataP->fCompletionError = false;
3301 dataP->fMappedBaseValid = false;
3302
3303 return (true);
3304 }
3305
3306 IOReturn IOMemoryDescriptor::dmaMap(
3307 IOMapper * mapper,
3308 IODMACommand * command,
3309 const IODMAMapSpecification * mapSpec,
3310 uint64_t offset,
3311 uint64_t length,
3312 uint64_t * mapAddress,
3313 uint64_t * mapLength)
3314 {
3315 IOReturn err;
3316 uint32_t mapOptions;
3317
3318 mapOptions = 0;
3319 mapOptions |= kIODMAMapReadAccess;
3320 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3321
3322 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3323 mapSpec, command, NULL, mapAddress, mapLength);
3324
3325 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
3326
3327 return (err);
3328 }
3329
3330 void IOMemoryDescriptor::dmaMapRecord(
3331 IOMapper * mapper,
3332 IODMACommand * command,
3333 uint64_t mapLength)
3334 {
3335 kern_allocation_name_t alloc;
3336 int16_t prior;
3337
3338 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */)
3339 {
3340 kern_allocation_update_size(mapper->fAllocName, mapLength);
3341 }
3342
3343 if (!command) return;
3344 prior = OSAddAtomic16(1, &_dmaReferences);
3345 if (!prior)
3346 {
3347 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag))
3348 {
3349 _mapName = alloc;
3350 mapLength = _length;
3351 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3352 }
3353 else _mapName = NULL;
3354 }
3355 }
3356
3357 IOReturn IOMemoryDescriptor::dmaUnmap(
3358 IOMapper * mapper,
3359 IODMACommand * command,
3360 uint64_t offset,
3361 uint64_t mapAddress,
3362 uint64_t mapLength)
3363 {
3364 IOReturn ret;
3365 kern_allocation_name_t alloc;
3366 kern_allocation_name_t mapName;
3367 int16_t prior;
3368
3369 mapName = 0;
3370 prior = 0;
3371 if (command)
3372 {
3373 mapName = _mapName;
3374 if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences);
3375 else panic("_dmaReferences underflow");
3376 }
3377
3378 if (!mapLength) return (kIOReturnSuccess);
3379
3380 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3381
3382 if ((alloc = mapper->fAllocName))
3383 {
3384 kern_allocation_update_size(alloc, -mapLength);
3385 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag))
3386 {
3387 mapLength = _length;
3388 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3389 }
3390 }
3391
3392 return (ret);
3393 }
3394
3395 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3396 IOMapper * mapper,
3397 IODMACommand * command,
3398 const IODMAMapSpecification * mapSpec,
3399 uint64_t offset,
3400 uint64_t length,
3401 uint64_t * mapAddress,
3402 uint64_t * mapLength)
3403 {
3404 IOReturn err = kIOReturnSuccess;
3405 ioGMDData * dataP;
3406 IOOptionBits type = _flags & kIOMemoryTypeMask;
3407
3408 *mapAddress = 0;
3409 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3410 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3411
3412 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3413 || offset || (length != _length))
3414 {
3415 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3416 }
3417 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3418 {
3419 const ioPLBlock * ioplList = getIOPLList(dataP);
3420 upl_page_info_t * pageList;
3421 uint32_t mapOptions = 0;
3422
3423 IODMAMapSpecification mapSpec;
3424 bzero(&mapSpec, sizeof(mapSpec));
3425 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3426 mapSpec.alignment = dataP->fDMAMapAlignment;
3427
3428 // For external UPLs the fPageInfo field points directly to
3429 // the upl's upl_page_info_t array.
3430 if (ioplList->fFlags & kIOPLExternUPL)
3431 {
3432 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3433 mapOptions |= kIODMAMapPagingPath;
3434 }
3435 else pageList = getPageList(dataP);
3436
3437 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3438 {
3439 mapOptions |= kIODMAMapPageListFullyOccupied;
3440 }
3441
3442 assert(dataP->fDMAAccess);
3443 mapOptions |= dataP->fDMAAccess;
3444
3445 // Check for direct device non-paged memory
3446 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3447
3448 IODMAMapPageList dmaPageList =
3449 {
3450 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3451 .pageListCount = _pages,
3452 .pageList = &pageList[0]
3453 };
3454 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3455 command, &dmaPageList, mapAddress, mapLength);
3456
3457 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
3458 }
3459
3460 return (err);
3461 }
3462
3463 /*
3464 * prepare
3465 *
3466 * Prepare the memory for an I/O transfer. This involves paging in
3467 * the memory, if necessary, and wiring it down for the duration of
3468 * the transfer. The complete() method completes the processing of
3469 * the memory after the I/O transfer finishes. This method needn't be
3470 * called for non-pageable memory.
3471 */
3472
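/*
 * Illustrative sketch of the pairing described above; error handling is
 * elided and `task`, `uaddr`, `ulen` are hypothetical names.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                 uaddr, ulen, kIODirectionInOut, task);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... perform the transfer, e.g. via IODMACommand ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */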
3473 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3474 {
3475 IOReturn error = kIOReturnSuccess;
3476 IOOptionBits type = _flags & kIOMemoryTypeMask;
3477
3478 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3479 return kIOReturnSuccess;
3480
3481 assert (!(kIOMemoryRemote & _flags));
3482 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3483
3484 if (_prepareLock) IOLockLock(_prepareLock);
3485
3486 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3487 {
3488 error = wireVirtual(forDirection);
3489 }
3490
3491 if (kIOReturnSuccess == error)
3492 {
3493 if (1 == ++_wireCount)
3494 {
3495 if (kIOMemoryClearEncrypt & _flags)
3496 {
3497 performOperation(kIOMemoryClearEncrypted, 0, _length);
3498 }
3499 }
3500 }
3501
3502 if (_prepareLock) IOLockUnlock(_prepareLock);
3503
3504 return error;
3505 }
3506
3507 /*
3508 * complete
3509 *
3510 * Complete processing of the memory after an I/O transfer finishes.
3511 * This method should not be called unless a prepare was previously
3512 * issued; prepare() and complete() must occur in pairs, before and
3513 * after an I/O transfer involving pageable memory.
3514 */
3515
3516 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3517 {
3518 IOOptionBits type = _flags & kIOMemoryTypeMask;
3519 ioGMDData * dataP;
3520
3521 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3522 return kIOReturnSuccess;
3523
3524 assert (!(kIOMemoryRemote & _flags));
3525 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3526
3527 if (_prepareLock) IOLockLock(_prepareLock);
3528 do
3529 {
3530 assert(_wireCount);
3531 if (!_wireCount) break;
3532 dataP = getDataP(_memoryEntries);
3533 if (!dataP) break;
3534
3535 if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;
3536
3537 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3538 {
3539 performOperation(kIOMemorySetEncrypted, 0, _length);
3540 }
3541
3542 _wireCount--;
3543 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3544 {
3545 ioPLBlock *ioplList = getIOPLList(dataP);
3546 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3547
3548 if (_wireCount)
3549 {
3550 // kIODirectionCompleteWithDataValid & forDirection
3551 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3552 {
3553 vm_tag_t tag;
3554 tag = getVMTag(kernel_map);
3555 for (ind = 0; ind < count; ind++)
3556 {
3557 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag);
3558 }
3559 }
3560 }
3561 else
3562 {
3563 if (_dmaReferences) panic("complete() while dma active");
3564
3565 if (dataP->fMappedBaseValid) {
3566 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3567 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3568 }
3569 #if IOTRACKING
3570 if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3571 #endif /* IOTRACKING */
3572 // Only complete iopls that we created which are for TypeVirtual
3573 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3574 {
3575 for (ind = 0; ind < count; ind++)
3576 if (ioplList[ind].fIOPL) {
3577 if (dataP->fCompletionError)
3578 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3579 else
3580 upl_commit(ioplList[ind].fIOPL, 0, 0);
3581 upl_deallocate(ioplList[ind].fIOPL);
3582 }
3583 } else if (kIOMemoryTypeUPL == type) {
3584 upl_set_referenced(ioplList[0].fIOPL, false);
3585 }
3586
3587 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3588
3589 dataP->fPreparationID = kIOPreparationIDUnprepared;
3590 _flags &= ~kIOMemoryPreparedReadOnly;
3591 }
3592 }
3593 }
3594 while (false);
3595
3596 if (_prepareLock) IOLockUnlock(_prepareLock);
3597
3598 return kIOReturnSuccess;
3599 }
3600
3601 IOReturn IOGeneralMemoryDescriptor::doMap(
3602 vm_map_t __addressMap,
3603 IOVirtualAddress * __address,
3604 IOOptionBits options,
3605 IOByteCount __offset,
3606 IOByteCount __length )
3607 {
3608 #ifndef __LP64__
3609 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3610 #endif /* !__LP64__ */
3611
3612 kern_return_t err;
3613
3614 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3615 mach_vm_size_t offset = mapping->fOffset + __offset;
3616 mach_vm_size_t length = mapping->fLength;
3617
3618 IOOptionBits type = _flags & kIOMemoryTypeMask;
3619 Ranges vec = _ranges;
3620
3621 mach_vm_address_t range0Addr = 0;
3622 mach_vm_size_t range0Len = 0;
3623
3624 if ((offset >= _length) || ((offset + length) > _length))
3625 return( kIOReturnBadArgument );
3626
3627 assert (!(kIOMemoryRemote & _flags));
3628 if (kIOMemoryRemote & _flags) return (0);
3629
3630 if (vec.v)
3631 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3632
3633 // mapping source == dest? (could be much better)
3634 if (_task
3635 && (mapping->fAddressTask == _task)
3636 && (mapping->fAddressMap == get_task_map(_task))
3637 && (options & kIOMapAnywhere)
3638 && (1 == _rangesCount)
3639 && (0 == offset)
3640 && range0Addr
3641 && (length <= range0Len))
3642 {
3643 mapping->fAddress = range0Addr;
3644 mapping->fOptions |= kIOMapStatic;
3645
3646 return( kIOReturnSuccess );
3647 }
3648
3649 if (!_memRef)
3650 {
3651 IOOptionBits createOptions = 0;
3652 if (!(kIOMapReadOnly & options))
3653 {
3654 createOptions |= kIOMemoryReferenceWrite;
3655 #if DEVELOPMENT || DEBUG
3656 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3657 {
3658 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3659 }
3660 #endif
3661 }
3662 err = memoryReferenceCreate(createOptions, &_memRef);
3663 if (kIOReturnSuccess != err) return (err);
3664 }
3665
3666 memory_object_t pager;
3667 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3668
3669 // <upl_transpose //
3670 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3671 {
3672 do
3673 {
3674 upl_t redirUPL2;
3675 upl_size_t size;
3676 upl_control_flags_t flags;
3677 unsigned int lock_count;
3678
3679 if (!_memRef || (1 != _memRef->count))
3680 {
3681 err = kIOReturnNotReadable;
3682 break;
3683 }
3684
3685 size = round_page(mapping->fLength);
3686 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3687 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3688
3689 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3690 NULL, NULL,
3691 &flags, getVMTag(kernel_map)))
3692 redirUPL2 = NULL;
3693
3694 for (lock_count = 0;
3695 IORecursiveLockHaveLock(gIOMemoryLock);
3696 lock_count++) {
3697 UNLOCK;
3698 }
3699 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3700 for (;
3701 lock_count;
3702 lock_count--) {
3703 LOCK;
3704 }
3705
3706 if (kIOReturnSuccess != err)
3707 {
3708 IOLog("upl_transpose(%x)\n", err);
3709 err = kIOReturnSuccess;
3710 }
3711
3712 if (redirUPL2)
3713 {
3714 upl_commit(redirUPL2, NULL, 0);
3715 upl_deallocate(redirUPL2);
3716 redirUPL2 = 0;
3717 }
3718 {
3719 // swap the memEntries since they now refer to different vm_objects
3720 IOMemoryReference * me = _memRef;
3721 _memRef = mapping->fMemory->_memRef;
3722 mapping->fMemory->_memRef = me;
3723 }
3724 if (pager)
3725 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3726 }
3727 while (false);
3728 }
3729 // upl_transpose> //
3730 else
3731 {
3732 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3733 #if IOTRACKING
3734 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
3735 {
3736 // only dram maps in the default-on development case
3737 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3738 }
3739 #endif /* IOTRACKING */
3740 if ((err == KERN_SUCCESS) && pager)
3741 {
3742 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3743
3744 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3745 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3746 {
3747 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3748 }
3749 }
3750 }
3751
3752 return (err);
3753 }
3754
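/*
 * Illustrative sketch: doMap() is normally reached via the public mapping
 * calls rather than invoked directly. Creating a kernel mapping of a
 * descriptor, for example (`md` is a hypothetical name):
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
 *   if (map) {
 *       void * ptr = (void *) map->getVirtualAddress();
 *       // ... the mapping stays valid while `map` is retained ...
 *       map->release();
 *   }
 */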
3755 #if IOTRACKING
3756 IOReturn
3757 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
3758 mach_vm_address_t * address, mach_vm_size_t * size)
3759 {
3760 #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3761
3762 IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
3763
3764 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);
3765
3766 *task = map->fAddressTask;
3767 *address = map->fAddress;
3768 *size = map->fLength;
3769
3770 return (kIOReturnSuccess);
3771 }
3772 #endif /* IOTRACKING */
3773
3774 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3775 vm_map_t addressMap,
3776 IOVirtualAddress __address,
3777 IOByteCount __length )
3778 {
3779 return (super::doUnmap(addressMap, __address, __length));
3780 }
3781
3782 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3783
3784 #undef super
3785 #define super OSObject
3786
3787 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3788
3789 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3790 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3791 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3792 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3793 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3794 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3795 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3796 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3797
3798 /* ex-inline function implementation */
3799 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3800 { return( getPhysicalSegment( 0, 0 )); }
3801
3802 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3803
3804 bool IOMemoryMap::init(
3805 task_t intoTask,
3806 mach_vm_address_t toAddress,
3807 IOOptionBits _options,
3808 mach_vm_size_t _offset,
3809 mach_vm_size_t _length )
3810 {
3811 if (!intoTask)
3812 return( false);
3813
3814 if (!super::init())
3815 return(false);
3816
3817 fAddressMap = get_task_map(intoTask);
3818 if (!fAddressMap)
3819 return(false);
3820 vm_map_reference(fAddressMap);
3821
3822 fAddressTask = intoTask;
3823 fOptions = _options;
3824 fLength = _length;
3825 fOffset = _offset;
3826 fAddress = toAddress;
3827
3828 return (true);
3829 }
3830
3831 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3832 {
3833 if (!_memory)
3834 return(false);
3835
3836 if (!fSuperMap)
3837 {
3838 if( (_offset + fLength) > _memory->getLength())
3839 return( false);
3840 fOffset = _offset;
3841 }
3842
3843 _memory->retain();
3844 if (fMemory)
3845 {
3846 if (fMemory != _memory)
3847 fMemory->removeMapping(this);
3848 fMemory->release();
3849 }
3850 fMemory = _memory;
3851
3852 return( true );
3853 }
3854
3855 IOReturn IOMemoryDescriptor::doMap(
3856 vm_map_t __addressMap,
3857 IOVirtualAddress * __address,
3858 IOOptionBits options,
3859 IOByteCount __offset,
3860 IOByteCount __length )
3861 {
3862 return (kIOReturnUnsupported);
3863 }
3864
3865 IOReturn IOMemoryDescriptor::handleFault(
3866 void * _pager,
3867 mach_vm_size_t sourceOffset,
3868 mach_vm_size_t length)
3869 {
3870 if( kIOMemoryRedirected & _flags)
3871 {
3872 #if DEBUG
3873 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3874 #endif
3875 do {
3876 SLEEP;
3877 } while( kIOMemoryRedirected & _flags );
3878 }
3879 return (kIOReturnSuccess);
3880 }
3881
3882 IOReturn IOMemoryDescriptor::populateDevicePager(
3883 void * _pager,
3884 vm_map_t addressMap,
3885 mach_vm_address_t address,
3886 mach_vm_size_t sourceOffset,
3887 mach_vm_size_t length,
3888 IOOptionBits options )
3889 {
3890 IOReturn err = kIOReturnSuccess;
3891 memory_object_t pager = (memory_object_t) _pager;
3892 mach_vm_size_t size;
3893 mach_vm_size_t bytes;
3894 mach_vm_size_t page;
3895 mach_vm_size_t pageOffset;
3896 mach_vm_size_t pagerOffset;
3897 IOPhysicalLength segLen, chunk;
3898 addr64_t physAddr;
3899 IOOptionBits type;
3900
3901 type = _flags & kIOMemoryTypeMask;
3902
3903 if (reserved->dp.pagerContig)
3904 {
3905 sourceOffset = 0;
3906 pagerOffset = 0;
3907 }
3908
3909 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3910 assert( physAddr );
3911 pageOffset = physAddr - trunc_page_64( physAddr );
3912 pagerOffset = sourceOffset;
3913
3914 size = length + pageOffset;
3915 physAddr -= pageOffset;
3916
3917 segLen += pageOffset;
3918 bytes = size;
3919 do
3920 {
3921 // in the middle of the loop only map whole pages
3922 if( segLen >= bytes) segLen = bytes;
3923 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3924 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3925
3926 if (kIOReturnSuccess != err) break;
3927
3928 #if DEBUG || DEVELOPMENT
3929 if ((kIOMemoryTypeUPL != type)
3930 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3931 {
3932 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3933 }
3934 #endif /* DEBUG || DEVELOPMENT */
3935
3936 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3937 for (page = 0;
3938 (page < segLen) && (KERN_SUCCESS == err);
3939 page += chunk)
3940 {
3941 err = device_pager_populate_object(pager, pagerOffset,
3942 (ppnum_t)(atop_64(physAddr + page)), chunk);
3943 pagerOffset += chunk;
3944 }
3945
3946 assert (KERN_SUCCESS == err);
3947 if (err) break;
3948
3949 // This call to vm_fault causes an early pmap-level resolution
3950 // of the mappings created above for kernel mappings, since
3951 // faulting them in later can't be done at interrupt level.
3952 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3953 {
3954 err = vm_fault(addressMap,
3955 (vm_map_offset_t)trunc_page_64(address),
3956 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE,
3957 FALSE, VM_KERN_MEMORY_NONE,
3958 THREAD_UNINT, NULL,
3959 (vm_map_offset_t)0);
3960
3961 if (KERN_SUCCESS != err) break;
3962 }
3963
3964 sourceOffset += segLen - pageOffset;
3965 address += segLen;
3966 bytes -= segLen;
3967 pageOffset = 0;
3968 }
3969 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3970
3971 if (bytes)
3972 err = kIOReturnBadArgument;
3973
3974 return (err);
3975 }
3976
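/*
 * With the kIOMap64Bit scheme the __address argument to doUnmap() is really
 * the IOMemoryMap being torn down (and __length must be zero); the map's own
 * address map, address and length are what actually get deallocated, except
 * for kIOMapOverwrite mappings, which are left in place.
 */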
3977 IOReturn IOMemoryDescriptor::doUnmap(
3978 vm_map_t addressMap,
3979 IOVirtualAddress __address,
3980 IOByteCount __length )
3981 {
3982 IOReturn err;
3983 IOMemoryMap * mapping;
3984 mach_vm_address_t address;
3985 mach_vm_size_t length;
3986
3987 if (__length) panic("doUnmap");
3988
3989 mapping = (IOMemoryMap *) __address;
3990 addressMap = mapping->fAddressMap;
3991 address = mapping->fAddress;
3992 length = mapping->fLength;
3993
3994 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3995 else
3996 {
3997 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3998 addressMap = IOPageableMapForAddress( address );
3999 #if DEBUG
4000 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
4001 addressMap, address, length );
4002 #endif
4003 err = mach_vm_deallocate( addressMap, address, length );
4004 }
4005
4006 #if IOTRACKING
4007 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
4008 #endif /* IOTRACKING */
4009
4010 return (err);
4011 }
4012
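/*
 * redirect() sets or clears kIOMemoryRedirected and then asks every mapping
 * in _mappings to redirect itself. Clearing the flag also wakes any thread
 * sleeping in handleFault() and, for kernel mappings backed by a device
 * pager, repopulates the pager so the pages become valid again.
 */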
4013 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
4014 {
4015 IOReturn err = kIOReturnSuccess;
4016 IOMemoryMap * mapping = 0;
4017 OSIterator * iter;
4018
4019 LOCK;
4020
4021 if( doRedirect)
4022 _flags |= kIOMemoryRedirected;
4023 else
4024 _flags &= ~kIOMemoryRedirected;
4025
4026 do {
4027 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
4028
4029 memory_object_t pager;
4030
4031 if( reserved)
4032 pager = (memory_object_t) reserved->dp.devicePager;
4033 else
4034 pager = MACH_PORT_NULL;
4035
4036 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
4037 {
4038 mapping->redirect( safeTask, doRedirect );
4039 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
4040 {
4041 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4042 }
4043 }
4044
4045 iter->release();
4046 }
4047 } while( false );
4048
4049 if (!doRedirect)
4050 {
4051 WAKEUP;
4052 }
4053
4054 UNLOCK;
4055
4056 #ifndef __LP64__
4057 // temporary binary compatibility
4058 IOSubMemoryDescriptor * subMem;
4059 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
4060 err = subMem->redirect( safeTask, doRedirect );
4061 else
4062 err = kIOReturnSuccess;
4063 #endif /* !__LP64__ */
4064
4065 return( err );
4066 }
4067
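/*
 * Per-mapping redirection: a non-static mapping that does not belong to the
 * given safeTask simply has its pages unmapped, so the next access faults
 * (and, for device pager backed memory, blocks in handleFault()) until the
 * redirection is lifted; write-combined mappings are instead switched to an
 * inhibited cache mode while redirected.
 */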
4068 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
4069 {
4070 IOReturn err = kIOReturnSuccess;
4071
4072 if( fSuperMap) {
4073 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4074 } else {
4075
4076 LOCK;
4077
4078 do
4079 {
4080 if (!fAddress)
4081 break;
4082 if (!fAddressMap)
4083 break;
4084
4085 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4086 && (0 == (fOptions & kIOMapStatic)))
4087 {
4088 IOUnmapPages( fAddressMap, fAddress, fLength );
4089 err = kIOReturnSuccess;
4090 #if DEBUG
4091 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
4092 #endif
4093 }
4094 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
4095 {
4096 IOOptionBits newMode;
4097 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4098 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4099 }
4100 }
4101 while (false);
4102 UNLOCK;
4103 }
4104
4105 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4106 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4107 && safeTask
4108 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
4109 fMemory->redirect(safeTask, doRedirect);
4110
4111 return( err );
4112 }
4113
4114 IOReturn IOMemoryMap::unmap( void )
4115 {
4116 IOReturn err;
4117
4118 LOCK;
4119
4120 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
4121 && (0 == (kIOMapStatic & fOptions))) {
4122
4123 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4124
4125 } else
4126 err = kIOReturnSuccess;
4127
4128 if (fAddressMap)
4129 {
4130 vm_map_deallocate(fAddressMap);
4131 fAddressMap = 0;
4132 }
4133
4134 fAddress = 0;
4135
4136 UNLOCK;
4137
4138 return( err );
4139 }
4140
4141 void IOMemoryMap::taskDied( void )
4142 {
4143 LOCK;
4144 if (fUserClientUnmap) unmap();
4145 #if IOTRACKING
4146 else IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4147 #endif /* IOTRACKING */
4148
4149 if( fAddressMap) {
4150 vm_map_deallocate(fAddressMap);
4151 fAddressMap = 0;
4152 }
4153 fAddressTask = 0;
4154 fAddress = 0;
4155 UNLOCK;
4156 }
4157
4158 IOReturn IOMemoryMap::userClientUnmap( void )
4159 {
4160 fUserClientUnmap = true;
4161 return (kIOReturnSuccess);
4162 }
4163
4164 // Overload the release mechanism. Every mapping must be a member
4165 // of its memory descriptor's _mappings set, so there are always
4166 // 2 references on a mapping. When either of those references is
4167 // released we need to free ourselves.
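// (super::taggedRelease(tag, 2) treats those two references as the floor,
// so the map frees itself as soon as either one is dropped rather than
// waiting for the retain count to reach zero.)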
4168 void IOMemoryMap::taggedRelease(const void *tag) const
4169 {
4170 LOCK;
4171 super::taggedRelease(tag, 2);
4172 UNLOCK;
4173 }
4174
4175 void IOMemoryMap::free()
4176 {
4177 unmap();
4178
4179 if (fMemory)
4180 {
4181 LOCK;
4182 fMemory->removeMapping(this);
4183 UNLOCK;
4184 fMemory->release();
4185 }
4186
4187 if (fOwner && (fOwner != fMemory))
4188 {
4189 LOCK;
4190 fOwner->removeMapping(this);
4191 UNLOCK;
4192 }
4193
4194 if (fSuperMap)
4195 fSuperMap->release();
4196
4197 if (fRedirUPL) {
4198 upl_commit(fRedirUPL, NULL, 0);
4199 upl_deallocate(fRedirUPL);
4200 }
4201
4202 super::free();
4203 }
4204
4205 IOByteCount IOMemoryMap::getLength()
4206 {
4207 return( fLength );
4208 }
4209
4210 IOVirtualAddress IOMemoryMap::getVirtualAddress()
4211 {
4212 #ifndef __LP64__
4213 if (fSuperMap)
4214 fSuperMap->getVirtualAddress();
4215 else if (fAddressMap
4216 && vm_map_is_64bit(fAddressMap)
4217 && (sizeof(IOVirtualAddress) < 8))
4218 {
4219 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4220 }
4221 #endif /* !__LP64__ */
4222
4223 return (fAddress);
4224 }
4225
4226 #ifndef __LP64__
4227 mach_vm_address_t IOMemoryMap::getAddress()
4228 {
4229 return( fAddress);
4230 }
4231
4232 mach_vm_size_t IOMemoryMap::getSize()
4233 {
4234 return( fLength );
4235 }
4236 #endif /* !__LP64__ */
4237
4238
4239 task_t IOMemoryMap::getAddressTask()
4240 {
4241 if( fSuperMap)
4242 return( fSuperMap->getAddressTask());
4243 else
4244 return( fAddressTask);
4245 }
4246
4247 IOOptionBits IOMemoryMap::getMapOptions()
4248 {
4249 return( fOptions);
4250 }
4251
4252 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
4253 {
4254 return( fMemory );
4255 }
4256
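/*
 * copyCompatible() decides whether this existing mapping can satisfy a new
 * mapping request: it must be in the same task address map, with the same
 * read-only setting and a compatible cache mode, at the requested address
 * unless kIOMapAnywhere was asked for, and the requested offset/length must
 * fall inside this mapping. A full match returns this map itself; a partial
 * match returns the new mapping set up as a submap (fSuperMap) of this one.
 */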
4257 IOMemoryMap * IOMemoryMap::copyCompatible(
4258 IOMemoryMap * newMapping )
4259 {
4260 task_t task = newMapping->getAddressTask();
4261 mach_vm_address_t toAddress = newMapping->fAddress;
4262 IOOptionBits _options = newMapping->fOptions;
4263 mach_vm_size_t _offset = newMapping->fOffset;
4264 mach_vm_size_t _length = newMapping->fLength;
4265
4266 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
4267 return( 0 );
4268 if( (fOptions ^ _options) & kIOMapReadOnly)
4269 return( 0 );
4270 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
4271 && ((fOptions ^ _options) & kIOMapCacheMask))
4272 return( 0 );
4273
4274 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
4275 return( 0 );
4276
4277 if( _offset < fOffset)
4278 return( 0 );
4279
4280 _offset -= fOffset;
4281
4282 if( (_offset + _length) > fLength)
4283 return( 0 );
4284
4285 retain();
4286 if( (fLength == _length) && (!_offset))
4287 {
4288 newMapping = this;
4289 }
4290 else
4291 {
4292 newMapping->fSuperMap = this;
4293 newMapping->fOffset = fOffset + _offset;
4294 newMapping->fAddress = fAddress + _offset;
4295 }
4296
4297 return( newMapping );
4298 }
4299
4300 IOReturn IOMemoryMap::wireRange(
4301 uint32_t options,
4302 mach_vm_size_t offset,
4303 mach_vm_size_t length)
4304 {
4305 IOReturn kr;
4306 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4307 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4308 vm_prot_t prot;
4309
4310 prot = (kIODirectionOutIn & options);
4311 if (prot)
4312 {
4313 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4314 }
4315 else
4316 {
4317 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4318 }
4319
4320 return (kr);
4321 }
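/*
 * Minimal sketch of wiring and later unwiring a mapped range (illustrative
 * only and not compiled in; ExampleWire and its caller are assumptions, not
 * part of this file). A non-zero direction in 'options' wires the pages,
 * a zero direction unwires them.
 */
#if 0
static IOReturn
ExampleWire(IOMemoryMap * map)
{
    IOReturn kr;

    // Wire the whole mapping for read/write access.
    kr = map->wireRange(kIODirectionOutIn, 0, map->getLength());
    if (kIOReturnSuccess != kr) return (kr);

    // ... touch the wired pages ...

    // Passing no direction bits unwires the same range.
    return (map->wireRange(0, 0, map->getLength()));
}
#endif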
4322
4323
4324 IOPhysicalAddress
4325 #ifdef __LP64__
4326 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4327 #else /* !__LP64__ */
4328 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4329 #endif /* !__LP64__ */
4330 {
4331 IOPhysicalAddress address;
4332
4333 LOCK;
4334 #ifdef __LP64__
4335 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4336 #else /* !__LP64__ */
4337 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4338 #endif /* !__LP64__ */
4339 UNLOCK;
4340
4341 return( address );
4342 }
4343
4344 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4345
4346 #undef super
4347 #define super OSObject
4348
4349 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4350
4351 void IOMemoryDescriptor::initialize( void )
4352 {
4353 if( 0 == gIOMemoryLock)
4354 gIOMemoryLock = IORecursiveLockAlloc();
4355
4356 gIOLastPage = IOGetLastPageNumber();
4357 }
4358
4359 void IOMemoryDescriptor::free( void )
4360 {
4361 if( _mappings) _mappings->release();
4362
4363 if (reserved)
4364 {
4365 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4366 reserved = NULL;
4367 }
4368 super::free();
4369 }
4370
4371 IOMemoryMap * IOMemoryDescriptor::setMapping(
4372 task_t intoTask,
4373 IOVirtualAddress mapAddress,
4374 IOOptionBits options )
4375 {
4376 return (createMappingInTask( intoTask, mapAddress,
4377 options | kIOMapStatic,
4378 0, getLength() ));
4379 }
4380
4381 IOMemoryMap * IOMemoryDescriptor::map(
4382 IOOptionBits options )
4383 {
4384 return (createMappingInTask( kernel_task, 0,
4385 options | kIOMapAnywhere,
4386 0, getLength() ));
4387 }
4388
4389 #ifndef __LP64__
4390 IOMemoryMap * IOMemoryDescriptor::map(
4391 task_t intoTask,
4392 IOVirtualAddress atAddress,
4393 IOOptionBits options,
4394 IOByteCount offset,
4395 IOByteCount length )
4396 {
4397 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4398 {
4399 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4400 return (0);
4401 }
4402
4403 return (createMappingInTask(intoTask, atAddress,
4404 options, offset, length));
4405 }
4406 #endif /* !__LP64__ */
4407
4408 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4409 task_t intoTask,
4410 mach_vm_address_t atAddress,
4411 IOOptionBits options,
4412 mach_vm_size_t offset,
4413 mach_vm_size_t length)
4414 {
4415 IOMemoryMap * result;
4416 IOMemoryMap * mapping;
4417
4418 if (0 == length)
4419 length = getLength();
4420
4421 mapping = new IOMemoryMap;
4422
4423 if( mapping
4424 && !mapping->init( intoTask, atAddress,
4425 options, offset, length )) {
4426 mapping->release();
4427 mapping = 0;
4428 }
4429
4430 if (mapping)
4431 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4432 else
4433 result = 0;
4434
4435 #if DEBUG
4436 if (!result)
4437 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4438 this, atAddress, (uint32_t) options, offset, length);
4439 #endif
4440
4441 return (result);
4442 }
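/*
 * Minimal usage sketch (illustrative only and not compiled in; the
 * ExampleMapIntoKernel helper is an assumption, and it presumes the
 * descriptor has already been prepared/wired by its owner): create a
 * kernel mapping of a descriptor, use it, then release the map to undo
 * the mapping.
 */
#if 0
static IOReturn
ExampleMapIntoKernel(IOMemoryDescriptor * md)
{
    // Map the whole descriptor anywhere in the kernel task.
    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
                                                kIOMapAnywhere, 0, 0);
    if (!map) return (kIOReturnVMError);

    void *      cpuAddr = (void *)(uintptr_t) map->getAddress();
    IOByteCount len     = map->getLength();

    bzero(cpuAddr, len);        // example access through the mapping

    map->release();             // dropping the map removes the mapping
    return (kIOReturnSuccess);
}
#endif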
4443
4444 #ifndef __LP64__ // there is only a 64-bit version for LP64
4445 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4446 IOOptionBits options,
4447 IOByteCount offset)
4448 {
4449 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4450 }
4451 #endif
4452
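/*
 * This flavor of redirect() swaps the memory behind an existing mapping:
 * the current pages are blocked with a UPL (UPL_BLOCK_ACCESS) and unmapped,
 * the mapping is then rebuilt against newBackingMemory via makeMapping()
 * with kIOMapUnique | kIOMapReference (the caller keeps the same IOMemoryMap
 * object and address), and finally the UPL is committed to unblock the old
 * pages.
 */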
4453 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4454 IOOptionBits options,
4455 mach_vm_size_t offset)
4456 {
4457 IOReturn err = kIOReturnSuccess;
4458 IOMemoryDescriptor * physMem = 0;
4459
4460 LOCK;
4461
4462 if (fAddress && fAddressMap) do
4463 {
4464 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4465 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4466 {
4467 physMem = fMemory;
4468 physMem->retain();
4469 }
4470
4471 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4472 {
4473 upl_size_t size = round_page(fLength);
4474 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4475 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4476 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4477 NULL, NULL,
4478 &flags, fMemory->getVMTag(kernel_map)))
4479 fRedirUPL = 0;
4480
4481 if (physMem)
4482 {
4483 IOUnmapPages( fAddressMap, fAddress, fLength );
4484 if ((false))
4485 physMem->redirect(0, true);
4486 }
4487 }
4488
4489 if (newBackingMemory)
4490 {
4491 if (newBackingMemory != fMemory)
4492 {
4493 fOffset = 0;
4494 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4495 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4496 offset, fLength))
4497 err = kIOReturnError;
4498 }
4499 if (fRedirUPL)
4500 {
4501 upl_commit(fRedirUPL, NULL, 0);
4502 upl_deallocate(fRedirUPL);
4503 fRedirUPL = 0;
4504 }
4505 if ((false) && physMem)
4506 physMem->redirect(0, false);
4507 }
4508 }
4509 while (false);
4510
4511 UNLOCK;
4512
4513 if (physMem)
4514 physMem->release();
4515
4516 return (err);
4517 }
4518
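/*
 * makeMapping() is the common back end for the mapping calls above. With
 * kIOMap64Bit the __address argument is really the freshly constructed
 * IOMemoryMap. kIOMapStatic adopts that mapping as-is; kIOMapUnique (for
 * physical descriptors) wraps the physical range in a temporary descriptor
 * and maps that; otherwise an existing compatible mapping is reused via
 * copyCompatible(), and only if none is found does doMap() create a new one.
 */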
4519 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4520 IOMemoryDescriptor * owner,
4521 task_t __intoTask,
4522 IOVirtualAddress __address,
4523 IOOptionBits options,
4524 IOByteCount __offset,
4525 IOByteCount __length )
4526 {
4527 #ifndef __LP64__
4528 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4529 #endif /* !__LP64__ */
4530
4531 IOMemoryDescriptor * mapDesc = 0;
4532 IOMemoryMap * result = 0;
4533 OSIterator * iter;
4534
4535 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4536 mach_vm_size_t offset = mapping->fOffset + __offset;
4537 mach_vm_size_t length = mapping->fLength;
4538
4539 mapping->fOffset = offset;
4540
4541 LOCK;
4542
4543 do
4544 {
4545 if (kIOMapStatic & options)
4546 {
4547 result = mapping;
4548 addMapping(mapping);
4549 mapping->setMemoryDescriptor(this, 0);
4550 continue;
4551 }
4552
4553 if (kIOMapUnique & options)
4554 {
4555 addr64_t phys;
4556 IOByteCount physLen;
4557
4558 // if (owner != this) continue;
4559
4560 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4561 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4562 {
4563 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4564 if (!phys || (physLen < length))
4565 continue;
4566
4567 mapDesc = IOMemoryDescriptor::withAddressRange(
4568 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4569 if (!mapDesc)
4570 continue;
4571 offset = 0;
4572 mapping->fOffset = offset;
4573 }
4574 }
4575 else
4576 {
4577 // look for a compatible existing mapping
4578 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4579 {
4580 IOMemoryMap * lookMapping;
4581 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4582 {
4583 if ((result = lookMapping->copyCompatible(mapping)))
4584 {
4585 addMapping(result);
4586 result->setMemoryDescriptor(this, offset);
4587 break;
4588 }
4589 }
4590 iter->release();
4591 }
4592 if (result || (options & kIOMapReference))
4593 {
4594 if (result != mapping)
4595 {
4596 mapping->release();
4597 mapping = NULL;
4598 }
4599 continue;
4600 }
4601 }
4602
4603 if (!mapDesc)
4604 {
4605 mapDesc = this;
4606 mapDesc->retain();
4607 }
4608 IOReturn
4609 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4610 if (kIOReturnSuccess == kr)
4611 {
4612 result = mapping;
4613 mapDesc->addMapping(result);
4614 result->setMemoryDescriptor(mapDesc, offset);
4615 }
4616 else
4617 {
4618 mapping->release();
4619 mapping = NULL;
4620 }
4621 }
4622 while( false );
4623
4624 UNLOCK;
4625
4626 if (mapDesc)
4627 mapDesc->release();
4628
4629 return (result);
4630 }
4631
4632 void IOMemoryDescriptor::addMapping(
4633 IOMemoryMap * mapping )
4634 {
4635 if( mapping)
4636 {
4637 if( 0 == _mappings)
4638 _mappings = OSSet::withCapacity(1);
4639 if( _mappings )
4640 _mappings->setObject( mapping );
4641 }
4642 }
4643
4644 void IOMemoryDescriptor::removeMapping(
4645 IOMemoryMap * mapping )
4646 {
4647 if( _mappings)
4648 _mappings->removeObject( mapping);
4649 }
4650
4651 #ifndef __LP64__
4652 // obsolete initializers
4653 // - initWithOptions is the designated initializer
4654 bool
4655 IOMemoryDescriptor::initWithAddress(void * address,
4656 IOByteCount length,
4657 IODirection direction)
4658 {
4659 return( false );
4660 }
4661
4662 bool
4663 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4664 IOByteCount length,
4665 IODirection direction,
4666 task_t task)
4667 {
4668 return( false );
4669 }
4670
4671 bool
4672 IOMemoryDescriptor::initWithPhysicalAddress(
4673 IOPhysicalAddress address,
4674 IOByteCount length,
4675 IODirection direction )
4676 {
4677 return( false );
4678 }
4679
4680 bool
4681 IOMemoryDescriptor::initWithRanges(
4682 IOVirtualRange * ranges,
4683 UInt32 withCount,
4684 IODirection direction,
4685 task_t task,
4686 bool asReference)
4687 {
4688 return( false );
4689 }
4690
4691 bool
4692 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4693 UInt32 withCount,
4694 IODirection direction,
4695 bool asReference)
4696 {
4697 return( false );
4698 }
4699
4700 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4701 IOByteCount * lengthOfSegment)
4702 {
4703 return( 0 );
4704 }
4705 #endif /* !__LP64__ */
4706
4707 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4708
4709 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4710 {
4711 OSSymbol const *keys[2] = {0};
4712 OSObject *values[2] = {0};
4713 OSArray * array;
4714 vm_size_t vcopy_size;
4715
4716 struct SerData {
4717 user_addr_t address;
4718 user_size_t length;
4719 } *vcopy = NULL;
4720 unsigned int index, nRanges;
4721 bool result = false;
4722
4723 IOOptionBits type = _flags & kIOMemoryTypeMask;
4724
4725 if (s == NULL) return false;
4726
4727 array = OSArray::withCapacity(4);
4728 if (!array) return (false);
4729
4730 nRanges = _rangesCount;
4731 if (os_mul_overflow(sizeof(SerData), nRanges, &vcopy_size)) {
4732 result = false;
4733 goto bail;
4734 }
4735 vcopy = (SerData *) IOMalloc(vcopy_size);
4736 if (vcopy == 0) {
4737 result = false;
4738 goto bail;
4739 }
4740
4741 keys[0] = OSSymbol::withCString("address");
4742 keys[1] = OSSymbol::withCString("length");
4743
4744 // Copy the volatile data so we don't have to allocate memory
4745 // while the lock is held.
4746 LOCK;
4747 if (nRanges == _rangesCount) {
4748 Ranges vec = _ranges;
4749 for (index = 0; index < nRanges; index++) {
4750 mach_vm_address_t addr; mach_vm_size_t len;
4751 getAddrLenForInd(addr, len, type, vec, index);
4752 vcopy[index].address = addr;
4753 vcopy[index].length = len;
4754 }
4755 } else {
4756 // The descriptor changed out from under us. Give up.
4757 UNLOCK;
4758 result = false;
4759 goto bail;
4760 }
4761 UNLOCK;
4762
4763 for (index = 0; index < nRanges; index++)
4764 {
4765 user_addr_t addr = vcopy[index].address;
4766 IOByteCount len = (IOByteCount) vcopy[index].length;
4767 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4768 if (values[0] == 0) {
4769 result = false;
4770 goto bail;
4771 }
4772 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4773 if (values[1] == 0) {
4774 result = false;
4775 goto bail;
4776 }
4777 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4778 if (dict == 0) {
4779 result = false;
4780 goto bail;
4781 }
4782 array->setObject(dict);
4783 dict->release();
4784 values[0]->release();
4785 values[1]->release();
4786 values[0] = values[1] = 0;
4787 }
4788
4789 result = array->serialize(s);
4790
4791 bail:
4792 if (array)
4793 array->release();
4794 if (values[0])
4795 values[0]->release();
4796 if (values[1])
4797 values[1]->release();
4798 if (keys[0])
4799 keys[0]->release();
4800 if (keys[1])
4801 keys[1]->release();
4802 if (vcopy)
4803 IOFree(vcopy, vcopy_size);
4804
4805 return result;
4806 }
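/*
 * The serialized form is an OSArray with one OSDictionary per range, each
 * holding "address" and "length" OSNumbers. Minimal sketch of driving it
 * (illustrative only and not compiled in; ExampleSerialize and the 4096
 * byte capacity are assumptions):
 */
#if 0
static void
ExampleSerialize(IOMemoryDescriptor * md)
{
    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (!s) return;

    if (md->serialize(s))
        IOLog("%s\n", s->text());   // XML form of the address/length array

    s->release();
}
#endif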
4807
4808 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4809
4810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4811 #ifdef __LP64__
4812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4819 #else /* !__LP64__ */
4820 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4821 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4822 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4823 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4824 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4825 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4826 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4827 #endif /* !__LP64__ */
4828 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4829 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4830 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4831 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4832 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4833 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4834 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4835 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4836
4837 /* ex-inline function implementation */
4838 IOPhysicalAddress
4839 IOMemoryDescriptor::getPhysicalAddress()
4840 { return( getPhysicalSegment( 0, 0 )); }