apple/xnu: iokit/Kernel/IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53 #include <os/overflow.h>
54
55 #include <sys/uio.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #include <mach/vm_prot.h>
64 #include <mach/mach_vm.h>
65 #include <vm/vm_fault.h>
66 #include <vm/vm_protos.h>
67
68 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
69 extern void ipc_port_release_send(ipc_port_t port);
70
71 // osfmk/device/iokit_rpc.c
72 unsigned int IODefaultCacheBits(addr64_t pa);
73 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
74
75 __END_DECLS
76
77 #define kIOMapperWaitSystem ((IOMapper *) 1)
78
79 static IOMapper * gIOSystemMapper = NULL;
80
81 ppnum_t gIOLastPage;
82
83 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
84
85 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
86
87 #define super IOMemoryDescriptor
88
89 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
90
91 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
92
93 static IORecursiveLock * gIOMemoryLock;
94
95 #define LOCK IORecursiveLockLock( gIOMemoryLock)
96 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
97 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
98 #define WAKEUP \
99 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
100
101 #if 0
102 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
103 #else
104 #define DEBG(fmt, args...) {}
105 #endif
106
107 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108
109 // Some data structures and accessor macros used by the initWithOptions
110 // function.
111
112 enum ioPLBlockFlags {
113 kIOPLOnDevice = 0x00000001,
114 kIOPLExternUPL = 0x00000002,
115 };
116
117 struct IOMDPersistentInitData
118 {
119 const IOGeneralMemoryDescriptor * fMD;
120 IOMemoryReference * fMemRef;
121 };
122
123 struct ioPLBlock {
124 upl_t fIOPL;
125 vm_address_t fPageInfo; // Pointer to page list or index into it
126 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
127 ppnum_t fMappedPage; // Page number of first page in this iopl
128 unsigned int fPageOffset; // Offset within first page of iopl
129 unsigned int fFlags; // Flags
130 };
131
132 enum { kMaxWireTags = 6 };
133
134 struct ioGMDData
135 {
136 IOMapper * fMapper;
137 uint64_t fDMAMapAlignment;
138 uint64_t fMappedBase;
139 uint64_t fMappedLength;
140 uint64_t fPreparationID;
141 #if IOTRACKING
142 IOTracking fWireTracking;
143 #endif /* IOTRACKING */
144 unsigned int fPageCnt;
145 uint8_t fDMAMapNumAddressBits;
146 unsigned char fDiscontig:1;
147 unsigned char fCompletionError:1;
148 unsigned char fMappedBaseValid:1;
149 unsigned char _resv:3;
150 unsigned char fDMAAccess:2;
151
152 /* variable length arrays */
153 upl_page_info_t fPageList[1]
154 #if __LP64__
155 // align fPageList as for ioPLBlock
156 __attribute__((aligned(sizeof(upl_t))))
157 #endif
158 ;
159 ioPLBlock fBlocks[1];
160 };
161
162 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
163 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
164 #define getNumIOPL(osd, d) \
165 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
166 #define getPageList(d) (&(d->fPageList[0]))
167 #define computeDataSize(p, u) \
168 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
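//
// Layout sketch (informal, derived from the macros above): an ioGMDData lives in a
// single OSData buffer holding the fixed header, then fPageCnt page entries, then the
// ioPLBlock array, so computeDataSize(p, u) sizes all three pieces together:
//
//   +-------------------------------+  offset 0
//   | ioGMDData header fields       |
//   +-------------------------------+  offsetof(ioGMDData, fPageList)
//   | upl_page_info_t fPageList[]   |  p == fPageCnt entries
//   +-------------------------------+  getIOPLList(d)
//   | ioPLBlock fBlocks[]           |  u entries, counted by getNumIOPL()
//   +-------------------------------+  osd->getLength()
//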
169
170 enum { kIOMemoryHostOrRemote = kIOMemoryHostOnly | kIOMemoryRemote };
171
172 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
173
174 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
175
176 extern "C" {
177
178 kern_return_t device_data_action(
179 uintptr_t device_handle,
180 ipc_port_t device_pager,
181 vm_prot_t protection,
182 vm_object_offset_t offset,
183 vm_size_t size)
184 {
185 kern_return_t kr;
186 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
187 IOMemoryDescriptor * memDesc;
188
189 LOCK;
190 memDesc = ref->dp.memory;
191 if( memDesc)
192 {
193 memDesc->retain();
194 kr = memDesc->handleFault(device_pager, offset, size);
195 memDesc->release();
196 }
197 else
198 kr = KERN_ABORTED;
199 UNLOCK;
200
201 return( kr );
202 }
203
204 kern_return_t device_close(
205 uintptr_t device_handle)
206 {
207 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
208
209 IODelete( ref, IOMemoryDescriptorReserved, 1 );
210
211 return( kIOReturnSuccess );
212 }
213 }; // end extern "C"
214
215 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
216
217 // Note this inline function uses C++ reference arguments to return values.
218 // This means that pointers are not passed, and NULL checks are unnecessary
219 // because a NULL reference is illegal.
220 static inline void
221 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
222 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
223 {
224 assert(kIOMemoryTypeUIO == type
225 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
226 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
227 if (kIOMemoryTypeUIO == type) {
228 user_size_t us;
229 user_addr_t ad;
230 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
231 }
232 #ifndef __LP64__
233 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
234 IOAddressRange cur = r.v64[ind];
235 addr = cur.address;
236 len = cur.length;
237 }
238 #endif /* !__LP64__ */
239 else {
240 IOVirtualRange cur = r.v[ind];
241 addr = cur.address;
242 len = cur.length;
243 }
244 }
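// Usage sketch (illustrative only; it mirrors the calls made later in this file): the
// address and length come back through the reference arguments.
#if 0
mach_vm_address_t addr;
mach_vm_size_t    len;
getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, rangeIdx);
#endif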
245
246 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
247
248 static IOReturn
249 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
250 {
251 IOReturn err = kIOReturnSuccess;
252
253 *control = VM_PURGABLE_SET_STATE;
254
255 enum { kIOMemoryPurgeableControlMask = 15 };
256
257 switch (kIOMemoryPurgeableControlMask & newState)
258 {
259 case kIOMemoryPurgeableKeepCurrent:
260 *control = VM_PURGABLE_GET_STATE;
261 break;
262
263 case kIOMemoryPurgeableNonVolatile:
264 *state = VM_PURGABLE_NONVOLATILE;
265 break;
266 case kIOMemoryPurgeableVolatile:
267 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
268 break;
269 case kIOMemoryPurgeableEmpty:
270 *state = VM_PURGABLE_EMPTY | (newState & ~kIOMemoryPurgeableControlMask);
271 break;
272 default:
273 err = kIOReturnBadArgument;
274 break;
275 }
276
277 if (*control == VM_PURGABLE_SET_STATE) {
278 // let VM know this call is from the kernel and is allowed to alter
279 // the volatility of the memory entry even if it was created with
280 // MAP_MEM_PURGABLE_KERNEL_ONLY
281 *control = VM_PURGABLE_SET_STATE_FROM_KERNEL;
282 }
283
284 return (err);
285 }
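// Worked example (illustrative only) of the mapping performed above:
#if 0
vm_purgable_t control;
int           state;
// kIOMemoryPurgeableVolatile   -> control VM_PURGABLE_SET_STATE_FROM_KERNEL,
//                                 state   VM_PURGABLE_VOLATILE
purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state);
// kIOMemoryPurgeableKeepCurrent -> control VM_PURGABLE_GET_STATE (query only)
purgeableControlBits(kIOMemoryPurgeableKeepCurrent, &control, &state);
#endif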
286
287 static IOReturn
288 purgeableStateBits(int * state)
289 {
290 IOReturn err = kIOReturnSuccess;
291
292 switch (VM_PURGABLE_STATE_MASK & *state)
293 {
294 case VM_PURGABLE_NONVOLATILE:
295 *state = kIOMemoryPurgeableNonVolatile;
296 break;
297 case VM_PURGABLE_VOLATILE:
298 *state = kIOMemoryPurgeableVolatile;
299 break;
300 case VM_PURGABLE_EMPTY:
301 *state = kIOMemoryPurgeableEmpty;
302 break;
303 default:
304 *state = kIOMemoryPurgeableNonVolatile;
305 err = kIOReturnNotReady;
306 break;
307 }
308 return (err);
309 }
310
311
312 static vm_prot_t
313 vmProtForCacheMode(IOOptionBits cacheMode)
314 {
315 vm_prot_t prot = 0;
316 switch (cacheMode)
317 {
318 case kIOInhibitCache:
319 SET_MAP_MEM(MAP_MEM_IO, prot);
320 break;
321
322 case kIOWriteThruCache:
323 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
324 break;
325
326 case kIOWriteCombineCache:
327 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
328 break;
329
330 case kIOCopybackCache:
331 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
332 break;
333
334 case kIOCopybackInnerCache:
335 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
336 break;
337
338 case kIOPostedWrite:
339 SET_MAP_MEM(MAP_MEM_POSTED, prot);
340 break;
341
342 case kIODefaultCache:
343 default:
344 SET_MAP_MEM(MAP_MEM_NOOP, prot);
345 break;
346 }
347
348 return (prot);
349 }
350
351 static unsigned int
352 pagerFlagsForCacheMode(IOOptionBits cacheMode)
353 {
354 unsigned int pagerFlags = 0;
355 switch (cacheMode)
356 {
357 case kIOInhibitCache:
358 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
359 break;
360
361 case kIOWriteThruCache:
362 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
363 break;
364
365 case kIOWriteCombineCache:
366 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
367 break;
368
369 case kIOCopybackCache:
370 pagerFlags = DEVICE_PAGER_COHERENT;
371 break;
372
373 case kIOCopybackInnerCache:
374 pagerFlags = DEVICE_PAGER_COHERENT;
375 break;
376
377 case kIOPostedWrite:
378 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED | DEVICE_PAGER_EARLY_ACK;
379 break;
380
381 case kIODefaultCache:
382 default:
383 pagerFlags = -1U;
384 break;
385 }
386 return (pagerFlags);
387 }
388
389 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
390 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
391
392 struct IOMemoryEntry
393 {
394 ipc_port_t entry;
395 int64_t offset;
396 uint64_t size;
397 };
398
399 struct IOMemoryReference
400 {
401 volatile SInt32 refCount;
402 vm_prot_t prot;
403 uint32_t capacity;
404 uint32_t count;
405 struct IOMemoryReference * mapRef;
406 IOMemoryEntry entries[0];
407 };
408
409 enum
410 {
411 kIOMemoryReferenceReuse = 0x00000001,
412 kIOMemoryReferenceWrite = 0x00000002,
413 kIOMemoryReferenceCOW = 0x00000004,
414 };
415
416 SInt32 gIOMemoryReferenceCount;
417
418 IOMemoryReference *
419 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
420 {
421 IOMemoryReference * ref;
422 size_t newSize, oldSize, copySize;
423
424 newSize = (sizeof(IOMemoryReference)
425 - sizeof(ref->entries)
426 + capacity * sizeof(ref->entries[0]));
427 ref = (typeof(ref)) IOMalloc(newSize);
428 if (realloc)
429 {
430 oldSize = (sizeof(IOMemoryReference)
431 - sizeof(realloc->entries)
432 + realloc->capacity * sizeof(realloc->entries[0]));
433 copySize = oldSize;
434 if (copySize > newSize) copySize = newSize;
435 if (ref) bcopy(realloc, ref, copySize);
436 IOFree(realloc, oldSize);
437 }
438 else if (ref)
439 {
440 bzero(ref, sizeof(*ref));
441 ref->refCount = 1;
442 OSIncrementAtomic(&gIOMemoryReferenceCount);
443 }
444 if (!ref) return (0);
445 ref->capacity = capacity;
446 return (ref);
447 }
448
449 void
450 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
451 {
452 IOMemoryEntry * entries;
453 size_t size;
454
455 if (ref->mapRef)
456 {
457 memoryReferenceFree(ref->mapRef);
458 ref->mapRef = 0;
459 }
460
461 entries = ref->entries + ref->count;
462 while (entries > &ref->entries[0])
463 {
464 entries--;
465 ipc_port_release_send(entries->entry);
466 }
467 size = (sizeof(IOMemoryReference)
468 - sizeof(ref->entries)
469 + ref->capacity * sizeof(ref->entries[0]));
470 IOFree(ref, size);
471
472 OSDecrementAtomic(&gIOMemoryReferenceCount);
473 }
474
475 void
476 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
477 {
478 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
479 }
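// Lifecycle sketch (illustrative only): references are created with a refCount of 1,
// shared by incrementing refCount, and torn down through memoryReferenceRelease(),
// which frees the entries' send rights once the last reference is dropped.
#if 0
IOMemoryReference * ref = memoryReferenceAlloc(4 /* capacity */, NULL); // refCount == 1
// ... fill ref->entries[] and ref->count, e.g. via memoryReferenceCreate() ...
memoryReferenceRelease(ref); // the last release calls memoryReferenceFree()
#endif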
480
481
482 IOReturn
483 IOGeneralMemoryDescriptor::memoryReferenceCreate(
484 IOOptionBits options,
485 IOMemoryReference ** reference)
486 {
487 enum { kCapacity = 4, kCapacityInc = 4 };
488
489 kern_return_t err;
490 IOMemoryReference * ref;
491 IOMemoryEntry * entries;
492 IOMemoryEntry * cloneEntries;
493 vm_map_t map;
494 ipc_port_t entry, cloneEntry;
495 vm_prot_t prot;
496 memory_object_size_t actualSize;
497 uint32_t rangeIdx;
498 uint32_t count;
499 mach_vm_address_t entryAddr, endAddr, entrySize;
500 mach_vm_size_t srcAddr, srcLen;
501 mach_vm_size_t nextAddr, nextLen;
502 mach_vm_size_t offset, remain;
503 IOByteCount physLen;
504 IOOptionBits type = (_flags & kIOMemoryTypeMask);
505 IOOptionBits cacheMode;
506 unsigned int pagerFlags;
507 vm_tag_t tag;
508
509 ref = memoryReferenceAlloc(kCapacity, NULL);
510 if (!ref) return (kIOReturnNoMemory);
511
512 tag = getVMTag(kernel_map);
513 entries = &ref->entries[0];
514 count = 0;
515 err = KERN_SUCCESS;
516
517 offset = 0;
518 rangeIdx = 0;
519 if (_task)
520 {
521 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
522 }
523 else
524 {
525 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
526 nextLen = physLen;
527
528 // default cache mode for physical
529 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
530 {
531 IOOptionBits mode;
532 pagerFlags = IODefaultCacheBits(nextAddr);
533 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
534 {
535 if (DEVICE_PAGER_EARLY_ACK & pagerFlags)
536 mode = kIOPostedWrite;
537 else if (DEVICE_PAGER_GUARDED & pagerFlags)
538 mode = kIOInhibitCache;
539 else
540 mode = kIOWriteCombineCache;
541 }
542 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
543 mode = kIOWriteThruCache;
544 else
545 mode = kIOCopybackCache;
546 _flags |= (mode << kIOMemoryBufferCacheShift);
547 }
548 }
549
550 // cache mode & vm_prot
551 prot = VM_PROT_READ;
552 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
553 prot |= vmProtForCacheMode(cacheMode);
554 // VM system requires write access to change cache mode
555 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
556 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
557 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
558 if (kIOMemoryReferenceCOW & options) prot |= MAP_MEM_VM_COPY;
559
560 if ((kIOMemoryReferenceReuse & options) && _memRef)
561 {
562 cloneEntries = &_memRef->entries[0];
563 prot |= MAP_MEM_NAMED_REUSE;
564 }
565
566 if (_task)
567 {
568 // virtual ranges
569
570 if (kIOMemoryBufferPageable & _flags)
571 {
572 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
573 prot |= MAP_MEM_NAMED_CREATE;
574 if (kIOMemoryBufferPurgeable & _flags) prot |= (MAP_MEM_PURGABLE | MAP_MEM_PURGABLE_KERNEL_ONLY);
575 if (kIOMemoryUseReserve & _flags) prot |= MAP_MEM_GRAB_SECLUDED;
576
577 prot |= VM_PROT_WRITE;
578 map = NULL;
579 }
580 else map = get_task_map(_task);
581
582 remain = _length;
583 while (remain)
584 {
585 srcAddr = nextAddr;
586 srcLen = nextLen;
587 nextAddr = 0;
588 nextLen = 0;
589 // coalesce addr range
590 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
591 {
592 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
593 if ((srcAddr + srcLen) != nextAddr) break;
594 srcLen += nextLen;
595 }
596 entryAddr = trunc_page_64(srcAddr);
597 endAddr = round_page_64(srcAddr + srcLen);
598 do
599 {
600 entrySize = (endAddr - entryAddr);
601 if (!entrySize) break;
602 actualSize = entrySize;
603
604 cloneEntry = MACH_PORT_NULL;
605 if (MAP_MEM_NAMED_REUSE & prot)
606 {
607 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
608 else prot &= ~MAP_MEM_NAMED_REUSE;
609 }
610
611 err = mach_make_memory_entry_64(map,
612 &actualSize, entryAddr, prot, &entry, cloneEntry);
613
614 if (KERN_SUCCESS != err) break;
615 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
616
617 if (count >= ref->capacity)
618 {
619 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
620 entries = &ref->entries[count];
621 }
622 entries->entry = entry;
623 entries->size = actualSize;
624 entries->offset = offset + (entryAddr - srcAddr);
625 entryAddr += actualSize;
626 if (MAP_MEM_NAMED_REUSE & prot)
627 {
628 if ((cloneEntries->entry == entries->entry)
629 && (cloneEntries->size == entries->size)
630 && (cloneEntries->offset == entries->offset)) cloneEntries++;
631 else prot &= ~MAP_MEM_NAMED_REUSE;
632 }
633 entries++;
634 count++;
635 }
636 while (true);
637 offset += srcLen;
638 remain -= srcLen;
639 }
640 }
641 else
642 {
643 // _task == 0, physical or kIOMemoryTypeUPL
644 memory_object_t pager;
645 vm_size_t size = ptoa_32(_pages);
646
647 if (!getKernelReserved()) panic("getKernelReserved");
648
649 reserved->dp.pagerContig = (1 == _rangesCount);
650 reserved->dp.memory = this;
651
652 pagerFlags = pagerFlagsForCacheMode(cacheMode);
653 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
654 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
655
656 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
657 size, pagerFlags);
658 assert (pager);
659 if (!pager) err = kIOReturnVMError;
660 else
661 {
662 srcAddr = nextAddr;
663 entryAddr = trunc_page_64(srcAddr);
664 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
665 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
666 assert (KERN_SUCCESS == err);
667 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
668 else
669 {
670 reserved->dp.devicePager = pager;
671 entries->entry = entry;
672 entries->size = size;
673 entries->offset = offset + (entryAddr - srcAddr);
674 entries++;
675 count++;
676 }
677 }
678 }
679
680 ref->count = count;
681 ref->prot = prot;
682
683 if (_task && (KERN_SUCCESS == err)
684 && (kIOMemoryMapCopyOnWrite & _flags)
685 && !(kIOMemoryReferenceCOW & options))
686 {
687 err = memoryReferenceCreate(options | kIOMemoryReferenceCOW, &ref->mapRef);
688 }
689
690 if (KERN_SUCCESS == err)
691 {
692 if (MAP_MEM_NAMED_REUSE & prot)
693 {
694 memoryReferenceFree(ref);
695 OSIncrementAtomic(&_memRef->refCount);
696 ref = _memRef;
697 }
698 }
699 else
700 {
701 memoryReferenceFree(ref);
702 ref = NULL;
703 }
704
705 *reference = ref;
706
707 return (err);
708 }
709
710 kern_return_t
711 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
712 {
713 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
714 IOReturn err;
715 vm_map_offset_t addr;
716
717 addr = ref->mapped;
718
719 err = vm_map_enter_mem_object(map, &addr, ref->size,
720 (vm_map_offset_t) 0,
721 (((ref->options & kIOMapAnywhere)
722 ? VM_FLAGS_ANYWHERE
723 : VM_FLAGS_FIXED)),
724 VM_MAP_KERNEL_FLAGS_NONE,
725 ref->tag,
726 IPC_PORT_NULL,
727 (memory_object_offset_t) 0,
728 false, /* copy */
729 ref->prot,
730 ref->prot,
731 VM_INHERIT_NONE);
732 if (KERN_SUCCESS == err)
733 {
734 ref->mapped = (mach_vm_address_t) addr;
735 ref->map = map;
736 }
737
738 return( err );
739 }
740
741 IOReturn
742 IOGeneralMemoryDescriptor::memoryReferenceMap(
743 IOMemoryReference * ref,
744 vm_map_t map,
745 mach_vm_size_t inoffset,
746 mach_vm_size_t size,
747 IOOptionBits options,
748 mach_vm_address_t * inaddr)
749 {
750 IOReturn err;
751 int64_t offset = inoffset;
752 uint32_t rangeIdx, entryIdx;
753 vm_map_offset_t addr, mapAddr;
754 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
755
756 mach_vm_address_t nextAddr;
757 mach_vm_size_t nextLen;
758 IOByteCount physLen;
759 IOMemoryEntry * entry;
760 vm_prot_t prot, memEntryCacheMode;
761 IOOptionBits type;
762 IOOptionBits cacheMode;
763 vm_tag_t tag;
764 // for the kIOMapPrefault option.
765 upl_page_info_t * pageList = NULL;
766 UInt currentPageIndex = 0;
767 bool didAlloc;
768
769 if (ref->mapRef)
770 {
771 err = memoryReferenceMap(ref->mapRef, map, inoffset, size, options, inaddr);
772 return (err);
773 }
774
775 type = _flags & kIOMemoryTypeMask;
776
777 prot = VM_PROT_READ;
778 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
779 prot &= ref->prot;
780
781 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
782 if (kIODefaultCache != cacheMode)
783 {
784 // VM system requires write access to update named entry cache mode
785 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
786 }
787
788 tag = getVMTag(map);
789
790 if (_task)
791 {
792 // Find first range for offset
793 if (!_rangesCount) return (kIOReturnBadArgument);
794 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
795 {
796 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
797 if (remain < nextLen) break;
798 remain -= nextLen;
799 }
800 }
801 else
802 {
803 rangeIdx = 0;
804 remain = 0;
805 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
806 nextLen = size;
807 }
808
809 assert(remain < nextLen);
810 if (remain >= nextLen) return (kIOReturnBadArgument);
811
812 nextAddr += remain;
813 nextLen -= remain;
814 pageOffset = (page_mask & nextAddr);
815 addr = 0;
816 didAlloc = false;
817
818 if (!(options & kIOMapAnywhere))
819 {
820 addr = *inaddr;
821 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
822 addr -= pageOffset;
823 }
824
825 // find first entry for offset
826 for (entryIdx = 0;
827 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
828 entryIdx++) {}
829 entryIdx--;
830 entry = &ref->entries[entryIdx];
831
832 // allocate VM
833 size = round_page_64(size + pageOffset);
834 if (kIOMapOverwrite & options)
835 {
836 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
837 {
838 map = IOPageableMapForAddress(addr);
839 }
840 err = KERN_SUCCESS;
841 }
842 else
843 {
844 IOMemoryDescriptorMapAllocRef ref;
845 ref.map = map;
846 ref.tag = tag;
847 ref.options = options;
848 ref.size = size;
849 ref.prot = prot;
850 if (options & kIOMapAnywhere)
851 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
852 ref.mapped = 0;
853 else
854 ref.mapped = addr;
855 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
856 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
857 else
858 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
859 if (KERN_SUCCESS == err)
860 {
861 addr = ref.mapped;
862 map = ref.map;
863 didAlloc = true;
864 }
865 }
866
867 /*
868 * If the memory is associated with a device pager but doesn't have a UPL,
869 * it will be immediately faulted in through the pager via populateDevicePager().
870 * kIOMapPrefault is redundant in that case, so don't try to use it for UPL
871 * operations.
872 */
873 if ((reserved != NULL) && (reserved->dp.devicePager) && (_memoryEntries == NULL) && (_wireCount != 0))
874 options &= ~kIOMapPrefault;
875
876 /*
877 * Prefaulting is only possible if we wired the memory earlier. Check the
878 * memory type, and the underlying data.
879 */
880 if (options & kIOMapPrefault)
881 {
882 /*
883 * The memory must have been wired by calling ::prepare(), otherwise
884 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
885 */
886 assert(_wireCount != 0);
887 assert(_memoryEntries != NULL);
888 if ((_wireCount == 0) ||
889 (_memoryEntries == NULL))
890 {
891 return kIOReturnBadArgument;
892 }
893
894 // Get the page list.
895 ioGMDData* dataP = getDataP(_memoryEntries);
896 ioPLBlock const* ioplList = getIOPLList(dataP);
897 pageList = getPageList(dataP);
898
899 // Get the number of IOPLs.
900 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
901
902 /*
903 * Scan through the IOPL info blocks, looking for the first block containing
904 * the offset. The search goes one block past it, so we step back to the
905 * correct block at the end.
906 */
907 UInt ioplIndex = 0;
908 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
909 ioplIndex++;
910 ioplIndex--;
911
912 // Retrieve the IOPL info block.
913 ioPLBlock ioplInfo = ioplList[ioplIndex];
914
915 /*
916 * For external UPLs, the fPageInfo points directly to the UPL's page_info_t
917 * array.
918 */
919 if (ioplInfo.fFlags & kIOPLExternUPL)
920 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
921 else
922 pageList = &pageList[ioplInfo.fPageInfo];
923
924 // Rebase [offset] into the IOPL in order to look up the first page index.
925 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
926
927 // Retrieve the index of the first page corresponding to the offset.
928 currentPageIndex = atop_32(offsetInIOPL);
929 }
930
931 // enter mappings
932 remain = size;
933 mapAddr = addr;
934 addr += pageOffset;
935
936 while (remain && (KERN_SUCCESS == err))
937 {
938 entryOffset = offset - entry->offset;
939 if ((page_mask & entryOffset) != pageOffset)
940 {
941 err = kIOReturnNotAligned;
942 break;
943 }
944
945 if (kIODefaultCache != cacheMode)
946 {
947 vm_size_t unused = 0;
948 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
949 memEntryCacheMode, NULL, entry->entry);
950 assert (KERN_SUCCESS == err);
951 }
952
953 entryOffset -= pageOffset;
954 if (entryOffset >= entry->size) panic("entryOffset");
955 chunk = entry->size - entryOffset;
956 if (chunk)
957 {
958 vm_map_kernel_flags_t vmk_flags;
959
960 vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
961 vmk_flags.vmkf_iokit_acct = TRUE; /* iokit accounting */
962
963 if (chunk > remain) chunk = remain;
964 if (options & kIOMapPrefault)
965 {
966 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
967
968 err = vm_map_enter_mem_object_prefault(map,
969 &mapAddr,
970 chunk, 0 /* mask */,
971 (VM_FLAGS_FIXED
972 | VM_FLAGS_OVERWRITE),
973 vmk_flags,
974 tag,
975 entry->entry,
976 entryOffset,
977 prot, // cur
978 prot, // max
979 &pageList[currentPageIndex],
980 nb_pages);
981
982 // Compute the next index in the page list.
983 currentPageIndex += nb_pages;
984 assert(currentPageIndex <= _pages);
985 }
986 else
987 {
988 err = vm_map_enter_mem_object(map,
989 &mapAddr,
990 chunk, 0 /* mask */,
991 (VM_FLAGS_FIXED
992 | VM_FLAGS_OVERWRITE),
993 vmk_flags,
994 tag,
995 entry->entry,
996 entryOffset,
997 false, // copy
998 prot, // cur
999 prot, // max
1000 VM_INHERIT_NONE);
1001 }
1002 if (KERN_SUCCESS != err) break;
1003 remain -= chunk;
1004 if (!remain) break;
1005 mapAddr += chunk;
1006 offset += chunk - pageOffset;
1007 }
1008 pageOffset = 0;
1009 entry++;
1010 entryIdx++;
1011 if (entryIdx >= ref->count)
1012 {
1013 err = kIOReturnOverrun;
1014 break;
1015 }
1016 }
1017
1018 if ((KERN_SUCCESS != err) && didAlloc)
1019 {
1020 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
1021 addr = 0;
1022 }
1023 *inaddr = addr;
1024
1025 return (err);
1026 }
1027
1028 IOReturn
1029 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
1030 IOMemoryReference * ref,
1031 IOByteCount * residentPageCount,
1032 IOByteCount * dirtyPageCount)
1033 {
1034 IOReturn err;
1035 IOMemoryEntry * entries;
1036 unsigned int resident, dirty;
1037 unsigned int totalResident, totalDirty;
1038
1039 totalResident = totalDirty = 0;
1040 err = kIOReturnSuccess;
1041 entries = ref->entries + ref->count;
1042 while (entries > &ref->entries[0])
1043 {
1044 entries--;
1045 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
1046 if (KERN_SUCCESS != err) break;
1047 totalResident += resident;
1048 totalDirty += dirty;
1049 }
1050
1051 if (residentPageCount) *residentPageCount = totalResident;
1052 if (dirtyPageCount) *dirtyPageCount = totalDirty;
1053 return (err);
1054 }
1055
1056 IOReturn
1057 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1058 IOMemoryReference * ref,
1059 IOOptionBits newState,
1060 IOOptionBits * oldState)
1061 {
1062 IOReturn err;
1063 IOMemoryEntry * entries;
1064 vm_purgable_t control;
1065 int totalState, state;
1066
1067 totalState = kIOMemoryPurgeableNonVolatile;
1068 err = kIOReturnSuccess;
1069 entries = ref->entries + ref->count;
1070 while (entries > &ref->entries[0])
1071 {
1072 entries--;
1073
1074 err = purgeableControlBits(newState, &control, &state);
1075 if (KERN_SUCCESS != err) break;
1076 err = memory_entry_purgeable_control_internal(entries->entry, control, &state);
1077 if (KERN_SUCCESS != err) break;
1078 err = purgeableStateBits(&state);
1079 if (KERN_SUCCESS != err) break;
1080
1081 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1082 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1083 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1084 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1085 else totalState = kIOMemoryPurgeableNonVolatile;
1086 }
1087
1088 if (oldState) *oldState = totalState;
1089 return (err);
1090 }
1091
1092 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1093
1094 IOMemoryDescriptor *
1095 IOMemoryDescriptor::withAddress(void * address,
1096 IOByteCount length,
1097 IODirection direction)
1098 {
1099 return IOMemoryDescriptor::
1100 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1101 }
1102
1103 #ifndef __LP64__
1104 IOMemoryDescriptor *
1105 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1106 IOByteCount length,
1107 IODirection direction,
1108 task_t task)
1109 {
1110 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1111 if (that)
1112 {
1113 if (that->initWithAddress(address, length, direction, task))
1114 return that;
1115
1116 that->release();
1117 }
1118 return 0;
1119 }
1120 #endif /* !__LP64__ */
1121
1122 IOMemoryDescriptor *
1123 IOMemoryDescriptor::withPhysicalAddress(
1124 IOPhysicalAddress address,
1125 IOByteCount length,
1126 IODirection direction )
1127 {
1128 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1129 }
1130
1131 #ifndef __LP64__
1132 IOMemoryDescriptor *
1133 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1134 UInt32 withCount,
1135 IODirection direction,
1136 task_t task,
1137 bool asReference)
1138 {
1139 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1140 if (that)
1141 {
1142 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1143 return that;
1144
1145 that->release();
1146 }
1147 return 0;
1148 }
1149 #endif /* !__LP64__ */
1150
1151 IOMemoryDescriptor *
1152 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1153 mach_vm_size_t length,
1154 IOOptionBits options,
1155 task_t task)
1156 {
1157 IOAddressRange range = { address, length };
1158 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1159 }
1160
1161 IOMemoryDescriptor *
1162 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1163 UInt32 rangeCount,
1164 IOOptionBits options,
1165 task_t task)
1166 {
1167 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1168 if (that)
1169 {
1170 if (task)
1171 options |= kIOMemoryTypeVirtual64;
1172 else
1173 options |= kIOMemoryTypePhysical64;
1174
1175 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1176 return that;
1177
1178 that->release();
1179 }
1180
1181 return 0;
1182 }
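// Usage sketch (illustrative only; userVA, userLen and userTask are placeholders): a
// typical client describes a buffer in another task, wires it around the I/O, and
// releases the descriptor when done.
#if 0
IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
    userVA, userLen, kIODirectionOutIn, userTask);
if (md && (kIOReturnSuccess == md->prepare()))
{
    // ... program DMA or use readBytes()/writeBytes() ...
    md->complete();
}
if (md) md->release();
#endif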
1183
1184
1185 /*
1186 * withOptions:
1187 *
1188 * Create a new IOMemoryDescriptor. The buffer is made up of several
1189 * virtual address ranges from a given task.
1190 *
1191 * Passing the ranges as a reference will avoid an extra allocation.
1192 */
1193 IOMemoryDescriptor *
1194 IOMemoryDescriptor::withOptions(void * buffers,
1195 UInt32 count,
1196 UInt32 offset,
1197 task_t task,
1198 IOOptionBits opts,
1199 IOMapper * mapper)
1200 {
1201 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1202
1203 if (self
1204 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1205 {
1206 self->release();
1207 return 0;
1208 }
1209
1210 return self;
1211 }
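// Sketch (illustrative only) of the kIOMemoryAsReference case mentioned above: the
// ranges are not copied, so the caller must keep the array valid for the descriptor's
// lifetime. The range values and task are placeholders.
#if 0
IOAddressRange ranges[2] = { { vaA, lenA }, { vaB, lenB } };
IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(ranges, 2, 0, task,
    kIOMemoryTypeVirtual64 | kIODirectionOutIn | kIOMemoryAsReference, kIOMapperSystem);
#endif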
1212
1213 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1214 UInt32 count,
1215 UInt32 offset,
1216 task_t task,
1217 IOOptionBits options,
1218 IOMapper * mapper)
1219 {
1220 return( false );
1221 }
1222
1223 #ifndef __LP64__
1224 IOMemoryDescriptor *
1225 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1226 UInt32 withCount,
1227 IODirection direction,
1228 bool asReference)
1229 {
1230 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1231 if (that)
1232 {
1233 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1234 return that;
1235
1236 that->release();
1237 }
1238 return 0;
1239 }
1240
1241 IOMemoryDescriptor *
1242 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1243 IOByteCount offset,
1244 IOByteCount length,
1245 IODirection direction)
1246 {
1247 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1248 }
1249 #endif /* !__LP64__ */
1250
1251 IOMemoryDescriptor *
1252 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1253 {
1254 IOGeneralMemoryDescriptor *origGenMD =
1255 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1256
1257 if (origGenMD)
1258 return IOGeneralMemoryDescriptor::
1259 withPersistentMemoryDescriptor(origGenMD);
1260 else
1261 return 0;
1262 }
1263
1264 IOMemoryDescriptor *
1265 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1266 {
1267 IOMemoryReference * memRef;
1268
1269 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1270
1271 if (memRef == originalMD->_memRef)
1272 {
1273 originalMD->retain(); // Add a new reference to ourselves
1274 originalMD->memoryReferenceRelease(memRef);
1275 return originalMD;
1276 }
1277
1278 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1279 IOMDPersistentInitData initData = { originalMD, memRef };
1280
1281 if (self
1282 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1283 self->release();
1284 self = 0;
1285 }
1286 return self;
1287 }
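// Usage sketch (illustrative only): when the underlying named entry is unchanged the
// original descriptor is simply retained and returned; otherwise a new descriptor
// sharing the same memory reference is built. Either way the caller owns one reference.
#if 0
IOMemoryDescriptor * persistent =
    IOMemoryDescriptor::withPersistentMemoryDescriptor(originalMD);
// ... use persistent; it refers to the same memory via the shared IOMemoryReference ...
if (persistent) persistent->release();
#endif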
1288
1289 #ifndef __LP64__
1290 bool
1291 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1292 IOByteCount withLength,
1293 IODirection withDirection)
1294 {
1295 _singleRange.v.address = (vm_offset_t) address;
1296 _singleRange.v.length = withLength;
1297
1298 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1299 }
1300
1301 bool
1302 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1303 IOByteCount withLength,
1304 IODirection withDirection,
1305 task_t withTask)
1306 {
1307 _singleRange.v.address = address;
1308 _singleRange.v.length = withLength;
1309
1310 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1311 }
1312
1313 bool
1314 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1315 IOPhysicalAddress address,
1316 IOByteCount withLength,
1317 IODirection withDirection )
1318 {
1319 _singleRange.p.address = address;
1320 _singleRange.p.length = withLength;
1321
1322 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1323 }
1324
1325 bool
1326 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1327 IOPhysicalRange * ranges,
1328 UInt32 count,
1329 IODirection direction,
1330 bool reference)
1331 {
1332 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1333
1334 if (reference)
1335 mdOpts |= kIOMemoryAsReference;
1336
1337 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1338 }
1339
1340 bool
1341 IOGeneralMemoryDescriptor::initWithRanges(
1342 IOVirtualRange * ranges,
1343 UInt32 count,
1344 IODirection direction,
1345 task_t task,
1346 bool reference)
1347 {
1348 IOOptionBits mdOpts = direction;
1349
1350 if (reference)
1351 mdOpts |= kIOMemoryAsReference;
1352
1353 if (task) {
1354 mdOpts |= kIOMemoryTypeVirtual;
1355
1356 // Auto-prepare if this is a kernel memory descriptor as very few
1357 // clients bother to prepare() kernel memory.
1358 // But it was not enforced so what are you going to do?
1359 if (task == kernel_task)
1360 mdOpts |= kIOMemoryAutoPrepare;
1361 }
1362 else
1363 mdOpts |= kIOMemoryTypePhysical;
1364
1365 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1366 }
1367 #endif /* !__LP64__ */
1368
1369 /*
1370 * initWithOptions:
1371 *
1372 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1373 * address ranges from a given task, several physical ranges, a UPL from the
1374 * UBC system, or a uio (possibly 64-bit) from the BSD subsystem.
1375 *
1376 * Passing the ranges as a reference will avoid an extra allocation.
1377 *
1378 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1379 * existing instance -- note this behavior is not commonly supported in other
1380 * I/O Kit classes, although it is supported here.
1381 */
1382
1383 bool
1384 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1385 UInt32 count,
1386 UInt32 offset,
1387 task_t task,
1388 IOOptionBits options,
1389 IOMapper * mapper)
1390 {
1391 IOOptionBits type = options & kIOMemoryTypeMask;
1392
1393 #ifndef __LP64__
1394 if (task
1395 && (kIOMemoryTypeVirtual == type)
1396 && vm_map_is_64bit(get_task_map(task))
1397 && ((IOVirtualRange *) buffers)->address)
1398 {
1399 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1400 return false;
1401 }
1402 #endif /* !__LP64__ */
1403
1404 // Grab the original MD's configuration data to initialise the
1405 // arguments to this function.
1406 if (kIOMemoryTypePersistentMD == type) {
1407
1408 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1409 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1410 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1411
1412 // Only accept persistent memory descriptors with valid dataP data.
1413 assert(orig->_rangesCount == 1);
1414 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1415 return false;
1416
1417 _memRef = initData->fMemRef; // Grab the new named entry
1418 options = orig->_flags & ~kIOMemoryAsReference;
1419 type = options & kIOMemoryTypeMask;
1420 buffers = orig->_ranges.v;
1421 count = orig->_rangesCount;
1422
1423 // Now grab the original task and whatever mapper was previously used
1424 task = orig->_task;
1425 mapper = dataP->fMapper;
1426
1427 // We are ready to go through the original initialisation now
1428 }
1429
1430 switch (type) {
1431 case kIOMemoryTypeUIO:
1432 case kIOMemoryTypeVirtual:
1433 #ifndef __LP64__
1434 case kIOMemoryTypeVirtual64:
1435 #endif /* !__LP64__ */
1436 assert(task);
1437 if (!task)
1438 return false;
1439 break;
1440
1441 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1442 #ifndef __LP64__
1443 case kIOMemoryTypePhysical64:
1444 #endif /* !__LP64__ */
1445 case kIOMemoryTypeUPL:
1446 assert(!task);
1447 break;
1448 default:
1449 return false; /* bad argument */
1450 }
1451
1452 assert(buffers);
1453 assert(count);
1454
1455 /*
1456 * We can check the _initialized instance variable before having ever set
1457 * it to an initial value because I/O Kit guarantees that all our instance
1458 * variables are zeroed on an object's allocation.
1459 */
1460
1461 if (_initialized) {
1462 /*
1463 * An existing memory descriptor is being retargeted to point to
1464 * somewhere else. Clean up our present state.
1465 */
1466 IOOptionBits type = _flags & kIOMemoryTypeMask;
1467 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1468 {
1469 while (_wireCount)
1470 complete();
1471 }
1472 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1473 {
1474 if (kIOMemoryTypeUIO == type)
1475 uio_free((uio_t) _ranges.v);
1476 #ifndef __LP64__
1477 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1478 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1479 #endif /* !__LP64__ */
1480 else
1481 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1482 }
1483
1484 options |= (kIOMemoryRedirected & _flags);
1485 if (!(kIOMemoryRedirected & options))
1486 {
1487 if (_memRef)
1488 {
1489 memoryReferenceRelease(_memRef);
1490 _memRef = 0;
1491 }
1492 if (_mappings)
1493 _mappings->flushCollection();
1494 }
1495 }
1496 else {
1497 if (!super::init())
1498 return false;
1499 _initialized = true;
1500 }
1501
1502 // Grab the appropriate mapper
1503 if (kIOMemoryHostOrRemote & options) options |= kIOMemoryMapperNone;
1504 if (kIOMemoryMapperNone & options)
1505 mapper = 0; // No Mapper
1506 else if (mapper == kIOMapperSystem) {
1507 IOMapper::checkForSystemMapper();
1508 gIOSystemMapper = mapper = IOMapper::gSystem;
1509 }
1510
1511 // Remove the dynamic internal use flags from the initial setting
1512 options &= ~(kIOMemoryPreparedReadOnly);
1513 _flags = options;
1514 _task = task;
1515
1516 #ifndef __LP64__
1517 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1518 #endif /* !__LP64__ */
1519
1520 _dmaReferences = 0;
1521 __iomd_reservedA = 0;
1522 __iomd_reservedB = 0;
1523 _highestPage = 0;
1524
1525 if (kIOMemoryThreadSafe & options)
1526 {
1527 if (!_prepareLock)
1528 _prepareLock = IOLockAlloc();
1529 }
1530 else if (_prepareLock)
1531 {
1532 IOLockFree(_prepareLock);
1533 _prepareLock = NULL;
1534 }
1535
1536 if (kIOMemoryTypeUPL == type) {
1537
1538 ioGMDData *dataP;
1539 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1540
1541 if (!initMemoryEntries(dataSize, mapper)) return (false);
1542 dataP = getDataP(_memoryEntries);
1543 dataP->fPageCnt = 0;
1544 switch (kIOMemoryDirectionMask & options)
1545 {
1546 case kIODirectionOut:
1547 dataP->fDMAAccess = kIODMAMapReadAccess;
1548 break;
1549 case kIODirectionIn:
1550 dataP->fDMAAccess = kIODMAMapWriteAccess;
1551 break;
1552 case kIODirectionNone:
1553 case kIODirectionOutIn:
1554 default:
1555 panic("bad dir for upl 0x%x\n", (int) options);
1556 break;
1557 }
1558 // _wireCount++; // UPLs start out life wired
1559
1560 _length = count;
1561 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1562
1563 ioPLBlock iopl;
1564 iopl.fIOPL = (upl_t) buffers;
1565 upl_set_referenced(iopl.fIOPL, true);
1566 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1567
1568 if (upl_get_size(iopl.fIOPL) < (count + offset))
1569 panic("short external upl");
1570
1571 _highestPage = upl_get_highest_page(iopl.fIOPL);
1572
1573 // Set the flag kIOPLOnDevice, conveniently equal to 1
1574 iopl.fFlags = pageList->device | kIOPLExternUPL;
1575 if (!pageList->device) {
1576 // Pre-compute the offset into the UPL's page list
1577 pageList = &pageList[atop_32(offset)];
1578 offset &= PAGE_MASK;
1579 }
1580 iopl.fIOMDOffset = 0;
1581 iopl.fMappedPage = 0;
1582 iopl.fPageInfo = (vm_address_t) pageList;
1583 iopl.fPageOffset = offset;
1584 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1585 }
1586 else {
1587 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1588 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1589
1590 // Initialize the memory descriptor
1591 if (options & kIOMemoryAsReference) {
1592 #ifndef __LP64__
1593 _rangesIsAllocated = false;
1594 #endif /* !__LP64__ */
1595
1596 // Hack assignment to get the buffer arg into _ranges.
1597 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1598 // work, C++ sigh.
1599 // This also initialises the uio & physical ranges.
1600 _ranges.v = (IOVirtualRange *) buffers;
1601 }
1602 else {
1603 #ifndef __LP64__
1604 _rangesIsAllocated = true;
1605 #endif /* !__LP64__ */
1606 switch (type)
1607 {
1608 case kIOMemoryTypeUIO:
1609 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1610 break;
1611
1612 #ifndef __LP64__
1613 case kIOMemoryTypeVirtual64:
1614 case kIOMemoryTypePhysical64:
1615 if (count == 1
1616 #ifndef __arm__
1617 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1618 #endif
1619 ) {
1620 if (kIOMemoryTypeVirtual64 == type)
1621 type = kIOMemoryTypeVirtual;
1622 else
1623 type = kIOMemoryTypePhysical;
1624 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1625 _rangesIsAllocated = false;
1626 _ranges.v = &_singleRange.v;
1627 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1628 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1629 break;
1630 }
1631 _ranges.v64 = IONew(IOAddressRange, count);
1632 if (!_ranges.v64)
1633 return false;
1634 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1635 break;
1636 #endif /* !__LP64__ */
1637 case kIOMemoryTypeVirtual:
1638 case kIOMemoryTypePhysical:
1639 if (count == 1) {
1640 _flags |= kIOMemoryAsReference;
1641 #ifndef __LP64__
1642 _rangesIsAllocated = false;
1643 #endif /* !__LP64__ */
1644 _ranges.v = &_singleRange.v;
1645 } else {
1646 _ranges.v = IONew(IOVirtualRange, count);
1647 if (!_ranges.v)
1648 return false;
1649 }
1650 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1651 break;
1652 }
1653 }
1654 _rangesCount = count;
1655
1656 // Find starting address within the vector of ranges
1657 Ranges vec = _ranges;
1658 mach_vm_size_t totalLength = 0;
1659 unsigned int ind, pages = 0;
1660 for (ind = 0; ind < count; ind++) {
1661 mach_vm_address_t addr;
1662 mach_vm_address_t endAddr;
1663 mach_vm_size_t len;
1664
1665 // addr & len are returned by this function
1666 getAddrLenForInd(addr, len, type, vec, ind);
1667 if (os_add3_overflow(addr, len, PAGE_MASK, &endAddr)) break;
1668 if (os_add_overflow(pages, (atop_64(endAddr) - atop_64(addr)), &pages)) break;
1669 if (os_add_overflow(totalLength, len, &totalLength)) break;
1670 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1671 {
1672 ppnum_t highPage = atop_64(addr + len - 1);
1673 if (highPage > _highestPage)
1674 _highestPage = highPage;
1675 }
1676 }
1677 if ((ind < count)
1678 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1679
1680 _length = totalLength;
1681 _pages = pages;
1682
1683 // Auto-prepare memory at creation time.
1684 // Implied completion when descriptor is freed
1685
1686
1687 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1688 _wireCount++; // Physical MDs are, by definition, wired
1689 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1690 ioGMDData *dataP;
1691 unsigned dataSize;
1692
1693 if (_pages > atop_64(max_mem)) return false;
1694
1695 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1696 if (!initMemoryEntries(dataSize, mapper)) return false;
1697 dataP = getDataP(_memoryEntries);
1698 dataP->fPageCnt = _pages;
1699
1700 if (((_task != kernel_task) || (kIOMemoryBufferPageable & _flags))
1701 && (VM_KERN_MEMORY_NONE == _kernelTag))
1702 {
1703 _kernelTag = IOMemoryTag(kernel_map);
1704 }
1705
1706 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1707 {
1708 IOReturn
1709 err = memoryReferenceCreate(0, &_memRef);
1710 if (kIOReturnSuccess != err) return false;
1711 }
1712
1713 if ((_flags & kIOMemoryAutoPrepare)
1714 && prepare() != kIOReturnSuccess)
1715 return false;
1716 }
1717 }
1718
1719 return true;
1720 }
1721
1722 /*
1723 * free
1724 *
1725 * Free resources.
1726 */
1727 void IOGeneralMemoryDescriptor::free()
1728 {
1729 IOOptionBits type = _flags & kIOMemoryTypeMask;
1730
1731 if( reserved)
1732 {
1733 LOCK;
1734 reserved->dp.memory = 0;
1735 UNLOCK;
1736 }
1737 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1738 {
1739 ioGMDData * dataP;
1740 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
1741 {
1742 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
1743 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
1744 }
1745 }
1746 else
1747 {
1748 while (_wireCount) complete();
1749 }
1750
1751 if (_memoryEntries) _memoryEntries->release();
1752
1753 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1754 {
1755 if (kIOMemoryTypeUIO == type)
1756 uio_free((uio_t) _ranges.v);
1757 #ifndef __LP64__
1758 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1759 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1760 #endif /* !__LP64__ */
1761 else
1762 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1763
1764 _ranges.v = NULL;
1765 }
1766
1767 if (reserved)
1768 {
1769 if (reserved->dp.devicePager)
1770 {
1771 // memEntry holds a ref on the device pager which owns reserved
1772 // (IOMemoryDescriptorReserved) so no reserved access after this point
1773 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1774 }
1775 else
1776 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1777 reserved = NULL;
1778 }
1779
1780 if (_memRef) memoryReferenceRelease(_memRef);
1781 if (_prepareLock) IOLockFree(_prepareLock);
1782
1783 super::free();
1784 }
1785
1786 #ifndef __LP64__
1787 void IOGeneralMemoryDescriptor::unmapFromKernel()
1788 {
1789 panic("IOGMD::unmapFromKernel deprecated");
1790 }
1791
1792 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1793 {
1794 panic("IOGMD::mapIntoKernel deprecated");
1795 }
1796 #endif /* !__LP64__ */
1797
1798 /*
1799 * getDirection:
1800 *
1801 * Get the direction of the transfer.
1802 */
1803 IODirection IOMemoryDescriptor::getDirection() const
1804 {
1805 #ifndef __LP64__
1806 if (_direction)
1807 return _direction;
1808 #endif /* !__LP64__ */
1809 return (IODirection) (_flags & kIOMemoryDirectionMask);
1810 }
1811
1812 /*
1813 * getLength:
1814 *
1815 * Get the length of the transfer (over all ranges).
1816 */
1817 IOByteCount IOMemoryDescriptor::getLength() const
1818 {
1819 return _length;
1820 }
1821
1822 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1823 {
1824 _tag = tag;
1825 }
1826
1827 IOOptionBits IOMemoryDescriptor::getTag( void )
1828 {
1829 return( _tag);
1830 }
1831
1832 uint64_t IOMemoryDescriptor::getFlags(void)
1833 {
1834 return (_flags);
1835 }
1836
1837 #ifndef __LP64__
1838 #pragma clang diagnostic push
1839 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
1840
1841 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1842 IOPhysicalAddress
1843 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1844 {
1845 addr64_t physAddr = 0;
1846
1847 if( prepare() == kIOReturnSuccess) {
1848 physAddr = getPhysicalSegment64( offset, length );
1849 complete();
1850 }
1851
1852 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1853 }
1854
1855 #pragma clang diagnostic pop
1856
1857 #endif /* !__LP64__ */
1858
1859 IOByteCount IOMemoryDescriptor::readBytes
1860 (IOByteCount offset, void *bytes, IOByteCount length)
1861 {
1862 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1863 IOByteCount remaining;
1864
1865 // Assert that this entire I/O is within the available range
1866 assert(offset <= _length);
1867 assert(offset + length <= _length);
1868 if ((offset >= _length)
1869 || ((offset + length) > _length)) {
1870 return 0;
1871 }
1872
1873 assert (!(kIOMemoryRemote & _flags));
1874 if (kIOMemoryRemote & _flags) return (0);
1875
1876 if (kIOMemoryThreadSafe & _flags)
1877 LOCK;
1878
1879 remaining = length = min(length, _length - offset);
1880 while (remaining) { // (process another target segment?)
1881 addr64_t srcAddr64;
1882 IOByteCount srcLen;
1883
1884 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1885 if (!srcAddr64)
1886 break;
1887
1888 // Clip segment length to remaining
1889 if (srcLen > remaining)
1890 srcLen = remaining;
1891
1892 copypv(srcAddr64, dstAddr, srcLen,
1893 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1894
1895 dstAddr += srcLen;
1896 offset += srcLen;
1897 remaining -= srcLen;
1898 }
1899
1900 if (kIOMemoryThreadSafe & _flags)
1901 UNLOCK;
1902
1903 assert(!remaining);
1904
1905 return length - remaining;
1906 }
1907
1908 IOByteCount IOMemoryDescriptor::writeBytes
1909 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1910 {
1911 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1912 IOByteCount remaining;
1913 IOByteCount offset = inoffset;
1914
1915 // Assert that this entire I/O is within the available range
1916 assert(offset <= _length);
1917 assert(offset + length <= _length);
1918
1919 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1920
1921 if ( (kIOMemoryPreparedReadOnly & _flags)
1922 || (offset >= _length)
1923 || ((offset + length) > _length)) {
1924 return 0;
1925 }
1926
1927 assert (!(kIOMemoryRemote & _flags));
1928 if (kIOMemoryRemote & _flags) return (0);
1929
1930 if (kIOMemoryThreadSafe & _flags)
1931 LOCK;
1932
1933 remaining = length = min(length, _length - offset);
1934 while (remaining) { // (process another target segment?)
1935 addr64_t dstAddr64;
1936 IOByteCount dstLen;
1937
1938 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1939 if (!dstAddr64)
1940 break;
1941
1942 // Clip segment length to remaining
1943 if (dstLen > remaining)
1944 dstLen = remaining;
1945
1946 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1947 else
1948 {
1949 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1950 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1951 srcAddr += dstLen;
1952 }
1953 offset += dstLen;
1954 remaining -= dstLen;
1955 }
1956
1957 if (kIOMemoryThreadSafe & _flags)
1958 UNLOCK;
1959
1960 assert(!remaining);
1961
1962 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1963
1964 return length - remaining;
1965 }
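// Usage sketch (illustrative only; md and local are placeholders): copying between a
// descriptor and a kernel buffer. Passing a NULL source to writeBytes() zero-fills the
// range, per the bzero_phys() path above.
#if 0
uint8_t     local[64];
IOByteCount n;
n = md->readBytes(0, local, sizeof(local));   // descriptor -> local
n = md->writeBytes(0, local, sizeof(local));  // local -> descriptor
#endif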
1966
1967 #ifndef __LP64__
1968 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1969 {
1970 panic("IOGMD::setPosition deprecated");
1971 }
1972 #endif /* !__LP64__ */
1973
1974 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1975
1976 uint64_t
1977 IOGeneralMemoryDescriptor::getPreparationID( void )
1978 {
1979 ioGMDData *dataP;
1980
1981 if (!_wireCount)
1982 return (kIOPreparationIDUnprepared);
1983
1984 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1985 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1986 {
1987 IOMemoryDescriptor::setPreparationID();
1988 return (IOMemoryDescriptor::getPreparationID());
1989 }
1990
1991 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1992 return (kIOPreparationIDUnprepared);
1993
1994 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1995 {
1996 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1997 }
1998 return (dataP->fPreparationID);
1999 }
2000
2001 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
2002 {
2003 if (!reserved)
2004 {
2005 reserved = IONew(IOMemoryDescriptorReserved, 1);
2006 if (reserved)
2007 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
2008 }
2009 return (reserved);
2010 }
2011
2012 void IOMemoryDescriptor::setPreparationID( void )
2013 {
2014 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
2015 {
2016 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
2017 }
2018 }
2019
2020 uint64_t IOMemoryDescriptor::getPreparationID( void )
2021 {
2022 if (reserved)
2023 return (reserved->preparationID);
2024 else
2025 return (kIOPreparationIDUnsupported);
2026 }
2027
2028 void IOMemoryDescriptor::setVMTags(vm_tag_t kernelTag, vm_tag_t userTag)
2029 {
2030 _kernelTag = kernelTag;
2031 _userTag = userTag;
2032 }
2033
2034 vm_tag_t IOMemoryDescriptor::getVMTag(vm_map_t map)
2035 {
2036 if (vm_kernel_map_is_kernel(map))
2037 {
2038 if (VM_KERN_MEMORY_NONE != _kernelTag) return (_kernelTag);
2039 }
2040 else
2041 {
2042 if (VM_KERN_MEMORY_NONE != _userTag) return (_userTag);
2043 }
2044 return (IOMemoryTag(map));
2045 }
2046
2047 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2048 {
2049 IOReturn err = kIOReturnSuccess;
2050 DMACommandOps params;
2051 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
2052 ioGMDData *dataP;
2053
2054 params = (op & ~kIOMDDMACommandOperationMask & op);
2055 op &= kIOMDDMACommandOperationMask;
2056
2057 if (kIOMDDMAMap == op)
2058 {
2059 if (dataSize < sizeof(IOMDDMAMapArgs))
2060 return kIOReturnUnderrun;
2061
2062 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2063
2064 if (!_memoryEntries
2065 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2066
2067 if (_memoryEntries && data->fMapper)
2068 {
2069 bool remap, keepMap;
2070 dataP = getDataP(_memoryEntries);
2071
2072 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
2073 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
2074
2075 keepMap = (data->fMapper == gIOSystemMapper);
2076 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
2077
2078 remap = (!keepMap);
2079 remap |= (dataP->fDMAMapNumAddressBits < 64)
2080 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
2081 remap |= (dataP->fDMAMapAlignment > page_size);
2082
2083 if (remap || !dataP->fMappedBaseValid)
2084 {
2085 // if (dataP->fMappedBaseValid) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
2086 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2087 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBaseValid)
2088 {
2089 dataP->fMappedBase = data->fAlloc;
2090 dataP->fMappedBaseValid = true;
2091 dataP->fMappedLength = data->fAllocLength;
2092 data->fAllocLength = 0; // IOMD owns the alloc now
2093 }
2094 }
2095 else
2096 {
2097 data->fAlloc = dataP->fMappedBase;
2098 data->fAllocLength = 0; // give out IOMD map
2099 md->dmaMapRecord(data->fMapper, data->fCommand, dataP->fMappedLength);
2100 }
2101 data->fMapContig = !dataP->fDiscontig;
2102 }
2103 return (err);
2104 }
2105 if (kIOMDDMAUnmap == op)
2106 {
2107 if (dataSize < sizeof(IOMDDMAMapArgs))
2108 return kIOReturnUnderrun;
2109 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2110
2111 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2112
2113 return kIOReturnSuccess;
2114 }
2115
2116 if (kIOMDAddDMAMapSpec == op)
2117 {
2118 if (dataSize < sizeof(IODMAMapSpecification))
2119 return kIOReturnUnderrun;
2120
2121 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
2122
2123 if (!_memoryEntries
2124 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2125
2126 if (_memoryEntries)
2127 {
2128 dataP = getDataP(_memoryEntries);
2129 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2130 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2131 if (data->alignment > dataP->fDMAMapAlignment)
2132 dataP->fDMAMapAlignment = data->alignment;
2133 }
2134 return kIOReturnSuccess;
2135 }
2136
2137 if (kIOMDGetCharacteristics == op) {
2138
2139 if (dataSize < sizeof(IOMDDMACharacteristics))
2140 return kIOReturnUnderrun;
2141
2142 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2143 data->fLength = _length;
2144 data->fSGCount = _rangesCount;
2145 data->fPages = _pages;
2146 data->fDirection = getDirection();
2147 if (!_wireCount)
2148 data->fIsPrepared = false;
2149 else {
2150 data->fIsPrepared = true;
2151 data->fHighestPage = _highestPage;
2152 if (_memoryEntries)
2153 {
2154 dataP = getDataP(_memoryEntries);
2155 ioPLBlock *ioplList = getIOPLList(dataP);
2156 UInt count = getNumIOPL(_memoryEntries, dataP);
2157 if (count == 1)
2158 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2159 }
2160 }
2161
2162 return kIOReturnSuccess;
2163 }
2164
2165 else if (kIOMDDMAActive == op)
2166 {
2167 if (params)
2168 {
2169 int16_t prior;
2170 prior = OSAddAtomic16(1, &md->_dmaReferences);
2171 if (!prior) md->_mapName = NULL;
2172 }
2173 else
2174 {
2175 if (md->_dmaReferences) OSAddAtomic16(-1, &md->_dmaReferences);
2176 else panic("_dmaReferences underflow");
2177 }
2178 }
2179 else if (kIOMDWalkSegments != op)
2180 return kIOReturnBadArgument;
2181
2182 // Get the next segment
2183 struct InternalState {
2184 IOMDDMAWalkSegmentArgs fIO;
2185 UInt fOffset2Index;
2186 UInt fIndex;
2187 UInt fNextOffset;
2188 } *isP;
2189
2190 // Find the next segment
2191 if (dataSize < sizeof(*isP))
2192 return kIOReturnUnderrun;
2193
2194 isP = (InternalState *) vData;
2195 UInt offset = isP->fIO.fOffset;
2196 bool mapped = isP->fIO.fMapped;
2197
2198 if (mapped && (kIOMemoryRemote & _flags)) return (kIOReturnNotAttached);
2199
2200 if (IOMapper::gSystem && mapped
2201 && (!(kIOMemoryHostOnly & _flags))
2202 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBaseValid))
2203 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBaseValid))
2204 {
2205 if (!_memoryEntries
2206 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2207
2208 dataP = getDataP(_memoryEntries);
2209 if (dataP->fMapper)
2210 {
2211 IODMAMapSpecification mapSpec;
2212 bzero(&mapSpec, sizeof(mapSpec));
2213 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2214 mapSpec.alignment = dataP->fDMAMapAlignment;
2215 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2216 if (kIOReturnSuccess != err) return (err);
2217 dataP->fMappedBaseValid = true;
2218 }
2219 }
2220
2221 if (offset >= _length)
2222 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2223
2224 // Validate the previous offset
2225 UInt ind, off2Ind = isP->fOffset2Index;
2226 if (!params
2227 && offset
2228 && (offset == isP->fNextOffset || off2Ind <= offset))
2229 ind = isP->fIndex;
2230 else
2231 ind = off2Ind = 0; // Start from beginning
2232
2233 UInt length;
2234 UInt64 address;
2235
2236
2237 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2238
2239 // Physical address based memory descriptor
2240 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2241
2242 // Find the range after the one that contains the offset
2243 mach_vm_size_t len;
2244 for (len = 0; off2Ind <= offset; ind++) {
2245 len = physP[ind].length;
2246 off2Ind += len;
2247 }
2248
2249 // Calculate length within range and starting address
2250 length = off2Ind - offset;
2251 address = physP[ind - 1].address + len - length;
2252
2253 if (true && mapped && _memoryEntries
2254 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
2255 {
2256 address = dataP->fMappedBase + offset;
2257 }
2258 else
2259 {
2260 // see how far we can coalesce ranges
2261 while (ind < _rangesCount && address + length == physP[ind].address) {
2262 len = physP[ind].length;
2263 length += len;
2264 off2Ind += len;
2265 ind++;
2266 }
2267 }
2268
2269 // correct contiguous check overshoot
2270 ind--;
2271 off2Ind -= len;
2272 }
2273 #ifndef __LP64__
2274 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2275
2276 // Physical address based memory descriptor
2277 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2278
2279 // Find the range after the one that contains the offset
2280 mach_vm_size_t len;
2281 for (len = 0; off2Ind <= offset; ind++) {
2282 len = physP[ind].length;
2283 off2Ind += len;
2284 }
2285
2286 // Calculate length within range and starting address
2287 length = off2Ind - offset;
2288 address = physP[ind - 1].address + len - length;
2289
2290 if (true && mapped && _memoryEntries
2291 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBaseValid)
2292 {
2293 address = dataP->fMappedBase + offset;
2294 }
2295 else
2296 {
2297 // see how far we can coalesce ranges
2298 while (ind < _rangesCount && address + length == physP[ind].address) {
2299 len = physP[ind].length;
2300 length += len;
2301 off2Ind += len;
2302 ind++;
2303 }
2304 }
2305 // correct contiguous check overshoot
2306 ind--;
2307 off2Ind -= len;
2308 }
2309 #endif /* !__LP64__ */
2310 else do {
2311 if (!_wireCount)
2312 panic("IOGMD: not wired for the IODMACommand");
2313
2314 assert(_memoryEntries);
2315
2316 dataP = getDataP(_memoryEntries);
2317 const ioPLBlock *ioplList = getIOPLList(dataP);
2318 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2319 upl_page_info_t *pageList = getPageList(dataP);
2320
2321 assert(numIOPLs > 0);
2322
2323 // Scan through iopl info blocks looking for block containing offset
2324 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2325 ind++;
2326
2327 // Go back to actual range as search goes past it
2328 ioPLBlock ioplInfo = ioplList[ind - 1];
2329 off2Ind = ioplInfo.fIOMDOffset;
2330
2331 if (ind < numIOPLs)
2332 length = ioplList[ind].fIOMDOffset;
2333 else
2334 length = _length;
2335 length -= offset; // Remainder within iopl
2336
2337 // Subtract the offset of this iopl within the total list
2338 offset -= off2Ind;
2339
2340 // If a mapped address is requested and this is a pre-mapped IOPL
2341 // then we just need to compute an offset relative to the mapped base.
2342 if (mapped && dataP->fMappedBaseValid) {
2343 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2344 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2345 continue; // Done; leave the do/while(false) now
2346 }
2347
2348 // The offset is rebased into the current iopl.
2349 // Now add the iopl 1st page offset.
2350 offset += ioplInfo.fPageOffset;
2351
2352 // For external UPLs the fPageInfo field points directly to
2353 // the upl's upl_page_info_t array.
2354 if (ioplInfo.fFlags & kIOPLExternUPL)
2355 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2356 else
2357 pageList = &pageList[ioplInfo.fPageInfo];
2358
2359 // Check for direct device non-paged memory
2360 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2361 address = ptoa_64(pageList->phys_addr) + offset;
2362 continue; // Done; leave the do/while(false) now
2363 }
2364
2365 // Now we need to compute the index into the pageList
2366 UInt pageInd = atop_32(offset);
2367 offset &= PAGE_MASK;
2368
2369 // Compute the starting address of this segment
2370 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2371 if (!pageAddr) {
2372 panic("!pageList phys_addr");
2373 }
2374
2375 address = ptoa_64(pageAddr) + offset;
2376
2377 // length is currently set to the length of the remainder of the iopl.
2378 // We need to check that the remainder of the iopl is contiguous.
2379 // This is indicated by pageList[pageInd].phys_addr being sequential.
2380 IOByteCount contigLength = PAGE_SIZE - offset;
2381 while (contigLength < length
2382 && ++pageAddr == pageList[++pageInd].phys_addr)
2383 {
2384 contigLength += PAGE_SIZE;
2385 }
2386
2387 if (contigLength < length)
2388 length = contigLength;
2389
2390
2391 assert(address);
2392 assert(length);
2393
2394 } while (false);
2395
2396 // Update return values and state
2397 isP->fIO.fIOVMAddr = address;
2398 isP->fIO.fLength = length;
2399 isP->fIndex = ind;
2400 isP->fOffset2Index = off2Ind;
2401 isP->fNextOffset = isP->fIO.fOffset + length;
2402
2403 return kIOReturnSuccess;
2404 }
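#if 0
/*
 * Illustrative sketch (editor's note): the segment walker above is normally
 * driven through IODMACommand rather than called directly. The specification
 * values (32 address bits, 4 KB max segment, 8 segments per pass) are example
 * assumptions.
 */
static void
ExampleWalkSegments(IOMemoryDescriptor * md)
{
    IODMACommand * dma = IODMACommand::withSpecification(
                             kIODMACommandOutputHost64,    // segment function
                             32,                           // numAddressBits
                             4096);                        // maxSegmentSize
    if (!dma) return;

    if (kIOReturnSuccess == dma->setMemoryDescriptor(md)) {   // auto-prepares
        UInt64                  offset  = 0;
        IODMACommand::Segment64 segs[8];
        UInt32                  numSegs = 8;

        if (kIOReturnSuccess == dma->gen64IOVMSegments(&offset, segs, &numSegs)) {
            // segs[0..numSegs-1] now hold IOVM (or physical) address/length pairs.
        }
        dma->clearMemoryDescriptor();                          // auto-completes
    }
    dma->release();
}
#endif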
2405
2406 addr64_t
2407 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2408 {
2409 IOReturn ret;
2410 mach_vm_address_t address = 0;
2411 mach_vm_size_t length = 0;
2412 IOMapper * mapper = gIOSystemMapper;
2413 IOOptionBits type = _flags & kIOMemoryTypeMask;
2414
2415 if (lengthOfSegment)
2416 *lengthOfSegment = 0;
2417
2418 if (offset >= _length)
2419 return 0;
2420
2421 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2422 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2423 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2424 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2425
2426 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2427 {
2428 unsigned rangesIndex = 0;
2429 Ranges vec = _ranges;
2430 mach_vm_address_t addr;
2431
2432 // Find starting address within the vector of ranges
2433 for (;;) {
2434 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2435 if (offset < length)
2436 break;
2437 offset -= length; // (make offset relative)
2438 rangesIndex++;
2439 }
2440
2441 // Now that we have the starting range,
2442 // let's find the last contiguous range
2443 addr += offset;
2444 length -= offset;
2445
2446 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2447 mach_vm_address_t newAddr;
2448 mach_vm_size_t newLen;
2449
2450 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2451 if (addr + length != newAddr)
2452 break;
2453 length += newLen;
2454 }
2455 if (addr)
2456 address = (IOPhysicalAddress) addr; // Truncate address to 32 bits
2457 }
2458 else
2459 {
2460 IOMDDMAWalkSegmentState _state;
2461 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2462
2463 state->fOffset = offset;
2464 state->fLength = _length - offset;
2465 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOrRemote);
2466
2467 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2468
2469 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2470 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2471 ret, this, state->fOffset,
2472 state->fIOVMAddr, state->fLength);
2473 if (kIOReturnSuccess == ret)
2474 {
2475 address = state->fIOVMAddr;
2476 length = state->fLength;
2477 }
2478
2479 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2480 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2481
2482 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2483 {
2484 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2485 {
2486 addr64_t origAddr = address;
2487 IOByteCount origLen = length;
2488
2489 address = mapper->mapToPhysicalAddress(origAddr);
2490 length = page_size - (address & (page_size - 1));
2491 while ((length < origLen)
2492 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2493 length += page_size;
2494 if (length > origLen)
2495 length = origLen;
2496 }
2497 }
2498 }
2499
2500 if (!address)
2501 length = 0;
2502
2503 if (lengthOfSegment)
2504 *lengthOfSegment = length;
2505
2506 return (address);
2507 }
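#if 0
/*
 * Illustrative sketch (editor's note): walking a prepared descriptor segment
 * by segment with the getPhysicalSegment() defined above; kIOMemoryMapperNone
 * requests untranslated physical addresses. The helper name is an assumption.
 */
static void
ExampleWalkPhysicalSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;

    while (offset < md->getLength()) {
        IOByteCount segLen   = 0;
        addr64_t    physAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);

        if (!physAddr || !segLen) break;
        // ... program one scatter/gather element with (physAddr, segLen) ...
        offset += segLen;
    }
}
#endif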
2508
2509 #ifndef __LP64__
2510 #pragma clang diagnostic push
2511 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2512
2513 addr64_t
2514 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2515 {
2516 addr64_t address = 0;
2517
2518 if (options & _kIOMemorySourceSegment)
2519 {
2520 address = getSourceSegment(offset, lengthOfSegment);
2521 }
2522 else if (options & kIOMemoryMapperNone)
2523 {
2524 address = getPhysicalSegment64(offset, lengthOfSegment);
2525 }
2526 else
2527 {
2528 address = getPhysicalSegment(offset, lengthOfSegment);
2529 }
2530
2531 return (address);
2532 }
2533 #pragma clang diagnostic pop
2534
2535 addr64_t
2536 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2537 {
2538 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2539 }
2540
2541 IOPhysicalAddress
2542 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2543 {
2544 addr64_t address = 0;
2545 IOByteCount length = 0;
2546
2547 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2548
2549 if (lengthOfSegment)
2550 length = *lengthOfSegment;
2551
2552 if ((address + length) > 0x100000000ULL)
2553 {
2554 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2555 address, (long) length, (getMetaClass())->getClassName());
2556 }
2557
2558 return ((IOPhysicalAddress) address);
2559 }
2560
2561 addr64_t
2562 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2563 {
2564 IOPhysicalAddress phys32;
2565 IOByteCount length;
2566 addr64_t phys64;
2567 IOMapper * mapper = 0;
2568
2569 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2570 if (!phys32)
2571 return 0;
2572
2573 if (gIOSystemMapper)
2574 mapper = gIOSystemMapper;
2575
2576 if (mapper)
2577 {
2578 IOByteCount origLen;
2579
2580 phys64 = mapper->mapToPhysicalAddress(phys32);
2581 origLen = *lengthOfSegment;
2582 length = page_size - (phys64 & (page_size - 1));
2583 while ((length < origLen)
2584 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2585 length += page_size;
2586 if (length > origLen)
2587 length = origLen;
2588
2589 *lengthOfSegment = length;
2590 }
2591 else
2592 phys64 = (addr64_t) phys32;
2593
2594 return phys64;
2595 }
2596
2597 IOPhysicalAddress
2598 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2599 {
2600 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2601 }
2602
2603 IOPhysicalAddress
2604 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2605 {
2606 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2607 }
2608
2609 #pragma clang diagnostic push
2610 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
2611
2612 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2613 IOByteCount * lengthOfSegment)
2614 {
2615 if (_task == kernel_task)
2616 return (void *) getSourceSegment(offset, lengthOfSegment);
2617 else
2618 panic("IOGMD::getVirtualSegment deprecated");
2619
2620 return 0;
2621 }
2622 #pragma clang diagnostic pop
2623 #endif /* !__LP64__ */
2624
2625 IOReturn
2626 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2627 {
2628 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2629 DMACommandOps params;
2630 IOReturn err;
2631
2632 params = (op & ~kIOMDDMACommandOperationMask & op);
2633 op &= kIOMDDMACommandOperationMask;
2634
2635 if (kIOMDGetCharacteristics == op) {
2636 if (dataSize < sizeof(IOMDDMACharacteristics))
2637 return kIOReturnUnderrun;
2638
2639 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2640 data->fLength = getLength();
2641 data->fSGCount = 0;
2642 data->fDirection = getDirection();
2643 data->fIsPrepared = true; // Assume prepared - fail safe
2644 }
2645 else if (kIOMDWalkSegments == op) {
2646 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2647 return kIOReturnUnderrun;
2648
2649 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2650 IOByteCount offset = (IOByteCount) data->fOffset;
2651
2652 IOPhysicalLength length;
2653 if (data->fMapped && IOMapper::gSystem)
2654 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2655 else
2656 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2657 data->fLength = length;
2658 }
2659 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2660 else if (kIOMDDMAMap == op)
2661 {
2662 if (dataSize < sizeof(IOMDDMAMapArgs))
2663 return kIOReturnUnderrun;
2664 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2665
2666 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2667
2668 data->fMapContig = true;
2669 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2670
2671 return (err);
2672 }
2673 else if (kIOMDDMAUnmap == op)
2674 {
2675 if (dataSize < sizeof(IOMDDMAMapArgs))
2676 return kIOReturnUnderrun;
2677 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2678
2679 err = md->dmaUnmap(data->fMapper, data->fCommand, data->fOffset, data->fAlloc, data->fAllocLength);
2680
2681 return (kIOReturnSuccess);
2682 }
2683 else return kIOReturnBadArgument;
2684
2685 return kIOReturnSuccess;
2686 }
2687
2688 IOReturn
2689 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2690 IOOptionBits * oldState )
2691 {
2692 IOReturn err = kIOReturnSuccess;
2693
2694 vm_purgable_t control;
2695 int state;
2696
2697 assert (!(kIOMemoryRemote & _flags));
2698 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2699
2700 if (_memRef)
2701 {
2702 err = super::setPurgeable(newState, oldState);
2703 }
2704 else
2705 {
2706 if (kIOMemoryThreadSafe & _flags)
2707 LOCK;
2708 do
2709 {
2710 // Find the appropriate vm_map for the given task
2711 vm_map_t curMap;
2712 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2713 {
2714 err = kIOReturnNotReady;
2715 break;
2716 }
2717 else if (!_task)
2718 {
2719 err = kIOReturnUnsupported;
2720 break;
2721 }
2722 else
2723 {
2724 curMap = get_task_map(_task);
2725 if (NULL == curMap)
2726 {
2727 err = KERN_INVALID_ARGUMENT;
2728 break;
2729 }
2730 }
2731
2732 // can only do one range
2733 Ranges vec = _ranges;
2734 IOOptionBits type = _flags & kIOMemoryTypeMask;
2735 mach_vm_address_t addr;
2736 mach_vm_size_t len;
2737 getAddrLenForInd(addr, len, type, vec, 0);
2738
2739 err = purgeableControlBits(newState, &control, &state);
2740 if (kIOReturnSuccess != err)
2741 break;
2742 err = vm_map_purgable_control(curMap, addr, control, &state);
2743 if (oldState)
2744 {
2745 if (kIOReturnSuccess == err)
2746 {
2747 err = purgeableStateBits(&state);
2748 *oldState = state;
2749 }
2750 }
2751 }
2752 while (false);
2753 if (kIOMemoryThreadSafe & _flags)
2754 UNLOCK;
2755 }
2756
2757 return (err);
2758 }
2759
2760 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2761 IOOptionBits * oldState )
2762 {
2763 IOReturn err = kIOReturnNotReady;
2764
2765 if (kIOMemoryThreadSafe & _flags) LOCK;
2766 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2767 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2768
2769 return (err);
2770 }
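#if 0
/*
 * Illustrative sketch (editor's note): marking a descriptor's backing store
 * volatile while a cache is idle, then reclaiming it before reuse and checking
 * whether the contents were discarded. A sketch assuming the usual
 * vm_purgable semantics; the helper names are hypothetical.
 */
static void
ExampleParkPurgeable(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;
    // Allow the VM system to discard these pages under memory pressure.
    md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
}

static bool
ExampleReclaimPurgeable(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;
    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    // kIOMemoryPurgeableEmpty in the old state means the contents were discarded.
    return (kIOMemoryPurgeableEmpty != oldState);
}
#endif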
2771
2772 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2773 IOByteCount * dirtyPageCount )
2774 {
2775 IOReturn err = kIOReturnNotReady;
2776
2777 assert (!(kIOMemoryRemote & _flags));
2778 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2779
2780 if (kIOMemoryThreadSafe & _flags) LOCK;
2781 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2782 else
2783 {
2784 IOMultiMemoryDescriptor * mmd;
2785 IOSubMemoryDescriptor * smd;
2786 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2787 {
2788 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2789 }
2790 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2791 {
2792 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2793 }
2794 }
2795 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2796
2797 return (err);
2798 }
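#if 0
/*
 * Illustrative sketch (editor's note): querying resident and dirty page counts
 * for accounting or debugging. Despite the IOByteCount type, the out-parameters
 * are page counts, as their names indicate. The helper name is an assumption.
 */
static void
ExampleLogPageCounts(IOMemoryDescriptor * md)
{
    IOByteCount resident = 0, dirty = 0;

    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
        IOLog("resident pages %lu, dirty pages %lu\n",
              (unsigned long) resident, (unsigned long) dirty);
    }
}
#endif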
2799
2800
2801 #if defined(__arm__) || defined(__arm64__)
2802 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2803 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *res);
2804 #else /* defined(__arm__) || defined(__arm64__) */
2805 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2806 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2807 #endif /* defined(__arm__) || defined(__arm64__) */
2808
2809 static void SetEncryptOp(addr64_t pa, unsigned int count)
2810 {
2811 ppnum_t page, end;
2812
2813 page = atop_64(round_page_64(pa));
2814 end = atop_64(trunc_page_64(pa + count));
2815 for (; page < end; page++)
2816 {
2817 pmap_clear_noencrypt(page);
2818 }
2819 }
2820
2821 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2822 {
2823 ppnum_t page, end;
2824
2825 page = atop_64(round_page_64(pa));
2826 end = atop_64(trunc_page_64(pa + count));
2827 for (; page < end; page++)
2828 {
2829 pmap_set_noencrypt(page);
2830 }
2831 }
2832
2833 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2834 IOByteCount offset, IOByteCount length )
2835 {
2836 IOByteCount remaining;
2837 unsigned int res;
2838 void (*func)(addr64_t pa, unsigned int count) = 0;
2839 #if defined(__arm__) || defined(__arm64__)
2840 void (*func_ext)(addr64_t pa, unsigned int count, unsigned int remaining, unsigned int *result) = 0;
2841 #endif
2842
2843 assert (!(kIOMemoryRemote & _flags));
2844 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
2845
2846 switch (options)
2847 {
2848 case kIOMemoryIncoherentIOFlush:
2849 #if defined(__arm__) || defined(__arm64__)
2850 func_ext = &dcache_incoherent_io_flush64;
2851 #if __ARM_COHERENT_IO__
2852 func_ext(0, 0, 0, &res);
2853 return kIOReturnSuccess;
2854 #else /* __ARM_COHERENT_IO__ */
2855 break;
2856 #endif /* __ARM_COHERENT_IO__ */
2857 #else /* defined(__arm__) || defined(__arm64__) */
2858 func = &dcache_incoherent_io_flush64;
2859 break;
2860 #endif /* defined(__arm__) || defined(__arm64__) */
2861 case kIOMemoryIncoherentIOStore:
2862 #if defined(__arm__) || defined(__arm64__)
2863 func_ext = &dcache_incoherent_io_store64;
2864 #if __ARM_COHERENT_IO__
2865 func_ext(0, 0, 0, &res);
2866 return kIOReturnSuccess;
2867 #else /* __ARM_COHERENT_IO__ */
2868 break;
2869 #endif /* __ARM_COHERENT_IO__ */
2870 #else /* defined(__arm__) || defined(__arm64__) */
2871 func = &dcache_incoherent_io_store64;
2872 break;
2873 #endif /* defined(__arm__) || defined(__arm64__) */
2874
2875 case kIOMemorySetEncrypted:
2876 func = &SetEncryptOp;
2877 break;
2878 case kIOMemoryClearEncrypted:
2879 func = &ClearEncryptOp;
2880 break;
2881 }
2882
2883 #if defined(__arm__) || defined(__arm64__)
2884 if ((func == 0) && (func_ext == 0))
2885 return (kIOReturnUnsupported);
2886 #else /* defined(__arm__) || defined(__arm64__) */
2887 if (!func)
2888 return (kIOReturnUnsupported);
2889 #endif /* defined(__arm__) || defined(__arm64__) */
2890
2891 if (kIOMemoryThreadSafe & _flags)
2892 LOCK;
2893
2894 res = 0x0UL;
2895 remaining = length = min(length, getLength() - offset);
2896 while (remaining)
2897 // (process another target segment?)
2898 {
2899 addr64_t dstAddr64;
2900 IOByteCount dstLen;
2901
2902 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2903 if (!dstAddr64)
2904 break;
2905
2906 // Clip segment length to remaining
2907 if (dstLen > remaining)
2908 dstLen = remaining;
2909
2910 #if defined(__arm__) || defined(__arm64__)
2911 if (func)
2912 (*func)(dstAddr64, dstLen);
2913 if (func_ext) {
2914 (*func_ext)(dstAddr64, dstLen, remaining, &res);
2915 if (res != 0x0UL) {
2916 remaining = 0;
2917 break;
2918 }
2919 }
2920 #else /* defined(__arm__) || defined(__arm64__) */
2921 (*func)(dstAddr64, dstLen);
2922 #endif /* defined(__arm__) || defined(__arm64__) */
2923
2924 offset += dstLen;
2925 remaining -= dstLen;
2926 }
2927
2928 if (kIOMemoryThreadSafe & _flags)
2929 UNLOCK;
2930
2931 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2932 }
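#if 0
/*
 * Illustrative sketch (editor's note): flushing CPU caches over a descriptor's
 * range after a device has written to it by DMA on a non-coherent platform.
 * Return codes follow the implementation above; the helper name is an assumption.
 */
static void
ExampleFlushAfterDMA(IOMemoryDescriptor * md)
{
    IOReturn rc = md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());

    if (kIOReturnUnsupported == rc) {
        // Nothing to do on this architecture.
    } else if (kIOReturnUnderrun == rc) {
        // A physical segment could not be resolved for part of the range.
    }
}
#endif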
2933
2934 /*
2935 *
2936 */
2937
2938 #if defined(__i386__) || defined(__x86_64__)
2939
2940 #define io_kernel_static_start vm_kernel_stext
2941 #define io_kernel_static_end vm_kernel_etext
2942
2943 #elif defined(__arm__) || defined(__arm64__)
2944
2945 extern vm_offset_t static_memory_end;
2946
2947 #if defined(__arm64__)
2948 #define io_kernel_static_start vm_kext_base
2949 #else /* defined(__arm64__) */
2950 #define io_kernel_static_start vm_kernel_stext
2951 #endif /* defined(__arm64__) */
2952
2953 #define io_kernel_static_end static_memory_end
2954
2955 #else
2956 #error io_kernel_static_end is undefined for this architecture
2957 #endif
2958
2959 static kern_return_t
2960 io_get_kernel_static_upl(
2961 vm_map_t /* map */,
2962 uintptr_t offset,
2963 upl_size_t *upl_size,
2964 upl_t *upl,
2965 upl_page_info_array_t page_list,
2966 unsigned int *count,
2967 ppnum_t *highest_page)
2968 {
2969 unsigned int pageCount, page;
2970 ppnum_t phys;
2971 ppnum_t highestPage = 0;
2972
2973 pageCount = atop_32(*upl_size);
2974 if (pageCount > *count)
2975 pageCount = *count;
2976
2977 *upl = NULL;
2978
2979 for (page = 0; page < pageCount; page++)
2980 {
2981 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2982 if (!phys)
2983 break;
2984 page_list[page].phys_addr = phys;
2985 page_list[page].free_when_done = 0;
2986 page_list[page].absent = 0;
2987 page_list[page].dirty = 0;
2988 page_list[page].precious = 0;
2989 page_list[page].device = 0;
2990 if (phys > highestPage)
2991 highestPage = phys;
2992 }
2993
2994 *highest_page = highestPage;
2995
2996 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2997 }
2998
2999 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
3000 {
3001 IOOptionBits type = _flags & kIOMemoryTypeMask;
3002 IOReturn error = kIOReturnSuccess;
3003 ioGMDData *dataP;
3004 upl_page_info_array_t pageInfo;
3005 ppnum_t mapBase;
3006 vm_tag_t tag = VM_KERN_MEMORY_NONE;
3007
3008 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
3009
3010 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
3011 forDirection = (IODirection) (forDirection | getDirection());
3012
3013 dataP = getDataP(_memoryEntries);
3014 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
3015 switch (kIODirectionOutIn & forDirection)
3016 {
3017 case kIODirectionOut:
3018 // Pages do not need to be marked as dirty on commit
3019 uplFlags = UPL_COPYOUT_FROM;
3020 dataP->fDMAAccess = kIODMAMapReadAccess;
3021 break;
3022
3023 case kIODirectionIn:
3024 dataP->fDMAAccess = kIODMAMapWriteAccess;
3025 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3026 break;
3027
3028 default:
3029 dataP->fDMAAccess = kIODMAMapReadAccess | kIODMAMapWriteAccess;
3030 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
3031 break;
3032 }
3033
3034 if (_wireCount)
3035 {
3036 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
3037 {
3038 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
3039 error = kIOReturnNotWritable;
3040 }
3041 }
3042 else
3043 {
3044 IOMapper *mapper;
3045
3046 mapper = dataP->fMapper;
3047 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3048
3049 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
3050 tag = _kernelTag;
3051 if (VM_KERN_MEMORY_NONE == tag) tag = IOMemoryTag(kernel_map);
3052
3053 if (kIODirectionPrepareToPhys32 & forDirection)
3054 {
3055 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
3056 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
3057 }
3058 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
3059 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
3060 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
3061
3062 mapBase = 0;
3063
3064 // Note that appendBytes(NULL) zeros the data up to the desired length
3065 // and the length parameter is an unsigned int
3066 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
3067 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
3068 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
3069 dataP = 0;
3070
3071 // Find the appropriate vm_map for the given task
3072 vm_map_t curMap;
3073 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
3074 else curMap = get_task_map(_task);
3075
3076 // Iterate over the vector of virtual ranges
3077 Ranges vec = _ranges;
3078 unsigned int pageIndex = 0;
3079 IOByteCount mdOffset = 0;
3080 ppnum_t highestPage = 0;
3081
3082 IOMemoryEntry * memRefEntry = 0;
3083 if (_memRef) memRefEntry = &_memRef->entries[0];
3084
3085 for (UInt range = 0; range < _rangesCount; range++) {
3086 ioPLBlock iopl;
3087 mach_vm_address_t startPage;
3088 mach_vm_size_t numBytes;
3089 ppnum_t highPage = 0;
3090
3091 // Get the startPage address and length of vec[range]
3092 getAddrLenForInd(startPage, numBytes, type, vec, range);
3093 iopl.fPageOffset = startPage & PAGE_MASK;
3094 numBytes += iopl.fPageOffset;
3095 startPage = trunc_page_64(startPage);
3096
3097 if (mapper)
3098 iopl.fMappedPage = mapBase + pageIndex;
3099 else
3100 iopl.fMappedPage = 0;
3101
3102 // Iterate over the current range, creating UPLs
3103 while (numBytes) {
3104 vm_address_t kernelStart = (vm_address_t) startPage;
3105 vm_map_t theMap;
3106 if (curMap) theMap = curMap;
3107 else if (_memRef)
3108 {
3109 theMap = NULL;
3110 }
3111 else
3112 {
3113 assert(_task == kernel_task);
3114 theMap = IOPageableMapForAddress(kernelStart);
3115 }
3116
3117 // ioplFlags is an in/out parameter
3118 upl_control_flags_t ioplFlags = uplFlags;
3119 dataP = getDataP(_memoryEntries);
3120 pageInfo = getPageList(dataP);
3121 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
3122
3123 mach_vm_size_t _ioplSize = round_page(numBytes);
3124 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
3125 unsigned int numPageInfo = atop_32(ioplSize);
3126
3127 if ((theMap == kernel_map)
3128 && (kernelStart >= io_kernel_static_start)
3129 && (kernelStart < io_kernel_static_end)) {
3130 error = io_get_kernel_static_upl(theMap,
3131 kernelStart,
3132 &ioplSize,
3133 &iopl.fIOPL,
3134 baseInfo,
3135 &numPageInfo,
3136 &highPage);
3137 }
3138 else if (_memRef) {
3139 memory_object_offset_t entryOffset;
3140
3141 entryOffset = mdOffset;
3142 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
3143 if (entryOffset >= memRefEntry->size) {
3144 memRefEntry++;
3145 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
3146 entryOffset = 0;
3147 }
3148 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
3149 error = memory_object_iopl_request(memRefEntry->entry,
3150 entryOffset,
3151 &ioplSize,
3152 &iopl.fIOPL,
3153 baseInfo,
3154 &numPageInfo,
3155 &ioplFlags,
3156 tag);
3157 }
3158 else {
3159 assert(theMap);
3160 error = vm_map_create_upl(theMap,
3161 startPage,
3162 (upl_size_t*)&ioplSize,
3163 &iopl.fIOPL,
3164 baseInfo,
3165 &numPageInfo,
3166 &ioplFlags,
3167 tag);
3168 }
3169
3170 if (error != KERN_SUCCESS) goto abortExit;
3171
3172 assert(ioplSize);
3173
3174 if (iopl.fIOPL)
3175 highPage = upl_get_highest_page(iopl.fIOPL);
3176 if (highPage > highestPage)
3177 highestPage = highPage;
3178
3179 if (baseInfo->device) {
3180 numPageInfo = 1;
3181 iopl.fFlags = kIOPLOnDevice;
3182 }
3183 else {
3184 iopl.fFlags = 0;
3185 }
3186
3187 iopl.fIOMDOffset = mdOffset;
3188 iopl.fPageInfo = pageIndex;
3189 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
3190
3191 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
3192 // Clean up the partially created and unsaved iopl
3193 if (iopl.fIOPL) {
3194 upl_abort(iopl.fIOPL, 0);
3195 upl_deallocate(iopl.fIOPL);
3196 }
3197 goto abortExit;
3198 }
3199 dataP = 0;
3200
3201 // Check for multiple iopls in one virtual range
3202 pageIndex += numPageInfo;
3203 mdOffset -= iopl.fPageOffset;
3204 if (ioplSize < numBytes) {
3205 numBytes -= ioplSize;
3206 startPage += ioplSize;
3207 mdOffset += ioplSize;
3208 iopl.fPageOffset = 0;
3209 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
3210 }
3211 else {
3212 mdOffset += numBytes;
3213 break;
3214 }
3215 }
3216 }
3217
3218 _highestPage = highestPage;
3219
3220 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
3221 }
3222
3223 #if IOTRACKING
3224 if (!(_flags & kIOMemoryAutoPrepare) && (kIOReturnSuccess == error))
3225 {
3226 dataP = getDataP(_memoryEntries);
3227 if (!dataP->fWireTracking.link.next)
3228 {
3229 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false, tag);
3230 }
3231 }
3232 #endif /* IOTRACKING */
3233
3234 return (error);
3235
3236 abortExit:
3237 {
3238 dataP = getDataP(_memoryEntries);
3239 UInt done = getNumIOPL(_memoryEntries, dataP);
3240 ioPLBlock *ioplList = getIOPLList(dataP);
3241
3242 for (UInt range = 0; range < done; range++)
3243 {
3244 if (ioplList[range].fIOPL) {
3245 upl_abort(ioplList[range].fIOPL, 0);
3246 upl_deallocate(ioplList[range].fIOPL);
3247 }
3248 }
3249 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3250 }
3251
3252 if (error == KERN_FAILURE)
3253 error = kIOReturnCannotWire;
3254 else if (error == KERN_MEMORY_ERROR)
3255 error = kIOReturnNoResources;
3256
3257 return error;
3258 }
3259
3260 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3261 {
3262 ioGMDData * dataP;
3263 unsigned dataSize = size;
3264
3265 if (!_memoryEntries) {
3266 _memoryEntries = OSData::withCapacity(dataSize);
3267 if (!_memoryEntries)
3268 return false;
3269 }
3270 else if (!_memoryEntries->initWithCapacity(dataSize))
3271 return false;
3272
3273 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3274 dataP = getDataP(_memoryEntries);
3275
3276 if (mapper == kIOMapperWaitSystem) {
3277 IOMapper::checkForSystemMapper();
3278 mapper = IOMapper::gSystem;
3279 }
3280 dataP->fMapper = mapper;
3281 dataP->fPageCnt = 0;
3282 dataP->fMappedBase = 0;
3283 dataP->fDMAMapNumAddressBits = 64;
3284 dataP->fDMAMapAlignment = 0;
3285 dataP->fPreparationID = kIOPreparationIDUnprepared;
3286 dataP->fDiscontig = false;
3287 dataP->fCompletionError = false;
3288 dataP->fMappedBaseValid = false;
3289
3290 return (true);
3291 }
3292
3293 IOReturn IOMemoryDescriptor::dmaMap(
3294 IOMapper * mapper,
3295 IODMACommand * command,
3296 const IODMAMapSpecification * mapSpec,
3297 uint64_t offset,
3298 uint64_t length,
3299 uint64_t * mapAddress,
3300 uint64_t * mapLength)
3301 {
3302 IOReturn err;
3303 uint32_t mapOptions;
3304
3305 mapOptions = 0;
3306 mapOptions |= kIODMAMapReadAccess;
3307 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3308
3309 err = mapper->iovmMapMemory(this, offset, length, mapOptions,
3310 mapSpec, command, NULL, mapAddress, mapLength);
3311
3312 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
3313
3314 return (err);
3315 }
3316
3317 void IOMemoryDescriptor::dmaMapRecord(
3318 IOMapper * mapper,
3319 IODMACommand * command,
3320 uint64_t mapLength)
3321 {
3322 kern_allocation_name_t alloc;
3323 int16_t prior;
3324
3325 if ((alloc = mapper->fAllocName) /* && mapper != IOMapper::gSystem */)
3326 {
3327 kern_allocation_update_size(mapper->fAllocName, mapLength);
3328 }
3329
3330 if (!command) return;
3331 prior = OSAddAtomic16(1, &_dmaReferences);
3332 if (!prior)
3333 {
3334 if (alloc && (VM_KERN_MEMORY_NONE != _kernelTag))
3335 {
3336 _mapName = alloc;
3337 mapLength = _length;
3338 kern_allocation_update_subtotal(alloc, _kernelTag, mapLength);
3339 }
3340 else _mapName = NULL;
3341 }
3342 }
3343
3344 IOReturn IOMemoryDescriptor::dmaUnmap(
3345 IOMapper * mapper,
3346 IODMACommand * command,
3347 uint64_t offset,
3348 uint64_t mapAddress,
3349 uint64_t mapLength)
3350 {
3351 IOReturn ret;
3352 kern_allocation_name_t alloc;
3353 kern_allocation_name_t mapName;
3354 int16_t prior;
3355
3356 mapName = 0;
3357 prior = 0;
3358 if (command)
3359 {
3360 mapName = _mapName;
3361 if (_dmaReferences) prior = OSAddAtomic16(-1, &_dmaReferences);
3362 else panic("_dmaReferences underflow");
3363 }
3364
3365 if (!mapLength) return (kIOReturnSuccess);
3366
3367 ret = mapper->iovmUnmapMemory(this, command, mapAddress, mapLength);
3368
3369 if ((alloc = mapper->fAllocName))
3370 {
3371 kern_allocation_update_size(alloc, -mapLength);
3372 if ((1 == prior) && mapName && (VM_KERN_MEMORY_NONE != _kernelTag))
3373 {
3374 mapLength = _length;
3375 kern_allocation_update_subtotal(mapName, _kernelTag, -mapLength);
3376 }
3377 }
3378
3379 return (ret);
3380 }
3381
3382 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3383 IOMapper * mapper,
3384 IODMACommand * command,
3385 const IODMAMapSpecification * mapSpec,
3386 uint64_t offset,
3387 uint64_t length,
3388 uint64_t * mapAddress,
3389 uint64_t * mapLength)
3390 {
3391 IOReturn err = kIOReturnSuccess;
3392 ioGMDData * dataP;
3393 IOOptionBits type = _flags & kIOMemoryTypeMask;
3394
3395 *mapAddress = 0;
3396 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3397 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3398
3399 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3400 || offset || (length != _length))
3401 {
3402 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3403 }
3404 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3405 {
3406 const ioPLBlock * ioplList = getIOPLList(dataP);
3407 upl_page_info_t * pageList;
3408 uint32_t mapOptions = 0;
3409
3410 IODMAMapSpecification mapSpec;
3411 bzero(&mapSpec, sizeof(mapSpec));
3412 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3413 mapSpec.alignment = dataP->fDMAMapAlignment;
3414
3415 // For external UPLs the fPageInfo field points directly to
3416 // the upl's upl_page_info_t array.
3417 if (ioplList->fFlags & kIOPLExternUPL)
3418 {
3419 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3420 mapOptions |= kIODMAMapPagingPath;
3421 }
3422 else pageList = getPageList(dataP);
3423
3424 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3425 {
3426 mapOptions |= kIODMAMapPageListFullyOccupied;
3427 }
3428
3429 assert(dataP->fDMAAccess);
3430 mapOptions |= dataP->fDMAAccess;
3431
3432 // Check for direct device non-paged memory
3433 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3434
3435 IODMAMapPageList dmaPageList =
3436 {
3437 .pageOffset = (uint32_t)(ioplList->fPageOffset & page_mask),
3438 .pageListCount = _pages,
3439 .pageList = &pageList[0]
3440 };
3441 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3442 command, &dmaPageList, mapAddress, mapLength);
3443
3444 if (kIOReturnSuccess == err) dmaMapRecord(mapper, command, *mapLength);
3445 }
3446
3447 return (err);
3448 }
3449
3450 /*
3451 * prepare
3452 *
3453 * Prepare the memory for an I/O transfer. This involves paging in
3454 * the memory, if necessary, and wiring it down for the duration of
3455 * the transfer. The complete() method completes the processing of
3456 * the memory after the I/O transfer finishes. This method needn't be
3457 * called for non-pageable memory.
3458 */
3459
3460 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3461 {
3462 IOReturn error = kIOReturnSuccess;
3463 IOOptionBits type = _flags & kIOMemoryTypeMask;
3464
3465 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3466 return kIOReturnSuccess;
3467
3468 assert (!(kIOMemoryRemote & _flags));
3469 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3470
3471 if (_prepareLock) IOLockLock(_prepareLock);
3472
3473 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3474 {
3475 error = wireVirtual(forDirection);
3476 }
3477
3478 if (kIOReturnSuccess == error)
3479 {
3480 if (1 == ++_wireCount)
3481 {
3482 if (kIOMemoryClearEncrypt & _flags)
3483 {
3484 performOperation(kIOMemoryClearEncrypted, 0, _length);
3485 }
3486 }
3487 }
3488
3489 if (_prepareLock) IOLockUnlock(_prepareLock);
3490
3491 return error;
3492 }
3493
3494 /*
3495 * complete
3496 *
3497 * Complete processing of the memory after an I/O transfer finishes.
3498 * This method should not be called unless a prepare was previously
3499 * issued; the prepare() and complete() must occur in pairs,
3500 * before and after an I/O transfer involving pageable memory.
3501 */
3502
3503 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3504 {
3505 IOOptionBits type = _flags & kIOMemoryTypeMask;
3506 ioGMDData * dataP;
3507
3508 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3509 return kIOReturnSuccess;
3510
3511 assert (!(kIOMemoryRemote & _flags));
3512 if (kIOMemoryRemote & _flags) return (kIOReturnNotAttached);
3513
3514 if (_prepareLock) IOLockLock(_prepareLock);
3515 do
3516 {
3517 assert(_wireCount);
3518 if (!_wireCount) break;
3519 dataP = getDataP(_memoryEntries);
3520 if (!dataP) break;
3521
3522 if (kIODirectionCompleteWithError & forDirection) dataP->fCompletionError = true;
3523
3524 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3525 {
3526 performOperation(kIOMemorySetEncrypted, 0, _length);
3527 }
3528
3529 _wireCount--;
3530 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3531 {
3532 ioPLBlock *ioplList = getIOPLList(dataP);
3533 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3534
3535 if (_wireCount)
3536 {
3537 // kIODirectionCompleteWithDataValid & forDirection
3538 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3539 {
3540 vm_tag_t tag;
3541 tag = getVMTag(kernel_map);
3542 for (ind = 0; ind < count; ind++)
3543 {
3544 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL, tag);
3545 }
3546 }
3547 }
3548 else
3549 {
3550 if (_dmaReferences) panic("complete() while dma active");
3551
3552 if (dataP->fMappedBaseValid) {
3553 dmaUnmap(dataP->fMapper, NULL, 0, dataP->fMappedBase, dataP->fMappedLength);
3554 dataP->fMappedBaseValid = dataP->fMappedBase = 0;
3555 }
3556 #if IOTRACKING
3557 if (dataP->fWireTracking.link.next) IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3558 #endif /* IOTRACKING */
3559 // Only complete iopls that we created, i.e. those for TypeVirtual
3560 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3561 {
3562 for (ind = 0; ind < count; ind++)
3563 if (ioplList[ind].fIOPL) {
3564 if (dataP->fCompletionError)
3565 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3566 else
3567 upl_commit(ioplList[ind].fIOPL, 0, 0);
3568 upl_deallocate(ioplList[ind].fIOPL);
3569 }
3570 } else if (kIOMemoryTypeUPL == type) {
3571 upl_set_referenced(ioplList[0].fIOPL, false);
3572 }
3573
3574 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3575
3576 dataP->fPreparationID = kIOPreparationIDUnprepared;
3577 _flags &= ~kIOMemoryPreparedReadOnly;
3578 }
3579 }
3580 }
3581 while (false);
3582
3583 if (_prepareLock) IOLockUnlock(_prepareLock);
3584
3585 return kIOReturnSuccess;
3586 }
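#if 0
/*
 * Illustrative sketch (editor's note): the prepare()/complete() pairing
 * described in the comments above, as seen from a driver issuing one I/O.
 * The helper name and direction are assumptions.
 */
static IOReturn
ExampleDoOneTransfer(IOMemoryDescriptor * md)
{
    IOReturn rc = md->prepare(kIODirectionInOut);   // page in and wire down
    if (kIOReturnSuccess != rc) return rc;

    // ... program the hardware and wait for the transfer to finish ...

    md->complete(kIODirectionInOut);                // must pair with prepare()
    return kIOReturnSuccess;
}
#endif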
3587
3588 IOReturn IOGeneralMemoryDescriptor::doMap(
3589 vm_map_t __addressMap,
3590 IOVirtualAddress * __address,
3591 IOOptionBits options,
3592 IOByteCount __offset,
3593 IOByteCount __length )
3594 {
3595 #ifndef __LP64__
3596 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3597 #endif /* !__LP64__ */
3598
3599 kern_return_t err;
3600
3601 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3602 mach_vm_size_t offset = mapping->fOffset + __offset;
3603 mach_vm_size_t length = mapping->fLength;
3604
3605 IOOptionBits type = _flags & kIOMemoryTypeMask;
3606 Ranges vec = _ranges;
3607
3608 mach_vm_address_t range0Addr = 0;
3609 mach_vm_size_t range0Len = 0;
3610
3611 if ((offset >= _length) || ((offset + length) > _length))
3612 return( kIOReturnBadArgument );
3613
3614 assert (!(kIOMemoryRemote & _flags));
3615 if (kIOMemoryRemote & _flags) return (0);
3616
3617 if (vec.v)
3618 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3619
3620 // mapping source == dest? (could be much better)
3621 if (_task
3622 && (mapping->fAddressTask == _task)
3623 && (mapping->fAddressMap == get_task_map(_task))
3624 && (options & kIOMapAnywhere)
3625 && (1 == _rangesCount)
3626 && (0 == offset)
3627 && range0Addr
3628 && (length <= range0Len))
3629 {
3630 mapping->fAddress = range0Addr;
3631 mapping->fOptions |= kIOMapStatic;
3632
3633 return( kIOReturnSuccess );
3634 }
3635
3636 if (!_memRef)
3637 {
3638 IOOptionBits createOptions = 0;
3639 if (!(kIOMapReadOnly & options))
3640 {
3641 createOptions |= kIOMemoryReferenceWrite;
3642 #if DEVELOPMENT || DEBUG
3643 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3644 {
3645 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3646 }
3647 #endif
3648 }
3649 err = memoryReferenceCreate(createOptions, &_memRef);
3650 if (kIOReturnSuccess != err) return (err);
3651 }
3652
3653 memory_object_t pager;
3654 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3655
3656 // <upl_transpose //
3657 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3658 {
3659 do
3660 {
3661 upl_t redirUPL2;
3662 upl_size_t size;
3663 upl_control_flags_t flags;
3664 unsigned int lock_count;
3665
3666 if (!_memRef || (1 != _memRef->count))
3667 {
3668 err = kIOReturnNotReadable;
3669 break;
3670 }
3671
3672 size = round_page(mapping->fLength);
3673 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3674 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3675
3676 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3677 NULL, NULL,
3678 &flags, getVMTag(kernel_map)))
3679 redirUPL2 = NULL;
3680
3681 for (lock_count = 0;
3682 IORecursiveLockHaveLock(gIOMemoryLock);
3683 lock_count++) {
3684 UNLOCK;
3685 }
3686 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3687 for (;
3688 lock_count;
3689 lock_count--) {
3690 LOCK;
3691 }
3692
3693 if (kIOReturnSuccess != err)
3694 {
3695 IOLog("upl_transpose(%x)\n", err);
3696 err = kIOReturnSuccess;
3697 }
3698
3699 if (redirUPL2)
3700 {
3701 upl_commit(redirUPL2, NULL, 0);
3702 upl_deallocate(redirUPL2);
3703 redirUPL2 = 0;
3704 }
3705 {
3706 // swap the memEntries since they now refer to different vm_objects
3707 IOMemoryReference * me = _memRef;
3708 _memRef = mapping->fMemory->_memRef;
3709 mapping->fMemory->_memRef = me;
3710 }
3711 if (pager)
3712 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3713 }
3714 while (false);
3715 }
3716 // upl_transpose> //
3717 else
3718 {
3719 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3720 #if IOTRACKING
3721 if ((err == KERN_SUCCESS) && ((kIOTracking & gIOKitDebug) || _task))
3722 {
3723 // only dram maps in the default-on development case
3724 IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
3725 }
3726 #endif /* IOTRACKING */
3727 if ((err == KERN_SUCCESS) && pager)
3728 {
3729 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3730
3731 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3732 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3733 {
3734 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3735 }
3736 }
3737 }
3738
3739 return (err);
3740 }
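#if 0
/*
 * Illustrative sketch (editor's note): the client-side calls that end up in
 * doMap() above. map() targets the kernel task; createMappingInTask() can
 * target another task. The helper name and "userTask" are assumptions.
 */
static void
ExampleMapDescriptor(IOMemoryDescriptor * md, task_t userTask)
{
    // Kernel mapping of the whole descriptor.
    IOMemoryMap * kmap = md->map();
    if (kmap) {
        void * kva = (void *) kmap->getVirtualAddress();
        // ... use kva ...
        kmap->release();    // the mapping is removed when the last reference drops
    }

    // Read-only mapping into a user task, placed anywhere in its address space.
    IOMemoryMap * umap = md->createMappingInTask(userTask, 0,
                                                 kIOMapAnywhere | kIOMapReadOnly);
    if (umap) umap->release();
}
#endif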
3741
3742 #if IOTRACKING
3743 IOReturn
3744 IOMemoryMapTracking(IOTrackingUser * tracking, task_t * task,
3745 mach_vm_address_t * address, mach_vm_size_t * size)
3746 {
3747 #define iomap_offsetof(type, field) ((size_t)(&((type *)0)->field))
3748
3749 IOMemoryMap * map = (typeof(map)) (((uintptr_t) tracking) - iomap_offsetof(IOMemoryMap, fTracking));
3750
3751 if (!map->fAddressMap || (map->fAddressMap != get_task_map(map->fAddressTask))) return (kIOReturnNotReady);
3752
3753 *task = map->fAddressTask;
3754 *address = map->fAddress;
3755 *size = map->fLength;
3756
3757 return (kIOReturnSuccess);
3758 }
3759 #endif /* IOTRACKING */
3760
3761 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3762 vm_map_t addressMap,
3763 IOVirtualAddress __address,
3764 IOByteCount __length )
3765 {
3766 return (super::doUnmap(addressMap, __address, __length));
3767 }
3768
3769 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3770
3771 #undef super
3772 #define super OSObject
3773
3774 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3775
3776 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3777 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3778 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3779 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3780 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3781 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3782 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3783 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3784
3785 /* ex-inline function implementation */
3786 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3787 { return( getPhysicalSegment( 0, 0 )); }
3788
3789 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3790
3791 bool IOMemoryMap::init(
3792 task_t intoTask,
3793 mach_vm_address_t toAddress,
3794 IOOptionBits _options,
3795 mach_vm_size_t _offset,
3796 mach_vm_size_t _length )
3797 {
3798 if (!intoTask)
3799 return( false);
3800
3801 if (!super::init())
3802 return(false);
3803
3804 fAddressMap = get_task_map(intoTask);
3805 if (!fAddressMap)
3806 return(false);
3807 vm_map_reference(fAddressMap);
3808
3809 fAddressTask = intoTask;
3810 fOptions = _options;
3811 fLength = _length;
3812 fOffset = _offset;
3813 fAddress = toAddress;
3814
3815 return (true);
3816 }
3817
3818 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3819 {
3820 if (!_memory)
3821 return(false);
3822
3823 if (!fSuperMap)
3824 {
3825 if( (_offset + fLength) > _memory->getLength())
3826 return( false);
3827 fOffset = _offset;
3828 }
3829
3830 _memory->retain();
3831 if (fMemory)
3832 {
3833 if (fMemory != _memory)
3834 fMemory->removeMapping(this);
3835 fMemory->release();
3836 }
3837 fMemory = _memory;
3838
3839 return( true );
3840 }
3841
3842 IOReturn IOMemoryDescriptor::doMap(
3843 vm_map_t __addressMap,
3844 IOVirtualAddress * __address,
3845 IOOptionBits options,
3846 IOByteCount __offset,
3847 IOByteCount __length )
3848 {
3849 return (kIOReturnUnsupported);
3850 }
3851
3852 IOReturn IOMemoryDescriptor::handleFault(
3853 void * _pager,
3854 mach_vm_size_t sourceOffset,
3855 mach_vm_size_t length)
3856 {
3857 if( kIOMemoryRedirected & _flags)
3858 {
3859 #if DEBUG
3860 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3861 #endif
3862 do {
3863 SLEEP;
3864 } while( kIOMemoryRedirected & _flags );
3865 }
3866 return (kIOReturnSuccess);
3867 }
3868
3869 IOReturn IOMemoryDescriptor::populateDevicePager(
3870 void * _pager,
3871 vm_map_t addressMap,
3872 mach_vm_address_t address,
3873 mach_vm_size_t sourceOffset,
3874 mach_vm_size_t length,
3875 IOOptionBits options )
3876 {
3877 IOReturn err = kIOReturnSuccess;
3878 memory_object_t pager = (memory_object_t) _pager;
3879 mach_vm_size_t size;
3880 mach_vm_size_t bytes;
3881 mach_vm_size_t page;
3882 mach_vm_size_t pageOffset;
3883 mach_vm_size_t pagerOffset;
3884 IOPhysicalLength segLen, chunk;
3885 addr64_t physAddr;
3886 IOOptionBits type;
3887
3888 type = _flags & kIOMemoryTypeMask;
3889
3890 if (reserved->dp.pagerContig)
3891 {
3892 sourceOffset = 0;
3893 pagerOffset = 0;
3894 }
3895
3896 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3897 assert( physAddr );
3898 pageOffset = physAddr - trunc_page_64( physAddr );
3899 pagerOffset = sourceOffset;
3900
3901 size = length + pageOffset;
3902 physAddr -= pageOffset;
3903
3904 segLen += pageOffset;
3905 bytes = size;
3906 do
3907 {
3908 // in the middle of the loop only map whole pages
3909 if( segLen >= bytes) segLen = bytes;
3910 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3911 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3912
3913 if (kIOReturnSuccess != err) break;
3914
3915 #if DEBUG || DEVELOPMENT
3916 if ((kIOMemoryTypeUPL != type)
3917 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3918 {
3919 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3920 }
3921 #endif /* DEBUG || DEVELOPMENT */
3922
3923 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3924 for (page = 0;
3925 (page < segLen) && (KERN_SUCCESS == err);
3926 page += chunk)
3927 {
3928 err = device_pager_populate_object(pager, pagerOffset,
3929 (ppnum_t)(atop_64(physAddr + page)), chunk);
3930 pagerOffset += chunk;
3931 }
3932
3933 assert (KERN_SUCCESS == err);
3934 if (err) break;
3935
3936 	    // This call to vm_fault causes an early pmap-level resolution of the
3937 	    // kernel mappings created above, since faulting them in later cannot
3938 	    // be done from interrupt level.
3939 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3940 {
3941 err = vm_fault(addressMap,
3942 (vm_map_offset_t)trunc_page_64(address),
3943 options & kIOMapReadOnly ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE,
3944 FALSE, VM_KERN_MEMORY_NONE,
3945 THREAD_UNINT, NULL,
3946 (vm_map_offset_t)0);
3947
3948 if (KERN_SUCCESS != err) break;
3949 }
3950
3951 sourceOffset += segLen - pageOffset;
3952 address += segLen;
3953 bytes -= segLen;
3954 pageOffset = 0;
3955 }
3956 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3957
3958 if (bytes)
3959 err = kIOReturnBadArgument;
3960
3961 return (err);
3962 }
3963
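// With the kIOMap64Bit mapping path, __address carries the IOMemoryMap object
// itself rather than a raw virtual address, and __length must be zero; the
// address map, address and length are read from the mapping before the range
// is deallocated (unless the mapping was created with kIOMapOverwrite).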
3964 IOReturn IOMemoryDescriptor::doUnmap(
3965 vm_map_t addressMap,
3966 IOVirtualAddress __address,
3967 IOByteCount __length )
3968 {
3969 IOReturn err;
3970 IOMemoryMap * mapping;
3971 mach_vm_address_t address;
3972 mach_vm_size_t length;
3973
3974 if (__length) panic("doUnmap");
3975
3976 mapping = (IOMemoryMap *) __address;
3977 addressMap = mapping->fAddressMap;
3978 address = mapping->fAddress;
3979 length = mapping->fLength;
3980
3981 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3982 else
3983 {
3984 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3985 addressMap = IOPageableMapForAddress( address );
3986 #if DEBUG
3987 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3988 addressMap, address, length );
3989 #endif
3990 err = mach_vm_deallocate( addressMap, address, length );
3991 }
3992
3993 #if IOTRACKING
3994 IOTrackingRemoveUser(gIOMapTracking, &mapping->fTracking);
3995 #endif /* IOTRACKING */
3996
3997 return (err);
3998 }
3999
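// Set or clear kIOMemoryRedirected and propagate the change to every mapping.
// When redirection is lifted, device-pager backed kernel mappings are
// repopulated and any threads sleeping in handleFault() are woken.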
4000 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
4001 {
4002 IOReturn err = kIOReturnSuccess;
4003 IOMemoryMap * mapping = 0;
4004 OSIterator * iter;
4005
4006 LOCK;
4007
4008 if( doRedirect)
4009 _flags |= kIOMemoryRedirected;
4010 else
4011 _flags &= ~kIOMemoryRedirected;
4012
4013 do {
4014 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
4015
4016 memory_object_t pager;
4017
4018 if( reserved)
4019 pager = (memory_object_t) reserved->dp.devicePager;
4020 else
4021 pager = MACH_PORT_NULL;
4022
4023 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
4024 {
4025 mapping->redirect( safeTask, doRedirect );
4026 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
4027 {
4028 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
4029 }
4030 }
4031
4032 iter->release();
4033 }
4034 } while( false );
4035
4036 if (!doRedirect)
4037 {
4038 WAKEUP;
4039 }
4040
4041 UNLOCK;
4042
4043 #ifndef __LP64__
4044 // temporary binary compatibility
4045 IOSubMemoryDescriptor * subMem;
4046 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
4047 err = subMem->redirect( safeTask, doRedirect );
4048 else
4049 err = kIOReturnSuccess;
4050 #endif /* !__LP64__ */
4051
4052 return( err );
4053 }
4054
4055 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
4056 {
4057 IOReturn err = kIOReturnSuccess;
4058
4059 if( fSuperMap) {
4060 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
4061 } else {
4062
4063 LOCK;
4064
4065 do
4066 {
4067 if (!fAddress)
4068 break;
4069 if (!fAddressMap)
4070 break;
4071
4072 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
4073 && (0 == (fOptions & kIOMapStatic)))
4074 {
4075 IOUnmapPages( fAddressMap, fAddress, fLength );
4076 err = kIOReturnSuccess;
4077 #if DEBUG
4078 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
4079 #endif
4080 }
4081 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
4082 {
4083 IOOptionBits newMode;
4084 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
4085 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
4086 }
4087 }
4088 while (false);
4089 UNLOCK;
4090 }
4091
4092 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4093 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4094 && safeTask
4095 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
4096 fMemory->redirect(safeTask, doRedirect);
4097
4098 return( err );
4099 }
4100
4101 IOReturn IOMemoryMap::unmap( void )
4102 {
4103 IOReturn err;
4104
4105 LOCK;
4106
4107 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
4108 && (0 == (kIOMapStatic & fOptions))) {
4109
4110 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
4111
4112 } else
4113 err = kIOReturnSuccess;
4114
4115 if (fAddressMap)
4116 {
4117 vm_map_deallocate(fAddressMap);
4118 fAddressMap = 0;
4119 }
4120
4121 fAddress = 0;
4122
4123 UNLOCK;
4124
4125 return( err );
4126 }
4127
4128 void IOMemoryMap::taskDied( void )
4129 {
4130 LOCK;
4131 if (fUserClientUnmap) unmap();
4132 #if IOTRACKING
4133 else IOTrackingRemoveUser(gIOMapTracking, &fTracking);
4134 #endif /* IOTRACKING */
4135
4136 if( fAddressMap) {
4137 vm_map_deallocate(fAddressMap);
4138 fAddressMap = 0;
4139 }
4140 fAddressTask = 0;
4141 fAddress = 0;
4142 UNLOCK;
4143 }
4144
4145 IOReturn IOMemoryMap::userClientUnmap( void )
4146 {
4147 fUserClientUnmap = true;
4148 return (kIOReturnSuccess);
4149 }
4150
4151 // Overload the release mechanism. Every mapping must be a member of its
4152 // memory descriptor's _mappings set, so there are always two references
4153 // on a mapping. When either of those references is released we need to
4154 // free ourselves.
4155 void IOMemoryMap::taggedRelease(const void *tag) const
4156 {
4157 LOCK;
4158 super::taggedRelease(tag, 2);
4159 UNLOCK;
4160 }
4161
4162 void IOMemoryMap::free()
4163 {
4164 unmap();
4165
4166 if (fMemory)
4167 {
4168 LOCK;
4169 fMemory->removeMapping(this);
4170 UNLOCK;
4171 fMemory->release();
4172 }
4173
4174 if (fOwner && (fOwner != fMemory))
4175 {
4176 LOCK;
4177 fOwner->removeMapping(this);
4178 UNLOCK;
4179 }
4180
4181 if (fSuperMap)
4182 fSuperMap->release();
4183
4184 if (fRedirUPL) {
4185 upl_commit(fRedirUPL, NULL, 0);
4186 upl_deallocate(fRedirUPL);
4187 }
4188
4189 super::free();
4190 }
4191
4192 IOByteCount IOMemoryMap::getLength()
4193 {
4194 return( fLength );
4195 }
4196
4197 IOVirtualAddress IOMemoryMap::getVirtualAddress()
4198 {
4199 #ifndef __LP64__
4200 if (fSuperMap)
4201 fSuperMap->getVirtualAddress();
4202 else if (fAddressMap
4203 && vm_map_is_64bit(fAddressMap)
4204 && (sizeof(IOVirtualAddress) < 8))
4205 {
4206 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
4207 }
4208 #endif /* !__LP64__ */
4209
4210 return (fAddress);
4211 }
4212
4213 #ifndef __LP64__
4214 mach_vm_address_t IOMemoryMap::getAddress()
4215 {
4216 return( fAddress);
4217 }
4218
4219 mach_vm_size_t IOMemoryMap::getSize()
4220 {
4221 return( fLength );
4222 }
4223 #endif /* !__LP64__ */
4224
4225
4226 task_t IOMemoryMap::getAddressTask()
4227 {
4228 if( fSuperMap)
4229 return( fSuperMap->getAddressTask());
4230 else
4231 return( fAddressTask);
4232 }
4233
4234 IOOptionBits IOMemoryMap::getMapOptions()
4235 {
4236 return( fOptions);
4237 }
4238
4239 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
4240 {
4241 return( fMemory );
4242 }
4243
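// Decide whether this existing mapping can satisfy a new mapping request in
// the same task: protection and cache mode must be compatible and the
// requested range must lie within this mapping. On success this mapping is
// retained and either returned directly (exact match) or installed as the
// new mapping's super-map.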
4244 IOMemoryMap * IOMemoryMap::copyCompatible(
4245 IOMemoryMap * newMapping )
4246 {
4247 task_t task = newMapping->getAddressTask();
4248 mach_vm_address_t toAddress = newMapping->fAddress;
4249 IOOptionBits _options = newMapping->fOptions;
4250 mach_vm_size_t _offset = newMapping->fOffset;
4251 mach_vm_size_t _length = newMapping->fLength;
4252
4253 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
4254 return( 0 );
4255 if( (fOptions ^ _options) & kIOMapReadOnly)
4256 return( 0 );
4257 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
4258 && ((fOptions ^ _options) & kIOMapCacheMask))
4259 return( 0 );
4260
4261 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
4262 return( 0 );
4263
4264 if( _offset < fOffset)
4265 return( 0 );
4266
4267 _offset -= fOffset;
4268
4269 if( (_offset + _length) > fLength)
4270 return( 0 );
4271
4272 retain();
4273 if( (fLength == _length) && (!_offset))
4274 {
4275 newMapping = this;
4276 }
4277 else
4278 {
4279 newMapping->fSuperMap = this;
4280 newMapping->fOffset = fOffset + _offset;
4281 newMapping->fAddress = fAddress + _offset;
4282 }
4283
4284 return( newMapping );
4285 }
4286
4287 IOReturn IOMemoryMap::wireRange(
4288 uint32_t options,
4289 mach_vm_size_t offset,
4290 mach_vm_size_t length)
4291 {
4292 IOReturn kr;
4293 mach_vm_address_t start = trunc_page_64(fAddress + offset);
4294 mach_vm_address_t end = round_page_64(fAddress + offset + length);
4295 vm_prot_t prot;
4296
4297 prot = (kIODirectionOutIn & options);
4298 if (prot)
4299 {
4300 kr = vm_map_wire_kernel(fAddressMap, start, end, prot, fMemory->getVMTag(kernel_map), FALSE);
4301 }
4302 else
4303 {
4304 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
4305 }
4306
4307 return (kr);
4308 }
4309
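/*
 * Illustrative sketch (not part of the original source): wiring and then
 * unwiring a whole kernel mapping with wireRange(). `map` is assumed to be
 * an IOMemoryMap obtained from createMappingInTask(); passing direction bits
 * wires the range, passing 0 unwires it.
 *
 *   IOReturn rc = map->wireRange(kIODirectionOutIn, 0, map->getLength());
 *   if (kIOReturnSuccess == rc) {
 *       // ... access the wired pages ...
 *       map->wireRange(0, 0, map->getLength());    // prot == 0 -> unwire
 *   }
 */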
4310
4311 IOPhysicalAddress
4312 #ifdef __LP64__
4313 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
4314 #else /* !__LP64__ */
4315 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
4316 #endif /* !__LP64__ */
4317 {
4318 IOPhysicalAddress address;
4319
4320 LOCK;
4321 #ifdef __LP64__
4322 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
4323 #else /* !__LP64__ */
4324 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
4325 #endif /* !__LP64__ */
4326 UNLOCK;
4327
4328 return( address );
4329 }
4330
4331 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4332
4333 #undef super
4334 #define super OSObject
4335
4336 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4337
4338 void IOMemoryDescriptor::initialize( void )
4339 {
4340 if( 0 == gIOMemoryLock)
4341 gIOMemoryLock = IORecursiveLockAlloc();
4342
4343 gIOLastPage = IOGetLastPageNumber();
4344 }
4345
4346 void IOMemoryDescriptor::free( void )
4347 {
4348 if( _mappings) _mappings->release();
4349
4350 if (reserved)
4351 {
4352 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4353 reserved = NULL;
4354 }
4355 super::free();
4356 }
4357
4358 IOMemoryMap * IOMemoryDescriptor::setMapping(
4359 task_t intoTask,
4360 IOVirtualAddress mapAddress,
4361 IOOptionBits options )
4362 {
4363 return (createMappingInTask( intoTask, mapAddress,
4364 options | kIOMapStatic,
4365 0, getLength() ));
4366 }
4367
4368 IOMemoryMap * IOMemoryDescriptor::map(
4369 IOOptionBits options )
4370 {
4371 return (createMappingInTask( kernel_task, 0,
4372 options | kIOMapAnywhere,
4373 0, getLength() ));
4374 }
4375
4376 #ifndef __LP64__
4377 IOMemoryMap * IOMemoryDescriptor::map(
4378 task_t intoTask,
4379 IOVirtualAddress atAddress,
4380 IOOptionBits options,
4381 IOByteCount offset,
4382 IOByteCount length )
4383 {
4384 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4385 {
4386 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4387 return (0);
4388 }
4389
4390 return (createMappingInTask(intoTask, atAddress,
4391 options, offset, length));
4392 }
4393 #endif /* !__LP64__ */
4394
4395 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4396 task_t intoTask,
4397 mach_vm_address_t atAddress,
4398 IOOptionBits options,
4399 mach_vm_size_t offset,
4400 mach_vm_size_t length)
4401 {
4402 IOMemoryMap * result;
4403 IOMemoryMap * mapping;
4404
4405 if (0 == length)
4406 length = getLength();
4407
4408 mapping = new IOMemoryMap;
4409
4410 if( mapping
4411 && !mapping->init( intoTask, atAddress,
4412 options, offset, length )) {
4413 mapping->release();
4414 mapping = 0;
4415 }
4416
4417 if (mapping)
4418 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4419 else
4420 result = 0;
4421
4422 #if DEBUG
4423 if (!result)
4424 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4425 this, atAddress, (uint32_t) options, offset, length);
4426 #endif
4427
4428 return (result);
4429 }
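
/*
 * Illustrative sketch (not part of the original source): typical use of
 * createMappingInTask() to map a prepared descriptor into the kernel.
 * `md` and the option choices here are assumptions for the example.
 *
 *   IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                               kIOMapAnywhere, 0, 0);
 *   if (map) {
 *       void * kva = (void *) map->getVirtualAddress();
 *       // ... use kva for map->getLength() bytes ...
 *       map->release();    // dropping the map removes the mapping
 *   }
 */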
4430
4431 #ifndef __LP64__ // there is only a 64-bit version for LP64
4432 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4433 IOOptionBits options,
4434 IOByteCount offset)
4435 {
4436 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4437 }
4438 #endif
4439
4440 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4441 IOOptionBits options,
4442 mach_vm_size_t offset)
4443 {
4444 IOReturn err = kIOReturnSuccess;
4445 IOMemoryDescriptor * physMem = 0;
4446
4447 LOCK;
4448
4449 if (fAddress && fAddressMap) do
4450 {
4451 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4452 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4453 {
4454 physMem = fMemory;
4455 physMem->retain();
4456 }
4457
4458 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4459 {
4460 upl_size_t size = round_page(fLength);
4461 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4462 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4463 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4464 NULL, NULL,
4465 &flags, fMemory->getVMTag(kernel_map)))
4466 fRedirUPL = 0;
4467
4468 if (physMem)
4469 {
4470 IOUnmapPages( fAddressMap, fAddress, fLength );
4471 if ((false))
4472 physMem->redirect(0, true);
4473 }
4474 }
4475
4476 if (newBackingMemory)
4477 {
4478 if (newBackingMemory != fMemory)
4479 {
4480 fOffset = 0;
4481 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4482 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4483 offset, fLength))
4484 err = kIOReturnError;
4485 }
4486 if (fRedirUPL)
4487 {
4488 upl_commit(fRedirUPL, NULL, 0);
4489 upl_deallocate(fRedirUPL);
4490 fRedirUPL = 0;
4491 }
4492 if ((false) && physMem)
4493 physMem->redirect(0, false);
4494 }
4495 }
4496 while (false);
4497
4498 UNLOCK;
4499
4500 if (physMem)
4501 physMem->release();
4502
4503 return (err);
4504 }
4505
4506 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4507 IOMemoryDescriptor * owner,
4508 task_t __intoTask,
4509 IOVirtualAddress __address,
4510 IOOptionBits options,
4511 IOByteCount __offset,
4512 IOByteCount __length )
4513 {
4514 #ifndef __LP64__
4515 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4516 #endif /* !__LP64__ */
4517
4518 IOMemoryDescriptor * mapDesc = 0;
4519 IOMemoryMap * result = 0;
4520 OSIterator * iter;
4521
4522 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4523 mach_vm_size_t offset = mapping->fOffset + __offset;
4524 mach_vm_size_t length = mapping->fLength;
4525
4526 mapping->fOffset = offset;
4527
4528 LOCK;
4529
4530 do
4531 {
4532 if (kIOMapStatic & options)
4533 {
4534 result = mapping;
4535 addMapping(mapping);
4536 mapping->setMemoryDescriptor(this, 0);
4537 continue;
4538 }
4539
4540 if (kIOMapUnique & options)
4541 {
4542 addr64_t phys;
4543 IOByteCount physLen;
4544
4545 // if (owner != this) continue;
4546
4547 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4548 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4549 {
4550 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4551 if (!phys || (physLen < length))
4552 continue;
4553
4554 mapDesc = IOMemoryDescriptor::withAddressRange(
4555 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4556 if (!mapDesc)
4557 continue;
4558 offset = 0;
4559 mapping->fOffset = offset;
4560 }
4561 }
4562 else
4563 {
4564 // look for a compatible existing mapping
4565 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4566 {
4567 IOMemoryMap * lookMapping;
4568 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4569 {
4570 if ((result = lookMapping->copyCompatible(mapping)))
4571 {
4572 addMapping(result);
4573 result->setMemoryDescriptor(this, offset);
4574 break;
4575 }
4576 }
4577 iter->release();
4578 }
4579 if (result || (options & kIOMapReference))
4580 {
4581 if (result != mapping)
4582 {
4583 mapping->release();
4584 mapping = NULL;
4585 }
4586 continue;
4587 }
4588 }
4589
4590 if (!mapDesc)
4591 {
4592 mapDesc = this;
4593 mapDesc->retain();
4594 }
4595 IOReturn
4596 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4597 if (kIOReturnSuccess == kr)
4598 {
4599 result = mapping;
4600 mapDesc->addMapping(result);
4601 result->setMemoryDescriptor(mapDesc, offset);
4602 }
4603 else
4604 {
4605 mapping->release();
4606 mapping = NULL;
4607 }
4608 }
4609 while( false );
4610
4611 UNLOCK;
4612
4613 if (mapDesc)
4614 mapDesc->release();
4615
4616 return (result);
4617 }
4618
4619 void IOMemoryDescriptor::addMapping(
4620 IOMemoryMap * mapping )
4621 {
4622 if( mapping)
4623 {
4624 if( 0 == _mappings)
4625 _mappings = OSSet::withCapacity(1);
4626 if( _mappings )
4627 _mappings->setObject( mapping );
4628 }
4629 }
4630
4631 void IOMemoryDescriptor::removeMapping(
4632 IOMemoryMap * mapping )
4633 {
4634 if( _mappings)
4635 _mappings->removeObject( mapping);
4636 }
4637
4638 #ifndef __LP64__
4639 // obsolete initializers
4640 // - initWithOptions is the designated initializer
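/*
 * Illustrative sketch (not part of the original source): new code creates
 * descriptors through the initWithOptions() path, typically via a factory
 * such as withAddressRange(); the address, length and task below are
 * assumptions for the example.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       address, length, kIODirectionOutIn, current_task());
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... wired; safe to map or DMA ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */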
4641 bool
4642 IOMemoryDescriptor::initWithAddress(void * address,
4643 IOByteCount length,
4644 IODirection direction)
4645 {
4646 return( false );
4647 }
4648
4649 bool
4650 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4651 IOByteCount length,
4652 IODirection direction,
4653 task_t task)
4654 {
4655 return( false );
4656 }
4657
4658 bool
4659 IOMemoryDescriptor::initWithPhysicalAddress(
4660 IOPhysicalAddress address,
4661 IOByteCount length,
4662 IODirection direction )
4663 {
4664 return( false );
4665 }
4666
4667 bool
4668 IOMemoryDescriptor::initWithRanges(
4669 IOVirtualRange * ranges,
4670 UInt32 withCount,
4671 IODirection direction,
4672 task_t task,
4673 bool asReference)
4674 {
4675 return( false );
4676 }
4677
4678 bool
4679 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4680 UInt32 withCount,
4681 IODirection direction,
4682 bool asReference)
4683 {
4684 return( false );
4685 }
4686
4687 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4688 IOByteCount * lengthOfSegment)
4689 {
4690 return( 0 );
4691 }
4692 #endif /* !__LP64__ */
4693
4694 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4695
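// Serialize the descriptor as an OSArray of { "address", "length" }
// dictionaries, one per range; the ranges are snapshotted under the lock and
// the OSNumber/OSDictionary objects are built afterwards to avoid allocating
// while the lock is held.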
4696 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4697 {
4698 OSSymbol const *keys[2];
4699 OSObject *values[2];
4700 OSArray * array;
4701
4702 struct SerData {
4703 user_addr_t address;
4704 user_size_t length;
4705 } *vcopy;
4706 unsigned int index, nRanges;
4707 bool result;
4708
4709 IOOptionBits type = _flags & kIOMemoryTypeMask;
4710
4711 if (s == NULL) return false;
4712
4713 array = OSArray::withCapacity(4);
4714 if (!array) return (false);
4715
4716 nRanges = _rangesCount;
4717 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4718 if (vcopy == 0) return false;
4719
4720 keys[0] = OSSymbol::withCString("address");
4721 keys[1] = OSSymbol::withCString("length");
4722
4723 result = false;
4724 values[0] = values[1] = 0;
4725
4726 // From this point on, errors jump to the bail: label below.
4727
4728 // Copy the volatile data so we don't have to allocate memory
4729 // while the lock is held.
4730 LOCK;
4731 if (nRanges == _rangesCount) {
4732 Ranges vec = _ranges;
4733 for (index = 0; index < nRanges; index++) {
4734 mach_vm_address_t addr; mach_vm_size_t len;
4735 getAddrLenForInd(addr, len, type, vec, index);
4736 vcopy[index].address = addr;
4737 vcopy[index].length = len;
4738 }
4739 } else {
4740 // The descriptor changed out from under us. Give up.
4741 UNLOCK;
4742 result = false;
4743 goto bail;
4744 }
4745 UNLOCK;
4746
4747 for (index = 0; index < nRanges; index++)
4748 {
4749 user_addr_t addr = vcopy[index].address;
4750 IOByteCount len = (IOByteCount) vcopy[index].length;
4751 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4752 if (values[0] == 0) {
4753 result = false;
4754 goto bail;
4755 }
4756 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4757 if (values[1] == 0) {
4758 result = false;
4759 goto bail;
4760 }
4761 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4762 if (dict == 0) {
4763 result = false;
4764 goto bail;
4765 }
4766 array->setObject(dict);
4767 dict->release();
4768 values[0]->release();
4769 values[1]->release();
4770 values[0] = values[1] = 0;
4771 }
4772
4773 result = array->serialize(s);
4774
4775 bail:
4776 if (array)
4777 array->release();
4778 if (values[0])
4779 values[0]->release();
4780 if (values[1])
4781 values[1]->release();
4782 if (keys[0])
4783 keys[0]->release();
4784 if (keys[1])
4785 keys[1]->release();
4786 if (vcopy)
4787 IOFree(vcopy, sizeof(SerData) * nRanges);
4788
4789 return result;
4790 }
4791
4792 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4793
4794 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4795 #ifdef __LP64__
4796 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4797 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4798 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4799 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4800 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4801 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4802 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4803 #else /* !__LP64__ */
4804 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4805 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4806 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4809 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4811 #endif /* !__LP64__ */
4812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4820
4821 /* ex-inline function implementation */
4822 IOPhysicalAddress
4823 IOMemoryDescriptor::getPhysicalAddress()
4824 { return( getPhysicalSegment( 0, 0 )); }