1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53
54 #include <sys/uio.h>
55
56 __BEGIN_DECLS
57 #include <vm/pmap.h>
58 #include <vm/vm_pageout.h>
59 #include <mach/memory_object_types.h>
60 #include <device/device_port.h>
61
62 #include <mach/vm_prot.h>
63 #include <mach/mach_vm.h>
64 #include <vm/vm_fault.h>
65 #include <vm/vm_protos.h>
66
67 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
68 extern void ipc_port_release_send(ipc_port_t port);
69
70 // osfmk/device/iokit_rpc.c
71 unsigned int IODefaultCacheBits(addr64_t pa);
72 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
73
74 __END_DECLS
75
76 #define kIOMapperWaitSystem ((IOMapper *) 1)
77
78 static IOMapper * gIOSystemMapper = NULL;
79
80 ppnum_t gIOLastPage;
81
82 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
83
84 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
85
86 #define super IOMemoryDescriptor
87
88 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
89
90 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91
92 static IORecursiveLock * gIOMemoryLock;
93
94 #define LOCK IORecursiveLockLock( gIOMemoryLock)
95 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
96 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
97 #define WAKEUP \
98 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
99
100 #if 0
101 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
102 #else
103 #define DEBG(fmt, args...) {}
104 #endif
105
106 #define IOMD_DEBUG_DMAACTIVE 1
107
108 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
109
110 // Some data structures and accessor macros used by the initWithOptions
111 // function
112
113 enum ioPLBlockFlags {
114 kIOPLOnDevice = 0x00000001,
115 kIOPLExternUPL = 0x00000002,
116 };
117
118 struct IOMDPersistentInitData
119 {
120 const IOGeneralMemoryDescriptor * fMD;
121 IOMemoryReference * fMemRef;
122 };
123
124 struct ioPLBlock {
125 upl_t fIOPL;
126 vm_address_t fPageInfo; // Pointer to page list or index into it
127 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
128 ppnum_t fMappedPage; // Page number of first page in this iopl
129 unsigned int fPageOffset; // Offset within first page of iopl
130 unsigned int fFlags; // Flags
131 };
132
133 struct ioGMDData {
134 IOMapper * fMapper;
135 uint8_t fDMAMapNumAddressBits;
136 uint64_t fDMAMapAlignment;
137 uint64_t fMappedBase;
138 uint64_t fMappedLength;
139 uint64_t fPreparationID;
140 #if IOTRACKING
141 IOTracking fWireTracking;
142 #endif
143 unsigned int fPageCnt;
144 unsigned char fDiscontig:1;
145 unsigned char fCompletionError:1;
146 unsigned char _resv:6;
147 #if __LP64__
148 // align arrays to 8 bytes so following macros work
149 unsigned char fPad[3];
150 #endif
151 upl_page_info_t fPageList[1]; /* variable length */
152 ioPLBlock fBlocks[1]; /* variable length */
153 };
154
155 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
156 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
157 #define getNumIOPL(osd, d) \
158 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
159 #define getPageList(d) (&(d->fPageList[0]))
160 #define computeDataSize(p, u) \
161 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
162
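/*
 * Layout sketch (derived from the struct and macros above, not part of the
 * original file): an ioGMDData blob held in _memoryEntries is one
 * variable-length allocation -- the fixed header, then fPageCnt
 * upl_page_info_t entries, then the ioPLBlock array.  getIOPLList() finds the
 * ioPLBlock array by stepping past the page list, and getNumIOPL() derives
 * the block count from the OSData container's total length, e.g.:
 *
 *   ioGMDData * dataP    = getDataP(_memoryEntries);
 *   ioPLBlock * ioplList = getIOPLList(dataP);
 *   UInt        numIOPLs = getNumIOPL(_memoryEntries, dataP);
 *
 * (usage mirrors memoryReferenceMap() below; no new names are introduced)
 */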
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
166
167 extern "C" {
168
169 kern_return_t device_data_action(
170 uintptr_t device_handle,
171 ipc_port_t device_pager,
172 vm_prot_t protection,
173 vm_object_offset_t offset,
174 vm_size_t size)
175 {
176 kern_return_t kr;
177 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
178 IOMemoryDescriptor * memDesc;
179
180 LOCK;
181 memDesc = ref->dp.memory;
182 if( memDesc)
183 {
184 memDesc->retain();
185 kr = memDesc->handleFault(device_pager, offset, size);
186 memDesc->release();
187 }
188 else
189 kr = KERN_ABORTED;
190 UNLOCK;
191
192 return( kr );
193 }
194
195 kern_return_t device_close(
196 uintptr_t device_handle)
197 {
198 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
199
200 IODelete( ref, IOMemoryDescriptorReserved, 1 );
201
202 return( kIOReturnSuccess );
203 }
204 }; // end extern "C"
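/*
 * Note (descriptive comment only): device_data_action() and device_close()
 * are the callbacks reached on behalf of the device pager created in
 * memoryReferenceCreate() below -- device_pager_setup() is passed the
 * descriptor's IOMemoryDescriptorReserved as the device_handle, so a fault
 * on a device-backed mapping is forwarded here to the owning descriptor's
 * handleFault(), and device_close() frees the reserved state when the pager
 * goes away.
 */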
205
206 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
207
208 // Note this inline function uses C++ reference arguments to return values.
209 // This means that pointers are not passed and NULLs don't have to be
210 // checked for, since a NULL reference is illegal.
211 static inline void
212 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
213 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
214 {
215 assert(kIOMemoryTypeUIO == type
216 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
217 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
218 if (kIOMemoryTypeUIO == type) {
219 user_size_t us;
220 user_addr_t ad;
221 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
222 }
223 #ifndef __LP64__
224 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
225 IOAddressRange cur = r.v64[ind];
226 addr = cur.address;
227 len = cur.length;
228 }
229 #endif /* !__LP64__ */
230 else {
231 IOVirtualRange cur = r.v[ind];
232 addr = cur.address;
233 len = cur.length;
234 }
235 }
236
237 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
238
239 static IOReturn
240 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
241 {
242 IOReturn err = kIOReturnSuccess;
243
244 *control = VM_PURGABLE_SET_STATE;
245
246 enum { kIOMemoryPurgeableControlMask = 15 };
247
248 switch (kIOMemoryPurgeableControlMask & newState)
249 {
250 case kIOMemoryPurgeableKeepCurrent:
251 *control = VM_PURGABLE_GET_STATE;
252 break;
253
254 case kIOMemoryPurgeableNonVolatile:
255 *state = VM_PURGABLE_NONVOLATILE;
256 break;
257 case kIOMemoryPurgeableVolatile:
258 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
259 break;
260 case kIOMemoryPurgeableEmpty:
261 *state = VM_PURGABLE_EMPTY;
262 break;
263 default:
264 err = kIOReturnBadArgument;
265 break;
266 }
267 return (err);
268 }
269
270 static IOReturn
271 purgeableStateBits(int * state)
272 {
273 IOReturn err = kIOReturnSuccess;
274
275 switch (VM_PURGABLE_STATE_MASK & *state)
276 {
277 case VM_PURGABLE_NONVOLATILE:
278 *state = kIOMemoryPurgeableNonVolatile;
279 break;
280 case VM_PURGABLE_VOLATILE:
281 *state = kIOMemoryPurgeableVolatile;
282 break;
283 case VM_PURGABLE_EMPTY:
284 *state = kIOMemoryPurgeableEmpty;
285 break;
286 default:
287 *state = kIOMemoryPurgeableNonVolatile;
288 err = kIOReturnNotReady;
289 break;
290 }
291 return (err);
292 }
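/*
 * Illustrative sketch (assumed usage, matching memoryReferenceSetPurgeable()
 * further down): purgeableControlBits() and purgeableStateBits() translate
 * between the IOKit kIOMemoryPurgeable* constants and the Mach VM_PURGABLE_*
 * values in both directions, e.g.
 *
 *   vm_purgable_t control; int state;
 *   purgeableControlBits(kIOMemoryPurgeableVolatile, &control, &state);
 *   // control == VM_PURGABLE_SET_STATE, state == VM_PURGABLE_VOLATILE
 */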
293
294
295 static vm_prot_t
296 vmProtForCacheMode(IOOptionBits cacheMode)
297 {
298 vm_prot_t prot = 0;
299 switch (cacheMode)
300 {
301 case kIOInhibitCache:
302 SET_MAP_MEM(MAP_MEM_IO, prot);
303 break;
304
305 case kIOWriteThruCache:
306 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
307 break;
308
309 case kIOWriteCombineCache:
310 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
311 break;
312
313 case kIOCopybackCache:
314 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
315 break;
316
317 case kIOCopybackInnerCache:
318 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
319 break;
320
321 case kIODefaultCache:
322 default:
323 SET_MAP_MEM(MAP_MEM_NOOP, prot);
324 break;
325 }
326
327 return (prot);
328 }
329
330 static unsigned int
331 pagerFlagsForCacheMode(IOOptionBits cacheMode)
332 {
333 unsigned int pagerFlags = 0;
334 switch (cacheMode)
335 {
336 case kIOInhibitCache:
337 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
338 break;
339
340 case kIOWriteThruCache:
341 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
342 break;
343
344 case kIOWriteCombineCache:
345 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
346 break;
347
348 case kIOCopybackCache:
349 pagerFlags = DEVICE_PAGER_COHERENT;
350 break;
351
352 case kIOCopybackInnerCache:
353 pagerFlags = DEVICE_PAGER_COHERENT;
354 break;
355
356 case kIODefaultCache:
357 default:
358 pagerFlags = -1U;
359 break;
360 }
361 return (pagerFlags);
362 }
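/*
 * Illustrative sketch: the two helpers above translate an IOKit cache mode
 * into (a) vm_prot_t bits folded into the protection passed to
 * mach_make_memory_entry_64() and (b) device pager flags for physical
 * descriptors.  memoryReferenceCreate() below extracts the cache mode from
 * _flags and applies it like so:
 *
 *   IOOptionBits cacheMode = (_flags & kIOMemoryBufferCacheMask)
 *                                >> kIOMemoryBufferCacheShift;
 *   vm_prot_t    prot      = VM_PROT_READ | vmProtForCacheMode(cacheMode);
 *
 * kIODefaultCache maps to MAP_MEM_NOOP / -1U, i.e. leave existing attributes
 * alone.
 */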
363
364 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
365 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
366
367 struct IOMemoryEntry
368 {
369 ipc_port_t entry;
370 int64_t offset;
371 uint64_t size;
372 };
373
374 struct IOMemoryReference
375 {
376 volatile SInt32 refCount;
377 vm_prot_t prot;
378 uint32_t capacity;
379 uint32_t count;
380 IOMemoryEntry entries[0];
381 };
382
383 enum
384 {
385 kIOMemoryReferenceReuse = 0x00000001,
386 kIOMemoryReferenceWrite = 0x00000002,
387 };
388
389 SInt32 gIOMemoryReferenceCount;
390
391 IOMemoryReference *
392 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
393 {
394 IOMemoryReference * ref;
395 size_t newSize, oldSize, copySize;
396
397 newSize = (sizeof(IOMemoryReference)
398 - sizeof(ref->entries)
399 + capacity * sizeof(ref->entries[0]));
400 ref = (typeof(ref)) IOMalloc(newSize);
401 if (realloc)
402 {
403 oldSize = (sizeof(IOMemoryReference)
404 - sizeof(realloc->entries)
405 + realloc->capacity * sizeof(realloc->entries[0]));
406 copySize = oldSize;
407 if (copySize > newSize) copySize = newSize;
408 if (ref) bcopy(realloc, ref, copySize);
409 IOFree(realloc, oldSize);
410 }
411 else if (ref)
412 {
413 bzero(ref, sizeof(*ref));
414 ref->refCount = 1;
415 OSIncrementAtomic(&gIOMemoryReferenceCount);
416 }
417 if (!ref) return (0);
418 ref->capacity = capacity;
419 return (ref);
420 }
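/*
 * Note (descriptive comment only): memoryReferenceAlloc() also acts as a
 * realloc -- a caller that outgrows the entry array passes the old reference
 * back and receives a larger copy, as memoryReferenceCreate() does below:
 *
 *   if (count >= ref->capacity)
 *       ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
 *
 * The old allocation is freed even when the copy fails, so a NULL return
 * means the previous reference is gone as well.
 */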
421
422 void
423 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
424 {
425 IOMemoryEntry * entries;
426 size_t size;
427
428 entries = ref->entries + ref->count;
429 while (entries > &ref->entries[0])
430 {
431 entries--;
432 ipc_port_release_send(entries->entry);
433 }
434 size = (sizeof(IOMemoryReference)
435 - sizeof(ref->entries)
436 + ref->capacity * sizeof(ref->entries[0]));
437 IOFree(ref, size);
438
439 OSDecrementAtomic(&gIOMemoryReferenceCount);
440 }
441
442 void
443 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
444 {
445 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
446 }
447
448
449 IOReturn
450 IOGeneralMemoryDescriptor::memoryReferenceCreate(
451 IOOptionBits options,
452 IOMemoryReference ** reference)
453 {
454 enum { kCapacity = 4, kCapacityInc = 4 };
455
456 kern_return_t err;
457 IOMemoryReference * ref;
458 IOMemoryEntry * entries;
459 IOMemoryEntry * cloneEntries;
460 vm_map_t map;
461 ipc_port_t entry, cloneEntry;
462 vm_prot_t prot;
463 memory_object_size_t actualSize;
464 uint32_t rangeIdx;
465 uint32_t count;
466 mach_vm_address_t entryAddr, endAddr, entrySize;
467 mach_vm_size_t srcAddr, srcLen;
468 mach_vm_size_t nextAddr, nextLen;
469 mach_vm_size_t offset, remain;
470 IOByteCount physLen;
471 IOOptionBits type = (_flags & kIOMemoryTypeMask);
472 IOOptionBits cacheMode;
473 unsigned int pagerFlags;
474 vm_tag_t tag;
475
476 ref = memoryReferenceAlloc(kCapacity, NULL);
477 if (!ref) return (kIOReturnNoMemory);
478
479 tag = IOMemoryTag(kernel_map);
480 entries = &ref->entries[0];
481 count = 0;
482
483 offset = 0;
484 rangeIdx = 0;
485 if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
486 else
487 {
488 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
489 nextLen = physLen;
490
491 // default cache mode for physical
492 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
493 {
494 IOOptionBits mode;
495 pagerFlags = IODefaultCacheBits(nextAddr);
496 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
497 {
498 if (DEVICE_PAGER_GUARDED & pagerFlags)
499 mode = kIOInhibitCache;
500 else
501 mode = kIOWriteCombineCache;
502 }
503 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
504 mode = kIOWriteThruCache;
505 else
506 mode = kIOCopybackCache;
507 _flags |= (mode << kIOMemoryBufferCacheShift);
508 }
509 }
510
511 // cache mode & vm_prot
512 prot = VM_PROT_READ;
513 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
514 prot |= vmProtForCacheMode(cacheMode);
515 // VM system requires write access to change cache mode
516 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
517 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
518 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
519
520 if ((kIOMemoryReferenceReuse & options) && _memRef)
521 {
522 cloneEntries = &_memRef->entries[0];
523 prot |= MAP_MEM_NAMED_REUSE;
524 }
525
526 if (_task)
527 {
528 // virtual ranges
529
530 if (kIOMemoryBufferPageable & _flags)
531 {
532 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
533 prot |= MAP_MEM_NAMED_CREATE;
534 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
535 prot |= VM_PROT_WRITE;
536 map = NULL;
537 }
538 else map = get_task_map(_task);
539
540 remain = _length;
541 while (remain)
542 {
543 srcAddr = nextAddr;
544 srcLen = nextLen;
545 nextAddr = 0;
546 nextLen = 0;
547 // coalesce addr range
548 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
549 {
550 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
551 if ((srcAddr + srcLen) != nextAddr) break;
552 srcLen += nextLen;
553 }
554 entryAddr = trunc_page_64(srcAddr);
555 endAddr = round_page_64(srcAddr + srcLen);
556 do
557 {
558 entrySize = (endAddr - entryAddr);
559 if (!entrySize) break;
560 actualSize = entrySize;
561
562 cloneEntry = MACH_PORT_NULL;
563 if (MAP_MEM_NAMED_REUSE & prot)
564 {
565 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
566 else prot &= ~MAP_MEM_NAMED_REUSE;
567 }
568
569 err = mach_make_memory_entry_64(map,
570 &actualSize, entryAddr, prot, &entry, cloneEntry);
571
572 if (KERN_SUCCESS != err) break;
573 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
574
575 if (count >= ref->capacity)
576 {
577 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
578 entries = &ref->entries[count];
579 }
580 entries->entry = entry;
581 entries->size = actualSize;
582 entries->offset = offset + (entryAddr - srcAddr);
583 entryAddr += actualSize;
584 if (MAP_MEM_NAMED_REUSE & prot)
585 {
586 if ((cloneEntries->entry == entries->entry)
587 && (cloneEntries->size == entries->size)
588 && (cloneEntries->offset == entries->offset)) cloneEntries++;
589 else prot &= ~MAP_MEM_NAMED_REUSE;
590 }
591 entries++;
592 count++;
593 }
594 while (true);
595 offset += srcLen;
596 remain -= srcLen;
597 }
598 }
599 else
600 {
601 // _task == 0, physical or kIOMemoryTypeUPL
602 memory_object_t pager;
603 vm_size_t size = ptoa_32(_pages);
604
605 if (!getKernelReserved()) panic("getKernelReserved");
606
607 reserved->dp.pagerContig = (1 == _rangesCount);
608 reserved->dp.memory = this;
609
610 pagerFlags = pagerFlagsForCacheMode(cacheMode);
611 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
612 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
613
614 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
615 size, pagerFlags);
616 assert (pager);
617 if (!pager) err = kIOReturnVMError;
618 else
619 {
620 srcAddr = nextAddr;
621 entryAddr = trunc_page_64(srcAddr);
622 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
623 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
624 assert (KERN_SUCCESS == err);
625 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
626 else
627 {
628 reserved->dp.devicePager = pager;
629 entries->entry = entry;
630 entries->size = size;
631 entries->offset = offset + (entryAddr - srcAddr);
632 entries++;
633 count++;
634 }
635 }
636 }
637
638 ref->count = count;
639 ref->prot = prot;
640
641 if (KERN_SUCCESS == err)
642 {
643 if (MAP_MEM_NAMED_REUSE & prot)
644 {
645 memoryReferenceFree(ref);
646 OSIncrementAtomic(&_memRef->refCount);
647 ref = _memRef;
648 }
649 }
650 else
651 {
652 memoryReferenceFree(ref);
653 ref = NULL;
654 }
655
656 *reference = ref;
657
658 return (err);
659 }
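/*
 * Summary sketch of memoryReferenceCreate() (descriptive comment only): for
 * task-backed descriptors, contiguous ranges are coalesced and each
 * page-aligned chunk becomes a named entry via mach_make_memory_entry_64();
 * with kIOMemoryReferenceReuse the new entries are compared against the
 * existing _memRef and, if they all match, the existing reference is retained
 * and the duplicate freed.  Physical / UPL descriptors (_task == 0) get a
 * single device-pager-backed entry via mach_memory_object_memory_entry_64().
 */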
660
661 kern_return_t
662 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
663 {
664 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
665 IOReturn err;
666 vm_map_offset_t addr;
667
668 addr = ref->mapped;
669
670 err = vm_map_enter_mem_object(map, &addr, ref->size,
671 (vm_map_offset_t) 0,
672 (((ref->options & kIOMapAnywhere)
673 ? VM_FLAGS_ANYWHERE
674 : VM_FLAGS_FIXED)
675 | VM_MAKE_TAG(ref->tag)
676 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
677 IPC_PORT_NULL,
678 (memory_object_offset_t) 0,
679 false, /* copy */
680 ref->prot,
681 ref->prot,
682 VM_INHERIT_NONE);
683 if (KERN_SUCCESS == err)
684 {
685 ref->mapped = (mach_vm_address_t) addr;
686 ref->map = map;
687 }
688
689 return( err );
690 }
691
692 IOReturn
693 IOGeneralMemoryDescriptor::memoryReferenceMap(
694 IOMemoryReference * ref,
695 vm_map_t map,
696 mach_vm_size_t inoffset,
697 mach_vm_size_t size,
698 IOOptionBits options,
699 mach_vm_address_t * inaddr)
700 {
701 IOReturn err;
702 int64_t offset = inoffset;
703 uint32_t rangeIdx, entryIdx;
704 vm_map_offset_t addr, mapAddr;
705 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
706
707 mach_vm_address_t nextAddr;
708 mach_vm_size_t nextLen;
709 IOByteCount physLen;
710 IOMemoryEntry * entry;
711 vm_prot_t prot, memEntryCacheMode;
712 IOOptionBits type;
713 IOOptionBits cacheMode;
714 vm_tag_t tag;
715
716 /*
717 * For the kIOMapPrefault option.
718 */
719 upl_page_info_t *pageList = NULL;
720 UInt currentPageIndex = 0;
721
722 type = _flags & kIOMemoryTypeMask;
723 prot = VM_PROT_READ;
724 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
725 prot &= ref->prot;
726
727 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
728 if (kIODefaultCache != cacheMode)
729 {
730 // VM system requires write access to update named entry cache mode
731 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
732 }
733
734 tag = IOMemoryTag(map);
735
736 if (_task)
737 {
738 // Find first range for offset
739 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
740 {
741 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
742 if (remain < nextLen) break;
743 remain -= nextLen;
744 }
745 }
746 else
747 {
748 rangeIdx = 0;
749 remain = 0;
750 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
751 nextLen = size;
752 }
753
754 assert(remain < nextLen);
755 if (remain >= nextLen) return (kIOReturnBadArgument);
756
757 nextAddr += remain;
758 nextLen -= remain;
759 pageOffset = (page_mask & nextAddr);
760 addr = 0;
761 if (!(options & kIOMapAnywhere))
762 {
763 addr = *inaddr;
764 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
765 addr -= pageOffset;
766 }
767
768 // find first entry for offset
769 for (entryIdx = 0;
770 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
771 entryIdx++) {}
772 entryIdx--;
773 entry = &ref->entries[entryIdx];
774
775 // allocate VM
776 size = round_page_64(size + pageOffset);
777 if (kIOMapOverwrite & options)
778 {
779 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
780 {
781 map = IOPageableMapForAddress(addr);
782 }
783 err = KERN_SUCCESS;
784 }
785 else
786 {
787 IOMemoryDescriptorMapAllocRef ref;
788 ref.map = map;
789 ref.tag = tag;
790 ref.options = options;
791 ref.size = size;
792 ref.prot = prot;
793 if (options & kIOMapAnywhere)
794 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
795 ref.mapped = 0;
796 else
797 ref.mapped = addr;
798 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
799 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
800 else
801 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
802 if (KERN_SUCCESS == err)
803 {
804 addr = ref.mapped;
805 map = ref.map;
806 }
807 }
808
809 /*
810 * Prefaulting is only possible if we wired the memory earlier. Check the
811 * memory type, and the underlying data.
812 */
813 if (options & kIOMapPrefault)
814 {
815 /*
816 * The memory must have been wired by calling ::prepare(), otherwise
817 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted.
818 */
819 assert(map != kernel_map);
820 assert(_wireCount != 0);
821 assert(_memoryEntries != NULL);
822 if ((map == kernel_map) ||
823 (_wireCount == 0) ||
824 (_memoryEntries == NULL))
825 {
826 return kIOReturnBadArgument;
827 }
828
829 // Get the page list.
830 ioGMDData* dataP = getDataP(_memoryEntries);
831 ioPLBlock const* ioplList = getIOPLList(dataP);
832 pageList = getPageList(dataP);
833
834 // Get the number of IOPLs.
835 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
836
837 /*
838 * Scan through the IOPL Info Blocks, looking for the first block containing
839 * the offset. The search will go one block past it, so we'll need to step
840 * back to the right block at the end.
841 */
842 UInt ioplIndex = 0;
843 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
844 ioplIndex++;
845 ioplIndex--;
846
847 // Retrieve the IOPL info block.
848 ioPLBlock ioplInfo = ioplList[ioplIndex];
849
850 /*
851 * For external UPLs, fPageInfo points directly to the UPL's
852 * upl_page_info_t array.
853 */
854 if (ioplInfo.fFlags & kIOPLExternUPL)
855 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
856 else
857 pageList = &pageList[ioplInfo.fPageInfo];
858
859 // Rebase [offset] into the IOPL in order to look up the first page index.
860 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
861
862 // Retrieve the index of the first page corresponding to the offset.
863 currentPageIndex = atop_32(offsetInIOPL);
864 }
865
866 // enter mappings
867 remain = size;
868 mapAddr = addr;
869 addr += pageOffset;
870
871 while (remain && (KERN_SUCCESS == err))
872 {
873 entryOffset = offset - entry->offset;
874 if ((page_mask & entryOffset) != pageOffset)
875 {
876 err = kIOReturnNotAligned;
877 break;
878 }
879
880 if (kIODefaultCache != cacheMode)
881 {
882 vm_size_t unused = 0;
883 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
884 memEntryCacheMode, NULL, entry->entry);
885 assert (KERN_SUCCESS == err);
886 }
887
888 entryOffset -= pageOffset;
889 if (entryOffset >= entry->size) panic("entryOffset");
890 chunk = entry->size - entryOffset;
891 if (chunk)
892 {
893 if (chunk > remain) chunk = remain;
894 if (options & kIOMapPrefault)
895 {
896 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
897 err = vm_map_enter_mem_object_prefault(map,
898 &mapAddr,
899 chunk, 0 /* mask */,
900 (VM_FLAGS_FIXED
901 | VM_FLAGS_OVERWRITE
902 | VM_MAKE_TAG(tag)
903 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
904 entry->entry,
905 entryOffset,
906 prot, // cur
907 prot, // max
908 &pageList[currentPageIndex],
909 nb_pages);
910
911 // Compute the next index in the page list.
912 currentPageIndex += nb_pages;
913 assert(currentPageIndex <= _pages);
914 }
915 else
916 {
917 err = vm_map_enter_mem_object(map,
918 &mapAddr,
919 chunk, 0 /* mask */,
920 (VM_FLAGS_FIXED
921 | VM_FLAGS_OVERWRITE
922 | VM_MAKE_TAG(tag)
923 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
924 entry->entry,
925 entryOffset,
926 false, // copy
927 prot, // cur
928 prot, // max
929 VM_INHERIT_NONE);
930 }
931 if (KERN_SUCCESS != err) break;
932 remain -= chunk;
933 if (!remain) break;
934 mapAddr += chunk;
935 offset += chunk - pageOffset;
936 }
937 pageOffset = 0;
938 entry++;
939 entryIdx++;
940 if (entryIdx >= ref->count)
941 {
942 err = kIOReturnOverrun;
943 break;
944 }
945 }
946
947 if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
948 {
949 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
950 addr = 0;
951 }
952 *inaddr = addr;
953
954 return (err);
955 }
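/*
 * Summary sketch of memoryReferenceMap() (descriptive comment only): the
 * requested [inoffset, size) window is located within the named entries, VM
 * space is reserved first (unless kIOMapOverwrite), and each entry chunk is
 * entered with vm_map_enter_mem_object(); with kIOMapPrefault, the wired UPL
 * page list gathered by prepare() is handed to
 * vm_map_enter_mem_object_prefault() so the mapping is populated up front
 * instead of faulting lazily.
 */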
956
957 IOReturn
958 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
959 IOMemoryReference * ref,
960 IOByteCount * residentPageCount,
961 IOByteCount * dirtyPageCount)
962 {
963 IOReturn err;
964 IOMemoryEntry * entries;
965 unsigned int resident, dirty;
966 unsigned int totalResident, totalDirty;
967
968 totalResident = totalDirty = 0;
969 entries = ref->entries + ref->count;
970 while (entries > &ref->entries[0])
971 {
972 entries--;
973 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
974 if (KERN_SUCCESS != err) break;
975 totalResident += resident;
976 totalDirty += dirty;
977 }
978
979 if (residentPageCount) *residentPageCount = totalResident;
980 if (dirtyPageCount) *dirtyPageCount = totalDirty;
981 return (err);
982 }
983
984 IOReturn
985 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
986 IOMemoryReference * ref,
987 IOOptionBits newState,
988 IOOptionBits * oldState)
989 {
990 IOReturn err;
991 IOMemoryEntry * entries;
992 vm_purgable_t control;
993 int totalState, state;
994
995 entries = ref->entries + ref->count;
996 totalState = kIOMemoryPurgeableNonVolatile;
997 while (entries > &ref->entries[0])
998 {
999 entries--;
1000
1001 err = purgeableControlBits(newState, &control, &state);
1002 if (KERN_SUCCESS != err) break;
1003 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1004 if (KERN_SUCCESS != err) break;
1005 err = purgeableStateBits(&state);
1006 if (KERN_SUCCESS != err) break;
1007
1008 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1009 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1010 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1011 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1012 else totalState = kIOMemoryPurgeableNonVolatile;
1013 }
1014
1015 if (oldState) *oldState = totalState;
1016 return (err);
1017 }
1018
1019 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1020
1021 IOMemoryDescriptor *
1022 IOMemoryDescriptor::withAddress(void * address,
1023 IOByteCount length,
1024 IODirection direction)
1025 {
1026 return IOMemoryDescriptor::
1027 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1028 }
1029
1030 #ifndef __LP64__
1031 IOMemoryDescriptor *
1032 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1033 IOByteCount length,
1034 IODirection direction,
1035 task_t task)
1036 {
1037 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1038 if (that)
1039 {
1040 if (that->initWithAddress(address, length, direction, task))
1041 return that;
1042
1043 that->release();
1044 }
1045 return 0;
1046 }
1047 #endif /* !__LP64__ */
1048
1049 IOMemoryDescriptor *
1050 IOMemoryDescriptor::withPhysicalAddress(
1051 IOPhysicalAddress address,
1052 IOByteCount length,
1053 IODirection direction )
1054 {
1055 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1056 }
1057
1058 #ifndef __LP64__
1059 IOMemoryDescriptor *
1060 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1061 UInt32 withCount,
1062 IODirection direction,
1063 task_t task,
1064 bool asReference)
1065 {
1066 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1067 if (that)
1068 {
1069 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1070 return that;
1071
1072 that->release();
1073 }
1074 return 0;
1075 }
1076 #endif /* !__LP64__ */
1077
1078 IOMemoryDescriptor *
1079 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1080 mach_vm_size_t length,
1081 IOOptionBits options,
1082 task_t task)
1083 {
1084 IOAddressRange range = { address, length };
1085 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1086 }
1087
1088 IOMemoryDescriptor *
1089 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1090 UInt32 rangeCount,
1091 IOOptionBits options,
1092 task_t task)
1093 {
1094 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1095 if (that)
1096 {
1097 if (task)
1098 options |= kIOMemoryTypeVirtual64;
1099 else
1100 options |= kIOMemoryTypePhysical64;
1101
1102 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1103 return that;
1104
1105 that->release();
1106 }
1107
1108 return 0;
1109 }
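/*
 * Illustrative usage sketch for the factory methods above (not part of this
 * file; "buffer" and "length" are hypothetical caller-supplied values):
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       (mach_vm_address_t) buffer, length, kIODirectionOutIn, kernel_task);
 *   if (md)
 *   {
 *       if (kIOReturnSuccess == md->prepare())   // wire the pages
 *       {
 *           // ... access the memory / program DMA ...
 *           md->complete();
 *       }
 *       md->release();
 *   }
 */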
1110
1111
1112 /*
1113 * withOptions:
1114 *
1115 * Create a new IOMemoryDescriptor. The buffer is made up of several
1116 * virtual address ranges, from a given task.
1117 *
1118 * Passing the ranges as a reference will avoid an extra allocation.
1119 */
1120 IOMemoryDescriptor *
1121 IOMemoryDescriptor::withOptions(void * buffers,
1122 UInt32 count,
1123 UInt32 offset,
1124 task_t task,
1125 IOOptionBits opts,
1126 IOMapper * mapper)
1127 {
1128 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1129
1130 if (self
1131 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1132 {
1133 self->release();
1134 return 0;
1135 }
1136
1137 return self;
1138 }
1139
1140 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1141 UInt32 count,
1142 UInt32 offset,
1143 task_t task,
1144 IOOptionBits options,
1145 IOMapper * mapper)
1146 {
1147 return( false );
1148 }
1149
1150 #ifndef __LP64__
1151 IOMemoryDescriptor *
1152 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1153 UInt32 withCount,
1154 IODirection direction,
1155 bool asReference)
1156 {
1157 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1158 if (that)
1159 {
1160 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1161 return that;
1162
1163 that->release();
1164 }
1165 return 0;
1166 }
1167
1168 IOMemoryDescriptor *
1169 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1170 IOByteCount offset,
1171 IOByteCount length,
1172 IODirection direction)
1173 {
1174 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1175 }
1176 #endif /* !__LP64__ */
1177
1178 IOMemoryDescriptor *
1179 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1180 {
1181 IOGeneralMemoryDescriptor *origGenMD =
1182 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1183
1184 if (origGenMD)
1185 return IOGeneralMemoryDescriptor::
1186 withPersistentMemoryDescriptor(origGenMD);
1187 else
1188 return 0;
1189 }
1190
1191 IOMemoryDescriptor *
1192 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1193 {
1194 IOMemoryReference * memRef;
1195
1196 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1197
1198 if (memRef == originalMD->_memRef)
1199 {
1200 originalMD->retain(); // Add a new reference to ourselves
1201 originalMD->memoryReferenceRelease(memRef);
1202 return originalMD;
1203 }
1204
1205 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1206 IOMDPersistentInitData initData = { originalMD, memRef };
1207
1208 if (self
1209 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1210 self->release();
1211 self = 0;
1212 }
1213 return self;
1214 }
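/*
 * Note on withPersistentMemoryDescriptor() (descriptive comment only): if the
 * freshly created memory reference matches the original descriptor's _memRef
 * (the named-entry reuse case), the original descriptor itself is returned
 * with an extra retain; otherwise a new IOGeneralMemoryDescriptor is
 * initialised from the persistent init data, carrying the new memory
 * reference.
 */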
1215
1216 #ifndef __LP64__
1217 bool
1218 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1219 IOByteCount withLength,
1220 IODirection withDirection)
1221 {
1222 _singleRange.v.address = (vm_offset_t) address;
1223 _singleRange.v.length = withLength;
1224
1225 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1226 }
1227
1228 bool
1229 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1230 IOByteCount withLength,
1231 IODirection withDirection,
1232 task_t withTask)
1233 {
1234 _singleRange.v.address = address;
1235 _singleRange.v.length = withLength;
1236
1237 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1238 }
1239
1240 bool
1241 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1242 IOPhysicalAddress address,
1243 IOByteCount withLength,
1244 IODirection withDirection )
1245 {
1246 _singleRange.p.address = address;
1247 _singleRange.p.length = withLength;
1248
1249 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1250 }
1251
1252 bool
1253 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1254 IOPhysicalRange * ranges,
1255 UInt32 count,
1256 IODirection direction,
1257 bool reference)
1258 {
1259 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1260
1261 if (reference)
1262 mdOpts |= kIOMemoryAsReference;
1263
1264 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1265 }
1266
1267 bool
1268 IOGeneralMemoryDescriptor::initWithRanges(
1269 IOVirtualRange * ranges,
1270 UInt32 count,
1271 IODirection direction,
1272 task_t task,
1273 bool reference)
1274 {
1275 IOOptionBits mdOpts = direction;
1276
1277 if (reference)
1278 mdOpts |= kIOMemoryAsReference;
1279
1280 if (task) {
1281 mdOpts |= kIOMemoryTypeVirtual;
1282
1283 // Auto-prepare if this is a kernel memory descriptor, as very few
1284 // clients bother to prepare() kernel memory.
1285 // Preparation was never enforced for kernel memory, so preserve that behaviour.
1286 if (task == kernel_task)
1287 mdOpts |= kIOMemoryAutoPrepare;
1288 }
1289 else
1290 mdOpts |= kIOMemoryTypePhysical;
1291
1292 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1293 }
1294 #endif /* !__LP64__ */
1295
1296 /*
1297 * initWithOptions:
1298 *
1299 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1300 * address ranges from a given task, several physical ranges, a UPL from the
1301 * ubc system, or a uio (possibly 64-bit) from the BSD subsystem.
1302 *
1303 * Passing the ranges as a reference will avoid an extra allocation.
1304 *
1305 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1306 * existing instance -- note this behavior is not commonly supported in other
1307 * I/O Kit classes, although it is supported here.
1308 */
1309
1310 bool
1311 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1312 UInt32 count,
1313 UInt32 offset,
1314 task_t task,
1315 IOOptionBits options,
1316 IOMapper * mapper)
1317 {
1318 IOOptionBits type = options & kIOMemoryTypeMask;
1319
1320 #ifndef __LP64__
1321 if (task
1322 && (kIOMemoryTypeVirtual == type)
1323 && vm_map_is_64bit(get_task_map(task))
1324 && ((IOVirtualRange *) buffers)->address)
1325 {
1326 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1327 return false;
1328 }
1329 #endif /* !__LP64__ */
1330
1331 // Grab the original MD's configuration data to initialise the
1332 // arguments to this function.
1333 if (kIOMemoryTypePersistentMD == type) {
1334
1335 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1336 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1337 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1338
1339 // Only accept persistent memory descriptors with valid dataP data.
1340 assert(orig->_rangesCount == 1);
1341 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1342 return false;
1343
1344 _memRef = initData->fMemRef; // Grab the new named entry
1345 options = orig->_flags & ~kIOMemoryAsReference;
1346 type = options & kIOMemoryTypeMask;
1347 buffers = orig->_ranges.v;
1348 count = orig->_rangesCount;
1349
1350 // Now grab the original task and whatever mapper was previously used
1351 task = orig->_task;
1352 mapper = dataP->fMapper;
1353
1354 // We are ready to go through the original initialisation now
1355 }
1356
1357 switch (type) {
1358 case kIOMemoryTypeUIO:
1359 case kIOMemoryTypeVirtual:
1360 #ifndef __LP64__
1361 case kIOMemoryTypeVirtual64:
1362 #endif /* !__LP64__ */
1363 assert(task);
1364 if (!task)
1365 return false;
1366 break;
1367
1368 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1369 #ifndef __LP64__
1370 case kIOMemoryTypePhysical64:
1371 #endif /* !__LP64__ */
1372 case kIOMemoryTypeUPL:
1373 assert(!task);
1374 break;
1375 default:
1376 return false; /* bad argument */
1377 }
1378
1379 assert(buffers);
1380 assert(count);
1381
1382 /*
1383 * We can check the _initialized instance variable before having ever set
1384 * it to an initial value because I/O Kit guarantees that all our instance
1385 * variables are zeroed on an object's allocation.
1386 */
1387
1388 if (_initialized) {
1389 /*
1390 * An existing memory descriptor is being retargeted to point to
1391 * somewhere else. Clean up our present state.
1392 */
1393 IOOptionBits type = _flags & kIOMemoryTypeMask;
1394 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1395 {
1396 while (_wireCount)
1397 complete();
1398 }
1399 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1400 {
1401 if (kIOMemoryTypeUIO == type)
1402 uio_free((uio_t) _ranges.v);
1403 #ifndef __LP64__
1404 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1405 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1406 #endif /* !__LP64__ */
1407 else
1408 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1409 }
1410
1411 options |= (kIOMemoryRedirected & _flags);
1412 if (!(kIOMemoryRedirected & options))
1413 {
1414 if (_memRef)
1415 {
1416 memoryReferenceRelease(_memRef);
1417 _memRef = 0;
1418 }
1419 if (_mappings)
1420 _mappings->flushCollection();
1421 }
1422 }
1423 else {
1424 if (!super::init())
1425 return false;
1426 _initialized = true;
1427 }
1428
1429 // Grab the appropriate mapper
1430 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1431 if (kIOMemoryMapperNone & options)
1432 mapper = 0; // No Mapper
1433 else if (mapper == kIOMapperSystem) {
1434 IOMapper::checkForSystemMapper();
1435 gIOSystemMapper = mapper = IOMapper::gSystem;
1436 }
1437
1438 // Temp binary compatibility for kIOMemoryThreadSafe
1439 if (kIOMemoryReserved6156215 & options)
1440 {
1441 options &= ~kIOMemoryReserved6156215;
1442 options |= kIOMemoryThreadSafe;
1443 }
1444 // Remove the dynamic internal use flags from the initial setting
1445 options &= ~(kIOMemoryPreparedReadOnly);
1446 _flags = options;
1447 _task = task;
1448
1449 #ifndef __LP64__
1450 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1451 #endif /* !__LP64__ */
1452
1453 __iomd_reservedA = 0;
1454 __iomd_reservedB = 0;
1455 _highestPage = 0;
1456
1457 if (kIOMemoryThreadSafe & options)
1458 {
1459 if (!_prepareLock)
1460 _prepareLock = IOLockAlloc();
1461 }
1462 else if (_prepareLock)
1463 {
1464 IOLockFree(_prepareLock);
1465 _prepareLock = NULL;
1466 }
1467
1468 if (kIOMemoryTypeUPL == type) {
1469
1470 ioGMDData *dataP;
1471 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1472
1473 if (!initMemoryEntries(dataSize, mapper)) return (false);
1474 dataP = getDataP(_memoryEntries);
1475 dataP->fPageCnt = 0;
1476
1477 // _wireCount++; // UPLs start out life wired
1478
1479 _length = count;
1480 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1481
1482 ioPLBlock iopl;
1483 iopl.fIOPL = (upl_t) buffers;
1484 upl_set_referenced(iopl.fIOPL, true);
1485 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1486
1487 if (upl_get_size(iopl.fIOPL) < (count + offset))
1488 panic("short external upl");
1489
1490 _highestPage = upl_get_highest_page(iopl.fIOPL);
1491
1492 // Set the flags; kIOPLOnDevice is conveniently equal to 1
1493 iopl.fFlags = pageList->device | kIOPLExternUPL;
1494 if (!pageList->device) {
1495 // Pre-compute the offset into the UPL's page list
1496 pageList = &pageList[atop_32(offset)];
1497 offset &= PAGE_MASK;
1498 }
1499 iopl.fIOMDOffset = 0;
1500 iopl.fMappedPage = 0;
1501 iopl.fPageInfo = (vm_address_t) pageList;
1502 iopl.fPageOffset = offset;
1503 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1504 }
1505 else {
1506 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1507 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1508
1509 // Initialize the memory descriptor
1510 if (options & kIOMemoryAsReference) {
1511 #ifndef __LP64__
1512 _rangesIsAllocated = false;
1513 #endif /* !__LP64__ */
1514
1515 // Hack assignment to get the buffer arg into _ranges.
1516 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1517 // work, C++ sigh.
1518 // This also initialises the uio & physical ranges.
1519 _ranges.v = (IOVirtualRange *) buffers;
1520 }
1521 else {
1522 #ifndef __LP64__
1523 _rangesIsAllocated = true;
1524 #endif /* !__LP64__ */
1525 switch (type)
1526 {
1527 case kIOMemoryTypeUIO:
1528 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1529 break;
1530
1531 #ifndef __LP64__
1532 case kIOMemoryTypeVirtual64:
1533 case kIOMemoryTypePhysical64:
1534 if (count == 1
1535 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1536 ) {
1537 if (kIOMemoryTypeVirtual64 == type)
1538 type = kIOMemoryTypeVirtual;
1539 else
1540 type = kIOMemoryTypePhysical;
1541 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1542 _rangesIsAllocated = false;
1543 _ranges.v = &_singleRange.v;
1544 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1545 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1546 break;
1547 }
1548 _ranges.v64 = IONew(IOAddressRange, count);
1549 if (!_ranges.v64)
1550 return false;
1551 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1552 break;
1553 #endif /* !__LP64__ */
1554 case kIOMemoryTypeVirtual:
1555 case kIOMemoryTypePhysical:
1556 if (count == 1) {
1557 _flags |= kIOMemoryAsReference;
1558 #ifndef __LP64__
1559 _rangesIsAllocated = false;
1560 #endif /* !__LP64__ */
1561 _ranges.v = &_singleRange.v;
1562 } else {
1563 _ranges.v = IONew(IOVirtualRange, count);
1564 if (!_ranges.v)
1565 return false;
1566 }
1567 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1568 break;
1569 }
1570 }
1571
1572 // Find starting address within the vector of ranges
1573 Ranges vec = _ranges;
1574 mach_vm_size_t totalLength = 0;
1575 unsigned int ind, pages = 0;
1576 for (ind = 0; ind < count; ind++) {
1577 mach_vm_address_t addr;
1578 mach_vm_size_t len;
1579
1580 // addr & len are returned by this function
1581 getAddrLenForInd(addr, len, type, vec, ind);
1582 if ((addr + len + PAGE_MASK) < addr) break; /* overflow */
1583 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
1584 totalLength += len;
1585 if (totalLength < len) break; /* overflow */
1586 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1587 {
1588 ppnum_t highPage = atop_64(addr + len - 1);
1589 if (highPage > _highestPage)
1590 _highestPage = highPage;
1591 }
1592 }
1593 if ((ind < count)
1594 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1595
1596 _length = totalLength;
1597 _pages = pages;
1598 _rangesCount = count;
1599
1600 // Auto-prepare memory at creation time.
1601 // Implied completion when the descriptor is freed
1602 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1603 _wireCount++; // Physical MDs are, by definition, wired
1604 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1605 ioGMDData *dataP;
1606 mach_vm_size_t dataSize = computeDataSize(_pages, /* upls */ count * 2);
1607 if (dataSize != ((unsigned) dataSize)) return false; /* overflow */
1608
1609 if (!initMemoryEntries(dataSize, mapper)) return false;
1610 dataP = getDataP(_memoryEntries);
1611 dataP->fPageCnt = _pages;
1612
1613 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1614 {
1615 IOReturn
1616 err = memoryReferenceCreate(0, &_memRef);
1617 if (kIOReturnSuccess != err) return false;
1618 }
1619
1620 if ((_flags & kIOMemoryAutoPrepare)
1621 && prepare() != kIOReturnSuccess)
1622 return false;
1623 }
1624 }
1625
1626 return true;
1627 }
1628
1629 /*
1630 * free
1631 *
1632 * Free resources.
1633 */
1634 void IOGeneralMemoryDescriptor::free()
1635 {
1636 IOOptionBits type = _flags & kIOMemoryTypeMask;
1637
1638 if( reserved)
1639 {
1640 LOCK;
1641 reserved->dp.memory = 0;
1642 UNLOCK;
1643 }
1644 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1645 {
1646 ioGMDData * dataP;
1647 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1648 {
1649 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
1650 dataP->fMappedBase = 0;
1651 }
1652 }
1653 else
1654 {
1655 while (_wireCount) complete();
1656 }
1657
1658 if (_memoryEntries) _memoryEntries->release();
1659
1660 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1661 {
1662 if (kIOMemoryTypeUIO == type)
1663 uio_free((uio_t) _ranges.v);
1664 #ifndef __LP64__
1665 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1666 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1667 #endif /* !__LP64__ */
1668 else
1669 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1670
1671 _ranges.v = NULL;
1672 }
1673
1674 if (reserved)
1675 {
1676 if (reserved->dp.devicePager)
1677 {
1678 // memEntry holds a ref on the device pager which owns reserved
1679 // (IOMemoryDescriptorReserved) so no reserved access after this point
1680 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1681 }
1682 else
1683 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1684 reserved = NULL;
1685 }
1686
1687 if (_memRef) memoryReferenceRelease(_memRef);
1688 if (_prepareLock) IOLockFree(_prepareLock);
1689
1690 super::free();
1691 }
1692
1693 #ifndef __LP64__
1694 void IOGeneralMemoryDescriptor::unmapFromKernel()
1695 {
1696 panic("IOGMD::unmapFromKernel deprecated");
1697 }
1698
1699 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1700 {
1701 panic("IOGMD::mapIntoKernel deprecated");
1702 }
1703 #endif /* !__LP64__ */
1704
1705 /*
1706 * getDirection:
1707 *
1708 * Get the direction of the transfer.
1709 */
1710 IODirection IOMemoryDescriptor::getDirection() const
1711 {
1712 #ifndef __LP64__
1713 if (_direction)
1714 return _direction;
1715 #endif /* !__LP64__ */
1716 return (IODirection) (_flags & kIOMemoryDirectionMask);
1717 }
1718
1719 /*
1720 * getLength:
1721 *
1722 * Get the length of the transfer (over all ranges).
1723 */
1724 IOByteCount IOMemoryDescriptor::getLength() const
1725 {
1726 return _length;
1727 }
1728
1729 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1730 {
1731 _tag = tag;
1732 }
1733
1734 IOOptionBits IOMemoryDescriptor::getTag( void )
1735 {
1736 return( _tag);
1737 }
1738
1739 #ifndef __LP64__
1740 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1741 IOPhysicalAddress
1742 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1743 {
1744 addr64_t physAddr = 0;
1745
1746 if( prepare() == kIOReturnSuccess) {
1747 physAddr = getPhysicalSegment64( offset, length );
1748 complete();
1749 }
1750
1751 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1752 }
1753 #endif /* !__LP64__ */
1754
1755 IOByteCount IOMemoryDescriptor::readBytes
1756 (IOByteCount offset, void *bytes, IOByteCount length)
1757 {
1758 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1759 IOByteCount remaining;
1760
1761 // Assert that this entire I/O is within the available range
1762 assert(offset <= _length);
1763 assert(offset + length <= _length);
1764 if ((offset >= _length)
1765 || ((offset + length) > _length)) {
1766 return 0;
1767 }
1768
1769 if (kIOMemoryThreadSafe & _flags)
1770 LOCK;
1771
1772 remaining = length = min(length, _length - offset);
1773 while (remaining) { // (process another target segment?)
1774 addr64_t srcAddr64;
1775 IOByteCount srcLen;
1776
1777 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1778 if (!srcAddr64)
1779 break;
1780
1781 // Clip segment length to remaining
1782 if (srcLen > remaining)
1783 srcLen = remaining;
1784
1785 copypv(srcAddr64, dstAddr, srcLen,
1786 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1787
1788 dstAddr += srcLen;
1789 offset += srcLen;
1790 remaining -= srcLen;
1791 }
1792
1793 if (kIOMemoryThreadSafe & _flags)
1794 UNLOCK;
1795
1796 assert(!remaining);
1797
1798 return length - remaining;
1799 }
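/*
 * Illustrative sketch (hypothetical local buffer): readBytes()/writeBytes()
 * copy through physical addresses with copypv() rather than through a kernel
 * mapping of the descriptor:
 *
 *   uint8_t header[64];
 *   IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *   // returns 0 if [offset, offset + length) does not fit within the descriptor
 */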
1800
1801 IOByteCount IOMemoryDescriptor::writeBytes
1802 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1803 {
1804 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1805 IOByteCount remaining;
1806 IOByteCount offset = inoffset;
1807
1808 // Assert that this entire I/O is within the available range
1809 assert(offset <= _length);
1810 assert(offset + length <= _length);
1811
1812 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1813
1814 if ( (kIOMemoryPreparedReadOnly & _flags)
1815 || (offset >= _length)
1816 || ((offset + length) > _length)) {
1817 return 0;
1818 }
1819
1820 if (kIOMemoryThreadSafe & _flags)
1821 LOCK;
1822
1823 remaining = length = min(length, _length - offset);
1824 while (remaining) { // (process another target segment?)
1825 addr64_t dstAddr64;
1826 IOByteCount dstLen;
1827
1828 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1829 if (!dstAddr64)
1830 break;
1831
1832 // Clip segment length to remaining
1833 if (dstLen > remaining)
1834 dstLen = remaining;
1835
1836 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1837 else
1838 {
1839 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1840 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1841 srcAddr += dstLen;
1842 }
1843 offset += dstLen;
1844 remaining -= dstLen;
1845 }
1846
1847 if (kIOMemoryThreadSafe & _flags)
1848 UNLOCK;
1849
1850 assert(!remaining);
1851
1852 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1853
1854 return length - remaining;
1855 }
1856
1857 #ifndef __LP64__
1858 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1859 {
1860 panic("IOGMD::setPosition deprecated");
1861 }
1862 #endif /* !__LP64__ */
1863
1864 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1865
1866 uint64_t
1867 IOGeneralMemoryDescriptor::getPreparationID( void )
1868 {
1869 ioGMDData *dataP;
1870
1871 if (!_wireCount)
1872 return (kIOPreparationIDUnprepared);
1873
1874 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1875 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1876 {
1877 IOMemoryDescriptor::setPreparationID();
1878 return (IOMemoryDescriptor::getPreparationID());
1879 }
1880
1881 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1882 return (kIOPreparationIDUnprepared);
1883
1884 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1885 {
1886 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1887 }
1888 return (dataP->fPreparationID);
1889 }
1890
1891 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1892 {
1893 if (!reserved)
1894 {
1895 reserved = IONew(IOMemoryDescriptorReserved, 1);
1896 if (reserved)
1897 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1898 }
1899 return (reserved);
1900 }
1901
1902 void IOMemoryDescriptor::setPreparationID( void )
1903 {
1904 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1905 {
1906 #if defined(__ppc__ )
1907 reserved->preparationID = gIOMDPreparationID++;
1908 #else
1909 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1910 #endif
1911 }
1912 }
1913
1914 uint64_t IOMemoryDescriptor::getPreparationID( void )
1915 {
1916 if (reserved)
1917 return (reserved->preparationID);
1918 else
1919 return (kIOPreparationIDUnsupported);
1920 }
1921
1922 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1923 {
1924 IOReturn err = kIOReturnSuccess;
1925 DMACommandOps params;
1926 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1927 ioGMDData *dataP;
1928
1929 params = (op & ~kIOMDDMACommandOperationMask & op);
1930 op &= kIOMDDMACommandOperationMask;
1931
1932 if (kIOMDDMAMap == op)
1933 {
1934 if (dataSize < sizeof(IOMDDMAMapArgs))
1935 return kIOReturnUnderrun;
1936
1937 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1938
1939 if (!_memoryEntries
1940 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1941
1942 if (_memoryEntries && data->fMapper)
1943 {
1944 bool remap, keepMap;
1945 dataP = getDataP(_memoryEntries);
1946
1947 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1948 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1949
1950 keepMap = (data->fMapper == gIOSystemMapper);
1951 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
1952
1953 remap = (!keepMap);
1954 remap |= (dataP->fDMAMapNumAddressBits < 64)
1955 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1956 remap |= (dataP->fDMAMapAlignment > page_size);
1957
1958 if (remap || !dataP->fMappedBase)
1959 {
1960 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1961 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
1962 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
1963 {
1964 dataP->fMappedBase = data->fAlloc;
1965 dataP->fMappedLength = data->fAllocLength;
1966 data->fAllocLength = 0; // IOMD owns the alloc now
1967 }
1968 }
1969 else
1970 {
1971 data->fAlloc = dataP->fMappedBase;
1972 data->fAllocLength = 0; // give out IOMD map
1973 }
1974 data->fMapContig = !dataP->fDiscontig;
1975 }
1976
1977 return (err);
1978 }
1979
1980 if (kIOMDAddDMAMapSpec == op)
1981 {
1982 if (dataSize < sizeof(IODMAMapSpecification))
1983 return kIOReturnUnderrun;
1984
1985 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1986
1987 if (!_memoryEntries
1988 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1989
1990 if (_memoryEntries)
1991 {
1992 dataP = getDataP(_memoryEntries);
1993 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1994 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1995 if (data->alignment > dataP->fDMAMapAlignment)
1996 dataP->fDMAMapAlignment = data->alignment;
1997 }
1998 return kIOReturnSuccess;
1999 }
2000
2001 if (kIOMDGetCharacteristics == op) {
2002
2003 if (dataSize < sizeof(IOMDDMACharacteristics))
2004 return kIOReturnUnderrun;
2005
2006 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2007 data->fLength = _length;
2008 data->fSGCount = _rangesCount;
2009 data->fPages = _pages;
2010 data->fDirection = getDirection();
2011 if (!_wireCount)
2012 data->fIsPrepared = false;
2013 else {
2014 data->fIsPrepared = true;
2015 data->fHighestPage = _highestPage;
2016 if (_memoryEntries)
2017 {
2018 dataP = getDataP(_memoryEntries);
2019 ioPLBlock *ioplList = getIOPLList(dataP);
2020 UInt count = getNumIOPL(_memoryEntries, dataP);
2021 if (count == 1)
2022 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2023 }
2024 }
2025
2026 return kIOReturnSuccess;
2027
2028 #if IOMD_DEBUG_DMAACTIVE
2029 } else if (kIOMDDMAActive == op) {
2030 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
2031 else {
2032 if (md->__iomd_reservedA)
2033 OSDecrementAtomic(&md->__iomd_reservedA);
2034 else
2035 panic("kIOMDSetDMAInactive");
2036 }
2037 #endif /* IOMD_DEBUG_DMAACTIVE */
2038
2039 } else if (kIOMDWalkSegments != op)
2040 return kIOReturnBadArgument;
2041
2042 // Get the next segment
2043 struct InternalState {
2044 IOMDDMAWalkSegmentArgs fIO;
2045 UInt fOffset2Index;
2046 UInt fIndex;
2047 UInt fNextOffset;
2048 } *isP;
2049
2050 // Find the next segment
2051 if (dataSize < sizeof(*isP))
2052 return kIOReturnUnderrun;
2053
2054 isP = (InternalState *) vData;
2055 UInt offset = isP->fIO.fOffset;
2056 bool mapped = isP->fIO.fMapped;
2057
2058 if (IOMapper::gSystem && mapped
2059 && (!(kIOMemoryHostOnly & _flags))
2060 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2061 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2062 {
2063 if (!_memoryEntries
2064 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2065
2066 dataP = getDataP(_memoryEntries);
2067 if (dataP->fMapper)
2068 {
2069 IODMAMapSpecification mapSpec;
2070 bzero(&mapSpec, sizeof(mapSpec));
2071 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2072 mapSpec.alignment = dataP->fDMAMapAlignment;
2073 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2074 if (kIOReturnSuccess != err) return (err);
2075 }
2076 }
2077
2078 if (offset >= _length)
2079 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2080
2081 // Validate the previous offset
2082 UInt ind, off2Ind = isP->fOffset2Index;
2083 if (!params
2084 && offset
2085 && (offset == isP->fNextOffset || off2Ind <= offset))
2086 ind = isP->fIndex;
2087 else
2088 ind = off2Ind = 0; // Start from beginning
2089
2090 UInt length;
2091 UInt64 address;
2092
2093
2094 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2095
2096 // Physical address based memory descriptor
2097 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2098
2099 // Find the range after the one that contains the offset
2100 mach_vm_size_t len;
2101 for (len = 0; off2Ind <= offset; ind++) {
2102 len = physP[ind].length;
2103 off2Ind += len;
2104 }
2105
2106 // Calculate length within range and starting address
2107 length = off2Ind - offset;
2108 address = physP[ind - 1].address + len - length;
2109
2110 if (true && mapped && _memoryEntries
2111 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2112 {
2113 address = dataP->fMappedBase + offset;
2114 }
2115 else
2116 {
2117 // see how far we can coalesce ranges
2118 while (ind < _rangesCount && address + length == physP[ind].address) {
2119 len = physP[ind].length;
2120 length += len;
2121 off2Ind += len;
2122 ind++;
2123 }
2124 }
2125
2126 // correct contiguous check overshoot
2127 ind--;
2128 off2Ind -= len;
2129 }
2130 #ifndef __LP64__
2131 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2132
2133 // Physical address based memory descriptor
2134 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2135
2136 // Find the range after the one that contains the offset
2137 mach_vm_size_t len;
2138 for (len = 0; off2Ind <= offset; ind++) {
2139 len = physP[ind].length;
2140 off2Ind += len;
2141 }
2142
2143 // Calculate length within range and starting address
2144 length = off2Ind - offset;
2145 address = physP[ind - 1].address + len - length;
2146
2147 if (true && mapped && _memoryEntries
2148 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2149 {
2150 address = dataP->fMappedBase + offset;
2151 }
2152 else
2153 {
2154 // see how far we can coalesce ranges
2155 while (ind < _rangesCount && address + length == physP[ind].address) {
2156 len = physP[ind].length;
2157 length += len;
2158 off2Ind += len;
2159 ind++;
2160 }
2161 }
2162 // correct contiguous check overshoot
2163 ind--;
2164 off2Ind -= len;
2165 }
2166 #endif /* !__LP64__ */
2167 else do {
2168 if (!_wireCount)
2169 panic("IOGMD: not wired for the IODMACommand");
2170
2171 assert(_memoryEntries);
2172
2173 dataP = getDataP(_memoryEntries);
2174 const ioPLBlock *ioplList = getIOPLList(dataP);
2175 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2176 upl_page_info_t *pageList = getPageList(dataP);
2177
2178 assert(numIOPLs > 0);
2179
2180 // Scan through iopl info blocks looking for block containing offset
2181 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2182 ind++;
2183
2184 // Step back to the actual range, as the search goes one past it
2185 ioPLBlock ioplInfo = ioplList[ind - 1];
2186 off2Ind = ioplInfo.fIOMDOffset;
2187
2188 if (ind < numIOPLs)
2189 length = ioplList[ind].fIOMDOffset;
2190 else
2191 length = _length;
2192 length -= offset; // Remainder within iopl
2193
2194 // Subtract the offset accumulated up to this iopl in the total list
2195 offset -= off2Ind;
2196
2197 // If a mapped address is requested and this is a pre-mapped IOPL
2198 // then we just need to compute an offset relative to the mapped base.
2199 if (mapped && dataP->fMappedBase) {
2200 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2201 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2202 continue; // Done; leave the do/while(false) now
2203 }
2204
2205 // The offset is rebased into the current iopl.
2206 // Now add the iopl 1st page offset.
2207 offset += ioplInfo.fPageOffset;
2208
2209 // For external UPLs the fPageInfo field points directly to
2210 // the upl's upl_page_info_t array.
2211 if (ioplInfo.fFlags & kIOPLExternUPL)
2212 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2213 else
2214 pageList = &pageList[ioplInfo.fPageInfo];
2215
2216 // Check for direct device non-paged memory
2217 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2218 address = ptoa_64(pageList->phys_addr) + offset;
2219 continue; // Done; leave the do/while(false) now
2220 }
2221
2222 // Now we need to compute the index into the pageList
2223 UInt pageInd = atop_32(offset);
2224 offset &= PAGE_MASK;
2225
2226 // Compute the starting address of this segment
2227 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2228 if (!pageAddr) {
2229 panic("!pageList phys_addr");
2230 }
2231
2232 address = ptoa_64(pageAddr) + offset;
2233
2234 // length is currently set to the length of the remainder of the iopl.
2235 // We need to check that the remainder of the iopl is contiguous.
2236 // This is indicated by pageList[ind].phys_addr being sequential.
2237 IOByteCount contigLength = PAGE_SIZE - offset;
2238 while (contigLength < length
2239 && ++pageAddr == pageList[++pageInd].phys_addr)
2240 {
2241 contigLength += PAGE_SIZE;
2242 }
2243
2244 if (contigLength < length)
2245 length = contigLength;
2246
2247
2248 assert(address);
2249 assert(length);
2250
2251 } while (false);
2252
2253 // Update return values and state
2254 isP->fIO.fIOVMAddr = address;
2255 isP->fIO.fLength = length;
2256 isP->fIndex = ind;
2257 isP->fOffset2Index = off2Ind;
2258 isP->fNextOffset = isP->fIO.fOffset + length;
2259
2260 return kIOReturnSuccess;
2261 }
2262
2263 addr64_t
2264 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2265 {
2266 IOReturn ret;
2267 mach_vm_address_t address = 0;
2268 mach_vm_size_t length = 0;
2269 IOMapper * mapper = gIOSystemMapper;
2270 IOOptionBits type = _flags & kIOMemoryTypeMask;
2271
2272 if (lengthOfSegment)
2273 *lengthOfSegment = 0;
2274
2275 if (offset >= _length)
2276 return 0;
2277
2278 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2279 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
2280 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
2281 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2282
2283 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2284 {
2285 unsigned rangesIndex = 0;
2286 Ranges vec = _ranges;
2287 mach_vm_address_t addr;
2288
2289 // Find starting address within the vector of ranges
2290 for (;;) {
2291 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2292 if (offset < length)
2293 break;
2294 offset -= length; // (make offset relative)
2295 rangesIndex++;
2296 }
2297
2298 // Now that we have the starting range,
2299 // let's find the last contiguous range
2300 addr += offset;
2301 length -= offset;
2302
2303 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2304 mach_vm_address_t newAddr;
2305 mach_vm_size_t newLen;
2306
2307 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2308 if (addr + length != newAddr)
2309 break;
2310 length += newLen;
2311 }
2312 if (addr)
2313 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2314 }
2315 else
2316 {
2317 IOMDDMAWalkSegmentState _state;
2318 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2319
2320 state->fOffset = offset;
2321 state->fLength = _length - offset;
2322 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2323
2324 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2325
2326 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2327 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2328 ret, this, state->fOffset,
2329 state->fIOVMAddr, state->fLength);
2330 if (kIOReturnSuccess == ret)
2331 {
2332 address = state->fIOVMAddr;
2333 length = state->fLength;
2334 }
2335
2336 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2337 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2338
2339 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2340 {
2341 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2342 {
2343 addr64_t origAddr = address;
2344 IOByteCount origLen = length;
2345
2346 address = mapper->mapToPhysicalAddress(origAddr);
2347 length = page_size - (address & (page_size - 1));
2348 while ((length < origLen)
2349 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2350 length += page_size;
2351 if (length > origLen)
2352 length = origLen;
2353 }
2354 }
2355 }
2356
2357 if (!address)
2358 length = 0;
2359
2360 if (lengthOfSegment)
2361 *lengthOfSegment = length;
2362
2363 return (address);
2364 }
2365
2366 #ifndef __LP64__
2367 addr64_t
2368 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2369 {
2370 addr64_t address = 0;
2371
2372 if (options & _kIOMemorySourceSegment)
2373 {
2374 address = getSourceSegment(offset, lengthOfSegment);
2375 }
2376 else if (options & kIOMemoryMapperNone)
2377 {
2378 address = getPhysicalSegment64(offset, lengthOfSegment);
2379 }
2380 else
2381 {
2382 address = getPhysicalSegment(offset, lengthOfSegment);
2383 }
2384
2385 return (address);
2386 }
2387
2388 addr64_t
2389 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2390 {
2391 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2392 }
2393
2394 IOPhysicalAddress
2395 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2396 {
2397 addr64_t address = 0;
2398 IOByteCount length = 0;
2399
2400 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2401
2402 if (lengthOfSegment)
2403 length = *lengthOfSegment;
2404
2405 if ((address + length) > 0x100000000ULL)
2406 {
2407 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2408 address, (long) length, (getMetaClass())->getClassName());
2409 }
2410
2411 return ((IOPhysicalAddress) address);
2412 }
2413
2414 addr64_t
2415 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2416 {
2417 IOPhysicalAddress phys32;
2418 IOByteCount length;
2419 addr64_t phys64;
2420 IOMapper * mapper = 0;
2421
2422 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2423 if (!phys32)
2424 return 0;
2425
2426 if (gIOSystemMapper)
2427 mapper = gIOSystemMapper;
2428
2429 if (mapper)
2430 {
2431 IOByteCount origLen;
2432
2433 phys64 = mapper->mapToPhysicalAddress(phys32);
2434 origLen = *lengthOfSegment;
2435 length = page_size - (phys64 & (page_size - 1));
2436 while ((length < origLen)
2437 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2438 length += page_size;
2439 if (length > origLen)
2440 length = origLen;
2441
2442 *lengthOfSegment = length;
2443 }
2444 else
2445 phys64 = (addr64_t) phys32;
2446
2447 return phys64;
2448 }
2449
2450 IOPhysicalAddress
2451 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2452 {
2453 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2454 }
2455
2456 IOPhysicalAddress
2457 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2458 {
2459 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2460 }
2461
2462 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2463 IOByteCount * lengthOfSegment)
2464 {
2465 if (_task == kernel_task)
2466 return (void *) getSourceSegment(offset, lengthOfSegment);
2467 else
2468 panic("IOGMD::getVirtualSegment deprecated");
2469
2470 return 0;
2471 }
2472 #endif /* !__LP64__ */
2473
2474 IOReturn
2475 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2476 {
2477 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2478 DMACommandOps params;
2479 IOReturn err;
2480
2481 params = (op & ~kIOMDDMACommandOperationMask & op);
2482 op &= kIOMDDMACommandOperationMask;
2483
2484 if (kIOMDGetCharacteristics == op) {
2485 if (dataSize < sizeof(IOMDDMACharacteristics))
2486 return kIOReturnUnderrun;
2487
2488 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2489 data->fLength = getLength();
2490 data->fSGCount = 0;
2491 data->fDirection = getDirection();
2492 data->fIsPrepared = true; // Assume prepared - fails safe
2493 }
2494 else if (kIOMDWalkSegments == op) {
2495 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2496 return kIOReturnUnderrun;
2497
2498 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2499 IOByteCount offset = (IOByteCount) data->fOffset;
2500
2501 IOPhysicalLength length;
2502 if (data->fMapped && IOMapper::gSystem)
2503 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2504 else
2505 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2506 data->fLength = length;
2507 }
2508 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2509 else if (kIOMDDMAMap == op)
2510 {
2511 if (dataSize < sizeof(IOMDDMAMapArgs))
2512 return kIOReturnUnderrun;
2513 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2514
2515 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2516
2517 data->fMapContig = true;
2518 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2519 return (err);
2520 }
2521 else return kIOReturnBadArgument;
2522
2523 return kIOReturnSuccess;
2524 }
2525
2526 IOReturn
2527 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2528 IOOptionBits * oldState )
2529 {
2530 IOReturn err = kIOReturnSuccess;
2531
2532 vm_purgable_t control;
2533 int state;
2534
2535 if (_memRef)
2536 {
2537 err = super::setPurgeable(newState, oldState);
2538 }
2539 else
2540 {
2541 if (kIOMemoryThreadSafe & _flags)
2542 LOCK;
2543 do
2544 {
2545 // Find the appropriate vm_map for the given task
2546 vm_map_t curMap;
2547 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2548 {
2549 err = kIOReturnNotReady;
2550 break;
2551 }
2552 else if (!_task)
2553 {
2554 err = kIOReturnUnsupported;
2555 break;
2556 }
2557 else
2558 curMap = get_task_map(_task);
2559
2560 // can only do one range
2561 Ranges vec = _ranges;
2562 IOOptionBits type = _flags & kIOMemoryTypeMask;
2563 mach_vm_address_t addr;
2564 mach_vm_size_t len;
2565 getAddrLenForInd(addr, len, type, vec, 0);
2566
2567 err = purgeableControlBits(newState, &control, &state);
2568 if (kIOReturnSuccess != err)
2569 break;
2570 err = mach_vm_purgable_control(curMap, addr, control, &state);
2571 if (oldState)
2572 {
2573 if (kIOReturnSuccess == err)
2574 {
2575 err = purgeableStateBits(&state);
2576 *oldState = state;
2577 }
2578 }
2579 }
2580 while (false);
2581 if (kIOMemoryThreadSafe & _flags)
2582 UNLOCK;
2583 }
2584
2585 return (err);
2586 }
2587
2588 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2589 IOOptionBits * oldState )
2590 {
2591 IOReturn err = kIOReturnNotReady;
2592
2593 if (kIOMemoryThreadSafe & _flags) LOCK;
2594 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2595 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2596
2597 return (err);
2598 }
2599
2600 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2601 IOByteCount * dirtyPageCount )
2602 {
2603 IOReturn err = kIOReturnNotReady;
2604
2605 if (kIOMemoryThreadSafe & _flags) LOCK;
2606 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2607 else
2608 {
2609 IOMultiMemoryDescriptor * mmd;
2610 IOSubMemoryDescriptor * smd;
2611 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2612 {
2613 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2614 }
2615 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2616 {
2617 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2618 }
2619 }
2620 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2621
2622 return (err);
2623 }
2624
2625
2626 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2627 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2628
2629 static void SetEncryptOp(addr64_t pa, unsigned int count)
2630 {
2631 ppnum_t page, end;
2632
2633 page = atop_64(round_page_64(pa));
2634 end = atop_64(trunc_page_64(pa + count));
2635 for (; page < end; page++)
2636 {
2637 pmap_clear_noencrypt(page);
2638 }
2639 }
2640
2641 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2642 {
2643 ppnum_t page, end;
2644
2645 page = atop_64(round_page_64(pa));
2646 end = atop_64(trunc_page_64(pa + count));
2647 for (; page < end; page++)
2648 {
2649 pmap_set_noencrypt(page);
2650 }
2651 }
2652
2653 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2654 IOByteCount offset, IOByteCount length )
2655 {
2656 IOByteCount remaining;
2657 unsigned int res;
2658 void (*func)(addr64_t pa, unsigned int count) = 0;
2659
2660 switch (options)
2661 {
2662 case kIOMemoryIncoherentIOFlush:
2663 func = &dcache_incoherent_io_flush64;
2664 break;
2665 case kIOMemoryIncoherentIOStore:
2666 func = &dcache_incoherent_io_store64;
2667 break;
2668
2669 case kIOMemorySetEncrypted:
2670 func = &SetEncryptOp;
2671 break;
2672 case kIOMemoryClearEncrypted:
2673 func = &ClearEncryptOp;
2674 break;
2675 }
2676
2677 if (!func)
2678 return (kIOReturnUnsupported);
2679
2680 if (kIOMemoryThreadSafe & _flags)
2681 LOCK;
2682
2683 res = 0x0UL;
2684 remaining = length = min(length, getLength() - offset);
2685 while (remaining)
2686 // (process another target segment?)
2687 {
2688 addr64_t dstAddr64;
2689 IOByteCount dstLen;
2690
2691 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2692 if (!dstAddr64)
2693 break;
2694
2695 // Clip segment length to remaining
2696 if (dstLen > remaining)
2697 dstLen = remaining;
2698
2699 (*func)(dstAddr64, dstLen);
2700
2701 offset += dstLen;
2702 remaining -= dstLen;
2703 }
2704
2705 if (kIOMemoryThreadSafe & _flags)
2706 UNLOCK;
2707
2708 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2709 }
2710
2711 #if defined(__i386__) || defined(__x86_64__)
2712
2713 #define io_kernel_static_start vm_kernel_stext
2714 #define io_kernel_static_end vm_kernel_etext
2715
2716 #else
2717 #error io_kernel_static_end is undefined for this architecture
2718 #endif
2719
2720 static kern_return_t
2721 io_get_kernel_static_upl(
2722 vm_map_t /* map */,
2723 uintptr_t offset,
2724 upl_size_t *upl_size,
2725 upl_t *upl,
2726 upl_page_info_array_t page_list,
2727 unsigned int *count,
2728 ppnum_t *highest_page)
2729 {
2730 unsigned int pageCount, page;
2731 ppnum_t phys;
2732 ppnum_t highestPage = 0;
2733
2734 pageCount = atop_32(*upl_size);
2735 if (pageCount > *count)
2736 pageCount = *count;
2737
2738 *upl = NULL;
2739
2740 for (page = 0; page < pageCount; page++)
2741 {
2742 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2743 if (!phys)
2744 break;
2745 page_list[page].phys_addr = phys;
2746 page_list[page].pageout = 0;
2747 page_list[page].absent = 0;
2748 page_list[page].dirty = 0;
2749 page_list[page].precious = 0;
2750 page_list[page].device = 0;
2751 if (phys > highestPage)
2752 highestPage = phys;
2753 }
2754
2755 *highest_page = highestPage;
2756
2757 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2758 }
2759
2760 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2761 {
2762 IOOptionBits type = _flags & kIOMemoryTypeMask;
2763 IOReturn error = kIOReturnCannotWire;
2764 ioGMDData *dataP;
2765 upl_page_info_array_t pageInfo;
2766 ppnum_t mapBase;
2767
2768 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2769
2770 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2771 forDirection = (IODirection) (forDirection | getDirection());
2772
2773 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
2774 switch (kIODirectionOutIn & forDirection)
2775 {
2776 case kIODirectionOut:
2777 // Pages do not need to be marked as dirty on commit
2778 uplFlags = UPL_COPYOUT_FROM;
2779 break;
2780
2781 case kIODirectionIn:
2782 default:
2783 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2784 break;
2785 }
2786
2787 if (_wireCount)
2788 {
2789 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2790 {
2791 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2792 error = kIOReturnNotWritable;
2793 }
2794 else error = kIOReturnSuccess;
2795 return (error);
2796 }
2797
2798 dataP = getDataP(_memoryEntries);
2799 IOMapper *mapper;
2800 mapper = dataP->fMapper;
2801 dataP->fMappedBase = 0;
2802
2803 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2804 uplFlags |= UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
2805
2806 if (kIODirectionPrepareToPhys32 & forDirection)
2807 {
2808 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2809 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2810 }
2811 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2812 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2813 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2814
2815 mapBase = 0;
2816
2817 // Note that appendBytes(NULL) zeros the data up to the desired length
2818 // and the length parameter is an unsigned int
2819 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2820 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2821 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2822 dataP = 0;
2823
2824 // Find the appropriate vm_map for the given task
2825 vm_map_t curMap;
2826 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2827 else curMap = get_task_map(_task);
2828
2829 // Iterate over the vector of virtual ranges
2830 Ranges vec = _ranges;
2831 unsigned int pageIndex = 0;
2832 IOByteCount mdOffset = 0;
2833 ppnum_t highestPage = 0;
2834
2835 IOMemoryEntry * memRefEntry = 0;
2836 if (_memRef) memRefEntry = &_memRef->entries[0];
2837
2838 for (UInt range = 0; range < _rangesCount; range++) {
2839 ioPLBlock iopl;
2840 mach_vm_address_t startPage;
2841 mach_vm_size_t numBytes;
2842 ppnum_t highPage = 0;
2843
2844 // Get the startPage address and length of vec[range]
2845 getAddrLenForInd(startPage, numBytes, type, vec, range);
2846 iopl.fPageOffset = startPage & PAGE_MASK;
2847 numBytes += iopl.fPageOffset;
2848 startPage = trunc_page_64(startPage);
2849
2850 if (mapper)
2851 iopl.fMappedPage = mapBase + pageIndex;
2852 else
2853 iopl.fMappedPage = 0;
2854
2855 // Iterate over the current range, creating UPLs
2856 while (numBytes) {
2857 vm_address_t kernelStart = (vm_address_t) startPage;
2858 vm_map_t theMap;
2859 if (curMap) theMap = curMap;
2860 else if (_memRef)
2861 {
2862 theMap = NULL;
2863 }
2864 else
2865 {
2866 assert(_task == kernel_task);
2867 theMap = IOPageableMapForAddress(kernelStart);
2868 }
2869
2870 // ioplFlags is an in/out parameter
2871 upl_control_flags_t ioplFlags = uplFlags;
2872 dataP = getDataP(_memoryEntries);
2873 pageInfo = getPageList(dataP);
2874 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2875
2876 upl_size_t ioplSize = round_page(numBytes);
2877 unsigned int numPageInfo = atop_32(ioplSize);
2878
2879 if ((theMap == kernel_map)
2880 && (kernelStart >= io_kernel_static_start)
2881 && (kernelStart < io_kernel_static_end)) {
2882 error = io_get_kernel_static_upl(theMap,
2883 kernelStart,
2884 &ioplSize,
2885 &iopl.fIOPL,
2886 baseInfo,
2887 &numPageInfo,
2888 &highPage);
2889 }
2890 else if (_memRef) {
2891 memory_object_offset_t entryOffset;
2892
2893 entryOffset = mdOffset;
2894 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
2895 if (entryOffset >= memRefEntry->size) {
2896 memRefEntry++;
2897 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2898 entryOffset = 0;
2899 }
2900 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2901 error = memory_object_iopl_request(memRefEntry->entry,
2902 entryOffset,
2903 &ioplSize,
2904 &iopl.fIOPL,
2905 baseInfo,
2906 &numPageInfo,
2907 &ioplFlags);
2908 }
2909 else {
2910 assert(theMap);
2911 error = vm_map_create_upl(theMap,
2912 startPage,
2913 (upl_size_t*)&ioplSize,
2914 &iopl.fIOPL,
2915 baseInfo,
2916 &numPageInfo,
2917 &ioplFlags);
2918 }
2919
2920 assert(ioplSize);
2921 if (error != KERN_SUCCESS)
2922 goto abortExit;
2923
2924 if (iopl.fIOPL)
2925 highPage = upl_get_highest_page(iopl.fIOPL);
2926 if (highPage > highestPage)
2927 highestPage = highPage;
2928
2929 error = kIOReturnCannotWire;
2930
2931 if (baseInfo->device) {
2932 numPageInfo = 1;
2933 iopl.fFlags = kIOPLOnDevice;
2934 }
2935 else {
2936 iopl.fFlags = 0;
2937 }
2938
2939 iopl.fIOMDOffset = mdOffset;
2940 iopl.fPageInfo = pageIndex;
2941 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2942
2943 #if 0
2944 // used to remove the upl for auto prepares here, for some errant code
2945 // that freed memory before releasing the descriptor pointing at it
2946 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2947 {
2948 upl_commit(iopl.fIOPL, 0, 0);
2949 upl_deallocate(iopl.fIOPL);
2950 iopl.fIOPL = 0;
2951 }
2952 #endif
2953
2954 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2955 // Clean up partially created and unsaved iopl
2956 if (iopl.fIOPL) {
2957 upl_abort(iopl.fIOPL, 0);
2958 upl_deallocate(iopl.fIOPL);
2959 }
2960 goto abortExit;
2961 }
2962 dataP = 0;
2963
2964 // Check for multiple iopls in one virtual range
2965 pageIndex += numPageInfo;
2966 mdOffset -= iopl.fPageOffset;
2967 if (ioplSize < numBytes) {
2968 numBytes -= ioplSize;
2969 startPage += ioplSize;
2970 mdOffset += ioplSize;
2971 iopl.fPageOffset = 0;
2972 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2973 }
2974 else {
2975 mdOffset += numBytes;
2976 break;
2977 }
2978 }
2979 }
2980
2981 _highestPage = highestPage;
2982
2983 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2984
2985 if ((kIOTracking & gIOKitDebug)
2986 //&& !(_flags & kIOMemoryAutoPrepare)
2987 )
2988 {
2989 dataP = getDataP(_memoryEntries);
2990 #if IOTRACKING
2991 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
2992 #endif
2993 }
2994
2995 return kIOReturnSuccess;
2996
2997 abortExit:
2998 {
2999 dataP = getDataP(_memoryEntries);
3000 UInt done = getNumIOPL(_memoryEntries, dataP);
3001 ioPLBlock *ioplList = getIOPLList(dataP);
3002
3003 for (UInt range = 0; range < done; range++)
3004 {
3005 if (ioplList[range].fIOPL) {
3006 upl_abort(ioplList[range].fIOPL, 0);
3007 upl_deallocate(ioplList[range].fIOPL);
3008 }
3009 }
3010 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3011 }
3012
3013 if (error == KERN_FAILURE)
3014 error = kIOReturnCannotWire;
3015 else if (error == KERN_MEMORY_ERROR)
3016 error = kIOReturnNoResources;
3017
3018 return error;
3019 }
3020
3021 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3022 {
3023 ioGMDData * dataP;
3024 unsigned dataSize = size;
3025
3026 if (!_memoryEntries) {
3027 _memoryEntries = OSData::withCapacity(dataSize);
3028 if (!_memoryEntries)
3029 return false;
3030 }
3031 else if (!_memoryEntries->initWithCapacity(dataSize))
3032 return false;
3033
3034 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3035 dataP = getDataP(_memoryEntries);
3036
3037 if (mapper == kIOMapperWaitSystem) {
3038 IOMapper::checkForSystemMapper();
3039 mapper = IOMapper::gSystem;
3040 }
3041 dataP->fMapper = mapper;
3042 dataP->fPageCnt = 0;
3043 dataP->fMappedBase = 0;
3044 dataP->fDMAMapNumAddressBits = 64;
3045 dataP->fDMAMapAlignment = 0;
3046 dataP->fPreparationID = kIOPreparationIDUnprepared;
3047 dataP->fDiscontig = false;
3048 dataP->fCompletionError = false;
3049
3050 return (true);
3051 }
3052
3053 IOReturn IOMemoryDescriptor::dmaMap(
3054 IOMapper * mapper,
3055 IODMACommand * command,
3056 const IODMAMapSpecification * mapSpec,
3057 uint64_t offset,
3058 uint64_t length,
3059 uint64_t * mapAddress,
3060 uint64_t * mapLength)
3061 {
3062 IOReturn ret;
3063 uint32_t mapOptions;
3064
3065 mapOptions = 0;
3066 mapOptions |= kIODMAMapReadAccess;
3067 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3068
3069 ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
3070 mapSpec, command, NULL, mapAddress, mapLength);
3071
3072 return (ret);
3073 }
3074
3075 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3076 IOMapper * mapper,
3077 IODMACommand * command,
3078 const IODMAMapSpecification * mapSpec,
3079 uint64_t offset,
3080 uint64_t length,
3081 uint64_t * mapAddress,
3082 uint64_t * mapLength)
3083 {
3084 IOReturn err = kIOReturnSuccess;
3085 ioGMDData * dataP;
3086 IOOptionBits type = _flags & kIOMemoryTypeMask;
3087
3088 *mapAddress = 0;
3089 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3090
3091 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3092 || offset || (length != _length))
3093 {
3094 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3095 }
3096 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3097 {
3098 const ioPLBlock * ioplList = getIOPLList(dataP);
3099 upl_page_info_t * pageList;
3100 uint32_t mapOptions = 0;
3101
3102 IODMAMapSpecification mapSpec;
3103 bzero(&mapSpec, sizeof(mapSpec));
3104 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3105 mapSpec.alignment = dataP->fDMAMapAlignment;
3106
3107 // For external UPLs the fPageInfo field points directly to
3108 // the upl's upl_page_info_t array.
3109 if (ioplList->fFlags & kIOPLExternUPL)
3110 {
3111 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3112 mapOptions |= kIODMAMapPagingPath;
3113 }
3114 else pageList = getPageList(dataP);
3115
3116 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3117 {
3118 mapOptions |= kIODMAMapPageListFullyOccupied;
3119 }
3120
3121 mapOptions |= kIODMAMapReadAccess;
3122 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3123
3124 // Check for direct device non-paged memory
3125 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3126
3127 IODMAMapPageList dmaPageList =
3128 {
3129 .pageOffset = ioplList->fPageOffset & page_mask,
3130 .pageListCount = _pages,
3131 .pageList = &pageList[0]
3132 };
3133 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3134 command, &dmaPageList, mapAddress, mapLength);
3135 }
3136
3137 return (err);
3138 }
3139
3140 /*
3141 * prepare
3142 *
3143 * Prepare the memory for an I/O transfer. This involves paging in
3144 * the memory, if necessary, and wiring it down for the duration of
3145 * the transfer. The complete() method completes the processing of
3146 * the memory after the I/O transfer finishes. This method need not
3147 * be called for non-pageable memory.
3148 */
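// Illustrative usage sketch: one way a client driver might pair prepare() and
// complete() around a transfer of pageable memory. The buffer address, length,
// direction and task used below are hypothetical placeholders.
#if 0
    mach_vm_address_t bufferAddress = 0;      // placeholder: client buffer virtual address
    mach_vm_size_t    bufferLength  = 4096;   // placeholder: transfer length in bytes

    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                    bufferAddress, bufferLength,
                                    kIODirectionOut, current_task());
    if (md && (kIOReturnSuccess == md->prepare(kIODirectionOut)))
    {
        // ... run the I/O against the now-wired pages ...
        md->complete(kIODirectionOut);        // balance the successful prepare()
    }
    if (md) md->release();
#endif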
3149
3150 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3151 {
3152 IOReturn error = kIOReturnSuccess;
3153 IOOptionBits type = _flags & kIOMemoryTypeMask;
3154
3155 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3156 return kIOReturnSuccess;
3157
3158 if (_prepareLock)
3159 IOLockLock(_prepareLock);
3160
3161 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3162 {
3163 error = wireVirtual(forDirection);
3164 }
3165
3166 if (kIOReturnSuccess == error)
3167 {
3168 if (1 == ++_wireCount)
3169 {
3170 if (kIOMemoryClearEncrypt & _flags)
3171 {
3172 performOperation(kIOMemoryClearEncrypted, 0, _length);
3173 }
3174 }
3175 }
3176
3177 if (_prepareLock)
3178 IOLockUnlock(_prepareLock);
3179
3180 return error;
3181 }
3182
3183 /*
3184 * complete
3185 *
3186 * Complete processing of the memory after an I/O transfer finishes.
3187 * This method should not be called unless a prepare was previously
3188 * issued; prepare() and complete() must occur in pairs, before
3189 * and after an I/O transfer involving pageable memory.
3190 */
3191
3192 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3193 {
3194 IOOptionBits type = _flags & kIOMemoryTypeMask;
3195 ioGMDData * dataP;
3196
3197 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3198 return kIOReturnSuccess;
3199
3200 if (_prepareLock)
3201 IOLockLock(_prepareLock);
3202
3203 assert(_wireCount);
3204
3205 if ((kIODirectionCompleteWithError & forDirection)
3206 && (dataP = getDataP(_memoryEntries)))
3207 dataP->fCompletionError = true;
3208
3209 if (_wireCount)
3210 {
3211 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3212 {
3213 performOperation(kIOMemorySetEncrypted, 0, _length);
3214 }
3215
3216 _wireCount--;
3217 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3218 {
3219 IOOptionBits type = _flags & kIOMemoryTypeMask;
3220 dataP = getDataP(_memoryEntries);
3221 ioPLBlock *ioplList = getIOPLList(dataP);
3222 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3223
3224 if (_wireCount)
3225 {
3226 // kIODirectionCompleteWithDataValid & forDirection
3227 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3228 {
3229 for (ind = 0; ind < count; ind++)
3230 {
3231 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3232 }
3233 }
3234 }
3235 else
3236 {
3237 #if IOMD_DEBUG_DMAACTIVE
3238 if (__iomd_reservedA) panic("complete() while dma active");
3239 #endif /* IOMD_DEBUG_DMAACTIVE */
3240
3241 if (dataP->fMappedBase) {
3242 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
3243 dataP->fMappedBase = 0;
3244 }
3245 // Only complete iopls that we created which are for TypeVirtual
3246 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3247 #if IOTRACKING
3248 if ((kIOTracking & gIOKitDebug)
3249 //&& !(_flags & kIOMemoryAutoPrepare)
3250 )
3251 {
3252 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3253 }
3254 #endif
3255 for (ind = 0; ind < count; ind++)
3256 if (ioplList[ind].fIOPL) {
3257 if (dataP->fCompletionError)
3258 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3259 else
3260 upl_commit(ioplList[ind].fIOPL, 0, 0);
3261 upl_deallocate(ioplList[ind].fIOPL);
3262 }
3263 } else if (kIOMemoryTypeUPL == type) {
3264 upl_set_referenced(ioplList[0].fIOPL, false);
3265 }
3266
3267 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3268
3269 dataP->fPreparationID = kIOPreparationIDUnprepared;
3270 }
3271 }
3272 }
3273
3274 if (_prepareLock)
3275 IOLockUnlock(_prepareLock);
3276
3277 return kIOReturnSuccess;
3278 }
3279
3280 IOReturn IOGeneralMemoryDescriptor::doMap(
3281 vm_map_t __addressMap,
3282 IOVirtualAddress * __address,
3283 IOOptionBits options,
3284 IOByteCount __offset,
3285 IOByteCount __length )
3286 {
3287 #ifndef __LP64__
3288 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3289 #endif /* !__LP64__ */
3290
3291 kern_return_t err;
3292
3293 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3294 mach_vm_size_t offset = mapping->fOffset + __offset;
3295 mach_vm_size_t length = mapping->fLength;
3296
3297 IOOptionBits type = _flags & kIOMemoryTypeMask;
3298 Ranges vec = _ranges;
3299
3300 mach_vm_address_t range0Addr = 0;
3301 mach_vm_size_t range0Len = 0;
3302
3303 if ((offset >= _length) || ((offset + length) > _length))
3304 return( kIOReturnBadArgument );
3305
3306 if (vec.v)
3307 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3308
3309 // mapping source == dest? (could be much better)
3310 if (_task
3311 && (mapping->fAddressTask == _task)
3312 && (mapping->fAddressMap == get_task_map(_task))
3313 && (options & kIOMapAnywhere)
3314 && (1 == _rangesCount)
3315 && (0 == offset)
3316 && range0Addr
3317 && (length <= range0Len))
3318 {
3319 mapping->fAddress = range0Addr;
3320 mapping->fOptions |= kIOMapStatic;
3321
3322 return( kIOReturnSuccess );
3323 }
3324
3325 if (!_memRef)
3326 {
3327 IOOptionBits createOptions = 0;
3328 if (!(kIOMapReadOnly & options))
3329 {
3330 createOptions |= kIOMemoryReferenceWrite;
3331 #if DEVELOPMENT || DEBUG
3332 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3333 {
3334 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3335 }
3336 #endif
3337 }
3338 err = memoryReferenceCreate(createOptions, &_memRef);
3339 if (kIOReturnSuccess != err) return (err);
3340 }
3341
3342 memory_object_t pager;
3343 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3344
3345 // <upl_transpose //
3346 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3347 {
3348 do
3349 {
3350 upl_t redirUPL2;
3351 upl_size_t size;
3352 upl_control_flags_t flags;
3353 unsigned int lock_count;
3354
3355 if (!_memRef || (1 != _memRef->count))
3356 {
3357 err = kIOReturnNotReadable;
3358 break;
3359 }
3360
3361 size = round_page(mapping->fLength);
3362 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3363 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
3364 | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
3365
3366 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3367 NULL, NULL,
3368 &flags))
3369 redirUPL2 = NULL;
3370
3371 for (lock_count = 0;
3372 IORecursiveLockHaveLock(gIOMemoryLock);
3373 lock_count++) {
3374 UNLOCK;
3375 }
3376 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3377 for (;
3378 lock_count;
3379 lock_count--) {
3380 LOCK;
3381 }
3382
3383 if (kIOReturnSuccess != err)
3384 {
3385 IOLog("upl_transpose(%x)\n", err);
3386 err = kIOReturnSuccess;
3387 }
3388
3389 if (redirUPL2)
3390 {
3391 upl_commit(redirUPL2, NULL, 0);
3392 upl_deallocate(redirUPL2);
3393 redirUPL2 = 0;
3394 }
3395 {
3396 // swap the memEntries since they now refer to different vm_objects
3397 IOMemoryReference * me = _memRef;
3398 _memRef = mapping->fMemory->_memRef;
3399 mapping->fMemory->_memRef = me;
3400 }
3401 if (pager)
3402 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3403 }
3404 while (false);
3405 }
3406 // upl_transpose> //
3407 else
3408 {
3409 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3410 #if IOTRACKING
3411 if (err == KERN_SUCCESS) IOTrackingAdd(gIOMapTracking, &mapping->fTracking, length, false);
3412 #endif
3413 if ((err == KERN_SUCCESS) && pager)
3414 {
3415 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3416
3417 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3418 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3419 {
3420 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3421 }
3422 }
3423 }
3424
3425 return (err);
3426 }
3427
3428 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3429 vm_map_t addressMap,
3430 IOVirtualAddress __address,
3431 IOByteCount __length )
3432 {
3433 return (super::doUnmap(addressMap, __address, __length));
3434 }
3435
3436 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3437
3438 #undef super
3439 #define super OSObject
3440
3441 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3442
3443 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3444 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3445 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3446 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3447 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3448 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3449 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3450 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3451
3452 /* ex-inline function implementation */
3453 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3454 { return( getPhysicalSegment( 0, 0 )); }
3455
3456 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3457
3458 bool IOMemoryMap::init(
3459 task_t intoTask,
3460 mach_vm_address_t toAddress,
3461 IOOptionBits _options,
3462 mach_vm_size_t _offset,
3463 mach_vm_size_t _length )
3464 {
3465 if (!intoTask)
3466 return( false);
3467
3468 if (!super::init())
3469 return(false);
3470
3471 fAddressMap = get_task_map(intoTask);
3472 if (!fAddressMap)
3473 return(false);
3474 vm_map_reference(fAddressMap);
3475
3476 fAddressTask = intoTask;
3477 fOptions = _options;
3478 fLength = _length;
3479 fOffset = _offset;
3480 fAddress = toAddress;
3481
3482 return (true);
3483 }
3484
3485 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3486 {
3487 if (!_memory)
3488 return(false);
3489
3490 if (!fSuperMap)
3491 {
3492 if( (_offset + fLength) > _memory->getLength())
3493 return( false);
3494 fOffset = _offset;
3495 }
3496
3497 _memory->retain();
3498 if (fMemory)
3499 {
3500 if (fMemory != _memory)
3501 fMemory->removeMapping(this);
3502 fMemory->release();
3503 }
3504 fMemory = _memory;
3505
3506 return( true );
3507 }
3508
3509 IOReturn IOMemoryDescriptor::doMap(
3510 vm_map_t __addressMap,
3511 IOVirtualAddress * __address,
3512 IOOptionBits options,
3513 IOByteCount __offset,
3514 IOByteCount __length )
3515 {
3516 return (kIOReturnUnsupported);
3517 }
3518
3519 IOReturn IOMemoryDescriptor::handleFault(
3520 void * _pager,
3521 mach_vm_size_t sourceOffset,
3522 mach_vm_size_t length)
3523 {
3524 if( kIOMemoryRedirected & _flags)
3525 {
3526 #if DEBUG
3527 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3528 #endif
3529 do {
3530 SLEEP;
3531 } while( kIOMemoryRedirected & _flags );
3532 }
3533 return (kIOReturnSuccess);
3534 }
3535
3536 IOReturn IOMemoryDescriptor::populateDevicePager(
3537 void * _pager,
3538 vm_map_t addressMap,
3539 mach_vm_address_t address,
3540 mach_vm_size_t sourceOffset,
3541 mach_vm_size_t length,
3542 IOOptionBits options )
3543 {
3544 IOReturn err = kIOReturnSuccess;
3545 memory_object_t pager = (memory_object_t) _pager;
3546 mach_vm_size_t size;
3547 mach_vm_size_t bytes;
3548 mach_vm_size_t page;
3549 mach_vm_size_t pageOffset;
3550 mach_vm_size_t pagerOffset;
3551 IOPhysicalLength segLen, chunk;
3552 addr64_t physAddr;
3553 IOOptionBits type;
3554
3555 type = _flags & kIOMemoryTypeMask;
3556
3557 if (reserved->dp.pagerContig)
3558 {
3559 sourceOffset = 0;
3560 pagerOffset = 0;
3561 }
3562
3563 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3564 assert( physAddr );
3565 pageOffset = physAddr - trunc_page_64( physAddr );
3566 pagerOffset = sourceOffset;
3567
3568 size = length + pageOffset;
3569 physAddr -= pageOffset;
3570
3571 segLen += pageOffset;
3572 bytes = size;
3573 do
3574 {
3575 // in the middle of the loop only map whole pages
3576 if( segLen >= bytes) segLen = bytes;
3577 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3578 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3579
3580 if (kIOReturnSuccess != err) break;
3581
3582 #if DEBUG || DEVELOPMENT
3583 if ((kIOMemoryTypeUPL != type)
3584 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3585 {
3586 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3587 }
3588 #endif /* DEBUG || DEVELOPMENT */
3589
3590 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3591 for (page = 0;
3592 (page < segLen) && (KERN_SUCCESS == err);
3593 page += chunk)
3594 {
3595 err = device_pager_populate_object(pager, pagerOffset,
3596 (ppnum_t)(atop_64(physAddr + page)), chunk);
3597 pagerOffset += chunk;
3598 }
3599
3600 assert (KERN_SUCCESS == err);
3601 if (err) break;
3602
3603 // This call to vm_fault causes an early pmap level resolution
3604 // of the mappings created above for kernel mappings, since
3605 // faulting in later can't take place from interrupt level.
3606 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3607 {
3608 vm_fault(addressMap,
3609 (vm_map_offset_t)trunc_page_64(address),
3610 VM_PROT_READ|VM_PROT_WRITE,
3611 FALSE, THREAD_UNINT, NULL,
3612 (vm_map_offset_t)0);
3613 }
3614
3615 sourceOffset += segLen - pageOffset;
3616 address += segLen;
3617 bytes -= segLen;
3618 pageOffset = 0;
3619 }
3620 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3621
3622 if (bytes)
3623 err = kIOReturnBadArgument;
3624
3625 return (err);
3626 }
3627
3628 IOReturn IOMemoryDescriptor::doUnmap(
3629 vm_map_t addressMap,
3630 IOVirtualAddress __address,
3631 IOByteCount __length )
3632 {
3633 IOReturn err;
3634 IOMemoryMap * mapping;
3635 mach_vm_address_t address;
3636 mach_vm_size_t length;
3637
3638 if (__length) panic("doUnmap");
3639
3640 mapping = (IOMemoryMap *) __address;
3641 addressMap = mapping->fAddressMap;
3642 address = mapping->fAddress;
3643 length = mapping->fLength;
3644
3645 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3646 else
3647 {
3648 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3649 addressMap = IOPageableMapForAddress( address );
3650 #if DEBUG
3651 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3652 addressMap, address, length );
3653 #endif
3654 err = mach_vm_deallocate( addressMap, address, length );
3655 }
3656
3657 #if IOTRACKING
3658 IOTrackingRemove(gIOMapTracking, &mapping->fTracking, length);
3659 #endif
3660
3661 return (err);
3662 }
3663
3664 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3665 {
3666 IOReturn err = kIOReturnSuccess;
3667 IOMemoryMap * mapping = 0;
3668 OSIterator * iter;
3669
3670 LOCK;
3671
3672 if( doRedirect)
3673 _flags |= kIOMemoryRedirected;
3674 else
3675 _flags &= ~kIOMemoryRedirected;
3676
3677 do {
3678 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3679
3680 memory_object_t pager;
3681
3682 if( reserved)
3683 pager = (memory_object_t) reserved->dp.devicePager;
3684 else
3685 pager = MACH_PORT_NULL;
3686
3687 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3688 {
3689 mapping->redirect( safeTask, doRedirect );
3690 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3691 {
3692 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3693 }
3694 }
3695
3696 iter->release();
3697 }
3698 } while( false );
3699
3700 if (!doRedirect)
3701 {
3702 WAKEUP;
3703 }
3704
3705 UNLOCK;
3706
3707 #ifndef __LP64__
3708 // temporary binary compatibility
3709 IOSubMemoryDescriptor * subMem;
3710 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3711 err = subMem->redirect( safeTask, doRedirect );
3712 else
3713 err = kIOReturnSuccess;
3714 #endif /* !__LP64__ */
3715
3716 return( err );
3717 }
3718
3719 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3720 {
3721 IOReturn err = kIOReturnSuccess;
3722
3723 if( fSuperMap) {
3724 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3725 } else {
3726
3727 LOCK;
3728
3729 do
3730 {
3731 if (!fAddress)
3732 break;
3733 if (!fAddressMap)
3734 break;
3735
3736 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3737 && (0 == (fOptions & kIOMapStatic)))
3738 {
3739 IOUnmapPages( fAddressMap, fAddress, fLength );
3740 err = kIOReturnSuccess;
3741 #if DEBUG
3742 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3743 #endif
3744 }
3745 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3746 {
3747 IOOptionBits newMode;
3748 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3749 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3750 }
3751 }
3752 while (false);
3753 UNLOCK;
3754 }
3755
3756 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3757 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3758 && safeTask
3759 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3760 fMemory->redirect(safeTask, doRedirect);
3761
3762 return( err );
3763 }
3764
3765 IOReturn IOMemoryMap::unmap( void )
3766 {
3767 IOReturn err;
3768
3769 LOCK;
3770
3771 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3772 && (0 == (kIOMapStatic & fOptions))) {
3773
3774 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3775
3776 } else
3777 err = kIOReturnSuccess;
3778
3779 if (fAddressMap)
3780 {
3781 vm_map_deallocate(fAddressMap);
3782 fAddressMap = 0;
3783 }
3784
3785 fAddress = 0;
3786
3787 UNLOCK;
3788
3789 return( err );
3790 }
3791
3792 void IOMemoryMap::taskDied( void )
3793 {
3794 LOCK;
3795 if (fUserClientUnmap) unmap();
3796 #if IOTRACKING
3797 else IOTrackingRemove(gIOMapTracking, &fTracking, fLength);
3798 #endif
3799
3800 if( fAddressMap) {
3801 vm_map_deallocate(fAddressMap);
3802 fAddressMap = 0;
3803 }
3804 fAddressTask = 0;
3805 fAddress = 0;
3806 UNLOCK;
3807 }
3808
3809 IOReturn IOMemoryMap::userClientUnmap( void )
3810 {
3811 fUserClientUnmap = true;
3812 return (kIOReturnSuccess);
3813 }
3814
3815 // Overload the release mechanism. All mappings must be a member
3816 // of a memory descriptor's _mappings set. This means that we
3817 // always have 2 references on a mapping. When either of these
3818 // references is released we need to free ourselves.
3819 void IOMemoryMap::taggedRelease(const void *tag) const
3820 {
3821 LOCK;
3822 super::taggedRelease(tag, 2);
3823 UNLOCK;
3824 }
3825
3826 void IOMemoryMap::free()
3827 {
3828 unmap();
3829
3830 if (fMemory)
3831 {
3832 LOCK;
3833 fMemory->removeMapping(this);
3834 UNLOCK;
3835 fMemory->release();
3836 }
3837
3838 if (fOwner && (fOwner != fMemory))
3839 {
3840 LOCK;
3841 fOwner->removeMapping(this);
3842 UNLOCK;
3843 }
3844
3845 if (fSuperMap)
3846 fSuperMap->release();
3847
3848 if (fRedirUPL) {
3849 upl_commit(fRedirUPL, NULL, 0);
3850 upl_deallocate(fRedirUPL);
3851 }
3852
3853 super::free();
3854 }
3855
3856 IOByteCount IOMemoryMap::getLength()
3857 {
3858 return( fLength );
3859 }
3860
3861 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3862 {
3863 #ifndef __LP64__
3864 if (fSuperMap)
3865 fSuperMap->getVirtualAddress();
3866 else if (fAddressMap
3867 && vm_map_is_64bit(fAddressMap)
3868 && (sizeof(IOVirtualAddress) < 8))
3869 {
3870 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3871 }
3872 #endif /* !__LP64__ */
3873
3874 return (fAddress);
3875 }
3876
3877 #ifndef __LP64__
3878 mach_vm_address_t IOMemoryMap::getAddress()
3879 {
3880 return( fAddress);
3881 }
3882
3883 mach_vm_size_t IOMemoryMap::getSize()
3884 {
3885 return( fLength );
3886 }
3887 #endif /* !__LP64__ */
3888
3889
3890 task_t IOMemoryMap::getAddressTask()
3891 {
3892 if( fSuperMap)
3893 return( fSuperMap->getAddressTask());
3894 else
3895 return( fAddressTask);
3896 }
3897
3898 IOOptionBits IOMemoryMap::getMapOptions()
3899 {
3900 return( fOptions);
3901 }
3902
3903 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3904 {
3905 return( fMemory );
3906 }
3907
3908 IOMemoryMap * IOMemoryMap::copyCompatible(
3909 IOMemoryMap * newMapping )
3910 {
3911 task_t task = newMapping->getAddressTask();
3912 mach_vm_address_t toAddress = newMapping->fAddress;
3913 IOOptionBits _options = newMapping->fOptions;
3914 mach_vm_size_t _offset = newMapping->fOffset;
3915 mach_vm_size_t _length = newMapping->fLength;
3916
3917 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3918 return( 0 );
3919 if( (fOptions ^ _options) & kIOMapReadOnly)
3920 return( 0 );
3921 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3922 && ((fOptions ^ _options) & kIOMapCacheMask))
3923 return( 0 );
3924
3925 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3926 return( 0 );
3927
3928 if( _offset < fOffset)
3929 return( 0 );
3930
3931 _offset -= fOffset;
3932
3933 if( (_offset + _length) > fLength)
3934 return( 0 );
3935
3936 retain();
3937 if( (fLength == _length) && (!_offset))
3938 {
3939 newMapping = this;
3940 }
3941 else
3942 {
3943 newMapping->fSuperMap = this;
3944 newMapping->fOffset = fOffset + _offset;
3945 newMapping->fAddress = fAddress + _offset;
3946 }
3947
3948 return( newMapping );
3949 }
3950
3951 IOReturn IOMemoryMap::wireRange(
3952 uint32_t options,
3953 mach_vm_size_t offset,
3954 mach_vm_size_t length)
3955 {
3956 IOReturn kr;
3957 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3958 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3959 vm_prot_t prot;
3960
3961 prot = (kIODirectionOutIn & options);
3962 if (prot)
3963 {
3964 prot |= VM_PROT_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
3965 kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
3966 }
3967 else
3968 {
3969 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3970 }
3971
3972 return (kr);
3973 }
3974
3975
3976 IOPhysicalAddress
3977 #ifdef __LP64__
3978 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3979 #else /* !__LP64__ */
3980 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3981 #endif /* !__LP64__ */
3982 {
3983 IOPhysicalAddress address;
3984
3985 LOCK;
3986 #ifdef __LP64__
3987 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3988 #else /* !__LP64__ */
3989 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3990 #endif /* !__LP64__ */
3991 UNLOCK;
3992
3993 return( address );
3994 }
3995
3996 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3997
3998 #undef super
3999 #define super OSObject
4000
4001 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4002
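// initialize() runs once when the class is initialized at IOKit startup:
// it allocates the global recursive lock used by LOCK/UNLOCK and records
// the highest physical page number in gIOLastPage.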
4003 void IOMemoryDescriptor::initialize( void )
4004 {
4005 if( 0 == gIOMemoryLock)
4006 gIOMemoryLock = IORecursiveLockAlloc();
4007
4008 gIOLastPage = IOGetLastPageNumber();
4009 }
4010
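// free() drops the set of outstanding mappings and the lazily allocated
// reserved (expansion) data before handing off to OSObject::free().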
4011 void IOMemoryDescriptor::free( void )
4012 {
4013 if( _mappings) _mappings->release();
4014
4015 if (reserved)
4016 {
4017 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4018 reserved = NULL;
4019 }
4020 super::free();
4021 }
4022
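// setMapping() and map() are convenience wrappers around
// createMappingInTask(): setMapping() records an already existing (static)
// mapping at 'mapAddress' in 'intoTask', while map() maps the whole
// descriptor anywhere in the kernel task.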
4023 IOMemoryMap * IOMemoryDescriptor::setMapping(
4024 task_t intoTask,
4025 IOVirtualAddress mapAddress,
4026 IOOptionBits options )
4027 {
4028 return (createMappingInTask( intoTask, mapAddress,
4029 options | kIOMapStatic,
4030 0, getLength() ));
4031 }
4032
4033 IOMemoryMap * IOMemoryDescriptor::map(
4034 IOOptionBits options )
4035 {
4036 return (createMappingInTask( kernel_task, 0,
4037 options | kIOMapAnywhere,
4038 0, getLength() ));
4039 }
4040
4041 #ifndef __LP64__
4042 IOMemoryMap * IOMemoryDescriptor::map(
4043 task_t intoTask,
4044 IOVirtualAddress atAddress,
4045 IOOptionBits options,
4046 IOByteCount offset,
4047 IOByteCount length )
4048 {
4049 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4050 {
4051 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4052 return (0);
4053 }
4054
4055 return (createMappingInTask(intoTask, atAddress,
4056 options, offset, length));
4057 }
4058 #endif /* !__LP64__ */
4059
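// createMappingInTask() is the primary mapping entry point: it allocates an
// IOMemoryMap, initializes it with the requested task/address/options/range
// (a zero length means the whole descriptor), and hands it to makeMapping()
// with kIOMap64Bit to perform or share the actual VM mapping.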
4060 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4061 task_t intoTask,
4062 mach_vm_address_t atAddress,
4063 IOOptionBits options,
4064 mach_vm_size_t offset,
4065 mach_vm_size_t length)
4066 {
4067 IOMemoryMap * result;
4068 IOMemoryMap * mapping;
4069
4070 if (0 == length)
4071 length = getLength();
4072
4073 mapping = new IOMemoryMap;
4074
4075 if( mapping
4076 && !mapping->init( intoTask, atAddress,
4077 options, offset, length )) {
4078 mapping->release();
4079 mapping = 0;
4080 }
4081
4082 if (mapping)
4083 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4084 else
4085 result = 0;
4086
4087 #if DEBUG
4088 if (!result)
4089 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4090 this, atAddress, (uint32_t) options, offset, length);
4091 #endif
4092
4093 return (result);
4094 }
4095
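// The sketch below is illustrative only and not part of the original file:
// it shows how a driver (for example a user client) might call
// createMappingInTask() to map a descriptor into a client task.
// 'fSharedMD' and 'fClientTask' are hypothetical members of that driver.
#if 0
    IOMemoryMap * map = fSharedMD->createMappingInTask(
        fClientTask,
        0,                               // let the kernel choose the address
        kIOMapAnywhere | kIOMapReadOnly,
        0,                               // offset into the descriptor
        0);                              // zero length maps the whole descriptor
    if (map)
    {
        mach_vm_address_t userVA = map->getAddress();
        // hand userVA to the client; keep 'map' retained for as long as the
        // client may touch the memory, then release() it to unmap.
    }
#endif
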
4096 #ifndef __LP64__ // LP64 builds have only the 64-bit (mach_vm_size_t) variant of redirect()
4097 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4098 IOOptionBits options,
4099 IOByteCount offset)
4100 {
4101 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4102 }
4103 #endif
4104
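// redirect() re-points an already established mapping at new backing memory
// (used with kIOMapUnique/kIOMapReference remapping). While the backing
// store is switched it can block access to the pages through a temporary
// UPL taken with UPL_BLOCK_ACCESS; that UPL is committed and released once
// the new backing memory is in place.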
4105 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4106 IOOptionBits options,
4107 mach_vm_size_t offset)
4108 {
4109 IOReturn err = kIOReturnSuccess;
4110 IOMemoryDescriptor * physMem = 0;
4111
4112 LOCK;
4113
4114 if (fAddress && fAddressMap) do
4115 {
4116 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4117 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4118 {
4119 physMem = fMemory;
4120 physMem->retain();
4121 }
4122
4123 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4124 {
4125 upl_size_t size = round_page(fLength);
4126 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4127 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
4128 | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
4129 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4130 NULL, NULL,
4131 &flags))
4132 fRedirUPL = 0;
4133
4134 if (physMem)
4135 {
4136 IOUnmapPages( fAddressMap, fAddress, fLength );
4137 if ((false))	// disabled: this branch is never taken
4138 physMem->redirect(0, true);
4139 }
4140 }
4141
4142 if (newBackingMemory)
4143 {
4144 if (newBackingMemory != fMemory)
4145 {
4146 fOffset = 0;
4147 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4148 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4149 offset, fLength))
4150 err = kIOReturnError;
4151 }
4152 if (fRedirUPL)
4153 {
4154 upl_commit(fRedirUPL, NULL, 0);
4155 upl_deallocate(fRedirUPL);
4156 fRedirUPL = 0;
4157 }
4158 if ((false) && physMem)	// disabled: this branch is never taken
4159 physMem->redirect(0, false);
4160 }
4161 }
4162 while (false);
4163
4164 UNLOCK;
4165
4166 if (physMem)
4167 physMem->release();
4168
4169 return (err);
4170 }
4171
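// makeMapping() finishes the work started by createMappingInTask(). In the
// kIOMap64Bit form '__address' actually carries the freshly initialized
// IOMemoryMap. Static mappings are simply recorded; kIOMapUnique requests
// on physical descriptors substitute a temporary physical-range descriptor;
// otherwise an existing compatible mapping is reused via copyCompatible(),
// and failing that doMap() performs the actual VM mapping. Successful
// mappings are added to the owning descriptor's _mappings set.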
4172 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4173 IOMemoryDescriptor * owner,
4174 task_t __intoTask,
4175 IOVirtualAddress __address,
4176 IOOptionBits options,
4177 IOByteCount __offset,
4178 IOByteCount __length )
4179 {
4180 #ifndef __LP64__
4181 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4182 #endif /* !__LP64__ */
4183
4184 IOMemoryDescriptor * mapDesc = 0;
4185 IOMemoryMap * result = 0;
4186 OSIterator * iter;
4187
4188 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4189 mach_vm_size_t offset = mapping->fOffset + __offset;
4190 mach_vm_size_t length = mapping->fLength;
4191
4192 mapping->fOffset = offset;
4193
4194 LOCK;
4195
4196 do
4197 {
4198 if (kIOMapStatic & options)
4199 {
4200 result = mapping;
4201 addMapping(mapping);
4202 mapping->setMemoryDescriptor(this, 0);
4203 continue;
4204 }
4205
4206 if (kIOMapUnique & options)
4207 {
4208 addr64_t phys;
4209 IOByteCount physLen;
4210
4211 // if (owner != this) continue;
4212
4213 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4214 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4215 {
4216 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4217 if (!phys || (physLen < length))
4218 continue;
4219
4220 mapDesc = IOMemoryDescriptor::withAddressRange(
4221 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4222 if (!mapDesc)
4223 continue;
4224 offset = 0;
4225 mapping->fOffset = offset;
4226 }
4227 }
4228 else
4229 {
4230 // look for a compatible existing mapping
4231 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4232 {
4233 IOMemoryMap * lookMapping;
4234 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4235 {
4236 if ((result = lookMapping->copyCompatible(mapping)))
4237 {
4238 addMapping(result);
4239 result->setMemoryDescriptor(this, offset);
4240 break;
4241 }
4242 }
4243 iter->release();
4244 }
4245 if (result || (options & kIOMapReference))
4246 {
4247 if (result != mapping)
4248 {
4249 mapping->release();
4250 mapping = NULL;
4251 }
4252 continue;
4253 }
4254 }
4255
4256 if (!mapDesc)
4257 {
4258 mapDesc = this;
4259 mapDesc->retain();
4260 }
4261 IOReturn
4262 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4263 if (kIOReturnSuccess == kr)
4264 {
4265 result = mapping;
4266 mapDesc->addMapping(result);
4267 result->setMemoryDescriptor(mapDesc, offset);
4268 }
4269 else
4270 {
4271 mapping->release();
4272 mapping = NULL;
4273 }
4274 }
4275 while( false );
4276
4277 UNLOCK;
4278
4279 if (mapDesc)
4280 mapDesc->release();
4281
4282 return (result);
4283 }
4284
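// addMapping()/removeMapping() maintain the descriptor's _mappings set,
// which tracks the live IOMemoryMap objects created against it; the set is
// allocated lazily on the first mapping.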
4285 void IOMemoryDescriptor::addMapping(
4286 IOMemoryMap * mapping )
4287 {
4288 if( mapping)
4289 {
4290 if( 0 == _mappings)
4291 _mappings = OSSet::withCapacity(1);
4292 if( _mappings )
4293 _mappings->setObject( mapping );
4294 }
4295 }
4296
4297 void IOMemoryDescriptor::removeMapping(
4298 IOMemoryMap * mapping )
4299 {
4300 if( _mappings)
4301 _mappings->removeObject( mapping);
4302 }
4303
4304 #ifndef __LP64__
4305 // obsolete initializers
4306 // - initWithOptions is the designated initializer
4307 bool
4308 IOMemoryDescriptor::initWithAddress(void * address,
4309 IOByteCount length,
4310 IODirection direction)
4311 {
4312 return( false );
4313 }
4314
4315 bool
4316 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4317 IOByteCount length,
4318 IODirection direction,
4319 task_t task)
4320 {
4321 return( false );
4322 }
4323
4324 bool
4325 IOMemoryDescriptor::initWithPhysicalAddress(
4326 IOPhysicalAddress address,
4327 IOByteCount length,
4328 IODirection direction )
4329 {
4330 return( false );
4331 }
4332
4333 bool
4334 IOMemoryDescriptor::initWithRanges(
4335 IOVirtualRange * ranges,
4336 UInt32 withCount,
4337 IODirection direction,
4338 task_t task,
4339 bool asReference)
4340 {
4341 return( false );
4342 }
4343
4344 bool
4345 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4346 UInt32 withCount,
4347 IODirection direction,
4348 bool asReference)
4349 {
4350 return( false );
4351 }
4352
4353 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4354 IOByteCount * lengthOfSegment)
4355 {
4356 return( 0 );
4357 }
4358 #endif /* !__LP64__ */
4359
4360 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4361
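// serialize() publishes the descriptor's ranges as an OSArray of
// { "address", "length" } dictionaries. The volatile range list is copied
// into a temporary buffer under the global lock so that no allocations
// happen while the lock is held. The sketch below is illustrative only
// ('md' is a hypothetical descriptor owned by the caller) and shows how the
// serialized form could be produced by hand:
#if 0
    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (s && md->serialize(s))
        IOLog("%s\n", s->text());
    if (s)
        s->release();
#endif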
4362 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4363 {
4364 OSSymbol const *keys[2];
4365 OSObject *values[2];
4366 OSArray * array;
4367
4368 struct SerData {
4369 user_addr_t address;
4370 user_size_t length;
4371 } *vcopy;
4372 unsigned int index, nRanges;
4373 bool result;
4374
4375 IOOptionBits type = _flags & kIOMemoryTypeMask;
4376
4377 if (s == NULL) return false;
4378
4379 array = OSArray::withCapacity(4);
4380 if (!array) return (false);
4381
4382 nRanges = _rangesCount;
4383 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4384 if (vcopy == 0) { array->release(); return false; } // don't leak 'array' on allocation failure
4385
4386 keys[0] = OSSymbol::withCString("address");
4387 keys[1] = OSSymbol::withCString("length");
4388
4389 result = false;
4390 values[0] = values[1] = 0;
4391
4392 // From this point on, error paths 'goto bail' to release whatever has been allocated.
4393
4394 // Copy the volatile data so we don't have to allocate memory
4395 // while the lock is held.
4396 LOCK;
4397 if (nRanges == _rangesCount) {
4398 Ranges vec = _ranges;
4399 for (index = 0; index < nRanges; index++) {
4400 mach_vm_address_t addr; mach_vm_size_t len;
4401 getAddrLenForInd(addr, len, type, vec, index);
4402 vcopy[index].address = addr;
4403 vcopy[index].length = len;
4404 }
4405 } else {
4406 // The descriptor changed out from under us. Give up.
4407 UNLOCK;
4408 result = false;
4409 goto bail;
4410 }
4411 UNLOCK;
4412
4413 for (index = 0; index < nRanges; index++)
4414 {
4415 user_addr_t addr = vcopy[index].address;
4416 IOByteCount len = (IOByteCount) vcopy[index].length;
4417 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4418 if (values[0] == 0) {
4419 result = false;
4420 goto bail;
4421 }
4422 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4423 if (values[1] == 0) {
4424 result = false;
4425 goto bail;
4426 }
4427 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4428 if (dict == 0) {
4429 result = false;
4430 goto bail;
4431 }
4432 array->setObject(dict);
4433 dict->release();
4434 values[0]->release();
4435 values[1]->release();
4436 values[0] = values[1] = 0;
4437 }
4438
4439 result = array->serialize(s);
4440
4441 bail:
4442 if (array)
4443 array->release();
4444 if (values[0])
4445 values[0]->release();
4446 if (values[1])
4447 values[1]->release();
4448 if (keys[0])
4449 keys[0]->release();
4450 if (keys[1])
4451 keys[1]->release();
4452 if (vcopy)
4453 IOFree(vcopy, sizeof(SerData) * nRanges);
4454
4455 return result;
4456 }
4457
4458 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4459
4460 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4461 #ifdef __LP64__
4462 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4463 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4464 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4465 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4466 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4467 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4468 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4469 #else /* !__LP64__ */
4470 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4471 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4472 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4473 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4474 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4475 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4476 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4477 #endif /* !__LP64__ */
4478 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4479 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4480 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4481 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4482 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4483 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4484 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4485 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4486
4487 /* ex-inline function implementation */
4488 IOPhysicalAddress
4489 IOMemoryDescriptor::getPhysicalAddress()
4490 { return( getPhysicalSegment( 0, 0 )); }
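
// Note: getPhysicalAddress() (formerly inline) reports only the physical
// address of the first segment, i.e. getPhysicalSegment(0, 0); callers that
// also need the extent of the contiguous run should call
// getPhysicalSegment() directly so they receive the segment length as well.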