// apple/xnu xnu-3248.50.21 - iokit/Kernel/IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IODMACommand.h>
38 #include <IOKit/IOKitKeysPrivate.h>
39
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #include <IOKit/IOMultiMemoryDescriptor.h>
42
43 #include <IOKit/IOKitDebug.h>
44 #include <libkern/OSDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53
54 #include <sys/uio.h>
55
56 __BEGIN_DECLS
57 #include <vm/pmap.h>
58 #include <vm/vm_pageout.h>
59 #include <mach/memory_object_types.h>
60 #include <device/device_port.h>
61
62 #include <mach/vm_prot.h>
63 #include <mach/mach_vm.h>
64 #include <vm/vm_fault.h>
65 #include <vm/vm_protos.h>
66
67 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
68 extern void ipc_port_release_send(ipc_port_t port);
69
70 // osfmk/device/iokit_rpc.c
71 unsigned int IODefaultCacheBits(addr64_t pa);
72 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
73
74 __END_DECLS
75
76 #define kIOMapperWaitSystem ((IOMapper *) 1)
77
78 static IOMapper * gIOSystemMapper = NULL;
79
80 ppnum_t gIOLastPage;
81
82 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
83
84 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
85
86 #define super IOMemoryDescriptor
87
88 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
89
90 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
91
92 static IORecursiveLock * gIOMemoryLock;
93
94 #define LOCK IORecursiveLockLock( gIOMemoryLock)
95 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
96 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
97 #define WAKEUP \
98 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
99
100 #if 0
101 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
102 #else
103 #define DEBG(fmt, args...) {}
104 #endif
105
106 #define IOMD_DEBUG_DMAACTIVE 1
107
108 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
109
110 // Some data structures and accessor macros used by the initWithOptions
111 // function
112
113 enum ioPLBlockFlags {
114 kIOPLOnDevice = 0x00000001,
115 kIOPLExternUPL = 0x00000002,
116 };
117
118 struct IOMDPersistentInitData
119 {
120 const IOGeneralMemoryDescriptor * fMD;
121 IOMemoryReference * fMemRef;
122 };
123
124 struct ioPLBlock {
125 upl_t fIOPL;
126 vm_address_t fPageInfo; // Pointer to page list or index into it
127 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
128 ppnum_t fMappedPage; // Page number of first page in this iopl
129 unsigned int fPageOffset; // Offset within first page of iopl
130 unsigned int fFlags; // Flags
131 };
132
133 struct ioGMDData {
134 IOMapper * fMapper;
135 uint8_t fDMAMapNumAddressBits;
136 uint64_t fDMAMapAlignment;
137 uint64_t fMappedBase;
138 uint64_t fMappedLength;
139 uint64_t fPreparationID;
140 #if IOTRACKING
141 IOTracking fWireTracking;
142 #endif
143 unsigned int fPageCnt;
144 unsigned char fDiscontig:1;
145 unsigned char fCompletionError:1;
146 unsigned char _resv:6;
147 #if __LP64__
148 // align arrays to 8 bytes so following macros work
149 unsigned char fPad[3];
150 #endif
151 upl_page_info_t fPageList[1]; /* variable length */
152 ioPLBlock fBlocks[1]; /* variable length */
153 };
154
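// The ioGMDData header above sits at the start of a single OSData buffer
// (_memoryEntries). It is followed in the same allocation by fPageCnt
// upl_page_info_t entries (fPageList) and then by a variable number of
// ioPLBlock records. getIOPLList() points just past the page list,
// getNumIOPL() derives the block count from the OSData length, and
// computeDataSize(p, u) sizes the buffer for p pages and u UPL blocks.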
155 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
156 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
157 #define getNumIOPL(osd, d) \
158 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
159 #define getPageList(d) (&(d->fPageList[0]))
160 #define computeDataSize(p, u) \
161 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
162
163 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
164
165 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
166
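// Callbacks invoked on behalf of the device pager (set up with
// device_pager_setup() in memoryReferenceCreate() below):
// device_data_action() routes a pager request at [offset, offset + size)
// to the owning descriptor's handleFault(), holding the global lock so the
// descriptor cannot disappear underneath it; device_close() frees the
// IOMemoryDescriptorReserved block when the pager is torn down.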
167 extern "C" {
168
169 kern_return_t device_data_action(
170 uintptr_t device_handle,
171 ipc_port_t device_pager,
172 vm_prot_t protection,
173 vm_object_offset_t offset,
174 vm_size_t size)
175 {
176 kern_return_t kr;
177 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
178 IOMemoryDescriptor * memDesc;
179
180 LOCK;
181 memDesc = ref->dp.memory;
182 if( memDesc)
183 {
184 memDesc->retain();
185 kr = memDesc->handleFault(device_pager, offset, size);
186 memDesc->release();
187 }
188 else
189 kr = KERN_ABORTED;
190 UNLOCK;
191
192 return( kr );
193 }
194
195 kern_return_t device_close(
196 uintptr_t device_handle)
197 {
198 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
199
200 IODelete( ref, IOMemoryDescriptorReserved, 1 );
201
202 return( kIOReturnSuccess );
203 }
204 }; // end extern "C"
205
206 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
207
208 // Note this inline function uses C++ reference arguments to return values.
209 // Pointers are therefore never passed, so there is no need to check for
210 // NULL, since a NULL reference is illegal.
211 static inline void
212 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
213 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
214 {
215 assert(kIOMemoryTypeUIO == type
216 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
217 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
218 if (kIOMemoryTypeUIO == type) {
219 user_size_t us;
220 user_addr_t ad;
221 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
222 }
223 #ifndef __LP64__
224 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
225 IOAddressRange cur = r.v64[ind];
226 addr = cur.address;
227 len = cur.length;
228 }
229 #endif /* !__LP64__ */
230 else {
231 IOVirtualRange cur = r.v[ind];
232 addr = cur.address;
233 len = cur.length;
234 }
235 }
236
237 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
238
239 static IOReturn
240 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
241 {
242 IOReturn err = kIOReturnSuccess;
243
244 *control = VM_PURGABLE_SET_STATE;
245
246 enum { kIOMemoryPurgeableControlMask = 15 };
247
248 switch (kIOMemoryPurgeableControlMask & newState)
249 {
250 case kIOMemoryPurgeableKeepCurrent:
251 *control = VM_PURGABLE_GET_STATE;
252 break;
253
254 case kIOMemoryPurgeableNonVolatile:
255 *state = VM_PURGABLE_NONVOLATILE;
256 break;
257 case kIOMemoryPurgeableVolatile:
258 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
259 break;
260 case kIOMemoryPurgeableEmpty:
261 *state = VM_PURGABLE_EMPTY;
262 break;
263 default:
264 err = kIOReturnBadArgument;
265 break;
266 }
267 return (err);
268 }
269
270 static IOReturn
271 purgeableStateBits(int * state)
272 {
273 IOReturn err = kIOReturnSuccess;
274
275 switch (VM_PURGABLE_STATE_MASK & *state)
276 {
277 case VM_PURGABLE_NONVOLATILE:
278 *state = kIOMemoryPurgeableNonVolatile;
279 break;
280 case VM_PURGABLE_VOLATILE:
281 *state = kIOMemoryPurgeableVolatile;
282 break;
283 case VM_PURGABLE_EMPTY:
284 *state = kIOMemoryPurgeableEmpty;
285 break;
286 default:
287 *state = kIOMemoryPurgeableNonVolatile;
288 err = kIOReturnNotReady;
289 break;
290 }
291 return (err);
292 }
293
294
295 static vm_prot_t
296 vmProtForCacheMode(IOOptionBits cacheMode)
297 {
298 vm_prot_t prot = 0;
299 switch (cacheMode)
300 {
301 case kIOInhibitCache:
302 SET_MAP_MEM(MAP_MEM_IO, prot);
303 break;
304
305 case kIOWriteThruCache:
306 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
307 break;
308
309 case kIOWriteCombineCache:
310 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
311 break;
312
313 case kIOCopybackCache:
314 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
315 break;
316
317 case kIOCopybackInnerCache:
318 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
319 break;
320
321 case kIODefaultCache:
322 default:
323 SET_MAP_MEM(MAP_MEM_NOOP, prot);
324 break;
325 }
326
327 return (prot);
328 }
329
330 static unsigned int
331 pagerFlagsForCacheMode(IOOptionBits cacheMode)
332 {
333 unsigned int pagerFlags = 0;
334 switch (cacheMode)
335 {
336 case kIOInhibitCache:
337 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
338 break;
339
340 case kIOWriteThruCache:
341 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
342 break;
343
344 case kIOWriteCombineCache:
345 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
346 break;
347
348 case kIOCopybackCache:
349 pagerFlags = DEVICE_PAGER_COHERENT;
350 break;
351
352 case kIOCopybackInnerCache:
353 pagerFlags = DEVICE_PAGER_COHERENT;
354 break;
355
356 case kIODefaultCache:
357 default:
358 pagerFlags = -1U;
359 break;
360 }
361 return (pagerFlags);
362 }
363
364 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
365 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
366
367 struct IOMemoryEntry
368 {
369 ipc_port_t entry;
370 int64_t offset;
371 uint64_t size;
372 };
373
374 struct IOMemoryReference
375 {
376 volatile SInt32 refCount;
377 vm_prot_t prot;
378 uint32_t capacity;
379 uint32_t count;
380 IOMemoryEntry entries[0];
381 };
382
383 enum
384 {
385 kIOMemoryReferenceReuse = 0x00000001,
386 kIOMemoryReferenceWrite = 0x00000002,
387 };
388
389 SInt32 gIOMemoryReferenceCount;
390
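// memoryReferenceAlloc() both allocates and grows an IOMemoryReference:
// when 'realloc' is non-NULL the old block (header, refCount and entries)
// is copied into a new block sized for 'capacity' entries and then freed;
// otherwise a fresh reference is returned with refCount 1.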
391 IOMemoryReference *
392 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
393 {
394 IOMemoryReference * ref;
395 size_t newSize, oldSize, copySize;
396
397 newSize = (sizeof(IOMemoryReference)
398 - sizeof(ref->entries)
399 + capacity * sizeof(ref->entries[0]));
400 ref = (typeof(ref)) IOMalloc(newSize);
401 if (realloc)
402 {
403 oldSize = (sizeof(IOMemoryReference)
404 - sizeof(realloc->entries)
405 + realloc->capacity * sizeof(realloc->entries[0]));
406 copySize = oldSize;
407 if (copySize > newSize) copySize = newSize;
408 if (ref) bcopy(realloc, ref, copySize);
409 IOFree(realloc, oldSize);
410 }
411 else if (ref)
412 {
413 bzero(ref, sizeof(*ref));
414 ref->refCount = 1;
415 OSIncrementAtomic(&gIOMemoryReferenceCount);
416 }
417 if (!ref) return (0);
418 ref->capacity = capacity;
419 return (ref);
420 }
421
422 void
423 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
424 {
425 IOMemoryEntry * entries;
426 size_t size;
427
428 entries = ref->entries + ref->count;
429 while (entries > &ref->entries[0])
430 {
431 entries--;
432 ipc_port_release_send(entries->entry);
433 }
434 size = (sizeof(IOMemoryReference)
435 - sizeof(ref->entries)
436 + ref->capacity * sizeof(ref->entries[0]));
437 IOFree(ref, size);
438
439 OSDecrementAtomic(&gIOMemoryReferenceCount);
440 }
441
442 void
443 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
444 {
445 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
446 }
447
448
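// memoryReferenceCreate() builds the set of Mach named memory entries that
// back this descriptor. For task-backed descriptors it walks _ranges,
// coalescing virtually contiguous ranges and creating entries for each
// page-aligned span with mach_make_memory_entry_64(); for physical
// descriptors (_task == 0) it creates a device pager and wraps it in a
// single entry. With kIOMemoryReferenceReuse, if the new entries exactly
// match the existing _memRef, the duplicate is freed and _memRef is shared.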
449 IOReturn
450 IOGeneralMemoryDescriptor::memoryReferenceCreate(
451 IOOptionBits options,
452 IOMemoryReference ** reference)
453 {
454 enum { kCapacity = 4, kCapacityInc = 4 };
455
456 kern_return_t err;
457 IOMemoryReference * ref;
458 IOMemoryEntry * entries;
459 IOMemoryEntry * cloneEntries;
460 vm_map_t map;
461 ipc_port_t entry, cloneEntry;
462 vm_prot_t prot;
463 memory_object_size_t actualSize;
464 uint32_t rangeIdx;
465 uint32_t count;
466 mach_vm_address_t entryAddr, endAddr, entrySize;
467 mach_vm_size_t srcAddr, srcLen;
468 mach_vm_size_t nextAddr, nextLen;
469 mach_vm_size_t offset, remain;
470 IOByteCount physLen;
471 IOOptionBits type = (_flags & kIOMemoryTypeMask);
472 IOOptionBits cacheMode;
473 unsigned int pagerFlags;
474 vm_tag_t tag;
475
476 ref = memoryReferenceAlloc(kCapacity, NULL);
477 if (!ref) return (kIOReturnNoMemory);
478
479 tag = IOMemoryTag(kernel_map);
480 entries = &ref->entries[0];
481 count = 0;
482
483 offset = 0;
484 rangeIdx = 0;
485 if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
486 else
487 {
488 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
489 nextLen = physLen;
490
491 // default cache mode for physical
492 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
493 {
494 IOOptionBits mode;
495 pagerFlags = IODefaultCacheBits(nextAddr);
496 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
497 {
498 if (DEVICE_PAGER_GUARDED & pagerFlags)
499 mode = kIOInhibitCache;
500 else
501 mode = kIOWriteCombineCache;
502 }
503 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
504 mode = kIOWriteThruCache;
505 else
506 mode = kIOCopybackCache;
507 _flags |= (mode << kIOMemoryBufferCacheShift);
508 }
509 }
510
511 // cache mode & vm_prot
512 prot = VM_PROT_READ;
513 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
514 prot |= vmProtForCacheMode(cacheMode);
515 // VM system requires write access to change cache mode
516 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
517 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
518 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
519
520 if ((kIOMemoryReferenceReuse & options) && _memRef)
521 {
522 cloneEntries = &_memRef->entries[0];
523 prot |= MAP_MEM_NAMED_REUSE;
524 }
525
526 if (_task)
527 {
528 // virtual ranges
529
530 if (kIOMemoryBufferPageable & _flags)
531 {
532 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
533 prot |= MAP_MEM_NAMED_CREATE;
534 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
535 prot |= VM_PROT_WRITE;
536 map = NULL;
537 }
538 else map = get_task_map(_task);
539
540 remain = _length;
541 while (remain)
542 {
543 srcAddr = nextAddr;
544 srcLen = nextLen;
545 nextAddr = 0;
546 nextLen = 0;
547 // coalesce addr range
548 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
549 {
550 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
551 if ((srcAddr + srcLen) != nextAddr) break;
552 srcLen += nextLen;
553 }
554 entryAddr = trunc_page_64(srcAddr);
555 endAddr = round_page_64(srcAddr + srcLen);
556 do
557 {
558 entrySize = (endAddr - entryAddr);
559 if (!entrySize) break;
560 actualSize = entrySize;
561
562 cloneEntry = MACH_PORT_NULL;
563 if (MAP_MEM_NAMED_REUSE & prot)
564 {
565 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
566 else prot &= ~MAP_MEM_NAMED_REUSE;
567 }
568
569 err = mach_make_memory_entry_64(map,
570 &actualSize, entryAddr, prot, &entry, cloneEntry);
571
572 if (KERN_SUCCESS != err) break;
573 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
574
575 if (count >= ref->capacity)
576 {
577 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
578 entries = &ref->entries[count];
579 }
580 entries->entry = entry;
581 entries->size = actualSize;
582 entries->offset = offset + (entryAddr - srcAddr);
583 entryAddr += actualSize;
584 if (MAP_MEM_NAMED_REUSE & prot)
585 {
586 if ((cloneEntries->entry == entries->entry)
587 && (cloneEntries->size == entries->size)
588 && (cloneEntries->offset == entries->offset)) cloneEntries++;
589 else prot &= ~MAP_MEM_NAMED_REUSE;
590 }
591 entries++;
592 count++;
593 }
594 while (true);
595 offset += srcLen;
596 remain -= srcLen;
597 }
598 }
599 else
600 {
601 // _task == 0, physical or kIOMemoryTypeUPL
602 memory_object_t pager;
603 vm_size_t size = ptoa_32(_pages);
604
605 if (!getKernelReserved()) panic("getKernelReserved");
606
607 reserved->dp.pagerContig = (1 == _rangesCount);
608 reserved->dp.memory = this;
609
610 pagerFlags = pagerFlagsForCacheMode(cacheMode);
611 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
612 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
613
614 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
615 size, pagerFlags);
616 assert (pager);
617 if (!pager) err = kIOReturnVMError;
618 else
619 {
620 srcAddr = nextAddr;
621 entryAddr = trunc_page_64(srcAddr);
622 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
623 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
624 assert (KERN_SUCCESS == err);
625 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
626 else
627 {
628 reserved->dp.devicePager = pager;
629 entries->entry = entry;
630 entries->size = size;
631 entries->offset = offset + (entryAddr - srcAddr);
632 entries++;
633 count++;
634 }
635 }
636 }
637
638 ref->count = count;
639 ref->prot = prot;
640
641 if (KERN_SUCCESS == err)
642 {
643 if (MAP_MEM_NAMED_REUSE & prot)
644 {
645 memoryReferenceFree(ref);
646 OSIncrementAtomic(&_memRef->refCount);
647 ref = _memRef;
648 }
649 }
650 else
651 {
652 memoryReferenceFree(ref);
653 ref = NULL;
654 }
655
656 *reference = ref;
657
658 return (err);
659 }
660
661 kern_return_t
662 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
663 {
664 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
665 IOReturn err;
666 vm_map_offset_t addr;
667
668 addr = ref->mapped;
669
670 err = vm_map_enter_mem_object(map, &addr, ref->size,
671 (vm_map_offset_t) 0,
672 (((ref->options & kIOMapAnywhere)
673 ? VM_FLAGS_ANYWHERE
674 : VM_FLAGS_FIXED)
675 | VM_MAKE_TAG(ref->tag)),
676 IPC_PORT_NULL,
677 (memory_object_offset_t) 0,
678 false, /* copy */
679 ref->prot,
680 ref->prot,
681 VM_INHERIT_NONE);
682 if (KERN_SUCCESS == err)
683 {
684 ref->mapped = (mach_vm_address_t) addr;
685 ref->map = map;
686 }
687
688 return( err );
689 }
690
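// memoryReferenceMap() maps 'size' bytes of the reference, starting at
// 'inoffset', into 'map'. It walks the covering IOMemoryEntry records and
// enters each chunk with vm_map_enter_mem_object(), or with
// vm_map_enter_mem_object_prefault() when kIOMapPrefault is requested and
// the pages are already wired, honouring kIOMapAnywhere, kIOMapReadOnly,
// kIOMapOverwrite and the requested cache mode.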
691 IOReturn
692 IOGeneralMemoryDescriptor::memoryReferenceMap(
693 IOMemoryReference * ref,
694 vm_map_t map,
695 mach_vm_size_t inoffset,
696 mach_vm_size_t size,
697 IOOptionBits options,
698 mach_vm_address_t * inaddr)
699 {
700 IOReturn err;
701 int64_t offset = inoffset;
702 uint32_t rangeIdx, entryIdx;
703 vm_map_offset_t addr, mapAddr;
704 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
705
706 mach_vm_address_t nextAddr;
707 mach_vm_size_t nextLen;
708 IOByteCount physLen;
709 IOMemoryEntry * entry;
710 vm_prot_t prot, memEntryCacheMode;
711 IOOptionBits type;
712 IOOptionBits cacheMode;
713 vm_tag_t tag;
714
715 /*
716 * For the kIOMapPrefault option.
717 */
718 upl_page_info_t *pageList = NULL;
719 UInt currentPageIndex = 0;
720
721 type = _flags & kIOMemoryTypeMask;
722 prot = VM_PROT_READ;
723 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
724 prot &= ref->prot;
725
726 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
727 if (kIODefaultCache != cacheMode)
728 {
729 // VM system requires write access to update named entry cache mode
730 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
731 }
732
733 tag = IOMemoryTag(map);
734
735 if (_task)
736 {
737 // Find first range for offset
738 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
739 {
740 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
741 if (remain < nextLen) break;
742 remain -= nextLen;
743 }
744 }
745 else
746 {
747 rangeIdx = 0;
748 remain = 0;
749 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
750 nextLen = size;
751 }
752
753 assert(remain < nextLen);
754 if (remain >= nextLen) return (kIOReturnBadArgument);
755
756 nextAddr += remain;
757 nextLen -= remain;
758 pageOffset = (page_mask & nextAddr);
759 addr = 0;
760 if (!(options & kIOMapAnywhere))
761 {
762 addr = *inaddr;
763 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
764 addr -= pageOffset;
765 }
766
767 // find first entry for offset
768 for (entryIdx = 0;
769 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
770 entryIdx++) {}
771 entryIdx--;
772 entry = &ref->entries[entryIdx];
773
774 // allocate VM
775 size = round_page_64(size + pageOffset);
776 if (kIOMapOverwrite & options)
777 {
778 if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
779 {
780 map = IOPageableMapForAddress(addr);
781 }
782 err = KERN_SUCCESS;
783 }
784 else
785 {
786 IOMemoryDescriptorMapAllocRef ref;
787 ref.map = map;
788 ref.tag = tag;
789 ref.options = options;
790 ref.size = size;
791 ref.prot = prot;
792 if (options & kIOMapAnywhere)
793 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
794 ref.mapped = 0;
795 else
796 ref.mapped = addr;
797 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
798 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
799 else
800 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
801 if (KERN_SUCCESS == err)
802 {
803 addr = ref.mapped;
804 map = ref.map;
805 }
806 }
807
808 /*
809 * Prefaulting is only possible if we wired the memory earlier. Check the
810 * memory type, and the underlying data.
811 */
812 if (options & kIOMapPrefault)
813 {
814 /*
815 * The memory must have been wired by calling ::prepare(), otherwise
816 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
817 */
818 assert(map != kernel_map);
819 assert(_wireCount != 0);
820 assert(_memoryEntries != NULL);
821 if ((map == kernel_map) ||
822 (_wireCount == 0) ||
823 (_memoryEntries == NULL))
824 {
825 return kIOReturnBadArgument;
826 }
827
828 // Get the page list.
829 ioGMDData* dataP = getDataP(_memoryEntries);
830 ioPLBlock const* ioplList = getIOPLList(dataP);
831 pageList = getPageList(dataP);
832
833 // Get the number of IOPLs.
834 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
835
836 /*
837      * Scan through the IOPL info blocks, looking for the first block containing
838      * the offset. The search runs one block past it, so we step back to the
839      * right block at the end.
840 */
841 UInt ioplIndex = 0;
842 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
843 ioplIndex++;
844 ioplIndex--;
845
846 // Retrieve the IOPL info block.
847 ioPLBlock ioplInfo = ioplList[ioplIndex];
848
849 /*
850      * For external UPLs, fPageInfo points directly to the UPL's
851      * upl_page_info_t array.
852 */
853 if (ioplInfo.fFlags & kIOPLExternUPL)
854 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
855 else
856 pageList = &pageList[ioplInfo.fPageInfo];
857
858     // Rebase [offset] into the IOPL in order to look up the first page index.
859 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
860
861 // Retrieve the index of the first page corresponding to the offset.
862 currentPageIndex = atop_32(offsetInIOPL);
863 }
864
865 // enter mappings
866 remain = size;
867 mapAddr = addr;
868 addr += pageOffset;
869
870 while (remain && (KERN_SUCCESS == err))
871 {
872 entryOffset = offset - entry->offset;
873 if ((page_mask & entryOffset) != pageOffset)
874 {
875 err = kIOReturnNotAligned;
876 break;
877 }
878
879 if (kIODefaultCache != cacheMode)
880 {
881 vm_size_t unused = 0;
882 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
883 memEntryCacheMode, NULL, entry->entry);
884 assert (KERN_SUCCESS == err);
885 }
886
887 entryOffset -= pageOffset;
888 if (entryOffset >= entry->size) panic("entryOffset");
889 chunk = entry->size - entryOffset;
890 if (chunk)
891 {
892 if (chunk > remain) chunk = remain;
893 if (options & kIOMapPrefault)
894 {
895 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
896 err = vm_map_enter_mem_object_prefault(map,
897 &mapAddr,
898 chunk, 0 /* mask */,
899 (VM_FLAGS_FIXED
900 | VM_FLAGS_OVERWRITE
901 | VM_MAKE_TAG(tag)
902 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
903 entry->entry,
904 entryOffset,
905 prot, // cur
906 prot, // max
907 &pageList[currentPageIndex],
908 nb_pages);
909
910 // Compute the next index in the page list.
911 currentPageIndex += nb_pages;
912 assert(currentPageIndex <= _pages);
913 }
914 else
915 {
916 err = vm_map_enter_mem_object(map,
917 &mapAddr,
918 chunk, 0 /* mask */,
919 (VM_FLAGS_FIXED
920 | VM_FLAGS_OVERWRITE
921 | VM_MAKE_TAG(tag)
922 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
923 entry->entry,
924 entryOffset,
925 false, // copy
926 prot, // cur
927 prot, // max
928 VM_INHERIT_NONE);
929 }
930 if (KERN_SUCCESS != err) break;
931 remain -= chunk;
932 if (!remain) break;
933 mapAddr += chunk;
934 offset += chunk - pageOffset;
935 }
936 pageOffset = 0;
937 entry++;
938 entryIdx++;
939 if (entryIdx >= ref->count)
940 {
941 err = kIOReturnOverrun;
942 break;
943 }
944 }
945
946 if ((KERN_SUCCESS != err) && addr && !(kIOMapOverwrite & options))
947 {
948 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
949 addr = 0;
950 }
951 *inaddr = addr;
952
953 return (err);
954 }
955
956 IOReturn
957 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
958 IOMemoryReference * ref,
959 IOByteCount * residentPageCount,
960 IOByteCount * dirtyPageCount)
961 {
962 IOReturn err;
963 IOMemoryEntry * entries;
964 unsigned int resident, dirty;
965 unsigned int totalResident, totalDirty;
966
967 totalResident = totalDirty = 0;
968 entries = ref->entries + ref->count;
969 while (entries > &ref->entries[0])
970 {
971 entries--;
972 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
973 if (KERN_SUCCESS != err) break;
974 totalResident += resident;
975 totalDirty += dirty;
976 }
977
978 if (residentPageCount) *residentPageCount = totalResident;
979 if (dirtyPageCount) *dirtyPageCount = totalDirty;
980 return (err);
981 }
982
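// memoryReferenceSetPurgeable() applies the requested purgeable state to
// every entry and aggregates the states returned by the VM into *oldState:
// kIOMemoryPurgeableEmpty if any entry reported empty, otherwise
// kIOMemoryPurgeableVolatile if any was volatile, otherwise
// kIOMemoryPurgeableNonVolatile.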
983 IOReturn
984 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
985 IOMemoryReference * ref,
986 IOOptionBits newState,
987 IOOptionBits * oldState)
988 {
989 IOReturn err;
990 IOMemoryEntry * entries;
991 vm_purgable_t control;
992 int totalState, state;
993
994 entries = ref->entries + ref->count;
995 totalState = kIOMemoryPurgeableNonVolatile;
996 while (entries > &ref->entries[0])
997 {
998 entries--;
999
1000 err = purgeableControlBits(newState, &control, &state);
1001 if (KERN_SUCCESS != err) break;
1002 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1003 if (KERN_SUCCESS != err) break;
1004 err = purgeableStateBits(&state);
1005 if (KERN_SUCCESS != err) break;
1006
1007 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1008 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1009 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1010 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1011 else totalState = kIOMemoryPurgeableNonVolatile;
1012 }
1013
1014 if (oldState) *oldState = totalState;
1015 return (err);
1016 }
1017
1018 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1019
1020 IOMemoryDescriptor *
1021 IOMemoryDescriptor::withAddress(void * address,
1022 IOByteCount length,
1023 IODirection direction)
1024 {
1025 return IOMemoryDescriptor::
1026 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1027 }
1028
1029 #ifndef __LP64__
1030 IOMemoryDescriptor *
1031 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1032 IOByteCount length,
1033 IODirection direction,
1034 task_t task)
1035 {
1036 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1037 if (that)
1038 {
1039 if (that->initWithAddress(address, length, direction, task))
1040 return that;
1041
1042 that->release();
1043 }
1044 return 0;
1045 }
1046 #endif /* !__LP64__ */
1047
1048 IOMemoryDescriptor *
1049 IOMemoryDescriptor::withPhysicalAddress(
1050 IOPhysicalAddress address,
1051 IOByteCount length,
1052 IODirection direction )
1053 {
1054 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1055 }
1056
1057 #ifndef __LP64__
1058 IOMemoryDescriptor *
1059 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1060 UInt32 withCount,
1061 IODirection direction,
1062 task_t task,
1063 bool asReference)
1064 {
1065 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1066 if (that)
1067 {
1068 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1069 return that;
1070
1071 that->release();
1072 }
1073 return 0;
1074 }
1075 #endif /* !__LP64__ */
1076
1077 IOMemoryDescriptor *
1078 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1079 mach_vm_size_t length,
1080 IOOptionBits options,
1081 task_t task)
1082 {
1083 IOAddressRange range = { address, length };
1084 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1085 }
1086
1087 IOMemoryDescriptor *
1088 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1089 UInt32 rangeCount,
1090 IOOptionBits options,
1091 task_t task)
1092 {
1093 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1094 if (that)
1095 {
1096 if (task)
1097 options |= kIOMemoryTypeVirtual64;
1098 else
1099 options |= kIOMemoryTypePhysical64;
1100
1101 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1102 return that;
1103
1104 that->release();
1105 }
1106
1107 return 0;
1108 }
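// Illustrative use of the factory methods above (userBuffer, userLength and
// userTask are placeholder names, not identifiers from this file):
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         userBuffer, userLength, kIODirectionOutIn, userTask);
//     if (md && (kIOReturnSuccess == md->prepare()))
//     {
//         // ... program DMA, or move data with readBytes()/writeBytes() ...
//         md->complete();
//     }
//     if (md) md->release();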
1109
1110
1111 /*
1112 * withOptions:
1113 *
1114 * Create a new IOMemoryDescriptor. The buffer is made up of several
1115 * virtual address ranges, from a given task.
1116 *
1117 * Passing the ranges as a reference will avoid an extra allocation.
1118 */
1119 IOMemoryDescriptor *
1120 IOMemoryDescriptor::withOptions(void * buffers,
1121 UInt32 count,
1122 UInt32 offset,
1123 task_t task,
1124 IOOptionBits opts,
1125 IOMapper * mapper)
1126 {
1127 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1128
1129 if (self
1130 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1131 {
1132 self->release();
1133 return 0;
1134 }
1135
1136 return self;
1137 }
1138
1139 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1140 UInt32 count,
1141 UInt32 offset,
1142 task_t task,
1143 IOOptionBits options,
1144 IOMapper * mapper)
1145 {
1146 return( false );
1147 }
1148
1149 #ifndef __LP64__
1150 IOMemoryDescriptor *
1151 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1152 UInt32 withCount,
1153 IODirection direction,
1154 bool asReference)
1155 {
1156 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1157 if (that)
1158 {
1159 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1160 return that;
1161
1162 that->release();
1163 }
1164 return 0;
1165 }
1166
1167 IOMemoryDescriptor *
1168 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1169 IOByteCount offset,
1170 IOByteCount length,
1171 IODirection direction)
1172 {
1173 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction));
1174 }
1175 #endif /* !__LP64__ */
1176
1177 IOMemoryDescriptor *
1178 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1179 {
1180 IOGeneralMemoryDescriptor *origGenMD =
1181 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1182
1183 if (origGenMD)
1184 return IOGeneralMemoryDescriptor::
1185 withPersistentMemoryDescriptor(origGenMD);
1186 else
1187 return 0;
1188 }
1189
1190 IOMemoryDescriptor *
1191 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1192 {
1193 IOMemoryReference * memRef;
1194
1195 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1196
1197 if (memRef == originalMD->_memRef)
1198 {
1199 originalMD->retain(); // Add a new reference to ourselves
1200 originalMD->memoryReferenceRelease(memRef);
1201 return originalMD;
1202 }
1203
1204 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1205 IOMDPersistentInitData initData = { originalMD, memRef };
1206
1207 if (self
1208 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1209 self->release();
1210 self = 0;
1211 }
1212 return self;
1213 }
1214
1215 #ifndef __LP64__
1216 bool
1217 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1218 IOByteCount withLength,
1219 IODirection withDirection)
1220 {
1221 _singleRange.v.address = (vm_offset_t) address;
1222 _singleRange.v.length = withLength;
1223
1224 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1225 }
1226
1227 bool
1228 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1229 IOByteCount withLength,
1230 IODirection withDirection,
1231 task_t withTask)
1232 {
1233 _singleRange.v.address = address;
1234 _singleRange.v.length = withLength;
1235
1236 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1237 }
1238
1239 bool
1240 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1241 IOPhysicalAddress address,
1242 IOByteCount withLength,
1243 IODirection withDirection )
1244 {
1245 _singleRange.p.address = address;
1246 _singleRange.p.length = withLength;
1247
1248 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1249 }
1250
1251 bool
1252 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1253 IOPhysicalRange * ranges,
1254 UInt32 count,
1255 IODirection direction,
1256 bool reference)
1257 {
1258 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1259
1260 if (reference)
1261 mdOpts |= kIOMemoryAsReference;
1262
1263 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1264 }
1265
1266 bool
1267 IOGeneralMemoryDescriptor::initWithRanges(
1268 IOVirtualRange * ranges,
1269 UInt32 count,
1270 IODirection direction,
1271 task_t task,
1272 bool reference)
1273 {
1274 IOOptionBits mdOpts = direction;
1275
1276 if (reference)
1277 mdOpts |= kIOMemoryAsReference;
1278
1279 if (task) {
1280 mdOpts |= kIOMemoryTypeVirtual;
1281
1282 // Auto-prepare if this is a kernel memory descriptor as very few
1283 // clients bother to prepare() kernel memory.
1284         // This was never enforced, though, so it remains a convenience rather than a requirement.
1285 if (task == kernel_task)
1286 mdOpts |= kIOMemoryAutoPrepare;
1287 }
1288 else
1289 mdOpts |= kIOMemoryTypePhysical;
1290
1291 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1292 }
1293 #endif /* !__LP64__ */
1294
1295 /*
1296 * initWithOptions:
1297 *
1298  * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1299  * address ranges from a given task, several physical ranges, a UPL from the
1300  * ubc system, or a uio (possibly 64-bit) from the BSD subsystem.
1301 *
1302 * Passing the ranges as a reference will avoid an extra allocation.
1303 *
1304 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1305 * existing instance -- note this behavior is not commonly supported in other
1306 * I/O Kit classes, although it is supported here.
1307 */
1308
1309 bool
1310 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1311 UInt32 count,
1312 UInt32 offset,
1313 task_t task,
1314 IOOptionBits options,
1315 IOMapper * mapper)
1316 {
1317 IOOptionBits type = options & kIOMemoryTypeMask;
1318
1319 #ifndef __LP64__
1320 if (task
1321 && (kIOMemoryTypeVirtual == type)
1322 && vm_map_is_64bit(get_task_map(task))
1323 && ((IOVirtualRange *) buffers)->address)
1324 {
1325 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1326 return false;
1327 }
1328 #endif /* !__LP64__ */
1329
1330     // Grab the original MD's configuration data to initialise the
1331 // arguments to this function.
1332 if (kIOMemoryTypePersistentMD == type) {
1333
1334 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1335 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1336 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1337
1338 // Only accept persistent memory descriptors with valid dataP data.
1339 assert(orig->_rangesCount == 1);
1340 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1341 return false;
1342
1343 _memRef = initData->fMemRef; // Grab the new named entry
1344 options = orig->_flags & ~kIOMemoryAsReference;
1345 type = options & kIOMemoryTypeMask;
1346 buffers = orig->_ranges.v;
1347 count = orig->_rangesCount;
1348
1349 // Now grab the original task and whatever mapper was previously used
1350 task = orig->_task;
1351 mapper = dataP->fMapper;
1352
1353 // We are ready to go through the original initialisation now
1354 }
1355
1356 switch (type) {
1357 case kIOMemoryTypeUIO:
1358 case kIOMemoryTypeVirtual:
1359 #ifndef __LP64__
1360 case kIOMemoryTypeVirtual64:
1361 #endif /* !__LP64__ */
1362 assert(task);
1363 if (!task)
1364 return false;
1365 break;
1366
1367 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1368 #ifndef __LP64__
1369 case kIOMemoryTypePhysical64:
1370 #endif /* !__LP64__ */
1371 case kIOMemoryTypeUPL:
1372 assert(!task);
1373 break;
1374 default:
1375 return false; /* bad argument */
1376 }
1377
1378 assert(buffers);
1379 assert(count);
1380
1381 /*
1382 * We can check the _initialized instance variable before having ever set
1383 * it to an initial value because I/O Kit guarantees that all our instance
1384 * variables are zeroed on an object's allocation.
1385 */
1386
1387 if (_initialized) {
1388 /*
1389 * An existing memory descriptor is being retargeted to point to
1390 * somewhere else. Clean up our present state.
1391 */
1392 IOOptionBits type = _flags & kIOMemoryTypeMask;
1393 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1394 {
1395 while (_wireCount)
1396 complete();
1397 }
1398 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1399 {
1400 if (kIOMemoryTypeUIO == type)
1401 uio_free((uio_t) _ranges.v);
1402 #ifndef __LP64__
1403 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1404 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1405 #endif /* !__LP64__ */
1406 else
1407 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1408 }
1409
1410 options |= (kIOMemoryRedirected & _flags);
1411 if (!(kIOMemoryRedirected & options))
1412 {
1413 if (_memRef)
1414 {
1415 memoryReferenceRelease(_memRef);
1416 _memRef = 0;
1417 }
1418 if (_mappings)
1419 _mappings->flushCollection();
1420 }
1421 }
1422 else {
1423 if (!super::init())
1424 return false;
1425 _initialized = true;
1426 }
1427
1428 // Grab the appropriate mapper
1429 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1430 if (kIOMemoryMapperNone & options)
1431 mapper = 0; // No Mapper
1432 else if (mapper == kIOMapperSystem) {
1433 IOMapper::checkForSystemMapper();
1434 gIOSystemMapper = mapper = IOMapper::gSystem;
1435 }
1436
1437 // Temp binary compatibility for kIOMemoryThreadSafe
1438 if (kIOMemoryReserved6156215 & options)
1439 {
1440 options &= ~kIOMemoryReserved6156215;
1441 options |= kIOMemoryThreadSafe;
1442 }
1443 // Remove the dynamic internal use flags from the initial setting
1444 options &= ~(kIOMemoryPreparedReadOnly);
1445 _flags = options;
1446 _task = task;
1447
1448 #ifndef __LP64__
1449 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1450 #endif /* !__LP64__ */
1451
1452 __iomd_reservedA = 0;
1453 __iomd_reservedB = 0;
1454 _highestPage = 0;
1455
1456 if (kIOMemoryThreadSafe & options)
1457 {
1458 if (!_prepareLock)
1459 _prepareLock = IOLockAlloc();
1460 }
1461 else if (_prepareLock)
1462 {
1463 IOLockFree(_prepareLock);
1464 _prepareLock = NULL;
1465 }
1466
1467 if (kIOMemoryTypeUPL == type) {
1468
1469 ioGMDData *dataP;
1470 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1471
1472 if (!initMemoryEntries(dataSize, mapper)) return (false);
1473 dataP = getDataP(_memoryEntries);
1474 dataP->fPageCnt = 0;
1475
1476 // _wireCount++; // UPLs start out life wired
1477
1478 _length = count;
1479 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1480
1481 ioPLBlock iopl;
1482 iopl.fIOPL = (upl_t) buffers;
1483 upl_set_referenced(iopl.fIOPL, true);
1484 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1485
1486 if (upl_get_size(iopl.fIOPL) < (count + offset))
1487 panic("short external upl");
1488
1489 _highestPage = upl_get_highest_page(iopl.fIOPL);
1490
1491         // Set the flag kIOPLOnDevice conveniently equal to 1
1492 iopl.fFlags = pageList->device | kIOPLExternUPL;
1493 if (!pageList->device) {
1494 // Pre-compute the offset into the UPL's page list
1495 pageList = &pageList[atop_32(offset)];
1496 offset &= PAGE_MASK;
1497 }
1498 iopl.fIOMDOffset = 0;
1499 iopl.fMappedPage = 0;
1500 iopl.fPageInfo = (vm_address_t) pageList;
1501 iopl.fPageOffset = offset;
1502 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1503 }
1504 else {
1505 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1506 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1507
1508 // Initialize the memory descriptor
1509 if (options & kIOMemoryAsReference) {
1510 #ifndef __LP64__
1511 _rangesIsAllocated = false;
1512 #endif /* !__LP64__ */
1513
1514 // Hack assignment to get the buffer arg into _ranges.
1515 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1516 // work, C++ sigh.
1517 // This also initialises the uio & physical ranges.
1518 _ranges.v = (IOVirtualRange *) buffers;
1519 }
1520 else {
1521 #ifndef __LP64__
1522 _rangesIsAllocated = true;
1523 #endif /* !__LP64__ */
1524 switch (type)
1525 {
1526 case kIOMemoryTypeUIO:
1527 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1528 break;
1529
1530 #ifndef __LP64__
1531 case kIOMemoryTypeVirtual64:
1532 case kIOMemoryTypePhysical64:
1533 if (count == 1
1534 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1535 ) {
1536 if (kIOMemoryTypeVirtual64 == type)
1537 type = kIOMemoryTypeVirtual;
1538 else
1539 type = kIOMemoryTypePhysical;
1540 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1541 _rangesIsAllocated = false;
1542 _ranges.v = &_singleRange.v;
1543 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1544 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1545 break;
1546 }
1547 _ranges.v64 = IONew(IOAddressRange, count);
1548 if (!_ranges.v64)
1549 return false;
1550 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1551 break;
1552 #endif /* !__LP64__ */
1553 case kIOMemoryTypeVirtual:
1554 case kIOMemoryTypePhysical:
1555 if (count == 1) {
1556 _flags |= kIOMemoryAsReference;
1557 #ifndef __LP64__
1558 _rangesIsAllocated = false;
1559 #endif /* !__LP64__ */
1560 _ranges.v = &_singleRange.v;
1561 } else {
1562 _ranges.v = IONew(IOVirtualRange, count);
1563 if (!_ranges.v)
1564 return false;
1565 }
1566 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1567 break;
1568 }
1569 }
1570
1571         // Walk the vector of ranges: total the length and page count, track the highest page, and check for overflow
1572 Ranges vec = _ranges;
1573 mach_vm_size_t totalLength = 0;
1574 unsigned int ind, pages = 0;
1575 for (ind = 0; ind < count; ind++) {
1576 mach_vm_address_t addr;
1577 mach_vm_size_t len;
1578
1579 // addr & len are returned by this function
1580 getAddrLenForInd(addr, len, type, vec, ind);
1581 if ((addr + len + PAGE_MASK) < addr) break; /* overflow */
1582 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
1583 totalLength += len;
1584 if (totalLength < len) break; /* overflow */
1585 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1586 {
1587 ppnum_t highPage = atop_64(addr + len - 1);
1588 if (highPage > _highestPage)
1589 _highestPage = highPage;
1590 }
1591 }
1592 if ((ind < count)
1593 || (totalLength != ((IOByteCount) totalLength))) return (false); /* overflow */
1594
1595 _length = totalLength;
1596 _pages = pages;
1597 _rangesCount = count;
1598
1599 // Auto-prepare memory at creation time.
1600     // Implied completion when descriptor is freed
1601 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1602 _wireCount++; // Physical MDs are, by definition, wired
1603 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1604 ioGMDData *dataP;
1605 unsigned dataSize;
1606
1607 if (_pages > atop_64(max_mem)) return false;
1608
1609 dataSize = computeDataSize(_pages, /* upls */ count * 2);
1610 if (!initMemoryEntries(dataSize, mapper)) return false;
1611 dataP = getDataP(_memoryEntries);
1612 dataP->fPageCnt = _pages;
1613
1614 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1615 {
1616 IOReturn
1617 err = memoryReferenceCreate(0, &_memRef);
1618 if (kIOReturnSuccess != err) return false;
1619 }
1620
1621 if ((_flags & kIOMemoryAutoPrepare)
1622 && prepare() != kIOReturnSuccess)
1623 return false;
1624 }
1625 }
1626
1627 return true;
1628 }
1629
1630 /*
1631 * free
1632 *
1633 * Free resources.
1634 */
1635 void IOGeneralMemoryDescriptor::free()
1636 {
1637 IOOptionBits type = _flags & kIOMemoryTypeMask;
1638
1639 if( reserved)
1640 {
1641 LOCK;
1642 reserved->dp.memory = 0;
1643 UNLOCK;
1644 }
1645 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1646 {
1647 ioGMDData * dataP;
1648 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1649 {
1650 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
1651 dataP->fMappedBase = 0;
1652 }
1653 }
1654 else
1655 {
1656 while (_wireCount) complete();
1657 }
1658
1659 if (_memoryEntries) _memoryEntries->release();
1660
1661 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1662 {
1663 if (kIOMemoryTypeUIO == type)
1664 uio_free((uio_t) _ranges.v);
1665 #ifndef __LP64__
1666 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1667 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1668 #endif /* !__LP64__ */
1669 else
1670 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1671
1672 _ranges.v = NULL;
1673 }
1674
1675 if (reserved)
1676 {
1677 if (reserved->dp.devicePager)
1678 {
1679 // memEntry holds a ref on the device pager which owns reserved
1680 // (IOMemoryDescriptorReserved) so no reserved access after this point
1681 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1682 }
1683 else
1684 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1685 reserved = NULL;
1686 }
1687
1688 if (_memRef) memoryReferenceRelease(_memRef);
1689 if (_prepareLock) IOLockFree(_prepareLock);
1690
1691 super::free();
1692 }
1693
1694 #ifndef __LP64__
1695 void IOGeneralMemoryDescriptor::unmapFromKernel()
1696 {
1697 panic("IOGMD::unmapFromKernel deprecated");
1698 }
1699
1700 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1701 {
1702 panic("IOGMD::mapIntoKernel deprecated");
1703 }
1704 #endif /* !__LP64__ */
1705
1706 /*
1707 * getDirection:
1708 *
1709 * Get the direction of the transfer.
1710 */
1711 IODirection IOMemoryDescriptor::getDirection() const
1712 {
1713 #ifndef __LP64__
1714 if (_direction)
1715 return _direction;
1716 #endif /* !__LP64__ */
1717 return (IODirection) (_flags & kIOMemoryDirectionMask);
1718 }
1719
1720 /*
1721 * getLength:
1722 *
1723 * Get the length of the transfer (over all ranges).
1724 */
1725 IOByteCount IOMemoryDescriptor::getLength() const
1726 {
1727 return _length;
1728 }
1729
1730 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1731 {
1732 _tag = tag;
1733 }
1734
1735 IOOptionBits IOMemoryDescriptor::getTag( void )
1736 {
1737 return( _tag);
1738 }
1739
1740 #ifndef __LP64__
1741 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1742 IOPhysicalAddress
1743 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1744 {
1745 addr64_t physAddr = 0;
1746
1747 if( prepare() == kIOReturnSuccess) {
1748 physAddr = getPhysicalSegment64( offset, length );
1749 complete();
1750 }
1751
1752 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1753 }
1754 #endif /* !__LP64__ */
1755
1756 IOByteCount IOMemoryDescriptor::readBytes
1757 (IOByteCount offset, void *bytes, IOByteCount length)
1758 {
1759 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1760 IOByteCount remaining;
1761
1762     // Assert that this entire I/O is within the available range
1763 assert(offset <= _length);
1764 assert(offset + length <= _length);
1765 if ((offset >= _length)
1766 || ((offset + length) > _length)) {
1767 return 0;
1768 }
1769
1770 if (kIOMemoryThreadSafe & _flags)
1771 LOCK;
1772
1773 remaining = length = min(length, _length - offset);
1774 while (remaining) { // (process another target segment?)
1775 addr64_t srcAddr64;
1776 IOByteCount srcLen;
1777
1778 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1779 if (!srcAddr64)
1780 break;
1781
1782 // Clip segment length to remaining
1783 if (srcLen > remaining)
1784 srcLen = remaining;
1785
1786 copypv(srcAddr64, dstAddr, srcLen,
1787 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1788
1789 dstAddr += srcLen;
1790 offset += srcLen;
1791 remaining -= srcLen;
1792 }
1793
1794 if (kIOMemoryThreadSafe & _flags)
1795 UNLOCK;
1796
1797 assert(!remaining);
1798
1799 return length - remaining;
1800 }
1801
1802 IOByteCount IOMemoryDescriptor::writeBytes
1803 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1804 {
1805 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1806 IOByteCount remaining;
1807 IOByteCount offset = inoffset;
1808
1809     // Assert that this entire I/O is within the available range
1810 assert(offset <= _length);
1811 assert(offset + length <= _length);
1812
1813 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1814
1815 if ( (kIOMemoryPreparedReadOnly & _flags)
1816 || (offset >= _length)
1817 || ((offset + length) > _length)) {
1818 return 0;
1819 }
1820
1821 if (kIOMemoryThreadSafe & _flags)
1822 LOCK;
1823
1824 remaining = length = min(length, _length - offset);
1825 while (remaining) { // (process another target segment?)
1826 addr64_t dstAddr64;
1827 IOByteCount dstLen;
1828
1829 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1830 if (!dstAddr64)
1831 break;
1832
1833 // Clip segment length to remaining
1834 if (dstLen > remaining)
1835 dstLen = remaining;
1836
1837 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1838 else
1839 {
1840 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1841 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1842 srcAddr += dstLen;
1843 }
1844 offset += dstLen;
1845 remaining -= dstLen;
1846 }
1847
1848 if (kIOMemoryThreadSafe & _flags)
1849 UNLOCK;
1850
1851 assert(!remaining);
1852
1853 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1854
1855 return length - remaining;
1856 }
1857
1858 #ifndef __LP64__
1859 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1860 {
1861 panic("IOGMD::setPosition deprecated");
1862 }
1863 #endif /* !__LP64__ */
1864
1865 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
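// Preparation IDs are drawn from this 64-bit counter; it presumably starts
// at 1ULL << 32 so that generated IDs can never collide with the small
// kIOPreparationID* sentinel values (kIOPreparationIDUnprepared,
// kIOPreparationIDUnsupported) used below.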
1866
1867 uint64_t
1868 IOGeneralMemoryDescriptor::getPreparationID( void )
1869 {
1870 ioGMDData *dataP;
1871
1872 if (!_wireCount)
1873 return (kIOPreparationIDUnprepared);
1874
1875 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1876 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1877 {
1878 IOMemoryDescriptor::setPreparationID();
1879 return (IOMemoryDescriptor::getPreparationID());
1880 }
1881
1882 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1883 return (kIOPreparationIDUnprepared);
1884
1885 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1886 {
1887 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1888 }
1889 return (dataP->fPreparationID);
1890 }
1891
1892 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1893 {
1894 if (!reserved)
1895 {
1896 reserved = IONew(IOMemoryDescriptorReserved, 1);
1897 if (reserved)
1898 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1899 }
1900 return (reserved);
1901 }
1902
1903 void IOMemoryDescriptor::setPreparationID( void )
1904 {
1905 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1906 {
1907 #if defined(__ppc__ )
1908 reserved->preparationID = gIOMDPreparationID++;
1909 #else
1910 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1911 #endif
1912 }
1913 }
1914
1915 uint64_t IOMemoryDescriptor::getPreparationID( void )
1916 {
1917 if (reserved)
1918 return (reserved->preparationID);
1919 else
1920 return (kIOPreparationIDUnsupported);
1921 }
1922
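// dmaCommandOperation() services DMACommandOps requests, used by
// IODMACommand and the DMA mapping code to drive this descriptor:
// kIOMDDMAMap establishes (or hands out) a DMA mapping via dmaMap(),
// kIOMDAddDMAMapSpec merges address-width and alignment constraints into
// the stored map spec, kIOMDGetCharacteristics reports length, range and
// page counts plus preparation state, and kIOMDWalkSegments returns the
// next physical segment for an offset, keeping its cursor in the
// caller-supplied InternalState.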
1923 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1924 {
1925 IOReturn err = kIOReturnSuccess;
1926 DMACommandOps params;
1927 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1928 ioGMDData *dataP;
1929
1930 params = (op & ~kIOMDDMACommandOperationMask & op);
1931 op &= kIOMDDMACommandOperationMask;
1932
1933 if (kIOMDDMAMap == op)
1934 {
1935 if (dataSize < sizeof(IOMDDMAMapArgs))
1936 return kIOReturnUnderrun;
1937
1938 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1939
1940 if (!_memoryEntries
1941 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1942
1943 if (_memoryEntries && data->fMapper)
1944 {
1945 bool remap, keepMap;
1946 dataP = getDataP(_memoryEntries);
1947
1948 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1949 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1950
1951 keepMap = (data->fMapper == gIOSystemMapper);
1952 keepMap &= ((data->fOffset == 0) && (data->fLength == _length));
1953
1954 remap = (!keepMap);
1955 remap |= (dataP->fDMAMapNumAddressBits < 64)
1956 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1957 remap |= (dataP->fDMAMapAlignment > page_size);
1958
1959 if (remap || !dataP->fMappedBase)
1960 {
1961 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1962 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
1963 if (keepMap && (kIOReturnSuccess == err) && !dataP->fMappedBase)
1964 {
1965 dataP->fMappedBase = data->fAlloc;
1966 dataP->fMappedLength = data->fAllocLength;
1967 data->fAllocLength = 0; // IOMD owns the alloc now
1968 }
1969 }
1970 else
1971 {
1972 data->fAlloc = dataP->fMappedBase;
1973 data->fAllocLength = 0; // give out IOMD map
1974 }
1975 data->fMapContig = !dataP->fDiscontig;
1976 }
1977
1978 return (err);
1979 }
1980
1981 if (kIOMDAddDMAMapSpec == op)
1982 {
1983 if (dataSize < sizeof(IODMAMapSpecification))
1984 return kIOReturnUnderrun;
1985
1986 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1987
1988 if (!_memoryEntries
1989 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1990
1991 if (_memoryEntries)
1992 {
1993 dataP = getDataP(_memoryEntries);
1994 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1995 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1996 if (data->alignment > dataP->fDMAMapAlignment)
1997 dataP->fDMAMapAlignment = data->alignment;
1998 }
1999 return kIOReturnSuccess;
2000 }
2001
2002 if (kIOMDGetCharacteristics == op) {
2003
2004 if (dataSize < sizeof(IOMDDMACharacteristics))
2005 return kIOReturnUnderrun;
2006
2007 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2008 data->fLength = _length;
2009 data->fSGCount = _rangesCount;
2010 data->fPages = _pages;
2011 data->fDirection = getDirection();
2012 if (!_wireCount)
2013 data->fIsPrepared = false;
2014 else {
2015 data->fIsPrepared = true;
2016 data->fHighestPage = _highestPage;
2017 if (_memoryEntries)
2018 {
2019 dataP = getDataP(_memoryEntries);
2020 ioPLBlock *ioplList = getIOPLList(dataP);
2021 UInt count = getNumIOPL(_memoryEntries, dataP);
2022 if (count == 1)
2023 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2024 }
2025 }
2026
2027 return kIOReturnSuccess;
2028
2029 #if IOMD_DEBUG_DMAACTIVE
2030 } else if (kIOMDDMAActive == op) {
2031 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
2032 else {
2033 if (md->__iomd_reservedA)
2034 OSDecrementAtomic(&md->__iomd_reservedA);
2035 else
2036 panic("kIOMDSetDMAInactive");
2037 }
2038 #endif /* IOMD_DEBUG_DMAACTIVE */
2039
2040 } else if (kIOMDWalkSegments != op)
2041 return kIOReturnBadArgument;
2042
2043 // Get the next segment
2044 struct InternalState {
2045 IOMDDMAWalkSegmentArgs fIO;
2046 UInt fOffset2Index;
2047 UInt fIndex;
2048 UInt fNextOffset;
2049 } *isP;
2050
2051 // Find the next segment
2052 if (dataSize < sizeof(*isP))
2053 return kIOReturnUnderrun;
2054
2055 isP = (InternalState *) vData;
2056 UInt offset = isP->fIO.fOffset;
2057 bool mapped = isP->fIO.fMapped;
2058
2059 if (IOMapper::gSystem && mapped
2060 && (!(kIOMemoryHostOnly & _flags))
2061 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2062 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2063 {
2064 if (!_memoryEntries
2065 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2066
2067 dataP = getDataP(_memoryEntries);
2068 if (dataP->fMapper)
2069 {
2070 IODMAMapSpecification mapSpec;
2071 bzero(&mapSpec, sizeof(mapSpec));
2072 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2073 mapSpec.alignment = dataP->fDMAMapAlignment;
2074 err = md->dmaMap(dataP->fMapper, NULL, &mapSpec, 0, _length, &dataP->fMappedBase, &dataP->fMappedLength);
2075 if (kIOReturnSuccess != err) return (err);
2076 }
2077 }
2078
2079 if (offset >= _length)
2080 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2081
2082 // Validate the previous offset
2083 UInt ind, off2Ind = isP->fOffset2Index;
2084 if (!params
2085 && offset
2086 && (offset == isP->fNextOffset || off2Ind <= offset))
2087 ind = isP->fIndex;
2088 else
2089 ind = off2Ind = 0; // Start from beginning
2090
2091 UInt length;
2092 UInt64 address;
2093
2094
2095 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2096
2097 // Physical address based memory descriptor
2098 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2099
2100 // Find the range after the one that contains the offset
2101 mach_vm_size_t len;
2102 for (len = 0; off2Ind <= offset; ind++) {
2103 len = physP[ind].length;
2104 off2Ind += len;
2105 }
2106
2107 // Calculate length within range and starting address
2108 length = off2Ind - offset;
2109 address = physP[ind - 1].address + len - length;
2110
2111 if (true && mapped && _memoryEntries
2112 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2113 {
2114 address = dataP->fMappedBase + offset;
2115 }
2116 else
2117 {
2118 // see how far we can coalesce ranges
2119 while (ind < _rangesCount && address + length == physP[ind].address) {
2120 len = physP[ind].length;
2121 length += len;
2122 off2Ind += len;
2123 ind++;
2124 }
2125 }
2126
2127 // correct contiguous check overshoot
2128 ind--;
2129 off2Ind -= len;
2130 }
2131 #ifndef __LP64__
2132 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2133
2134 // Physical address based memory descriptor
2135 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2136
2137 // Find the range after the one that contains the offset
2138 mach_vm_size_t len;
2139 for (len = 0; off2Ind <= offset; ind++) {
2140 len = physP[ind].length;
2141 off2Ind += len;
2142 }
2143
2144 // Calculate length within range and starting address
2145 length = off2Ind - offset;
2146 address = physP[ind - 1].address + len - length;
2147
2148 if (true && mapped && _memoryEntries
2149 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2150 {
2151 address = dataP->fMappedBase + offset;
2152 }
2153 else
2154 {
2155 // see how far we can coalesce ranges
2156 while (ind < _rangesCount && address + length == physP[ind].address) {
2157 len = physP[ind].length;
2158 length += len;
2159 off2Ind += len;
2160 ind++;
2161 }
2162 }
2163 // correct contiguous check overshoot
2164 ind--;
2165 off2Ind -= len;
2166 }
2167 #endif /* !__LP64__ */
2168 else do {
2169 if (!_wireCount)
2170 panic("IOGMD: not wired for the IODMACommand");
2171
2172 assert(_memoryEntries);
2173
2174 dataP = getDataP(_memoryEntries);
2175 const ioPLBlock *ioplList = getIOPLList(dataP);
2176 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2177 upl_page_info_t *pageList = getPageList(dataP);
2178
2179 assert(numIOPLs > 0);
2180
2181 // Scan through iopl info blocks looking for block containing offset
2182 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2183 ind++;
2184
2185 // Go back to actual range as search goes past it
2186 ioPLBlock ioplInfo = ioplList[ind - 1];
2187 off2Ind = ioplInfo.fIOMDOffset;
2188
2189 if (ind < numIOPLs)
2190 length = ioplList[ind].fIOMDOffset;
2191 else
2192 length = _length;
2193 length -= offset; // Remainder within iopl
2194
2195 // Subtract this iopl's starting offset within the total list (rebase offset into the iopl)
2196 offset -= off2Ind;
2197
2198 // If a mapped address is requested and this is a pre-mapped IOPL
2199 // then we just need to compute an offset relative to the mapped base.
2200 if (mapped && dataP->fMappedBase) {
2201 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2202 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2203 continue; // Done; leave the do/while(false) now
2204 }
2205
2206 // The offset is rebased into the current iopl.
2207 // Now add the iopl 1st page offset.
2208 offset += ioplInfo.fPageOffset;
2209
2210 // For external UPLs the fPageInfo field points directly to
2211 // the upl's upl_page_info_t array.
2212 if (ioplInfo.fFlags & kIOPLExternUPL)
2213 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2214 else
2215 pageList = &pageList[ioplInfo.fPageInfo];
2216
2217 // Check for direct device non-paged memory
2218 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2219 address = ptoa_64(pageList->phys_addr) + offset;
2220 continue; // Done; leave the do/while(false) now
2221 }
2222
2223 // Now we need to compute the index into the pageList
2224 UInt pageInd = atop_32(offset);
2225 offset &= PAGE_MASK;
2226
2227 // Compute the starting address of this segment
2228 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2229 if (!pageAddr) {
2230 panic("!pageList phys_addr");
2231 }
2232
2233 address = ptoa_64(pageAddr) + offset;
2234
2235 // length is currently set to the length of the remainder of the iopl.
2236 // We need to check that the remainder of the iopl is contiguous.
2237 // This is indicated by pageList[ind].phys_addr being sequential.
2238 IOByteCount contigLength = PAGE_SIZE - offset;
2239 while (contigLength < length
2240 && ++pageAddr == pageList[++pageInd].phys_addr)
2241 {
2242 contigLength += PAGE_SIZE;
2243 }
2244
2245 if (contigLength < length)
2246 length = contigLength;
2247
2248
2249 assert(address);
2250 assert(length);
2251
2252 } while (false);
2253
2254 // Update return values and state
2255 isP->fIO.fIOVMAddr = address;
2256 isP->fIO.fLength = length;
2257 isP->fIndex = ind;
2258 isP->fOffset2Index = off2Ind;
2259 isP->fNextOffset = isP->fIO.fOffset + length;
2260
2261 return kIOReturnSuccess;
2262 }
2263
2264 addr64_t
2265 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2266 {
2267 IOReturn ret;
2268 mach_vm_address_t address = 0;
2269 mach_vm_size_t length = 0;
2270 IOMapper * mapper = gIOSystemMapper;
2271 IOOptionBits type = _flags & kIOMemoryTypeMask;
2272
2273 if (lengthOfSegment)
2274 *lengthOfSegment = 0;
2275
2276 if (offset >= _length)
2277 return 0;
2278
2279 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2280 // support the unwired memory case in IOGeneralMemoryDescriptor. Likewise, hibernate_write_image() cannot use
2281 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation an
2282 // IOMemoryMap would require. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
2283
2284 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2285 {
2286 unsigned rangesIndex = 0;
2287 Ranges vec = _ranges;
2288 mach_vm_address_t addr;
2289
2290 // Find starting address within the vector of ranges
2291 for (;;) {
2292 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2293 if (offset < length)
2294 break;
2295 offset -= length; // (make offset relative)
2296 rangesIndex++;
2297 }
2298
2299 // Now that we have the starting range,
2300 // let's find the last contiguous range
2301 addr += offset;
2302 length -= offset;
2303
2304 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2305 mach_vm_address_t newAddr;
2306 mach_vm_size_t newLen;
2307
2308 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2309 if (addr + length != newAddr)
2310 break;
2311 length += newLen;
2312 }
2313 if (addr)
2314 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2315 }
2316 else
2317 {
2318 IOMDDMAWalkSegmentState _state;
2319 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2320
2321 state->fOffset = offset;
2322 state->fLength = _length - offset;
2323 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2324
2325 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2326
2327 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2328 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2329 ret, this, state->fOffset,
2330 state->fIOVMAddr, state->fLength);
2331 if (kIOReturnSuccess == ret)
2332 {
2333 address = state->fIOVMAddr;
2334 length = state->fLength;
2335 }
2336
2337 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2338 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2339
2340 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2341 {
2342 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2343 {
2344 addr64_t origAddr = address;
2345 IOByteCount origLen = length;
2346
2347 address = mapper->mapToPhysicalAddress(origAddr);
2348 length = page_size - (address & (page_size - 1));
2349 while ((length < origLen)
2350 && ((address + length) == mapper->mapToPhysicalAddress(origAddr + length)))
2351 length += page_size;
2352 if (length > origLen)
2353 length = origLen;
2354 }
2355 }
2356 }
2357
2358 if (!address)
2359 length = 0;
2360
2361 if (lengthOfSegment)
2362 *lengthOfSegment = length;
2363
2364 return (address);
2365 }
2366
2367 #ifndef __LP64__
2368 addr64_t
2369 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2370 {
2371 addr64_t address = 0;
2372
2373 if (options & _kIOMemorySourceSegment)
2374 {
2375 address = getSourceSegment(offset, lengthOfSegment);
2376 }
2377 else if (options & kIOMemoryMapperNone)
2378 {
2379 address = getPhysicalSegment64(offset, lengthOfSegment);
2380 }
2381 else
2382 {
2383 address = getPhysicalSegment(offset, lengthOfSegment);
2384 }
2385
2386 return (address);
2387 }
2388
2389 addr64_t
2390 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2391 {
2392 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2393 }
2394
2395 IOPhysicalAddress
2396 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2397 {
2398 addr64_t address = 0;
2399 IOByteCount length = 0;
2400
2401 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2402
2403 if (lengthOfSegment)
2404 length = *lengthOfSegment;
2405
2406 if ((address + length) > 0x100000000ULL)
2407 {
2408 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2409 address, (long) length, (getMetaClass())->getClassName());
2410 }
2411
2412 return ((IOPhysicalAddress) address);
2413 }
2414
2415 addr64_t
2416 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2417 {
2418 IOPhysicalAddress phys32;
2419 IOByteCount length;
2420 addr64_t phys64;
2421 IOMapper * mapper = 0;
2422
2423 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2424 if (!phys32)
2425 return 0;
2426
2427 if (gIOSystemMapper)
2428 mapper = gIOSystemMapper;
2429
2430 if (mapper)
2431 {
2432 IOByteCount origLen;
2433
2434 phys64 = mapper->mapToPhysicalAddress(phys32);
2435 origLen = *lengthOfSegment;
2436 length = page_size - (phys64 & (page_size - 1));
2437 while ((length < origLen)
2438 && ((phys64 + length) == mapper->mapToPhysicalAddress(phys32 + length)))
2439 length += page_size;
2440 if (length > origLen)
2441 length = origLen;
2442
2443 *lengthOfSegment = length;
2444 }
2445 else
2446 phys64 = (addr64_t) phys32;
2447
2448 return phys64;
2449 }
2450
2451 IOPhysicalAddress
2452 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2453 {
2454 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2455 }
2456
2457 IOPhysicalAddress
2458 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2459 {
2460 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2461 }
2462
2463 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2464 IOByteCount * lengthOfSegment)
2465 {
2466 if (_task == kernel_task)
2467 return (void *) getSourceSegment(offset, lengthOfSegment);
2468 else
2469 panic("IOGMD::getVirtualSegment deprecated");
2470
2471 return 0;
2472 }
2473 #endif /* !__LP64__ */
2474
2475 IOReturn
2476 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2477 {
2478 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2479 DMACommandOps params;
2480 IOReturn err;
2481
2482 params = (op & ~kIOMDDMACommandOperationMask & op);
2483 op &= kIOMDDMACommandOperationMask;
2484
2485 if (kIOMDGetCharacteristics == op) {
2486 if (dataSize < sizeof(IOMDDMACharacteristics))
2487 return kIOReturnUnderrun;
2488
2489 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2490 data->fLength = getLength();
2491 data->fSGCount = 0;
2492 data->fDirection = getDirection();
2493 data->fIsPrepared = true; // Assume prepared - fails safe
2494 }
2495 else if (kIOMDWalkSegments == op) {
2496 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2497 return kIOReturnUnderrun;
2498
2499 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2500 IOByteCount offset = (IOByteCount) data->fOffset;
2501
2502 IOPhysicalLength length;
2503 if (data->fMapped && IOMapper::gSystem)
2504 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2505 else
2506 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2507 data->fLength = length;
2508 }
2509 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2510 else if (kIOMDDMAMap == op)
2511 {
2512 if (dataSize < sizeof(IOMDDMAMapArgs))
2513 return kIOReturnUnderrun;
2514 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2515
2516 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2517
2518 data->fMapContig = true;
2519 err = md->dmaMap(data->fMapper, data->fCommand, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocLength);
2520 return (err);
2521 }
2522 else return kIOReturnBadArgument;
2523
2524 return kIOReturnSuccess;
2525 }
2526
2527 IOReturn
2528 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2529 IOOptionBits * oldState )
2530 {
2531 IOReturn err = kIOReturnSuccess;
2532
2533 vm_purgable_t control;
2534 int state;
2535
2536 if (_memRef)
2537 {
2538 err = super::setPurgeable(newState, oldState);
2539 }
2540 else
2541 {
2542 if (kIOMemoryThreadSafe & _flags)
2543 LOCK;
2544 do
2545 {
2546 // Find the appropriate vm_map for the given task
2547 vm_map_t curMap;
2548 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2549 {
2550 err = kIOReturnNotReady;
2551 break;
2552 }
2553 else if (!_task)
2554 {
2555 err = kIOReturnUnsupported;
2556 break;
2557 }
2558 else
2559 curMap = get_task_map(_task);
2560
2561 // can only do one range
2562 Ranges vec = _ranges;
2563 IOOptionBits type = _flags & kIOMemoryTypeMask;
2564 mach_vm_address_t addr;
2565 mach_vm_size_t len;
2566 getAddrLenForInd(addr, len, type, vec, 0);
2567
2568 err = purgeableControlBits(newState, &control, &state);
2569 if (kIOReturnSuccess != err)
2570 break;
2571 err = mach_vm_purgable_control(curMap, addr, control, &state);
2572 if (oldState)
2573 {
2574 if (kIOReturnSuccess == err)
2575 {
2576 err = purgeableStateBits(&state);
2577 *oldState = state;
2578 }
2579 }
2580 }
2581 while (false);
2582 if (kIOMemoryThreadSafe & _flags)
2583 UNLOCK;
2584 }
2585
2586 return (err);
2587 }
2588
2589 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2590 IOOptionBits * oldState )
2591 {
2592 IOReturn err = kIOReturnNotReady;
2593
2594 if (kIOMemoryThreadSafe & _flags) LOCK;
2595 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2596 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2597
2598 return (err);
2599 }
2600
2601 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2602 IOByteCount * dirtyPageCount )
2603 {
2604 IOReturn err = kIOReturnNotReady;
2605
2606 if (kIOMemoryThreadSafe & _flags) LOCK;
2607 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2608 else
2609 {
2610 IOMultiMemoryDescriptor * mmd;
2611 IOSubMemoryDescriptor * smd;
2612 if ((smd = OSDynamicCast(IOSubMemoryDescriptor, this)))
2613 {
2614 err = smd->getPageCounts(residentPageCount, dirtyPageCount);
2615 }
2616 else if ((mmd = OSDynamicCast(IOMultiMemoryDescriptor, this)))
2617 {
2618 err = mmd->getPageCounts(residentPageCount, dirtyPageCount);
2619 }
2620 }
2621 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2622
2623 return (err);
2624 }
2625
2626
2627 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2628 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2629
2630 static void SetEncryptOp(addr64_t pa, unsigned int count)
2631 {
2632 ppnum_t page, end;
2633
2634 page = atop_64(round_page_64(pa));
2635 end = atop_64(trunc_page_64(pa + count));
2636 for (; page < end; page++)
2637 {
2638 pmap_clear_noencrypt(page);
2639 }
2640 }
2641
2642 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2643 {
2644 ppnum_t page, end;
2645
2646 page = atop_64(round_page_64(pa));
2647 end = atop_64(trunc_page_64(pa + count));
2648 for (; page < end; page++)
2649 {
2650 pmap_set_noencrypt(page);
2651 }
2652 }
2653
2654 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2655 IOByteCount offset, IOByteCount length )
2656 {
2657 IOByteCount remaining;
2658 unsigned int res;
2659 void (*func)(addr64_t pa, unsigned int count) = 0;
2660
2661 switch (options)
2662 {
2663 case kIOMemoryIncoherentIOFlush:
2664 func = &dcache_incoherent_io_flush64;
2665 break;
2666 case kIOMemoryIncoherentIOStore:
2667 func = &dcache_incoherent_io_store64;
2668 break;
2669
2670 case kIOMemorySetEncrypted:
2671 func = &SetEncryptOp;
2672 break;
2673 case kIOMemoryClearEncrypted:
2674 func = &ClearEncryptOp;
2675 break;
2676 }
2677
2678 if (!func)
2679 return (kIOReturnUnsupported);
2680
2681 if (kIOMemoryThreadSafe & _flags)
2682 LOCK;
2683
2684 res = 0x0UL;
2685 remaining = length = min(length, getLength() - offset);
2686 while (remaining)
2687 // (process another target segment?)
2688 {
2689 addr64_t dstAddr64;
2690 IOByteCount dstLen;
2691
2692 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2693 if (!dstAddr64)
2694 break;
2695
2696 // Clip segment length to remaining
2697 if (dstLen > remaining)
2698 dstLen = remaining;
2699
2700 (*func)(dstAddr64, dstLen);
2701
2702 offset += dstLen;
2703 remaining -= dstLen;
2704 }
2705
2706 if (kIOMemoryThreadSafe & _flags)
2707 UNLOCK;
2708
2709 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2710 }
2711
2712 #if defined(__i386__) || defined(__x86_64__)
2713
2714 #define io_kernel_static_start vm_kernel_stext
2715 #define io_kernel_static_end vm_kernel_etext
2716
2717 #else
2718 #error io_kernel_static_end is undefined for this architecture
2719 #endif
2720
2721 static kern_return_t
2722 io_get_kernel_static_upl(
2723 vm_map_t /* map */,
2724 uintptr_t offset,
2725 upl_size_t *upl_size,
2726 upl_t *upl,
2727 upl_page_info_array_t page_list,
2728 unsigned int *count,
2729 ppnum_t *highest_page)
2730 {
2731 unsigned int pageCount, page;
2732 ppnum_t phys;
2733 ppnum_t highestPage = 0;
2734
2735 pageCount = atop_32(*upl_size);
2736 if (pageCount > *count)
2737 pageCount = *count;
2738
2739 *upl = NULL;
2740
2741 for (page = 0; page < pageCount; page++)
2742 {
2743 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2744 if (!phys)
2745 break;
2746 page_list[page].phys_addr = phys;
2747 page_list[page].pageout = 0;
2748 page_list[page].absent = 0;
2749 page_list[page].dirty = 0;
2750 page_list[page].precious = 0;
2751 page_list[page].device = 0;
2752 if (phys > highestPage)
2753 highestPage = phys;
2754 }
2755
2756 *highest_page = highestPage;
2757
2758 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2759 }
2760
2761 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2762 {
2763 IOOptionBits type = _flags & kIOMemoryTypeMask;
2764 IOReturn error = kIOReturnCannotWire;
2765 ioGMDData *dataP;
2766 upl_page_info_array_t pageInfo;
2767 ppnum_t mapBase;
2768
2769 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2770
2771 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2772 forDirection = (IODirection) (forDirection | getDirection());
2773
2774 upl_control_flags_t uplFlags; // This Mem Desc's default flags for upl creation
2775 switch (kIODirectionOutIn & forDirection)
2776 {
2777 case kIODirectionOut:
2778 // Pages do not need to be marked as dirty on commit
2779 uplFlags = UPL_COPYOUT_FROM;
2780 break;
2781
2782 case kIODirectionIn:
2783 default:
2784 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2785 break;
2786 }
2787
2788 if (_wireCount)
2789 {
2790 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2791 {
2792 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2793 error = kIOReturnNotWritable;
2794 }
2795 else error = kIOReturnSuccess;
2796 return (error);
2797 }
2798
2799 dataP = getDataP(_memoryEntries);
2800 IOMapper *mapper;
2801 mapper = dataP->fMapper;
2802 dataP->fMappedBase = 0;
2803
2804 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2805 uplFlags |= UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
2806
2807 if (kIODirectionPrepareToPhys32 & forDirection)
2808 {
2809 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2810 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2811 }
2812 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2813 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2814 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2815
2816 mapBase = 0;
2817
2818 // Note that appendBytes(NULL) zeros the data up to the desired length
2819 // and the length parameter is an unsigned int
2820 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2821 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2822 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2823 dataP = 0;
2824
2825 // Find the appropriate vm_map for the given task
2826 vm_map_t curMap;
2827 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2828 else curMap = get_task_map(_task);
2829
2830 // Iterate over the vector of virtual ranges
2831 Ranges vec = _ranges;
2832 unsigned int pageIndex = 0;
2833 IOByteCount mdOffset = 0;
2834 ppnum_t highestPage = 0;
2835
2836 IOMemoryEntry * memRefEntry = 0;
2837 if (_memRef) memRefEntry = &_memRef->entries[0];
2838
2839 for (UInt range = 0; range < _rangesCount; range++) {
2840 ioPLBlock iopl;
2841 mach_vm_address_t startPage;
2842 mach_vm_size_t numBytes;
2843 ppnum_t highPage = 0;
2844
2845 // Get the startPage address and length of vec[range]
2846 getAddrLenForInd(startPage, numBytes, type, vec, range);
2847 iopl.fPageOffset = startPage & PAGE_MASK;
2848 numBytes += iopl.fPageOffset;
2849 startPage = trunc_page_64(startPage);
2850
2851 if (mapper)
2852 iopl.fMappedPage = mapBase + pageIndex;
2853 else
2854 iopl.fMappedPage = 0;
2855
2856 // Iterate over the current range, creating UPLs
2857 while (numBytes) {
2858 vm_address_t kernelStart = (vm_address_t) startPage;
2859 vm_map_t theMap;
2860 if (curMap) theMap = curMap;
2861 else if (_memRef)
2862 {
2863 theMap = NULL;
2864 }
2865 else
2866 {
2867 assert(_task == kernel_task);
2868 theMap = IOPageableMapForAddress(kernelStart);
2869 }
2870
2871 // ioplFlags is an in/out parameter
2872 upl_control_flags_t ioplFlags = uplFlags;
2873 dataP = getDataP(_memoryEntries);
2874 pageInfo = getPageList(dataP);
2875 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2876
2877 mach_vm_size_t _ioplSize = round_page(numBytes);
2878 upl_size_t ioplSize = (_ioplSize <= MAX_UPL_SIZE_BYTES) ? _ioplSize : MAX_UPL_SIZE_BYTES;
2879 unsigned int numPageInfo = atop_32(ioplSize);
2880
2881 if ((theMap == kernel_map)
2882 && (kernelStart >= io_kernel_static_start)
2883 && (kernelStart < io_kernel_static_end)) {
2884 error = io_get_kernel_static_upl(theMap,
2885 kernelStart,
2886 &ioplSize,
2887 &iopl.fIOPL,
2888 baseInfo,
2889 &numPageInfo,
2890 &highPage);
2891 }
2892 else if (_memRef) {
2893 memory_object_offset_t entryOffset;
2894
2895 entryOffset = mdOffset;
2896 entryOffset = (entryOffset - iopl.fPageOffset - memRefEntry->offset);
2897 if (entryOffset >= memRefEntry->size) {
2898 memRefEntry++;
2899 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2900 entryOffset = 0;
2901 }
2902 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2903 error = memory_object_iopl_request(memRefEntry->entry,
2904 entryOffset,
2905 &ioplSize,
2906 &iopl.fIOPL,
2907 baseInfo,
2908 &numPageInfo,
2909 &ioplFlags);
2910 }
2911 else {
2912 assert(theMap);
2913 error = vm_map_create_upl(theMap,
2914 startPage,
2915 (upl_size_t*)&ioplSize,
2916 &iopl.fIOPL,
2917 baseInfo,
2918 &numPageInfo,
2919 &ioplFlags);
2920 }
2921
2922 if (error != KERN_SUCCESS)
2923 goto abortExit;
2924
2925 assert(ioplSize);
2926
2927 if (iopl.fIOPL)
2928 highPage = upl_get_highest_page(iopl.fIOPL);
2929 if (highPage > highestPage)
2930 highestPage = highPage;
2931
2932 error = kIOReturnCannotWire;
2933
2934 if (baseInfo->device) {
2935 numPageInfo = 1;
2936 iopl.fFlags = kIOPLOnDevice;
2937 }
2938 else {
2939 iopl.fFlags = 0;
2940 }
2941
2942 iopl.fIOMDOffset = mdOffset;
2943 iopl.fPageInfo = pageIndex;
2944 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2945
2946 #if 0
2947 // used to remove the upl for auto prepares here, for some errant code
2948 // that freed memory before the descriptor pointing at it
2949 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2950 {
2951 upl_commit(iopl.fIOPL, 0, 0);
2952 upl_deallocate(iopl.fIOPL);
2953 iopl.fIOPL = 0;
2954 }
2955 #endif
2956
2957 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2958 // Clean up partially created and unsaved iopl
2959 if (iopl.fIOPL) {
2960 upl_abort(iopl.fIOPL, 0);
2961 upl_deallocate(iopl.fIOPL);
2962 }
2963 goto abortExit;
2964 }
2965 dataP = 0;
2966
2967 // Check for multiple iopls in one virtual range
2968 pageIndex += numPageInfo;
2969 mdOffset -= iopl.fPageOffset;
2970 if (ioplSize < numBytes) {
2971 numBytes -= ioplSize;
2972 startPage += ioplSize;
2973 mdOffset += ioplSize;
2974 iopl.fPageOffset = 0;
2975 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2976 }
2977 else {
2978 mdOffset += numBytes;
2979 break;
2980 }
2981 }
2982 }
2983
2984 _highestPage = highestPage;
2985
2986 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2987
2988 if ((kIOTracking & gIOKitDebug)
2989 //&& !(_flags & kIOMemoryAutoPrepare)
2990 )
2991 {
2992 dataP = getDataP(_memoryEntries);
2993 #if IOTRACKING
2994 IOTrackingAdd(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages), false);
2995 #endif
2996 }
2997
2998 return kIOReturnSuccess;
2999
3000 abortExit:
3001 {
3002 dataP = getDataP(_memoryEntries);
3003 UInt done = getNumIOPL(_memoryEntries, dataP);
3004 ioPLBlock *ioplList = getIOPLList(dataP);
3005
3006 for (UInt range = 0; range < done; range++)
3007 {
3008 if (ioplList[range].fIOPL) {
3009 upl_abort(ioplList[range].fIOPL, 0);
3010 upl_deallocate(ioplList[range].fIOPL);
3011 }
3012 }
3013 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3014 }
3015
3016 if (error == KERN_FAILURE)
3017 error = kIOReturnCannotWire;
3018 else if (error == KERN_MEMORY_ERROR)
3019 error = kIOReturnNoResources;
3020
3021 return error;
3022 }
3023
3024 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
3025 {
3026 ioGMDData * dataP;
3027 unsigned dataSize = size;
3028
3029 if (!_memoryEntries) {
3030 _memoryEntries = OSData::withCapacity(dataSize);
3031 if (!_memoryEntries)
3032 return false;
3033 }
3034 else if (!_memoryEntries->initWithCapacity(dataSize))
3035 return false;
3036
3037 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3038 dataP = getDataP(_memoryEntries);
3039
3040 if (mapper == kIOMapperWaitSystem) {
3041 IOMapper::checkForSystemMapper();
3042 mapper = IOMapper::gSystem;
3043 }
3044 dataP->fMapper = mapper;
3045 dataP->fPageCnt = 0;
3046 dataP->fMappedBase = 0;
3047 dataP->fDMAMapNumAddressBits = 64;
3048 dataP->fDMAMapAlignment = 0;
3049 dataP->fPreparationID = kIOPreparationIDUnprepared;
3050 dataP->fDiscontig = false;
3051 dataP->fCompletionError = false;
3052
3053 return (true);
3054 }
3055
3056 IOReturn IOMemoryDescriptor::dmaMap(
3057 IOMapper * mapper,
3058 IODMACommand * command,
3059 const IODMAMapSpecification * mapSpec,
3060 uint64_t offset,
3061 uint64_t length,
3062 uint64_t * mapAddress,
3063 uint64_t * mapLength)
3064 {
3065 IOReturn ret;
3066 uint32_t mapOptions;
3067
3068 mapOptions = 0;
3069 mapOptions |= kIODMAMapReadAccess;
3070 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3071
3072 ret = mapper->iovmMapMemory(this, offset, length, mapOptions,
3073 mapSpec, command, NULL, mapAddress, mapLength);
3074
3075 return (ret);
3076 }
3077
3078 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3079 IOMapper * mapper,
3080 IODMACommand * command,
3081 const IODMAMapSpecification * mapSpec,
3082 uint64_t offset,
3083 uint64_t length,
3084 uint64_t * mapAddress,
3085 uint64_t * mapLength)
3086 {
3087 IOReturn err = kIOReturnSuccess;
3088 ioGMDData * dataP;
3089 IOOptionBits type = _flags & kIOMemoryTypeMask;
3090
3091 *mapAddress = 0;
3092 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3093
3094 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3095 || offset || (length != _length))
3096 {
3097 err = super::dmaMap(mapper, command, mapSpec, offset, length, mapAddress, mapLength);
3098 }
3099 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3100 {
3101 const ioPLBlock * ioplList = getIOPLList(dataP);
3102 upl_page_info_t * pageList;
3103 uint32_t mapOptions = 0;
3104
3105 IODMAMapSpecification mapSpec;
3106 bzero(&mapSpec, sizeof(mapSpec));
3107 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3108 mapSpec.alignment = dataP->fDMAMapAlignment;
3109
3110 // For external UPLs the fPageInfo field points directly to
3111 // the upl's upl_page_info_t array.
3112 if (ioplList->fFlags & kIOPLExternUPL)
3113 {
3114 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3115 mapOptions |= kIODMAMapPagingPath;
3116 }
3117 else pageList = getPageList(dataP);
3118
3119 if ((_length == ptoa_64(_pages)) && !(page_mask & ioplList->fPageOffset))
3120 {
3121 mapOptions |= kIODMAMapPageListFullyOccupied;
3122 }
3123
3124 mapOptions |= kIODMAMapReadAccess;
3125 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3126
3127 // Check for direct device non-paged memory
3128 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3129
3130 IODMAMapPageList dmaPageList =
3131 {
3132 .pageOffset = ioplList->fPageOffset & page_mask,
3133 .pageListCount = _pages,
3134 .pageList = &pageList[0]
3135 };
3136 err = mapper->iovmMapMemory(this, offset, length, mapOptions, &mapSpec,
3137 command, &dmaPageList, mapAddress, mapLength);
3138 }
3139
3140 return (err);
3141 }
3142
3143 /*
3144 * prepare
3145 *
3146 * Prepare the memory for an I/O transfer. This involves paging in
3147 * the memory, if necessary, and wiring it down for the duration of
3148 * the transfer. The complete() method completes the processing of
3149 * the memory after the I/O transfer finishes. This method needn't be
3150 * called for non-pageable memory.
3151 */
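//
// Illustrative sketch only (not part of the original source): one plausible
// prepare()/complete() pairing as seen from a driver. The descriptor 'md' and
// the kIODirectionOut direction are assumptions for the example.
//
#if 0 /* example sketch */
static IOReturn
ExampleWiredTransfer(IOMemoryDescriptor * md)
{
    IOReturn ret = md->prepare(kIODirectionOut);     // page in and wire the memory
    if (kIOReturnSuccess != ret)
        return (ret);

    // ... memory is wired here; safe to build a DMA scatter/gather list ...

    return (md->complete(kIODirectionOut));          // must balance the prepare()
}
#endif /* example sketch */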
3152
3153 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3154 {
3155 IOReturn error = kIOReturnSuccess;
3156 IOOptionBits type = _flags & kIOMemoryTypeMask;
3157
3158 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3159 return kIOReturnSuccess;
3160
3161 if (_prepareLock)
3162 IOLockLock(_prepareLock);
3163
3164 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3165 {
3166 error = wireVirtual(forDirection);
3167 }
3168
3169 if (kIOReturnSuccess == error)
3170 {
3171 if (1 == ++_wireCount)
3172 {
3173 if (kIOMemoryClearEncrypt & _flags)
3174 {
3175 performOperation(kIOMemoryClearEncrypted, 0, _length);
3176 }
3177 }
3178 }
3179
3180 if (_prepareLock)
3181 IOLockUnlock(_prepareLock);
3182
3183 return error;
3184 }
3185
3186 /*
3187 * complete
3188 *
3189 * Complete processing of the memory after an I/O transfer finishes.
3190 * This method should not be called unless a prepare was previously
3191 * issued; the prepare() and complete() must occur in pairs,
3192 * before and after an I/O transfer involving pageable memory.
3193 */
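//
// Illustrative sketch only (assumption, not original source): on a failed
// transfer a caller may add kIODirectionCompleteWithError so that the wired
// UPLs are aborted rather than committed (see the fCompletionError handling below).
//
#if 0 /* example sketch */
static void
ExampleCompleteAfterError(IOMemoryDescriptor * md, bool transferFailed)
{
    IODirection dir = kIODirectionInOut;
    if (transferFailed)
        dir = (IODirection) (dir | kIODirectionCompleteWithError);
    md->complete(dir);                               // balances an earlier prepare()
}
#endif /* example sketch */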
3194
3195 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3196 {
3197 IOOptionBits type = _flags & kIOMemoryTypeMask;
3198 ioGMDData * dataP;
3199
3200 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3201 return kIOReturnSuccess;
3202
3203 if (_prepareLock)
3204 IOLockLock(_prepareLock);
3205
3206 assert(_wireCount);
3207
3208 if ((kIODirectionCompleteWithError & forDirection)
3209 && (dataP = getDataP(_memoryEntries)))
3210 dataP->fCompletionError = true;
3211
3212 if (_wireCount)
3213 {
3214 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3215 {
3216 performOperation(kIOMemorySetEncrypted, 0, _length);
3217 }
3218
3219 _wireCount--;
3220 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3221 {
3222 IOOptionBits type = _flags & kIOMemoryTypeMask;
3223 dataP = getDataP(_memoryEntries);
3224 ioPLBlock *ioplList = getIOPLList(dataP);
3225 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3226
3227 if (_wireCount)
3228 {
3229 // kIODirectionCompleteWithDataValid & forDirection
3230 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3231 {
3232 for (ind = 0; ind < count; ind++)
3233 {
3234 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3235 }
3236 }
3237 }
3238 else
3239 {
3240 #if IOMD_DEBUG_DMAACTIVE
3241 if (__iomd_reservedA) panic("complete() while dma active");
3242 #endif /* IOMD_DEBUG_DMAACTIVE */
3243
3244 if (dataP->fMappedBase) {
3245 dataP->fMapper->iovmUnmapMemory(this, NULL, dataP->fMappedBase, dataP->fMappedLength);
3246 dataP->fMappedBase = 0;
3247 }
3248 // Only complete iopls that we created which are for TypeVirtual
3249 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3250 #if IOTRACKING
3251 if ((kIOTracking & gIOKitDebug)
3252 //&& !(_flags & kIOMemoryAutoPrepare)
3253 )
3254 {
3255 IOTrackingRemove(gIOWireTracking, &dataP->fWireTracking, ptoa(_pages));
3256 }
3257 #endif
3258 for (ind = 0; ind < count; ind++)
3259 if (ioplList[ind].fIOPL) {
3260 if (dataP->fCompletionError)
3261 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3262 else
3263 upl_commit(ioplList[ind].fIOPL, 0, 0);
3264 upl_deallocate(ioplList[ind].fIOPL);
3265 }
3266 } else if (kIOMemoryTypeUPL == type) {
3267 upl_set_referenced(ioplList[0].fIOPL, false);
3268 }
3269
3270 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3271
3272 dataP->fPreparationID = kIOPreparationIDUnprepared;
3273 }
3274 }
3275 }
3276
3277 if (_prepareLock)
3278 IOLockUnlock(_prepareLock);
3279
3280 return kIOReturnSuccess;
3281 }
3282
3283 IOReturn IOGeneralMemoryDescriptor::doMap(
3284 vm_map_t __addressMap,
3285 IOVirtualAddress * __address,
3286 IOOptionBits options,
3287 IOByteCount __offset,
3288 IOByteCount __length )
3289 {
3290 #ifndef __LP64__
3291 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3292 #endif /* !__LP64__ */
3293
3294 kern_return_t err;
3295
3296 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3297 mach_vm_size_t offset = mapping->fOffset + __offset;
3298 mach_vm_size_t length = mapping->fLength;
3299
3300 IOOptionBits type = _flags & kIOMemoryTypeMask;
3301 Ranges vec = _ranges;
3302
3303 mach_vm_address_t range0Addr = 0;
3304 mach_vm_size_t range0Len = 0;
3305
3306 if ((offset >= _length) || ((offset + length) > _length))
3307 return( kIOReturnBadArgument );
3308
3309 if (vec.v)
3310 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3311
3312 // mapping source == dest? (could be much better)
3313 if (_task
3314 && (mapping->fAddressTask == _task)
3315 && (mapping->fAddressMap == get_task_map(_task))
3316 && (options & kIOMapAnywhere)
3317 && (1 == _rangesCount)
3318 && (0 == offset)
3319 && range0Addr
3320 && (length <= range0Len))
3321 {
3322 mapping->fAddress = range0Addr;
3323 mapping->fOptions |= kIOMapStatic;
3324
3325 return( kIOReturnSuccess );
3326 }
3327
3328 if (!_memRef)
3329 {
3330 IOOptionBits createOptions = 0;
3331 if (!(kIOMapReadOnly & options))
3332 {
3333 createOptions |= kIOMemoryReferenceWrite;
3334 #if DEVELOPMENT || DEBUG
3335 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3336 {
3337 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3338 }
3339 #endif
3340 }
3341 err = memoryReferenceCreate(createOptions, &_memRef);
3342 if (kIOReturnSuccess != err) return (err);
3343 }
3344
3345 memory_object_t pager;
3346 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3347
3348 // <upl_transpose //
3349 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3350 {
3351 do
3352 {
3353 upl_t redirUPL2;
3354 upl_size_t size;
3355 upl_control_flags_t flags;
3356 unsigned int lock_count;
3357
3358 if (!_memRef || (1 != _memRef->count))
3359 {
3360 err = kIOReturnNotReadable;
3361 break;
3362 }
3363
3364 size = round_page(mapping->fLength);
3365 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3366 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
3367 | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
3368
3369 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3370 NULL, NULL,
3371 &flags))
3372 redirUPL2 = NULL;
3373
3374 for (lock_count = 0;
3375 IORecursiveLockHaveLock(gIOMemoryLock);
3376 lock_count++) {
3377 UNLOCK;
3378 }
3379 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3380 for (;
3381 lock_count;
3382 lock_count--) {
3383 LOCK;
3384 }
3385
3386 if (kIOReturnSuccess != err)
3387 {
3388 IOLog("upl_transpose(%x)\n", err);
3389 err = kIOReturnSuccess;
3390 }
3391
3392 if (redirUPL2)
3393 {
3394 upl_commit(redirUPL2, NULL, 0);
3395 upl_deallocate(redirUPL2);
3396 redirUPL2 = 0;
3397 }
3398 {
3399 // swap the memEntries since they now refer to different vm_objects
3400 IOMemoryReference * me = _memRef;
3401 _memRef = mapping->fMemory->_memRef;
3402 mapping->fMemory->_memRef = me;
3403 }
3404 if (pager)
3405 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3406 }
3407 while (false);
3408 }
3409 // upl_transpose> //
3410 else
3411 {
3412 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3413 #if IOTRACKING
3414 if (err == KERN_SUCCESS) IOTrackingAdd(gIOMapTracking, &mapping->fTracking, length, false);
3415 #endif
3416 if ((err == KERN_SUCCESS) && pager)
3417 {
3418 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3419
3420 if (err != KERN_SUCCESS) doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3421 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3422 {
3423 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3424 }
3425 }
3426 }
3427
3428 return (err);
3429 }
3430
3431 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3432 vm_map_t addressMap,
3433 IOVirtualAddress __address,
3434 IOByteCount __length )
3435 {
3436 return (super::doUnmap(addressMap, __address, __length));
3437 }
3438
3439 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3440
3441 #undef super
3442 #define super OSObject
3443
3444 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3445
3446 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3447 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3448 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3449 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3450 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3451 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3452 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3453 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3454
3455 /* ex-inline function implementation */
3456 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3457 { return( getPhysicalSegment( 0, 0 )); }
3458
3459 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3460
3461 bool IOMemoryMap::init(
3462 task_t intoTask,
3463 mach_vm_address_t toAddress,
3464 IOOptionBits _options,
3465 mach_vm_size_t _offset,
3466 mach_vm_size_t _length )
3467 {
3468 if (!intoTask)
3469 return( false);
3470
3471 if (!super::init())
3472 return(false);
3473
3474 fAddressMap = get_task_map(intoTask);
3475 if (!fAddressMap)
3476 return(false);
3477 vm_map_reference(fAddressMap);
3478
3479 fAddressTask = intoTask;
3480 fOptions = _options;
3481 fLength = _length;
3482 fOffset = _offset;
3483 fAddress = toAddress;
3484
3485 return (true);
3486 }
3487
3488 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3489 {
3490 if (!_memory)
3491 return(false);
3492
3493 if (!fSuperMap)
3494 {
3495 if( (_offset + fLength) > _memory->getLength())
3496 return( false);
3497 fOffset = _offset;
3498 }
3499
3500 _memory->retain();
3501 if (fMemory)
3502 {
3503 if (fMemory != _memory)
3504 fMemory->removeMapping(this);
3505 fMemory->release();
3506 }
3507 fMemory = _memory;
3508
3509 return( true );
3510 }
3511
3512 IOReturn IOMemoryDescriptor::doMap(
3513 vm_map_t __addressMap,
3514 IOVirtualAddress * __address,
3515 IOOptionBits options,
3516 IOByteCount __offset,
3517 IOByteCount __length )
3518 {
3519 return (kIOReturnUnsupported);
3520 }
3521
3522 IOReturn IOMemoryDescriptor::handleFault(
3523 void * _pager,
3524 mach_vm_size_t sourceOffset,
3525 mach_vm_size_t length)
3526 {
3527 if( kIOMemoryRedirected & _flags)
3528 {
3529 #if DEBUG
3530 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3531 #endif
3532 do {
3533 SLEEP;
3534 } while( kIOMemoryRedirected & _flags );
3535 }
3536 return (kIOReturnSuccess);
3537 }
3538
3539 IOReturn IOMemoryDescriptor::populateDevicePager(
3540 void * _pager,
3541 vm_map_t addressMap,
3542 mach_vm_address_t address,
3543 mach_vm_size_t sourceOffset,
3544 mach_vm_size_t length,
3545 IOOptionBits options )
3546 {
3547 IOReturn err = kIOReturnSuccess;
3548 memory_object_t pager = (memory_object_t) _pager;
3549 mach_vm_size_t size;
3550 mach_vm_size_t bytes;
3551 mach_vm_size_t page;
3552 mach_vm_size_t pageOffset;
3553 mach_vm_size_t pagerOffset;
3554 IOPhysicalLength segLen, chunk;
3555 addr64_t physAddr;
3556 IOOptionBits type;
3557
3558 type = _flags & kIOMemoryTypeMask;
3559
3560 if (reserved->dp.pagerContig)
3561 {
3562 sourceOffset = 0;
3563 pagerOffset = 0;
3564 }
3565
3566 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3567 assert( physAddr );
3568 pageOffset = physAddr - trunc_page_64( physAddr );
3569 pagerOffset = sourceOffset;
3570
3571 size = length + pageOffset;
3572 physAddr -= pageOffset;
3573
3574 segLen += pageOffset;
3575 bytes = size;
3576 do
3577 {
3578 // in the middle of the loop only map whole pages
3579 if( segLen >= bytes) segLen = bytes;
3580 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3581 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3582
3583 if (kIOReturnSuccess != err) break;
3584
3585 #if DEBUG || DEVELOPMENT
3586 if ((kIOMemoryTypeUPL != type)
3587 && pmap_has_managed_page(atop_64(physAddr), atop_64(physAddr + segLen - 1)))
3588 {
3589 OSReportWithBacktrace("IOMemoryDescriptor physical with managed page 0x%qx:0x%qx", physAddr, segLen);
3590 }
3591 #endif /* DEBUG || DEVELOPMENT */
3592
3593 chunk = (reserved->dp.pagerContig ? round_page(segLen) : page_size);
3594 for (page = 0;
3595 (page < segLen) && (KERN_SUCCESS == err);
3596 page += chunk)
3597 {
3598 err = device_pager_populate_object(pager, pagerOffset,
3599 (ppnum_t)(atop_64(physAddr + page)), chunk);
3600 pagerOffset += chunk;
3601 }
3602
3603 assert (KERN_SUCCESS == err);
3604 if (err) break;
3605
3606 // For kernel mappings, this call to vm_fault forces an early pmap-level
3607 // resolution of the mappings created above, since faulting them in
3608 // later can't take place from interrupt level.
3609 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3610 {
3611 vm_fault(addressMap,
3612 (vm_map_offset_t)trunc_page_64(address),
3613 VM_PROT_READ|VM_PROT_WRITE,
3614 FALSE, THREAD_UNINT, NULL,
3615 (vm_map_offset_t)0);
3616 }
3617
3618 sourceOffset += segLen - pageOffset;
3619 address += segLen;
3620 bytes -= segLen;
3621 pageOffset = 0;
3622 }
3623 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3624
3625 if (bytes)
3626 err = kIOReturnBadArgument;
3627
3628 return (err);
3629 }
3630
3631 IOReturn IOMemoryDescriptor::doUnmap(
3632 vm_map_t addressMap,
3633 IOVirtualAddress __address,
3634 IOByteCount __length )
3635 {
3636 IOReturn err;
3637 IOMemoryMap * mapping;
3638 mach_vm_address_t address;
3639 mach_vm_size_t length;
3640
3641 if (__length) panic("doUnmap");
3642
3643 mapping = (IOMemoryMap *) __address;
3644 addressMap = mapping->fAddressMap;
3645 address = mapping->fAddress;
3646 length = mapping->fLength;
3647
3648 if (kIOMapOverwrite & mapping->fOptions) err = KERN_SUCCESS;
3649 else
3650 {
3651 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3652 addressMap = IOPageableMapForAddress( address );
3653 #if DEBUG
3654 if( kIOLogMapping & gIOKitDebug) IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3655 addressMap, address, length );
3656 #endif
3657 err = mach_vm_deallocate( addressMap, address, length );
3658 }
3659
3660 #if IOTRACKING
3661 IOTrackingRemove(gIOMapTracking, &mapping->fTracking, length);
3662 #endif
3663
3664 return (err);
3665 }
3666
3667 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3668 {
3669 IOReturn err = kIOReturnSuccess;
3670 IOMemoryMap * mapping = 0;
3671 OSIterator * iter;
3672
3673 LOCK;
3674
3675 if( doRedirect)
3676 _flags |= kIOMemoryRedirected;
3677 else
3678 _flags &= ~kIOMemoryRedirected;
3679
3680 do {
3681 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3682
3683 memory_object_t pager;
3684
3685 if( reserved)
3686 pager = (memory_object_t) reserved->dp.devicePager;
3687 else
3688 pager = MACH_PORT_NULL;
3689
3690 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3691 {
3692 mapping->redirect( safeTask, doRedirect );
3693 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3694 {
3695 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3696 }
3697 }
3698
3699 iter->release();
3700 }
3701 } while( false );
3702
3703 if (!doRedirect)
3704 {
3705 WAKEUP;
3706 }
3707
3708 UNLOCK;
3709
3710 #ifndef __LP64__
3711 // temporary binary compatibility
3712 IOSubMemoryDescriptor * subMem;
3713 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3714 err = subMem->redirect( safeTask, doRedirect );
3715 else
3716 err = kIOReturnSuccess;
3717 #endif /* !__LP64__ */
3718
3719 return( err );
3720 }
3721
3722 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3723 {
3724 IOReturn err = kIOReturnSuccess;
3725
3726 if( fSuperMap) {
3727 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3728 } else {
3729
3730 LOCK;
3731
3732 do
3733 {
3734 if (!fAddress)
3735 break;
3736 if (!fAddressMap)
3737 break;
3738
3739 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3740 && (0 == (fOptions & kIOMapStatic)))
3741 {
3742 IOUnmapPages( fAddressMap, fAddress, fLength );
3743 err = kIOReturnSuccess;
3744 #if DEBUG
3745 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3746 #endif
3747 }
3748 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3749 {
3750 IOOptionBits newMode;
3751 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3752 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3753 }
3754 }
3755 while (false);
3756 UNLOCK;
3757 }
3758
3759 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3760 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3761 && safeTask
3762 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3763 fMemory->redirect(safeTask, doRedirect);
3764
3765 return( err );
3766 }
3767
3768 IOReturn IOMemoryMap::unmap( void )
3769 {
3770 IOReturn err;
3771
3772 LOCK;
3773
3774 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3775 && (0 == (kIOMapStatic & fOptions))) {
3776
3777 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3778
3779 } else
3780 err = kIOReturnSuccess;
3781
3782 if (fAddressMap)
3783 {
3784 vm_map_deallocate(fAddressMap);
3785 fAddressMap = 0;
3786 }
3787
3788 fAddress = 0;
3789
3790 UNLOCK;
3791
3792 return( err );
3793 }
3794
3795 void IOMemoryMap::taskDied( void )
3796 {
3797 LOCK;
3798 if (fUserClientUnmap) unmap();
3799 #if IOTRACKING
3800 else IOTrackingRemove(gIOMapTracking, &fTracking, fLength);
3801 #endif
3802
3803 if( fAddressMap) {
3804 vm_map_deallocate(fAddressMap);
3805 fAddressMap = 0;
3806 }
3807 fAddressTask = 0;
3808 fAddress = 0;
3809 UNLOCK;
3810 }
3811
3812 IOReturn IOMemoryMap::userClientUnmap( void )
3813 {
3814 fUserClientUnmap = true;
3815 return (kIOReturnSuccess);
3816 }
3817
3818 // Overload the release mechanism. Every mapping must be a member
3819 // of a memory descriptor's _mappings set. This means that we
3820 // always have 2 references on a mapping. When either of these references
3821 // is released we need to free ourselves; see the illustrative sketch below.
3822 void IOMemoryMap::taggedRelease(const void *tag) const
3823 {
3824 LOCK;
3825 super::taggedRelease(tag, 2);
3826 UNLOCK;
3827 }
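//
// Illustrative sketch only (not part of the original source): the two-reference
// lifecycle described above taggedRelease(), assuming a hypothetical descriptor
// 'md' mapped by a caller.
//
#if 0 /* example sketch */
static void
ExampleMapLifecycle(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->map();   // one reference for the caller, one held via
                                     // the descriptor's _mappings set
    if (!map)
        return;
    // ... use map->getAddress() / map->getLength() ...
    map->release();                  // drops the caller's reference; taggedRelease()
                                     // then lets free() run and remove the mapping
}
#endif /* example sketch */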
3828
3829 void IOMemoryMap::free()
3830 {
3831 unmap();
3832
3833 if (fMemory)
3834 {
3835 LOCK;
3836 fMemory->removeMapping(this);
3837 UNLOCK;
3838 fMemory->release();
3839 }
3840
3841 if (fOwner && (fOwner != fMemory))
3842 {
3843 LOCK;
3844 fOwner->removeMapping(this);
3845 UNLOCK;
3846 }
3847
3848 if (fSuperMap)
3849 fSuperMap->release();
3850
3851 if (fRedirUPL) {
3852 upl_commit(fRedirUPL, NULL, 0);
3853 upl_deallocate(fRedirUPL);
3854 }
3855
3856 super::free();
3857 }
3858
3859 IOByteCount IOMemoryMap::getLength()
3860 {
3861 return( fLength );
3862 }
3863
3864 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3865 {
3866 #ifndef __LP64__
3867 if (fSuperMap)
3868 fSuperMap->getVirtualAddress();
3869 else if (fAddressMap
3870 && vm_map_is_64bit(fAddressMap)
3871 && (sizeof(IOVirtualAddress) < 8))
3872 {
3873 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3874 }
3875 #endif /* !__LP64__ */
3876
3877 return (fAddress);
3878 }
3879
3880 #ifndef __LP64__
3881 mach_vm_address_t IOMemoryMap::getAddress()
3882 {
3883 return( fAddress);
3884 }
3885
3886 mach_vm_size_t IOMemoryMap::getSize()
3887 {
3888 return( fLength );
3889 }
3890 #endif /* !__LP64__ */
3891
3892
3893 task_t IOMemoryMap::getAddressTask()
3894 {
3895 if( fSuperMap)
3896 return( fSuperMap->getAddressTask());
3897 else
3898 return( fAddressTask);
3899 }
3900
3901 IOOptionBits IOMemoryMap::getMapOptions()
3902 {
3903 return( fOptions);
3904 }
3905
3906 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3907 {
3908 return( fMemory );
3909 }
3910
3911 IOMemoryMap * IOMemoryMap::copyCompatible(
3912 IOMemoryMap * newMapping )
3913 {
3914 task_t task = newMapping->getAddressTask();
3915 mach_vm_address_t toAddress = newMapping->fAddress;
3916 IOOptionBits _options = newMapping->fOptions;
3917 mach_vm_size_t _offset = newMapping->fOffset;
3918 mach_vm_size_t _length = newMapping->fLength;
3919
3920 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3921 return( 0 );
3922 if( (fOptions ^ _options) & kIOMapReadOnly)
3923 return( 0 );
3924 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3925 && ((fOptions ^ _options) & kIOMapCacheMask))
3926 return( 0 );
3927
3928 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3929 return( 0 );
3930
3931 if( _offset < fOffset)
3932 return( 0 );
3933
3934 _offset -= fOffset;
3935
3936 if( (_offset + _length) > fLength)
3937 return( 0 );
3938
3939 retain();
3940 if( (fLength == _length) && (!_offset))
3941 {
3942 newMapping = this;
3943 }
3944 else
3945 {
3946 newMapping->fSuperMap = this;
3947 newMapping->fOffset = fOffset + _offset;
3948 newMapping->fAddress = fAddress + _offset;
3949 }
3950
3951 return( newMapping );
3952 }
3953
3954 IOReturn IOMemoryMap::wireRange(
3955 uint32_t options,
3956 mach_vm_size_t offset,
3957 mach_vm_size_t length)
3958 {
3959 IOReturn kr;
3960 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3961 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3962 vm_prot_t prot;
3963
3964 prot = (kIODirectionOutIn & options);
3965 if (prot)
3966 {
3967 prot |= VM_PROT_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
3968 kr = vm_map_wire(fAddressMap, start, end, prot, FALSE);
3969 }
3970 else
3971 {
3972 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3973 }
3974
3975 return (kr);
3976 }
3977
3978
3979 IOPhysicalAddress
3980 #ifdef __LP64__
3981 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3982 #else /* !__LP64__ */
3983 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3984 #endif /* !__LP64__ */
3985 {
3986 IOPhysicalAddress address;
3987
3988 LOCK;
3989 #ifdef __LP64__
3990 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3991 #else /* !__LP64__ */
3992 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3993 #endif /* !__LP64__ */
3994 UNLOCK;
3995
3996 return( address );
3997 }
3998
3999 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4000
4001 #undef super
4002 #define super OSObject
4003
4004 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4005
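// One-time class initialization: allocate the global recursive lock
// (gIOMemoryLock) and cache the system's last physical page number.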
4006 void IOMemoryDescriptor::initialize( void )
4007 {
4008 if( 0 == gIOMemoryLock)
4009 gIOMemoryLock = IORecursiveLockAlloc();
4010
4011 gIOLastPage = IOGetLastPageNumber();
4012 }
4013
4014 void IOMemoryDescriptor::free( void )
4015 {
4016 if( _mappings) _mappings->release();
4017
4018 if (reserved)
4019 {
4020 IODelete(reserved, IOMemoryDescriptorReserved, 1);
4021 reserved = NULL;
4022 }
4023 super::free();
4024 }
4025
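/*
 * Convenience wrappers: setMapping() records a pre-existing (static) mapping
 * at a caller-supplied address, and map() maps the whole descriptor anywhere
 * in the kernel task.  Both funnel into createMappingInTask().
 *
 *     // e.g. (illustrative): IOMemoryMap * m = md->map(kIOMapReadOnly);
 */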
4026 IOMemoryMap * IOMemoryDescriptor::setMapping(
4027 task_t intoTask,
4028 IOVirtualAddress mapAddress,
4029 IOOptionBits options )
4030 {
4031 return (createMappingInTask( intoTask, mapAddress,
4032 options | kIOMapStatic,
4033 0, getLength() ));
4034 }
4035
4036 IOMemoryMap * IOMemoryDescriptor::map(
4037 IOOptionBits options )
4038 {
4039 return (createMappingInTask( kernel_task, 0,
4040 options | kIOMapAnywhere,
4041 0, getLength() ));
4042 }
4043
4044 #ifndef __LP64__
4045 IOMemoryMap * IOMemoryDescriptor::map(
4046 task_t intoTask,
4047 IOVirtualAddress atAddress,
4048 IOOptionBits options,
4049 IOByteCount offset,
4050 IOByteCount length )
4051 {
4052 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4053 {
4054 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4055 return (0);
4056 }
4057
4058 return (createMappingInTask(intoTask, atAddress,
4059 options, offset, length));
4060 }
4061 #endif /* !__LP64__ */
4062
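/*
 * createMappingInTask is the primary entry point for mapping a memory
 * descriptor into a task.  It allocates an IOMemoryMap, then hands it to
 * makeMapping() (with kIOMap64Bit) to either reuse a compatible existing
 * mapping or create a new one via doMap().
 *
 * Illustrative driver-side usage (not part of this file; `md` is a
 * hypothetical, already-prepared IOMemoryDescriptor):
 *
 *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                                 kIOMapAnywhere, 0, 0);
 *     if (map) {
 *         IOVirtualAddress va = map->getVirtualAddress();
 *         // ... access the mapped bytes ...
 *         map->release();    // mapping is torn down when the last reference drops
 *     }
 */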
4063 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4064 task_t intoTask,
4065 mach_vm_address_t atAddress,
4066 IOOptionBits options,
4067 mach_vm_size_t offset,
4068 mach_vm_size_t length)
4069 {
4070 IOMemoryMap * result;
4071 IOMemoryMap * mapping;
4072
4073 if (0 == length)
4074 length = getLength();
4075
4076 mapping = new IOMemoryMap;
4077
4078 if( mapping
4079 && !mapping->init( intoTask, atAddress,
4080 options, offset, length )) {
4081 mapping->release();
4082 mapping = 0;
4083 }
4084
4085 if (mapping)
4086 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4087 else
4088 result = 0;
4089
4090 #if DEBUG
4091 if (!result)
4092 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4093 this, atAddress, (uint32_t) options, offset, length);
4094 #endif
4095
4096 return (result);
4097 }
4098
4099 #ifndef __LP64__ // on LP64 only the 64-bit variant exists
4100 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4101 IOOptionBits options,
4102 IOByteCount offset)
4103 {
4104 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4105 }
4106 #endif
4107
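/*
 * redirect() re-points a live mapping at new backing memory.  If this mapping
 * holds the only reference to the backing memory entry, access is first
 * blocked with a UPL (UPL_BLOCK_ACCESS) and the old pages are unmapped; the
 * mapping is then rebuilt against newBackingMemory via makeMapping() with
 * kIOMapUnique | kIOMapReference, after which the blocking UPL is committed
 * and released.
 */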
4108 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4109 IOOptionBits options,
4110 mach_vm_size_t offset)
4111 {
4112 IOReturn err = kIOReturnSuccess;
4113 IOMemoryDescriptor * physMem = 0;
4114
4115 LOCK;
4116
4117 if (fAddress && fAddressMap) do
4118 {
4119 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4120 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4121 {
4122 physMem = fMemory;
4123 physMem->retain();
4124 }
4125
4126 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4127 {
4128 upl_size_t size = round_page(fLength);
4129 upl_control_flags_t flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4130 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS
4131 | UPL_MEMORY_TAG_MAKE(IOMemoryTag(kernel_map));
4132 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4133 NULL, NULL,
4134 &flags))
4135 fRedirUPL = 0;
4136
4137 if (physMem)
4138 {
4139 IOUnmapPages( fAddressMap, fAddress, fLength );
4140 if ((false))
4141 physMem->redirect(0, true);
4142 }
4143 }
4144
4145 if (newBackingMemory)
4146 {
4147 if (newBackingMemory != fMemory)
4148 {
4149 fOffset = 0;
4150 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4151 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4152 offset, fLength))
4153 err = kIOReturnError;
4154 }
4155 if (fRedirUPL)
4156 {
4157 upl_commit(fRedirUPL, NULL, 0);
4158 upl_deallocate(fRedirUPL);
4159 fRedirUPL = 0;
4160 }
4161 if ((false) && physMem)
4162 physMem->redirect(0, false);
4163 }
4164 }
4165 while (false);
4166
4167 UNLOCK;
4168
4169 if (physMem)
4170 physMem->release();
4171
4172 return (err);
4173 }
4174
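/*
 * makeMapping is the internal factory behind all mapping creation.  The
 * IOMemoryMap passed in via __address is consumed: for kIOMapStatic it is
 * simply recorded; for kIOMapUnique a fresh physical-range sub-descriptor may
 * be substituted as the backing memory; otherwise an existing compatible
 * mapping is reused via copyCompatible(), and failing that doMap() builds a
 * new mapping.  If no mapping can be produced, NULL is returned.
 */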
4175 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4176 IOMemoryDescriptor * owner,
4177 task_t __intoTask,
4178 IOVirtualAddress __address,
4179 IOOptionBits options,
4180 IOByteCount __offset,
4181 IOByteCount __length )
4182 {
4183 #ifndef __LP64__
4184 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4185 #endif /* !__LP64__ */
4186
4187 IOMemoryDescriptor * mapDesc = 0;
4188 IOMemoryMap * result = 0;
4189 OSIterator * iter;
4190
4191 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4192 mach_vm_size_t offset = mapping->fOffset + __offset;
4193 mach_vm_size_t length = mapping->fLength;
4194
4195 mapping->fOffset = offset;
4196
4197 LOCK;
4198
4199 do
4200 {
4201 if (kIOMapStatic & options)
4202 {
4203 result = mapping;
4204 addMapping(mapping);
4205 mapping->setMemoryDescriptor(this, 0);
4206 continue;
4207 }
4208
4209 if (kIOMapUnique & options)
4210 {
4211 addr64_t phys;
4212 IOByteCount physLen;
4213
4214 // if (owner != this) continue;
4215
4216 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4217 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4218 {
4219 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4220 if (!phys || (physLen < length))
4221 continue;
4222
4223 mapDesc = IOMemoryDescriptor::withAddressRange(
4224 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4225 if (!mapDesc)
4226 continue;
4227 offset = 0;
4228 mapping->fOffset = offset;
4229 }
4230 }
4231 else
4232 {
4233 // look for a compatible existing mapping
4234 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4235 {
4236 IOMemoryMap * lookMapping;
4237 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4238 {
4239 if ((result = lookMapping->copyCompatible(mapping)))
4240 {
4241 addMapping(result);
4242 result->setMemoryDescriptor(this, offset);
4243 break;
4244 }
4245 }
4246 iter->release();
4247 }
4248 if (result || (options & kIOMapReference))
4249 {
4250 if (result != mapping)
4251 {
4252 mapping->release();
4253 mapping = NULL;
4254 }
4255 continue;
4256 }
4257 }
4258
4259 if (!mapDesc)
4260 {
4261 mapDesc = this;
4262 mapDesc->retain();
4263 }
4264 IOReturn
4265 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4266 if (kIOReturnSuccess == kr)
4267 {
4268 result = mapping;
4269 mapDesc->addMapping(result);
4270 result->setMemoryDescriptor(mapDesc, offset);
4271 }
4272 else
4273 {
4274 mapping->release();
4275 mapping = NULL;
4276 }
4277 }
4278 while( false );
4279
4280 UNLOCK;
4281
4282 if (mapDesc)
4283 mapDesc->release();
4284
4285 return (result);
4286 }
4287
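// Bookkeeping: active mappings are tracked in the _mappings OSSet, which is
// created lazily on first use.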
4288 void IOMemoryDescriptor::addMapping(
4289 IOMemoryMap * mapping )
4290 {
4291 if( mapping)
4292 {
4293 if( 0 == _mappings)
4294 _mappings = OSSet::withCapacity(1);
4295 if( _mappings )
4296 _mappings->setObject( mapping );
4297 }
4298 }
4299
4300 void IOMemoryDescriptor::removeMapping(
4301 IOMemoryMap * mapping )
4302 {
4303 if( _mappings)
4304 _mappings->removeObject( mapping);
4305 }
4306
4307 #ifndef __LP64__
4308 // obsolete initializers
4309 // - initWithOptions is the designated initializer
4310 bool
4311 IOMemoryDescriptor::initWithAddress(void * address,
4312 IOByteCount length,
4313 IODirection direction)
4314 {
4315 return( false );
4316 }
4317
4318 bool
4319 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4320 IOByteCount length,
4321 IODirection direction,
4322 task_t task)
4323 {
4324 return( false );
4325 }
4326
4327 bool
4328 IOMemoryDescriptor::initWithPhysicalAddress(
4329 IOPhysicalAddress address,
4330 IOByteCount length,
4331 IODirection direction )
4332 {
4333 return( false );
4334 }
4335
4336 bool
4337 IOMemoryDescriptor::initWithRanges(
4338 IOVirtualRange * ranges,
4339 UInt32 withCount,
4340 IODirection direction,
4341 task_t task,
4342 bool asReference)
4343 {
4344 return( false );
4345 }
4346
4347 bool
4348 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4349 UInt32 withCount,
4350 IODirection direction,
4351 bool asReference)
4352 {
4353 return( false );
4354 }
4355
4356 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4357 IOByteCount * lengthOfSegment)
4358 {
4359 return( 0 );
4360 }
4361 #endif /* !__LP64__ */
4362
4363 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4364
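/*
 * serialize() snapshots the descriptor's ranges under the global lock
 * (bailing out if the range count changes underneath it), then emits an
 * OSArray of { "address", "length" } dictionaries, one per range.
 */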
4365 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4366 {
4367 OSSymbol const *keys[2];
4368 OSObject *values[2];
4369 OSArray * array;
4370
4371 struct SerData {
4372 user_addr_t address;
4373 user_size_t length;
4374 } *vcopy;
4375 unsigned int index, nRanges;
4376 bool result;
4377
4378 IOOptionBits type = _flags & kIOMemoryTypeMask;
4379
4380 if (s == NULL) return false;
4381
4382 array = OSArray::withCapacity(4);
4383 if (!array) return (false);
4384
4385 nRanges = _rangesCount;
4386 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4387 if (vcopy == 0) return false;
4388
4389 keys[0] = OSSymbol::withCString("address");
4390 keys[1] = OSSymbol::withCString("length");
4391
4392 result = false;
4393 values[0] = values[1] = 0;
4394
4395 // From this point on, any failure path jumps to the bail: label for cleanup.
4396
4397 // Copy the volatile data so we don't have to allocate memory
4398 // while the lock is held.
4399 LOCK;
4400 if (nRanges == _rangesCount) {
4401 Ranges vec = _ranges;
4402 for (index = 0; index < nRanges; index++) {
4403 mach_vm_address_t addr; mach_vm_size_t len;
4404 getAddrLenForInd(addr, len, type, vec, index);
4405 vcopy[index].address = addr;
4406 vcopy[index].length = len;
4407 }
4408 } else {
4409 // The descriptor changed out from under us. Give up.
4410 UNLOCK;
4411 result = false;
4412 goto bail;
4413 }
4414 UNLOCK;
4415
4416 for (index = 0; index < nRanges; index++)
4417 {
4418 user_addr_t addr = vcopy[index].address;
4419 IOByteCount len = (IOByteCount) vcopy[index].length;
4420 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4421 if (values[0] == 0) {
4422 result = false;
4423 goto bail;
4424 }
4425 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4426 if (values[1] == 0) {
4427 result = false;
4428 goto bail;
4429 }
4430 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4431 if (dict == 0) {
4432 result = false;
4433 goto bail;
4434 }
4435 array->setObject(dict);
4436 dict->release();
4437 values[0]->release();
4438 values[1]->release();
4439 values[0] = values[1] = 0;
4440 }
4441
4442 result = array->serialize(s);
4443
4444 bail:
4445 if (array)
4446 array->release();
4447 if (values[0])
4448 values[0]->release();
4449 if (values[1])
4450 values[1]->release();
4451 if (keys[0])
4452 keys[0]->release();
4453 if (keys[1])
4454 keys[1]->release();
4455 if (vcopy)
4456 IOFree(vcopy, sizeof(SerData) * nRanges);
4457
4458 return result;
4459 }
4460
4461 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4462
4463 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4464 #ifdef __LP64__
4465 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4466 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4467 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4468 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4469 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4470 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4471 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4472 #else /* !__LP64__ */
4473 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4474 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4475 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4476 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4477 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4478 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4479 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4480 #endif /* !__LP64__ */
4481 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4482 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4483 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4484 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4485 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4486 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4487 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4488 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4489
4490 /* ex-inline function implementation */
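// Returns the physical address of the descriptor's first byte, i.e. the
// physical segment at offset 0.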
4491 IOPhysicalAddress
4492 IOMemoryDescriptor::getPhysicalAddress()
4493 { return( getPhysicalSegment( 0, 0 )); }