1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IODMACommand.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45
46 #ifndef __LP64__
47 #include <IOKit/IOSubMemoryDescriptor.h>
48 #endif /* !__LP64__ */
49
50 #include <IOKit/IOKitDebug.h>
51 #include <libkern/OSDebug.h>
52
53 #include "IOKitKernelInternal.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 extern void ipc_port_release_send(ipc_port_t port);
76
77 kern_return_t
78 memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
86
87 // osfmk/device/iokit_rpc.c
88 unsigned int IODefaultCacheBits(addr64_t pa);
89 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
90
91 __END_DECLS
92
93 #define kIOMapperWaitSystem ((IOMapper *) 1)
94
95 static IOMapper * gIOSystemMapper = NULL;
96
97 ppnum_t gIOLastPage;
98
99 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
100
101 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
102
103 #define super IOMemoryDescriptor
104
105 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
106
107 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
108
109 static IORecursiveLock * gIOMemoryLock;
110
111 #define LOCK IORecursiveLockLock( gIOMemoryLock)
112 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
113 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
114 #define WAKEUP \
115 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
116
117 #if 0
118 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
119 #else
120 #define DEBG(fmt, args...) {}
121 #endif
122
123 #define IOMD_DEBUG_DMAACTIVE 1
124
125 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
126
127 // Some data structures and accessor macros used by the initWithOptions
128 // function.
129
130 enum ioPLBlockFlags {
131 kIOPLOnDevice = 0x00000001,
132 kIOPLExternUPL = 0x00000002,
133 };
134
135 struct IOMDPersistentInitData
136 {
137 const IOGeneralMemoryDescriptor * fMD;
138 IOMemoryReference * fMemRef;
139 };
140
141 struct ioPLBlock {
142 upl_t fIOPL;
143 vm_address_t fPageInfo; // Pointer to page list or index into it
144 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
145 ppnum_t fMappedPage; // Page number of first page in this iopl
146 unsigned int fPageOffset; // Offset within first page of iopl
147 unsigned int fFlags; // Flags
148 };
149
150 struct ioGMDData {
151 IOMapper * fMapper;
152 uint8_t fDMAMapNumAddressBits;
153 uint64_t fDMAMapAlignment;
154 addr64_t fMappedBase;
155 uint64_t fPreparationID;
156 unsigned int fPageCnt;
157 unsigned char fDiscontig:1;
158 unsigned char fCompletionError:1;
159 unsigned char _resv:6;
160 #if __LP64__
161 // align arrays to 8 bytes so following macros work
162 unsigned char fPad[3];
163 #endif
164 upl_page_info_t fPageList[1]; /* variable length */
165 ioPLBlock fBlocks[1]; /* variable length */
166 };
167
168 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
169 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
170 #define getNumIOPL(osd, d) \
171 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
172 #define getPageList(d) (&(d->fPageList[0]))
173 #define computeDataSize(p, u) \
174 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
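
// Illustrative sketch (not part of the original source): how the variable-length
// ioGMDData buffer is sized and indexed by the macros above. The page/IOPL counts
// below are assumptions chosen purely for demonstration.
#if 0
static void ioGMDDataLayoutExample(void)
{
    // A descriptor covering 8 pages described by 2 IOPLs would allocate:
    unsigned int pages = 8, upls = 2;
    unsigned int bytes = computeDataSize(pages, upls);
    // i.e. offsetof(ioGMDData, fPageList)
    //      + 8 * sizeof(upl_page_info_t)   // the fPageList entries
    //      + 2 * sizeof(ioPLBlock);        // the ioPLBlock array found by getIOPLList()
    // getIOPLList(dataP) points just past the last fPageList entry, and
    // getNumIOPL() recovers the IOPL count from the containing OSData's length.
    (void) bytes;
}
#endif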
175
176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
177
178 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
179
180 extern "C" {
181
182 kern_return_t device_data_action(
183 uintptr_t device_handle,
184 ipc_port_t device_pager,
185 vm_prot_t protection,
186 vm_object_offset_t offset,
187 vm_size_t size)
188 {
189 kern_return_t kr;
190 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
191 IOMemoryDescriptor * memDesc;
192
193 LOCK;
194 memDesc = ref->dp.memory;
195 if( memDesc)
196 {
197 memDesc->retain();
198 kr = memDesc->handleFault(device_pager, offset, size);
199 memDesc->release();
200 }
201 else
202 kr = KERN_ABORTED;
203 UNLOCK;
204
205 return( kr );
206 }
207
208 kern_return_t device_close(
209 uintptr_t device_handle)
210 {
211 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
212
213 IODelete( ref, IOMemoryDescriptorReserved, 1 );
214
215 return( kIOReturnSuccess );
216 }
217 }; // end extern "C"
218
219 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
220
221 // Note this inline function uses C++ reference arguments to return values.
222 // This means that pointers are not passed and NULLs don't have to be
223 // checked for, as a NULL reference is illegal.
224 static inline void
225 getAddrLenForInd(mach_vm_address_t &addr, mach_vm_size_t &len, // Output variables
226 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
227 {
228 assert(kIOMemoryTypeUIO == type
229 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
230 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
231 if (kIOMemoryTypeUIO == type) {
232 user_size_t us;
233 user_addr_t ad;
234 uio_getiov((uio_t) r.uio, ind, &ad, &us); addr = ad; len = us;
235 }
236 #ifndef __LP64__
237 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
238 IOAddressRange cur = r.v64[ind];
239 addr = cur.address;
240 len = cur.length;
241 }
242 #endif /* !__LP64__ */
243 else {
244 IOVirtualRange cur = r.v[ind];
245 addr = cur.address;
246 len = cur.length;
247 }
248 }
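
// Illustrative sketch (assumption, not original code): the typical pattern for
// walking a descriptor's ranges with getAddrLenForInd(), as memoryReferenceCreate()
// and memoryReferenceMap() do further down in this file.
#if 0
static void walkRangesExample(IOGeneralMemoryDescriptor::Ranges ranges,
                              UInt32 type, UInt32 rangeCount)
{
    mach_vm_address_t addr;
    mach_vm_size_t    len;
    for (UInt32 ind = 0; ind < rangeCount; ind++)
    {
        // addr & len are returned through the reference arguments
        getAddrLenForInd(addr, len, type, ranges, ind);
        // ... use addr/len for this range ...
    }
}
#endif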
249
250 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
251
252 static IOReturn
253 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
254 {
255 IOReturn err = kIOReturnSuccess;
256
257 *control = VM_PURGABLE_SET_STATE;
258
259 enum { kIOMemoryPurgeableControlMask = 15 };
260
261 switch (kIOMemoryPurgeableControlMask & newState)
262 {
263 case kIOMemoryPurgeableKeepCurrent:
264 *control = VM_PURGABLE_GET_STATE;
265 break;
266
267 case kIOMemoryPurgeableNonVolatile:
268 *state = VM_PURGABLE_NONVOLATILE;
269 break;
270 case kIOMemoryPurgeableVolatile:
271 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
272 break;
273 case kIOMemoryPurgeableEmpty:
274 *state = VM_PURGABLE_EMPTY;
275 break;
276 default:
277 err = kIOReturnBadArgument;
278 break;
279 }
280 return (err);
281 }
282
283 static IOReturn
284 purgeableStateBits(int * state)
285 {
286 IOReturn err = kIOReturnSuccess;
287
288 switch (VM_PURGABLE_STATE_MASK & *state)
289 {
290 case VM_PURGABLE_NONVOLATILE:
291 *state = kIOMemoryPurgeableNonVolatile;
292 break;
293 case VM_PURGABLE_VOLATILE:
294 *state = kIOMemoryPurgeableVolatile;
295 break;
296 case VM_PURGABLE_EMPTY:
297 *state = kIOMemoryPurgeableEmpty;
298 break;
299 default:
300 *state = kIOMemoryPurgeableNonVolatile;
301 err = kIOReturnNotReady;
302 break;
303 }
304 return (err);
305 }
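
// Illustrative sketch (assumption, not original code): the two helpers above
// translate between IOKit's kIOMemoryPurgeable* constants and the VM layer's
// VM_PURGABLE_* tokens; memoryReferenceSetPurgeable() below uses exactly this
// round trip around mach_memory_entry_purgable_control().
#if 0
static IOReturn purgeableRoundTripExample(ipc_port_t entry, IOOptionBits newState,
                                          IOOptionBits * oldState)
{
    vm_purgable_t control;
    int           state = 0;
    IOReturn err = purgeableControlBits(newState, &control, &state);   // IOKit -> VM
    if (kIOReturnSuccess != err) return (err);
    err = mach_memory_entry_purgable_control(entry, control, &state);  // apply / query
    if (KERN_SUCCESS != err) return (err);
    err = purgeableStateBits(&state);                                  // VM -> IOKit
    if ((kIOReturnSuccess == err) && oldState) *oldState = state;
    return (err);
}
#endif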
306
307
308 static vm_prot_t
309 vmProtForCacheMode(IOOptionBits cacheMode)
310 {
311 vm_prot_t prot = 0;
312 switch (cacheMode)
313 {
314 case kIOInhibitCache:
315 SET_MAP_MEM(MAP_MEM_IO, prot);
316 break;
317
318 case kIOWriteThruCache:
319 SET_MAP_MEM(MAP_MEM_WTHRU, prot);
320 break;
321
322 case kIOWriteCombineCache:
323 SET_MAP_MEM(MAP_MEM_WCOMB, prot);
324 break;
325
326 case kIOCopybackCache:
327 SET_MAP_MEM(MAP_MEM_COPYBACK, prot);
328 break;
329
330 case kIOCopybackInnerCache:
331 SET_MAP_MEM(MAP_MEM_INNERWBACK, prot);
332 break;
333
334 case kIODefaultCache:
335 default:
336 SET_MAP_MEM(MAP_MEM_NOOP, prot);
337 break;
338 }
339
340 return (prot);
341 }
342
343 static unsigned int
344 pagerFlagsForCacheMode(IOOptionBits cacheMode)
345 {
346 unsigned int pagerFlags = 0;
347 switch (cacheMode)
348 {
349 case kIOInhibitCache:
350 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
351 break;
352
353 case kIOWriteThruCache:
354 pagerFlags = DEVICE_PAGER_WRITE_THROUGH | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
355 break;
356
357 case kIOWriteCombineCache:
358 pagerFlags = DEVICE_PAGER_CACHE_INHIB | DEVICE_PAGER_COHERENT;
359 break;
360
361 case kIOCopybackCache:
362 pagerFlags = DEVICE_PAGER_COHERENT;
363 break;
364
365 case kIOCopybackInnerCache:
366 pagerFlags = DEVICE_PAGER_COHERENT;
367 break;
368
369 case kIODefaultCache:
370 default:
371 pagerFlags = -1U;
372 break;
373 }
374 return (pagerFlags);
375 }
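
// Illustrative note (assumption, not original code): vmProtForCacheMode() encodes a
// cache mode into the vm_prot_t passed to mach_make_memory_entry_64(), while
// pagerFlagsForCacheMode() yields DEVICE_PAGER_* flags for device_pager_setup();
// the -1U sentinel means kIODefaultCache still needs to be resolved (see the
// IODefaultCacheBits() use in memoryReferenceCreate()).
#if 0
static void cacheModeHelpersExample(void)
{
    IOOptionBits cacheMode  = kIOWriteThruCache;   // example mode, chosen arbitrarily
    vm_prot_t    prot       = VM_PROT_READ | vmProtForCacheMode(cacheMode);
    unsigned int pagerFlags = pagerFlagsForCacheMode(cacheMode);
    if (-1U == pagerFlags)
    {
        // kIODefaultCache: caller must first pick a concrete mode
    }
    (void) prot;
}
#endif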
376
377 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
378 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
379
380 struct IOMemoryEntry
381 {
382 ipc_port_t entry;
383 int64_t offset;
384 uint64_t size;
385 };
386
387 struct IOMemoryReference
388 {
389 volatile SInt32 refCount;
390 vm_prot_t prot;
391 uint32_t capacity;
392 uint32_t count;
393 IOMemoryEntry entries[0];
394 };
395
396 enum
397 {
398 kIOMemoryReferenceReuse = 0x00000001,
399 kIOMemoryReferenceWrite = 0x00000002,
400 };
401
402 SInt32 gIOMemoryReferenceCount;
403
404 IOMemoryReference *
405 IOGeneralMemoryDescriptor::memoryReferenceAlloc(uint32_t capacity, IOMemoryReference * realloc)
406 {
407 IOMemoryReference * ref;
408 size_t newSize, oldSize, copySize;
409
410 newSize = (sizeof(IOMemoryReference)
411 - sizeof(ref->entries)
412 + capacity * sizeof(ref->entries[0]));
413 ref = (typeof(ref)) IOMalloc(newSize);
414 if (realloc)
415 {
416 oldSize = (sizeof(IOMemoryReference)
417 - sizeof(realloc->entries)
418 + realloc->capacity * sizeof(realloc->entries[0]));
419 copySize = oldSize;
420 if (copySize > newSize) copySize = newSize;
421 if (ref) bcopy(realloc, ref, copySize);
422 IOFree(realloc, oldSize);
423 }
424 else if (ref)
425 {
426 bzero(ref, sizeof(*ref));
427 ref->refCount = 1;
428 OSIncrementAtomic(&gIOMemoryReferenceCount);
429 }
430 if (!ref) return (0);
431 ref->capacity = capacity;
432 return (ref);
433 }
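
// Illustrative sketch (assumption, not original code): IOMemoryReference ends in a
// zero-length entries[] array, so memoryReferenceAlloc() sizes header and entries as
// one block; memoryReferenceCreate() below grows it in kCapacityInc steps when the
// entry count reaches the current capacity.
#if 0
static IOMemoryReference * growReferenceExample(IOMemoryReference * ref, uint32_t count)
{
    enum { kCapacityInc = 4 };
    if (count >= ref->capacity)
    {
        // copies the existing entries into a larger block and frees the old one
        ref = IOGeneralMemoryDescriptor::memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
    }
    return (ref);
}
#endif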
434
435 void
436 IOGeneralMemoryDescriptor::memoryReferenceFree(IOMemoryReference * ref)
437 {
438 IOMemoryEntry * entries;
439 size_t size;
440
441 entries = ref->entries + ref->count;
442 while (entries > &ref->entries[0])
443 {
444 entries--;
445 ipc_port_release_send(entries->entry);
446 }
447 size = (sizeof(IOMemoryReference)
448 - sizeof(ref->entries)
449 + ref->capacity * sizeof(ref->entries[0]));
450 IOFree(ref, size);
451
452 OSDecrementAtomic(&gIOMemoryReferenceCount);
453 }
454
455 void
456 IOGeneralMemoryDescriptor::memoryReferenceRelease(IOMemoryReference * ref)
457 {
458 if (1 == OSDecrementAtomic(&ref->refCount)) memoryReferenceFree(ref);
459 }
460
461
462 IOReturn
463 IOGeneralMemoryDescriptor::memoryReferenceCreate(
464 IOOptionBits options,
465 IOMemoryReference ** reference)
466 {
467 enum { kCapacity = 4, kCapacityInc = 4 };
468
469 kern_return_t err;
470 IOMemoryReference * ref;
471 IOMemoryEntry * entries;
472 IOMemoryEntry * cloneEntries;
473 vm_map_t map;
474 ipc_port_t entry, cloneEntry;
475 vm_prot_t prot;
476 memory_object_size_t actualSize;
477 uint32_t rangeIdx;
478 uint32_t count;
479 mach_vm_address_t entryAddr, endAddr, entrySize;
480 mach_vm_size_t srcAddr, srcLen;
481 mach_vm_size_t nextAddr, nextLen;
482 mach_vm_size_t offset, remain;
483 IOByteCount physLen;
484 IOOptionBits type = (_flags & kIOMemoryTypeMask);
485 IOOptionBits cacheMode;
486 unsigned int pagerFlags;
487
488 ref = memoryReferenceAlloc(kCapacity, NULL);
489 if (!ref) return (kIOReturnNoMemory);
490 entries = &ref->entries[0];
491 count = 0;
492
493 offset = 0;
494 rangeIdx = 0;
495 if (_task) getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
496 else
497 {
498 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
499 nextLen = physLen;
500 // default cache mode for physical
501 if (kIODefaultCache == ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift))
502 {
503 IOOptionBits mode;
504 pagerFlags = IODefaultCacheBits(nextAddr);
505 if (DEVICE_PAGER_CACHE_INHIB & pagerFlags)
506 {
507 if (DEVICE_PAGER_GUARDED & pagerFlags)
508 mode = kIOInhibitCache;
509 else
510 mode = kIOWriteCombineCache;
511 }
512 else if (DEVICE_PAGER_WRITE_THROUGH & pagerFlags)
513 mode = kIOWriteThruCache;
514 else
515 mode = kIOCopybackCache;
516 _flags |= (mode << kIOMemoryBufferCacheShift);
517 }
518 }
519
520 // cache mode & vm_prot
521 prot = VM_PROT_READ;
522 cacheMode = ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
523 prot |= vmProtForCacheMode(cacheMode);
524 // VM system requires write access to change cache mode
525 if (kIODefaultCache != cacheMode) prot |= VM_PROT_WRITE;
526 if (kIODirectionOut != (kIODirectionOutIn & _flags)) prot |= VM_PROT_WRITE;
527 if (kIOMemoryReferenceWrite & options) prot |= VM_PROT_WRITE;
528
529 if ((kIOMemoryReferenceReuse & options) && _memRef)
530 {
531 cloneEntries = &_memRef->entries[0];
532 prot |= MAP_MEM_NAMED_REUSE;
533 }
534
535 if (_task)
536 {
537 // virtual ranges
538
539 if (kIOMemoryBufferPageable & _flags)
540 {
541 // IOBufferMemoryDescriptor alloc - set flags for entry + object create
542 prot |= MAP_MEM_NAMED_CREATE;
543 if (kIOMemoryBufferPurgeable & _flags) prot |= MAP_MEM_PURGABLE;
544 prot |= VM_PROT_WRITE;
545 map = NULL;
546 }
547 else map = get_task_map(_task);
548
549 remain = _length;
550 while (remain)
551 {
552 srcAddr = nextAddr;
553 srcLen = nextLen;
554 nextAddr = 0;
555 nextLen = 0;
556 // coalesce addr range
557 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
558 {
559 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
560 if ((srcAddr + srcLen) != nextAddr) break;
561 srcLen += nextLen;
562 }
563 entryAddr = trunc_page_64(srcAddr);
564 endAddr = round_page_64(srcAddr + srcLen);
565 do
566 {
567 entrySize = (endAddr - entryAddr);
568 if (!entrySize) break;
569 actualSize = entrySize;
570
571 cloneEntry = MACH_PORT_NULL;
572 if (MAP_MEM_NAMED_REUSE & prot)
573 {
574 if (cloneEntries < &_memRef->entries[_memRef->count]) cloneEntry = cloneEntries->entry;
575 else prot &= ~MAP_MEM_NAMED_REUSE;
576 }
577
578 err = mach_make_memory_entry_64(map,
579 &actualSize, entryAddr, prot, &entry, cloneEntry);
580
581 if (KERN_SUCCESS != err) break;
582 if (actualSize > entrySize) panic("mach_make_memory_entry_64 actualSize");
583
584 if (count >= ref->capacity)
585 {
586 ref = memoryReferenceAlloc(ref->capacity + kCapacityInc, ref);
587 entries = &ref->entries[count];
588 }
589 entries->entry = entry;
590 entries->size = actualSize;
591 entries->offset = offset + (entryAddr - srcAddr);
592 entryAddr += actualSize;
593 if (MAP_MEM_NAMED_REUSE & prot)
594 {
595 if ((cloneEntries->entry == entries->entry)
596 && (cloneEntries->size == entries->size)
597 && (cloneEntries->offset == entries->offset)) cloneEntries++;
598 else prot &= ~MAP_MEM_NAMED_REUSE;
599 }
600 entries++;
601 count++;
602 }
603 while (true);
604 offset += srcLen;
605 remain -= srcLen;
606 }
607 }
608 else
609 {
610 // _task == 0, physical
611 memory_object_t pager;
612 vm_size_t size = ptoa_32(_pages);
613
614 if (!getKernelReserved()) panic("getKernelReserved");
615
616 reserved->dp.pagerContig = (1 == _rangesCount);
617 reserved->dp.memory = this;
618
619 pagerFlags = pagerFlagsForCacheMode(cacheMode);
620 if (-1U == pagerFlags) panic("phys is kIODefaultCache");
621 if (reserved->dp.pagerContig) pagerFlags |= DEVICE_PAGER_CONTIGUOUS;
622
623 pager = device_pager_setup((memory_object_t) 0, (uintptr_t) reserved,
624 size, pagerFlags);
625 assert (pager);
626 if (!pager) err = kIOReturnVMError;
627 else
628 {
629 srcAddr = nextAddr;
630 entryAddr = trunc_page_64(srcAddr);
631 err = mach_memory_object_memory_entry_64((host_t) 1, false /*internal*/,
632 size, VM_PROT_READ | VM_PROT_WRITE, pager, &entry);
633 assert (KERN_SUCCESS == err);
634 if (KERN_SUCCESS != err) device_pager_deallocate(pager);
635 else
636 {
637 reserved->dp.devicePager = pager;
638 entries->entry = entry;
639 entries->size = size;
640 entries->offset = offset + (entryAddr - srcAddr);
641 entries++;
642 count++;
643 }
644 }
645 }
646
647 ref->count = count;
648 ref->prot = prot;
649
650 if (KERN_SUCCESS == err)
651 {
652 if (MAP_MEM_NAMED_REUSE & prot)
653 {
654 memoryReferenceFree(ref);
655 OSIncrementAtomic(&_memRef->refCount);
656 ref = _memRef;
657 }
658 }
659 else
660 {
661 memoryReferenceFree(ref);
662 ref = NULL;
663 }
664
665 *reference = ref;
666
667 return (err);
668 }
669
670 struct IOMemoryDescriptorMapAllocRef
671 {
672 vm_map_t map;
673 mach_vm_address_t mapped;
674 mach_vm_size_t size;
675 vm_prot_t prot;
676 IOOptionBits options;
677 };
678
679 static kern_return_t
680 IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
681 {
682 IOMemoryDescriptorMapAllocRef * ref = (typeof(ref))_ref;
683 IOReturn err;
684 vm_map_offset_t addr;
685
686 addr = ref->mapped;
687 err = vm_map_enter_mem_object(map, &addr, ref->size,
688 (vm_map_offset_t) 0,
689 (((ref->options & kIOMapAnywhere)
690 ? VM_FLAGS_ANYWHERE
691 : VM_FLAGS_FIXED)
692 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
693 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
694 IPC_PORT_NULL,
695 (memory_object_offset_t) 0,
696 false, /* copy */
697 ref->prot,
698 ref->prot,
699 VM_INHERIT_NONE);
700 if (KERN_SUCCESS == err)
701 {
702 ref->mapped = (mach_vm_address_t) addr;
703 ref->map = map;
704 }
705
706 return( err );
707 }
708
709 IOReturn
710 IOGeneralMemoryDescriptor::memoryReferenceMap(
711 IOMemoryReference * ref,
712 vm_map_t map,
713 mach_vm_size_t inoffset,
714 mach_vm_size_t size,
715 IOOptionBits options,
716 mach_vm_address_t * inaddr)
717 {
718 IOReturn err;
719 int64_t offset = inoffset;
720 uint32_t rangeIdx, entryIdx;
721 vm_map_offset_t addr, mapAddr;
722 vm_map_offset_t pageOffset, entryOffset, remain, chunk;
723
724 mach_vm_address_t srcAddr, nextAddr;
725 mach_vm_size_t srcLen, nextLen;
726 IOByteCount physLen;
727 IOMemoryEntry * entry;
728 vm_prot_t prot, memEntryCacheMode;
729 IOOptionBits type;
730 IOOptionBits cacheMode;
731
732 /*
733 * For the kIOMapPrefault option.
734 */
735 upl_page_info_t *pageList = NULL;
736 UInt currentPageIndex = 0;
737
738 type = _flags & kIOMemoryTypeMask;
739 prot = VM_PROT_READ;
740 if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
741 prot &= ref->prot;
742
743 cacheMode = ((options & kIOMapCacheMask) >> kIOMapCacheShift);
744 if (kIODefaultCache != cacheMode)
745 {
746 // VM system requires write access to update named entry cache mode
747 memEntryCacheMode = (MAP_MEM_ONLY | VM_PROT_WRITE | prot | vmProtForCacheMode(cacheMode));
748 }
749
750 if (_task)
751 {
752 // Find first range for offset
753 for (remain = offset, rangeIdx = 0; rangeIdx < _rangesCount; rangeIdx++)
754 {
755 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
756 if (remain < nextLen) break;
757 remain -= nextLen;
758 }
759 }
760 else
761 {
762 rangeIdx = 0;
763 remain = 0;
764 nextAddr = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
765 nextLen = size;
766 }
767
768 assert(remain < nextLen);
769 if (remain >= nextLen) return (kIOReturnBadArgument);
770
771 nextAddr += remain;
772 nextLen -= remain;
773 pageOffset = (page_mask & nextAddr);
774 addr = 0;
775 if (!(options & kIOMapAnywhere))
776 {
777 addr = *inaddr;
778 if (pageOffset != (page_mask & addr)) return (kIOReturnNotAligned);
779 addr -= pageOffset;
780 }
781
782 // find first entry for offset
783 for (entryIdx = 0;
784 (entryIdx < ref->count) && (offset >= ref->entries[entryIdx].offset);
785 entryIdx++) {}
786 entryIdx--;
787 entry = &ref->entries[entryIdx];
788
789 // allocate VM
790 size = round_page_64(size + pageOffset);
791 {
792 IOMemoryDescriptorMapAllocRef ref;
793 ref.map = map;
794 ref.options = options;
795 ref.size = size;
796 ref.prot = prot;
797 if (options & kIOMapAnywhere)
798 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
799 ref.mapped = 0;
800 else
801 ref.mapped = addr;
802
803 if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
804 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
805 else
806 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
807 if (KERN_SUCCESS == err)
808 {
809 addr = ref.mapped;
810 map = ref.map;
811 }
812 }
813
814 /*
815 * Prefaulting is only possible if we wired the memory earlier. Check the
816 * memory type, and the underlying data.
817 */
818 if (options & kIOMapPrefault) {
819 /*
820 * The memory must have been wired by calling ::prepare(), otherwise
821 * we don't have the UPL. Without UPLs, pages cannot be pre-faulted
822 */
823 assert(map != kernel_map);
824 assert(_wireCount != 0);
825 assert(_memoryEntries != NULL);
826 if ((map == kernel_map) ||
827 (_wireCount == 0) ||
828 (_memoryEntries == NULL))
829 {
830 return kIOReturnBadArgument;
831 }
832
833 // Get the page list.
834 ioGMDData* dataP = getDataP(_memoryEntries);
835 ioPLBlock const* ioplList = getIOPLList(dataP);
836 pageList = getPageList(dataP);
837
838 // Get the number of IOPLs.
839 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
840
841 /*
842 * Scan through the IOPL Info Blocks, looking for the first block containing
843 * the offset. The scan will go one block past it, so we need to step back
844 * to the right block at the end.
845 */
846 UInt ioplIndex = 0;
847 while (ioplIndex < numIOPLs && offset >= ioplList[ioplIndex].fIOMDOffset)
848 ioplIndex++;
849 ioplIndex--;
850
851 // Retrieve the IOPL info block.
852 ioPLBlock ioplInfo = ioplList[ioplIndex];
853
854 /*
855 * For external UPLs, fPageInfo points directly to the UPL's page_info_t
856 * array.
857 */
858 if (ioplInfo.fFlags & kIOPLExternUPL)
859 pageList = (upl_page_info_t*) ioplInfo.fPageInfo;
860 else
861 pageList = &pageList[ioplInfo.fPageInfo];
862
863 // Rebase [offset] into the IOPL in order to look up the first page index.
864 mach_vm_size_t offsetInIOPL = offset - ioplInfo.fIOMDOffset + ioplInfo.fPageOffset;
865
866 // Retrieve the index of the first page corresponding to the offset.
867 currentPageIndex = atop_32(offsetInIOPL);
868 }
869
870 // enter mappings
871 remain = size;
872 mapAddr = addr;
873 addr += pageOffset;
874 while (remain && nextLen && (KERN_SUCCESS == err))
875 {
876 srcAddr = nextAddr;
877 srcLen = nextLen;
878 nextAddr = 0;
879 nextLen = 0;
880 // coalesce addr range
881 for (++rangeIdx; rangeIdx < _rangesCount; rangeIdx++)
882 {
883 getAddrLenForInd(nextAddr, nextLen, type, _ranges, rangeIdx);
884 if ((srcAddr + srcLen) != nextAddr) break;
885 srcLen += nextLen;
886 }
887
888 while (srcLen && (KERN_SUCCESS == err))
889 {
890 entryOffset = offset - entry->offset;
891 if ((page_mask & entryOffset) != pageOffset)
892 {
893 err = kIOReturnNotAligned;
894 break;
895 }
896
897 if (kIODefaultCache != cacheMode)
898 {
899 vm_size_t unused = 0;
900 err = mach_make_memory_entry(NULL /*unused*/, &unused, 0 /*unused*/,
901 memEntryCacheMode, NULL, entry->entry);
902 assert (KERN_SUCCESS == err);
903 }
904
905 entryOffset -= pageOffset;
906 if (entryOffset >= entry->size) panic("entryOffset");
907 chunk = entry->size - entryOffset;
908 if (chunk)
909 {
910 if (chunk > remain) chunk = remain;
911
912 if (options & kIOMapPrefault) {
913 UInt nb_pages = round_page(chunk) / PAGE_SIZE;
914 err = vm_map_enter_mem_object_prefault(map,
915 &mapAddr,
916 chunk, 0 /* mask */,
917 (VM_FLAGS_FIXED
918 | VM_FLAGS_OVERWRITE
919 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
920 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
921 entry->entry,
922 entryOffset,
923 prot, // cur
924 prot, // max
925 &pageList[currentPageIndex],
926 nb_pages);
927
928 // Compute the next index in the page list.
929 currentPageIndex += nb_pages;
930 assert(currentPageIndex <= _pages);
931 } else {
932 err = vm_map_enter_mem_object(map,
933 &mapAddr,
934 chunk, 0 /* mask */,
935 (VM_FLAGS_FIXED
936 | VM_FLAGS_OVERWRITE
937 | VM_MAKE_TAG(VM_MEMORY_IOKIT)
938 | VM_FLAGS_IOKIT_ACCT), /* iokit accounting */
939 entry->entry,
940 entryOffset,
941 false, // copy
942 prot, // cur
943 prot, // max
944 VM_INHERIT_NONE);
945 }
946
947 if (KERN_SUCCESS != err) break;
948 remain -= chunk;
949 if (!remain) break;
950 mapAddr += chunk;
951 offset += chunk - pageOffset;
952 }
953 pageOffset = 0;
954 entry++;
955 entryIdx++;
956 if (entryIdx >= ref->count)
957 {
958 err = kIOReturnOverrun;
959 break;
960 }
961 }
962 }
963
964 if ((KERN_SUCCESS != err) && addr)
965 {
966 (void) mach_vm_deallocate(map, trunc_page_64(addr), size);
967 addr = 0;
968 }
969 *inaddr = addr;
970
971 return (err);
972 }
973
974 IOReturn
975 IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(
976 IOMemoryReference * ref,
977 IOByteCount * residentPageCount,
978 IOByteCount * dirtyPageCount)
979 {
980 IOReturn err;
981 IOMemoryEntry * entries;
982 unsigned int resident, dirty;
983 unsigned int totalResident, totalDirty;
984
985 totalResident = totalDirty = 0;
986 entries = ref->entries + ref->count;
987 while (entries > &ref->entries[0])
988 {
989 entries--;
990 err = mach_memory_entry_get_page_counts(entries->entry, &resident, &dirty);
991 if (KERN_SUCCESS != err) break;
992 totalResident += resident;
993 totalDirty += dirty;
994 }
995
996 if (residentPageCount) *residentPageCount = totalResident;
997 if (dirtyPageCount) *dirtyPageCount = totalDirty;
998 return (err);
999 }
1000
1001 IOReturn
1002 IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(
1003 IOMemoryReference * ref,
1004 IOOptionBits newState,
1005 IOOptionBits * oldState)
1006 {
1007 IOReturn err;
1008 IOMemoryEntry * entries;
1009 vm_purgable_t control;
1010 int totalState, state;
1011
1012 entries = ref->entries + ref->count;
1013 totalState = kIOMemoryPurgeableNonVolatile;
1014 while (entries > &ref->entries[0])
1015 {
1016 entries--;
1017
1018 err = purgeableControlBits(newState, &control, &state);
1019 if (KERN_SUCCESS != err) break;
1020 err = mach_memory_entry_purgable_control(entries->entry, control, &state);
1021 if (KERN_SUCCESS != err) break;
1022 err = purgeableStateBits(&state);
1023 if (KERN_SUCCESS != err) break;
1024
1025 if (kIOMemoryPurgeableEmpty == state) totalState = kIOMemoryPurgeableEmpty;
1026 else if (kIOMemoryPurgeableEmpty == totalState) continue;
1027 else if (kIOMemoryPurgeableVolatile == totalState) continue;
1028 else if (kIOMemoryPurgeableVolatile == state) totalState = kIOMemoryPurgeableVolatile;
1029 else totalState = kIOMemoryPurgeableNonVolatile;
1030 }
1031
1032 if (oldState) *oldState = totalState;
1033 return (err);
1034 }
1035
1036 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1037
1038 IOMemoryDescriptor *
1039 IOMemoryDescriptor::withAddress(void * address,
1040 IOByteCount length,
1041 IODirection direction)
1042 {
1043 return IOMemoryDescriptor::
1044 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
1045 }
1046
1047 #ifndef __LP64__
1048 IOMemoryDescriptor *
1049 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
1050 IOByteCount length,
1051 IODirection direction,
1052 task_t task)
1053 {
1054 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1055 if (that)
1056 {
1057 if (that->initWithAddress(address, length, direction, task))
1058 return that;
1059
1060 that->release();
1061 }
1062 return 0;
1063 }
1064 #endif /* !__LP64__ */
1065
1066 IOMemoryDescriptor *
1067 IOMemoryDescriptor::withPhysicalAddress(
1068 IOPhysicalAddress address,
1069 IOByteCount length,
1070 IODirection direction )
1071 {
1072 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
1073 }
1074
1075 #ifndef __LP64__
1076 IOMemoryDescriptor *
1077 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
1078 UInt32 withCount,
1079 IODirection direction,
1080 task_t task,
1081 bool asReference)
1082 {
1083 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1084 if (that)
1085 {
1086 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1087 return that;
1088
1089 that->release();
1090 }
1091 return 0;
1092 }
1093 #endif /* !__LP64__ */
1094
1095 IOMemoryDescriptor *
1096 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
1097 mach_vm_size_t length,
1098 IOOptionBits options,
1099 task_t task)
1100 {
1101 IOAddressRange range = { address, length };
1102 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
1103 }
1104
1105 IOMemoryDescriptor *
1106 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
1107 UInt32 rangeCount,
1108 IOOptionBits options,
1109 task_t task)
1110 {
1111 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1112 if (that)
1113 {
1114 if (task)
1115 options |= kIOMemoryTypeVirtual64;
1116 else
1117 options |= kIOMemoryTypePhysical64;
1118
1119 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
1120 return that;
1121
1122 that->release();
1123 }
1124
1125 return 0;
1126 }
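
// Illustrative usage sketch (assumption, not original code): wrapping a client
// buffer with the factory above; userBuffer, bufferLength and userTask are
// hypothetical names supplied by the caller.
#if 0
static IOMemoryDescriptor * describeUserBufferExample(mach_vm_address_t userBuffer,
                                                      mach_vm_size_t bufferLength,
                                                      task_t userTask)
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                  userBuffer, bufferLength, kIODirectionOutIn, userTask);
    if (md && (kIOReturnSuccess != md->prepare()))   // wire the pages before any I/O
    {
        md->release();
        md = 0;
    }
    return (md);   // caller must complete() and release() when done
}
#endif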
1127
1128
1129 /*
1130 * withOptions:
1131 *
1132 * Create a new IOMemoryDescriptor. The buffer is made up of several
1133 * virtual address ranges, from a given task.
1134 *
1135 * Passing the ranges as a reference will avoid an extra allocation.
1136 */
1137 IOMemoryDescriptor *
1138 IOMemoryDescriptor::withOptions(void * buffers,
1139 UInt32 count,
1140 UInt32 offset,
1141 task_t task,
1142 IOOptionBits opts,
1143 IOMapper * mapper)
1144 {
1145 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
1146
1147 if (self
1148 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
1149 {
1150 self->release();
1151 return 0;
1152 }
1153
1154 return self;
1155 }
1156
1157 bool IOMemoryDescriptor::initWithOptions(void * buffers,
1158 UInt32 count,
1159 UInt32 offset,
1160 task_t task,
1161 IOOptionBits options,
1162 IOMapper * mapper)
1163 {
1164 return( false );
1165 }
1166
1167 #ifndef __LP64__
1168 IOMemoryDescriptor *
1169 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
1170 UInt32 withCount,
1171 IODirection direction,
1172 bool asReference)
1173 {
1174 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
1175 if (that)
1176 {
1177 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1178 return that;
1179
1180 that->release();
1181 }
1182 return 0;
1183 }
1184
1185 IOMemoryDescriptor *
1186 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
1187 IOByteCount offset,
1188 IOByteCount length,
1189 IODirection direction)
1190 {
1191 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
1192 }
1193 #endif /* !__LP64__ */
1194
1195 IOMemoryDescriptor *
1196 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
1197 {
1198 IOGeneralMemoryDescriptor *origGenMD =
1199 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
1200
1201 if (origGenMD)
1202 return IOGeneralMemoryDescriptor::
1203 withPersistentMemoryDescriptor(origGenMD);
1204 else
1205 return 0;
1206 }
1207
1208 IOMemoryDescriptor *
1209 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
1210 {
1211 IOMemoryReference * memRef;
1212
1213 if (kIOReturnSuccess != originalMD->memoryReferenceCreate(kIOMemoryReferenceReuse, &memRef)) return (0);
1214
1215 if (memRef == originalMD->_memRef)
1216 {
1217 originalMD->retain(); // Add a new reference to ourselves
1218 originalMD->memoryReferenceRelease(memRef);
1219 return originalMD;
1220 }
1221
1222 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
1223 IOMDPersistentInitData initData = { originalMD, memRef };
1224
1225 if (self
1226 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
1227 self->release();
1228 self = 0;
1229 }
1230 return self;
1231 }
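
// Illustrative sketch (assumption, not original code): a persistent copy shares the
// original's named entries (kIOMemoryReferenceReuse), so it stays usable even if the
// originating task's address space later changes; the original must have been created
// with kIOMemoryPersistent.
#if 0
static IOMemoryDescriptor * persistentCopyExample(IOMemoryDescriptor * original)
{
    // Returns the original (retained) when the memory reference matches, otherwise a
    // new IOGeneralMemoryDescriptor built around the shared reference; NULL on failure.
    return (IOMemoryDescriptor::withPersistentMemoryDescriptor(original));
}
#endif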
1232
1233 #ifndef __LP64__
1234 bool
1235 IOGeneralMemoryDescriptor::initWithAddress(void * address,
1236 IOByteCount withLength,
1237 IODirection withDirection)
1238 {
1239 _singleRange.v.address = (vm_offset_t) address;
1240 _singleRange.v.length = withLength;
1241
1242 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
1243 }
1244
1245 bool
1246 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1247 IOByteCount withLength,
1248 IODirection withDirection,
1249 task_t withTask)
1250 {
1251 _singleRange.v.address = address;
1252 _singleRange.v.length = withLength;
1253
1254 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
1255 }
1256
1257 bool
1258 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
1259 IOPhysicalAddress address,
1260 IOByteCount withLength,
1261 IODirection withDirection )
1262 {
1263 _singleRange.p.address = address;
1264 _singleRange.p.length = withLength;
1265
1266 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
1267 }
1268
1269 bool
1270 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
1271 IOPhysicalRange * ranges,
1272 UInt32 count,
1273 IODirection direction,
1274 bool reference)
1275 {
1276 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
1277
1278 if (reference)
1279 mdOpts |= kIOMemoryAsReference;
1280
1281 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
1282 }
1283
1284 bool
1285 IOGeneralMemoryDescriptor::initWithRanges(
1286 IOVirtualRange * ranges,
1287 UInt32 count,
1288 IODirection direction,
1289 task_t task,
1290 bool reference)
1291 {
1292 IOOptionBits mdOpts = direction;
1293
1294 if (reference)
1295 mdOpts |= kIOMemoryAsReference;
1296
1297 if (task) {
1298 mdOpts |= kIOMemoryTypeVirtual;
1299
1300 // Auto-prepare if this is a kernel memory descriptor, as very few
1301 // clients bother to prepare() kernel memory. This was never enforced,
1302 // however, so auto-preparing here is the pragmatic choice.
1303 if (task == kernel_task)
1304 mdOpts |= kIOMemoryAutoPrepare;
1305 }
1306 else
1307 mdOpts |= kIOMemoryTypePhysical;
1308
1309 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
1310 }
1311 #endif /* !__LP64__ */
1312
1313 /*
1314 * initWithOptions:
1315 *
1316 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
1317 * address ranges from a given task, several physical ranges, a UPL from the
1318 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
1319 *
1320 * Passing the ranges as a reference will avoid an extra allocation.
1321 *
1322 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
1323 * existing instance -- note this behavior is not commonly supported in other
1324 * I/O Kit classes, although it is supported here.
1325 */
1326
1327 bool
1328 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
1329 UInt32 count,
1330 UInt32 offset,
1331 task_t task,
1332 IOOptionBits options,
1333 IOMapper * mapper)
1334 {
1335 IOOptionBits type = options & kIOMemoryTypeMask;
1336
1337 #ifndef __LP64__
1338 if (task
1339 && (kIOMemoryTypeVirtual == type)
1340 && vm_map_is_64bit(get_task_map(task))
1341 && ((IOVirtualRange *) buffers)->address)
1342 {
1343 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
1344 return false;
1345 }
1346 #endif /* !__LP64__ */
1347
1348 // Grab the original MD's configuration data to initialise the
1349 // arguments to this function.
1350 if (kIOMemoryTypePersistentMD == type) {
1351
1352 IOMDPersistentInitData *initData = (typeof(initData)) buffers;
1353 const IOGeneralMemoryDescriptor *orig = initData->fMD;
1354 ioGMDData *dataP = getDataP(orig->_memoryEntries);
1355
1356 // Only accept persistent memory descriptors with valid dataP data.
1357 assert(orig->_rangesCount == 1);
1358 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
1359 return false;
1360
1361 _memRef = initData->fMemRef; // Grab the new named entry
1362 options = orig->_flags & ~kIOMemoryAsReference;
1363 type = options & kIOMemoryTypeMask;
1364 buffers = orig->_ranges.v;
1365 count = orig->_rangesCount;
1366
1367 // Now grab the original task and whatever mapper was previously used
1368 task = orig->_task;
1369 mapper = dataP->fMapper;
1370
1371 // We are ready to go through the original initialisation now
1372 }
1373
1374 switch (type) {
1375 case kIOMemoryTypeUIO:
1376 case kIOMemoryTypeVirtual:
1377 #ifndef __LP64__
1378 case kIOMemoryTypeVirtual64:
1379 #endif /* !__LP64__ */
1380 assert(task);
1381 if (!task)
1382 return false;
1383 break;
1384
1385 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
1386 #ifndef __LP64__
1387 case kIOMemoryTypePhysical64:
1388 #endif /* !__LP64__ */
1389 case kIOMemoryTypeUPL:
1390 assert(!task);
1391 break;
1392 default:
1393 return false; /* bad argument */
1394 }
1395
1396 assert(buffers);
1397 assert(count);
1398
1399 /*
1400 * We can check the _initialized instance variable before having ever set
1401 * it to an initial value because I/O Kit guarantees that all our instance
1402 * variables are zeroed on an object's allocation.
1403 */
1404
1405 if (_initialized) {
1406 /*
1407 * An existing memory descriptor is being retargeted to point to
1408 * somewhere else. Clean up our present state.
1409 */
1410 IOOptionBits type = _flags & kIOMemoryTypeMask;
1411 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1412 {
1413 while (_wireCount)
1414 complete();
1415 }
1416 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1417 {
1418 if (kIOMemoryTypeUIO == type)
1419 uio_free((uio_t) _ranges.v);
1420 #ifndef __LP64__
1421 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1422 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1423 #endif /* !__LP64__ */
1424 else
1425 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1426 }
1427
1428 options |= (kIOMemoryRedirected & _flags);
1429 if (!(kIOMemoryRedirected & options))
1430 {
1431 if (_memRef)
1432 {
1433 memoryReferenceRelease(_memRef);
1434 _memRef = 0;
1435 }
1436 if (_mappings)
1437 _mappings->flushCollection();
1438 }
1439 }
1440 else {
1441 if (!super::init())
1442 return false;
1443 _initialized = true;
1444 }
1445
1446 // Grab the appropriate mapper
1447 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
1448 if (kIOMemoryMapperNone & options)
1449 mapper = 0; // No Mapper
1450 else if (mapper == kIOMapperSystem) {
1451 IOMapper::checkForSystemMapper();
1452 gIOSystemMapper = mapper = IOMapper::gSystem;
1453 }
1454
1455 // Temp binary compatibility for kIOMemoryThreadSafe
1456 if (kIOMemoryReserved6156215 & options)
1457 {
1458 options &= ~kIOMemoryReserved6156215;
1459 options |= kIOMemoryThreadSafe;
1460 }
1461 // Remove the dynamic internal use flags from the initial setting
1462 options &= ~(kIOMemoryPreparedReadOnly);
1463 _flags = options;
1464 _task = task;
1465
1466 #ifndef __LP64__
1467 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1468 #endif /* !__LP64__ */
1469
1470 __iomd_reservedA = 0;
1471 __iomd_reservedB = 0;
1472 _highestPage = 0;
1473
1474 if (kIOMemoryThreadSafe & options)
1475 {
1476 if (!_prepareLock)
1477 _prepareLock = IOLockAlloc();
1478 }
1479 else if (_prepareLock)
1480 {
1481 IOLockFree(_prepareLock);
1482 _prepareLock = NULL;
1483 }
1484
1485 if (kIOMemoryTypeUPL == type) {
1486
1487 ioGMDData *dataP;
1488 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
1489
1490 if (!initMemoryEntries(dataSize, mapper)) return (false);
1491 dataP = getDataP(_memoryEntries);
1492 dataP->fPageCnt = 0;
1493
1494 // _wireCount++; // UPLs start out life wired
1495
1496 _length = count;
1497 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
1498
1499 ioPLBlock iopl;
1500 iopl.fIOPL = (upl_t) buffers;
1501 upl_set_referenced(iopl.fIOPL, true);
1502 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
1503
1504 if (upl_get_size(iopl.fIOPL) < (count + offset))
1505 panic("short external upl");
1506
1507 _highestPage = upl_get_highest_page(iopl.fIOPL);
1508
1509 // Set the flag kIOPLOnDevice conveniently equal to 1
1510 iopl.fFlags = pageList->device | kIOPLExternUPL;
1511 if (!pageList->device) {
1512 // Pre-compute the offset into the UPL's page list
1513 pageList = &pageList[atop_32(offset)];
1514 offset &= PAGE_MASK;
1515 }
1516 iopl.fIOMDOffset = 0;
1517 iopl.fMappedPage = 0;
1518 iopl.fPageInfo = (vm_address_t) pageList;
1519 iopl.fPageOffset = offset;
1520 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
1521 }
1522 else {
1523 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
1524 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
1525
1526 // Initialize the memory descriptor
1527 if (options & kIOMemoryAsReference) {
1528 #ifndef __LP64__
1529 _rangesIsAllocated = false;
1530 #endif /* !__LP64__ */
1531
1532 // Hack assignment to get the buffer arg into _ranges.
1533 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
1534 // work, C++ sigh.
1535 // This also initialises the uio & physical ranges.
1536 _ranges.v = (IOVirtualRange *) buffers;
1537 }
1538 else {
1539 #ifndef __LP64__
1540 _rangesIsAllocated = true;
1541 #endif /* !__LP64__ */
1542 switch (type)
1543 {
1544 case kIOMemoryTypeUIO:
1545 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
1546 break;
1547
1548 #ifndef __LP64__
1549 case kIOMemoryTypeVirtual64:
1550 case kIOMemoryTypePhysical64:
1551 if (count == 1
1552 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
1553 ) {
1554 if (kIOMemoryTypeVirtual64 == type)
1555 type = kIOMemoryTypeVirtual;
1556 else
1557 type = kIOMemoryTypePhysical;
1558 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
1559 _rangesIsAllocated = false;
1560 _ranges.v = &_singleRange.v;
1561 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
1562 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
1563 break;
1564 }
1565 _ranges.v64 = IONew(IOAddressRange, count);
1566 if (!_ranges.v64)
1567 return false;
1568 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
1569 break;
1570 #endif /* !__LP64__ */
1571 case kIOMemoryTypeVirtual:
1572 case kIOMemoryTypePhysical:
1573 if (count == 1) {
1574 _flags |= kIOMemoryAsReference;
1575 #ifndef __LP64__
1576 _rangesIsAllocated = false;
1577 #endif /* !__LP64__ */
1578 _ranges.v = &_singleRange.v;
1579 } else {
1580 _ranges.v = IONew(IOVirtualRange, count);
1581 if (!_ranges.v)
1582 return false;
1583 }
1584 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
1585 break;
1586 }
1587 }
1588
1589 // Find starting address within the vector of ranges
1590 Ranges vec = _ranges;
1591 UInt32 length = 0;
1592 UInt32 pages = 0;
1593 for (unsigned ind = 0; ind < count; ind++) {
1594 mach_vm_address_t addr;
1595 mach_vm_size_t len;
1596
1597 // addr & len are returned by this function
1598 getAddrLenForInd(addr, len, type, vec, ind);
1599 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
1600 len += length;
1601 assert(len >= length); // Check for 32 bit wrap around
1602 length = len;
1603
1604 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1605 {
1606 ppnum_t highPage = atop_64(addr + len - 1);
1607 if (highPage > _highestPage)
1608 _highestPage = highPage;
1609 }
1610 }
1611 _length = length;
1612 _pages = pages;
1613 _rangesCount = count;
1614
1615 // Auto-prepare memory at creation time.
1616 // Implied completion when descriptor is freed
1617 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1618 _wireCount++; // Physical MDs are, by definition, wired
1619 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
1620 ioGMDData *dataP;
1621 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
1622
1623 if (!initMemoryEntries(dataSize, mapper)) return false;
1624 dataP = getDataP(_memoryEntries);
1625 dataP->fPageCnt = _pages;
1626
1627 if ( (kIOMemoryPersistent & _flags) && !_memRef)
1628 {
1629 IOReturn
1630 err = memoryReferenceCreate(0, &_memRef);
1631 if (kIOReturnSuccess != err) return false;
1632 }
1633
1634 if ((_flags & kIOMemoryAutoPrepare)
1635 && prepare() != kIOReturnSuccess)
1636 return false;
1637 }
1638 }
1639
1640 return true;
1641 }
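
// Illustrative sketch (assumption, not original code): re-targeting an existing
// descriptor by calling initWithOptions() again, the re-use case described in the
// comment above initWithOptions(). The parameters are hypothetical.
#if 0
static bool retargetExample(IOGeneralMemoryDescriptor * md,
                            IOAddressRange * newRanges, UInt32 newCount, task_t task)
{
    // Any outstanding prepare() is completed and previously allocated ranges are
    // freed by the initializer before the new ranges are adopted.
    return (md->initWithOptions(newRanges, newCount, 0, task,
                                kIOMemoryTypeVirtual64 | kIODirectionOutIn,
                                /* mapper */ 0));
}
#endif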
1642
1643 /*
1644 * free
1645 *
1646 * Free resources.
1647 */
1648 void IOGeneralMemoryDescriptor::free()
1649 {
1650 IOOptionBits type = _flags & kIOMemoryTypeMask;
1651
1652 if( reserved)
1653 {
1654 LOCK;
1655 reserved->dp.memory = 0;
1656 UNLOCK;
1657 }
1658 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
1659 {
1660 ioGMDData * dataP;
1661 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1662 {
1663 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
1664 dataP->fMappedBase = 0;
1665 }
1666 }
1667 else
1668 {
1669 while (_wireCount) complete();
1670 }
1671
1672 if (_memoryEntries) _memoryEntries->release();
1673
1674 if (_ranges.v && !(kIOMemoryAsReference & _flags))
1675 {
1676 if (kIOMemoryTypeUIO == type)
1677 uio_free((uio_t) _ranges.v);
1678 #ifndef __LP64__
1679 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1680 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1681 #endif /* !__LP64__ */
1682 else
1683 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1684
1685 _ranges.v = NULL;
1686 }
1687
1688 if (reserved)
1689 {
1690 if (reserved->dp.devicePager)
1691 {
1692 // memEntry holds a ref on the device pager which owns reserved
1693 // (IOMemoryDescriptorReserved) so no reserved access after this point
1694 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
1695 }
1696 else
1697 IODelete(reserved, IOMemoryDescriptorReserved, 1);
1698 reserved = NULL;
1699 }
1700
1701 if (_memRef) memoryReferenceRelease(_memRef);
1702 if (_prepareLock) IOLockFree(_prepareLock);
1703
1704 super::free();
1705 }
1706
1707 #ifndef __LP64__
1708 void IOGeneralMemoryDescriptor::unmapFromKernel()
1709 {
1710 panic("IOGMD::unmapFromKernel deprecated");
1711 }
1712
1713 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1714 {
1715 panic("IOGMD::mapIntoKernel deprecated");
1716 }
1717 #endif /* !__LP64__ */
1718
1719 /*
1720 * getDirection:
1721 *
1722 * Get the direction of the transfer.
1723 */
1724 IODirection IOMemoryDescriptor::getDirection() const
1725 {
1726 #ifndef __LP64__
1727 if (_direction)
1728 return _direction;
1729 #endif /* !__LP64__ */
1730 return (IODirection) (_flags & kIOMemoryDirectionMask);
1731 }
1732
1733 /*
1734 * getLength:
1735 *
1736 * Get the length of the transfer (over all ranges).
1737 */
1738 IOByteCount IOMemoryDescriptor::getLength() const
1739 {
1740 return _length;
1741 }
1742
1743 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1744 {
1745 _tag = tag;
1746 }
1747
1748 IOOptionBits IOMemoryDescriptor::getTag( void )
1749 {
1750 return( _tag);
1751 }
1752
1753 #ifndef __LP64__
1754 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1755 IOPhysicalAddress
1756 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1757 {
1758 addr64_t physAddr = 0;
1759
1760 if( prepare() == kIOReturnSuccess) {
1761 physAddr = getPhysicalSegment64( offset, length );
1762 complete();
1763 }
1764
1765 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1766 }
1767 #endif /* !__LP64__ */
1768
1769 IOByteCount IOMemoryDescriptor::readBytes
1770 (IOByteCount offset, void *bytes, IOByteCount length)
1771 {
1772 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1773 IOByteCount remaining;
1774
1775 // Assert that this entire I/O is within the available range
1776 assert(offset <= _length);
1777 assert(offset + length <= _length);
1778 if (offset >= _length) {
1779 return 0;
1780 }
1781
1782 if (kIOMemoryThreadSafe & _flags)
1783 LOCK;
1784
1785 remaining = length = min(length, _length - offset);
1786 while (remaining) { // (process another target segment?)
1787 addr64_t srcAddr64;
1788 IOByteCount srcLen;
1789
1790 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1791 if (!srcAddr64)
1792 break;
1793
1794 // Clip segment length to remaining
1795 if (srcLen > remaining)
1796 srcLen = remaining;
1797
1798 copypv(srcAddr64, dstAddr, srcLen,
1799 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1800
1801 dstAddr += srcLen;
1802 offset += srcLen;
1803 remaining -= srcLen;
1804 }
1805
1806 if (kIOMemoryThreadSafe & _flags)
1807 UNLOCK;
1808
1809 assert(!remaining);
1810
1811 return length - remaining;
1812 }
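
// Illustrative usage sketch (assumption, not original code): copying out of a
// descriptor with readBytes(); kernelScratch is a hypothetical kernel buffer.
#if 0
static IOByteCount readBytesExample(IOMemoryDescriptor * md,
                                    void * kernelScratch, IOByteCount size)
{
    IOByteCount copied = 0;
    if (kIOReturnSuccess == md->prepare())   // wire so physical segments resolve
    {
        copied = md->readBytes(0 /* offset */, kernelScratch, size);
        md->complete();
    }
    return (copied);
}
#endif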
1813
1814 IOByteCount IOMemoryDescriptor::writeBytes
1815 (IOByteCount inoffset, const void *bytes, IOByteCount length)
1816 {
1817 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1818 IOByteCount remaining;
1819 IOByteCount offset = inoffset;
1820
1821 // Assert that this entire I/O is within the available range
1822 assert(offset <= _length);
1823 assert(offset + length <= _length);
1824
1825 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1826
1827 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1828 return 0;
1829 }
1830
1831 if (kIOMemoryThreadSafe & _flags)
1832 LOCK;
1833
1834 remaining = length = min(length, _length - offset);
1835 while (remaining) { // (process another target segment?)
1836 addr64_t dstAddr64;
1837 IOByteCount dstLen;
1838
1839 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1840 if (!dstAddr64)
1841 break;
1842
1843 // Clip segment length to remaining
1844 if (dstLen > remaining)
1845 dstLen = remaining;
1846
1847 if (!srcAddr) bzero_phys(dstAddr64, dstLen);
1848 else
1849 {
1850 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1851 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1852 srcAddr += dstLen;
1853 }
1854 offset += dstLen;
1855 remaining -= dstLen;
1856 }
1857
1858 if (kIOMemoryThreadSafe & _flags)
1859 UNLOCK;
1860
1861 assert(!remaining);
1862
1863 if (!srcAddr) performOperation(kIOMemoryIncoherentIOFlush, inoffset, length);
1864
1865 return length - remaining;
1866 }
1867
1868 #ifndef __LP64__
1869 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1870 {
1871 panic("IOGMD::setPosition deprecated");
1872 }
1873 #endif /* !__LP64__ */
1874
1875 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1876
1877 uint64_t
1878 IOGeneralMemoryDescriptor::getPreparationID( void )
1879 {
1880 ioGMDData *dataP;
1881
1882 if (!_wireCount)
1883 return (kIOPreparationIDUnprepared);
1884
1885 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1886 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1887 {
1888 IOMemoryDescriptor::setPreparationID();
1889 return (IOMemoryDescriptor::getPreparationID());
1890 }
1891
1892 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1893 return (kIOPreparationIDUnprepared);
1894
1895 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1896 {
1897 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1898 }
1899 return (dataP->fPreparationID);
1900 }
1901
1902 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1903 {
1904 if (!reserved)
1905 {
1906 reserved = IONew(IOMemoryDescriptorReserved, 1);
1907 if (reserved)
1908 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1909 }
1910 return (reserved);
1911 }
1912
1913 void IOMemoryDescriptor::setPreparationID( void )
1914 {
1915 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1916 {
1917 #if defined(__ppc__ )
1918 reserved->preparationID = gIOMDPreparationID++;
1919 #else
1920 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1921 #endif
1922 }
1923 }
1924
1925 uint64_t IOMemoryDescriptor::getPreparationID( void )
1926 {
1927 if (reserved)
1928 return (reserved->preparationID);
1929 else
1930 return (kIOPreparationIDUnsupported);
1931 }
1932
1933 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1934 {
1935 IOReturn err = kIOReturnSuccess;
1936 DMACommandOps params;
1937 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1938 ioGMDData *dataP;
1939
1940 params = (op & ~kIOMDDMACommandOperationMask & op);
1941 op &= kIOMDDMACommandOperationMask;
1942
1943 if (kIOMDDMAMap == op)
1944 {
1945 if (dataSize < sizeof(IOMDDMAMapArgs))
1946 return kIOReturnUnderrun;
1947
1948 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1949
1950 if (!_memoryEntries
1951 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1952
1953 if (_memoryEntries && data->fMapper)
1954 {
1955 bool remap;
1956 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1957 dataP = getDataP(_memoryEntries);
1958
1959 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1960 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1961
1962 remap = (dataP->fDMAMapNumAddressBits < 64)
1963 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1964 remap |= (dataP->fDMAMapAlignment > page_size);
1965 remap |= (!whole);
1966 if (remap || !dataP->fMappedBase)
1967 {
1968 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1969 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1970 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1971 {
1972 dataP->fMappedBase = data->fAlloc;
1973 data->fAllocCount = 0; // IOMD owns the alloc now
1974 }
1975 }
1976 else
1977 {
1978 data->fAlloc = dataP->fMappedBase;
1979 data->fAllocCount = 0; // IOMD owns the alloc
1980 }
1981 data->fMapContig = !dataP->fDiscontig;
1982 }
1983
1984 return (err);
1985 }
1986
1987 if (kIOMDAddDMAMapSpec == op)
1988 {
1989 if (dataSize < sizeof(IODMAMapSpecification))
1990 return kIOReturnUnderrun;
1991
1992 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1993
1994 if (!_memoryEntries
1995 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1996
1997 if (_memoryEntries)
1998 {
1999 dataP = getDataP(_memoryEntries);
2000 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
2001 dataP->fDMAMapNumAddressBits = data->numAddressBits;
2002 if (data->alignment > dataP->fDMAMapAlignment)
2003 dataP->fDMAMapAlignment = data->alignment;
2004 }
2005 return kIOReturnSuccess;
2006 }
2007
2008 if (kIOMDGetCharacteristics == op) {
2009
2010 if (dataSize < sizeof(IOMDDMACharacteristics))
2011 return kIOReturnUnderrun;
2012
2013 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2014 data->fLength = _length;
2015 data->fSGCount = _rangesCount;
2016 data->fPages = _pages;
2017 data->fDirection = getDirection();
2018 if (!_wireCount)
2019 data->fIsPrepared = false;
2020 else {
2021 data->fIsPrepared = true;
2022 data->fHighestPage = _highestPage;
2023 if (_memoryEntries)
2024 {
2025 dataP = getDataP(_memoryEntries);
2026 ioPLBlock *ioplList = getIOPLList(dataP);
2027 UInt count = getNumIOPL(_memoryEntries, dataP);
2028 if (count == 1)
2029 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
2030 }
2031 }
2032
2033 return kIOReturnSuccess;
2034
2035 #if IOMD_DEBUG_DMAACTIVE
2036 } else if (kIOMDDMAActive == op) {
2037 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
2038 else {
2039 if (md->__iomd_reservedA)
2040 OSDecrementAtomic(&md->__iomd_reservedA);
2041 else
2042 panic("kIOMDSetDMAInactive");
2043 }
2044 #endif /* IOMD_DEBUG_DMAACTIVE */
2045
2046 } else if (kIOMDWalkSegments != op)
2047 return kIOReturnBadArgument;
2048
2049 // Get the next segment
2050 struct InternalState {
2051 IOMDDMAWalkSegmentArgs fIO;
2052 UInt fOffset2Index;
2053 UInt fIndex;
2054 UInt fNextOffset;
2055 } *isP;
2056
2057 // Find the next segment
2058 if (dataSize < sizeof(*isP))
2059 return kIOReturnUnderrun;
2060
2061 isP = (InternalState *) vData;
2062 UInt offset = isP->fIO.fOffset;
2063 bool mapped = isP->fIO.fMapped;
2064
2065 if (IOMapper::gSystem && mapped
2066 && (!(kIOMemoryHostOnly & _flags))
2067 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
2068 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
2069 {
2070 if (!_memoryEntries
2071 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
2072
2073 dataP = getDataP(_memoryEntries);
2074 if (dataP->fMapper)
2075 {
2076 IODMAMapSpecification mapSpec;
2077 bzero(&mapSpec, sizeof(mapSpec));
2078 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2079 mapSpec.alignment = dataP->fDMAMapAlignment;
2080 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
2081 if (kIOReturnSuccess != err) return (err);
2082 }
2083 }
2084
2085 if (offset >= _length)
2086 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
2087
2088 // Validate the previous offset
2089 UInt ind, off2Ind = isP->fOffset2Index;
2090 if (!params
2091 && offset
2092 && (offset == isP->fNextOffset || off2Ind <= offset))
2093 ind = isP->fIndex;
2094 else
2095 ind = off2Ind = 0; // Start from beginning
2096
2097 UInt length;
2098 UInt64 address;
2099
2100
2101 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
2102
2103 // Physical address based memory descriptor
2104 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
2105
2106 // Find the range after the one that contains the offset
2107 mach_vm_size_t len;
2108 for (len = 0; off2Ind <= offset; ind++) {
2109 len = physP[ind].length;
2110 off2Ind += len;
2111 }
2112
2113 // Calculate length within range and starting address
2114 length = off2Ind - offset;
2115 address = physP[ind - 1].address + len - length;
2116
2117 if (true && mapped && _memoryEntries
2118 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2119 {
2120 address = dataP->fMappedBase + offset;
2121 }
2122 else
2123 {
2124 // see how far we can coalesce ranges
2125 while (ind < _rangesCount && address + length == physP[ind].address) {
2126 len = physP[ind].length;
2127 length += len;
2128 off2Ind += len;
2129 ind++;
2130 }
2131 }
2132
2133 // correct contiguous check overshoot
2134 ind--;
2135 off2Ind -= len;
2136 }
2137 #ifndef __LP64__
2138 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
2139
2140 // Physical address based memory descriptor
2141 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
2142
2143 // Find the range after the one that contains the offset
2144 mach_vm_size_t len;
2145 for (len = 0; off2Ind <= offset; ind++) {
2146 len = physP[ind].length;
2147 off2Ind += len;
2148 }
2149
2150 // Calculate length within range and starting address
2151 length = off2Ind - offset;
2152 address = physP[ind - 1].address + len - length;
2153
2154 if (true && mapped && _memoryEntries
2155 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
2156 {
2157 address = dataP->fMappedBase + offset;
2158 }
2159 else
2160 {
2161 // see how far we can coalesce ranges
2162 while (ind < _rangesCount && address + length == physP[ind].address) {
2163 len = physP[ind].length;
2164 length += len;
2165 off2Ind += len;
2166 ind++;
2167 }
2168 }
2169 // correct contiguous check overshoot
2170 ind--;
2171 off2Ind -= len;
2172 }
2173 #endif /* !__LP64__ */
2174 else do {
2175 if (!_wireCount)
2176 panic("IOGMD: not wired for the IODMACommand");
2177
2178 assert(_memoryEntries);
2179
2180 dataP = getDataP(_memoryEntries);
2181 const ioPLBlock *ioplList = getIOPLList(dataP);
2182 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
2183 upl_page_info_t *pageList = getPageList(dataP);
2184
2185 assert(numIOPLs > 0);
2186
2187 // Scan through iopl info blocks looking for block containing offset
2188 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
2189 ind++;
2190
2191 // Go back to actual range as search goes past it
2192 ioPLBlock ioplInfo = ioplList[ind - 1];
2193 off2Ind = ioplInfo.fIOMDOffset;
2194
2195 if (ind < numIOPLs)
2196 length = ioplList[ind].fIOMDOffset;
2197 else
2198 length = _length;
2199 length -= offset; // Remainder within iopl
2200
2201 // Subtract this iopl's starting offset within the total list
2202 offset -= off2Ind;
2203
2204 // If a mapped address is requested and this is a pre-mapped IOPL
2205 // then we just need to compute an offset relative to the mapped base.
2206 if (mapped && dataP->fMappedBase) {
2207 offset += (ioplInfo.fPageOffset & PAGE_MASK);
2208 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
2209 continue; // Done, leave the do/while(false) now
2210 }
2211
2212 // The offset is rebased into the current iopl.
2213 // Now add the iopl 1st page offset.
2214 offset += ioplInfo.fPageOffset;
2215
2216 // For external UPLs the fPageInfo field points directly to
2217 // the upl's upl_page_info_t array.
2218 if (ioplInfo.fFlags & kIOPLExternUPL)
2219 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
2220 else
2221 pageList = &pageList[ioplInfo.fPageInfo];
2222
2223 // Check for direct device non-paged memory
2224 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
2225 address = ptoa_64(pageList->phys_addr) + offset;
2226 continue; // Done, leave the do/while(false) now
2227 }
2228
2229 // Now we need to compute the index into the pageList
2230 UInt pageInd = atop_32(offset);
2231 offset &= PAGE_MASK;
2232
2233 // Compute the starting address of this segment
2234 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
2235 if (!pageAddr) {
2236 panic("!pageList phys_addr");
2237 }
2238
2239 address = ptoa_64(pageAddr) + offset;
2240
2241 // length is currently set to the length of the remainder of the iopl.
2242 // We need to check that the remainder of the iopl is contiguous.
2243 // This is indicated by the pageList phys_addr entries being sequential.
2244 IOByteCount contigLength = PAGE_SIZE - offset;
2245 while (contigLength < length
2246 && ++pageAddr == pageList[++pageInd].phys_addr)
2247 {
2248 contigLength += PAGE_SIZE;
2249 }
2250
2251 if (contigLength < length)
2252 length = contigLength;
2253
2254
2255 assert(address);
2256 assert(length);
2257
2258 } while (false);
2259
2260 // Update return values and state
2261 isP->fIO.fIOVMAddr = address;
2262 isP->fIO.fLength = length;
2263 isP->fIndex = ind;
2264 isP->fOffset2Index = off2Ind;
2265 isP->fNextOffset = isP->fIO.fOffset + length;
2266
2267 return kIOReturnSuccess;
2268 }
2269
2270 addr64_t
2271 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2272 {
2273 IOReturn ret;
2274 mach_vm_address_t address = 0;
2275 mach_vm_size_t length = 0;
2276 IOMapper * mapper = gIOSystemMapper;
2277 IOOptionBits type = _flags & kIOMemoryTypeMask;
2278
2279 if (lengthOfSegment)
2280 *lengthOfSegment = 0;
2281
2282 if (offset >= _length)
2283 return 0;
2284
2285 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
2286 // support the unwired memory case in IOGeneralMemoryDescriptor. Likewise, hibernate_write_image() cannot use
2287 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation an
2288 // IOMemoryMap would require. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
2289
2290 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
2291 {
2292 unsigned rangesIndex = 0;
2293 Ranges vec = _ranges;
2294 mach_vm_address_t addr;
2295
2296 // Find starting address within the vector of ranges
2297 for (;;) {
2298 getAddrLenForInd(addr, length, type, vec, rangesIndex);
2299 if (offset < length)
2300 break;
2301 offset -= length; // (make offset relative)
2302 rangesIndex++;
2303 }
2304
2305 // Now that we have the starting range,
2306 // let's find the last contiguous range
2307 addr += offset;
2308 length -= offset;
2309
2310 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
2311 mach_vm_address_t newAddr;
2312 mach_vm_size_t newLen;
2313
2314 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
2315 if (addr + length != newAddr)
2316 break;
2317 length += newLen;
2318 }
2319 if (addr)
2320 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
2321 }
2322 else
2323 {
2324 IOMDDMAWalkSegmentState _state;
2325 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
2326
2327 state->fOffset = offset;
2328 state->fLength = _length - offset;
2329 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
2330
2331 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
2332
2333 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
2334 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
2335 ret, this, state->fOffset,
2336 state->fIOVMAddr, state->fLength);
2337 if (kIOReturnSuccess == ret)
2338 {
2339 address = state->fIOVMAddr;
2340 length = state->fLength;
2341 }
2342
2343 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
2344 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
2345
2346 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
2347 {
2348 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
2349 {
2350 addr64_t origAddr = address;
2351 IOByteCount origLen = length;
2352
2353 address = mapper->mapAddr(origAddr);
2354 length = page_size - (address & (page_size - 1));
2355 while ((length < origLen)
2356 && ((address + length) == mapper->mapAddr(origAddr + length)))
2357 length += page_size;
2358 if (length > origLen)
2359 length = origLen;
2360 }
2361 }
2362 }
2363
2364 if (!address)
2365 length = 0;
2366
2367 if (lengthOfSegment)
2368 *lengthOfSegment = length;
2369
2370 return (address);
2371 }
2372
2373 #ifndef __LP64__
2374 addr64_t
2375 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
2376 {
2377 addr64_t address = 0;
2378
2379 if (options & _kIOMemorySourceSegment)
2380 {
2381 address = getSourceSegment(offset, lengthOfSegment);
2382 }
2383 else if (options & kIOMemoryMapperNone)
2384 {
2385 address = getPhysicalSegment64(offset, lengthOfSegment);
2386 }
2387 else
2388 {
2389 address = getPhysicalSegment(offset, lengthOfSegment);
2390 }
2391
2392 return (address);
2393 }
2394
2395 addr64_t
2396 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2397 {
2398 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
2399 }
2400
2401 IOPhysicalAddress
2402 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2403 {
2404 addr64_t address = 0;
2405 IOByteCount length = 0;
2406
2407 address = getPhysicalSegment(offset, lengthOfSegment, 0);
2408
2409 if (lengthOfSegment)
2410 length = *lengthOfSegment;
2411
2412 if ((address + length) > 0x100000000ULL)
2413 {
2414 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
2415 address, (long) length, (getMetaClass())->getClassName());
2416 }
2417
2418 return ((IOPhysicalAddress) address);
2419 }
2420
2421 addr64_t
2422 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
2423 {
2424 IOPhysicalAddress phys32;
2425 IOByteCount length;
2426 addr64_t phys64;
2427 IOMapper * mapper = 0;
2428
2429 phys32 = getPhysicalSegment(offset, lengthOfSegment);
2430 if (!phys32)
2431 return 0;
2432
2433 if (gIOSystemMapper)
2434 mapper = gIOSystemMapper;
2435
2436 if (mapper)
2437 {
2438 IOByteCount origLen;
2439
2440 phys64 = mapper->mapAddr(phys32);
2441 origLen = *lengthOfSegment;
2442 length = page_size - (phys64 & (page_size - 1));
2443 while ((length < origLen)
2444 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
2445 length += page_size;
2446 if (length > origLen)
2447 length = origLen;
2448
2449 *lengthOfSegment = length;
2450 }
2451 else
2452 phys64 = (addr64_t) phys32;
2453
2454 return phys64;
2455 }
2456
2457 IOPhysicalAddress
2458 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2459 {
2460 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
2461 }
2462
2463 IOPhysicalAddress
2464 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
2465 {
2466 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
2467 }
2468
2469 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2470 IOByteCount * lengthOfSegment)
2471 {
2472 if (_task == kernel_task)
2473 return (void *) getSourceSegment(offset, lengthOfSegment);
2474 else
2475 panic("IOGMD::getVirtualSegment deprecated");
2476
2477 return 0;
2478 }
2479 #endif /* !__LP64__ */
2480
2481 IOReturn
2482 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
2483 {
2484 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
2485 DMACommandOps params;
2486 IOReturn err;
2487
2488 params = (op & ~kIOMDDMACommandOperationMask & op);
2489 op &= kIOMDDMACommandOperationMask;
2490
2491 if (kIOMDGetCharacteristics == op) {
2492 if (dataSize < sizeof(IOMDDMACharacteristics))
2493 return kIOReturnUnderrun;
2494
2495 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
2496 data->fLength = getLength();
2497 data->fSGCount = 0;
2498 data->fDirection = getDirection();
2499 data->fIsPrepared = true; // Assume prepared - fails safe
2500 }
2501 else if (kIOMDWalkSegments == op) {
2502 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
2503 return kIOReturnUnderrun;
2504
2505 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
2506 IOByteCount offset = (IOByteCount) data->fOffset;
2507
2508 IOPhysicalLength length;
2509 if (data->fMapped && IOMapper::gSystem)
2510 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
2511 else
2512 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
2513 data->fLength = length;
2514 }
2515 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
2516 else if (kIOMDDMAMap == op)
2517 {
2518 if (dataSize < sizeof(IOMDDMAMapArgs))
2519 return kIOReturnUnderrun;
2520 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
2521
2522 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
2523
2524 data->fMapContig = true;
2525 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
2526 return (err);
2527 }
2528 else return kIOReturnBadArgument;
2529
2530 return kIOReturnSuccess;
2531 }
2532
2533 IOReturn
2534 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
2535 IOOptionBits * oldState )
2536 {
2537 IOReturn err = kIOReturnSuccess;
2538
2539 vm_purgable_t control;
2540 int state;
2541
2542 if (_memRef)
2543 {
2544 err = super::setPurgeable(newState, oldState);
2545 }
2546 else
2547 {
2548 if (kIOMemoryThreadSafe & _flags)
2549 LOCK;
2550 do
2551 {
2552 // Find the appropriate vm_map for the given task
2553 vm_map_t curMap;
2554 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2555 {
2556 err = kIOReturnNotReady;
2557 break;
2558 }
2559 else if (!_task)
2560 {
2561 err = kIOReturnUnsupported;
2562 break;
2563 }
2564 else
2565 curMap = get_task_map(_task);
2566
2567 // can only do one range
2568 Ranges vec = _ranges;
2569 IOOptionBits type = _flags & kIOMemoryTypeMask;
2570 mach_vm_address_t addr;
2571 mach_vm_size_t len;
2572 getAddrLenForInd(addr, len, type, vec, 0);
2573
2574 err = purgeableControlBits(newState, &control, &state);
2575 if (kIOReturnSuccess != err)
2576 break;
2577 err = mach_vm_purgable_control(curMap, addr, control, &state);
2578 if (oldState)
2579 {
2580 if (kIOReturnSuccess == err)
2581 {
2582 err = purgeableStateBits(&state);
2583 *oldState = state;
2584 }
2585 }
2586 }
2587 while (false);
2588 if (kIOMemoryThreadSafe & _flags)
2589 UNLOCK;
2590 }
2591
2592 return (err);
2593 }
2594
2595 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
2596 IOOptionBits * oldState )
2597 {
2598 IOReturn err = kIOReturnNotReady;
2599
2600 if (kIOMemoryThreadSafe & _flags) LOCK;
2601 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceSetPurgeable(_memRef, newState, oldState);
2602 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2603
2604 return (err);
2605 }
2606
2607 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
2608 IOByteCount * dirtyPageCount )
2609 {
2610 IOReturn err = kIOReturnNotReady;
2611
2612 if (kIOMemoryThreadSafe & _flags) LOCK;
2613 if (_memRef) err = IOGeneralMemoryDescriptor::memoryReferenceGetPageCounts(_memRef, residentPageCount, dirtyPageCount);
2614 if (kIOMemoryThreadSafe & _flags) UNLOCK;
2615
2616 return (err);
2617 }
2618
2619
2620 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
2621 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
2622
2623 static void SetEncryptOp(addr64_t pa, unsigned int count)
2624 {
2625 ppnum_t page, end;
2626
2627 page = atop_64(round_page_64(pa));
2628 end = atop_64(trunc_page_64(pa + count));
2629 for (; page < end; page++)
2630 {
2631 pmap_clear_noencrypt(page);
2632 }
2633 }
2634
2635 static void ClearEncryptOp(addr64_t pa, unsigned int count)
2636 {
2637 ppnum_t page, end;
2638
2639 page = atop_64(round_page_64(pa));
2640 end = atop_64(trunc_page_64(pa + count));
2641 for (; page < end; page++)
2642 {
2643 pmap_set_noencrypt(page);
2644 }
2645 }
2646
2647 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2648 IOByteCount offset, IOByteCount length )
2649 {
2650 IOByteCount remaining;
2651 unsigned int res;
2652 void (*func)(addr64_t pa, unsigned int count) = 0;
2653
2654 switch (options)
2655 {
2656 case kIOMemoryIncoherentIOFlush:
2657 func = &dcache_incoherent_io_flush64;
2658 break;
2659 case kIOMemoryIncoherentIOStore:
2660 func = &dcache_incoherent_io_store64;
2661 break;
2662
2663 case kIOMemorySetEncrypted:
2664 func = &SetEncryptOp;
2665 break;
2666 case kIOMemoryClearEncrypted:
2667 func = &ClearEncryptOp;
2668 break;
2669 }
2670
2671 if (!func)
2672 return (kIOReturnUnsupported);
2673
2674 if (kIOMemoryThreadSafe & _flags)
2675 LOCK;
2676
2677 res = 0x0UL;
2678 remaining = length = min(length, getLength() - offset);
2679 while (remaining)
2680 // (process another target segment?)
2681 {
2682 addr64_t dstAddr64;
2683 IOByteCount dstLen;
2684
2685 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2686 if (!dstAddr64)
2687 break;
2688
2689 // Clip segment length to remaining
2690 if (dstLen > remaining)
2691 dstLen = remaining;
2692
2693 (*func)(dstAddr64, dstLen);
2694
2695 offset += dstLen;
2696 remaining -= dstLen;
2697 }
2698
2699 if (kIOMemoryThreadSafe & _flags)
2700 UNLOCK;
2701
2702 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2703 }
2704
2705 #if defined(__i386__) || defined(__x86_64__)
2706 extern vm_offset_t first_avail;
2707 #define io_kernel_static_end first_avail
2708 #else
2709 #error io_kernel_static_end is undefined for this architecture
2710 #endif
2711
2712 static kern_return_t
2713 io_get_kernel_static_upl(
2714 vm_map_t /* map */,
2715 uintptr_t offset,
2716 vm_size_t *upl_size,
2717 upl_t *upl,
2718 upl_page_info_array_t page_list,
2719 unsigned int *count,
2720 ppnum_t *highest_page)
2721 {
2722 unsigned int pageCount, page;
2723 ppnum_t phys;
2724 ppnum_t highestPage = 0;
2725
2726 pageCount = atop_32(*upl_size);
2727 if (pageCount > *count)
2728 pageCount = *count;
2729
2730 *upl = NULL;
2731
2732 for (page = 0; page < pageCount; page++)
2733 {
2734 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2735 if (!phys)
2736 break;
2737 page_list[page].phys_addr = phys;
2738 page_list[page].pageout = 0;
2739 page_list[page].absent = 0;
2740 page_list[page].dirty = 0;
2741 page_list[page].precious = 0;
2742 page_list[page].device = 0;
2743 if (phys > highestPage)
2744 highestPage = phys;
2745 }
2746
2747 *highest_page = highestPage;
2748
2749 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2750 }
2751
2752 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2753 {
2754 IOOptionBits type = _flags & kIOMemoryTypeMask;
2755 IOReturn error = kIOReturnCannotWire;
2756 ioGMDData *dataP;
2757 upl_page_info_array_t pageInfo;
2758 ppnum_t mapBase;
2759
2760 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2761
2762 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2763 forDirection = (IODirection) (forDirection | getDirection());
2764
2765 int uplFlags; // This Mem Desc's default flags for upl creation
2766 switch (kIODirectionOutIn & forDirection)
2767 {
2768 case kIODirectionOut:
2769 // Pages do not need to be marked as dirty on commit
2770 uplFlags = UPL_COPYOUT_FROM;
2771 break;
2772
2773 case kIODirectionIn:
2774 default:
2775 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2776 break;
2777 }
2778
2779 if (_wireCount)
2780 {
2781 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2782 {
2783 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2784 error = kIOReturnNotWritable;
2785 }
2786 else error = kIOReturnSuccess;
2787 return (error);
2788 }
2789
2790 dataP = getDataP(_memoryEntries);
2791 IOMapper *mapper;
2792 mapper = dataP->fMapper;
2793 dataP->fMappedBase = 0;
2794
2795 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2796 if (kIODirectionPrepareToPhys32 & forDirection)
2797 {
2798 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2799 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2800 }
2801 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2802 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2803 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
2804
2805 mapBase = 0;
2806
2807 // Note that appendBytes(NULL) zeros the data up to the desired length
2808 // and the length parameter is an unsigned int
2809 size_t uplPageSize = dataP->fPageCnt * sizeof(upl_page_info_t);
2810 if (uplPageSize > ((unsigned int)uplPageSize)) return (kIOReturnNoMemory);
2811 if (!_memoryEntries->appendBytes(0, uplPageSize)) return (kIOReturnNoMemory);
2812 dataP = 0;
2813
2814 // Find the appropriate vm_map for the given task
2815 vm_map_t curMap;
2816 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags)) curMap = 0;
2817 else curMap = get_task_map(_task);
2818
2819 // Iterate over the vector of virtual ranges
2820 Ranges vec = _ranges;
2821 unsigned int pageIndex = 0;
2822 IOByteCount mdOffset = 0;
2823 ppnum_t highestPage = 0;
2824
2825 IOMemoryEntry * memRefEntry = 0;
2826 if (_memRef) memRefEntry = &_memRef->entries[0];
2827
2828 for (UInt range = 0; range < _rangesCount; range++) {
2829 ioPLBlock iopl;
2830 mach_vm_address_t startPage;
2831 mach_vm_size_t numBytes;
2832 ppnum_t highPage = 0;
2833
2834 // Get the startPage address and length of vec[range]
2835 getAddrLenForInd(startPage, numBytes, type, vec, range);
2836 iopl.fPageOffset = startPage & PAGE_MASK;
2837 numBytes += iopl.fPageOffset;
2838 startPage = trunc_page_64(startPage);
2839
2840 if (mapper)
2841 iopl.fMappedPage = mapBase + pageIndex;
2842 else
2843 iopl.fMappedPage = 0;
2844
2845 // Iterate over the current range, creating UPLs
2846 while (numBytes) {
2847 vm_address_t kernelStart = (vm_address_t) startPage;
2848 vm_map_t theMap;
2849 if (curMap) theMap = curMap;
2850 else if (_memRef)
2851 {
2852 theMap = NULL;
2853 }
2854 else
2855 {
2856 assert(_task == kernel_task);
2857 theMap = IOPageableMapForAddress(kernelStart);
2858 }
2859
2860 int ioplFlags = uplFlags;
2861 dataP = getDataP(_memoryEntries);
2862 pageInfo = getPageList(dataP);
2863 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2864
2865 vm_size_t ioplSize = round_page(numBytes);
2866 unsigned int numPageInfo = atop_32(ioplSize);
2867
2868 if ((theMap == kernel_map) && (kernelStart < io_kernel_static_end)) {
2869 error = io_get_kernel_static_upl(theMap,
2870 kernelStart,
2871 &ioplSize,
2872 &iopl.fIOPL,
2873 baseInfo,
2874 &numPageInfo,
2875 &highPage);
2876 }
2877 else if (_memRef) {
2878 memory_object_offset_t entryOffset;
2879
2880 entryOffset = (mdOffset - iopl.fPageOffset - memRefEntry->offset);
2881 if (entryOffset >= memRefEntry->size) {
2882 memRefEntry++;
2883 if (memRefEntry >= &_memRef->entries[_memRef->count]) panic("memRefEntry");
2884 entryOffset = 0;
2885 }
2886 if (ioplSize > (memRefEntry->size - entryOffset)) ioplSize = (memRefEntry->size - entryOffset);
2887 error = memory_object_iopl_request(memRefEntry->entry,
2888 entryOffset,
2889 &ioplSize,
2890 &iopl.fIOPL,
2891 baseInfo,
2892 &numPageInfo,
2893 &ioplFlags);
2894 }
2895 else {
2896 assert(theMap);
2897 error = vm_map_create_upl(theMap,
2898 startPage,
2899 (upl_size_t*)&ioplSize,
2900 &iopl.fIOPL,
2901 baseInfo,
2902 &numPageInfo,
2903 &ioplFlags);
2904 }
2905
2906 assert(ioplSize);
2907 if (error != KERN_SUCCESS)
2908 goto abortExit;
2909
2910 if (iopl.fIOPL)
2911 highPage = upl_get_highest_page(iopl.fIOPL);
2912 if (highPage > highestPage)
2913 highestPage = highPage;
2914
2915 error = kIOReturnCannotWire;
2916
2917 if (baseInfo->device) {
2918 numPageInfo = 1;
2919 iopl.fFlags = kIOPLOnDevice;
2920 }
2921 else {
2922 iopl.fFlags = 0;
2923 }
2924
2925 iopl.fIOMDOffset = mdOffset;
2926 iopl.fPageInfo = pageIndex;
2927 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2928
2929 #if 0
2930 // This used to remove the upl for auto-prepares here, to cope with errant code
2931 // that freed memory before releasing the descriptor pointing at it
2932 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2933 {
2934 upl_commit(iopl.fIOPL, 0, 0);
2935 upl_deallocate(iopl.fIOPL);
2936 iopl.fIOPL = 0;
2937 }
2938 #endif
2939
2940 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2941 // Clean up the partially created and unsaved iopl
2942 if (iopl.fIOPL) {
2943 upl_abort(iopl.fIOPL, 0);
2944 upl_deallocate(iopl.fIOPL);
2945 }
2946 goto abortExit;
2947 }
2948 dataP = 0;
2949
2950 // Check for multiple iopls in one virtual range
2951 pageIndex += numPageInfo;
2952 mdOffset -= iopl.fPageOffset;
2953 if (ioplSize < numBytes) {
2954 numBytes -= ioplSize;
2955 startPage += ioplSize;
2956 mdOffset += ioplSize;
2957 iopl.fPageOffset = 0;
2958 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2959 }
2960 else {
2961 mdOffset += numBytes;
2962 break;
2963 }
2964 }
2965 }
2966
2967 _highestPage = highestPage;
2968
2969 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2970
2971 return kIOReturnSuccess;
2972
2973 abortExit:
2974 {
2975 dataP = getDataP(_memoryEntries);
2976 UInt done = getNumIOPL(_memoryEntries, dataP);
2977 ioPLBlock *ioplList = getIOPLList(dataP);
2978
2979 for (UInt range = 0; range < done; range++)
2980 {
2981 if (ioplList[range].fIOPL) {
2982 upl_abort(ioplList[range].fIOPL, 0);
2983 upl_deallocate(ioplList[range].fIOPL);
2984 }
2985 }
2986 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2987 }
2988
2989 if (error == KERN_FAILURE)
2990 error = kIOReturnCannotWire;
2991 else if (error == KERN_MEMORY_ERROR)
2992 error = kIOReturnNoResources;
2993
2994 return error;
2995 }
2996
2997 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2998 {
2999 ioGMDData * dataP;
3000 unsigned dataSize = size;
3001
3002 if (!_memoryEntries) {
3003 _memoryEntries = OSData::withCapacity(dataSize);
3004 if (!_memoryEntries)
3005 return false;
3006 }
3007 else if (!_memoryEntries->initWithCapacity(dataSize))
3008 return false;
3009
3010 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
3011 dataP = getDataP(_memoryEntries);
3012
3013 if (mapper == kIOMapperWaitSystem) {
3014 IOMapper::checkForSystemMapper();
3015 mapper = IOMapper::gSystem;
3016 }
3017 dataP->fMapper = mapper;
3018 dataP->fPageCnt = 0;
3019 dataP->fMappedBase = 0;
3020 dataP->fDMAMapNumAddressBits = 64;
3021 dataP->fDMAMapAlignment = 0;
3022 dataP->fPreparationID = kIOPreparationIDUnprepared;
3023 dataP->fDiscontig = false;
3024 dataP->fCompletionError = false;
3025
3026 return (true);
3027 }
3028
3029 IOReturn IOMemoryDescriptor::dmaMap(
3030 IOMapper * mapper,
3031 const IODMAMapSpecification * mapSpec,
3032 uint64_t offset,
3033 uint64_t length,
3034 uint64_t * address,
3035 ppnum_t * mapPages)
3036 {
3037 IOMDDMAWalkSegmentState walkState;
3038 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
3039 IOOptionBits mdOp;
3040 IOReturn ret;
3041 IOPhysicalLength segLen;
3042 addr64_t phys, align, pageOffset;
3043 ppnum_t base, pageIndex, pageCount;
3044 uint64_t index;
3045 uint32_t mapOptions = 0;
3046
3047 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3048
3049 walkArgs->fMapped = false;
3050 mdOp = kIOMDFirstSegment;
3051 pageCount = 0;
3052 for (index = 0; index < length; )
3053 {
3054 if (index && (page_mask & (index + pageOffset))) break;
3055
3056 walkArgs->fOffset = offset + index;
3057 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3058 mdOp = kIOMDWalkSegments;
3059 if (ret != kIOReturnSuccess) break;
3060 phys = walkArgs->fIOVMAddr;
3061 segLen = walkArgs->fLength;
3062
3063 align = (phys & page_mask);
3064 if (!index) pageOffset = align;
3065 else if (align) break;
3066 pageCount += atop_64(round_page_64(align + segLen));
3067 index += segLen;
3068 }
3069
3070 if (index < length) return (kIOReturnVMError);
3071
3072 base = mapper->iovmMapMemory(this, offset, pageCount,
3073 mapOptions, NULL, mapSpec);
3074
3075 if (!base) return (kIOReturnNoResources);
3076
3077 mdOp = kIOMDFirstSegment;
3078 for (pageIndex = 0, index = 0; index < length; )
3079 {
3080 walkArgs->fOffset = offset + index;
3081 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
3082 mdOp = kIOMDWalkSegments;
3083 if (ret != kIOReturnSuccess) break;
3084 phys = walkArgs->fIOVMAddr;
3085 segLen = walkArgs->fLength;
3086
3087 ppnum_t page = atop_64(phys);
3088 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
3089 while (count--)
3090 {
3091 mapper->iovmInsert(base, pageIndex, page);
3092 page++;
3093 pageIndex++;
3094 }
3095 index += segLen;
3096 }
3097 if (pageIndex != pageCount) panic("pageIndex");
3098
3099 *address = ptoa_64(base) + pageOffset;
3100 if (mapPages) *mapPages = pageCount;
3101
3102 return (kIOReturnSuccess);
3103 }
3104
3105 IOReturn IOGeneralMemoryDescriptor::dmaMap(
3106 IOMapper * mapper,
3107 const IODMAMapSpecification * mapSpec,
3108 uint64_t offset,
3109 uint64_t length,
3110 uint64_t * address,
3111 ppnum_t * mapPages)
3112 {
3113 IOReturn err = kIOReturnSuccess;
3114 ioGMDData * dataP;
3115 IOOptionBits type = _flags & kIOMemoryTypeMask;
3116
3117 *address = 0;
3118 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
3119
3120 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
3121 || offset || (length != _length))
3122 {
3123 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
3124 }
3125 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
3126 {
3127 const ioPLBlock * ioplList = getIOPLList(dataP);
3128 upl_page_info_t * pageList;
3129 uint32_t mapOptions = 0;
3130 ppnum_t base;
3131
3132 IODMAMapSpecification mapSpec;
3133 bzero(&mapSpec, sizeof(mapSpec));
3134 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
3135 mapSpec.alignment = dataP->fDMAMapAlignment;
3136
3137 // For external UPLs the fPageInfo field points directly to
3138 // the upl's upl_page_info_t array.
3139 if (ioplList->fFlags & kIOPLExternUPL)
3140 {
3141 pageList = (upl_page_info_t *) ioplList->fPageInfo;
3142 mapOptions |= kIODMAMapPagingPath;
3143 }
3144 else
3145 pageList = getPageList(dataP);
3146
3147 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
3148
3149 // Check for direct device non-paged memory
3150 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
3151
3152 base = mapper->iovmMapMemory(
3153 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
3154 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
3155 if (mapPages) *mapPages = _pages;
3156 }
3157
3158 return (err);
3159 }
3160
3161 /*
3162 * prepare
3163 *
3164 * Prepare the memory for an I/O transfer. This involves paging in
3165 * the memory, if necessary, and wiring it down for the duration of
3166 * the transfer. The complete() method completes the processing of
3167 * the memory after the I/O transfer finishes. This method needn't be
3168 * called for non-pageable memory.
3169 */
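/*
 * A minimal usage sketch, assuming a hypothetical caller-supplied buffer
 * (bufferAddr, bufferLen) in some task: prepare() is paired with complete()
 * around the transfer, and the descriptor is released afterwards.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                  bufferAddr, bufferLen, kIODirectionOut, task);
 *   if (md && (kIOReturnSuccess == md->prepare(kIODirectionOut))) {
 *       // ... run the DMA transfer described by md ...
 *       md->complete(kIODirectionOut);
 *   }
 *   if (md) md->release();
 */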
3170
3171 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
3172 {
3173 IOReturn error = kIOReturnSuccess;
3174 IOOptionBits type = _flags & kIOMemoryTypeMask;
3175
3176 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3177 return kIOReturnSuccess;
3178
3179 if (_prepareLock)
3180 IOLockLock(_prepareLock);
3181
3182 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3183 {
3184 error = wireVirtual(forDirection);
3185 }
3186
3187 if (kIOReturnSuccess == error)
3188 {
3189 if (1 == ++_wireCount)
3190 {
3191 if (kIOMemoryClearEncrypt & _flags)
3192 {
3193 performOperation(kIOMemoryClearEncrypted, 0, _length);
3194 }
3195 }
3196 }
3197
3198 if (_prepareLock)
3199 IOLockUnlock(_prepareLock);
3200
3201 return error;
3202 }
3203
3204 /*
3205 * complete
3206 *
3207 * Complete processing of the memory after an I/O transfer finishes.
3208 * This method should not be called unless a prepare was previously
3209 * issued; the prepare() and complete() must occur in pairs,
3210 * before and after an I/O transfer involving pageable memory.
3211 */
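/*
 * A brief sketch of the completion flags handled below, assuming the same
 * hypothetical transfer as in the prepare() sketch above:
 *
 *   if (transferFailed)
 *       md->complete((IODirection) (kIODirectionOut | kIODirectionCompleteWithError));
 *   else
 *       md->complete(kIODirectionOut);
 *
 * kIODirectionCompleteWithError records a completion error, so when the last
 * wiring is undone the iopls are aborted rather than committed.
 */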
3212
3213 IOReturn IOGeneralMemoryDescriptor::complete(IODirection forDirection)
3214 {
3215 IOOptionBits type = _flags & kIOMemoryTypeMask;
3216 ioGMDData * dataP;
3217
3218 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
3219 return kIOReturnSuccess;
3220
3221 if (_prepareLock)
3222 IOLockLock(_prepareLock);
3223
3224 assert(_wireCount);
3225
3226 if ((kIODirectionCompleteWithError & forDirection)
3227 && (dataP = getDataP(_memoryEntries)))
3228 dataP->fCompletionError = true;
3229
3230 if (_wireCount)
3231 {
3232 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
3233 {
3234 performOperation(kIOMemorySetEncrypted, 0, _length);
3235 }
3236
3237 _wireCount--;
3238 if (!_wireCount || (kIODirectionCompleteWithDataValid & forDirection))
3239 {
3240 IOOptionBits type = _flags & kIOMemoryTypeMask;
3241 dataP = getDataP(_memoryEntries);
3242 ioPLBlock *ioplList = getIOPLList(dataP);
3243 UInt ind, count = getNumIOPL(_memoryEntries, dataP);
3244
3245 if (_wireCount)
3246 {
3247 // kIODirectionCompleteWithDataValid & forDirection
3248 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
3249 {
3250 for (ind = 0; ind < count; ind++)
3251 {
3252 if (ioplList[ind].fIOPL) iopl_valid_data(ioplList[ind].fIOPL);
3253 }
3254 }
3255 }
3256 else
3257 {
3258 #if IOMD_DEBUG_DMAACTIVE
3259 if (__iomd_reservedA) panic("complete() while dma active");
3260 #endif /* IOMD_DEBUG_DMAACTIVE */
3261
3262 if (dataP->fMappedBase) {
3263 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
3264 dataP->fMappedBase = 0;
3265 }
3266 // Only complete iopls that we created, which are those for TypeVirtual
3267 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
3268 for (ind = 0; ind < count; ind++)
3269 if (ioplList[ind].fIOPL) {
3270 if (dataP->fCompletionError)
3271 upl_abort(ioplList[ind].fIOPL, 0 /*!UPL_ABORT_DUMP_PAGES*/);
3272 else
3273 upl_commit(ioplList[ind].fIOPL, 0, 0);
3274 upl_deallocate(ioplList[ind].fIOPL);
3275 }
3276 } else if (kIOMemoryTypeUPL == type) {
3277 upl_set_referenced(ioplList[0].fIOPL, false);
3278 }
3279
3280 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
3281
3282 dataP->fPreparationID = kIOPreparationIDUnprepared;
3283 }
3284 }
3285 }
3286
3287 if (_prepareLock)
3288 IOLockUnlock(_prepareLock);
3289
3290 return kIOReturnSuccess;
3291 }
3292
3293 IOReturn IOGeneralMemoryDescriptor::doMap(
3294 vm_map_t __addressMap,
3295 IOVirtualAddress * __address,
3296 IOOptionBits options,
3297 IOByteCount __offset,
3298 IOByteCount __length )
3299
3300 {
3301 #ifndef __LP64__
3302 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
3303 #endif /* !__LP64__ */
3304
3305 kern_return_t err;
3306
3307 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3308 mach_vm_size_t offset = mapping->fOffset + __offset;
3309 mach_vm_size_t length = mapping->fLength;
3310
3311 IOOptionBits type = _flags & kIOMemoryTypeMask;
3312 Ranges vec = _ranges;
3313
3314 mach_vm_address_t range0Addr = 0;
3315 mach_vm_size_t range0Len = 0;
3316
3317 if ((offset >= _length) || ((offset + length) > _length))
3318 return( kIOReturnBadArgument );
3319
3320 if (vec.v)
3321 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
3322
3323 // mapping source == dest? (could be much better)
3324 if (_task
3325 && (mapping->fAddressTask == _task)
3326 && (mapping->fAddressMap == get_task_map(_task))
3327 && (options & kIOMapAnywhere)
3328 && (1 == _rangesCount)
3329 && (0 == offset)
3330 && range0Addr
3331 && (length <= range0Len))
3332 {
3333 mapping->fAddress = range0Addr;
3334 mapping->fOptions |= kIOMapStatic;
3335
3336 return( kIOReturnSuccess );
3337 }
3338
3339 if (!_memRef)
3340 {
3341 IOOptionBits createOptions = 0;
3342 if (!(kIOMapReadOnly & options))
3343 {
3344 createOptions |= kIOMemoryReferenceWrite;
3345 #if DEVELOPMENT || DEBUG
3346 if (kIODirectionOut == (kIODirectionOutIn & _flags))
3347 {
3348 OSReportWithBacktrace("warning: creating writable mapping from IOMemoryDescriptor(kIODirectionOut) - use kIOMapReadOnly or change direction");
3349 }
3350 #endif
3351 }
3352 err = memoryReferenceCreate(createOptions, &_memRef);
3353 if (kIOReturnSuccess != err) return (err);
3354 }
3355
3356 memory_object_t pager;
3357 pager = (memory_object_t) (reserved ? reserved->dp.devicePager : 0);
3358
3359 // <upl_transpose //
3360 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3361 {
3362 do
3363 {
3364 upl_t redirUPL2;
3365 vm_size_t size;
3366 int flags;
3367 unsigned int lock_count;
3368
3369 if (!_memRef || (1 != _memRef->count))
3370 {
3371 err = kIOReturnNotReadable;
3372 break;
3373 }
3374
3375 size = round_page(mapping->fLength);
3376 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3377 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3378
3379 if (KERN_SUCCESS != memory_object_iopl_request(_memRef->entries[0].entry, 0, &size, &redirUPL2,
3380 NULL, NULL,
3381 &flags))
3382 redirUPL2 = NULL;
3383
3384 for (lock_count = 0;
3385 IORecursiveLockHaveLock(gIOMemoryLock);
3386 lock_count++) {
3387 UNLOCK;
3388 }
3389 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3390 for (;
3391 lock_count;
3392 lock_count--) {
3393 LOCK;
3394 }
3395
3396 if (kIOReturnSuccess != err)
3397 {
3398 IOLog("upl_transpose(%x)\n", err);
3399 err = kIOReturnSuccess;
3400 }
3401
3402 if (redirUPL2)
3403 {
3404 upl_commit(redirUPL2, NULL, 0);
3405 upl_deallocate(redirUPL2);
3406 redirUPL2 = 0;
3407 }
3408 {
3409 // swap the memEntries since they now refer to different vm_objects
3410 IOMemoryReference * me = _memRef;
3411 _memRef = mapping->fMemory->_memRef;
3412 mapping->fMemory->_memRef = me;
3413 }
3414 if (pager)
3415 err = populateDevicePager( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3416 }
3417 while (false);
3418 }
3419 // upl_transpose> //
3420 else
3421 {
3422 err = memoryReferenceMap(_memRef, mapping->fAddressMap, offset, length, options, &mapping->fAddress);
3423
3424 if ((err == KERN_SUCCESS) && pager)
3425 {
3426 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, offset, length, options);
3427 if (err != KERN_SUCCESS)
3428 {
3429 doUnmap(mapping->fAddressMap, (IOVirtualAddress) mapping, 0);
3430 }
3431 else if (kIOMapDefaultCache == (options & kIOMapCacheMask))
3432 {
3433 mapping->fOptions |= ((_flags & kIOMemoryBufferCacheMask) >> kIOMemoryBufferCacheShift);
3434 }
3435 }
3436 }
3437
3438 return (err);
3439 }
3440
3441 IOReturn IOGeneralMemoryDescriptor::doUnmap(
3442 vm_map_t addressMap,
3443 IOVirtualAddress __address,
3444 IOByteCount __length )
3445 {
3446 return (super::doUnmap(addressMap, __address, __length));
3447 }
3448
3449 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3450
3451 #undef super
3452 #define super OSObject
3453
3454 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
3455
3456 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
3457 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
3458 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
3459 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
3460 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
3461 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
3462 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
3463 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
3464
3465 /* ex-inline function implementation */
3466 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
3467 { return( getPhysicalSegment( 0, 0 )); }
3468
3469 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3470
3471 bool IOMemoryMap::init(
3472 task_t intoTask,
3473 mach_vm_address_t toAddress,
3474 IOOptionBits _options,
3475 mach_vm_size_t _offset,
3476 mach_vm_size_t _length )
3477 {
3478 if (!intoTask)
3479 return( false);
3480
3481 if (!super::init())
3482 return(false);
3483
3484 fAddressMap = get_task_map(intoTask);
3485 if (!fAddressMap)
3486 return(false);
3487 vm_map_reference(fAddressMap);
3488
3489 fAddressTask = intoTask;
3490 fOptions = _options;
3491 fLength = _length;
3492 fOffset = _offset;
3493 fAddress = toAddress;
3494
3495 return (true);
3496 }
3497
3498 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
3499 {
3500 if (!_memory)
3501 return(false);
3502
3503 if (!fSuperMap)
3504 {
3505 if( (_offset + fLength) > _memory->getLength())
3506 return( false);
3507 fOffset = _offset;
3508 }
3509
3510 _memory->retain();
3511 if (fMemory)
3512 {
3513 if (fMemory != _memory)
3514 fMemory->removeMapping(this);
3515 fMemory->release();
3516 }
3517 fMemory = _memory;
3518
3519 return( true );
3520 }
3521
3522 IOReturn IOMemoryDescriptor::doMap(
3523 vm_map_t __addressMap,
3524 IOVirtualAddress * __address,
3525 IOOptionBits options,
3526 IOByteCount __offset,
3527 IOByteCount __length )
3528 {
3529 return (kIOReturnUnsupported);
3530 }
3531
3532 IOReturn IOMemoryDescriptor::handleFault(
3533 void * _pager,
3534 mach_vm_size_t sourceOffset,
3535 mach_vm_size_t length)
3536 {
3537 if( kIOMemoryRedirected & _flags)
3538 {
3539 #if DEBUG
3540 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3541 #endif
3542 do {
3543 SLEEP;
3544 } while( kIOMemoryRedirected & _flags );
3545 }
3546 return (kIOReturnSuccess);
3547 }
3548
3549 IOReturn IOMemoryDescriptor::populateDevicePager(
3550 void * _pager,
3551 vm_map_t addressMap,
3552 mach_vm_address_t address,
3553 mach_vm_size_t sourceOffset,
3554 mach_vm_size_t length,
3555 IOOptionBits options )
3556 {
3557 IOReturn err = kIOReturnSuccess;
3558 memory_object_t pager = (memory_object_t) _pager;
3559 mach_vm_size_t size;
3560 mach_vm_size_t bytes;
3561 mach_vm_size_t page;
3562 mach_vm_size_t pageOffset;
3563 mach_vm_size_t pagerOffset;
3564 IOPhysicalLength segLen;
3565 addr64_t physAddr;
3566
3567 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3568 assert( physAddr );
3569 pageOffset = physAddr - trunc_page_64( physAddr );
3570 pagerOffset = sourceOffset;
3571
3572 size = length + pageOffset;
3573 physAddr -= pageOffset;
3574
3575 segLen += pageOffset;
3576 bytes = size;
3577 do
3578 {
3579 // in the middle of the loop only map whole pages
3580 if( segLen >= bytes) segLen = bytes;
3581 else if (segLen != trunc_page(segLen)) err = kIOReturnVMError;
3582 if (physAddr != trunc_page_64(physAddr)) err = kIOReturnBadArgument;
3583
3584 if (kIOReturnSuccess != err) break;
3585
3586 if (reserved && reserved->dp.pagerContig)
3587 {
3588 IOPhysicalLength allLen;
3589 addr64_t allPhys;
3590
3591 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3592 assert( allPhys );
3593 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3594 }
3595 else
3596 {
3597 for( page = 0;
3598 (page < segLen) && (KERN_SUCCESS == err);
3599 page += page_size)
3600 {
3601 err = device_pager_populate_object(pager, pagerOffset,
3602 (ppnum_t)(atop_64(physAddr + page)), page_size);
3603 pagerOffset += page_size;
3604 }
3605 }
3606 assert (KERN_SUCCESS == err);
3607 if (err) break;
3608
3609 // This call to vm_fault causes an early pmap-level resolution
3610 // of the mappings created above for kernel mappings, since
3611 // faulting them in later can't take place from interrupt level.
3612 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3613 {
3614 vm_fault(addressMap,
3615 (vm_map_offset_t)trunc_page_64(address),
3616 VM_PROT_READ|VM_PROT_WRITE,
3617 FALSE, THREAD_UNINT, NULL,
3618 (vm_map_offset_t)0);
3619 }
3620
3621 sourceOffset += segLen - pageOffset;
3622 address += segLen;
3623 bytes -= segLen;
3624 pageOffset = 0;
3625 }
3626 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3627
3628 if (bytes)
3629 err = kIOReturnBadArgument;
3630
3631 return (err);
3632 }
3633
3634 IOReturn IOMemoryDescriptor::doUnmap(
3635 vm_map_t addressMap,
3636 IOVirtualAddress __address,
3637 IOByteCount __length )
3638 {
3639 IOReturn err;
3640 mach_vm_address_t address;
3641 mach_vm_size_t length;
3642
3643 if (__length)
3644 {
3645 address = __address;
3646 length = __length;
3647 }
3648 else
3649 {
3650 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3651 address = ((IOMemoryMap *) __address)->fAddress;
3652 length = ((IOMemoryMap *) __address)->fLength;
3653 }
3654
3655 if ((addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
3656 addressMap = IOPageableMapForAddress( address );
3657
3658 #if DEBUG
3659 if( kIOLogMapping & gIOKitDebug)
3660 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3661 addressMap, address, length );
3662 #endif
3663
3664 err = mach_vm_deallocate( addressMap, address, length );
3665
3666 return (err);
3667 }
3668
3669 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3670 {
3671 IOReturn err = kIOReturnSuccess;
3672 IOMemoryMap * mapping = 0;
3673 OSIterator * iter;
3674
3675 LOCK;
3676
3677 if( doRedirect)
3678 _flags |= kIOMemoryRedirected;
3679 else
3680 _flags &= ~kIOMemoryRedirected;
3681
3682 do {
3683 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3684
3685 memory_object_t pager;
3686
3687 if( reserved)
3688 pager = (memory_object_t) reserved->dp.devicePager;
3689 else
3690 pager = MACH_PORT_NULL;
3691
3692 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3693 {
3694 mapping->redirect( safeTask, doRedirect );
3695 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3696 {
3697 err = populateDevicePager(pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3698 }
3699 }
3700
3701 iter->release();
3702 }
3703 } while( false );
3704
3705 if (!doRedirect)
3706 {
3707 WAKEUP;
3708 }
3709
3710 UNLOCK;
3711
3712 #ifndef __LP64__
3713 // temporary binary compatibility
3714 IOSubMemoryDescriptor * subMem;
3715 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3716 err = subMem->redirect( safeTask, doRedirect );
3717 else
3718 err = kIOReturnSuccess;
3719 #endif /* !__LP64__ */
3720
3721 return( err );
3722 }
3723
3724 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3725 {
3726 IOReturn err = kIOReturnSuccess;
3727
3728 if( fSuperMap) {
3729 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3730 } else {
3731
3732 LOCK;
3733
3734 do
3735 {
3736 if (!fAddress)
3737 break;
3738 if (!fAddressMap)
3739 break;
3740
3741 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3742 && (0 == (fOptions & kIOMapStatic)))
3743 {
3744 IOUnmapPages( fAddressMap, fAddress, fLength );
3745 err = kIOReturnSuccess;
3746 #if DEBUG
3747 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3748 #endif
3749 }
3750 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3751 {
3752 IOOptionBits newMode;
3753 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3754 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3755 }
3756 }
3757 while (false);
3758 UNLOCK;
3759 }
3760
3761 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3762 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3763 && safeTask
3764 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3765 fMemory->redirect(safeTask, doRedirect);
3766
3767 return( err );
3768 }
3769
3770 IOReturn IOMemoryMap::unmap( void )
3771 {
3772 IOReturn err;
3773
3774 LOCK;
3775
3776 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3777 && (0 == (fOptions & kIOMapStatic))) {
3778
3779 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3780
3781 } else
3782 err = kIOReturnSuccess;
3783
3784 if (fAddressMap)
3785 {
3786 vm_map_deallocate(fAddressMap);
3787 fAddressMap = 0;
3788 }
3789
3790 fAddress = 0;
3791
3792 UNLOCK;
3793
3794 return( err );
3795 }
3796
3797 void IOMemoryMap::taskDied( void )
3798 {
3799 LOCK;
3800 if (fUserClientUnmap)
3801 unmap();
3802 if( fAddressMap) {
3803 vm_map_deallocate(fAddressMap);
3804 fAddressMap = 0;
3805 }
3806 fAddressTask = 0;
3807 fAddress = 0;
3808 UNLOCK;
3809 }
3810
3811 IOReturn IOMemoryMap::userClientUnmap( void )
3812 {
3813 fUserClientUnmap = true;
3814 return (kIOReturnSuccess);
3815 }
3816
3817 // Overload the release mechanism. All mappings must be a member
3818 // of a memory descriptor's _mappings set. This means that we
3819 // always have 2 references on a mapping. When either of these
3820 // references is released we need to free ourselves.
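// For illustration, assuming a mapping obtained from a hypothetical md->map():
// the returned IOMemoryMap is retained once for the caller and once by the
// descriptor's _mappings set, so releasing either reference invokes free().
//
//   IOMemoryMap * map = md->map();   // two references outstanding
//   ...
//   map->release();                  // releases the caller's reference; free()
//                                    // then unmaps and removes this mapping
//                                    // from the descriptor's _mappings set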
3821 void IOMemoryMap::taggedRelease(const void *tag) const
3822 {
3823 LOCK;
3824 super::taggedRelease(tag, 2);
3825 UNLOCK;
3826 }
3827
3828 void IOMemoryMap::free()
3829 {
3830 unmap();
3831
3832 if (fMemory)
3833 {
3834 LOCK;
3835 fMemory->removeMapping(this);
3836 UNLOCK;
3837 fMemory->release();
3838 }
3839
3840 if (fOwner && (fOwner != fMemory))
3841 {
3842 LOCK;
3843 fOwner->removeMapping(this);
3844 UNLOCK;
3845 }
3846
3847 if (fSuperMap)
3848 fSuperMap->release();
3849
3850 if (fRedirUPL) {
3851 upl_commit(fRedirUPL, NULL, 0);
3852 upl_deallocate(fRedirUPL);
3853 }
3854
3855 super::free();
3856 }
3857
3858 IOByteCount IOMemoryMap::getLength()
3859 {
3860 return( fLength );
3861 }
3862
3863 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3864 {
3865 #ifndef __LP64__
3866 if (fSuperMap)
3867 fSuperMap->getVirtualAddress();
3868 else if (fAddressMap
3869 && vm_map_is_64bit(fAddressMap)
3870 && (sizeof(IOVirtualAddress) < 8))
3871 {
3872 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3873 }
3874 #endif /* !__LP64__ */
3875
3876 return (fAddress);
3877 }
3878
3879 #ifndef __LP64__
3880 mach_vm_address_t IOMemoryMap::getAddress()
3881 {
3882 return( fAddress);
3883 }
3884
3885 mach_vm_size_t IOMemoryMap::getSize()
3886 {
3887 return( fLength );
3888 }
3889 #endif /* !__LP64__ */
3890
3891
3892 task_t IOMemoryMap::getAddressTask()
3893 {
3894 if( fSuperMap)
3895 return( fSuperMap->getAddressTask());
3896 else
3897 return( fAddressTask);
3898 }
3899
3900 IOOptionBits IOMemoryMap::getMapOptions()
3901 {
3902 return( fOptions);
3903 }
3904
3905 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3906 {
3907 return( fMemory );
3908 }
3909
3910 IOMemoryMap * IOMemoryMap::copyCompatible(
3911 IOMemoryMap * newMapping )
3912 {
3913 task_t task = newMapping->getAddressTask();
3914 mach_vm_address_t toAddress = newMapping->fAddress;
3915 IOOptionBits _options = newMapping->fOptions;
3916 mach_vm_size_t _offset = newMapping->fOffset;
3917 mach_vm_size_t _length = newMapping->fLength;
3918
3919 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3920 return( 0 );
3921 if( (fOptions ^ _options) & kIOMapReadOnly)
3922 return( 0 );
3923 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3924 && ((fOptions ^ _options) & kIOMapCacheMask))
3925 return( 0 );
3926
3927 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3928 return( 0 );
3929
3930 if( _offset < fOffset)
3931 return( 0 );
3932
3933 _offset -= fOffset;
3934
3935 if( (_offset + _length) > fLength)
3936 return( 0 );
3937
3938 retain();
3939 if( (fLength == _length) && (!_offset))
3940 {
3941 newMapping = this;
3942 }
3943 else
3944 {
3945 newMapping->fSuperMap = this;
3946 newMapping->fOffset = fOffset + _offset;
3947 newMapping->fAddress = fAddress + _offset;
3948 }
3949
3950 return( newMapping );
3951 }
3952
3953 IOReturn IOMemoryMap::wireRange(
3954 uint32_t options,
3955 mach_vm_size_t offset,
3956 mach_vm_size_t length)
3957 {
3958 IOReturn kr;
3959 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3960 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3961
3962 if (kIODirectionOutIn & options)
3963 {
3964 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3965 }
3966 else
3967 {
3968 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3969 }
3970
3971 return (kr);
3972 }
3973
3974
3975 IOPhysicalAddress
3976 #ifdef __LP64__
3977 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3978 #else /* !__LP64__ */
3979 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3980 #endif /* !__LP64__ */
3981 {
3982 IOPhysicalAddress address;
3983
3984 LOCK;
3985 #ifdef __LP64__
3986 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3987 #else /* !__LP64__ */
3988 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3989 #endif /* !__LP64__ */
3990 UNLOCK;
3991
3992 return( address );
3993 }
3994
3995 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3996
3997 #undef super
3998 #define super OSObject
3999
4000 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4001
4002 void IOMemoryDescriptor::initialize( void )
4003 {
4004 if( 0 == gIOMemoryLock)
4005 gIOMemoryLock = IORecursiveLockAlloc();
4006
4007 gIOLastPage = IOGetLastPageNumber();
4008 }
4009
4010 void IOMemoryDescriptor::free( void )
4011 {
4012 if( _mappings)
4013 _mappings->release();
4014
4015 super::free();
4016 }
4017
4018 IOMemoryMap * IOMemoryDescriptor::setMapping(
4019 task_t intoTask,
4020 IOVirtualAddress mapAddress,
4021 IOOptionBits options )
4022 {
4023 return (createMappingInTask( intoTask, mapAddress,
4024 options | kIOMapStatic,
4025 0, getLength() ));
4026 }
4027
4028 IOMemoryMap * IOMemoryDescriptor::map(
4029 IOOptionBits options )
4030 {
4031 return (createMappingInTask( kernel_task, 0,
4032 options | kIOMapAnywhere,
4033 0, getLength() ));
4034 }
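/*
 * Usage sketch for map() (illustrative; assumes 'md' is an IOMemoryDescriptor
 * the caller has prepared, error handling trimmed):
 *
 *     IOMemoryMap * map = md->map();        // kernel_task, kIOMapAnywhere
 *     if (map) {
 *         void * va = (void *) map->getVirtualAddress();
 *         // ... access the memory through 'va' ...
 *         map->release();                   // drops the mapping
 *     }
 */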
4035
4036 #ifndef __LP64__
4037 IOMemoryMap * IOMemoryDescriptor::map(
4038 task_t intoTask,
4039 IOVirtualAddress atAddress,
4040 IOOptionBits options,
4041 IOByteCount offset,
4042 IOByteCount length )
4043 {
4044 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
4045 {
4046 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
4047 return (0);
4048 }
4049
4050 return (createMappingInTask(intoTask, atAddress,
4051 options, offset, length));
4052 }
4053 #endif /* !__LP64__ */
4054
4055 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
4056 task_t intoTask,
4057 mach_vm_address_t atAddress,
4058 IOOptionBits options,
4059 mach_vm_size_t offset,
4060 mach_vm_size_t length)
4061 {
4062 IOMemoryMap * result;
4063 IOMemoryMap * mapping;
4064
4065 if (0 == length)
4066 length = getLength();
4067
4068 mapping = new IOMemoryMap;
4069
4070 if( mapping
4071 && !mapping->init( intoTask, atAddress,
4072 options, offset, length )) {
4073 mapping->release();
4074 mapping = 0;
4075 }
4076
4077 if (mapping)
4078 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
4079 else
4080 result = 0;
4081
4082 #if DEBUG
4083 if (!result)
4084 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
4085 this, atAddress, (uint32_t) options, offset, length);
4086 #endif
4087
4088 return (result);
4089 }
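/*
 * Usage sketch for createMappingInTask() (illustrative; 'md' and 'userTask'
 * are assumed to be a prepared descriptor and the destination task):
 *
 *     IOMemoryMap * map = md->createMappingInTask(userTask, 0,
 *                             kIOMapAnywhere | kIOMapReadOnly, 0, 0);
 *     if (map) {
 *         mach_vm_address_t uva = map->getAddress();   // address in userTask
 *         // ... hand 'uva' to the client ...
 *         map->release();
 *     }
 */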
4090
4091 #ifndef __LP64__ // there is only a 64-bit version for LP64
4092 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4093 IOOptionBits options,
4094 IOByteCount offset)
4095 {
4096 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
4097 }
4098 #endif
4099
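/*
 * redirect(newBackingMemory, ...) swaps the memory behind an existing mapping
 * (used with kIOMapUnique): access to the range is blocked by taking a UPL
 * with UPL_BLOCK_ACCESS, the pages of a physical descriptor are unmapped, the
 * mapping is re-made against the new descriptor via makeMapping() with
 * kIOMapUnique | kIOMapReference, and any held UPL is then committed to
 * unblock access.  Calling with newBackingMemory == NULL only performs the
 * blocking step; a later call with the new descriptor completes the swap.
 */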
4100 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
4101 IOOptionBits options,
4102 mach_vm_size_t offset)
4103 {
4104 IOReturn err = kIOReturnSuccess;
4105 IOMemoryDescriptor * physMem = 0;
4106
4107 LOCK;
4108
4109 if (fAddress && fAddressMap) do
4110 {
4111 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4112 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4113 {
4114 physMem = fMemory;
4115 physMem->retain();
4116 }
4117
4118 if (!fRedirUPL && fMemory->_memRef && (1 == fMemory->_memRef->count))
4119 {
4120 vm_size_t size = round_page(fLength);
4121 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
4122 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
4123 if (KERN_SUCCESS != memory_object_iopl_request(fMemory->_memRef->entries[0].entry, 0, &size, &fRedirUPL,
4124 NULL, NULL,
4125 &flags))
4126 fRedirUPL = 0;
4127
4128 if (physMem)
4129 {
4130 IOUnmapPages( fAddressMap, fAddress, fLength );
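// (the physMem->redirect() calls below are compiled out by their
//  constant-false guards)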
4131 if ((false))
4132 physMem->redirect(0, true);
4133 }
4134 }
4135
4136 if (newBackingMemory)
4137 {
4138 if (newBackingMemory != fMemory)
4139 {
4140 fOffset = 0;
4141 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
4142 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
4143 offset, fLength))
4144 err = kIOReturnError;
4145 }
4146 if (fRedirUPL)
4147 {
4148 upl_commit(fRedirUPL, NULL, 0);
4149 upl_deallocate(fRedirUPL);
4150 fRedirUPL = 0;
4151 }
4152 if ((false) && physMem)
4153 physMem->redirect(0, false);
4154 }
4155 }
4156 while (false);
4157
4158 UNLOCK;
4159
4160 if (physMem)
4161 physMem->release();
4162
4163 return (err);
4164 }
4165
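/*
 * makeMapping() consumes the IOMemoryMap passed in through '__address'
 * (kIOMap64Bit is mandatory): for kIOMapStatic it simply registers the
 * mapping; for kIOMapUnique on physical descriptors it builds a temporary
 * descriptor covering the physical range; otherwise it first tries to reuse a
 * compatible existing mapping via copyCompatible().  If nothing can be
 * reused, doMap() establishes a new VM mapping.  The returned mapping is
 * registered with the descriptor that backs it; on failure NULL is returned.
 */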
4166 IOMemoryMap * IOMemoryDescriptor::makeMapping(
4167 IOMemoryDescriptor * owner,
4168 task_t __intoTask,
4169 IOVirtualAddress __address,
4170 IOOptionBits options,
4171 IOByteCount __offset,
4172 IOByteCount __length )
4173 {
4174 #ifndef __LP64__
4175 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
4176 #endif /* !__LP64__ */
4177
4178 IOMemoryDescriptor * mapDesc = 0;
4179 IOMemoryMap * result = 0;
4180 OSIterator * iter;
4181
4182 IOMemoryMap * mapping = (IOMemoryMap *) __address;
4183 mach_vm_size_t offset = mapping->fOffset + __offset;
4184 mach_vm_size_t length = mapping->fLength;
4185
4186 mapping->fOffset = offset;
4187
4188 LOCK;
4189
4190 do
4191 {
4192 if (kIOMapStatic & options)
4193 {
4194 result = mapping;
4195 addMapping(mapping);
4196 mapping->setMemoryDescriptor(this, 0);
4197 continue;
4198 }
4199
4200 if (kIOMapUnique & options)
4201 {
4202 addr64_t phys;
4203 IOByteCount physLen;
4204
4205 // if (owner != this) continue;
4206
4207 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
4208 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
4209 {
4210 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
4211 if (!phys || (physLen < length))
4212 continue;
4213
4214 mapDesc = IOMemoryDescriptor::withAddressRange(
4215 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
4216 if (!mapDesc)
4217 continue;
4218 offset = 0;
4219 mapping->fOffset = offset;
4220 }
4221 }
4222 else
4223 {
4224 // look for a compatible existing mapping
4225 if( (iter = OSCollectionIterator::withCollection(_mappings)))
4226 {
4227 IOMemoryMap * lookMapping;
4228 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
4229 {
4230 if ((result = lookMapping->copyCompatible(mapping)))
4231 {
4232 addMapping(result);
4233 result->setMemoryDescriptor(this, offset);
4234 break;
4235 }
4236 }
4237 iter->release();
4238 }
4239 if (result || (options & kIOMapReference))
4240 {
4241 if (result != mapping)
4242 {
4243 mapping->release();
4244 mapping = NULL;
4245 }
4246 continue;
4247 }
4248 }
4249
4250 if (!mapDesc)
4251 {
4252 mapDesc = this;
4253 mapDesc->retain();
4254 }
4255 IOReturn
4256 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
4257 if (kIOReturnSuccess == kr)
4258 {
4259 result = mapping;
4260 mapDesc->addMapping(result);
4261 result->setMemoryDescriptor(mapDesc, offset);
4262 }
4263 else
4264 {
4265 mapping->release();
4266 mapping = NULL;
4267 }
4268 }
4269 while( false );
4270
4271 UNLOCK;
4272
4273 if (mapDesc)
4274 mapDesc->release();
4275
4276 return (result);
4277 }
4278
4279 void IOMemoryDescriptor::addMapping(
4280 IOMemoryMap * mapping )
4281 {
4282 if( mapping)
4283 {
4284 if( 0 == _mappings)
4285 _mappings = OSSet::withCapacity(1);
4286 if( _mappings )
4287 _mappings->setObject( mapping );
4288 }
4289 }
4290
4291 void IOMemoryDescriptor::removeMapping(
4292 IOMemoryMap * mapping )
4293 {
4294 if( _mappings)
4295 _mappings->removeObject( mapping);
4296 }
4297
4298 #ifndef __LP64__
4299 // obsolete initializers
4300 // - initWithOptions is the designated initializer
4301 bool
4302 IOMemoryDescriptor::initWithAddress(void * address,
4303 IOByteCount length,
4304 IODirection direction)
4305 {
4306 return( false );
4307 }
4308
4309 bool
4310 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4311 IOByteCount length,
4312 IODirection direction,
4313 task_t task)
4314 {
4315 return( false );
4316 }
4317
4318 bool
4319 IOMemoryDescriptor::initWithPhysicalAddress(
4320 IOPhysicalAddress address,
4321 IOByteCount length,
4322 IODirection direction )
4323 {
4324 return( false );
4325 }
4326
4327 bool
4328 IOMemoryDescriptor::initWithRanges(
4329 IOVirtualRange * ranges,
4330 UInt32 withCount,
4331 IODirection direction,
4332 task_t task,
4333 bool asReference)
4334 {
4335 return( false );
4336 }
4337
4338 bool
4339 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4340 UInt32 withCount,
4341 IODirection direction,
4342 bool asReference)
4343 {
4344 return( false );
4345 }
4346
4347 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4348 IOByteCount * lengthOfSegment)
4349 {
4350 return( 0 );
4351 }
4352 #endif /* !__LP64__ */
4353
4354 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4355
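/*
 * serialize() emits one dictionary per range, each with "address" and
 * "length" OSNumber entries, collected into an OSArray; the range data is
 * snapshotted under the lock first so no allocation happens while it is held.
 */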
4356 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4357 {
4358 OSSymbol const *keys[2];
4359 OSObject *values[2];
4360 OSArray * array;
4361
4362 struct SerData {
4363 user_addr_t address;
4364 user_size_t length;
4365 } *vcopy;
4366 unsigned int index, nRanges;
4367 bool result;
4368
4369 IOOptionBits type = _flags & kIOMemoryTypeMask;
4370
4371 if (s == NULL) return false;
4372
4373 array = OSArray::withCapacity(4);
4374 if (!array) return (false);
4375
4376 nRanges = _rangesCount;
4377 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4378 if (vcopy == 0) { array->release(); return false; } // don't leak 'array' on allocation failure
4379
4380 keys[0] = OSSymbol::withCString("address");
4381 keys[1] = OSSymbol::withCString("length");
4382
4383 result = false;
4384 values[0] = values[1] = 0;
4385
4386 // From this point on we can 'goto bail' on failure.
4387
4388 // Copy the volatile data so we don't have to allocate memory
4389 // while the lock is held.
4390 LOCK;
4391 if (nRanges == _rangesCount) {
4392 Ranges vec = _ranges;
4393 for (index = 0; index < nRanges; index++) {
4394 mach_vm_address_t addr; mach_vm_size_t len;
4395 getAddrLenForInd(addr, len, type, vec, index);
4396 vcopy[index].address = addr;
4397 vcopy[index].length = len;
4398 }
4399 } else {
4400 // The descriptor changed out from under us. Give up.
4401 UNLOCK;
4402 result = false;
4403 goto bail;
4404 }
4405 UNLOCK;
4406
4407 for (index = 0; index < nRanges; index++)
4408 {
4409 user_addr_t addr = vcopy[index].address;
4410 IOByteCount len = (IOByteCount) vcopy[index].length;
4411 values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
4412 if (values[0] == 0) {
4413 result = false;
4414 goto bail;
4415 }
4416 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4417 if (values[1] == 0) {
4418 result = false;
4419 goto bail;
4420 }
4421 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4422 if (dict == 0) {
4423 result = false;
4424 goto bail;
4425 }
4426 array->setObject(dict);
4427 dict->release();
4428 values[0]->release();
4429 values[1]->release();
4430 values[0] = values[1] = 0;
4431 }
4432
4433 result = array->serialize(s);
4434
4435 bail:
4436 if (array)
4437 array->release();
4438 if (values[0])
4439 values[0]->release();
4440 if (values[1])
4441 values[1]->release();
4442 if (keys[0])
4443 keys[0]->release();
4444 if (keys[1])
4445 keys[1]->release();
4446 if (vcopy)
4447 IOFree(vcopy, sizeof(SerData) * nRanges);
4448
4449 return result;
4450 }
4451
4452 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4453
4454 #if DEVELOPMENT || DEBUG
4455
4456 extern "C" void IOMemoryDescriptorTest(int x)
4457 {
4458 IOGeneralMemoryDescriptor * md;
4459
4460 vm_offset_t data[2];
4461 vm_size_t bsize = 16*1024*1024;
4462
4463 vm_size_t srcsize, srcoffset, mapoffset, size;
4464
4465 kern_return_t kr;
4466
4467 kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
4468 vm_inherit(kernel_map, data[0] + 1*4096, 4096, VM_INHERIT_NONE);
4469 vm_inherit(kernel_map, data[0] + 16*4096, 4096, VM_INHERIT_NONE);
4470
4471 kprintf("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);
4472
4473 uint32_t idx, offidx;
4474 for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
4475 {
4476 ((uint32_t*)data[0])[idx] = idx;
4477 }
4478
4479 for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 1) + 0x40c))
4480 {
4481 for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 1) + 0x3fc))
4482 {
4483 IOAddressRange ranges[3];
4484 uint32_t rangeCount = 1;
4485
4486 bzero(&ranges[0], sizeof(ranges));
4487 ranges[0].address = data[0] + srcoffset;
4488 ranges[0].length = srcsize;
4489
4490 if (srcsize > 5*page_size)
4491 {
4492 ranges[0].length = 7634;
4493 ranges[1].length = 9870;
4494 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
4495 ranges[1].address = ranges[0].address + ranges[0].length;
4496 ranges[2].address = ranges[1].address + ranges[1].length;
4497 rangeCount = 3;
4498 }
4499 else if ((srcsize > 2*page_size) && !(page_mask & srcoffset))
4500 {
4501 ranges[0].length = 4096;
4502 ranges[1].length = 4096;
4503 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
4504 ranges[0].address = data[0] + srcoffset + 4096;
4505 ranges[1].address = data[0] + srcoffset;
4506 ranges[2].address = ranges[0].address + ranges[0].length;
4507 rangeCount = 3;
4508 }
4509
4510 md = OSDynamicCast(IOGeneralMemoryDescriptor,
4511 IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
4512 assert(md);
4513
4514 kprintf("IOMemoryReferenceCreate [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
4515 (long) srcsize, (long) srcoffset,
4516 (long long) ranges[0].address - data[0], (long long) ranges[0].length,
4517 (long long) ranges[1].address - data[0], (long long) ranges[1].length,
4518 (long long) ranges[2].address - data[0], (long long) ranges[2].length);
4519
4520 if (kIOReturnSuccess == kr)
4521 {
4522 for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
4523 {
4524 for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 1) + 0x20))
4525 {
4526 IOMemoryMap * map;
4527 mach_vm_address_t addr = 0;
4528 uint32_t data;
4529
4530 kprintf("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
4531
4532 map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
4533 if (map) addr = map->getAddress();
4534 else kr = kIOReturnError;
4535
4536 kprintf(">mapRef 0x%x %llx\n", kr, addr);
4537
4538 if (kIOReturnSuccess != kr) break;
4539 kr = md->prepare();
4540 if (kIOReturnSuccess != kr)
4541 {
4542 kprintf("prepare() fail 0x%x\n", kr);
4543 break;
4544 }
4545 for (idx = 0; idx < size; idx += sizeof(uint32_t))
4546 {
4547 offidx = (idx + mapoffset + srcoffset);
4548 if ((srcsize <= 5*page_size) && (srcsize > 2*page_size) && !(page_mask & srcoffset))
4549 {
4550 if (offidx < 8192) offidx ^= 0x1000;
4551 }
4552 offidx /= sizeof(uint32_t);
4553
4554 if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
4555 {
4556 kprintf("vm mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4557 kr = kIOReturnBadMedia;
4558 }
4559 else
4560 {
4561 if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
4562 if (offidx != data)
4563 {
4564 kprintf("phys mismatch @ 0x%x, 0x%lx, 0x%lx, \n", idx, (long) srcoffset, (long) mapoffset);
4565 kr = kIOReturnBadMedia;
4566 }
4567 }
4568 }
4569 md->complete();
4570 map->release();
4571 kprintf("unmapRef %llx\n", addr);
4572 }
4573 if (kIOReturnSuccess != kr) break;
4574 }
4575 }
4576 if (kIOReturnSuccess != kr) break;
4577 }
4578 if (kIOReturnSuccess != kr) break;
4579 }
4580
4581 if (kIOReturnSuccess != kr) kprintf("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
4582 (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
4583
4584 vm_deallocate(kernel_map, data[0], bsize);
4585 // vm_deallocate(kernel_map, data[1], size);
4586 }
4587
4588 #endif /* DEVELOPMENT || DEBUG */
4589
4590 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4591
4592 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4593 #ifdef __LP64__
4594 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4595 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4596 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4597 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4598 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4599 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4600 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4601 #else /* !__LP64__ */
4602 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4603 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4604 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4605 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4606 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4607 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4608 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4609 #endif /* !__LP64__ */
4610 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4611 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4612 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4613 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4614 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4615 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4616 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4617 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4618
4619 /* ex-inline function implementation */
4620 IOPhysicalAddress
4621 IOMemoryDescriptor::getPhysicalAddress()
4622 { return( getPhysicalSegment( 0, 0 )); }