1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53
54 #include <libkern/c++/OSContainers.h>
55 #include <libkern/c++/OSDictionary.h>
56 #include <libkern/c++/OSArray.h>
57 #include <libkern/c++/OSSymbol.h>
58 #include <libkern/c++/OSNumber.h>
59
60 #include <sys/uio.h>
61
62 __BEGIN_DECLS
63 #include <vm/pmap.h>
64 #include <vm/vm_pageout.h>
65 #include <mach/memory_object_types.h>
66 #include <device/device_port.h>
67
68 #include <mach/vm_prot.h>
69 #include <mach/mach_vm.h>
70 #include <vm/vm_fault.h>
71 #include <vm/vm_protos.h>
72
73 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
74 extern void ipc_port_release_send(ipc_port_t port);
75
76 kern_return_t
77 memory_object_iopl_request(
78 ipc_port_t port,
79 memory_object_offset_t offset,
80 vm_size_t *upl_size,
81 upl_t *upl_ptr,
82 upl_page_info_array_t user_page_list,
83 unsigned int *page_list_count,
84 int *flags);
85
86 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
87
88 __END_DECLS
89
90 #define kIOMaximumMappedIOByteCount (512*1024*1024)
91
92 static IOMapper * gIOSystemMapper = NULL;
93
94 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
95
96 ppnum_t gIOLastPage;
97
98 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
99
100 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
101
102 #define super IOMemoryDescriptor
103
104 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
105
106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107
108 static IORecursiveLock * gIOMemoryLock;
109
110 #define LOCK IORecursiveLockLock( gIOMemoryLock)
111 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
112 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
113 #define WAKEUP \
114 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
115
116 #if 0
117 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
118 #else
119 #define DEBG(fmt, args...) {}
120 #endif
121
122 #define IOMD_DEBUG_DMAACTIVE 1
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 // Some data structures and accessor macros used by the initWithOptions
127 // function.
128
129 enum ioPLBlockFlags {
130 kIOPLOnDevice = 0x00000001,
131 kIOPLExternUPL = 0x00000002,
132 };
133
134 struct typePersMDData
135 {
136 const IOGeneralMemoryDescriptor *fMD;
137 ipc_port_t fMemEntry;
138 };
139
140 struct ioPLBlock {
141 upl_t fIOPL;
142 vm_address_t fPageInfo; // Pointer to page list or index into it
143 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
144 ppnum_t fMappedBase; // Page number of first page in this iopl
145 unsigned int fPageOffset; // Offset within first page of iopl
146 unsigned int fFlags; // Flags
147 };
148
149 struct ioGMDData {
150 IOMapper *fMapper;
151 uint64_t fPreparationID;
152 unsigned int fPageCnt;
153 #if __LP64__
154 // align arrays to 8 bytes so following macros work
155 unsigned int fPad;
156 #endif
157 upl_page_info_t fPageList[1]; /* variable length */
158 ioPLBlock fBlocks[1]; /* variable length */
159 };
160
161 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
162 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
163 #define getNumIOPL(osd, d) \
164 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
165 #define getPageList(d) (&(d->fPageList[0]))
166 #define computeDataSize(p, u) \
167 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
168
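// The OSData built with these macros has a variable-length layout: the
// ioGMDData header, then fPageCnt upl_page_info_t entries, then the ioPLBlock
// array. A small illustrative sketch follows (kept out of the build with
// #if 0; the function name and values are hypothetical):
#if 0
static void ExampleGMDDataLayout(OSData * memoryEntries)
{
    // Size needed for 3 pages of page info plus 2 iopl blocks.
    unsigned int dataSize = computeDataSize(3, 2);

    ioGMDData *       dataP = getDataP(memoryEntries);          // header at the start
    upl_page_info_t * pages = getPageList(dataP);               // &dataP->fPageList[0]
    ioPLBlock *       iopls = getIOPLList(dataP);               // after fPageList[fPageCnt]
    UInt              nIOPL = getNumIOPL(memoryEntries, dataP); // from the remaining bytes

    (void) dataSize; (void) pages; (void) iopls; (void) nIOPL;
}
#endif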
169
170 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
171
172 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
173
174
175 extern "C" {
176
177 kern_return_t device_data_action(
178 uintptr_t device_handle,
179 ipc_port_t device_pager,
180 vm_prot_t protection,
181 vm_object_offset_t offset,
182 vm_size_t size)
183 {
184 kern_return_t kr;
185 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
186 IOMemoryDescriptor * memDesc;
187
188 LOCK;
189 memDesc = ref->dp.memory;
190 if( memDesc)
191 {
192 memDesc->retain();
193 kr = memDesc->handleFault( device_pager, 0, 0,
194 offset, size, kIOMapDefaultCache /*?*/);
195 memDesc->release();
196 }
197 else
198 kr = KERN_ABORTED;
199 UNLOCK;
200
201 return( kr );
202 }
203
204 kern_return_t device_close(
205 uintptr_t device_handle)
206 {
207 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
208
209 IODelete( ref, IOMemoryDescriptorReserved, 1 );
210
211 return( kIOReturnSuccess );
212 }
213 }; // end extern "C"
214
215 // Note this inline function uses C++ reference arguments to return values.
216 // This means that pointers are not passed, and callers never need to check
217 // for NULL, since a NULL reference is illegal.
218 static inline void
219 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
220 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
221 {
222 assert(kIOMemoryTypeUIO == type
223 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
224 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
225 if (kIOMemoryTypeUIO == type) {
226 user_size_t us;
227 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
228 }
229 #ifndef __LP64__
230 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
231 IOAddressRange cur = r.v64[ind];
232 addr = cur.address;
233 len = cur.length;
234 }
235 #endif /* !__LP64__ */
236 else {
237 IOVirtualRange cur = r.v[ind];
238 addr = cur.address;
239 len = cur.length;
240 }
241 }
242
243 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
244
245 IOMemoryDescriptor *
246 IOMemoryDescriptor::withAddress(void * address,
247 IOByteCount length,
248 IODirection direction)
249 {
250 return IOMemoryDescriptor::
251 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
252 }
253
254 #ifndef __LP64__
255 IOMemoryDescriptor *
256 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
257 IOByteCount length,
258 IODirection direction,
259 task_t task)
260 {
261 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
262 if (that)
263 {
264 if (that->initWithAddress(address, length, direction, task))
265 return that;
266
267 that->release();
268 }
269 return 0;
270 }
271 #endif /* !__LP64__ */
272
273 IOMemoryDescriptor *
274 IOMemoryDescriptor::withPhysicalAddress(
275 IOPhysicalAddress address,
276 IOByteCount length,
277 IODirection direction )
278 {
279 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
280 }
281
282 #ifndef __LP64__
283 IOMemoryDescriptor *
284 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
285 UInt32 withCount,
286 IODirection direction,
287 task_t task,
288 bool asReference)
289 {
290 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
291 if (that)
292 {
293 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
294 return that;
295
296 that->release();
297 }
298 return 0;
299 }
300 #endif /* !__LP64__ */
301
302 IOMemoryDescriptor *
303 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
304 mach_vm_size_t length,
305 IOOptionBits options,
306 task_t task)
307 {
308 IOAddressRange range = { address, length };
309 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
310 }
311
312 IOMemoryDescriptor *
313 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
314 UInt32 rangeCount,
315 IOOptionBits options,
316 task_t task)
317 {
318 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
319 if (that)
320 {
321 if (task)
322 options |= kIOMemoryTypeVirtual64;
323 else
324 options |= kIOMemoryTypePhysical64;
325
326 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
327 return that;
328
329 that->release();
330 }
331
332 return 0;
333 }
334
335
336 /*
337 * withOptions:
338 *
339 * Create a new IOMemoryDescriptor. The buffer is made up of several
340 * virtual address ranges, from a given task.
341 *
342 * Passing the ranges as a reference will avoid an extra allocation.
343 */
344 IOMemoryDescriptor *
345 IOMemoryDescriptor::withOptions(void * buffers,
346 UInt32 count,
347 UInt32 offset,
348 task_t task,
349 IOOptionBits opts,
350 IOMapper * mapper)
351 {
352 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
353
354 if (self
355 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
356 {
357 self->release();
358 return 0;
359 }
360
361 return self;
362 }
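
// A minimal, illustrative sketch (kept out of the build with #if 0) of how a
// client typically uses the factory methods above. The function name and the
// user task/address/length parameters are hypothetical placeholders.
#if 0
static IOReturn
ExampleDescribeUserBuffer(task_t userTask, mach_vm_address_t userAddr, mach_vm_size_t userLen)
{
    // Describe a buffer that lives in a (possibly 64-bit) user task.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                userAddr, userLen, kIODirectionOutIn, userTask);
    if (!md)
        return kIOReturnNoMemory;

    // Wire the pages down before any physical access or DMA.
    IOReturn ret = md->prepare();
    if (kIOReturnSuccess == ret)
    {
        // ... use the descriptor (readBytes, getPhysicalSegment, DMA, ...) ...
        md->complete();            // balance prepare()
    }
    md->release();                 // drop the creation reference
    return ret;
}
#endif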
363
364 bool IOMemoryDescriptor::initWithOptions(void * buffers,
365 UInt32 count,
366 UInt32 offset,
367 task_t task,
368 IOOptionBits options,
369 IOMapper * mapper)
370 {
371 return( false );
372 }
373
374 #ifndef __LP64__
375 IOMemoryDescriptor *
376 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
377 UInt32 withCount,
378 IODirection direction,
379 bool asReference)
380 {
381 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
382 if (that)
383 {
384 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
385 return that;
386
387 that->release();
388 }
389 return 0;
390 }
391
392 IOMemoryDescriptor *
393 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
394 IOByteCount offset,
395 IOByteCount length,
396 IODirection direction)
397 {
398 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
399 }
400 #endif /* !__LP64__ */
401
402 IOMemoryDescriptor *
403 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
404 {
405 IOGeneralMemoryDescriptor *origGenMD =
406 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
407
408 if (origGenMD)
409 return IOGeneralMemoryDescriptor::
410 withPersistentMemoryDescriptor(origGenMD);
411 else
412 return 0;
413 }
414
415 IOMemoryDescriptor *
416 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
417 {
418 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
419
420 if (!sharedMem)
421 return 0;
422
423 if (sharedMem == originalMD->_memEntry) {
424 originalMD->retain(); // Add a new reference to ourselves
425 ipc_port_release_send(sharedMem); // Remove extra send right
426 return originalMD;
427 }
428
429 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
430 typePersMDData initData = { originalMD, sharedMem };
431
432 if (self
433 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
434 self->release();
435 self = 0;
436 }
437 return self;
438 }
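
// An illustrative sketch (not compiled) of the persistent-descriptor path
// above: a descriptor created with kIOMemoryPersistent owns a named entry,
// and withPersistentMemoryDescriptor() returns either the original (retained)
// or a new descriptor backed by the same named entry. Names and parameters
// here are hypothetical.
#if 0
static IOMemoryDescriptor *
ExampleMakePersistentAlias(task_t userTask, mach_vm_address_t userAddr, mach_vm_size_t userLen)
{
    IOMemoryDescriptor * original = IOMemoryDescriptor::withAddressRange(
            userAddr, userLen, kIODirectionOutIn | kIOMemoryPersistent, userTask);
    if (!original)
        return 0;

    IOMemoryDescriptor * alias =
        IOMemoryDescriptor::withPersistentMemoryDescriptor(original);

    original->release();           // 'alias' carries its own reference
    return alias;
}
#endif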
439
440 void *IOGeneralMemoryDescriptor::createNamedEntry()
441 {
442 kern_return_t error;
443 ipc_port_t sharedMem;
444
445 IOOptionBits type = _flags & kIOMemoryTypeMask;
446
447 user_addr_t range0Addr;
448 IOByteCount range0Len;
449 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
450 range0Addr = trunc_page_64(range0Addr);
451
452 vm_size_t size = ptoa_32(_pages);
453 vm_address_t kernelPage = (vm_address_t) range0Addr;
454
455 vm_map_t theMap = ((_task == kernel_task)
456 && (kIOMemoryBufferPageable & _flags))
457 ? IOPageableMapForAddress(kernelPage)
458 : get_task_map(_task);
459
460 memory_object_size_t actualSize = size;
461 vm_prot_t prot = VM_PROT_READ;
462 if (kIODirectionOut != (kIODirectionOutIn & _flags))
463 prot |= VM_PROT_WRITE;
464
465 if (_memEntry)
466 prot |= MAP_MEM_NAMED_REUSE;
467
468 error = mach_make_memory_entry_64(theMap,
469 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
470
471 if (KERN_SUCCESS == error) {
472 if (actualSize == size) {
473 return sharedMem;
474 } else {
475 #if IOASSERT
476 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
477 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
478 #endif
479 ipc_port_release_send( sharedMem );
480 }
481 }
482
483 return MACH_PORT_NULL;
484 }
485
486 #ifndef __LP64__
487 bool
488 IOGeneralMemoryDescriptor::initWithAddress(void * address,
489 IOByteCount withLength,
490 IODirection withDirection)
491 {
492 _singleRange.v.address = (vm_offset_t) address;
493 _singleRange.v.length = withLength;
494
495 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
496 }
497
498 bool
499 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
500 IOByteCount withLength,
501 IODirection withDirection,
502 task_t withTask)
503 {
504 _singleRange.v.address = address;
505 _singleRange.v.length = withLength;
506
507 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
508 }
509
510 bool
511 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
512 IOPhysicalAddress address,
513 IOByteCount withLength,
514 IODirection withDirection )
515 {
516 _singleRange.p.address = address;
517 _singleRange.p.length = withLength;
518
519 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
520 }
521
522 bool
523 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
524 IOPhysicalRange * ranges,
525 UInt32 count,
526 IODirection direction,
527 bool reference)
528 {
529 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
530
531 if (reference)
532 mdOpts |= kIOMemoryAsReference;
533
534 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
535 }
536
537 bool
538 IOGeneralMemoryDescriptor::initWithRanges(
539 IOVirtualRange * ranges,
540 UInt32 count,
541 IODirection direction,
542 task_t task,
543 bool reference)
544 {
545 IOOptionBits mdOpts = direction;
546
547 if (reference)
548 mdOpts |= kIOMemoryAsReference;
549
550 if (task) {
551 mdOpts |= kIOMemoryTypeVirtual;
552
553 // Auto-prepare if this is a kernel memory descriptor as very few
554 // clients bother to prepare() kernel memory.
555 // But it was not enforced so what are you going to do?
556 if (task == kernel_task)
557 mdOpts |= kIOMemoryAutoPrepare;
558 }
559 else
560 mdOpts |= kIOMemoryTypePhysical;
561
562 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
563 }
564 #endif /* !__LP64__ */
565
566 /*
567 * initWithOptions:
568 *
569 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
570 * address ranges from a given task, several physical ranges, a UPL from the
571 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
572 *
573 * Passing the ranges as a reference will avoid an extra allocation.
574 *
575 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
576 * existing instance -- note this behavior is not commonly supported in other
577 * I/O Kit classes, although it is supported here.
578 */
579
580 bool
581 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
582 UInt32 count,
583 UInt32 offset,
584 task_t task,
585 IOOptionBits options,
586 IOMapper * mapper)
587 {
588 IOOptionBits type = options & kIOMemoryTypeMask;
589
590 #ifndef __LP64__
591 if (task
592 && (kIOMemoryTypeVirtual == type)
593 && vm_map_is_64bit(get_task_map(task))
594 && ((IOVirtualRange *) buffers)->address)
595 {
596 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
597 return false;
598 }
599 #endif /* !__LP64__ */
600
601 // Grab the original MD's configuration data to initialise the
602 // arguments to this function.
603 if (kIOMemoryTypePersistentMD == type) {
604
605 typePersMDData *initData = (typePersMDData *) buffers;
606 const IOGeneralMemoryDescriptor *orig = initData->fMD;
607 ioGMDData *dataP = getDataP(orig->_memoryEntries);
608
609 // Only accept persistent memory descriptors with valid dataP data.
610 assert(orig->_rangesCount == 1);
611 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
612 return false;
613
614 _memEntry = initData->fMemEntry; // Grab the new named entry
615 options = orig->_flags & ~kIOMemoryAsReference;
616 type = options & kIOMemoryTypeMask;
617 buffers = orig->_ranges.v;
618 count = orig->_rangesCount;
619
620 // Now grab the original task and whatever mapper was previously used
621 task = orig->_task;
622 mapper = dataP->fMapper;
623
624 // We are ready to go through the original initialisation now
625 }
626
627 switch (type) {
628 case kIOMemoryTypeUIO:
629 case kIOMemoryTypeVirtual:
630 #ifndef __LP64__
631 case kIOMemoryTypeVirtual64:
632 #endif /* !__LP64__ */
633 assert(task);
634 if (!task)
635 return false;
636 break;
637
638 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
639 #ifndef __LP64__
640 case kIOMemoryTypePhysical64:
641 #endif /* !__LP64__ */
642 case kIOMemoryTypeUPL:
643 assert(!task);
644 break;
645 default:
646 return false; /* bad argument */
647 }
648
649 assert(buffers);
650 assert(count);
651
652 /*
653 * We can check the _initialized instance variable before having ever set
654 * it to an initial value because I/O Kit guarantees that all our instance
655 * variables are zeroed on an object's allocation.
656 */
657
658 if (_initialized) {
659 /*
660 * An existing memory descriptor is being retargeted to point to
661 * somewhere else. Clean up our present state.
662 */
663 IOOptionBits type = _flags & kIOMemoryTypeMask;
664 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
665 {
666 while (_wireCount)
667 complete();
668 }
669 if (_ranges.v && !(kIOMemoryAsReference & _flags))
670 {
671 if (kIOMemoryTypeUIO == type)
672 uio_free((uio_t) _ranges.v);
673 #ifndef __LP64__
674 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
675 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
676 #endif /* !__LP64__ */
677 else
678 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
679 }
680
681 if (_memEntry)
682 {
683 ipc_port_release_send((ipc_port_t) _memEntry);
684 _memEntry = 0;
685 }
686 if (_mappings)
687 _mappings->flushCollection();
688 }
689 else {
690 if (!super::init())
691 return false;
692 _initialized = true;
693 }
694
695 // Grab the appropriate mapper
696 if (kIOMemoryMapperNone & options)
697 mapper = 0; // No Mapper
698 else if (mapper == kIOMapperSystem) {
699 IOMapper::checkForSystemMapper();
700 gIOSystemMapper = mapper = IOMapper::gSystem;
701 }
702
703 // Temp binary compatibility for kIOMemoryThreadSafe
704 if (kIOMemoryReserved6156215 & options)
705 {
706 options &= ~kIOMemoryReserved6156215;
707 options |= kIOMemoryThreadSafe;
708 }
709 // Remove the dynamic internal use flags from the initial setting
710 options &= ~(kIOMemoryPreparedReadOnly);
711 _flags = options;
712 _task = task;
713
714 #ifndef __LP64__
715 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
716 #endif /* !__LP64__ */
717
718 __iomd_reservedA = 0;
719 __iomd_reservedB = 0;
720 _highestPage = 0;
721
722 if (kIOMemoryThreadSafe & options)
723 {
724 if (!_prepareLock)
725 _prepareLock = IOLockAlloc();
726 }
727 else if (_prepareLock)
728 {
729 IOLockFree(_prepareLock);
730 _prepareLock = NULL;
731 }
732
733 if (kIOMemoryTypeUPL == type) {
734
735 ioGMDData *dataP;
736 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
737
738 if (!_memoryEntries) {
739 _memoryEntries = OSData::withCapacity(dataSize);
740 if (!_memoryEntries)
741 return false;
742 }
743 else if (!_memoryEntries->initWithCapacity(dataSize))
744 return false;
745
746 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
747 dataP = getDataP(_memoryEntries);
748 dataP->fMapper = mapper;
749 dataP->fPageCnt = 0;
750
751 // _wireCount++; // UPLs start out life wired
752
753 _length = count;
754 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
755
756 ioPLBlock iopl;
757 iopl.fIOPL = (upl_t) buffers;
758 upl_set_referenced(iopl.fIOPL, true);
759 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
760
761 if (upl_get_size(iopl.fIOPL) < (count + offset))
762 panic("short external upl");
763
764 // Set the flag kIOPLOnDevice, conveniently equal to 1
765 iopl.fFlags = pageList->device | kIOPLExternUPL;
766 iopl.fIOMDOffset = 0;
767
768 _highestPage = upl_get_highest_page(iopl.fIOPL);
769
770 if (!pageList->device) {
771 // Pre-compute the offset into the UPL's page list
772 pageList = &pageList[atop_32(offset)];
773 offset &= PAGE_MASK;
774 if (mapper) {
775 iopl.fMappedBase = mapper->iovmAlloc(_pages);
776 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
777 }
778 else
779 iopl.fMappedBase = 0;
780 }
781 else
782 iopl.fMappedBase = 0;
783 iopl.fPageInfo = (vm_address_t) pageList;
784 iopl.fPageOffset = offset;
785
786 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
787 }
788 else {
789 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
790 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
791
792 // Initialize the memory descriptor
793 if (options & kIOMemoryAsReference) {
794 #ifndef __LP64__
795 _rangesIsAllocated = false;
796 #endif /* !__LP64__ */
797
798 // Hack assignment to get the buffer arg into _ranges.
799 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
800 // work, C++ sigh.
801 // This also initialises the uio & physical ranges.
802 _ranges.v = (IOVirtualRange *) buffers;
803 }
804 else {
805 #ifndef __LP64__
806 _rangesIsAllocated = true;
807 #endif /* !__LP64__ */
808 switch (type)
809 {
810 case kIOMemoryTypeUIO:
811 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
812 break;
813
814 #ifndef __LP64__
815 case kIOMemoryTypeVirtual64:
816 case kIOMemoryTypePhysical64:
817 if (count == 1
818 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
819 ) {
820 if (kIOMemoryTypeVirtual64 == type)
821 type = kIOMemoryTypeVirtual;
822 else
823 type = kIOMemoryTypePhysical;
824 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
825 _rangesIsAllocated = false;
826 _ranges.v = &_singleRange.v;
827 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
828 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
829 break;
830 }
831 _ranges.v64 = IONew(IOAddressRange, count);
832 if (!_ranges.v64)
833 return false;
834 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
835 break;
836 #endif /* !__LP64__ */
837 case kIOMemoryTypeVirtual:
838 case kIOMemoryTypePhysical:
839 if (count == 1) {
840 _flags |= kIOMemoryAsReference;
841 #ifndef __LP64__
842 _rangesIsAllocated = false;
843 #endif /* !__LP64__ */
844 _ranges.v = &_singleRange.v;
845 } else {
846 _ranges.v = IONew(IOVirtualRange, count);
847 if (!_ranges.v)
848 return false;
849 }
850 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
851 break;
852 }
853 }
854
855 // Compute total length, page count and highest page over the vector of ranges
856 Ranges vec = _ranges;
857 UInt32 length = 0;
858 UInt32 pages = 0;
859 for (unsigned ind = 0; ind < count; ind++) {
860 user_addr_t addr;
861 IOPhysicalLength len;
862
863 // addr & len are returned by this function
864 getAddrLenForInd(addr, len, type, vec, ind);
865 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
866 len += length;
867 assert(len >= length); // Check for 32 bit wrap around
868 length = len;
869
870 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
871 {
872 ppnum_t highPage = atop_64(addr + len - 1);
873 if (highPage > _highestPage)
874 _highestPage = highPage;
875 }
876 }
877 _length = length;
878 _pages = pages;
879 _rangesCount = count;
880
881 // Auto-prepare memory at creation time.
882 // Implied completion when descriptor is freed
883 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
884 _wireCount++; // Physical MDs are, by definition, wired
885 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
886 ioGMDData *dataP;
887 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
888
889 if (!_memoryEntries) {
890 _memoryEntries = OSData::withCapacity(dataSize);
891 if (!_memoryEntries)
892 return false;
893 }
894 else if (!_memoryEntries->initWithCapacity(dataSize))
895 return false;
896
897 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
898 dataP = getDataP(_memoryEntries);
899 dataP->fMapper = mapper;
900 dataP->fPageCnt = _pages;
901
902 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
903 _memEntry = createNamedEntry();
904
905 if ((_flags & kIOMemoryAutoPrepare)
906 && prepare() != kIOReturnSuccess)
907 return false;
908 }
909 }
910
911 return true;
912 }
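
// An illustrative sketch (not compiled) of the re-use behaviour described in
// the initWithOptions() comment above: an existing IOGeneralMemoryDescriptor
// can be retargeted at new ranges by calling initWithOptions() again. The
// helper name and parameters are hypothetical.
#if 0
static bool
ExampleRetarget(IOGeneralMemoryDescriptor * md, task_t task,
                IOAddressRange * newRanges, UInt32 newCount)
{
    // Any outstanding wiring is completed and the old ranges are released
    // inside initWithOptions() before the new ranges are adopted.
    return md->initWithOptions(newRanges, newCount, 0, task,
                               kIOMemoryTypeVirtual64 | kIODirectionOutIn,
                               /* mapper */ 0);
}
#endif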
913
914 /*
915 * free
916 *
917 * Free resources.
918 */
919 void IOGeneralMemoryDescriptor::free()
920 {
921 IOOptionBits type = _flags & kIOMemoryTypeMask;
922
923 if( reserved)
924 {
925 LOCK;
926 reserved->dp.memory = 0;
927 UNLOCK;
928 }
929
930 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
931 {
932 while (_wireCount)
933 complete();
934 }
935 if (_memoryEntries)
936 _memoryEntries->release();
937
938 if (_ranges.v && !(kIOMemoryAsReference & _flags))
939 {
940 if (kIOMemoryTypeUIO == type)
941 uio_free((uio_t) _ranges.v);
942 #ifndef __LP64__
943 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
944 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
945 #endif /* !__LP64__ */
946 else
947 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
948
949 _ranges.v = NULL;
950 }
951
952 if (reserved)
953 {
954 if (reserved->dp.devicePager)
955 {
956 // memEntry holds a ref on the device pager which owns reserved
957 // (IOMemoryDescriptorReserved) so no reserved access after this point
958 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
959 }
960 else
961 IODelete(reserved, IOMemoryDescriptorReserved, 1);
962 reserved = NULL;
963 }
964
965 if (_memEntry)
966 ipc_port_release_send( (ipc_port_t) _memEntry );
967
968 if (_prepareLock)
969 IOLockFree(_prepareLock);
970
971 super::free();
972 }
973
974 #ifndef __LP64__
975 void IOGeneralMemoryDescriptor::unmapFromKernel()
976 {
977 panic("IOGMD::unmapFromKernel deprecated");
978 }
979
980 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
981 {
982 panic("IOGMD::mapIntoKernel deprecated");
983 }
984 #endif /* !__LP64__ */
985
986 /*
987 * getDirection:
988 *
989 * Get the direction of the transfer.
990 */
991 IODirection IOMemoryDescriptor::getDirection() const
992 {
993 #ifndef __LP64__
994 if (_direction)
995 return _direction;
996 #endif /* !__LP64__ */
997 return (IODirection) (_flags & kIOMemoryDirectionMask);
998 }
999
1000 /*
1001 * getLength:
1002 *
1003 * Get the length of the transfer (over all ranges).
1004 */
1005 IOByteCount IOMemoryDescriptor::getLength() const
1006 {
1007 return _length;
1008 }
1009
1010 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1011 {
1012 _tag = tag;
1013 }
1014
1015 IOOptionBits IOMemoryDescriptor::getTag( void )
1016 {
1017 return( _tag);
1018 }
1019
1020 #ifndef __LP64__
1021 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1022 IOPhysicalAddress
1023 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1024 {
1025 addr64_t physAddr = 0;
1026
1027 if( prepare() == kIOReturnSuccess) {
1028 physAddr = getPhysicalSegment64( offset, length );
1029 complete();
1030 }
1031
1032 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1033 }
1034 #endif /* !__LP64__ */
1035
1036 IOByteCount IOMemoryDescriptor::readBytes
1037 (IOByteCount offset, void *bytes, IOByteCount length)
1038 {
1039 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1040 IOByteCount remaining;
1041
1042 // Assert that this entire I/O is within the available range
1043 assert(offset < _length);
1044 assert(offset + length <= _length);
1045 if (offset >= _length) {
1046 return 0;
1047 }
1048
1049 if (kIOMemoryThreadSafe & _flags)
1050 LOCK;
1051
1052 remaining = length = min(length, _length - offset);
1053 while (remaining) { // (process another target segment?)
1054 addr64_t srcAddr64;
1055 IOByteCount srcLen;
1056
1057 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1058 if (!srcAddr64)
1059 break;
1060
1061 // Clip segment length to remaining
1062 if (srcLen > remaining)
1063 srcLen = remaining;
1064
1065 copypv(srcAddr64, dstAddr, srcLen,
1066 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1067
1068 dstAddr += srcLen;
1069 offset += srcLen;
1070 remaining -= srcLen;
1071 }
1072
1073 if (kIOMemoryThreadSafe & _flags)
1074 UNLOCK;
1075
1076 assert(!remaining);
1077
1078 return length - remaining;
1079 }
1080
1081 IOByteCount IOMemoryDescriptor::writeBytes
1082 (IOByteCount offset, const void *bytes, IOByteCount length)
1083 {
1084 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1085 IOByteCount remaining;
1086
1087 // Assert that this entire I/O is within the available range
1088 assert(offset < _length);
1089 assert(offset + length <= _length);
1090
1091 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1092
1093 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1094 return 0;
1095 }
1096
1097 if (kIOMemoryThreadSafe & _flags)
1098 LOCK;
1099
1100 remaining = length = min(length, _length - offset);
1101 while (remaining) { // (process another target segment?)
1102 addr64_t dstAddr64;
1103 IOByteCount dstLen;
1104
1105 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1106 if (!dstAddr64)
1107 break;
1108
1109 // Clip segment length to remaining
1110 if (dstLen > remaining)
1111 dstLen = remaining;
1112
1113 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1114 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1115
1116 srcAddr += dstLen;
1117 offset += dstLen;
1118 remaining -= dstLen;
1119 }
1120
1121 if (kIOMemoryThreadSafe & _flags)
1122 UNLOCK;
1123
1124 assert(!remaining);
1125
1126 return length - remaining;
1127 }
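
// An illustrative sketch (not compiled) of readBytes()/writeBytes() usage:
// copy the start of a descriptor into a kernel buffer and write it back.
// The helper name and buffer size are hypothetical.
#if 0
static IOReturn
ExampleCopyInOut(IOMemoryDescriptor * md)
{
    enum { kBufLen = 256 };
    char buf[kBufLen];

    IOReturn ret = md->prepare();      // wire before touching physical pages
    if (kIOReturnSuccess != ret)
        return ret;

    IOByteCount len  = min((IOByteCount) kBufLen, md->getLength());
    IOByteCount done = md->readBytes(0, buf, len);

    // writeBytes() returns 0 if the descriptor was prepared read-only.
    if (done)
        done = md->writeBytes(0, buf, done);

    md->complete();
    return (done == len) ? kIOReturnSuccess : kIOReturnUnderrun;
}
#endif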
1128
1129 // osfmk/device/iokit_rpc.c
1130 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1131
1132 #ifndef __LP64__
1133 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1134 {
1135 panic("IOGMD::setPosition deprecated");
1136 }
1137 #endif /* !__LP64__ */
1138
1139 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1140
1141 uint64_t
1142 IOGeneralMemoryDescriptor::getPreparationID( void )
1143 {
1144 ioGMDData *dataP;
1145
1146 if (!_wireCount)
1147 return (kIOPreparationIDUnprepared);
1148
1149 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
1150 {
1151 IOMemoryDescriptor::setPreparationID();
1152 return (IOMemoryDescriptor::getPreparationID());
1153 }
1154
1155 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1156 return (kIOPreparationIDUnprepared);
1157
1158 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1159 {
1160 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1161 }
1162 return (dataP->fPreparationID);
1163 }
1164
1165 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1166 {
1167 if (!reserved)
1168 {
1169 reserved = IONew(IOMemoryDescriptorReserved, 1);
1170 if (reserved)
1171 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1172 }
1173 return (reserved);
1174 }
1175
1176 void IOMemoryDescriptor::setPreparationID( void )
1177 {
1178 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1179 {
1180 #if defined(__ppc__ )
1181 reserved->preparationID = gIOMDPreparationID++;
1182 #else
1183 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1184 #endif
1185 }
1186 }
1187
1188 uint64_t IOMemoryDescriptor::getPreparationID( void )
1189 {
1190 if (reserved)
1191 return (reserved->preparationID);
1192 else
1193 return (kIOPreparationIDUnsupported);
1194 }
1195
1196 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1197 {
1198 if (kIOMDGetCharacteristics == op) {
1199
1200 if (dataSize < sizeof(IOMDDMACharacteristics))
1201 return kIOReturnUnderrun;
1202
1203 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1204 data->fLength = _length;
1205 data->fSGCount = _rangesCount;
1206 data->fPages = _pages;
1207 data->fDirection = getDirection();
1208 if (!_wireCount)
1209 data->fIsPrepared = false;
1210 else {
1211 data->fIsPrepared = true;
1212 data->fHighestPage = _highestPage;
1213 if (_memoryEntries) {
1214 ioGMDData *gmdData = getDataP(_memoryEntries);
1215 ioPLBlock *ioplList = getIOPLList(gmdData);
1216 UInt count = getNumIOPL(_memoryEntries, gmdData);
1217
1218 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1219 && ioplList[0].fMappedBase);
1220 if (count == 1)
1221 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1222 }
1223 else
1224 data->fIsMapped = false;
1225 }
1226
1227 return kIOReturnSuccess;
1228
1229 #if IOMD_DEBUG_DMAACTIVE
1230 } else if (kIOMDSetDMAActive == op) {
1231 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1232 OSIncrementAtomic(&md->__iomd_reservedA);
1233 } else if (kIOMDSetDMAInactive == op) {
1234 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1235 if (md->__iomd_reservedA)
1236 OSDecrementAtomic(&md->__iomd_reservedA);
1237 else
1238 panic("kIOMDSetDMAInactive");
1239 #endif /* IOMD_DEBUG_DMAACTIVE */
1240
1241 } else if (!(kIOMDWalkSegments & op))
1242 return kIOReturnBadArgument;
1243
1244 // Get the next segment
1245 struct InternalState {
1246 IOMDDMAWalkSegmentArgs fIO;
1247 UInt fOffset2Index;
1248 UInt fIndex;
1249 UInt fNextOffset;
1250 } *isP;
1251
1252 // Find the next segment
1253 if (dataSize < sizeof(*isP))
1254 return kIOReturnUnderrun;
1255
1256 isP = (InternalState *) vData;
1257 UInt offset = isP->fIO.fOffset;
1258 bool mapped = isP->fIO.fMapped;
1259
1260 if (offset >= _length)
1261 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1262
1263 // Validate the previous offset
1264 UInt ind, off2Ind = isP->fOffset2Index;
1265 if ((kIOMDFirstSegment != op)
1266 && offset
1267 && (offset == isP->fNextOffset || off2Ind <= offset))
1268 ind = isP->fIndex;
1269 else
1270 ind = off2Ind = 0; // Start from beginning
1271
1272 UInt length;
1273 UInt64 address;
1274 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1275
1276 // Physical address based memory descriptor
1277 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1278
1279 // Find the range after the one that contains the offset
1280 mach_vm_size_t len;
1281 for (len = 0; off2Ind <= offset; ind++) {
1282 len = physP[ind].length;
1283 off2Ind += len;
1284 }
1285
1286 // Calculate length within range and starting address
1287 length = off2Ind - offset;
1288 address = physP[ind - 1].address + len - length;
1289
1290 // see how far we can coalesce ranges
1291 while (ind < _rangesCount && address + length == physP[ind].address) {
1292 len = physP[ind].length;
1293 length += len;
1294 off2Ind += len;
1295 ind++;
1296 }
1297
1298 // correct contiguous check overshoot
1299 ind--;
1300 off2Ind -= len;
1301 }
1302 #ifndef __LP64__
1303 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1304
1305 // Physical address based memory descriptor
1306 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1307
1308 // Find the range after the one that contains the offset
1309 mach_vm_size_t len;
1310 for (len = 0; off2Ind <= offset; ind++) {
1311 len = physP[ind].length;
1312 off2Ind += len;
1313 }
1314
1315 // Calculate length within range and starting address
1316 length = off2Ind - offset;
1317 address = physP[ind - 1].address + len - length;
1318
1319 // see how far we can coalesce ranges
1320 while (ind < _rangesCount && address + length == physP[ind].address) {
1321 len = physP[ind].length;
1322 length += len;
1323 off2Ind += len;
1324 ind++;
1325 }
1326
1327 // correct contiguous check overshoot
1328 ind--;
1329 off2Ind -= len;
1330 }
1331 #endif /* !__LP64__ */
1332 else do {
1333 if (!_wireCount)
1334 panic("IOGMD: not wired for the IODMACommand");
1335
1336 assert(_memoryEntries);
1337
1338 ioGMDData * dataP = getDataP(_memoryEntries);
1339 const ioPLBlock *ioplList = getIOPLList(dataP);
1340 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1341 upl_page_info_t *pageList = getPageList(dataP);
1342
1343 assert(numIOPLs > 0);
1344
1345 // Scan through iopl info blocks looking for block containing offset
1346 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1347 ind++;
1348
1349 // Go back to actual range as search goes past it
1350 ioPLBlock ioplInfo = ioplList[ind - 1];
1351 off2Ind = ioplInfo.fIOMDOffset;
1352
1353 if (ind < numIOPLs)
1354 length = ioplList[ind].fIOMDOffset;
1355 else
1356 length = _length;
1357 length -= offset; // Remainder within iopl
1358
1359 // Subtract this iopl's starting offset within the total list
1360 offset -= off2Ind;
1361
1362 // If a mapped address is requested and this is a pre-mapped IOPL
1363 // then we just need to compute an offset relative to the mapped base.
1364 if (mapped && ioplInfo.fMappedBase) {
1365 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1366 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1367 continue; // Done; leave the do/while(false) now
1368 }
1369
1370 // The offset is rebased into the current iopl.
1371 // Now add the iopl 1st page offset.
1372 offset += ioplInfo.fPageOffset;
1373
1374 // For external UPLs the fPageInfo field points directly to
1375 // the upl's upl_page_info_t array.
1376 if (ioplInfo.fFlags & kIOPLExternUPL)
1377 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1378 else
1379 pageList = &pageList[ioplInfo.fPageInfo];
1380
1381 // Check for direct device non-paged memory
1382 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1383 address = ptoa_64(pageList->phys_addr) + offset;
1384 continue; // Done; leave the do/while(false) now
1385 }
1386
1387 // Now we need to compute the index into the pageList
1388 UInt pageInd = atop_32(offset);
1389 offset &= PAGE_MASK;
1390
1391 // Compute the starting address of this segment
1392 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1393 if (!pageAddr) {
1394 panic("!pageList phys_addr");
1395 }
1396
1397 address = ptoa_64(pageAddr) + offset;
1398
1399 // length is currently set to the length of the remainder of the iopl.
1400 // We need to check that the remainder of the iopl is contiguous.
1401 // This is indicated by pageList[pageInd].phys_addr being sequential.
1402 IOByteCount contigLength = PAGE_SIZE - offset;
1403 while (contigLength < length
1404 && ++pageAddr == pageList[++pageInd].phys_addr)
1405 {
1406 contigLength += PAGE_SIZE;
1407 }
1408
1409 if (contigLength < length)
1410 length = contigLength;
1411
1412
1413 assert(address);
1414 assert(length);
1415
1416 } while (false);
1417
1418 // Update return values and state
1419 isP->fIO.fIOVMAddr = address;
1420 isP->fIO.fLength = length;
1421 isP->fIndex = ind;
1422 isP->fOffset2Index = off2Ind;
1423 isP->fNextOffset = isP->fIO.fOffset + length;
1424
1425 return kIOReturnSuccess;
1426 }
1427
1428 addr64_t
1429 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1430 {
1431 IOReturn ret;
1432 addr64_t address = 0;
1433 IOByteCount length = 0;
1434 IOMapper * mapper = gIOSystemMapper;
1435 IOOptionBits type = _flags & kIOMemoryTypeMask;
1436
1437 if (lengthOfSegment)
1438 *lengthOfSegment = 0;
1439
1440 if (offset >= _length)
1441 return 0;
1442
1443 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1444 // support the unwired memory case in IOGeneralMemoryDescriptor. Likewise, hibernate_write_image() cannot use
1445 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation an
1446 // IOMemoryMap would incur. _kIOMemorySourceSegment is therefore a necessary evil until all of this gets cleaned up.
1447
1448 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1449 {
1450 unsigned rangesIndex = 0;
1451 Ranges vec = _ranges;
1452 user_addr_t addr;
1453
1454 // Find starting address within the vector of ranges
1455 for (;;) {
1456 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1457 if (offset < length)
1458 break;
1459 offset -= length; // (make offset relative)
1460 rangesIndex++;
1461 }
1462
1463 // Now that we have the starting range,
1464 // let's find the last contiguous range
1465 addr += offset;
1466 length -= offset;
1467
1468 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1469 user_addr_t newAddr;
1470 IOPhysicalLength newLen;
1471
1472 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1473 if (addr + length != newAddr)
1474 break;
1475 length += newLen;
1476 }
1477 if (addr)
1478 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1479 }
1480 else
1481 {
1482 IOMDDMAWalkSegmentState _state;
1483 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1484
1485 state->fOffset = offset;
1486 state->fLength = _length - offset;
1487 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1488
1489 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1490
1491 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1492 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1493 ret, this, state->fOffset,
1494 state->fIOVMAddr, state->fLength);
1495 if (kIOReturnSuccess == ret)
1496 {
1497 address = state->fIOVMAddr;
1498 length = state->fLength;
1499 }
1500
1501 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1502 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1503
1504 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1505 {
1506 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1507 {
1508 addr64_t origAddr = address;
1509 IOByteCount origLen = length;
1510
1511 address = mapper->mapAddr(origAddr);
1512 length = page_size - (address & (page_size - 1));
1513 while ((length < origLen)
1514 && ((address + length) == mapper->mapAddr(origAddr + length)))
1515 length += page_size;
1516 if (length > origLen)
1517 length = origLen;
1518 }
1519 #ifdef __LP64__
1520 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1521 {
1522 panic("getPhysicalSegment not mapped for I/O");
1523 }
1524 #endif /* __LP64__ */
1525 }
1526 }
1527
1528 if (!address)
1529 length = 0;
1530
1531 if (lengthOfSegment)
1532 *lengthOfSegment = length;
1533
1534 return (address);
1535 }
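
// An illustrative sketch (not compiled) of walking a prepared descriptor's
// physical segments with getPhysicalSegment(), e.g. to build a scatter/gather
// list by hand. The helper name is hypothetical; real drivers normally use
// IODMACommand, which drives dmaCommandOperation() above instead.
#if 0
static void
ExampleWalkSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    while (offset < md->getLength())
    {
        IOByteCount seglen = 0;
        // kIOMemoryMapperNone requests the raw CPU physical address rather
        // than a system-mapper (IOVM) address.
        addr64_t physAddr = md->getPhysicalSegment(offset, &seglen, kIOMemoryMapperNone);
        if (!physAddr || !seglen)
            break;
        DEBG("segment 0x%qx, length 0x%qx\n", (uint64_t) physAddr, (uint64_t) seglen);
        offset += seglen;
    }
}
#endif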
1536
1537 #ifndef __LP64__
1538 addr64_t
1539 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1540 {
1541 addr64_t address = 0;
1542
1543 if (options & _kIOMemorySourceSegment)
1544 {
1545 address = getSourceSegment(offset, lengthOfSegment);
1546 }
1547 else if (options & kIOMemoryMapperNone)
1548 {
1549 address = getPhysicalSegment64(offset, lengthOfSegment);
1550 }
1551 else
1552 {
1553 address = getPhysicalSegment(offset, lengthOfSegment);
1554 }
1555
1556 return (address);
1557 }
1558
1559 addr64_t
1560 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1561 {
1562 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1563 }
1564
1565 IOPhysicalAddress
1566 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1567 {
1568 addr64_t address = 0;
1569 IOByteCount length = 0;
1570
1571 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1572
1573 if (lengthOfSegment)
1574 length = *lengthOfSegment;
1575
1576 if ((address + length) > 0x100000000ULL)
1577 {
1578 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1579 address, (long) length, (getMetaClass())->getClassName());
1580 }
1581
1582 return ((IOPhysicalAddress) address);
1583 }
1584
1585 addr64_t
1586 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1587 {
1588 IOPhysicalAddress phys32;
1589 IOByteCount length;
1590 addr64_t phys64;
1591 IOMapper * mapper = 0;
1592
1593 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1594 if (!phys32)
1595 return 0;
1596
1597 if (gIOSystemMapper)
1598 mapper = gIOSystemMapper;
1599
1600 if (mapper)
1601 {
1602 IOByteCount origLen;
1603
1604 phys64 = mapper->mapAddr(phys32);
1605 origLen = *lengthOfSegment;
1606 length = page_size - (phys64 & (page_size - 1));
1607 while ((length < origLen)
1608 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1609 length += page_size;
1610 if (length > origLen)
1611 length = origLen;
1612
1613 *lengthOfSegment = length;
1614 }
1615 else
1616 phys64 = (addr64_t) phys32;
1617
1618 return phys64;
1619 }
1620
1621 IOPhysicalAddress
1622 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1623 {
1624 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1625 }
1626
1627 IOPhysicalAddress
1628 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1629 {
1630 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1631 }
1632
1633 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1634 IOByteCount * lengthOfSegment)
1635 {
1636 if (_task == kernel_task)
1637 return (void *) getSourceSegment(offset, lengthOfSegment);
1638 else
1639 panic("IOGMD::getVirtualSegment deprecated");
1640
1641 return 0;
1642 }
1643 #endif /* !__LP64__ */
1644
1645 IOReturn
1646 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1647 {
1648 if (kIOMDGetCharacteristics == op) {
1649 if (dataSize < sizeof(IOMDDMACharacteristics))
1650 return kIOReturnUnderrun;
1651
1652 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1653 data->fLength = getLength();
1654 data->fSGCount = 0;
1655 data->fDirection = getDirection();
1656 if (IOMapper::gSystem)
1657 data->fIsMapped = true;
1658 data->fIsPrepared = true; // Assume prepared - fails safe
1659 }
1660 else if (kIOMDWalkSegments & op) {
1661 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1662 return kIOReturnUnderrun;
1663
1664 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1665 IOByteCount offset = (IOByteCount) data->fOffset;
1666
1667 IOPhysicalLength length;
1668 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1669 if (data->fMapped && IOMapper::gSystem)
1670 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1671 else
1672 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1673 data->fLength = length;
1674 }
1675 else
1676 return kIOReturnBadArgument;
1677
1678 return kIOReturnSuccess;
1679 }
1680
1681 static IOReturn
1682 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1683 {
1684 IOReturn err = kIOReturnSuccess;
1685
1686 *control = VM_PURGABLE_SET_STATE;
1687 switch (newState)
1688 {
1689 case kIOMemoryPurgeableKeepCurrent:
1690 *control = VM_PURGABLE_GET_STATE;
1691 break;
1692
1693 case kIOMemoryPurgeableNonVolatile:
1694 *state = VM_PURGABLE_NONVOLATILE;
1695 break;
1696 case kIOMemoryPurgeableVolatile:
1697 *state = VM_PURGABLE_VOLATILE;
1698 break;
1699 case kIOMemoryPurgeableEmpty:
1700 *state = VM_PURGABLE_EMPTY;
1701 break;
1702 default:
1703 err = kIOReturnBadArgument;
1704 break;
1705 }
1706 return (err);
1707 }
1708
1709 static IOReturn
1710 purgeableStateBits(int * state)
1711 {
1712 IOReturn err = kIOReturnSuccess;
1713
1714 switch (*state)
1715 {
1716 case VM_PURGABLE_NONVOLATILE:
1717 *state = kIOMemoryPurgeableNonVolatile;
1718 break;
1719 case VM_PURGABLE_VOLATILE:
1720 *state = kIOMemoryPurgeableVolatile;
1721 break;
1722 case VM_PURGABLE_EMPTY:
1723 *state = kIOMemoryPurgeableEmpty;
1724 break;
1725 default:
1726 *state = kIOMemoryPurgeableNonVolatile;
1727 err = kIOReturnNotReady;
1728 break;
1729 }
1730 return (err);
1731 }
1732
1733 IOReturn
1734 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1735 IOOptionBits * oldState )
1736 {
1737 IOReturn err = kIOReturnSuccess;
1738 vm_purgable_t control;
1739 int state;
1740
1741 if (_memEntry)
1742 {
1743 err = super::setPurgeable(newState, oldState);
1744 }
1745 else
1746 {
1747 if (kIOMemoryThreadSafe & _flags)
1748 LOCK;
1749 do
1750 {
1751 // Find the appropriate vm_map for the given task
1752 vm_map_t curMap;
1753 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1754 {
1755 err = kIOReturnNotReady;
1756 break;
1757 }
1758 else
1759 curMap = get_task_map(_task);
1760
1761 // can only do one range
1762 Ranges vec = _ranges;
1763 IOOptionBits type = _flags & kIOMemoryTypeMask;
1764 user_addr_t addr;
1765 IOByteCount len;
1766 getAddrLenForInd(addr, len, type, vec, 0);
1767
1768 err = purgeableControlBits(newState, &control, &state);
1769 if (kIOReturnSuccess != err)
1770 break;
1771 err = mach_vm_purgable_control(curMap, addr, control, &state);
1772 if (oldState)
1773 {
1774 if (kIOReturnSuccess == err)
1775 {
1776 err = purgeableStateBits(&state);
1777 *oldState = state;
1778 }
1779 }
1780 }
1781 while (false);
1782 if (kIOMemoryThreadSafe & _flags)
1783 UNLOCK;
1784 }
1785 return (err);
1786 }
1787
1788 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1789 IOOptionBits * oldState )
1790 {
1791 IOReturn err = kIOReturnSuccess;
1792 vm_purgable_t control;
1793 int state;
1794
1795 if (kIOMemoryThreadSafe & _flags)
1796 LOCK;
1797
1798 do
1799 {
1800 if (!_memEntry)
1801 {
1802 err = kIOReturnNotReady;
1803 break;
1804 }
1805 err = purgeableControlBits(newState, &control, &state);
1806 if (kIOReturnSuccess != err)
1807 break;
1808 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1809 if (oldState)
1810 {
1811 if (kIOReturnSuccess == err)
1812 {
1813 err = purgeableStateBits(&state);
1814 *oldState = state;
1815 }
1816 }
1817 }
1818 while (false);
1819
1820 if (kIOMemoryThreadSafe & _flags)
1821 UNLOCK;
1822
1823 return (err);
1824 }
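
// An illustrative sketch (not compiled): querying and then changing the
// purgeable state of a descriptor that covers purgeable memory. The helper
// name is hypothetical.
#if 0
static IOReturn
ExamplePurgeable(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;

    // kIOMemoryPurgeableKeepCurrent only reads the current state back.
    IOReturn ret = md->setPurgeable(kIOMemoryPurgeableKeepCurrent, &oldState);
    if (kIOReturnSuccess != ret)
        return ret;

    // Mark the pages volatile; the VM may now reclaim them under pressure.
    return md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
}
#endif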
1825
1826 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1827 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1828
1829 static void SetEncryptOp(addr64_t pa, unsigned int count)
1830 {
1831 ppnum_t page, end;
1832
1833 page = atop_64(round_page_64(pa));
1834 end = atop_64(trunc_page_64(pa + count));
1835 for (; page < end; page++)
1836 {
1837 pmap_clear_noencrypt(page);
1838 }
1839 }
1840
1841 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1842 {
1843 ppnum_t page, end;
1844
1845 page = atop_64(round_page_64(pa));
1846 end = atop_64(trunc_page_64(pa + count));
1847 for (; page < end; page++)
1848 {
1849 pmap_set_noencrypt(page);
1850 }
1851 }
1852
1853 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1854 IOByteCount offset, IOByteCount length )
1855 {
1856 IOByteCount remaining;
1857 unsigned int res;
1858 void (*func)(addr64_t pa, unsigned int count) = 0;
1859
1860 switch (options)
1861 {
1862 case kIOMemoryIncoherentIOFlush:
1863 func = &dcache_incoherent_io_flush64;
1864 break;
1865 case kIOMemoryIncoherentIOStore:
1866 func = &dcache_incoherent_io_store64;
1867 break;
1868
1869 case kIOMemorySetEncrypted:
1870 func = &SetEncryptOp;
1871 break;
1872 case kIOMemoryClearEncrypted:
1873 func = &ClearEncryptOp;
1874 break;
1875 }
1876
1877 if (!func)
1878 return (kIOReturnUnsupported);
1879
1880 if (kIOMemoryThreadSafe & _flags)
1881 LOCK;
1882
1883 res = 0x0UL;
1884 remaining = length = min(length, getLength() - offset);
1885 while (remaining)
1886 // (process another target segment?)
1887 {
1888 addr64_t dstAddr64;
1889 IOByteCount dstLen;
1890
1891 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1892 if (!dstAddr64)
1893 break;
1894
1895 // Clip segment length to remaining
1896 if (dstLen > remaining)
1897 dstLen = remaining;
1898
1899 (*func)(dstAddr64, dstLen);
1900
1901 offset += dstLen;
1902 remaining -= dstLen;
1903 }
1904
1905 if (kIOMemoryThreadSafe & _flags)
1906 UNLOCK;
1907
1908 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1909 }
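
// An illustrative sketch (not compiled): using performOperation() to flush
// processor caches over a buffer involved in cache-incoherent DMA. The helper
// name is hypothetical.
#if 0
static IOReturn
ExampleFlushForDMA(IOMemoryDescriptor * md)
{
    // kIOMemoryIncoherentIOFlush maps to dcache_incoherent_io_flush64() above;
    // kIOMemoryIncoherentIOStore maps to dcache_incoherent_io_store64().
    return md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
}
#endif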
1910
1911 #if defined(__i386__) || defined(__x86_64__)
1912 extern vm_offset_t first_avail;
1913 #define io_kernel_static_end first_avail
1914 #else
1915 #error io_kernel_static_end is undefined for this architecture
1916 #endif
1917
1918 static kern_return_t
1919 io_get_kernel_static_upl(
1920 vm_map_t /* map */,
1921 uintptr_t offset,
1922 vm_size_t *upl_size,
1923 upl_t *upl,
1924 upl_page_info_array_t page_list,
1925 unsigned int *count,
1926 ppnum_t *highest_page)
1927 {
1928 unsigned int pageCount, page;
1929 ppnum_t phys;
1930 ppnum_t highestPage = 0;
1931
1932 pageCount = atop_32(*upl_size);
1933 if (pageCount > *count)
1934 pageCount = *count;
1935
1936 *upl = NULL;
1937
1938 for (page = 0; page < pageCount; page++)
1939 {
1940 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1941 if (!phys)
1942 break;
1943 page_list[page].phys_addr = phys;
1944 page_list[page].pageout = 0;
1945 page_list[page].absent = 0;
1946 page_list[page].dirty = 0;
1947 page_list[page].precious = 0;
1948 page_list[page].device = 0;
1949 if (phys > highestPage)
1950 highestPage = phys;
1951 }
1952
1953 *highest_page = highestPage;
1954
1955 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1956 }
1957
1958 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1959 {
1960 IOOptionBits type = _flags & kIOMemoryTypeMask;
1961 IOReturn error = kIOReturnCannotWire;
1962 ioGMDData *dataP;
1963 ppnum_t mapBase = 0;
1964 IOMapper *mapper;
1965 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1966
1967 assert(!_wireCount);
1968 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1969
1970 if (_pages > gIOMaximumMappedIOPageCount)
1971 return kIOReturnNoResources;
1972
1973 dataP = getDataP(_memoryEntries);
1974 mapper = dataP->fMapper;
1975 if (mapper && _pages)
1976 mapBase = mapper->iovmAlloc(_pages);
1977
1978 // Note that appendBytes(NULL) zeros the data up to the
1979 // desired length.
1980 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1981 dataP = 0; // May no longer be valid, so let's not get tempted.
1982
1983 if (forDirection == kIODirectionNone)
1984 forDirection = getDirection();
1985
1986 int uplFlags; // This Mem Desc's default flags for upl creation
1987 switch (kIODirectionOutIn & forDirection)
1988 {
1989 case kIODirectionOut:
1990 // Pages do not need to be marked as dirty on commit
1991 uplFlags = UPL_COPYOUT_FROM;
1992 _flags |= kIOMemoryPreparedReadOnly;
1993 break;
1994
1995 case kIODirectionIn:
1996 default:
1997 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1998 break;
1999 }
2000 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2001
2002 #ifdef UPL_NEED_32BIT_ADDR
2003 if (kIODirectionPrepareToPhys32 & forDirection)
2004 uplFlags |= UPL_NEED_32BIT_ADDR;
2005 #endif
2006
2007 // Find the appropriate vm_map for the given task
2008 vm_map_t curMap;
2009 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2010 curMap = 0;
2011 else
2012 { curMap = get_task_map(_task); }
2013
2014 // Iterate over the vector of virtual ranges
2015 Ranges vec = _ranges;
2016 unsigned int pageIndex = 0;
2017 IOByteCount mdOffset = 0;
2018 ppnum_t highestPage = 0;
2019 for (UInt range = 0; range < _rangesCount; range++) {
2020 ioPLBlock iopl;
2021 user_addr_t startPage;
2022 IOByteCount numBytes;
2023 ppnum_t highPage = 0;
2024
2025 // Get the startPage address and length of vec[range]
2026 getAddrLenForInd(startPage, numBytes, type, vec, range);
2027 iopl.fPageOffset = startPage & PAGE_MASK;
2028 numBytes += iopl.fPageOffset;
2029 startPage = trunc_page_64(startPage);
2030
2031 if (mapper)
2032 iopl.fMappedBase = mapBase + pageIndex;
2033 else
2034 iopl.fMappedBase = 0;
2035
2036 // Iterate over the current range, creating UPLs
2037 while (numBytes) {
2038 dataP = getDataP(_memoryEntries);
2039 vm_address_t kernelStart = (vm_address_t) startPage;
2040 vm_map_t theMap;
2041 if (curMap)
2042 theMap = curMap;
2043 else if (!sharedMem) {
2044 assert(_task == kernel_task);
2045 theMap = IOPageableMapForAddress(kernelStart);
2046 }
2047 else
2048 theMap = NULL;
2049
2050 upl_page_info_array_t pageInfo = getPageList(dataP);
2051 int ioplFlags = uplFlags;
2052 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2053
2054 vm_size_t ioplSize = round_page(numBytes);
2055 unsigned int numPageInfo = atop_32(ioplSize);
2056
2057 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2058 error = io_get_kernel_static_upl(theMap,
2059 kernelStart,
2060 &ioplSize,
2061 &iopl.fIOPL,
2062 baseInfo,
2063 &numPageInfo,
2064 &highPage);
2065 }
2066 else if (sharedMem) {
2067 error = memory_object_iopl_request(sharedMem,
2068 ptoa_32(pageIndex),
2069 &ioplSize,
2070 &iopl.fIOPL,
2071 baseInfo,
2072 &numPageInfo,
2073 &ioplFlags);
2074 }
2075 else {
2076 assert(theMap);
2077 error = vm_map_create_upl(theMap,
2078 startPage,
2079 (upl_size_t*)&ioplSize,
2080 &iopl.fIOPL,
2081 baseInfo,
2082 &numPageInfo,
2083 &ioplFlags);
2084 }
2085
2086 assert(ioplSize);
2087 if (error != KERN_SUCCESS)
2088 goto abortExit;
2089
2090 if (iopl.fIOPL)
2091 highPage = upl_get_highest_page(iopl.fIOPL);
2092 if (highPage > highestPage)
2093 highestPage = highPage;
2094
2095 error = kIOReturnCannotWire;
2096
2097 if (baseInfo->device) {
2098 numPageInfo = 1;
2099 iopl.fFlags = kIOPLOnDevice;
2100 // Don't translate device memory at all
2101 if (mapper && mapBase) {
2102 mapper->iovmFree(mapBase, _pages);
2103 mapBase = 0;
2104 iopl.fMappedBase = 0;
2105 }
2106 }
2107 else {
2108 iopl.fFlags = 0;
2109 if (mapper)
2110 mapper->iovmInsert(mapBase, pageIndex,
2111 baseInfo, numPageInfo);
2112 }
2113
2114 iopl.fIOMDOffset = mdOffset;
2115 iopl.fPageInfo = pageIndex;
2116
2117 #if 0
2118 // used to remove the UPL for auto-prepares here, for some errant code
2119 // that freed the memory before releasing the descriptor pointing at it
2120 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2121 {
2122 upl_commit(iopl.fIOPL, 0, 0);
2123 upl_deallocate(iopl.fIOPL);
2124 iopl.fIOPL = 0;
2125 }
2126 #endif
2127
2128 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2129 // Clean up the partially created and unsaved iopl
2130 if (iopl.fIOPL) {
2131 upl_abort(iopl.fIOPL, 0);
2132 upl_deallocate(iopl.fIOPL);
2133 }
2134 goto abortExit;
2135 }
2136
2137 // Check for multiple iopls in one virtual range
2138 pageIndex += numPageInfo;
2139 mdOffset -= iopl.fPageOffset;
2140 if (ioplSize < numBytes) {
2141 numBytes -= ioplSize;
2142 startPage += ioplSize;
2143 mdOffset += ioplSize;
2144 iopl.fPageOffset = 0;
2145 if (mapper)
2146 iopl.fMappedBase = mapBase + pageIndex;
2147 }
2148 else {
2149 mdOffset += numBytes;
2150 break;
2151 }
2152 }
2153 }
2154
2155 _highestPage = highestPage;
2156
2157 return kIOReturnSuccess;
2158
2159 abortExit:
2160 {
2161 dataP = getDataP(_memoryEntries);
2162 UInt done = getNumIOPL(_memoryEntries, dataP);
2163 ioPLBlock *ioplList = getIOPLList(dataP);
2164
2165 for (UInt range = 0; range < done; range++)
2166 {
2167 if (ioplList[range].fIOPL) {
2168 upl_abort(ioplList[range].fIOPL, 0);
2169 upl_deallocate(ioplList[range].fIOPL);
2170 }
2171 }
2172 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2173
2174 if (mapper && mapBase)
2175 mapper->iovmFree(mapBase, _pages);
2176 }
2177
2178 if (error == KERN_FAILURE)
2179 error = kIOReturnCannotWire;
2180
2181 return error;
2182 }
2183
2184 /*
2185 * prepare
2186 *
2187 * Prepare the memory for an I/O transfer. This involves paging in
2188 * the memory, if necessary, and wiring it down for the duration of
2189 * the transfer. The complete() method completes the processing of
2190 * the memory after the I/O transfer finishes. This method need not
2191 * be called for non-pageable memory.
2192 */
2193 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2194 {
2195 IOReturn error = kIOReturnSuccess;
2196 IOOptionBits type = _flags & kIOMemoryTypeMask;
2197
2198 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2199 return kIOReturnSuccess;
2200
2201 if (_prepareLock)
2202 IOLockLock(_prepareLock);
2203
2204 if (!_wireCount
2205 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2206 error = wireVirtual(forDirection);
2207 }
2208
2209 if (kIOReturnSuccess == error)
2210 _wireCount++;
2211
2212 if (1 == _wireCount)
2213 {
2214 if (kIOMemoryClearEncrypt & _flags)
2215 {
2216 performOperation(kIOMemoryClearEncrypted, 0, _length);
2217 }
2218 }
2219
2220 if (_prepareLock)
2221 IOLockUnlock(_prepareLock);
2222
2223 return error;
2224 }
2225
2226 /*
2227 * complete
2228 *
2229 * Complete processing of the memory after an I/O transfer finishes.
2230 * This method should not be called unless a prepare was previously
2231 * issued; prepare() and complete() must occur in pairs, before
2232 * and after an I/O transfer involving pageable memory.
2233 */
2234
2235 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2236 {
2237 IOOptionBits type = _flags & kIOMemoryTypeMask;
2238
2239 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2240 return kIOReturnSuccess;
2241
2242 if (_prepareLock)
2243 IOLockLock(_prepareLock);
2244
2245 assert(_wireCount);
2246
2247 if (_wireCount)
2248 {
2249 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2250 {
2251 performOperation(kIOMemorySetEncrypted, 0, _length);
2252 }
2253
2254 _wireCount--;
2255 if (!_wireCount)
2256 {
2257 IOOptionBits type = _flags & kIOMemoryTypeMask;
2258 ioGMDData * dataP = getDataP(_memoryEntries);
2259 ioPLBlock *ioplList = getIOPLList(dataP);
2260 UInt count = getNumIOPL(_memoryEntries, dataP);
2261
2262 #if IOMD_DEBUG_DMAACTIVE
2263 if (__iomd_reservedA) panic("complete() while dma active");
2264 #endif /* IOMD_DEBUG_DMAACTIVE */
2265
2266 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2267 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2268
2269 // Only complete IOPLs that we created, i.e. those for TypeVirtual
2270 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2271 for (UInt ind = 0; ind < count; ind++)
2272 if (ioplList[ind].fIOPL) {
2273 upl_commit(ioplList[ind].fIOPL, 0, 0);
2274 upl_deallocate(ioplList[ind].fIOPL);
2275 }
2276 } else if (kIOMemoryTypeUPL == type) {
2277 upl_set_referenced(ioplList[0].fIOPL, false);
2278 }
2279
2280 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2281
2282 dataP->fPreparationID = kIOPreparationIDUnprepared;
2283 }
2284 }
2285
2286 if (_prepareLock)
2287 IOLockUnlock(_prepareLock);
2288
2289 return kIOReturnSuccess;
2290 }
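
/*
 * Illustrative sketch (editorial addition, not part of this source, guarded out
 * of the build): a typical driver-side pairing of prepare() and complete()
 * around an I/O transfer, as described in the comments above. The helper name
 * and the hardware-programming step are hypothetical; only IOMemoryDescriptor
 * calls that appear in this file (prepare, complete, getLength,
 * getPhysicalSegment) are assumed.
 */
#if 0
static IOReturn ExampleWiredTransfer(IOMemoryDescriptor * md)
{
    // Page in and wire the memory for the duration of the transfer.
    IOReturn ret = md->prepare(kIODirectionOutIn);
    if (kIOReturnSuccess != ret)
        return ret;

    // While prepared, walk the physical segments and hand them to hardware.
    IOByteCount offset = 0;
    while (offset < md->getLength())
    {
        IOByteCount segLen = 0;
        addr64_t    phys   = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!phys)
            break;
        // ... program the (hypothetical) device with (phys, segLen) ...
        offset += segLen;
    }

    // Every successful prepare() must be balanced by a complete().
    md->complete(kIODirectionOutIn);
    return kIOReturnSuccess;
}
#endif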
2291
2292 IOReturn IOGeneralMemoryDescriptor::doMap(
2293 vm_map_t __addressMap,
2294 IOVirtualAddress * __address,
2295 IOOptionBits options,
2296 IOByteCount __offset,
2297 IOByteCount __length )
2298
2299 {
2300 #ifndef __LP64__
2301 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2302 #endif /* !__LP64__ */
2303
2304 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2305 mach_vm_size_t offset = mapping->fOffset + __offset;
2306 mach_vm_size_t length = mapping->fLength;
2307
2308 kern_return_t kr = kIOReturnVMError;
2309 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2310
2311 IOOptionBits type = _flags & kIOMemoryTypeMask;
2312 Ranges vec = _ranges;
2313
2314 user_addr_t range0Addr = 0;
2315 IOByteCount range0Len = 0;
2316
2317 if ((offset >= _length) || ((offset + length) > _length))
2318 return( kIOReturnBadArgument );
2319
2320 if (vec.v)
2321 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2322
2323 // mapping source == dest? (could be much better)
2324 if( _task
2325 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2326 && (1 == _rangesCount) && (0 == offset)
2327 && range0Addr && (length <= range0Len) )
2328 {
2329 mapping->fAddress = range0Addr;
2330 mapping->fOptions |= kIOMapStatic;
2331
2332 return( kIOReturnSuccess );
2333 }
2334
2335 if( 0 == sharedMem) {
2336
2337 vm_size_t size = ptoa_32(_pages);
2338
2339 if( _task) {
2340
2341 memory_object_size_t actualSize = size;
2342 vm_prot_t prot = VM_PROT_READ;
2343 if (!(kIOMapReadOnly & options))
2344 prot |= VM_PROT_WRITE;
2345 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2346 prot |= VM_PROT_WRITE;
2347
2348 if (_rangesCount == 1)
2349 {
2350 kr = mach_make_memory_entry_64(get_task_map(_task),
2351 &actualSize, range0Addr,
2352 prot, &sharedMem,
2353 NULL);
2354 }
2355 if( (_rangesCount != 1)
2356 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2357 do
2358 {
2359 #if IOASSERT
2360 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2361 _rangesCount, (UInt64)actualSize, (UInt64)size);
2362 #endif
2363 kr = kIOReturnVMError;
2364 if (sharedMem)
2365 {
2366 ipc_port_release_send(sharedMem);
2367 sharedMem = MACH_PORT_NULL;
2368 }
2369
2370 mach_vm_address_t address, segDestAddr;
2371 mach_vm_size_t mapLength;
2372 unsigned rangesIndex;
2373 IOOptionBits type = _flags & kIOMemoryTypeMask;
2374 user_addr_t srcAddr;
2375 IOPhysicalLength segLen = 0;
2376
2377 // Find starting address within the vector of ranges
2378 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2379 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2380 if (offset < segLen)
2381 break;
2382 offset -= segLen; // (make offset relative)
2383 }
2384
2385 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2386 address = trunc_page_64(mapping->fAddress);
2387
2388 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2389 {
2390 vm_map_t map = mapping->fAddressMap;
2391 kr = IOMemoryDescriptorMapCopy(&map,
2392 options,
2393 offset, &address, round_page_64(length + pageOffset));
2394 if (kr == KERN_SUCCESS)
2395 {
2396 segDestAddr = address;
2397 segLen -= offset;
2398 srcAddr += offset;
2399 mapLength = length;
2400
2401 while (true)
2402 {
2403 vm_prot_t cur_prot, max_prot;
2404
2405 if (segLen > length) segLen = length;
2406 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2407 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2408 get_task_map(_task), trunc_page_64(srcAddr),
2409 FALSE /* copy */,
2410 &cur_prot,
2411 &max_prot,
2412 VM_INHERIT_NONE);
2413 if (KERN_SUCCESS == kr)
2414 {
2415 if ((!(VM_PROT_READ & cur_prot))
2416 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2417 {
2418 kr = KERN_PROTECTION_FAILURE;
2419 }
2420 }
2421 if (KERN_SUCCESS != kr)
2422 break;
2423 segDestAddr += segLen;
2424 mapLength -= segLen;
2425 if (!mapLength)
2426 break;
2427 rangesIndex++;
2428 if (rangesIndex >= _rangesCount)
2429 {
2430 kr = kIOReturnBadArgument;
2431 break;
2432 }
2433 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2434 if (srcAddr & PAGE_MASK)
2435 {
2436 kr = kIOReturnBadArgument;
2437 break;
2438 }
2439 if (segLen > mapLength)
2440 segLen = mapLength;
2441 }
2442 if (KERN_SUCCESS != kr)
2443 {
2444 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2445 }
2446 }
2447
2448 if (KERN_SUCCESS == kr)
2449 mapping->fAddress = address + pageOffset;
2450 else
2451 mapping->fAddress = NULL;
2452 }
2453 }
2454 while (false);
2455 }
2456 else do
2457 { // _task == 0, must be physical
2458
2459 memory_object_t pager;
2460 unsigned int flags = 0;
2461 addr64_t pa;
2462 IOPhysicalLength segLen;
2463
2464 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2465
2466 if( !getKernelReserved())
2467 continue;
2468 reserved->dp.pagerContig = (1 == _rangesCount);
2469 reserved->dp.memory = this;
2470
2471 /* What cache mode do we need? */
2472 switch(options & kIOMapCacheMask ) {
2473
2474 case kIOMapDefaultCache:
2475 default:
2476 flags = IODefaultCacheBits(pa);
2477 if (DEVICE_PAGER_CACHE_INHIB & flags)
2478 {
2479 if (DEVICE_PAGER_GUARDED & flags)
2480 mapping->fOptions |= kIOMapInhibitCache;
2481 else
2482 mapping->fOptions |= kIOMapWriteCombineCache;
2483 }
2484 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2485 mapping->fOptions |= kIOMapWriteThruCache;
2486 else
2487 mapping->fOptions |= kIOMapCopybackCache;
2488 break;
2489
2490 case kIOMapInhibitCache:
2491 flags = DEVICE_PAGER_CACHE_INHIB |
2492 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2493 break;
2494
2495 case kIOMapWriteThruCache:
2496 flags = DEVICE_PAGER_WRITE_THROUGH |
2497 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2498 break;
2499
2500 case kIOMapCopybackCache:
2501 flags = DEVICE_PAGER_COHERENT;
2502 break;
2503
2504 case kIOMapWriteCombineCache:
2505 flags = DEVICE_PAGER_CACHE_INHIB |
2506 DEVICE_PAGER_COHERENT;
2507 break;
2508 }
2509
2510 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2511
2512 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2513 size, flags);
2514 assert( pager );
2515
2516 if( pager) {
2517 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2518 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2519
2520 assert( KERN_SUCCESS == kr );
2521 if( KERN_SUCCESS != kr)
2522 {
2523 device_pager_deallocate( pager );
2524 pager = MACH_PORT_NULL;
2525 sharedMem = MACH_PORT_NULL;
2526 }
2527 }
2528 if( pager && sharedMem)
2529 reserved->dp.devicePager = pager;
2530
2531 } while( false );
2532
2533 _memEntry = (void *) sharedMem;
2534 }
2535
2536 IOReturn result;
2537 if (0 == sharedMem)
2538 result = kr;
2539 else
2540 result = super::doMap( __addressMap, __address,
2541 options, __offset, __length );
2542
2543 return( result );
2544 }
2545
2546 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2547 vm_map_t addressMap,
2548 IOVirtualAddress __address,
2549 IOByteCount __length )
2550 {
2551 return (super::doUnmap(addressMap, __address, __length));
2552 }
2553
2554 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2555
2556 #undef super
2557 #define super OSObject
2558
2559 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2560
2561 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2562 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2563 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2564 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2565 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2566 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2567 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2568 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2569
2570 /* ex-inline function implementation */
2571 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2572 { return( getPhysicalSegment( 0, 0 )); }
2573
2574 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2575
2576 bool IOMemoryMap::init(
2577 task_t intoTask,
2578 mach_vm_address_t toAddress,
2579 IOOptionBits _options,
2580 mach_vm_size_t _offset,
2581 mach_vm_size_t _length )
2582 {
2583 if (!intoTask)
2584 return( false);
2585
2586 if (!super::init())
2587 return(false);
2588
2589 fAddressMap = get_task_map(intoTask);
2590 if (!fAddressMap)
2591 return(false);
2592 vm_map_reference(fAddressMap);
2593
2594 fAddressTask = intoTask;
2595 fOptions = _options;
2596 fLength = _length;
2597 fOffset = _offset;
2598 fAddress = toAddress;
2599
2600 return (true);
2601 }
2602
2603 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2604 {
2605 if (!_memory)
2606 return(false);
2607
2608 if (!fSuperMap)
2609 {
2610 if( (_offset + fLength) > _memory->getLength())
2611 return( false);
2612 fOffset = _offset;
2613 }
2614
2615 _memory->retain();
2616 if (fMemory)
2617 {
2618 if (fMemory != _memory)
2619 fMemory->removeMapping(this);
2620 fMemory->release();
2621 }
2622 fMemory = _memory;
2623
2624 return( true );
2625 }
2626
2627 struct IOMemoryDescriptorMapAllocRef
2628 {
2629 ipc_port_t sharedMem;
2630 vm_map_t map;
2631 mach_vm_address_t mapped;
2632 mach_vm_size_t size;
2633 mach_vm_size_t sourceOffset;
2634 IOOptionBits options;
2635 };
2636
2637 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2638 {
2639 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2640 IOReturn err;
2641
2642 do {
2643 if( ref->sharedMem)
2644 {
2645 vm_prot_t prot = VM_PROT_READ
2646 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2647
2648 // VM system requires write access to change cache mode
2649 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2650 prot |= VM_PROT_WRITE;
2651
2652 // set memory entry cache
2653 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2654 switch (ref->options & kIOMapCacheMask)
2655 {
2656 case kIOMapInhibitCache:
2657 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2658 break;
2659
2660 case kIOMapWriteThruCache:
2661 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2662 break;
2663
2664 case kIOMapWriteCombineCache:
2665 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2666 break;
2667
2668 case kIOMapCopybackCache:
2669 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2670 break;
2671
2672 case kIOMapCopybackInnerCache:
2673 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2674 break;
2675
2676 case kIOMapDefaultCache:
2677 default:
2678 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2679 break;
2680 }
2681
2682 vm_size_t unused = 0;
2683
2684 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2685 memEntryCacheMode, NULL, ref->sharedMem );
2686 if (KERN_SUCCESS != err)
2687 IOLog("MAP_MEM_ONLY failed %d\n", err);
2688
2689 err = mach_vm_map( map,
2690 &ref->mapped,
2691 ref->size, 0 /* mask */,
2692 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2693 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2694 ref->sharedMem, ref->sourceOffset,
2695 false, // copy
2696 prot, // cur
2697 prot, // max
2698 VM_INHERIT_NONE);
2699
2700 if( KERN_SUCCESS != err) {
2701 ref->mapped = 0;
2702 continue;
2703 }
2704 ref->map = map;
2705 }
2706 else
2707 {
2708 err = mach_vm_allocate(map, &ref->mapped, ref->size,
2709 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2710 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2711 if( KERN_SUCCESS != err) {
2712 ref->mapped = 0;
2713 continue;
2714 }
2715 ref->map = map;
2716 // we have to make sure that these pages don't get copied if we fork.
2717 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2718 assert( KERN_SUCCESS == err );
2719 }
2720 }
2721 while( false );
2722
2723 return( err );
2724 }
2725
2726 kern_return_t
2727 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2728 mach_vm_size_t offset,
2729 mach_vm_address_t * address, mach_vm_size_t length)
2730 {
2731 IOReturn err;
2732 IOMemoryDescriptorMapAllocRef ref;
2733
2734 ref.map = *map;
2735 ref.sharedMem = entry;
2736 ref.sourceOffset = trunc_page_64(offset);
2737 ref.options = options;
2738 ref.size = length;
2739
2740 if (options & kIOMapAnywhere)
2741 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2742 ref.mapped = 0;
2743 else
2744 ref.mapped = *address;
2745
2746 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2747 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2748 else
2749 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2750
2751 *address = ref.mapped;
2752 *map = ref.map;
2753
2754 return (err);
2755 }
2756
2757 kern_return_t
2758 IOMemoryDescriptorMapCopy(vm_map_t * map,
2759 IOOptionBits options,
2760 mach_vm_size_t offset,
2761 mach_vm_address_t * address, mach_vm_size_t length)
2762 {
2763 IOReturn err;
2764 IOMemoryDescriptorMapAllocRef ref;
2765
2766 ref.map = *map;
2767 ref.sharedMem = NULL;
2768 ref.sourceOffset = trunc_page_64(offset);
2769 ref.options = options;
2770 ref.size = length;
2771
2772 if (options & kIOMapAnywhere)
2773 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2774 ref.mapped = 0;
2775 else
2776 ref.mapped = *address;
2777
2778 if (ref.map == kernel_map)
2779 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2780 else
2781 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
2782
2783 *address = ref.mapped;
2784 *map = ref.map;
2785
2786 return (err);
2787 }
2788
2789 IOReturn IOMemoryDescriptor::doMap(
2790 vm_map_t __addressMap,
2791 IOVirtualAddress * __address,
2792 IOOptionBits options,
2793 IOByteCount __offset,
2794 IOByteCount __length )
2795 {
2796 #ifndef __LP64__
2797 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2798 #endif /* !__LP64__ */
2799
2800 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2801 mach_vm_size_t offset = mapping->fOffset + __offset;
2802 mach_vm_size_t length = mapping->fLength;
2803
2804 IOReturn err = kIOReturnSuccess;
2805 memory_object_t pager;
2806 mach_vm_size_t pageOffset;
2807 IOPhysicalAddress sourceAddr;
2808 unsigned int lock_count;
2809
2810 do
2811 {
2812 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2813 pageOffset = sourceAddr - trunc_page( sourceAddr );
2814
2815 if( reserved)
2816 pager = (memory_object_t) reserved->dp.devicePager;
2817 else
2818 pager = MACH_PORT_NULL;
2819
2820 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2821 {
2822 upl_t redirUPL2;
2823 vm_size_t size;
2824 int flags;
2825
2826 if (!_memEntry)
2827 {
2828 err = kIOReturnNotReadable;
2829 continue;
2830 }
2831
2832 size = round_page(mapping->fLength + pageOffset);
2833 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2834 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2835
2836 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2837 NULL, NULL,
2838 &flags))
2839 redirUPL2 = NULL;
2840
2841 for (lock_count = 0;
2842 IORecursiveLockHaveLock(gIOMemoryLock);
2843 lock_count++) {
2844 UNLOCK;
2845 }
2846 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2847 for (;
2848 lock_count;
2849 lock_count--) {
2850 LOCK;
2851 }
2852
2853 if (kIOReturnSuccess != err)
2854 {
2855 IOLog("upl_transpose(%x)\n", err);
2856 err = kIOReturnSuccess;
2857 }
2858
2859 if (redirUPL2)
2860 {
2861 upl_commit(redirUPL2, NULL, 0);
2862 upl_deallocate(redirUPL2);
2863 redirUPL2 = 0;
2864 }
2865 {
2866 // swap the memEntries since they now refer to different vm_objects
2867 void * me = _memEntry;
2868 _memEntry = mapping->fMemory->_memEntry;
2869 mapping->fMemory->_memEntry = me;
2870 }
2871 if (pager)
2872 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2873 }
2874 else
2875 {
2876 mach_vm_address_t address;
2877
2878 if (!(options & kIOMapAnywhere))
2879 {
2880 address = trunc_page_64(mapping->fAddress);
2881 if( (mapping->fAddress - address) != pageOffset)
2882 {
2883 err = kIOReturnVMError;
2884 continue;
2885 }
2886 }
2887
2888 vm_map_t map = mapping->fAddressMap;
2889 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2890 options, (kIOMemoryBufferPageable & _flags),
2891 offset, &address, round_page_64(length + pageOffset));
2892 if( err != KERN_SUCCESS)
2893 continue;
2894
2895 if (!_memEntry || pager)
2896 {
2897 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2898 if (err != KERN_SUCCESS)
2899 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2900 }
2901
2902 #if DEBUG
2903 if (kIOLogMapping & gIOKitDebug)
2904 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
2905 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
2906 #endif
2907
2908 if (err == KERN_SUCCESS)
2909 mapping->fAddress = address + pageOffset;
2910 else
2911 mapping->fAddress = NULL;
2912 }
2913 }
2914 while( false );
2915
2916 return (err);
2917 }
2918
2919 IOReturn IOMemoryDescriptor::handleFault(
2920 void * _pager,
2921 vm_map_t addressMap,
2922 mach_vm_address_t address,
2923 mach_vm_size_t sourceOffset,
2924 mach_vm_size_t length,
2925 IOOptionBits options )
2926 {
2927 IOReturn err = kIOReturnSuccess;
2928 memory_object_t pager = (memory_object_t) _pager;
2929 mach_vm_size_t size;
2930 mach_vm_size_t bytes;
2931 mach_vm_size_t page;
2932 mach_vm_size_t pageOffset;
2933 mach_vm_size_t pagerOffset;
2934 IOPhysicalLength segLen;
2935 addr64_t physAddr;
2936
2937 if( !addressMap)
2938 {
2939 if( kIOMemoryRedirected & _flags)
2940 {
2941 #if DEBUG
2942 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2943 #endif
2944 do {
2945 SLEEP;
2946 } while( kIOMemoryRedirected & _flags );
2947 }
2948
2949 return( kIOReturnSuccess );
2950 }
2951
2952 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2953 assert( physAddr );
2954 pageOffset = physAddr - trunc_page_64( physAddr );
2955 pagerOffset = sourceOffset;
2956
2957 size = length + pageOffset;
2958 physAddr -= pageOffset;
2959
2960 segLen += pageOffset;
2961 bytes = size;
2962 do
2963 {
2964 // in the middle of the loop only map whole pages
2965 if( segLen >= bytes)
2966 segLen = bytes;
2967 else if( segLen != trunc_page( segLen))
2968 err = kIOReturnVMError;
2969 if( physAddr != trunc_page_64( physAddr))
2970 err = kIOReturnBadArgument;
2971 if (kIOReturnSuccess != err)
2972 break;
2973
2974 #if DEBUG
2975 if( kIOLogMapping & gIOKitDebug)
2976 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2977 addressMap, address + pageOffset, physAddr + pageOffset,
2978 segLen - pageOffset);
2979 #endif
2980
2981
2982 if( pager) {
2983 if( reserved && reserved->dp.pagerContig) {
2984 IOPhysicalLength allLen;
2985 addr64_t allPhys;
2986
2987 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2988 assert( allPhys );
2989 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2990 }
2991 else
2992 {
2993
2994 for( page = 0;
2995 (page < segLen) && (KERN_SUCCESS == err);
2996 page += page_size)
2997 {
2998 err = device_pager_populate_object(pager, pagerOffset,
2999 (ppnum_t)(atop_64(physAddr + page)), page_size);
3000 pagerOffset += page_size;
3001 }
3002 }
3003 assert( KERN_SUCCESS == err );
3004 if( err)
3005 break;
3006 }
3007
3008 // This call to vm_fault causes an early pmap-level resolution
3009 // of the kernel mappings created above, since faulting them in
3010 // later cannot be done from interrupt level.
3011 /* *** ALERT *** */
3012 /* *** Temporary Workaround *** */
3013
3014 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3015 {
3016 vm_fault(addressMap,
3017 (vm_map_offset_t)address,
3018 VM_PROT_READ|VM_PROT_WRITE,
3019 FALSE, THREAD_UNINT, NULL,
3020 (vm_map_offset_t)0);
3021 }
3022
3023 /* *** Temporary Workaround *** */
3024 /* *** ALERT *** */
3025
3026 sourceOffset += segLen - pageOffset;
3027 address += segLen;
3028 bytes -= segLen;
3029 pageOffset = 0;
3030
3031 }
3032 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3033
3034 if (bytes)
3035 err = kIOReturnBadArgument;
3036
3037 return (err);
3038 }
3039
3040 IOReturn IOMemoryDescriptor::doUnmap(
3041 vm_map_t addressMap,
3042 IOVirtualAddress __address,
3043 IOByteCount __length )
3044 {
3045 IOReturn err;
3046 mach_vm_address_t address;
3047 mach_vm_size_t length;
3048
3049 if (__length)
3050 {
3051 address = __address;
3052 length = __length;
3053 }
3054 else
3055 {
3056 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3057 address = ((IOMemoryMap *) __address)->fAddress;
3058 length = ((IOMemoryMap *) __address)->fLength;
3059 }
3060
3061 if ((addressMap == kernel_map)
3062 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3063 addressMap = IOPageableMapForAddress( address );
3064
3065 #if DEBUG
3066 if( kIOLogMapping & gIOKitDebug)
3067 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3068 addressMap, address, length );
3069 #endif
3070
3071 err = mach_vm_deallocate( addressMap, address, length );
3072
3073 return (err);
3074 }
3075
3076 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3077 {
3078 IOReturn err = kIOReturnSuccess;
3079 IOMemoryMap * mapping = 0;
3080 OSIterator * iter;
3081
3082 LOCK;
3083
3084 if( doRedirect)
3085 _flags |= kIOMemoryRedirected;
3086 else
3087 _flags &= ~kIOMemoryRedirected;
3088
3089 do {
3090 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3091 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3092 mapping->redirect( safeTask, doRedirect );
3093
3094 iter->release();
3095 }
3096 } while( false );
3097
3098 if (!doRedirect)
3099 {
3100 WAKEUP;
3101 }
3102
3103 UNLOCK;
3104
3105 #ifndef __LP64__
3106 // temporary binary compatibility
3107 IOSubMemoryDescriptor * subMem;
3108 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3109 err = subMem->redirect( safeTask, doRedirect );
3110 else
3111 err = kIOReturnSuccess;
3112 #endif /* !__LP64__ */
3113
3114 return( err );
3115 }
3116
3117 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3118 {
3119 IOReturn err = kIOReturnSuccess;
3120
3121 if( fSuperMap) {
3122 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3123 } else {
3124
3125 LOCK;
3126
3127 do
3128 {
3129 if (!fAddress)
3130 break;
3131 if (!fAddressMap)
3132 break;
3133
3134 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3135 && (0 == (fOptions & kIOMapStatic)))
3136 {
3137 IOUnmapPages( fAddressMap, fAddress, fLength );
3138 err = kIOReturnSuccess;
3139 #if DEBUG
3140 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3141 #endif
3142 }
3143 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3144 {
3145 IOOptionBits newMode;
3146 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3147 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3148 }
3149 }
3150 while (false);
3151 UNLOCK;
3152 }
3153
3154 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3155 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3156 && safeTask
3157 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3158 fMemory->redirect(safeTask, doRedirect);
3159
3160 return( err );
3161 }
3162
3163 IOReturn IOMemoryMap::unmap( void )
3164 {
3165 IOReturn err;
3166
3167 LOCK;
3168
3169 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3170 && (0 == (fOptions & kIOMapStatic))) {
3171
3172 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3173
3174 } else
3175 err = kIOReturnSuccess;
3176
3177 if (fAddressMap)
3178 {
3179 vm_map_deallocate(fAddressMap);
3180 fAddressMap = 0;
3181 }
3182
3183 fAddress = 0;
3184
3185 UNLOCK;
3186
3187 return( err );
3188 }
3189
3190 void IOMemoryMap::taskDied( void )
3191 {
3192 LOCK;
3193 if (fUserClientUnmap)
3194 unmap();
3195 if( fAddressMap) {
3196 vm_map_deallocate(fAddressMap);
3197 fAddressMap = 0;
3198 }
3199 fAddressTask = 0;
3200 fAddress = 0;
3201 UNLOCK;
3202 }
3203
3204 IOReturn IOMemoryMap::userClientUnmap( void )
3205 {
3206 fUserClientUnmap = true;
3207 return (kIOReturnSuccess);
3208 }
3209
3210 // Overload the release mechanism. All mappings must be a member
3211 // of a memory descriptor's _mappings set. This means that we
3212 // always have 2 references on a mapping. When either of these
3213 // references is released we need to free ourselves.
3214 void IOMemoryMap::taggedRelease(const void *tag) const
3215 {
3216 LOCK;
3217 super::taggedRelease(tag, 2);
3218 UNLOCK;
3219 }
3220
3221 void IOMemoryMap::free()
3222 {
3223 unmap();
3224
3225 if (fMemory)
3226 {
3227 LOCK;
3228 fMemory->removeMapping(this);
3229 UNLOCK;
3230 fMemory->release();
3231 }
3232
3233 if (fOwner && (fOwner != fMemory))
3234 {
3235 LOCK;
3236 fOwner->removeMapping(this);
3237 UNLOCK;
3238 }
3239
3240 if (fSuperMap)
3241 fSuperMap->release();
3242
3243 if (fRedirUPL) {
3244 upl_commit(fRedirUPL, NULL, 0);
3245 upl_deallocate(fRedirUPL);
3246 }
3247
3248 super::free();
3249 }
3250
3251 IOByteCount IOMemoryMap::getLength()
3252 {
3253 return( fLength );
3254 }
3255
3256 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3257 {
3258 #ifndef __LP64__
3259 if (fSuperMap)
3260 fSuperMap->getVirtualAddress();
3261 else if (fAddressMap
3262 && vm_map_is_64bit(fAddressMap)
3263 && (sizeof(IOVirtualAddress) < 8))
3264 {
3265 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3266 }
3267 #endif /* !__LP64__ */
3268
3269 return (fAddress);
3270 }
3271
3272 #ifndef __LP64__
3273 mach_vm_address_t IOMemoryMap::getAddress()
3274 {
3275 return( fAddress);
3276 }
3277
3278 mach_vm_size_t IOMemoryMap::getSize()
3279 {
3280 return( fLength );
3281 }
3282 #endif /* !__LP64__ */
3283
3284
3285 task_t IOMemoryMap::getAddressTask()
3286 {
3287 if( fSuperMap)
3288 return( fSuperMap->getAddressTask());
3289 else
3290 return( fAddressTask);
3291 }
3292
3293 IOOptionBits IOMemoryMap::getMapOptions()
3294 {
3295 return( fOptions);
3296 }
3297
3298 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3299 {
3300 return( fMemory );
3301 }
3302
3303 IOMemoryMap * IOMemoryMap::copyCompatible(
3304 IOMemoryMap * newMapping )
3305 {
3306 task_t task = newMapping->getAddressTask();
3307 mach_vm_address_t toAddress = newMapping->fAddress;
3308 IOOptionBits _options = newMapping->fOptions;
3309 mach_vm_size_t _offset = newMapping->fOffset;
3310 mach_vm_size_t _length = newMapping->fLength;
3311
3312 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3313 return( 0 );
3314 if( (fOptions ^ _options) & kIOMapReadOnly)
3315 return( 0 );
3316 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3317 && ((fOptions ^ _options) & kIOMapCacheMask))
3318 return( 0 );
3319
3320 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3321 return( 0 );
3322
3323 if( _offset < fOffset)
3324 return( 0 );
3325
3326 _offset -= fOffset;
3327
3328 if( (_offset + _length) > fLength)
3329 return( 0 );
3330
3331 retain();
3332 if( (fLength == _length) && (!_offset))
3333 {
3334 newMapping = this;
3335 }
3336 else
3337 {
3338 newMapping->fSuperMap = this;
3339 newMapping->fOffset = fOffset + _offset;
3340 newMapping->fAddress = fAddress + _offset;
3341 }
3342
3343 return( newMapping );
3344 }
3345
3346 IOPhysicalAddress
3347 #ifdef __LP64__
3348 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3349 #else /* !__LP64__ */
3350 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3351 #endif /* !__LP64__ */
3352 {
3353 IOPhysicalAddress address;
3354
3355 LOCK;
3356 #ifdef __LP64__
3357 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3358 #else /* !__LP64__ */
3359 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3360 #endif /* !__LP64__ */
3361 UNLOCK;
3362
3363 return( address );
3364 }
3365
3366 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3367
3368 #undef super
3369 #define super OSObject
3370
3371 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3372
3373 void IOMemoryDescriptor::initialize( void )
3374 {
3375 if( 0 == gIOMemoryLock)
3376 gIOMemoryLock = IORecursiveLockAlloc();
3377
3378 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3379 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3380 gIOLastPage = IOGetLastPageNumber();
3381 }
3382
3383 void IOMemoryDescriptor::free( void )
3384 {
3385 if( _mappings)
3386 _mappings->release();
3387
3388 super::free();
3389 }
3390
3391 IOMemoryMap * IOMemoryDescriptor::setMapping(
3392 task_t intoTask,
3393 IOVirtualAddress mapAddress,
3394 IOOptionBits options )
3395 {
3396 return (createMappingInTask( intoTask, mapAddress,
3397 options | kIOMapStatic,
3398 0, getLength() ));
3399 }
3400
3401 IOMemoryMap * IOMemoryDescriptor::map(
3402 IOOptionBits options )
3403 {
3404 return (createMappingInTask( kernel_task, 0,
3405 options | kIOMapAnywhere,
3406 0, getLength() ));
3407 }
3408
3409 #ifndef __LP64__
3410 IOMemoryMap * IOMemoryDescriptor::map(
3411 task_t intoTask,
3412 IOVirtualAddress atAddress,
3413 IOOptionBits options,
3414 IOByteCount offset,
3415 IOByteCount length )
3416 {
3417 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3418 {
3419 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3420 return (0);
3421 }
3422
3423 return (createMappingInTask(intoTask, atAddress,
3424 options, offset, length));
3425 }
3426 #endif /* !__LP64__ */
3427
3428 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3429 task_t intoTask,
3430 mach_vm_address_t atAddress,
3431 IOOptionBits options,
3432 mach_vm_size_t offset,
3433 mach_vm_size_t length)
3434 {
3435 IOMemoryMap * result;
3436 IOMemoryMap * mapping;
3437
3438 if (0 == length)
3439 length = getLength();
3440
3441 mapping = new IOMemoryMap;
3442
3443 if( mapping
3444 && !mapping->init( intoTask, atAddress,
3445 options, offset, length )) {
3446 mapping->release();
3447 mapping = 0;
3448 }
3449
3450 if (mapping)
3451 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3452 else
3453 result = 0;
3454
3455 #if DEBUG
3456 if (!result)
3457 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3458 this, atAddress, (uint32_t) options, offset, length);
3459 #endif
3460
3461 return (result);
3462 }
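
/*
 * Illustrative sketch (editorial addition, not part of this source, guarded out
 * of the build): creating a kernel mapping with createMappingInTask() and
 * reading back the mapped address -- the path recommended over the legacy
 * map(task, ...) overload for 64-bit tasks. The helper name is hypothetical;
 * only calls defined in this file (createMappingInTask, getVirtualAddress,
 * getLength, release) are assumed.
 */
#if 0
static IOReturn ExampleMapIntoKernelTask(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->createMappingInTask(kernel_task,
                                                0,              // atAddress ignored with kIOMapAnywhere
                                                kIOMapAnywhere,
                                                0,              // offset
                                                0);             // 0 length maps the whole descriptor
    if (!map)
        return kIOReturnVMError;

    IOVirtualAddress addr = map->getVirtualAddress();
    IOByteCount      len  = map->getLength();
    // ... use (addr, len); the mapping stays valid until the map is released ...

    map->release();
    return kIOReturnSuccess;
}
#endif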
3463
3464 #ifndef __LP64__ // there is only a 64 bit version for LP64
3465 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3466 IOOptionBits options,
3467 IOByteCount offset)
3468 {
3469 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3470 }
3471 #endif
3472
3473 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3474 IOOptionBits options,
3475 mach_vm_size_t offset)
3476 {
3477 IOReturn err = kIOReturnSuccess;
3478 IOMemoryDescriptor * physMem = 0;
3479
3480 LOCK;
3481
3482 if (fAddress && fAddressMap) do
3483 {
3484 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3485 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3486 {
3487 physMem = fMemory;
3488 physMem->retain();
3489 }
3490
3491 if (!fRedirUPL)
3492 {
3493 vm_size_t size = round_page(fLength);
3494 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3495 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3496 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3497 NULL, NULL,
3498 &flags))
3499 fRedirUPL = 0;
3500
3501 if (physMem)
3502 {
3503 IOUnmapPages( fAddressMap, fAddress, fLength );
3504 if (false)
3505 physMem->redirect(0, true);
3506 }
3507 }
3508
3509 if (newBackingMemory)
3510 {
3511 if (newBackingMemory != fMemory)
3512 {
3513 fOffset = 0;
3514 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3515 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3516 offset, fLength))
3517 err = kIOReturnError;
3518 }
3519 if (fRedirUPL)
3520 {
3521 upl_commit(fRedirUPL, NULL, 0);
3522 upl_deallocate(fRedirUPL);
3523 fRedirUPL = 0;
3524 }
3525 if (false && physMem)
3526 physMem->redirect(0, false);
3527 }
3528 }
3529 while (false);
3530
3531 UNLOCK;
3532
3533 if (physMem)
3534 physMem->release();
3535
3536 return (err);
3537 }
3538
3539 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3540 IOMemoryDescriptor * owner,
3541 task_t __intoTask,
3542 IOVirtualAddress __address,
3543 IOOptionBits options,
3544 IOByteCount __offset,
3545 IOByteCount __length )
3546 {
3547 #ifndef __LP64__
3548 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3549 #endif /* !__LP64__ */
3550
3551 IOMemoryDescriptor * mapDesc = 0;
3552 IOMemoryMap * result = 0;
3553 OSIterator * iter;
3554
3555 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3556 mach_vm_size_t offset = mapping->fOffset + __offset;
3557 mach_vm_size_t length = mapping->fLength;
3558
3559 mapping->fOffset = offset;
3560
3561 LOCK;
3562
3563 do
3564 {
3565 if (kIOMapStatic & options)
3566 {
3567 result = mapping;
3568 addMapping(mapping);
3569 mapping->setMemoryDescriptor(this, 0);
3570 continue;
3571 }
3572
3573 if (kIOMapUnique & options)
3574 {
3575 addr64_t phys;
3576 IOByteCount physLen;
3577
3578 // if (owner != this) continue;
3579
3580 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3581 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3582 {
3583 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3584 if (!phys || (physLen < length))
3585 continue;
3586
3587 mapDesc = IOMemoryDescriptor::withAddressRange(
3588 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3589 if (!mapDesc)
3590 continue;
3591 offset = 0;
3592 mapping->fOffset = offset;
3593 }
3594 }
3595 else
3596 {
3597 // look for a compatible existing mapping
3598 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3599 {
3600 IOMemoryMap * lookMapping;
3601 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3602 {
3603 if ((result = lookMapping->copyCompatible(mapping)))
3604 {
3605 addMapping(result);
3606 result->setMemoryDescriptor(this, offset);
3607 break;
3608 }
3609 }
3610 iter->release();
3611 }
3612 if (result || (options & kIOMapReference))
3613 {
3614 if (result != mapping)
3615 {
3616 mapping->release();
3617 mapping = NULL;
3618 }
3619 continue;
3620 }
3621 }
3622
3623 if (!mapDesc)
3624 {
3625 mapDesc = this;
3626 mapDesc->retain();
3627 }
3628 IOReturn
3629 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3630 if (kIOReturnSuccess == kr)
3631 {
3632 result = mapping;
3633 mapDesc->addMapping(result);
3634 result->setMemoryDescriptor(mapDesc, offset);
3635 }
3636 else
3637 {
3638 mapping->release();
3639 mapping = NULL;
3640 }
3641 }
3642 while( false );
3643
3644 UNLOCK;
3645
3646 if (mapDesc)
3647 mapDesc->release();
3648
3649 return (result);
3650 }
3651
3652 void IOMemoryDescriptor::addMapping(
3653 IOMemoryMap * mapping )
3654 {
3655 if( mapping)
3656 {
3657 if( 0 == _mappings)
3658 _mappings = OSSet::withCapacity(1);
3659 if( _mappings )
3660 _mappings->setObject( mapping );
3661 }
3662 }
3663
3664 void IOMemoryDescriptor::removeMapping(
3665 IOMemoryMap * mapping )
3666 {
3667 if( _mappings)
3668 _mappings->removeObject( mapping);
3669 }
3670
3671 #ifndef __LP64__
3672 // obsolete initializers
3673 // - initWithOptions is the designated initializer
3674 bool
3675 IOMemoryDescriptor::initWithAddress(void * address,
3676 IOByteCount length,
3677 IODirection direction)
3678 {
3679 return( false );
3680 }
3681
3682 bool
3683 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3684 IOByteCount length,
3685 IODirection direction,
3686 task_t task)
3687 {
3688 return( false );
3689 }
3690
3691 bool
3692 IOMemoryDescriptor::initWithPhysicalAddress(
3693 IOPhysicalAddress address,
3694 IOByteCount length,
3695 IODirection direction )
3696 {
3697 return( false );
3698 }
3699
3700 bool
3701 IOMemoryDescriptor::initWithRanges(
3702 IOVirtualRange * ranges,
3703 UInt32 withCount,
3704 IODirection direction,
3705 task_t task,
3706 bool asReference)
3707 {
3708 return( false );
3709 }
3710
3711 bool
3712 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3713 UInt32 withCount,
3714 IODirection direction,
3715 bool asReference)
3716 {
3717 return( false );
3718 }
3719
3720 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3721 IOByteCount * lengthOfSegment)
3722 {
3723 return( 0 );
3724 }
3725 #endif /* !__LP64__ */
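
/*
 * Illustrative sketch (editorial addition, not part of this source, guarded out
 * of the build): rather than the obsolete initializers above, descriptors are
 * normally obtained through the factory methods; withAddressRange() is the one
 * used elsewhere in this file. The helper name and the task/address/length
 * values are hypothetical.
 */
#if 0
static IOMemoryDescriptor *
ExampleCreateDescriptor(mach_vm_address_t addr, mach_vm_size_t len, task_t task)
{
    // Describes [addr, addr + len) in 'task'; returns NULL on failure.
    return IOMemoryDescriptor::withAddressRange(addr, len, kIODirectionOutIn, task);
}
#endif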
3726
3727 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3728
3729 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3730 {
3731 OSSymbol const *keys[2];
3732 OSObject *values[2];
3733 struct SerData {
3734 user_addr_t address;
3735 user_size_t length;
3736 } *vcopy;
3737 unsigned int index, nRanges;
3738 bool result;
3739
3740 IOOptionBits type = _flags & kIOMemoryTypeMask;
3741
3742 if (s == NULL) return false;
3743 if (s->previouslySerialized(this)) return true;
3744
3745 // Pretend we are an array.
3746 if (!s->addXMLStartTag(this, "array")) return false;
3747
3748 nRanges = _rangesCount;
3749 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3750 if (vcopy == 0) return false;
3751
3752 keys[0] = OSSymbol::withCString("address");
3753 keys[1] = OSSymbol::withCString("length");
3754
3755 result = false;
3756 values[0] = values[1] = 0;
3757
3758 // From this point on we can 'goto bail' on error.
3759
3760 // Copy the volatile data so we don't have to allocate memory
3761 // while the lock is held.
3762 LOCK;
3763 if (nRanges == _rangesCount) {
3764 Ranges vec = _ranges;
3765 for (index = 0; index < nRanges; index++) {
3766 user_addr_t addr; IOByteCount len;
3767 getAddrLenForInd(addr, len, type, vec, index);
3768 vcopy[index].address = addr;
3769 vcopy[index].length = len;
3770 }
3771 } else {
3772 // The descriptor changed out from under us. Give up.
3773 UNLOCK;
3774 result = false;
3775 goto bail;
3776 }
3777 UNLOCK;
3778
3779 for (index = 0; index < nRanges; index++)
3780 {
3781 user_addr_t addr = vcopy[index].address;
3782 IOByteCount len = (IOByteCount) vcopy[index].length;
3783 values[0] =
3784 OSNumber::withNumber(addr, sizeof(addr) * 8);
3785 if (values[0] == 0) {
3786 result = false;
3787 goto bail;
3788 }
3789 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3790 if (values[1] == 0) {
3791 result = false;
3792 goto bail;
3793 }
3794 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3795 if (dict == 0) {
3796 result = false;
3797 goto bail;
3798 }
3799 values[0]->release();
3800 values[1]->release();
3801 values[0] = values[1] = 0;
3802
3803 result = dict->serialize(s);
3804 dict->release();
3805 if (!result) {
3806 goto bail;
3807 }
3808 }
3809 result = s->addXMLEndTag("array");
3810
3811 bail:
3812 if (values[0])
3813 values[0]->release();
3814 if (values[1])
3815 values[1]->release();
3816 if (keys[0])
3817 keys[0]->release();
3818 if (keys[1])
3819 keys[1]->release();
3820 if (vcopy)
3821 IOFree(vcopy, sizeof(SerData) * nRanges);
3822 return result;
3823 }
3824
3825 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3826
3827 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3828 #ifdef __LP64__
3829 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3830 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3831 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3832 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3833 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3834 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3835 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3836 #else /* !__LP64__ */
3837 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3838 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3839 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3840 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3841 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3842 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3843 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3844 #endif /* !__LP64__ */
3845 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3846 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3847 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3848 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3849 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3850 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3851 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3852 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3853
3854 /* ex-inline function implementation */
3855 IOPhysicalAddress
3856 IOMemoryDescriptor::getPhysicalAddress()
3857 { return( getPhysicalSegment( 0, 0 )); }