/* apple/xnu (tag xnu-1699.24.8) - iokit/Kernel/IOMemoryDescriptor.cpp */
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53
54 #include <libkern/c++/OSContainers.h>
55 #include <libkern/c++/OSDictionary.h>
56 #include <libkern/c++/OSArray.h>
57 #include <libkern/c++/OSSymbol.h>
58 #include <libkern/c++/OSNumber.h>
59
60 #include <sys/uio.h>
61
62 __BEGIN_DECLS
63 #include <vm/pmap.h>
64 #include <vm/vm_pageout.h>
65 #include <mach/memory_object_types.h>
66 #include <device/device_port.h>
67
68 #include <mach/vm_prot.h>
69 #include <mach/mach_vm.h>
70 #include <vm/vm_fault.h>
71 #include <vm/vm_protos.h>
72
73 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
74 extern void ipc_port_release_send(ipc_port_t port);
75
76 kern_return_t
77 memory_object_iopl_request(
78 ipc_port_t port,
79 memory_object_offset_t offset,
80 vm_size_t *upl_size,
81 upl_t *upl_ptr,
82 upl_page_info_array_t user_page_list,
83 unsigned int *page_list_count,
84 int *flags);
85
86 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
87
88 __END_DECLS
89
90 #define kIOMaximumMappedIOByteCount (512*1024*1024)
91
92 static IOMapper * gIOSystemMapper = NULL;
93
94 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
95
96 ppnum_t gIOLastPage;
97
98 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
99
100 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
101
102 #define super IOMemoryDescriptor
103
104 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
105
106 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
107
108 static IORecursiveLock * gIOMemoryLock;
109
110 #define LOCK IORecursiveLockLock( gIOMemoryLock)
111 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
112 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
113 #define WAKEUP \
114 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
115
116 #if 0
117 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
118 #else
119 #define DEBG(fmt, args...) {}
120 #endif
121
122 #define IOMD_DEBUG_DMAACTIVE 1
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 // Some data structures and accessor macros used by the initWithOptions()
127 // function.
128
129 enum ioPLBlockFlags {
130 kIOPLOnDevice = 0x00000001,
131 kIOPLExternUPL = 0x00000002,
132 };
133
134 struct typePersMDData
135 {
136 const IOGeneralMemoryDescriptor *fMD;
137 ipc_port_t fMemEntry;
138 };
139
140 struct ioPLBlock {
141 upl_t fIOPL;
142 vm_address_t fPageInfo; // Pointer to page list or index into it
143 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
144 ppnum_t fMappedBase; // Page number of first page in this iopl
145 unsigned int fPageOffset; // Offset within first page of iopl
146 unsigned int fFlags; // Flags
147 };
148
149 struct ioGMDData {
150 IOMapper *fMapper;
151 uint64_t fPreparationID;
152 unsigned int fPageCnt;
153 #if __LP64__
154 // align arrays to 8 bytes so following macros work
155 unsigned int fPad;
156 #endif
157 upl_page_info_t fPageList[1]; /* variable length */
158 ioPLBlock fBlocks[1]; /* variable length */
159 };
160
161 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
162 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
163 #define getNumIOPL(osd, d) \
164 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
165 #define getPageList(d) (&(d->fPageList[0]))
166 #define computeDataSize(p, u) \
167 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
168
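/*
 * Editor's sketch (not part of the original Apple source): the OSData that
 * backs an IOGeneralMemoryDescriptor is laid out as an ioGMDData header,
 * then fPageCnt upl_page_info_t entries, then one ioPLBlock per UPL, which
 * is exactly what computeDataSize() sizes. For a hypothetical descriptor
 * spanning 4 pages with 1 iopl:
 *
 *     size_t bytes = computeDataSize(4, 1);
 *     // == offsetof(ioGMDData, fPageList)
 *     //    + 4 * sizeof(upl_page_info_t)
 *     //    + 1 * sizeof(ioPLBlock)
 */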
169
170 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
171
172 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
173
174
175 extern "C" {
176
177 kern_return_t device_data_action(
178 uintptr_t device_handle,
179 ipc_port_t device_pager,
180 vm_prot_t protection,
181 vm_object_offset_t offset,
182 vm_size_t size)
183 {
184 struct ExpansionData {
185 void * devicePager;
186 unsigned int pagerContig:1;
187 unsigned int unused:31;
188 IOMemoryDescriptor * memory;
189 };
190 kern_return_t kr;
191 ExpansionData * ref = (ExpansionData *) device_handle;
192 IOMemoryDescriptor * memDesc;
193
194 LOCK;
195 memDesc = ref->memory;
196 if( memDesc)
197 {
198 memDesc->retain();
199 kr = memDesc->handleFault( device_pager, 0, 0,
200 offset, size, kIOMapDefaultCache /*?*/);
201 memDesc->release();
202 }
203 else
204 kr = KERN_ABORTED;
205 UNLOCK;
206
207 return( kr );
208 }
209
210 kern_return_t device_close(
211 uintptr_t device_handle)
212 {
213 struct ExpansionData {
214 void * devicePager;
215 unsigned int pagerContig:1;
216 unsigned int unused:31;
217 IOMemoryDescriptor * memory;
218 };
219 ExpansionData * ref = (ExpansionData *) device_handle;
220
221 IODelete( ref, ExpansionData, 1 );
222
223 return( kIOReturnSuccess );
224 }
225 }; // end extern "C"
226
227 // Note this inline function uses C++ reference arguments to return values.
228 // This means that pointers are not passed and NULLs don't have to be
229 // checked for, since a NULL reference is illegal.
230 static inline void
231 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
232 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
233 {
234 assert(kIOMemoryTypeUIO == type
235 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
236 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
237 if (kIOMemoryTypeUIO == type) {
238 user_size_t us;
239 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
240 }
241 #ifndef __LP64__
242 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
243 IOAddressRange cur = r.v64[ind];
244 addr = cur.address;
245 len = cur.length;
246 }
247 #endif /* !__LP64__ */
248 else {
249 IOVirtualRange cur = r.v[ind];
250 addr = cur.address;
251 len = cur.length;
252 }
253 }
254
255 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
256
257 IOMemoryDescriptor *
258 IOMemoryDescriptor::withAddress(void * address,
259 IOByteCount length,
260 IODirection direction)
261 {
262 return IOMemoryDescriptor::
263 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
264 }
265
266 #ifndef __LP64__
267 IOMemoryDescriptor *
268 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
269 IOByteCount length,
270 IODirection direction,
271 task_t task)
272 {
273 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
274 if (that)
275 {
276 if (that->initWithAddress(address, length, direction, task))
277 return that;
278
279 that->release();
280 }
281 return 0;
282 }
283 #endif /* !__LP64__ */
284
285 IOMemoryDescriptor *
286 IOMemoryDescriptor::withPhysicalAddress(
287 IOPhysicalAddress address,
288 IOByteCount length,
289 IODirection direction )
290 {
291 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
292 }
293
294 #ifndef __LP64__
295 IOMemoryDescriptor *
296 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
297 UInt32 withCount,
298 IODirection direction,
299 task_t task,
300 bool asReference)
301 {
302 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
303 if (that)
304 {
305 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
306 return that;
307
308 that->release();
309 }
310 return 0;
311 }
312 #endif /* !__LP64__ */
313
314 IOMemoryDescriptor *
315 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
316 mach_vm_size_t length,
317 IOOptionBits options,
318 task_t task)
319 {
320 IOAddressRange range = { address, length };
321 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
322 }
323
324 IOMemoryDescriptor *
325 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
326 UInt32 rangeCount,
327 IOOptionBits options,
328 task_t task)
329 {
330 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
331 if (that)
332 {
333 if (task)
334 options |= kIOMemoryTypeVirtual64;
335 else
336 options |= kIOMemoryTypePhysical64;
337
338 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
339 return that;
340
341 that->release();
342 }
343
344 return 0;
345 }
346
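/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): a typical client wraps an existing buffer with
 * ::withAddressRange(), prepares it for I/O, and releases it when done.
 * "buf" and "len" are hypothetical caller-supplied values.
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         (mach_vm_address_t) buf, len, kIODirectionOutIn, kernel_task);
 *     if (md && (kIOReturnSuccess == md->prepare()))
 *     {
 *         // ... DMA or getPhysicalSegment() walks go here ...
 *         md->complete();
 *     }
 *     if (md)
 *         md->release();
 */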
347
348 /*
349 * withOptions:
350 *
351 * Create a new IOMemoryDescriptor. The buffer is made up of several
352 * virtual address ranges, from a given task.
353 *
354 * Passing the ranges as a reference will avoid an extra allocation.
355 */
356 IOMemoryDescriptor *
357 IOMemoryDescriptor::withOptions(void * buffers,
358 UInt32 count,
359 UInt32 offset,
360 task_t task,
361 IOOptionBits opts,
362 IOMapper * mapper)
363 {
364 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
365
366 if (self
367 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
368 {
369 self->release();
370 return 0;
371 }
372
373 return self;
374 }
375
376 bool IOMemoryDescriptor::initWithOptions(void * buffers,
377 UInt32 count,
378 UInt32 offset,
379 task_t task,
380 IOOptionBits options,
381 IOMapper * mapper)
382 {
383 return( false );
384 }
385
386 #ifndef __LP64__
387 IOMemoryDescriptor *
388 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
389 UInt32 withCount,
390 IODirection direction,
391 bool asReference)
392 {
393 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
394 if (that)
395 {
396 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
397 return that;
398
399 that->release();
400 }
401 return 0;
402 }
403
404 IOMemoryDescriptor *
405 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
406 IOByteCount offset,
407 IOByteCount length,
408 IODirection direction)
409 {
410 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
411 }
412 #endif /* !__LP64__ */
413
414 IOMemoryDescriptor *
415 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
416 {
417 IOGeneralMemoryDescriptor *origGenMD =
418 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
419
420 if (origGenMD)
421 return IOGeneralMemoryDescriptor::
422 withPersistentMemoryDescriptor(origGenMD);
423 else
424 return 0;
425 }
426
427 IOMemoryDescriptor *
428 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
429 {
430 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
431
432 if (!sharedMem)
433 return 0;
434
435 if (sharedMem == originalMD->_memEntry) {
436 originalMD->retain(); // Add a new reference to ourselves
437 ipc_port_release_send(sharedMem); // Remove extra send right
438 return originalMD;
439 }
440
441 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
442 typePersMDData initData = { originalMD, sharedMem };
443
444 if (self
445 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
446 self->release();
447 self = 0;
448 }
449 return self;
450 }
451
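/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): a descriptor created with kIOMemoryPersistent can be "copied" so
 * that the copy references the same underlying memory via a named entry; if
 * the backing memory is unchanged, the original is simply returned retained.
 * "md" is a hypothetical persistent IOMemoryDescriptor.
 *
 *     IOMemoryDescriptor * persistentCopy =
 *         IOMemoryDescriptor::withPersistentMemoryDescriptor(md);
 *     if (persistentCopy)
 *     {
 *         // ... use persistentCopy in place of md ...
 *         persistentCopy->release();
 *     }
 */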
452 void *IOGeneralMemoryDescriptor::createNamedEntry()
453 {
454 kern_return_t error;
455 ipc_port_t sharedMem;
456
457 IOOptionBits type = _flags & kIOMemoryTypeMask;
458
459 user_addr_t range0Addr;
460 IOByteCount range0Len;
461 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
462 range0Addr = trunc_page_64(range0Addr);
463
464 vm_size_t size = ptoa_32(_pages);
465 vm_address_t kernelPage = (vm_address_t) range0Addr;
466
467 vm_map_t theMap = ((_task == kernel_task)
468 && (kIOMemoryBufferPageable & _flags))
469 ? IOPageableMapForAddress(kernelPage)
470 : get_task_map(_task);
471
472 memory_object_size_t actualSize = size;
473 vm_prot_t prot = VM_PROT_READ;
474 if (kIODirectionOut != (kIODirectionOutIn & _flags))
475 prot |= VM_PROT_WRITE;
476
477 if (_memEntry)
478 prot |= MAP_MEM_NAMED_REUSE;
479
480 error = mach_make_memory_entry_64(theMap,
481 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
482
483 if (KERN_SUCCESS == error) {
484 if (actualSize == size) {
485 return sharedMem;
486 } else {
487 #if IOASSERT
488 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
489 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
490 #endif
491 ipc_port_release_send( sharedMem );
492 }
493 }
494
495 return MACH_PORT_NULL;
496 }
497
498 #ifndef __LP64__
499 bool
500 IOGeneralMemoryDescriptor::initWithAddress(void * address,
501 IOByteCount withLength,
502 IODirection withDirection)
503 {
504 _singleRange.v.address = (vm_offset_t) address;
505 _singleRange.v.length = withLength;
506
507 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
508 }
509
510 bool
511 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
512 IOByteCount withLength,
513 IODirection withDirection,
514 task_t withTask)
515 {
516 _singleRange.v.address = address;
517 _singleRange.v.length = withLength;
518
519 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
520 }
521
522 bool
523 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
524 IOPhysicalAddress address,
525 IOByteCount withLength,
526 IODirection withDirection )
527 {
528 _singleRange.p.address = address;
529 _singleRange.p.length = withLength;
530
531 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
532 }
533
534 bool
535 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
536 IOPhysicalRange * ranges,
537 UInt32 count,
538 IODirection direction,
539 bool reference)
540 {
541 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
542
543 if (reference)
544 mdOpts |= kIOMemoryAsReference;
545
546 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
547 }
548
549 bool
550 IOGeneralMemoryDescriptor::initWithRanges(
551 IOVirtualRange * ranges,
552 UInt32 count,
553 IODirection direction,
554 task_t task,
555 bool reference)
556 {
557 IOOptionBits mdOpts = direction;
558
559 if (reference)
560 mdOpts |= kIOMemoryAsReference;
561
562 if (task) {
563 mdOpts |= kIOMemoryTypeVirtual;
564
565 // Auto-prepare if this is a kernel memory descriptor, as very few
566 // clients bother to prepare() kernel memory.
567 // This was never enforced, though, so auto-prepare remains a best-effort default.
568 if (task == kernel_task)
569 mdOpts |= kIOMemoryAutoPrepare;
570 }
571 else
572 mdOpts |= kIOMemoryTypePhysical;
573
574 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
575 }
576 #endif /* !__LP64__ */
577
578 /*
579 * initWithOptions:
580 *
581 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
582 * address ranges from a given task, several physical ranges, a UPL from the
583 * ubc system, or a uio (possibly 64-bit) from the BSD subsystem.
584 *
585 * Passing the ranges as a reference will avoid an extra allocation.
586 *
587 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
588 * existing instance -- note this behavior is not commonly supported in other
589 * I/O Kit classes, although it is supported here.
590 */
591
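/*
 * Re-initialisation sketch (editor's illustration, not part of the original
 * Apple source): because initWithOptions() may be called again on a live
 * instance, a descriptor can be retargeted at a new range without
 * reallocating the object. "md", "range", "newAddress", "newLength" and
 * "task" are hypothetical caller values; md is an IOGeneralMemoryDescriptor.
 *
 *     IOAddressRange range = { newAddress, newLength };
 *     if (!md->initWithOptions(&range, 1, 0, task,
 *                              kIOMemoryTypeVirtual64 | kIODirectionOutIn,
 *                              0))              // no explicit mapper
 *     {
 *         // re-initialisation failed; prior state has been cleaned up
 *     }
 */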
592 bool
593 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
594 UInt32 count,
595 UInt32 offset,
596 task_t task,
597 IOOptionBits options,
598 IOMapper * mapper)
599 {
600 IOOptionBits type = options & kIOMemoryTypeMask;
601
602 #ifndef __LP64__
603 if (task
604 && (kIOMemoryTypeVirtual == type)
605 && vm_map_is_64bit(get_task_map(task))
606 && ((IOVirtualRange *) buffers)->address)
607 {
608 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
609 return false;
610 }
611 #endif /* !__LP64__ */
612
613 // Grab the original MD's configuration data to initialise the
614 // arguments to this function.
615 if (kIOMemoryTypePersistentMD == type) {
616
617 typePersMDData *initData = (typePersMDData *) buffers;
618 const IOGeneralMemoryDescriptor *orig = initData->fMD;
619 ioGMDData *dataP = getDataP(orig->_memoryEntries);
620
621 // Only accept persistent memory descriptors with valid dataP data.
622 assert(orig->_rangesCount == 1);
623 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
624 return false;
625
626 _memEntry = initData->fMemEntry; // Grab the new named entry
627 options = orig->_flags & ~kIOMemoryAsReference;
628 type = options & kIOMemoryTypeMask;
629 buffers = orig->_ranges.v;
630 count = orig->_rangesCount;
631
632 // Now grab the original task and whatever mapper was previously used
633 task = orig->_task;
634 mapper = dataP->fMapper;
635
636 // We are ready to go through the original initialisation now
637 }
638
639 switch (type) {
640 case kIOMemoryTypeUIO:
641 case kIOMemoryTypeVirtual:
642 #ifndef __LP64__
643 case kIOMemoryTypeVirtual64:
644 #endif /* !__LP64__ */
645 assert(task);
646 if (!task)
647 return false;
648 break;
649
650 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
651 #ifndef __LP64__
652 case kIOMemoryTypePhysical64:
653 #endif /* !__LP64__ */
654 case kIOMemoryTypeUPL:
655 assert(!task);
656 break;
657 default:
658 return false; /* bad argument */
659 }
660
661 assert(buffers);
662 assert(count);
663
664 /*
665 * We can check the _initialized instance variable before having ever set
666 * it to an initial value because I/O Kit guarantees that all our instance
667 * variables are zeroed on an object's allocation.
668 */
669
670 if (_initialized) {
671 /*
672 * An existing memory descriptor is being retargeted to point to
673 * somewhere else. Clean up our present state.
674 */
675 IOOptionBits type = _flags & kIOMemoryTypeMask;
676 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
677 {
678 while (_wireCount)
679 complete();
680 }
681 if (_ranges.v && !(kIOMemoryAsReference & _flags))
682 {
683 if (kIOMemoryTypeUIO == type)
684 uio_free((uio_t) _ranges.v);
685 #ifndef __LP64__
686 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
687 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
688 #endif /* !__LP64__ */
689 else
690 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
691 }
692
693 if (_memEntry)
694 {
695 ipc_port_release_send((ipc_port_t) _memEntry);
696 _memEntry = 0;
697 }
698 if (_mappings)
699 _mappings->flushCollection();
700 }
701 else {
702 if (!super::init())
703 return false;
704 _initialized = true;
705 }
706
707 // Grab the appropriate mapper
708 if (kIOMemoryMapperNone & options)
709 mapper = 0; // No Mapper
710 else if (mapper == kIOMapperSystem) {
711 IOMapper::checkForSystemMapper();
712 gIOSystemMapper = mapper = IOMapper::gSystem;
713 }
714
715 // Temp binary compatibility for kIOMemoryThreadSafe
716 if (kIOMemoryReserved6156215 & options)
717 {
718 options &= ~kIOMemoryReserved6156215;
719 options |= kIOMemoryThreadSafe;
720 }
721 // Remove the dynamic internal use flags from the initial setting
722 options &= ~(kIOMemoryPreparedReadOnly);
723 _flags = options;
724 _task = task;
725
726 #ifndef __LP64__
727 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
728 #endif /* !__LP64__ */
729
730 __iomd_reservedA = 0;
731 __iomd_reservedB = 0;
732 _highestPage = 0;
733
734 if (kIOMemoryThreadSafe & options)
735 {
736 if (!_prepareLock)
737 _prepareLock = IOLockAlloc();
738 }
739 else if (_prepareLock)
740 {
741 IOLockFree(_prepareLock);
742 _prepareLock = NULL;
743 }
744
745 if (kIOMemoryTypeUPL == type) {
746
747 ioGMDData *dataP;
748 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
749
750 if (!_memoryEntries) {
751 _memoryEntries = OSData::withCapacity(dataSize);
752 if (!_memoryEntries)
753 return false;
754 }
755 else if (!_memoryEntries->initWithCapacity(dataSize))
756 return false;
757
758 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
759 dataP = getDataP(_memoryEntries);
760 dataP->fMapper = mapper;
761 dataP->fPageCnt = 0;
762
763 // _wireCount++; // UPLs start out life wired
764
765 _length = count;
766 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
767
768 ioPLBlock iopl;
769 iopl.fIOPL = (upl_t) buffers;
770 upl_set_referenced(iopl.fIOPL, true);
771 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
772
773 if (upl_get_size(iopl.fIOPL) < (count + offset))
774 panic("short external upl");
775
776 // Set the kIOPLOnDevice flag (conveniently equal to 1)
777 iopl.fFlags = pageList->device | kIOPLExternUPL;
778 iopl.fIOMDOffset = 0;
779
780 _highestPage = upl_get_highest_page(iopl.fIOPL);
781
782 if (!pageList->device) {
783 // Pre-compute the offset into the UPL's page list
784 pageList = &pageList[atop_32(offset)];
785 offset &= PAGE_MASK;
786 if (mapper) {
787 iopl.fMappedBase = mapper->iovmAlloc(_pages);
788 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
789 }
790 else
791 iopl.fMappedBase = 0;
792 }
793 else
794 iopl.fMappedBase = 0;
795 iopl.fPageInfo = (vm_address_t) pageList;
796 iopl.fPageOffset = offset;
797
798 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
799 }
800 else {
801 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
802 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
803
804 // Initialize the memory descriptor
805 if (options & kIOMemoryAsReference) {
806 #ifndef __LP64__
807 _rangesIsAllocated = false;
808 #endif /* !__LP64__ */
809
810 // Hack assignment to get the buffer arg into _ranges.
811 // I'd prefer to do _ranges = (Ranges) buffers, but C++ does not
812 // allow that conversion.
813 // This also initialises the uio & physical ranges.
814 _ranges.v = (IOVirtualRange *) buffers;
815 }
816 else {
817 #ifndef __LP64__
818 _rangesIsAllocated = true;
819 #endif /* !__LP64__ */
820 switch (type)
821 {
822 case kIOMemoryTypeUIO:
823 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
824 break;
825
826 #ifndef __LP64__
827 case kIOMemoryTypeVirtual64:
828 case kIOMemoryTypePhysical64:
829 if (count == 1
830 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
831 ) {
832 if (kIOMemoryTypeVirtual64 == type)
833 type = kIOMemoryTypeVirtual;
834 else
835 type = kIOMemoryTypePhysical;
836 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
837 _rangesIsAllocated = false;
838 _ranges.v = &_singleRange.v;
839 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
840 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
841 break;
842 }
843 _ranges.v64 = IONew(IOAddressRange, count);
844 if (!_ranges.v64)
845 return false;
846 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
847 break;
848 #endif /* !__LP64__ */
849 case kIOMemoryTypeVirtual:
850 case kIOMemoryTypePhysical:
851 if (count == 1) {
852 _flags |= kIOMemoryAsReference;
853 #ifndef __LP64__
854 _rangesIsAllocated = false;
855 #endif /* !__LP64__ */
856 _ranges.v = &_singleRange.v;
857 } else {
858 _ranges.v = IONew(IOVirtualRange, count);
859 if (!_ranges.v)
860 return false;
861 }
862 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
863 break;
864 }
865 }
866
867 // Find starting address within the vector of ranges
868 Ranges vec = _ranges;
869 UInt32 length = 0;
870 UInt32 pages = 0;
871 for (unsigned ind = 0; ind < count; ind++) {
872 user_addr_t addr;
873 IOPhysicalLength len;
874
875 // addr & len are returned by this function
876 getAddrLenForInd(addr, len, type, vec, ind);
877 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
878 len += length;
879 assert(len >= length); // Check for 32 bit wrap around
880 length = len;
881
882 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
883 {
884 ppnum_t highPage = atop_64(addr + len - 1);
885 if (highPage > _highestPage)
886 _highestPage = highPage;
887 }
888 }
889 _length = length;
890 _pages = pages;
891 _rangesCount = count;
892
893 // Auto-prepare memory at creation time.
894 // Completion is implied when the descriptor is freed.
895 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
896 _wireCount++; // Physical MDs are, by definition, wired
897 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
898 ioGMDData *dataP;
899 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
900
901 if (!_memoryEntries) {
902 _memoryEntries = OSData::withCapacity(dataSize);
903 if (!_memoryEntries)
904 return false;
905 }
906 else if (!_memoryEntries->initWithCapacity(dataSize))
907 return false;
908
909 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
910 dataP = getDataP(_memoryEntries);
911 dataP->fMapper = mapper;
912 dataP->fPageCnt = _pages;
913
914 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
915 _memEntry = createNamedEntry();
916
917 if ((_flags & kIOMemoryAutoPrepare)
918 && prepare() != kIOReturnSuccess)
919 return false;
920 }
921 }
922
923 return true;
924 }
925
926 /*
927 * free
928 *
929 * Free resources.
930 */
931 void IOGeneralMemoryDescriptor::free()
932 {
933 IOOptionBits type = _flags & kIOMemoryTypeMask;
934
935 if( reserved)
936 {
937 LOCK;
938 reserved->memory = 0;
939 UNLOCK;
940 }
941
942 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
943 {
944 while (_wireCount)
945 complete();
946 }
947 if (_memoryEntries)
948 _memoryEntries->release();
949
950 if (_ranges.v && !(kIOMemoryAsReference & _flags))
951 {
952 if (kIOMemoryTypeUIO == type)
953 uio_free((uio_t) _ranges.v);
954 #ifndef __LP64__
955 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
956 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
957 #endif /* !__LP64__ */
958 else
959 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
960
961 _ranges.v = NULL;
962 }
963
964 if (reserved && reserved->devicePager)
965 device_pager_deallocate( (memory_object_t) reserved->devicePager );
966
967 // memEntry holds a ref on the device pager which owns reserved
968 // (ExpansionData) so no reserved access after this point
969 if (_memEntry)
970 ipc_port_release_send( (ipc_port_t) _memEntry );
971
972 if (_prepareLock)
973 IOLockFree(_prepareLock);
974
975 super::free();
976 }
977
978 #ifndef __LP64__
979 void IOGeneralMemoryDescriptor::unmapFromKernel()
980 {
981 panic("IOGMD::unmapFromKernel deprecated");
982 }
983
984 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
985 {
986 panic("IOGMD::mapIntoKernel deprecated");
987 }
988 #endif /* !__LP64__ */
989
990 /*
991 * getDirection:
992 *
993 * Get the direction of the transfer.
994 */
995 IODirection IOMemoryDescriptor::getDirection() const
996 {
997 #ifndef __LP64__
998 if (_direction)
999 return _direction;
1000 #endif /* !__LP64__ */
1001 return (IODirection) (_flags & kIOMemoryDirectionMask);
1002 }
1003
1004 /*
1005 * getLength:
1006 *
1007 * Get the length of the transfer (over all ranges).
1008 */
1009 IOByteCount IOMemoryDescriptor::getLength() const
1010 {
1011 return _length;
1012 }
1013
1014 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1015 {
1016 _tag = tag;
1017 }
1018
1019 IOOptionBits IOMemoryDescriptor::getTag( void )
1020 {
1021 return( _tag);
1022 }
1023
1024 #ifndef __LP64__
1025 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1026 IOPhysicalAddress
1027 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1028 {
1029 addr64_t physAddr = 0;
1030
1031 if( prepare() == kIOReturnSuccess) {
1032 physAddr = getPhysicalSegment64( offset, length );
1033 complete();
1034 }
1035
1036 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1037 }
1038 #endif /* !__LP64__ */
1039
1040 IOByteCount IOMemoryDescriptor::readBytes
1041 (IOByteCount offset, void *bytes, IOByteCount length)
1042 {
1043 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1044 IOByteCount remaining;
1045
1046 // Assert that this entire I/O is within the available range
1047 assert(offset < _length);
1048 assert(offset + length <= _length);
1049 if (offset >= _length) {
1050 return 0;
1051 }
1052
1053 if (kIOMemoryThreadSafe & _flags)
1054 LOCK;
1055
1056 remaining = length = min(length, _length - offset);
1057 while (remaining) { // (process another target segment?)
1058 addr64_t srcAddr64;
1059 IOByteCount srcLen;
1060
1061 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1062 if (!srcAddr64)
1063 break;
1064
1065 // Clip segment length to remaining
1066 if (srcLen > remaining)
1067 srcLen = remaining;
1068
1069 copypv(srcAddr64, dstAddr, srcLen,
1070 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1071
1072 dstAddr += srcLen;
1073 offset += srcLen;
1074 remaining -= srcLen;
1075 }
1076
1077 if (kIOMemoryThreadSafe & _flags)
1078 UNLOCK;
1079
1080 assert(!remaining);
1081
1082 return length - remaining;
1083 }
1084
1085 IOByteCount IOMemoryDescriptor::writeBytes
1086 (IOByteCount offset, const void *bytes, IOByteCount length)
1087 {
1088 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1089 IOByteCount remaining;
1090
1091 // Assert that this entire I/O is within the available range
1092 assert(offset < _length);
1093 assert(offset + length <= _length);
1094
1095 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1096
1097 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1098 return 0;
1099 }
1100
1101 if (kIOMemoryThreadSafe & _flags)
1102 LOCK;
1103
1104 remaining = length = min(length, _length - offset);
1105 while (remaining) { // (process another target segment?)
1106 addr64_t dstAddr64;
1107 IOByteCount dstLen;
1108
1109 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1110 if (!dstAddr64)
1111 break;
1112
1113 // Clip segment length to remaining
1114 if (dstLen > remaining)
1115 dstLen = remaining;
1116
1117 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1118 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1119
1120 srcAddr += dstLen;
1121 offset += dstLen;
1122 remaining -= dstLen;
1123 }
1124
1125 if (kIOMemoryThreadSafe & _flags)
1126 UNLOCK;
1127
1128 assert(!remaining);
1129
1130 return length - remaining;
1131 }
1132
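/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): readBytes()/writeBytes() copy between the descriptor's backing
 * memory and a kernel buffer, clipped to the descriptor's length. "md" is a
 * hypothetical prepared IOMemoryDescriptor.
 *
 *     char        local[64];
 *     IOByteCount copied;
 *
 *     copied = md->readBytes(0, local, sizeof(local));     // md -> local
 *     copied = md->writeBytes(0, local, sizeof(local));    // local -> md
 *     // both return the number of bytes actually transferred
 */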
1133 // osfmk/device/iokit_rpc.c
1134 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1135
1136 #ifndef __LP64__
1137 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1138 {
1139 panic("IOGMD::setPosition deprecated");
1140 }
1141 #endif /* !__LP64__ */
1142
1143 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1144
1145 uint64_t
1146 IOGeneralMemoryDescriptor::getPreparationID( void )
1147 {
1148 ioGMDData *dataP;
1149
1150 if (!_wireCount)
1151 return (kIOPreparationIDUnprepared);
1152
1153 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
1154 return (kIOPreparationIDAlwaysPrepared);
1155
1156 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1157 return (kIOPreparationIDUnprepared);
1158
1159 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1160 {
1161 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1162 }
1163 return (dataP->fPreparationID);
1164 }
1165
1166 uint64_t
1167 IOMemoryDescriptor::getPreparationID( void )
1168 {
1169 return (kIOPreparationIDUnsupported);
1170 }
1171
1172 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1173 {
1174 if (kIOMDGetCharacteristics == op) {
1175
1176 if (dataSize < sizeof(IOMDDMACharacteristics))
1177 return kIOReturnUnderrun;
1178
1179 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1180 data->fLength = _length;
1181 data->fSGCount = _rangesCount;
1182 data->fPages = _pages;
1183 data->fDirection = getDirection();
1184 if (!_wireCount)
1185 data->fIsPrepared = false;
1186 else {
1187 data->fIsPrepared = true;
1188 data->fHighestPage = _highestPage;
1189 if (_memoryEntries) {
1190 ioGMDData *gmdData = getDataP(_memoryEntries);
1191 ioPLBlock *ioplList = getIOPLList(gmdData);
1192 UInt count = getNumIOPL(_memoryEntries, gmdData);
1193
1194 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1195 && ioplList[0].fMappedBase);
1196 if (count == 1)
1197 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1198 }
1199 else
1200 data->fIsMapped = false;
1201 }
1202
1203 return kIOReturnSuccess;
1204
1205 #if IOMD_DEBUG_DMAACTIVE
1206 } else if (kIOMDSetDMAActive == op) {
1207 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1208 OSIncrementAtomic(&md->__iomd_reservedA);
1209 } else if (kIOMDSetDMAInactive == op) {
1210 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1211 if (md->__iomd_reservedA)
1212 OSDecrementAtomic(&md->__iomd_reservedA);
1213 else
1214 panic("kIOMDSetDMAInactive");
1215 #endif /* IOMD_DEBUG_DMAACTIVE */
1216
1217 } else if (!(kIOMDWalkSegments & op))
1218 return kIOReturnBadArgument;
1219
1220 // Get the next segment
1221 struct InternalState {
1222 IOMDDMAWalkSegmentArgs fIO;
1223 UInt fOffset2Index;
1224 UInt fIndex;
1225 UInt fNextOffset;
1226 } *isP;
1227
1228 // Find the next segment
1229 if (dataSize < sizeof(*isP))
1230 return kIOReturnUnderrun;
1231
1232 isP = (InternalState *) vData;
1233 UInt offset = isP->fIO.fOffset;
1234 bool mapped = isP->fIO.fMapped;
1235
1236 if (offset >= _length)
1237 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1238
1239 // Validate the previous offset
1240 UInt ind, off2Ind = isP->fOffset2Index;
1241 if ((kIOMDFirstSegment != op)
1242 && offset
1243 && (offset == isP->fNextOffset || off2Ind <= offset))
1244 ind = isP->fIndex;
1245 else
1246 ind = off2Ind = 0; // Start from beginning
1247
1248 UInt length;
1249 UInt64 address;
1250 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1251
1252 // Physical address based memory descriptor
1253 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1254
1255 // Find the range after the one that contains the offset
1256 mach_vm_size_t len;
1257 for (len = 0; off2Ind <= offset; ind++) {
1258 len = physP[ind].length;
1259 off2Ind += len;
1260 }
1261
1262 // Calculate length within range and starting address
1263 length = off2Ind - offset;
1264 address = physP[ind - 1].address + len - length;
1265
1266 // see how far we can coalesce ranges
1267 while (ind < _rangesCount && address + length == physP[ind].address) {
1268 len = physP[ind].length;
1269 length += len;
1270 off2Ind += len;
1271 ind++;
1272 }
1273
1274 // correct contiguous check overshoot
1275 ind--;
1276 off2Ind -= len;
1277 }
1278 #ifndef __LP64__
1279 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1280
1281 // Physical address based memory descriptor
1282 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1283
1284 // Find the range after the one that contains the offset
1285 mach_vm_size_t len;
1286 for (len = 0; off2Ind <= offset; ind++) {
1287 len = physP[ind].length;
1288 off2Ind += len;
1289 }
1290
1291 // Calculate length within range and starting address
1292 length = off2Ind - offset;
1293 address = physP[ind - 1].address + len - length;
1294
1295 // see how far we can coalesce ranges
1296 while (ind < _rangesCount && address + length == physP[ind].address) {
1297 len = physP[ind].length;
1298 length += len;
1299 off2Ind += len;
1300 ind++;
1301 }
1302
1303 // correct contiguous check overshoot
1304 ind--;
1305 off2Ind -= len;
1306 }
1307 #endif /* !__LP64__ */
1308 else do {
1309 if (!_wireCount)
1310 panic("IOGMD: not wired for the IODMACommand");
1311
1312 assert(_memoryEntries);
1313
1314 ioGMDData * dataP = getDataP(_memoryEntries);
1315 const ioPLBlock *ioplList = getIOPLList(dataP);
1316 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1317 upl_page_info_t *pageList = getPageList(dataP);
1318
1319 assert(numIOPLs > 0);
1320
1321 // Scan through iopl info blocks looking for block containing offset
1322 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1323 ind++;
1324
1325 // Go back to the actual range, as the search goes one past it
1326 ioPLBlock ioplInfo = ioplList[ind - 1];
1327 off2Ind = ioplInfo.fIOMDOffset;
1328
1329 if (ind < numIOPLs)
1330 length = ioplList[ind].fIOMDOffset;
1331 else
1332 length = _length;
1333 length -= offset; // Remainder within iopl
1334
1335 // Subtract this iopl's offset within the total list
1336 offset -= off2Ind;
1337
1338 // If a mapped address is requested and this is a pre-mapped IOPL
1339 // then we just need to compute an offset relative to the mapped base.
1340 if (mapped && ioplInfo.fMappedBase) {
1341 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1342 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1343 continue; // Done; leave the do/while(false) now
1344 }
1345
1346 // The offset is rebased into the current iopl.
1347 // Now add the iopl 1st page offset.
1348 offset += ioplInfo.fPageOffset;
1349
1350 // For external UPLs the fPageInfo field points directly to
1351 // the upl's upl_page_info_t array.
1352 if (ioplInfo.fFlags & kIOPLExternUPL)
1353 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1354 else
1355 pageList = &pageList[ioplInfo.fPageInfo];
1356
1357 // Check for direct device non-paged memory
1358 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1359 address = ptoa_64(pageList->phys_addr) + offset;
1360 continue; // Done; leave the do/while(false) now
1361 }
1362
1363 // Now we need to compute the index into the pageList
1364 UInt pageInd = atop_32(offset);
1365 offset &= PAGE_MASK;
1366
1367 // Compute the starting address of this segment
1368 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1369 if (!pageAddr) {
1370 panic("!pageList phys_addr");
1371 }
1372
1373 address = ptoa_64(pageAddr) + offset;
1374
1375 // length is currently set to the length of the remainder of the iopl.
1376 // We need to check that the remainder of the iopl is contiguous.
1377 // This is indicated by pageList[pageInd].phys_addr being sequential.
1378 IOByteCount contigLength = PAGE_SIZE - offset;
1379 while (contigLength < length
1380 && ++pageAddr == pageList[++pageInd].phys_addr)
1381 {
1382 contigLength += PAGE_SIZE;
1383 }
1384
1385 if (contigLength < length)
1386 length = contigLength;
1387
1388
1389 assert(address);
1390 assert(length);
1391
1392 } while (false);
1393
1394 // Update return values and state
1395 isP->fIO.fIOVMAddr = address;
1396 isP->fIO.fLength = length;
1397 isP->fIndex = ind;
1398 isP->fOffset2Index = off2Ind;
1399 isP->fNextOffset = isP->fIO.fOffset + length;
1400
1401 return kIOReturnSuccess;
1402 }
1403
1404 addr64_t
1405 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1406 {
1407 IOReturn ret;
1408 addr64_t address = 0;
1409 IOByteCount length = 0;
1410 IOMapper * mapper = gIOSystemMapper;
1411 IOOptionBits type = _flags & kIOMemoryTypeMask;
1412
1413 if (lengthOfSegment)
1414 *lengthOfSegment = 0;
1415
1416 if (offset >= _length)
1417 return 0;
1418
1419 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1420 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1421 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1422 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1423
1424 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1425 {
1426 unsigned rangesIndex = 0;
1427 Ranges vec = _ranges;
1428 user_addr_t addr;
1429
1430 // Find starting address within the vector of ranges
1431 for (;;) {
1432 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1433 if (offset < length)
1434 break;
1435 offset -= length; // (make offset relative)
1436 rangesIndex++;
1437 }
1438
1439 // Now that we have the starting range,
1440 // let's find the last contiguous range
1441 addr += offset;
1442 length -= offset;
1443
1444 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1445 user_addr_t newAddr;
1446 IOPhysicalLength newLen;
1447
1448 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1449 if (addr + length != newAddr)
1450 break;
1451 length += newLen;
1452 }
1453 if (addr)
1454 address = (IOPhysicalAddress) addr; // Truncate address to 32 bits
1455 }
1456 else
1457 {
1458 IOMDDMAWalkSegmentState _state;
1459 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1460
1461 state->fOffset = offset;
1462 state->fLength = _length - offset;
1463 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1464
1465 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1466
1467 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1468 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1469 ret, this, state->fOffset,
1470 state->fIOVMAddr, state->fLength);
1471 if (kIOReturnSuccess == ret)
1472 {
1473 address = state->fIOVMAddr;
1474 length = state->fLength;
1475 }
1476
1477 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1478 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1479
1480 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1481 {
1482 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1483 {
1484 addr64_t origAddr = address;
1485 IOByteCount origLen = length;
1486
1487 address = mapper->mapAddr(origAddr);
1488 length = page_size - (address & (page_size - 1));
1489 while ((length < origLen)
1490 && ((address + length) == mapper->mapAddr(origAddr + length)))
1491 length += page_size;
1492 if (length > origLen)
1493 length = origLen;
1494 }
1495 #ifdef __LP64__
1496 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1497 {
1498 panic("getPhysicalSegment not mapped for I/O");
1499 }
1500 #endif /* __LP64__ */
1501 }
1502 }
1503
1504 if (!address)
1505 length = 0;
1506
1507 if (lengthOfSegment)
1508 *lengthOfSegment = length;
1509
1510 return (address);
1511 }
1512
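/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): walking a prepared descriptor one physically contiguous segment
 * at a time. "md" is a hypothetical prepared IOMemoryDescriptor.
 *
 *     IOByteCount offset = 0;
 *     while (offset < md->getLength())
 *     {
 *         IOByteCount segLen;
 *         addr64_t    segAddr = md->getPhysicalSegment(offset, &segLen,
 *                                                      kIOMemoryMapperNone);
 *         if (!segAddr)
 *             break;
 *         // ... program the hardware with segAddr / segLen ...
 *         offset += segLen;
 *     }
 */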
1513 #ifndef __LP64__
1514 addr64_t
1515 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1516 {
1517 addr64_t address = 0;
1518
1519 if (options & _kIOMemorySourceSegment)
1520 {
1521 address = getSourceSegment(offset, lengthOfSegment);
1522 }
1523 else if (options & kIOMemoryMapperNone)
1524 {
1525 address = getPhysicalSegment64(offset, lengthOfSegment);
1526 }
1527 else
1528 {
1529 address = getPhysicalSegment(offset, lengthOfSegment);
1530 }
1531
1532 return (address);
1533 }
1534
1535 addr64_t
1536 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1537 {
1538 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1539 }
1540
1541 IOPhysicalAddress
1542 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1543 {
1544 addr64_t address = 0;
1545 IOByteCount length = 0;
1546
1547 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1548
1549 if (lengthOfSegment)
1550 length = *lengthOfSegment;
1551
1552 if ((address + length) > 0x100000000ULL)
1553 {
1554 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1555 address, (long) length, (getMetaClass())->getClassName());
1556 }
1557
1558 return ((IOPhysicalAddress) address);
1559 }
1560
1561 addr64_t
1562 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1563 {
1564 IOPhysicalAddress phys32;
1565 IOByteCount length;
1566 addr64_t phys64;
1567 IOMapper * mapper = 0;
1568
1569 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1570 if (!phys32)
1571 return 0;
1572
1573 if (gIOSystemMapper)
1574 mapper = gIOSystemMapper;
1575
1576 if (mapper)
1577 {
1578 IOByteCount origLen;
1579
1580 phys64 = mapper->mapAddr(phys32);
1581 origLen = *lengthOfSegment;
1582 length = page_size - (phys64 & (page_size - 1));
1583 while ((length < origLen)
1584 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1585 length += page_size;
1586 if (length > origLen)
1587 length = origLen;
1588
1589 *lengthOfSegment = length;
1590 }
1591 else
1592 phys64 = (addr64_t) phys32;
1593
1594 return phys64;
1595 }
1596
1597 IOPhysicalAddress
1598 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1599 {
1600 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1601 }
1602
1603 IOPhysicalAddress
1604 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1605 {
1606 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1607 }
1608
1609 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1610 IOByteCount * lengthOfSegment)
1611 {
1612 if (_task == kernel_task)
1613 return (void *) getSourceSegment(offset, lengthOfSegment);
1614 else
1615 panic("IOGMD::getVirtualSegment deprecated");
1616
1617 return 0;
1618 }
1619 #endif /* !__LP64__ */
1620
1621 IOReturn
1622 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1623 {
1624 if (kIOMDGetCharacteristics == op) {
1625 if (dataSize < sizeof(IOMDDMACharacteristics))
1626 return kIOReturnUnderrun;
1627
1628 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1629 data->fLength = getLength();
1630 data->fSGCount = 0;
1631 data->fDirection = getDirection();
1632 if (IOMapper::gSystem)
1633 data->fIsMapped = true;
1634 data->fIsPrepared = true; // Assume prepared - fails safe
1635 }
1636 else if (kIOMDWalkSegments & op) {
1637 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1638 return kIOReturnUnderrun;
1639
1640 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1641 IOByteCount offset = (IOByteCount) data->fOffset;
1642
1643 IOPhysicalLength length;
1644 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1645 if (data->fMapped && IOMapper::gSystem)
1646 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1647 else
1648 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1649 data->fLength = length;
1650 }
1651 else
1652 return kIOReturnBadArgument;
1653
1654 return kIOReturnSuccess;
1655 }
1656
1657 static IOReturn
1658 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1659 {
1660 IOReturn err = kIOReturnSuccess;
1661
1662 *control = VM_PURGABLE_SET_STATE;
1663 switch (newState)
1664 {
1665 case kIOMemoryPurgeableKeepCurrent:
1666 *control = VM_PURGABLE_GET_STATE;
1667 break;
1668
1669 case kIOMemoryPurgeableNonVolatile:
1670 *state = VM_PURGABLE_NONVOLATILE;
1671 break;
1672 case kIOMemoryPurgeableVolatile:
1673 *state = VM_PURGABLE_VOLATILE;
1674 break;
1675 case kIOMemoryPurgeableEmpty:
1676 *state = VM_PURGABLE_EMPTY;
1677 break;
1678 default:
1679 err = kIOReturnBadArgument;
1680 break;
1681 }
1682 return (err);
1683 }
1684
1685 static IOReturn
1686 purgeableStateBits(int * state)
1687 {
1688 IOReturn err = kIOReturnSuccess;
1689
1690 switch (*state)
1691 {
1692 case VM_PURGABLE_NONVOLATILE:
1693 *state = kIOMemoryPurgeableNonVolatile;
1694 break;
1695 case VM_PURGABLE_VOLATILE:
1696 *state = kIOMemoryPurgeableVolatile;
1697 break;
1698 case VM_PURGABLE_EMPTY:
1699 *state = kIOMemoryPurgeableEmpty;
1700 break;
1701 default:
1702 *state = kIOMemoryPurgeableNonVolatile;
1703 err = kIOReturnNotReady;
1704 break;
1705 }
1706 return (err);
1707 }
1708
1709 IOReturn
1710 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1711 IOOptionBits * oldState )
1712 {
1713 IOReturn err = kIOReturnSuccess;
1714 vm_purgable_t control;
1715 int state;
1716
1717 if (_memEntry)
1718 {
1719 err = super::setPurgeable(newState, oldState);
1720 }
1721 else
1722 {
1723 if (kIOMemoryThreadSafe & _flags)
1724 LOCK;
1725 do
1726 {
1727 // Find the appropriate vm_map for the given task
1728 vm_map_t curMap;
1729 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1730 {
1731 err = kIOReturnNotReady;
1732 break;
1733 }
1734 else
1735 curMap = get_task_map(_task);
1736
1737 // can only do one range
1738 Ranges vec = _ranges;
1739 IOOptionBits type = _flags & kIOMemoryTypeMask;
1740 user_addr_t addr;
1741 IOByteCount len;
1742 getAddrLenForInd(addr, len, type, vec, 0);
1743
1744 err = purgeableControlBits(newState, &control, &state);
1745 if (kIOReturnSuccess != err)
1746 break;
1747 err = mach_vm_purgable_control(curMap, addr, control, &state);
1748 if (oldState)
1749 {
1750 if (kIOReturnSuccess == err)
1751 {
1752 err = purgeableStateBits(&state);
1753 *oldState = state;
1754 }
1755 }
1756 }
1757 while (false);
1758 if (kIOMemoryThreadSafe & _flags)
1759 UNLOCK;
1760 }
1761 return (err);
1762 }
1763
1764 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1765 IOOptionBits * oldState )
1766 {
1767 IOReturn err = kIOReturnSuccess;
1768 vm_purgable_t control;
1769 int state;
1770
1771 if (kIOMemoryThreadSafe & _flags)
1772 LOCK;
1773
1774 do
1775 {
1776 if (!_memEntry)
1777 {
1778 err = kIOReturnNotReady;
1779 break;
1780 }
1781 err = purgeableControlBits(newState, &control, &state);
1782 if (kIOReturnSuccess != err)
1783 break;
1784 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1785 if (oldState)
1786 {
1787 if (kIOReturnSuccess == err)
1788 {
1789 err = purgeableStateBits(&state);
1790 *oldState = state;
1791 }
1792 }
1793 }
1794 while (false);
1795
1796 if (kIOMemoryThreadSafe & _flags)
1797 UNLOCK;
1798
1799 return (err);
1800 }
1801
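/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): marking purgeable backing memory volatile and later checking
 * whether the VM system emptied it. "md" is a hypothetical IOMemoryDescriptor
 * backed by purgeable memory.
 *
 *     IOOptionBits oldState;
 *
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *     // ... later, make it non-volatile again and inspect the old state ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState)
 *     {
 *         // contents were discarded while volatile; regenerate them
 *     }
 */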
1802 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1803 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1804
1805 static void SetEncryptOp(addr64_t pa, unsigned int count)
1806 {
1807 ppnum_t page, end;
1808
1809 page = atop_64(round_page_64(pa));
1810 end = atop_64(trunc_page_64(pa + count));
1811 for (; page < end; page++)
1812 {
1813 pmap_clear_noencrypt(page);
1814 }
1815 }
1816
1817 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1818 {
1819 ppnum_t page, end;
1820
1821 page = atop_64(round_page_64(pa));
1822 end = atop_64(trunc_page_64(pa + count));
1823 for (; page < end; page++)
1824 {
1825 pmap_set_noencrypt(page);
1826 }
1827 }
1828
1829 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1830 IOByteCount offset, IOByteCount length )
1831 {
1832 IOByteCount remaining;
1833 void (*func)(addr64_t pa, unsigned int count) = 0;
1834
1835 switch (options)
1836 {
1837 case kIOMemoryIncoherentIOFlush:
1838 func = &dcache_incoherent_io_flush64;
1839 break;
1840 case kIOMemoryIncoherentIOStore:
1841 func = &dcache_incoherent_io_store64;
1842 break;
1843
1844 case kIOMemorySetEncrypted:
1845 func = &SetEncryptOp;
1846 break;
1847 case kIOMemoryClearEncrypted:
1848 func = &ClearEncryptOp;
1849 break;
1850 }
1851
1852 if (!func)
1853 return (kIOReturnUnsupported);
1854
1855 if (kIOMemoryThreadSafe & _flags)
1856 LOCK;
1857
1858 remaining = length = min(length, getLength() - offset);
1859 while (remaining)
1860 // (process another target segment?)
1861 {
1862 addr64_t dstAddr64;
1863 IOByteCount dstLen;
1864
1865 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1866 if (!dstAddr64)
1867 break;
1868
1869 // Clip segment length to remaining
1870 if (dstLen > remaining)
1871 dstLen = remaining;
1872
1873 (*func)(dstAddr64, dstLen);
1874
1875 offset += dstLen;
1876 remaining -= dstLen;
1877 }
1878
1879 if (kIOMemoryThreadSafe & _flags)
1880 UNLOCK;
1881
1882 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1883 }
1884
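/*
 * Usage sketch (editor's illustration, not part of the original Apple
 * source): flushing a descriptor's cache lines before handing the memory to
 * a non-cache-coherent DMA engine. "md" is a hypothetical IOMemoryDescriptor.
 *
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */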
1885 extern vm_offset_t first_avail;
1886 #define io_kernel_static_end first_avail
1887
1888 static kern_return_t
1889 io_get_kernel_static_upl(
1890 vm_map_t /* map */,
1891 uintptr_t offset,
1892 vm_size_t *upl_size,
1893 upl_t *upl,
1894 upl_page_info_array_t page_list,
1895 unsigned int *count,
1896 ppnum_t *highest_page)
1897 {
1898 unsigned int pageCount, page;
1899 ppnum_t phys;
1900 ppnum_t highestPage = 0;
1901
1902 pageCount = atop_32(*upl_size);
1903 if (pageCount > *count)
1904 pageCount = *count;
1905
1906 *upl = NULL;
1907
1908 for (page = 0; page < pageCount; page++)
1909 {
1910 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1911 if (!phys)
1912 break;
1913 page_list[page].phys_addr = phys;
1914 page_list[page].pageout = 0;
1915 page_list[page].absent = 0;
1916 page_list[page].dirty = 0;
1917 page_list[page].precious = 0;
1918 page_list[page].device = 0;
1919 if (phys > highestPage)
1920 highestPage = phys;
1921 }
1922
1923 *highest_page = highestPage;
1924
1925 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1926 }
1927
1928 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1929 {
1930 IOOptionBits type = _flags & kIOMemoryTypeMask;
1931 IOReturn error = kIOReturnCannotWire;
1932 ioGMDData *dataP;
1933 ppnum_t mapBase = 0;
1934 IOMapper *mapper;
1935 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1936
1937 assert(!_wireCount);
1938 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1939
1940 if (_pages >= gIOMaximumMappedIOPageCount)
1941 return kIOReturnNoResources;
1942
1943 dataP = getDataP(_memoryEntries);
1944 mapper = dataP->fMapper;
1945 if (mapper && _pages)
1946 mapBase = mapper->iovmAlloc(_pages);
1947
1948 // Note that appendBytes(NULL) zeros the data up to the
1949 // desired length.
1950 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1951 dataP = 0; // May no longer be valid, so let's not get tempted.
1952
1953 if (forDirection == kIODirectionNone)
1954 forDirection = getDirection();
1955
1956 int uplFlags; // This Mem Desc's default flags for upl creation
1957 switch (kIODirectionOutIn & forDirection)
1958 {
1959 case kIODirectionOut:
1960 // Pages do not need to be marked as dirty on commit
1961 uplFlags = UPL_COPYOUT_FROM;
1962 _flags |= kIOMemoryPreparedReadOnly;
1963 break;
1964
1965 case kIODirectionIn:
1966 default:
1967 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1968 break;
1969 }
1970 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1971
1972 #ifdef UPL_NEED_32BIT_ADDR
1973 if (kIODirectionPrepareToPhys32 & forDirection)
1974 uplFlags |= UPL_NEED_32BIT_ADDR;
1975 #endif
1976
1977 // Find the appropriate vm_map for the given task
1978 vm_map_t curMap;
1979 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1980 curMap = 0;
1981 else
1982 { curMap = get_task_map(_task); }
1983
1984 // Iterate over the vector of virtual ranges
1985 Ranges vec = _ranges;
1986 unsigned int pageIndex = 0;
1987 IOByteCount mdOffset = 0;
1988 ppnum_t highestPage = 0;
1989 for (UInt range = 0; range < _rangesCount; range++) {
1990 ioPLBlock iopl;
1991 user_addr_t startPage;
1992 IOByteCount numBytes;
1993 ppnum_t highPage = 0;
1994
1995 // Get the startPage address and length of vec[range]
1996 getAddrLenForInd(startPage, numBytes, type, vec, range);
1997 iopl.fPageOffset = startPage & PAGE_MASK;
1998 numBytes += iopl.fPageOffset;
1999 startPage = trunc_page_64(startPage);
2000
2001 if (mapper)
2002 iopl.fMappedBase = mapBase + pageIndex;
2003 else
2004 iopl.fMappedBase = 0;
2005
2006 // Iterate over the current range, creating UPLs
2007 while (numBytes) {
2008 dataP = getDataP(_memoryEntries);
2009 vm_address_t kernelStart = (vm_address_t) startPage;
2010 vm_map_t theMap;
2011 if (curMap)
2012 theMap = curMap;
2013 else if (!sharedMem) {
2014 assert(_task == kernel_task);
2015 theMap = IOPageableMapForAddress(kernelStart);
2016 }
2017 else
2018 theMap = NULL;
2019
2020 upl_page_info_array_t pageInfo = getPageList(dataP);
2021 int ioplFlags = uplFlags;
2022 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2023
2024 vm_size_t ioplSize = round_page(numBytes);
2025 unsigned int numPageInfo = atop_32(ioplSize);
2026
2027 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2028 error = io_get_kernel_static_upl(theMap,
2029 kernelStart,
2030 &ioplSize,
2031 &iopl.fIOPL,
2032 baseInfo,
2033 &numPageInfo,
2034 &highPage);
2035 }
2036 else if (sharedMem) {
2037 error = memory_object_iopl_request(sharedMem,
2038 ptoa_32(pageIndex),
2039 &ioplSize,
2040 &iopl.fIOPL,
2041 baseInfo,
2042 &numPageInfo,
2043 &ioplFlags);
2044 }
2045 else {
2046 assert(theMap);
2047 error = vm_map_create_upl(theMap,
2048 startPage,
2049 (upl_size_t*)&ioplSize,
2050 &iopl.fIOPL,
2051 baseInfo,
2052 &numPageInfo,
2053 &ioplFlags);
2054 }
2055
2056 assert(ioplSize);
2057 if (error != KERN_SUCCESS)
2058 goto abortExit;
2059
2060 if (iopl.fIOPL)
2061 highPage = upl_get_highest_page(iopl.fIOPL);
2062 if (highPage > highestPage)
2063 highestPage = highPage;
2064
2065 error = kIOReturnCannotWire;
2066
2067 if (baseInfo->device) {
2068 numPageInfo = 1;
2069 iopl.fFlags = kIOPLOnDevice;
2070 // Don't translate device memory at all
2071 if (mapper && mapBase) {
2072 mapper->iovmFree(mapBase, _pages);
2073 mapBase = 0;
2074 iopl.fMappedBase = 0;
2075 }
2076 }
2077 else {
2078 iopl.fFlags = 0;
2079 if (mapper)
2080 mapper->iovmInsert(mapBase, pageIndex,
2081 baseInfo, numPageInfo);
2082 }
2083
2084 iopl.fIOMDOffset = mdOffset;
2085 iopl.fPageInfo = pageIndex;
2086
2087 #if 0
2088 // This used to remove the upl for auto prepares here, to cope with errant code
2089 // that freed memory before releasing the descriptor pointing at it
2090 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2091 {
2092 upl_commit(iopl.fIOPL, 0, 0);
2093 upl_deallocate(iopl.fIOPL);
2094 iopl.fIOPL = 0;
2095 }
2096 #endif
2097
2098 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2099 // Clean up a partially created and unsaved iopl
2100 if (iopl.fIOPL) {
2101 upl_abort(iopl.fIOPL, 0);
2102 upl_deallocate(iopl.fIOPL);
2103 }
2104 goto abortExit;
2105 }
2106
2107 // Check for multiple iopls in one virtual range
2108 pageIndex += numPageInfo;
2109 mdOffset -= iopl.fPageOffset;
2110 if (ioplSize < numBytes) {
2111 numBytes -= ioplSize;
2112 startPage += ioplSize;
2113 mdOffset += ioplSize;
2114 iopl.fPageOffset = 0;
2115 if (mapper)
2116 iopl.fMappedBase = mapBase + pageIndex;
2117 }
2118 else {
2119 mdOffset += numBytes;
2120 break;
2121 }
2122 }
2123 }
2124
2125 _highestPage = highestPage;
2126
2127 return kIOReturnSuccess;
2128
2129 abortExit:
2130 {
2131 dataP = getDataP(_memoryEntries);
2132 UInt done = getNumIOPL(_memoryEntries, dataP);
2133 ioPLBlock *ioplList = getIOPLList(dataP);
2134
2135 for (UInt range = 0; range < done; range++)
2136 {
2137 if (ioplList[range].fIOPL) {
2138 upl_abort(ioplList[range].fIOPL, 0);
2139 upl_deallocate(ioplList[range].fIOPL);
2140 }
2141 }
2142 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2143
2144 if (mapper && mapBase)
2145 mapper->iovmFree(mapBase, _pages);
2146 }
2147
2148 if (error == KERN_FAILURE)
2149 error = kIOReturnCannotWire;
2150
2151 return error;
2152 }
2153
2154 /*
2155 * prepare
2156 *
2157 * Prepare the memory for an I/O transfer. This involves paging in
2158 * the memory, if necessary, and wiring it down for the duration of
2159 * the transfer. The complete() method completes the processing of
2160 * the memory after the I/O transfer finishes. This method need not be
2161 * called for non-pageable memory.
2162 */
2163 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2164 {
2165 IOReturn error = kIOReturnSuccess;
2166 IOOptionBits type = _flags & kIOMemoryTypeMask;
2167
2168 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2169 return kIOReturnSuccess;
2170
2171 if (_prepareLock)
2172 IOLockLock(_prepareLock);
2173
2174 if (!_wireCount
2175 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2176 error = wireVirtual(forDirection);
2177 }
2178
2179 if (kIOReturnSuccess == error)
2180 _wireCount++;
2181
2182 if (1 == _wireCount)
2183 {
2184 if (kIOMemoryClearEncrypt & _flags)
2185 {
2186 performOperation(kIOMemoryClearEncrypted, 0, _length);
2187 }
2188 }
2189
2190 if (_prepareLock)
2191 IOLockUnlock(_prepareLock);
2192
2193 return error;
2194 }
2195
2196 /*
2197 * complete
2198 *
2199 * Complete processing of the memory after an I/O transfer finishes.
2200 * This method should not be called unless a prepare was previously
2201 * issued; prepare() and complete() must occur in pairs, before
2202 * and after an I/O transfer involving pageable memory.
2203 */
2204
2205 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2206 {
2207 IOOptionBits type = _flags & kIOMemoryTypeMask;
2208
2209 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2210 return kIOReturnSuccess;
2211
2212 if (_prepareLock)
2213 IOLockLock(_prepareLock);
2214
2215 assert(_wireCount);
2216
2217 if (_wireCount)
2218 {
2219 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2220 {
2221 performOperation(kIOMemorySetEncrypted, 0, _length);
2222 }
2223
2224 _wireCount--;
2225 if (!_wireCount)
2226 {
2227 IOOptionBits type = _flags & kIOMemoryTypeMask;
2228 ioGMDData * dataP = getDataP(_memoryEntries);
2229 ioPLBlock *ioplList = getIOPLList(dataP);
2230 UInt count = getNumIOPL(_memoryEntries, dataP);
2231
2232 #if IOMD_DEBUG_DMAACTIVE
2233 if (__iomd_reservedA) panic("complete() while dma active");
2234 #endif /* IOMD_DEBUG_DMAACTIVE */
2235
2236 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2237 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2238
2239 // Only complete iopls that we created, i.e. those for the virtual types
2240 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2241 for (UInt ind = 0; ind < count; ind++)
2242 if (ioplList[ind].fIOPL) {
2243 upl_commit(ioplList[ind].fIOPL, 0, 0);
2244 upl_deallocate(ioplList[ind].fIOPL);
2245 }
2246 } else if (kIOMemoryTypeUPL == type) {
2247 upl_set_referenced(ioplList[0].fIOPL, false);
2248 }
2249
2250 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2251
2252 dataP->fPreparationID = kIOPreparationIDUnprepared;
2253 }
2254 }
2255
2256 if (_prepareLock)
2257 IOLockUnlock(_prepareLock);
2258
2259 return kIOReturnSuccess;
2260 }
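
// What follows is a minimal, illustrative sketch (kept disabled) of the
// prepare()/complete() contract documented above: wire client memory before
// a DMA transfer and unwire it afterwards. The function name, buffer address,
// length and task are hypothetical placeholders, not part of this file.
#if 0
static IOReturn
ExampleWireForDMA(task_t clientTask, mach_vm_address_t buffer, mach_vm_size_t length)
{
    IOReturn             ret;
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                    buffer, length, kIODirectionOut, clientTask);
    if (!md)
        return (kIOReturnNoMemory);

    ret = md->prepare();                        // page in and wire down the memory
    if (kIOReturnSuccess == ret)
    {
        IOPhysicalLength segLen;
        // CPU physical address of the first segment (bypassing any system mapper)
        addr64_t phys = md->getPhysicalSegment(0, &segLen, kIOMemoryMapperNone);
        // ... program the hardware with phys/segLen and run the transfer ...
        (void) phys;

        md->complete();                         // must pair with the prepare() above
    }
    md->release();
    return (ret);
}
#endif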
2261
2262 IOReturn IOGeneralMemoryDescriptor::doMap(
2263 vm_map_t __addressMap,
2264 IOVirtualAddress * __address,
2265 IOOptionBits options,
2266 IOByteCount __offset,
2267 IOByteCount __length )
2268
2269 {
2270 #ifndef __LP64__
2271 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2272 #endif /* !__LP64__ */
2273
2274 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2275 mach_vm_size_t offset = mapping->fOffset + __offset;
2276 mach_vm_size_t length = mapping->fLength;
2277
2278 kern_return_t kr = kIOReturnVMError;
2279 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2280
2281 IOOptionBits type = _flags & kIOMemoryTypeMask;
2282 Ranges vec = _ranges;
2283
2284 user_addr_t range0Addr = 0;
2285 IOByteCount range0Len = 0;
2286
2287 if ((offset >= _length) || ((offset + length) > _length))
2288 return( kIOReturnBadArgument );
2289
2290 if (vec.v)
2291 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2292
2293 // mapping source == dest? (could be much better)
2294 if( _task
2295 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2296 && (1 == _rangesCount) && (0 == offset)
2297 && range0Addr && (length <= range0Len) )
2298 {
2299 mapping->fAddress = range0Addr;
2300 mapping->fOptions |= kIOMapStatic;
2301
2302 return( kIOReturnSuccess );
2303 }
2304
2305 if( 0 == sharedMem) {
2306
2307 vm_size_t size = ptoa_32(_pages);
2308
2309 if( _task) {
2310
2311 memory_object_size_t actualSize = size;
2312 vm_prot_t prot = VM_PROT_READ;
2313 if (!(kIOMapReadOnly & options))
2314 prot |= VM_PROT_WRITE;
2315 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2316 prot |= VM_PROT_WRITE;
2317
2318 if (_rangesCount == 1)
2319 {
2320 kr = mach_make_memory_entry_64(get_task_map(_task),
2321 &actualSize, range0Addr,
2322 prot, &sharedMem,
2323 NULL);
2324 }
2325 if( (_rangesCount != 1)
2326 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2327 do
2328 {
2329 #if IOASSERT
2330 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2331 _rangesCount, (UInt64)actualSize, (UInt64)size);
2332 #endif
2333 kr = kIOReturnVMError;
2334 if (sharedMem)
2335 {
2336 ipc_port_release_send(sharedMem);
2337 sharedMem = MACH_PORT_NULL;
2338 }
2339
2340 mach_vm_address_t address, segDestAddr;
2341 mach_vm_size_t mapLength;
2342 unsigned rangesIndex;
2343 IOOptionBits type = _flags & kIOMemoryTypeMask;
2344 user_addr_t srcAddr;
2345 IOPhysicalLength segLen = 0;
2346
2347 // Find starting address within the vector of ranges
2348 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2349 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2350 if (offset < segLen)
2351 break;
2352 offset -= segLen; // (make offset relative)
2353 }
2354
2355 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2356 address = trunc_page_64(mapping->fAddress);
2357
2358 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2359 {
2360 vm_map_t map = mapping->fAddressMap;
2361 kr = IOMemoryDescriptorMapCopy(&map,
2362 options,
2363 offset, &address, round_page_64(length + pageOffset));
2364 if (kr == KERN_SUCCESS)
2365 {
2366 segDestAddr = address;
2367 segLen -= offset;
2368 mapLength = length;
2369
2370 while (true)
2371 {
2372 vm_prot_t cur_prot, max_prot;
2373 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2374 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2375 get_task_map(_task), trunc_page_64(srcAddr),
2376 FALSE /* copy */,
2377 &cur_prot,
2378 &max_prot,
2379 VM_INHERIT_NONE);
2380 if (KERN_SUCCESS == kr)
2381 {
2382 if ((!(VM_PROT_READ & cur_prot))
2383 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2384 {
2385 kr = KERN_PROTECTION_FAILURE;
2386 }
2387 }
2388 if (KERN_SUCCESS != kr)
2389 break;
2390 segDestAddr += segLen;
2391 mapLength -= segLen;
2392 if (!mapLength)
2393 break;
2394 rangesIndex++;
2395 if (rangesIndex >= _rangesCount)
2396 {
2397 kr = kIOReturnBadArgument;
2398 break;
2399 }
2400 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2401 if (srcAddr & PAGE_MASK)
2402 {
2403 kr = kIOReturnBadArgument;
2404 break;
2405 }
2406 if (segLen > mapLength)
2407 segLen = mapLength;
2408 }
2409 if (KERN_SUCCESS != kr)
2410 {
2411 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2412 }
2413 }
2414
2415 if (KERN_SUCCESS == kr)
2416 mapping->fAddress = address + pageOffset;
2417 else
2418 mapping->fAddress = NULL;
2419 }
2420 }
2421 while (false);
2422 }
2423 else do
2424 { // _task == 0, must be physical
2425
2426 memory_object_t pager;
2427 unsigned int flags = 0;
2428 addr64_t pa;
2429 IOPhysicalLength segLen;
2430
2431 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2432
2433 if( !reserved) {
2434 reserved = IONew( ExpansionData, 1 );
2435 if( !reserved)
2436 continue;
2437 }
2438 reserved->pagerContig = (1 == _rangesCount);
2439 reserved->memory = this;
2440
2441 /* What cache mode do we need? */
2442 switch(options & kIOMapCacheMask ) {
2443
2444 case kIOMapDefaultCache:
2445 default:
2446 flags = IODefaultCacheBits(pa);
2447 if (DEVICE_PAGER_CACHE_INHIB & flags)
2448 {
2449 if (DEVICE_PAGER_GUARDED & flags)
2450 mapping->fOptions |= kIOMapInhibitCache;
2451 else
2452 mapping->fOptions |= kIOMapWriteCombineCache;
2453 }
2454 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2455 mapping->fOptions |= kIOMapWriteThruCache;
2456 else
2457 mapping->fOptions |= kIOMapCopybackCache;
2458 break;
2459
2460 case kIOMapInhibitCache:
2461 flags = DEVICE_PAGER_CACHE_INHIB |
2462 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2463 break;
2464
2465 case kIOMapWriteThruCache:
2466 flags = DEVICE_PAGER_WRITE_THROUGH |
2467 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2468 break;
2469
2470 case kIOMapCopybackCache:
2471 flags = DEVICE_PAGER_COHERENT;
2472 break;
2473
2474 case kIOMapWriteCombineCache:
2475 flags = DEVICE_PAGER_CACHE_INHIB |
2476 DEVICE_PAGER_COHERENT;
2477 break;
2478 }
2479
2480 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2481
2482 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2483 size, flags);
2484 assert( pager );
2485
2486 if( pager) {
2487 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2488 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2489
2490 assert( KERN_SUCCESS == kr );
2491 if( KERN_SUCCESS != kr)
2492 {
2493 device_pager_deallocate( pager );
2494 pager = MACH_PORT_NULL;
2495 sharedMem = MACH_PORT_NULL;
2496 }
2497 }
2498 if( pager && sharedMem)
2499 reserved->devicePager = pager;
2500 else {
2501 IODelete( reserved, ExpansionData, 1 );
2502 reserved = 0;
2503 }
2504
2505 } while( false );
2506
2507 _memEntry = (void *) sharedMem;
2508 }
2509
2510 IOReturn result;
2511 if (0 == sharedMem)
2512 result = kr;
2513 else
2514 result = super::doMap( __addressMap, __address,
2515 options, __offset, __length );
2516
2517 return( result );
2518 }
2519
2520 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2521 vm_map_t addressMap,
2522 IOVirtualAddress __address,
2523 IOByteCount __length )
2524 {
2525 return (super::doUnmap(addressMap, __address, __length));
2526 }
2527
2528 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2529
2530 #undef super
2531 #define super OSObject
2532
2533 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2534
2535 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2536 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2537 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2538 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2539 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2540 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2541 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2542 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2543
2544 /* ex-inline function implementation */
2545 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2546 { return( getPhysicalSegment( 0, 0 )); }
2547
2548 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2549
2550 bool IOMemoryMap::init(
2551 task_t intoTask,
2552 mach_vm_address_t toAddress,
2553 IOOptionBits _options,
2554 mach_vm_size_t _offset,
2555 mach_vm_size_t _length )
2556 {
2557 if (!intoTask)
2558 return( false);
2559
2560 if (!super::init())
2561 return(false);
2562
2563 fAddressMap = get_task_map(intoTask);
2564 if (!fAddressMap)
2565 return(false);
2566 vm_map_reference(fAddressMap);
2567
2568 fAddressTask = intoTask;
2569 fOptions = _options;
2570 fLength = _length;
2571 fOffset = _offset;
2572 fAddress = toAddress;
2573
2574 return (true);
2575 }
2576
2577 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2578 {
2579 if (!_memory)
2580 return(false);
2581
2582 if (!fSuperMap)
2583 {
2584 if( (_offset + fLength) > _memory->getLength())
2585 return( false);
2586 fOffset = _offset;
2587 }
2588
2589 _memory->retain();
2590 if (fMemory)
2591 {
2592 if (fMemory != _memory)
2593 fMemory->removeMapping(this);
2594 fMemory->release();
2595 }
2596 fMemory = _memory;
2597
2598 return( true );
2599 }
2600
2601 struct IOMemoryDescriptorMapAllocRef
2602 {
2603 ipc_port_t sharedMem;
2604 vm_map_t map;
2605 mach_vm_address_t mapped;
2606 mach_vm_size_t size;
2607 mach_vm_size_t sourceOffset;
2608 IOOptionBits options;
2609 };
2610
2611 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2612 {
2613 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2614 IOReturn err;
2615
2616 do {
2617 if( ref->sharedMem)
2618 {
2619 vm_prot_t prot = VM_PROT_READ
2620 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2621
2622 // VM system requires write access to change cache mode
2623 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2624 prot |= VM_PROT_WRITE;
2625
2626 // set the memory entry's cache mode
2627 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2628 switch (ref->options & kIOMapCacheMask)
2629 {
2630 case kIOMapInhibitCache:
2631 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2632 break;
2633
2634 case kIOMapWriteThruCache:
2635 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2636 break;
2637
2638 case kIOMapWriteCombineCache:
2639 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2640 break;
2641
2642 case kIOMapCopybackCache:
2643 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2644 break;
2645
2646 case kIOMapDefaultCache:
2647 default:
2648 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2649 break;
2650 }
2651
2652 vm_size_t unused = 0;
2653
2654 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2655 memEntryCacheMode, NULL, ref->sharedMem );
2656 if (KERN_SUCCESS != err)
2657 IOLog("MAP_MEM_ONLY failed %d\n", err);
2658
2659 err = mach_vm_map( map,
2660 &ref->mapped,
2661 ref->size, 0 /* mask */,
2662 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2663 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2664 ref->sharedMem, ref->sourceOffset,
2665 false, // copy
2666 prot, // cur
2667 prot, // max
2668 VM_INHERIT_NONE);
2669
2670 if( KERN_SUCCESS != err) {
2671 ref->mapped = 0;
2672 continue;
2673 }
2674 ref->map = map;
2675 }
2676 else
2677 {
2678 err = mach_vm_allocate(map, &ref->mapped, ref->size,
2679 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2680 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2681 if( KERN_SUCCESS != err) {
2682 ref->mapped = 0;
2683 continue;
2684 }
2685 ref->map = map;
2686 // we have to make sure these pages are not copied out to a child on fork.
2687 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2688 assert( KERN_SUCCESS == err );
2689 }
2690 }
2691 while( false );
2692
2693 return( err );
2694 }
2695
2696 kern_return_t
2697 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2698 mach_vm_size_t offset,
2699 mach_vm_address_t * address, mach_vm_size_t length)
2700 {
2701 IOReturn err;
2702 IOMemoryDescriptorMapAllocRef ref;
2703
2704 ref.map = *map;
2705 ref.sharedMem = entry;
2706 ref.sourceOffset = trunc_page_64(offset);
2707 ref.options = options;
2708 ref.size = length;
2709
2710 if (options & kIOMapAnywhere)
2711 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2712 ref.mapped = 0;
2713 else
2714 ref.mapped = *address;
2715
2716 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2717 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2718 else
2719 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2720
2721 *address = ref.mapped;
2722 *map = ref.map;
2723
2724 return (err);
2725 }
2726
2727 kern_return_t
2728 IOMemoryDescriptorMapCopy(vm_map_t * map,
2729 IOOptionBits options,
2730 mach_vm_size_t offset,
2731 mach_vm_address_t * address, mach_vm_size_t length)
2732 {
2733 IOReturn err;
2734 IOMemoryDescriptorMapAllocRef ref;
2735
2736 ref.map = *map;
2737 ref.sharedMem = NULL;
2738 ref.sourceOffset = trunc_page_64(offset);
2739 ref.options = options;
2740 ref.size = length;
2741
2742 if (options & kIOMapAnywhere)
2743 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2744 ref.mapped = 0;
2745 else
2746 ref.mapped = *address;
2747
2748 if (ref.map == kernel_map)
2749 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2750 else
2751 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
2752
2753 *address = ref.mapped;
2754 *map = ref.map;
2755
2756 return (err);
2757 }
2758
2759 IOReturn IOMemoryDescriptor::doMap(
2760 vm_map_t __addressMap,
2761 IOVirtualAddress * __address,
2762 IOOptionBits options,
2763 IOByteCount __offset,
2764 IOByteCount __length )
2765 {
2766 #ifndef __LP64__
2767 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2768 #endif /* !__LP64__ */
2769
2770 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2771 mach_vm_size_t offset = mapping->fOffset + __offset;
2772 mach_vm_size_t length = mapping->fLength;
2773
2774 IOReturn err = kIOReturnSuccess;
2775 memory_object_t pager;
2776 mach_vm_size_t pageOffset;
2777 IOPhysicalAddress sourceAddr;
2778 unsigned int lock_count;
2779
2780 do
2781 {
2782 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2783 pageOffset = sourceAddr - trunc_page( sourceAddr );
2784
2785 if( reserved)
2786 pager = (memory_object_t) reserved->devicePager;
2787 else
2788 pager = MACH_PORT_NULL;
2789
2790 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2791 {
2792 upl_t redirUPL2;
2793 vm_size_t size;
2794 int flags;
2795
2796 if (!_memEntry)
2797 {
2798 err = kIOReturnNotReadable;
2799 continue;
2800 }
2801
2802 size = round_page(mapping->fLength + pageOffset);
2803 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2804 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2805
2806 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2807 NULL, NULL,
2808 &flags))
2809 redirUPL2 = NULL;
2810
2811 for (lock_count = 0;
2812 IORecursiveLockHaveLock(gIOMemoryLock);
2813 lock_count++) {
2814 UNLOCK;
2815 }
2816 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2817 for (;
2818 lock_count;
2819 lock_count--) {
2820 LOCK;
2821 }
2822
2823 if (kIOReturnSuccess != err)
2824 {
2825 IOLog("upl_transpose(%x)\n", err);
2826 err = kIOReturnSuccess;
2827 }
2828
2829 if (redirUPL2)
2830 {
2831 upl_commit(redirUPL2, NULL, 0);
2832 upl_deallocate(redirUPL2);
2833 redirUPL2 = 0;
2834 }
2835 {
2836 // swap the memEntries since they now refer to different vm_objects
2837 void * me = _memEntry;
2838 _memEntry = mapping->fMemory->_memEntry;
2839 mapping->fMemory->_memEntry = me;
2840 }
2841 if (pager)
2842 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2843 }
2844 else
2845 {
2846 mach_vm_address_t address;
2847
2848 if (!(options & kIOMapAnywhere))
2849 {
2850 address = trunc_page_64(mapping->fAddress);
2851 if( (mapping->fAddress - address) != pageOffset)
2852 {
2853 err = kIOReturnVMError;
2854 continue;
2855 }
2856 }
2857
2858 vm_map_t map = mapping->fAddressMap;
2859 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2860 options, (kIOMemoryBufferPageable & _flags),
2861 offset, &address, round_page_64(length + pageOffset));
2862 if( err != KERN_SUCCESS)
2863 continue;
2864
2865 if (!_memEntry || pager)
2866 {
2867 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2868 if (err != KERN_SUCCESS)
2869 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2870 }
2871
2872 #if DEBUG
2873 if (kIOLogMapping & gIOKitDebug)
2874 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2875 err, this, sourceAddr, mapping, address, offset, length);
2876 #endif
2877
2878 if (err == KERN_SUCCESS)
2879 mapping->fAddress = address + pageOffset;
2880 else
2881 mapping->fAddress = NULL;
2882 }
2883 }
2884 while( false );
2885
2886 return (err);
2887 }
2888
2889 IOReturn IOMemoryDescriptor::handleFault(
2890 void * _pager,
2891 vm_map_t addressMap,
2892 mach_vm_address_t address,
2893 mach_vm_size_t sourceOffset,
2894 mach_vm_size_t length,
2895 IOOptionBits options )
2896 {
2897 IOReturn err = kIOReturnSuccess;
2898 memory_object_t pager = (memory_object_t) _pager;
2899 mach_vm_size_t size;
2900 mach_vm_size_t bytes;
2901 mach_vm_size_t page;
2902 mach_vm_size_t pageOffset;
2903 mach_vm_size_t pagerOffset;
2904 IOPhysicalLength segLen;
2905 addr64_t physAddr;
2906
2907 if( !addressMap)
2908 {
2909 if( kIOMemoryRedirected & _flags)
2910 {
2911 #if DEBUG
2912 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2913 #endif
2914 do {
2915 SLEEP;
2916 } while( kIOMemoryRedirected & _flags );
2917 }
2918
2919 return( kIOReturnSuccess );
2920 }
2921
2922 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2923 assert( physAddr );
2924 pageOffset = physAddr - trunc_page_64( physAddr );
2925 pagerOffset = sourceOffset;
2926
2927 size = length + pageOffset;
2928 physAddr -= pageOffset;
2929
2930 segLen += pageOffset;
2931 bytes = size;
2932 do
2933 {
2934 // in the middle of the loop only map whole pages
2935 if( segLen >= bytes)
2936 segLen = bytes;
2937 else if( segLen != trunc_page( segLen))
2938 err = kIOReturnVMError;
2939 if( physAddr != trunc_page_64( physAddr))
2940 err = kIOReturnBadArgument;
2941 if (kIOReturnSuccess != err)
2942 break;
2943
2944 #if DEBUG
2945 if( kIOLogMapping & gIOKitDebug)
2946 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2947 addressMap, address + pageOffset, physAddr + pageOffset,
2948 segLen - pageOffset);
2949 #endif
2950
2951
2952 if( pager) {
2953 if( reserved && reserved->pagerContig) {
2954 IOPhysicalLength allLen;
2955 addr64_t allPhys;
2956
2957 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2958 assert( allPhys );
2959 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2960 }
2961 else
2962 {
2963
2964 for( page = 0;
2965 (page < segLen) && (KERN_SUCCESS == err);
2966 page += page_size)
2967 {
2968 err = device_pager_populate_object(pager, pagerOffset,
2969 (ppnum_t)(atop_64(physAddr + page)), page_size);
2970 pagerOffset += page_size;
2971 }
2972 }
2973 assert( KERN_SUCCESS == err );
2974 if( err)
2975 break;
2976 }
2977
2978 // This call to vm_fault causes an early pmap-level resolution of the
2979 // kernel mappings created above, since faulting them in later cannot
2980 // take place at interrupt level.
2981 /* *** ALERT *** */
2982 /* *** Temporary Workaround *** */
2983
2984 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2985 {
2986 vm_fault(addressMap,
2987 (vm_map_offset_t)address,
2988 VM_PROT_READ|VM_PROT_WRITE,
2989 FALSE, THREAD_UNINT, NULL,
2990 (vm_map_offset_t)0);
2991 }
2992
2993 /* *** Temporary Workaround *** */
2994 /* *** ALERT *** */
2995
2996 sourceOffset += segLen - pageOffset;
2997 address += segLen;
2998 bytes -= segLen;
2999 pageOffset = 0;
3000
3001 }
3002 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3003
3004 if (bytes)
3005 err = kIOReturnBadArgument;
3006
3007 return (err);
3008 }
3009
3010 IOReturn IOMemoryDescriptor::doUnmap(
3011 vm_map_t addressMap,
3012 IOVirtualAddress __address,
3013 IOByteCount __length )
3014 {
3015 IOReturn err;
3016 mach_vm_address_t address;
3017 mach_vm_size_t length;
3018
3019 if (__length)
3020 {
3021 address = __address;
3022 length = __length;
3023 }
3024 else
3025 {
3026 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3027 address = ((IOMemoryMap *) __address)->fAddress;
3028 length = ((IOMemoryMap *) __address)->fLength;
3029 }
3030
3031 if ((addressMap == kernel_map)
3032 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3033 addressMap = IOPageableMapForAddress( address );
3034
3035 #if DEBUG
3036 if( kIOLogMapping & gIOKitDebug)
3037 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3038 addressMap, address, length );
3039 #endif
3040
3041 err = mach_vm_deallocate( addressMap, address, length );
3042
3043 return (err);
3044 }
3045
3046 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3047 {
3048 IOReturn err = kIOReturnSuccess;
3049 IOMemoryMap * mapping = 0;
3050 OSIterator * iter;
3051
3052 LOCK;
3053
3054 if( doRedirect)
3055 _flags |= kIOMemoryRedirected;
3056 else
3057 _flags &= ~kIOMemoryRedirected;
3058
3059 do {
3060 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3061 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3062 mapping->redirect( safeTask, doRedirect );
3063
3064 iter->release();
3065 }
3066 } while( false );
3067
3068 if (!doRedirect)
3069 {
3070 WAKEUP;
3071 }
3072
3073 UNLOCK;
3074
3075 #ifndef __LP64__
3076 // temporary binary compatibility
3077 IOSubMemoryDescriptor * subMem;
3078 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3079 err = subMem->redirect( safeTask, doRedirect );
3080 else
3081 err = kIOReturnSuccess;
3082 #endif /* !__LP64__ */
3083
3084 return( err );
3085 }
3086
3087 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3088 {
3089 IOReturn err = kIOReturnSuccess;
3090
3091 if( fSuperMap) {
3092 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3093 } else {
3094
3095 LOCK;
3096
3097 do
3098 {
3099 if (!fAddress)
3100 break;
3101 if (!fAddressMap)
3102 break;
3103
3104 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3105 && (0 == (fOptions & kIOMapStatic)))
3106 {
3107 IOUnmapPages( fAddressMap, fAddress, fLength );
3108 err = kIOReturnSuccess;
3109 #if DEBUG
3110 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3111 #endif
3112 }
3113 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3114 {
3115 IOOptionBits newMode;
3116 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3117 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3118 }
3119 }
3120 while (false);
3121 UNLOCK;
3122 }
3123
3124 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3125 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3126 && safeTask
3127 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3128 fMemory->redirect(safeTask, doRedirect);
3129
3130 return( err );
3131 }
3132
3133 IOReturn IOMemoryMap::unmap( void )
3134 {
3135 IOReturn err;
3136
3137 LOCK;
3138
3139 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3140 && (0 == (fOptions & kIOMapStatic))) {
3141
3142 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3143
3144 } else
3145 err = kIOReturnSuccess;
3146
3147 if (fAddressMap)
3148 {
3149 vm_map_deallocate(fAddressMap);
3150 fAddressMap = 0;
3151 }
3152
3153 fAddress = 0;
3154
3155 UNLOCK;
3156
3157 return( err );
3158 }
3159
3160 void IOMemoryMap::taskDied( void )
3161 {
3162 LOCK;
3163 if (fUserClientUnmap)
3164 unmap();
3165 if( fAddressMap) {
3166 vm_map_deallocate(fAddressMap);
3167 fAddressMap = 0;
3168 }
3169 fAddressTask = 0;
3170 fAddress = 0;
3171 UNLOCK;
3172 }
3173
3174 IOReturn IOMemoryMap::userClientUnmap( void )
3175 {
3176 fUserClientUnmap = true;
3177 return (kIOReturnSuccess);
3178 }
3179
3180 // Overload the release mechanism. Every mapping must be a member
3181 // of its memory descriptor's _mappings set, which means there are
3182 // always 2 references on a mapping. When either of those references
3183 // is released we need to free ourselves.
3184 void IOMemoryMap::taggedRelease(const void *tag) const
3185 {
3186 LOCK;
3187 super::taggedRelease(tag, 2);
3188 UNLOCK;
3189 }
3190
3191 void IOMemoryMap::free()
3192 {
3193 unmap();
3194
3195 if (fMemory)
3196 {
3197 LOCK;
3198 fMemory->removeMapping(this);
3199 UNLOCK;
3200 fMemory->release();
3201 }
3202
3203 if (fOwner && (fOwner != fMemory))
3204 {
3205 LOCK;
3206 fOwner->removeMapping(this);
3207 UNLOCK;
3208 }
3209
3210 if (fSuperMap)
3211 fSuperMap->release();
3212
3213 if (fRedirUPL) {
3214 upl_commit(fRedirUPL, NULL, 0);
3215 upl_deallocate(fRedirUPL);
3216 }
3217
3218 super::free();
3219 }
3220
3221 IOByteCount IOMemoryMap::getLength()
3222 {
3223 return( fLength );
3224 }
3225
3226 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3227 {
3228 #ifndef __LP64__
3229 if (fSuperMap)
3230 fSuperMap->getVirtualAddress();
3231 else if (fAddressMap
3232 && vm_map_is_64bit(fAddressMap)
3233 && (sizeof(IOVirtualAddress) < 8))
3234 {
3235 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3236 }
3237 #endif /* !__LP64__ */
3238
3239 return (fAddress);
3240 }
3241
3242 #ifndef __LP64__
3243 mach_vm_address_t IOMemoryMap::getAddress()
3244 {
3245 return( fAddress);
3246 }
3247
3248 mach_vm_size_t IOMemoryMap::getSize()
3249 {
3250 return( fLength );
3251 }
3252 #endif /* !__LP64__ */
3253
3254
3255 task_t IOMemoryMap::getAddressTask()
3256 {
3257 if( fSuperMap)
3258 return( fSuperMap->getAddressTask());
3259 else
3260 return( fAddressTask);
3261 }
3262
3263 IOOptionBits IOMemoryMap::getMapOptions()
3264 {
3265 return( fOptions);
3266 }
3267
3268 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3269 {
3270 return( fMemory );
3271 }
3272
3273 IOMemoryMap * IOMemoryMap::copyCompatible(
3274 IOMemoryMap * newMapping )
3275 {
3276 task_t task = newMapping->getAddressTask();
3277 mach_vm_address_t toAddress = newMapping->fAddress;
3278 IOOptionBits _options = newMapping->fOptions;
3279 mach_vm_size_t _offset = newMapping->fOffset;
3280 mach_vm_size_t _length = newMapping->fLength;
3281
3282 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3283 return( 0 );
3284 if( (fOptions ^ _options) & kIOMapReadOnly)
3285 return( 0 );
3286 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3287 && ((fOptions ^ _options) & kIOMapCacheMask))
3288 return( 0 );
3289
3290 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3291 return( 0 );
3292
3293 if( _offset < fOffset)
3294 return( 0 );
3295
3296 _offset -= fOffset;
3297
3298 if( (_offset + _length) > fLength)
3299 return( 0 );
3300
3301 retain();
3302 if( (fLength == _length) && (!_offset))
3303 {
3304 newMapping = this;
3305 }
3306 else
3307 {
3308 newMapping->fSuperMap = this;
3309 newMapping->fOffset = fOffset + _offset;
3310 newMapping->fAddress = fAddress + _offset;
3311 }
3312
3313 return( newMapping );
3314 }
3315
3316 IOPhysicalAddress
3317 #ifdef __LP64__
3318 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3319 #else /* !__LP64__ */
3320 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3321 #endif /* !__LP64__ */
3322 {
3323 IOPhysicalAddress address;
3324
3325 LOCK;
3326 #ifdef __LP64__
3327 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3328 #else /* !__LP64__ */
3329 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3330 #endif /* !__LP64__ */
3331 UNLOCK;
3332
3333 return( address );
3334 }
3335
3336 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3337
3338 #undef super
3339 #define super OSObject
3340
3341 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3342
3343 void IOMemoryDescriptor::initialize( void )
3344 {
3345 if( 0 == gIOMemoryLock)
3346 gIOMemoryLock = IORecursiveLockAlloc();
3347
3348 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3349 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3350 gIOLastPage = IOGetLastPageNumber();
3351 }
3352
3353 void IOMemoryDescriptor::free( void )
3354 {
3355 if( _mappings)
3356 _mappings->release();
3357
3358 super::free();
3359 }
3360
3361 IOMemoryMap * IOMemoryDescriptor::setMapping(
3362 task_t intoTask,
3363 IOVirtualAddress mapAddress,
3364 IOOptionBits options )
3365 {
3366 return (createMappingInTask( intoTask, mapAddress,
3367 options | kIOMapStatic,
3368 0, getLength() ));
3369 }
3370
3371 IOMemoryMap * IOMemoryDescriptor::map(
3372 IOOptionBits options )
3373 {
3374 return (createMappingInTask( kernel_task, 0,
3375 options | kIOMapAnywhere,
3376 0, getLength() ));
3377 }
3378
3379 #ifndef __LP64__
3380 IOMemoryMap * IOMemoryDescriptor::map(
3381 task_t intoTask,
3382 IOVirtualAddress atAddress,
3383 IOOptionBits options,
3384 IOByteCount offset,
3385 IOByteCount length )
3386 {
3387 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3388 {
3389 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3390 return (0);
3391 }
3392
3393 return (createMappingInTask(intoTask, atAddress,
3394 options, offset, length));
3395 }
3396 #endif /* !__LP64__ */
3397
3398 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3399 task_t intoTask,
3400 mach_vm_address_t atAddress,
3401 IOOptionBits options,
3402 mach_vm_size_t offset,
3403 mach_vm_size_t length)
3404 {
3405 IOMemoryMap * result;
3406 IOMemoryMap * mapping;
3407
3408 if (0 == length)
3409 length = getLength();
3410
3411 mapping = new IOMemoryMap;
3412
3413 if( mapping
3414 && !mapping->init( intoTask, atAddress,
3415 options, offset, length )) {
3416 mapping->release();
3417 mapping = 0;
3418 }
3419
3420 if (mapping)
3421 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3422 else
3423 result = 0;
3424
3425 #if DEBUG
3426 if (!result)
3427 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3428 this, atAddress, options, offset, length);
3429 #endif
3430
3431 return (result);
3432 }
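
// A minimal, illustrative sketch (kept disabled) of createMappingInTask():
// map a descriptor into a client task and release the mapping when done.
// The function name and its parameters are hypothetical placeholders.
#if 0
static IOReturn
ExampleMapIntoTask(IOMemoryDescriptor * md, task_t intoTask)
{
    // kIOMapAnywhere lets the VM choose the address; a zero length maps
    // the whole descriptor. Add kIOMapReadOnly or a kIOMap*Cache option
    // as the client requires.
    IOMemoryMap * map = md->createMappingInTask(intoTask, 0, kIOMapAnywhere, 0, 0);
    if (!map)
        return (kIOReturnVMError);

    mach_vm_address_t addr = map->getAddress();   // address in intoTask's map
    IOByteCount       len  = map->getLength();
    // ... hand addr/len back to the client ...
    (void) addr; (void) len;

    map->release();                               // dropping the map removes the mapping
    return (kIOReturnSuccess);
}
#endif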
3433
3434 #ifndef __LP64__ // there is only a 64 bit version for LP64
3435 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3436 IOOptionBits options,
3437 IOByteCount offset)
3438 {
3439 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3440 }
3441 #endif
3442
3443 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3444 IOOptionBits options,
3445 mach_vm_size_t offset)
3446 {
3447 IOReturn err = kIOReturnSuccess;
3448 IOMemoryDescriptor * physMem = 0;
3449
3450 LOCK;
3451
3452 if (fAddress && fAddressMap) do
3453 {
3454 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3455 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3456 {
3457 physMem = fMemory;
3458 physMem->retain();
3459 }
3460
3461 if (!fRedirUPL)
3462 {
3463 vm_size_t size = round_page(fLength);
3464 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3465 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3466 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3467 NULL, NULL,
3468 &flags))
3469 fRedirUPL = 0;
3470
3471 if (physMem)
3472 {
3473 IOUnmapPages( fAddressMap, fAddress, fLength );
3474 if (false)
3475 physMem->redirect(0, true);
3476 }
3477 }
3478
3479 if (newBackingMemory)
3480 {
3481 if (newBackingMemory != fMemory)
3482 {
3483 fOffset = 0;
3484 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3485 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3486 offset, fLength))
3487 err = kIOReturnError;
3488 }
3489 if (fRedirUPL)
3490 {
3491 upl_commit(fRedirUPL, NULL, 0);
3492 upl_deallocate(fRedirUPL);
3493 fRedirUPL = 0;
3494 }
3495 if (false && physMem)
3496 physMem->redirect(0, false);
3497 }
3498 }
3499 while (false);
3500
3501 UNLOCK;
3502
3503 if (physMem)
3504 physMem->release();
3505
3506 return (err);
3507 }
3508
3509 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3510 IOMemoryDescriptor * owner,
3511 task_t __intoTask,
3512 IOVirtualAddress __address,
3513 IOOptionBits options,
3514 IOByteCount __offset,
3515 IOByteCount __length )
3516 {
3517 #ifndef __LP64__
3518 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3519 #endif /* !__LP64__ */
3520
3521 IOMemoryDescriptor * mapDesc = 0;
3522 IOMemoryMap * result = 0;
3523 OSIterator * iter;
3524
3525 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3526 mach_vm_size_t offset = mapping->fOffset + __offset;
3527 mach_vm_size_t length = mapping->fLength;
3528
3529 mapping->fOffset = offset;
3530
3531 LOCK;
3532
3533 do
3534 {
3535 if (kIOMapStatic & options)
3536 {
3537 result = mapping;
3538 addMapping(mapping);
3539 mapping->setMemoryDescriptor(this, 0);
3540 continue;
3541 }
3542
3543 if (kIOMapUnique & options)
3544 {
3545 addr64_t phys;
3546 IOByteCount physLen;
3547
3548 // if (owner != this) continue;
3549
3550 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3551 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3552 {
3553 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3554 if (!phys || (physLen < length))
3555 continue;
3556
3557 mapDesc = IOMemoryDescriptor::withAddressRange(
3558 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3559 if (!mapDesc)
3560 continue;
3561 offset = 0;
3562 mapping->fOffset = offset;
3563 }
3564 }
3565 else
3566 {
3567 // look for a compatible existing mapping
3568 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3569 {
3570 IOMemoryMap * lookMapping;
3571 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3572 {
3573 if ((result = lookMapping->copyCompatible(mapping)))
3574 {
3575 addMapping(result);
3576 result->setMemoryDescriptor(this, offset);
3577 break;
3578 }
3579 }
3580 iter->release();
3581 }
3582 if (result || (options & kIOMapReference))
3583 {
3584 if (result != mapping)
3585 {
3586 mapping->release();
3587 mapping = NULL;
3588 }
3589 continue;
3590 }
3591 }
3592
3593 if (!mapDesc)
3594 {
3595 mapDesc = this;
3596 mapDesc->retain();
3597 }
3598 IOReturn
3599 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3600 if (kIOReturnSuccess == kr)
3601 {
3602 result = mapping;
3603 mapDesc->addMapping(result);
3604 result->setMemoryDescriptor(mapDesc, offset);
3605 }
3606 else
3607 {
3608 mapping->release();
3609 mapping = NULL;
3610 }
3611 }
3612 while( false );
3613
3614 UNLOCK;
3615
3616 if (mapDesc)
3617 mapDesc->release();
3618
3619 return (result);
3620 }
3621
3622 void IOMemoryDescriptor::addMapping(
3623 IOMemoryMap * mapping )
3624 {
3625 if( mapping)
3626 {
3627 if( 0 == _mappings)
3628 _mappings = OSSet::withCapacity(1);
3629 if( _mappings )
3630 _mappings->setObject( mapping );
3631 }
3632 }
3633
3634 void IOMemoryDescriptor::removeMapping(
3635 IOMemoryMap * mapping )
3636 {
3637 if( _mappings)
3638 _mappings->removeObject( mapping);
3639 }
3640
3641 #ifndef __LP64__
3642 // obsolete initializers
3643 // - initWithOptions is the designated initializer
3644 bool
3645 IOMemoryDescriptor::initWithAddress(void * address,
3646 IOByteCount length,
3647 IODirection direction)
3648 {
3649 return( false );
3650 }
3651
3652 bool
3653 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3654 IOByteCount length,
3655 IODirection direction,
3656 task_t task)
3657 {
3658 return( false );
3659 }
3660
3661 bool
3662 IOMemoryDescriptor::initWithPhysicalAddress(
3663 IOPhysicalAddress address,
3664 IOByteCount length,
3665 IODirection direction )
3666 {
3667 return( false );
3668 }
3669
3670 bool
3671 IOMemoryDescriptor::initWithRanges(
3672 IOVirtualRange * ranges,
3673 UInt32 withCount,
3674 IODirection direction,
3675 task_t task,
3676 bool asReference)
3677 {
3678 return( false );
3679 }
3680
3681 bool
3682 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3683 UInt32 withCount,
3684 IODirection direction,
3685 bool asReference)
3686 {
3687 return( false );
3688 }
3689
3690 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3691 IOByteCount * lengthOfSegment)
3692 {
3693 return( 0 );
3694 }
3695 #endif /* !__LP64__ */
3696
3697 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3698
3699 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3700 {
3701 OSSymbol const *keys[2];
3702 OSObject *values[2];
3703 struct SerData {
3704 user_addr_t address;
3705 user_size_t length;
3706 } *vcopy;
3707 unsigned int index, nRanges;
3708 bool result;
3709
3710 IOOptionBits type = _flags & kIOMemoryTypeMask;
3711
3712 if (s == NULL) return false;
3713 if (s->previouslySerialized(this)) return true;
3714
3715 // Pretend we are an array.
3716 if (!s->addXMLStartTag(this, "array")) return false;
3717
3718 nRanges = _rangesCount;
3719 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3720 if (vcopy == 0) return false;
3721
3722 keys[0] = OSSymbol::withCString("address");
3723 keys[1] = OSSymbol::withCString("length");
3724
3725 result = false;
3726 values[0] = values[1] = 0;
3727
3728 // From this point on, errors can goto bail for cleanup.
3729
3730 // Copy the volatile data so we don't have to allocate memory
3731 // while the lock is held.
3732 LOCK;
3733 if (nRanges == _rangesCount) {
3734 Ranges vec = _ranges;
3735 for (index = 0; index < nRanges; index++) {
3736 user_addr_t addr; IOByteCount len;
3737 getAddrLenForInd(addr, len, type, vec, index);
3738 vcopy[index].address = addr;
3739 vcopy[index].length = len;
3740 }
3741 } else {
3742 // The descriptor changed out from under us. Give up.
3743 UNLOCK;
3744 result = false;
3745 goto bail;
3746 }
3747 UNLOCK;
3748
3749 for (index = 0; index < nRanges; index++)
3750 {
3751 user_addr_t addr = vcopy[index].address;
3752 IOByteCount len = (IOByteCount) vcopy[index].length;
3753 values[0] =
3754 OSNumber::withNumber(addr, sizeof(addr) * 8);
3755 if (values[0] == 0) {
3756 result = false;
3757 goto bail;
3758 }
3759 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3760 if (values[1] == 0) {
3761 result = false;
3762 goto bail;
3763 }
3764 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3765 if (dict == 0) {
3766 result = false;
3767 goto bail;
3768 }
3769 values[0]->release();
3770 values[1]->release();
3771 values[0] = values[1] = 0;
3772
3773 result = dict->serialize(s);
3774 dict->release();
3775 if (!result) {
3776 goto bail;
3777 }
3778 }
3779 result = s->addXMLEndTag("array");
3780
3781 bail:
3782 if (values[0])
3783 values[0]->release();
3784 if (values[1])
3785 values[1]->release();
3786 if (keys[0])
3787 keys[0]->release();
3788 if (keys[1])
3789 keys[1]->release();
3790 if (vcopy)
3791 IOFree(vcopy, sizeof(SerData) * nRanges);
3792 return result;
3793 }
3794
3795 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3796
3797 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3798 #ifdef __LP64__
3799 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3800 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3801 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3802 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3803 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3804 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3805 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3806 #else /* !__LP64__ */
3807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3809 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3811 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3812 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3813 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3814 #endif /* !__LP64__ */
3815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3820 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3821 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3822 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3823
3824 /* ex-inline function implementation */
3825 IOPhysicalAddress
3826 IOMemoryDescriptor::getPhysicalAddress()
3827 { return( getPhysicalSegment( 0, 0 )); }