apple/xnu.git: iokit/Kernel/IOMemoryDescriptor.cpp (blob a46021ede7021636d71e0883c41519803169ed28)
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IOKitKeysPrivate.h>
44
45 #ifndef __LP64__
46 #include <IOKit/IOSubMemoryDescriptor.h>
47 #endif /* !__LP64__ */
48
49 #include <IOKit/IOKitDebug.h>
50 #include <libkern/OSDebug.h>
51
52 #include "IOKitKernelInternal.h"
53
54 #include <libkern/c++/OSContainers.h>
55 #include <libkern/c++/OSDictionary.h>
56 #include <libkern/c++/OSArray.h>
57 #include <libkern/c++/OSSymbol.h>
58 #include <libkern/c++/OSNumber.h>
59
60 #include <sys/uio.h>
61
62 __BEGIN_DECLS
63 #include <vm/pmap.h>
64 #include <vm/vm_pageout.h>
65 #include <mach/memory_object_types.h>
66 #include <device/device_port.h>
67
68 #include <mach/vm_prot.h>
69 #include <mach/mach_vm.h>
70 #include <vm/vm_fault.h>
71 #include <vm/vm_protos.h>
72
73 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
74 void ipc_port_release_send(ipc_port_t port);
75
76 /* Copy between a physical page and a virtual address in the given vm_map */
77 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
78
79 memory_object_t
80 device_pager_setup(
81 memory_object_t pager,
82 uintptr_t device_handle,
83 vm_size_t size,
84 int flags);
85 void
86 device_pager_deallocate(
87 memory_object_t);
88 kern_return_t
89 device_pager_populate_object(
90 memory_object_t pager,
91 vm_object_offset_t offset,
92 ppnum_t phys_addr,
93 vm_size_t size);
94 kern_return_t
95 memory_object_iopl_request(
96 ipc_port_t port,
97 memory_object_offset_t offset,
98 vm_size_t *upl_size,
99 upl_t *upl_ptr,
100 upl_page_info_array_t user_page_list,
101 unsigned int *page_list_count,
102 int *flags);
103
104 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
105
106 __END_DECLS
107
108 #define kIOMaximumMappedIOByteCount (512*1024*1024)
109
110 static IOMapper * gIOSystemMapper = NULL;
111
112 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
113
114 ppnum_t gIOLastPage;
115
116 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
117
118 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
119
120 #define super IOMemoryDescriptor
121
122 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
123
124 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
125
126 static IORecursiveLock * gIOMemoryLock;
127
128 #define LOCK IORecursiveLockLock( gIOMemoryLock)
129 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
130 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
131 #define WAKEUP \
132 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
133
134 #if 0
135 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
136 #else
137 #define DEBG(fmt, args...) {}
138 #endif
139
140 #define IOMD_DEBUG_DMAACTIVE 1
141
142 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
143
144 // Some data structures and accessor macros used by the initWithOptions()
145 // function.
146
147 enum ioPLBlockFlags {
148 kIOPLOnDevice = 0x00000001,
149 kIOPLExternUPL = 0x00000002,
150 };
151
152 struct typePersMDData
153 {
154 const IOGeneralMemoryDescriptor *fMD;
155 ipc_port_t fMemEntry;
156 };
157
158 struct ioPLBlock {
159 upl_t fIOPL;
160 vm_address_t fPageInfo; // Pointer to page list or index into it
161 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
162 ppnum_t fMappedBase; // Page number of first page in this iopl
163 unsigned int fPageOffset; // Offset within first page of iopl
164 unsigned int fFlags; // Flags
165 };
166
167 struct ioGMDData {
168 IOMapper *fMapper;
169 uint64_t fPreparationID;
170 unsigned int fPageCnt;
171 #if __LP64__
172 // align arrays to 8 bytes so following macros work
173 unsigned int fPad;
174 #endif
175 upl_page_info_t fPageList[];
176 ioPLBlock fBlocks[];
177 };
178
179 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
180 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
181 #define getNumIOPL(osd, d) \
182 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
183 #define getPageList(d) (&(d->fPageList[0]))
184 #define computeDataSize(p, u) \
185 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
186
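// Illustrative sketch, for exposition only: how the accessor macros above fit
// together when an ioGMDData buffer is built inside an OSData. 'pageCount'
// and 'uplCount' are hypothetical values; the pattern mirrors what
// initWithOptions() and wireVirtual() do further down in this file.
//
//     unsigned int dataSize = computeDataSize(pageCount, uplCount); // header + page list + iopl blocks
//     OSData *entries = OSData::withCapacity(dataSize);
//     entries->appendBytes(0, sizeof(ioGMDData));             // reserve the fixed header
//     ioGMDData *dataP = getDataP(entries);                    // header lives at the start of the buffer
//     dataP->fPageCnt = pageCount;                             // tells getIOPLList() where fPageList[] ends
//     upl_page_info_t *pageList = getPageList(dataP);          // &dataP->fPageList[0]
//     ioPLBlock       *ioplList = getIOPLList(dataP);          // first ioPLBlock, just past the page list
//     unsigned int     numIOPL  = getNumIOPL(entries, dataP);  // derived from the OSData's current length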
187
188 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
189
190 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
191
192
193 extern "C" {
194
195 kern_return_t device_data_action(
196 uintptr_t device_handle,
197 ipc_port_t device_pager,
198 vm_prot_t protection,
199 vm_object_offset_t offset,
200 vm_size_t size)
201 {
202 struct ExpansionData {
203 void * devicePager;
204 unsigned int pagerContig:1;
205 unsigned int unused:31;
206 IOMemoryDescriptor * memory;
207 };
208 kern_return_t kr;
209 ExpansionData * ref = (ExpansionData *) device_handle;
210 IOMemoryDescriptor * memDesc;
211
212 LOCK;
213 memDesc = ref->memory;
214 if( memDesc)
215 {
216 memDesc->retain();
217 kr = memDesc->handleFault( device_pager, 0, 0,
218 offset, size, kIOMapDefaultCache /*?*/);
219 memDesc->release();
220 }
221 else
222 kr = KERN_ABORTED;
223 UNLOCK;
224
225 return( kr );
226 }
227
228 kern_return_t device_close(
229 uintptr_t device_handle)
230 {
231 struct ExpansionData {
232 void * devicePager;
233 unsigned int pagerContig:1;
234 unsigned int unused:31;
235 IOMemoryDescriptor * memory;
236 };
237 ExpansionData * ref = (ExpansionData *) device_handle;
238
239 IODelete( ref, ExpansionData, 1 );
240
241 return( kIOReturnSuccess );
242 }
243 }; // end extern "C"
244
245 // Note: this inline function uses C++ reference arguments to return values.
246 // This means that pointers are not passed, and NULL does not have to be
247 // checked for, since a NULL reference is illegal.
248 static inline void
249 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
250 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
251 {
252 assert(kIOMemoryTypeUIO == type
253 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
254 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
255 if (kIOMemoryTypeUIO == type) {
256 user_size_t us;
257 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
258 }
259 #ifndef __LP64__
260 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
261 IOAddressRange cur = r.v64[ind];
262 addr = cur.address;
263 len = cur.length;
264 }
265 #endif /* !__LP64__ */
266 else {
267 IOVirtualRange cur = r.v[ind];
268 addr = cur.address;
269 len = cur.length;
270 }
271 }
272
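// Illustrative sketch, for exposition only: the typical call pattern for
// getAddrLenForInd(), mirroring the loops in initWithOptions() and
// getPhysicalSegment() below. The descriptor's protected fields (_flags,
// _ranges, _rangesCount) are assumed to be visible at the call site.
//
//     IOOptionBits type = _flags & kIOMemoryTypeMask;
//     for (UInt32 ind = 0; ind < _rangesCount; ind++) {
//         user_addr_t      addr;
//         IOPhysicalLength len;
//         getAddrLenForInd(addr, len, type, _ranges, ind);  // addr & len come back by reference
//         // ... consume addr/len for this range ...
//     }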
273 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
274
275 IOMemoryDescriptor *
276 IOMemoryDescriptor::withAddress(void * address,
277 IOByteCount length,
278 IODirection direction)
279 {
280 return IOMemoryDescriptor::
281 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
282 }
283
284 #ifndef __LP64__
285 IOMemoryDescriptor *
286 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
287 IOByteCount length,
288 IODirection direction,
289 task_t task)
290 {
291 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
292 if (that)
293 {
294 if (that->initWithAddress(address, length, direction, task))
295 return that;
296
297 that->release();
298 }
299 return 0;
300 }
301 #endif /* !__LP64__ */
302
303 IOMemoryDescriptor *
304 IOMemoryDescriptor::withPhysicalAddress(
305 IOPhysicalAddress address,
306 IOByteCount length,
307 IODirection direction )
308 {
309 #ifdef __LP64__
310 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
311 #else /* !__LP64__ */
312 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
313 if (self
314 && !self->initWithPhysicalAddress(address, length, direction)) {
315 self->release();
316 return 0;
317 }
318
319 return self;
320 #endif /* !__LP64__ */
321 }
322
323 #ifndef __LP64__
324 IOMemoryDescriptor *
325 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
326 UInt32 withCount,
327 IODirection direction,
328 task_t task,
329 bool asReference)
330 {
331 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
332 if (that)
333 {
334 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
335 return that;
336
337 that->release();
338 }
339 return 0;
340 }
341 #endif /* !__LP64__ */
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
345 mach_vm_size_t length,
346 IOOptionBits options,
347 task_t task)
348 {
349 IOAddressRange range = { address, length };
350 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
351 }
352
353 IOMemoryDescriptor *
354 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
355 UInt32 rangeCount,
356 IOOptionBits options,
357 task_t task)
358 {
359 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
360 if (that)
361 {
362 if (task)
363 options |= kIOMemoryTypeVirtual64;
364 else
365 options |= kIOMemoryTypePhysical64;
366
367 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
368 return that;
369
370 that->release();
371 }
372
373 return 0;
374 }
375
376
377 /*
378 * withOptions:
379 *
380 * Create a new IOMemoryDescriptor. The buffer is made up of several
381 * virtual address ranges, from a given task.
382 *
383 * Passing the ranges as a reference will avoid an extra allocation.
384 */
385 IOMemoryDescriptor *
386 IOMemoryDescriptor::withOptions(void * buffers,
387 UInt32 count,
388 UInt32 offset,
389 task_t task,
390 IOOptionBits opts,
391 IOMapper * mapper)
392 {
393 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
394
395 if (self
396 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
397 {
398 self->release();
399 return 0;
400 }
401
402 return self;
403 }
404
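// Illustrative sketch, for exposition only: how a driver typically wraps a
// client buffer using the factories above. 'clientAddr', 'clientLen' and
// 'clientTask' are hypothetical values supplied by the caller.
//
//     IOMemoryDescriptor *md = IOMemoryDescriptor::withAddressRange(
//         clientAddr, clientLen, kIODirectionOutIn, clientTask);
//     if (md) {
//         if (kIOReturnSuccess == md->prepare()) {  // wire the pages before DMA or readBytes()
//             // ... use the descriptor ...
//             md->complete();
//         }
//         md->release();
//     }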
405 bool IOMemoryDescriptor::initWithOptions(void * buffers,
406 UInt32 count,
407 UInt32 offset,
408 task_t task,
409 IOOptionBits options,
410 IOMapper * mapper)
411 {
412 return( false );
413 }
414
415 #ifndef __LP64__
416 IOMemoryDescriptor *
417 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
418 UInt32 withCount,
419 IODirection direction,
420 bool asReference)
421 {
422 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
423 if (that)
424 {
425 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
426 return that;
427
428 that->release();
429 }
430 return 0;
431 }
432
433 IOMemoryDescriptor *
434 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
435 IOByteCount offset,
436 IOByteCount length,
437 IODirection direction)
438 {
439 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
440 }
441 #endif /* !__LP64__ */
442
443 IOMemoryDescriptor *
444 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
445 {
446 IOGeneralMemoryDescriptor *origGenMD =
447 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
448
449 if (origGenMD)
450 return IOGeneralMemoryDescriptor::
451 withPersistentMemoryDescriptor(origGenMD);
452 else
453 return 0;
454 }
455
456 IOMemoryDescriptor *
457 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
458 {
459 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
460
461 if (!sharedMem)
462 return 0;
463
464 if (sharedMem == originalMD->_memEntry) {
465 originalMD->retain(); // Add a new reference to ourselves
466 ipc_port_release_send(sharedMem); // Remove extra send right
467 return originalMD;
468 }
469
470 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
471 typePersMDData initData = { originalMD, sharedMem };
472
473 if (self
474 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
475 self->release();
476 self = 0;
477 }
478 return self;
479 }
480
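// Illustrative sketch, for exposition only: the usual way a caller consumes
// the persistent-descriptor factory above. When the original's backing memory
// has not been replaced, the same (retained) descriptor is returned;
// otherwise a new descriptor sharing the named entry comes back. 'origMD' is
// a hypothetical descriptor created with kIOMemoryPersistent set.
//
//     IOMemoryDescriptor *persistent =
//         IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD);
//     if (persistent) {
//         // ... map or DMA against the persistent view ...
//         persistent->release();
//     }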
481 void *IOGeneralMemoryDescriptor::createNamedEntry()
482 {
483 kern_return_t error;
484 ipc_port_t sharedMem;
485
486 IOOptionBits type = _flags & kIOMemoryTypeMask;
487
488 user_addr_t range0Addr;
489 IOByteCount range0Len;
490 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
491 range0Addr = trunc_page_64(range0Addr);
492
493 vm_size_t size = ptoa_32(_pages);
494 vm_address_t kernelPage = (vm_address_t) range0Addr;
495
496 vm_map_t theMap = ((_task == kernel_task)
497 && (kIOMemoryBufferPageable & _flags))
498 ? IOPageableMapForAddress(kernelPage)
499 : get_task_map(_task);
500
501 memory_object_size_t actualSize = size;
502 vm_prot_t prot = VM_PROT_READ;
503 #if CONFIG_EMBEDDED
504 if (kIODirectionOut != (kIODirectionOutIn & _flags))
505 #endif
506 prot |= VM_PROT_WRITE;
507
508 if (_memEntry)
509 prot |= MAP_MEM_NAMED_REUSE;
510
511 error = mach_make_memory_entry_64(theMap,
512 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
513
514 if (KERN_SUCCESS == error) {
515 if (actualSize == size) {
516 return sharedMem;
517 } else {
518 #if IOASSERT
519 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
520 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
521 #endif
522 ipc_port_release_send( sharedMem );
523 }
524 }
525
526 return MACH_PORT_NULL;
527 }
528
529 #ifndef __LP64__
530 bool
531 IOGeneralMemoryDescriptor::initWithAddress(void * address,
532 IOByteCount withLength,
533 IODirection withDirection)
534 {
535 _singleRange.v.address = (vm_offset_t) address;
536 _singleRange.v.length = withLength;
537
538 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
539 }
540
541 bool
542 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
543 IOByteCount withLength,
544 IODirection withDirection,
545 task_t withTask)
546 {
547 _singleRange.v.address = address;
548 _singleRange.v.length = withLength;
549
550 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
551 }
552
553 bool
554 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
555 IOPhysicalAddress address,
556 IOByteCount withLength,
557 IODirection withDirection )
558 {
559 _singleRange.p.address = address;
560 _singleRange.p.length = withLength;
561
562 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
563 }
564
565 bool
566 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
567 IOPhysicalRange * ranges,
568 UInt32 count,
569 IODirection direction,
570 bool reference)
571 {
572 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
573
574 if (reference)
575 mdOpts |= kIOMemoryAsReference;
576
577 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
578 }
579
580 bool
581 IOGeneralMemoryDescriptor::initWithRanges(
582 IOVirtualRange * ranges,
583 UInt32 count,
584 IODirection direction,
585 task_t task,
586 bool reference)
587 {
588 IOOptionBits mdOpts = direction;
589
590 if (reference)
591 mdOpts |= kIOMemoryAsReference;
592
593 if (task) {
594 mdOpts |= kIOMemoryTypeVirtual;
595
596 // Auto-prepare if this is a kernel memory descriptor as very few
597 // clients bother to prepare() kernel memory.
598 // But it was not enforced so what are you going to do?
599 if (task == kernel_task)
600 mdOpts |= kIOMemoryAutoPrepare;
601 }
602 else
603 mdOpts |= kIOMemoryTypePhysical;
604
605 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
606 }
607 #endif /* !__LP64__ */
608
609 /*
610 * initWithOptions:
611 *
612 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
613 * address ranges from a given task, several physical ranges, a UPL from the ubc
614 * system, or a uio (which may be 64-bit) from the BSD subsystem.
615 *
616 * Passing the ranges as a reference will avoid an extra allocation.
617 *
618 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
619 * existing instance -- note this behavior is not commonly supported in other
620 * I/O Kit classes, although it is supported here.
621 */
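// Illustrative sketch, for exposition only: re-targeting an existing
// IOGeneralMemoryDescriptor as described above. 'gmd', 'newAddr', 'newLen'
// and 'task' are hypothetical; the second initWithOptions() call tears down
// the previous state (completing any outstanding prepare()s) before adopting
// the new range.
//
//     IOAddressRange newRange = { newAddr, newLen };
//     bool ok = gmd->initWithOptions(&newRange, 1, 0, task,
//                                    kIOMemoryTypeVirtual64 | kIODirectionOutIn,
//                                    /* mapper */ 0);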
622
623 bool
624 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
625 UInt32 count,
626 UInt32 offset,
627 task_t task,
628 IOOptionBits options,
629 IOMapper * mapper)
630 {
631 IOOptionBits type = options & kIOMemoryTypeMask;
632
633 // Grab the original MD's configuration data to initialise the
634 // arguments to this function.
635 if (kIOMemoryTypePersistentMD == type) {
636
637 typePersMDData *initData = (typePersMDData *) buffers;
638 const IOGeneralMemoryDescriptor *orig = initData->fMD;
639 ioGMDData *dataP = getDataP(orig->_memoryEntries);
640
641 // Only accept persistent memory descriptors with valid dataP data.
642 assert(orig->_rangesCount == 1);
643 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
644 return false;
645
646 _memEntry = initData->fMemEntry; // Grab the new named entry
647 options = orig->_flags | kIOMemoryAsReference;
648 _singleRange = orig->_singleRange; // Initialise our range
649 buffers = &_singleRange;
650 count = 1;
651
652 // Now grab the original task and whatever mapper was previously used
653 task = orig->_task;
654 mapper = dataP->fMapper;
655
656 // We are ready to go through the original initialisation now
657 }
658
659 switch (type) {
660 case kIOMemoryTypeUIO:
661 case kIOMemoryTypeVirtual:
662 #ifndef __LP64__
663 case kIOMemoryTypeVirtual64:
664 #endif /* !__LP64__ */
665 assert(task);
666 if (!task)
667 return false;
668
669 #ifndef __LP64__
670 if (vm_map_is_64bit(get_task_map(task))
671 && (kIOMemoryTypeVirtual == type)
672 && ((IOVirtualRange *) buffers)->address)
673 {
674 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
675 return false;
676 }
677 #endif /* !__LP64__ */
678 break;
679
680 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
681 #ifndef __LP64__
682 case kIOMemoryTypePhysical64:
683 #endif /* !__LP64__ */
684 case kIOMemoryTypeUPL:
685 assert(!task);
686 break;
687 default:
688 return false; /* bad argument */
689 }
690
691 assert(buffers);
692 assert(count);
693
694 /*
695 * We can check the _initialized instance variable before having ever set
696 * it to an initial value because I/O Kit guarantees that all our instance
697 * variables are zeroed on an object's allocation.
698 */
699
700 if (_initialized) {
701 /*
702 * An existing memory descriptor is being retargeted to point to
703 * somewhere else. Clean up our present state.
704 */
705 IOOptionBits type = _flags & kIOMemoryTypeMask;
706 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
707 {
708 while (_wireCount)
709 complete();
710 }
711 if (_ranges.v && !(kIOMemoryAsReference & _flags))
712 {
713 if (kIOMemoryTypeUIO == type)
714 uio_free((uio_t) _ranges.v);
715 #ifndef __LP64__
716 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
717 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
718 #endif /* !__LP64__ */
719 else
720 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
721 }
722
723 if (_memEntry)
724 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
725 if (_mappings)
726 _mappings->flushCollection();
727 }
728 else {
729 if (!super::init())
730 return false;
731 _initialized = true;
732 }
733
734 // Grab the appropriate mapper
735 if (kIOMemoryMapperNone & options)
736 mapper = 0; // No Mapper
737 else if (mapper == kIOMapperSystem) {
738 IOMapper::checkForSystemMapper();
739 gIOSystemMapper = mapper = IOMapper::gSystem;
740 }
741
742 // Temp binary compatibility for kIOMemoryThreadSafe
743 if (kIOMemoryReserved6156215 & options)
744 {
745 options &= ~kIOMemoryReserved6156215;
746 options |= kIOMemoryThreadSafe;
747 }
748 // Remove the dynamic internal use flags from the initial setting
749 options &= ~(kIOMemoryPreparedReadOnly);
750 _flags = options;
751 _task = task;
752
753 #ifndef __LP64__
754 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
755 #endif /* !__LP64__ */
756
757 __iomd_reservedA = 0;
758 __iomd_reservedB = 0;
759 _highestPage = 0;
760
761 if (kIOMemoryThreadSafe & options)
762 {
763 if (!_prepareLock)
764 _prepareLock = IOLockAlloc();
765 }
766 else if (_prepareLock)
767 {
768 IOLockFree(_prepareLock);
769 _prepareLock = NULL;
770 }
771
772 if (kIOMemoryTypeUPL == type) {
773
774 ioGMDData *dataP;
775 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
776
777 if (!_memoryEntries) {
778 _memoryEntries = OSData::withCapacity(dataSize);
779 if (!_memoryEntries)
780 return false;
781 }
782 else if (!_memoryEntries->initWithCapacity(dataSize))
783 return false;
784
785 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
786 dataP = getDataP(_memoryEntries);
787 dataP->fMapper = mapper;
788 dataP->fPageCnt = 0;
789
790 // _wireCount++; // UPLs start out life wired
791
792 _length = count;
793 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
794
795 ioPLBlock iopl;
796 iopl.fIOPL = (upl_t) buffers;
797 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
798
799 if (upl_get_size(iopl.fIOPL) < (count + offset))
800 panic("short external upl");
801
802 // Set the flag kIOPLOnDevice, which is conveniently equal to 1
803 iopl.fFlags = pageList->device | kIOPLExternUPL;
804 iopl.fIOMDOffset = 0;
805
806 _highestPage = upl_get_highest_page(iopl.fIOPL);
807
808 if (!pageList->device) {
809 // Pre-compute the offset into the UPL's page list
810 pageList = &pageList[atop_32(offset)];
811 offset &= PAGE_MASK;
812 if (mapper) {
813 iopl.fMappedBase = mapper->iovmAlloc(_pages);
814 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
815 }
816 else
817 iopl.fMappedBase = 0;
818 }
819 else
820 iopl.fMappedBase = 0;
821 iopl.fPageInfo = (vm_address_t) pageList;
822 iopl.fPageOffset = offset;
823
824 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
825 }
826 else {
827 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
828 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
829
830 // Initialize the memory descriptor
831 if (options & kIOMemoryAsReference) {
832 #ifndef __LP64__
833 _rangesIsAllocated = false;
834 #endif /* !__LP64__ */
835
836 // Hack assignment to get the buffer arg into _ranges.
837 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
838 // work, C++ sigh.
839 // This also initialises the uio & physical ranges.
840 _ranges.v = (IOVirtualRange *) buffers;
841 }
842 else {
843 #ifndef __LP64__
844 _rangesIsAllocated = true;
845 #endif /* !__LP64__ */
846 switch (type)
847 {
848 case kIOMemoryTypeUIO:
849 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
850 break;
851
852 #ifndef __LP64__
853 case kIOMemoryTypeVirtual64:
854 case kIOMemoryTypePhysical64:
855 if (count == 1
856 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL) {
857 if (kIOMemoryTypeVirtual64 == type)
858 type = kIOMemoryTypeVirtual;
859 else
860 type = kIOMemoryTypePhysical;
861 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
862 _rangesIsAllocated = false;
863 _ranges.v = &_singleRange.v;
864 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
865 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
866 break;
867 }
868 _ranges.v64 = IONew(IOAddressRange, count);
869 if (!_ranges.v64)
870 return false;
871 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
872 break;
873 #endif /* !__LP64__ */
874 case kIOMemoryTypeVirtual:
875 case kIOMemoryTypePhysical:
876 if (count == 1) {
877 _flags |= kIOMemoryAsReference;
878 #ifndef __LP64__
879 _rangesIsAllocated = false;
880 #endif /* !__LP64__ */
881 _ranges.v = &_singleRange.v;
882 } else {
883 _ranges.v = IONew(IOVirtualRange, count);
884 if (!_ranges.v)
885 return false;
886 }
887 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
888 break;
889 }
890 }
891
892 // Find starting address within the vector of ranges
893 Ranges vec = _ranges;
894 UInt32 length = 0;
895 UInt32 pages = 0;
896 for (unsigned ind = 0; ind < count; ind++) {
897 user_addr_t addr;
898 IOPhysicalLength len;
899
900 // addr & len are returned by this function
901 getAddrLenForInd(addr, len, type, vec, ind);
902 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
903 len += length;
904 assert(len >= length); // Check for 32 bit wrap around
905 length = len;
906
907 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
908 {
909 ppnum_t highPage = atop_64(addr + len - 1);
910 if (highPage > _highestPage)
911 _highestPage = highPage;
912 }
913 }
914 _length = length;
915 _pages = pages;
916 _rangesCount = count;
917
918 // Auto-prepare memory at creation time.
919 // Implied completion when descriptor is freed
920 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
921 _wireCount++; // Physical MDs are, by definition, wired
922 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
923 ioGMDData *dataP;
924 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
925
926 if (!_memoryEntries) {
927 _memoryEntries = OSData::withCapacity(dataSize);
928 if (!_memoryEntries)
929 return false;
930 }
931 else if (!_memoryEntries->initWithCapacity(dataSize))
932 return false;
933
934 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
935 dataP = getDataP(_memoryEntries);
936 dataP->fMapper = mapper;
937 dataP->fPageCnt = _pages;
938
939 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
940 _memEntry = createNamedEntry();
941
942 if ((_flags & kIOMemoryAutoPrepare)
943 && prepare() != kIOReturnSuccess)
944 return false;
945 }
946 }
947
948 return true;
949 }
950
951 /*
952 * free
953 *
954 * Free resources.
955 */
956 void IOGeneralMemoryDescriptor::free()
957 {
958 IOOptionBits type = _flags & kIOMemoryTypeMask;
959
960 if( reserved)
961 {
962 LOCK;
963 reserved->memory = 0;
964 UNLOCK;
965 }
966
967 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
968 {
969 while (_wireCount)
970 complete();
971 }
972 if (_memoryEntries)
973 _memoryEntries->release();
974
975 if (_ranges.v && !(kIOMemoryAsReference & _flags))
976 {
977 if (kIOMemoryTypeUIO == type)
978 uio_free((uio_t) _ranges.v);
979 #ifndef __LP64__
980 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
981 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
982 #endif /* !__LP64__ */
983 else
984 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
985
986 _ranges.v = NULL;
987 }
988
989 if (reserved && reserved->devicePager)
990 device_pager_deallocate( (memory_object_t) reserved->devicePager );
991
992 // memEntry holds a ref on the device pager which owns reserved
993 // (ExpansionData) so no reserved access after this point
994 if (_memEntry)
995 ipc_port_release_send( (ipc_port_t) _memEntry );
996
997 if (_prepareLock)
998 IOLockFree(_prepareLock);
999
1000 super::free();
1001 }
1002
1003 #ifndef __LP64__
1004 void IOGeneralMemoryDescriptor::unmapFromKernel()
1005 {
1006 panic("IOGMD::unmapFromKernel deprecated");
1007 }
1008
1009 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1010 {
1011 panic("IOGMD::mapIntoKernel deprecated");
1012 }
1013 #endif /* !__LP64__ */
1014
1015 /*
1016 * getDirection:
1017 *
1018 * Get the direction of the transfer.
1019 */
1020 IODirection IOMemoryDescriptor::getDirection() const
1021 {
1022 #ifndef __LP64__
1023 if (_direction)
1024 return _direction;
1025 #endif /* !__LP64__ */
1026 return (IODirection) (_flags & kIOMemoryDirectionMask);
1027 }
1028
1029 /*
1030 * getLength:
1031 *
1032 * Get the length of the transfer (over all ranges).
1033 */
1034 IOByteCount IOMemoryDescriptor::getLength() const
1035 {
1036 return _length;
1037 }
1038
1039 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1040 {
1041 _tag = tag;
1042 }
1043
1044 IOOptionBits IOMemoryDescriptor::getTag( void )
1045 {
1046 return( _tag);
1047 }
1048
1049 #ifndef __LP64__
1050 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1051 IOPhysicalAddress
1052 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1053 {
1054 addr64_t physAddr = 0;
1055
1056 if( prepare() == kIOReturnSuccess) {
1057 physAddr = getPhysicalSegment64( offset, length );
1058 complete();
1059 }
1060
1061 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1062 }
1063 #endif /* !__LP64__ */
1064
1065 IOByteCount IOMemoryDescriptor::readBytes
1066 (IOByteCount offset, void *bytes, IOByteCount length)
1067 {
1068 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1069 IOByteCount remaining;
1070
1071 // Assert that this entire I/O is within the available range
1072 assert(offset < _length);
1073 assert(offset + length <= _length);
1074 if (offset >= _length) {
1075 return 0;
1076 }
1077
1078 if (kIOMemoryThreadSafe & _flags)
1079 LOCK;
1080
1081 remaining = length = min(length, _length - offset);
1082 while (remaining) { // (process another target segment?)
1083 addr64_t srcAddr64;
1084 IOByteCount srcLen;
1085
1086 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1087 if (!srcAddr64)
1088 break;
1089
1090 // Clip segment length to remaining
1091 if (srcLen > remaining)
1092 srcLen = remaining;
1093
1094 copypv(srcAddr64, dstAddr, srcLen,
1095 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1096
1097 dstAddr += srcLen;
1098 offset += srcLen;
1099 remaining -= srcLen;
1100 }
1101
1102 if (kIOMemoryThreadSafe & _flags)
1103 UNLOCK;
1104
1105 assert(!remaining);
1106
1107 return length - remaining;
1108 }
1109
1110 IOByteCount IOMemoryDescriptor::writeBytes
1111 (IOByteCount offset, const void *bytes, IOByteCount length)
1112 {
1113 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1114 IOByteCount remaining;
1115
1116 // Assert that this entire I/O is within the available range
1117 assert(offset < _length);
1118 assert(offset + length <= _length);
1119
1120 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1121
1122 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1123 return 0;
1124 }
1125
1126 if (kIOMemoryThreadSafe & _flags)
1127 LOCK;
1128
1129 remaining = length = min(length, _length - offset);
1130 while (remaining) { // (process another target segment?)
1131 addr64_t dstAddr64;
1132 IOByteCount dstLen;
1133
1134 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1135 if (!dstAddr64)
1136 break;
1137
1138 // Clip segment length to remaining
1139 if (dstLen > remaining)
1140 dstLen = remaining;
1141
1142 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1143 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1144
1145 srcAddr += dstLen;
1146 offset += dstLen;
1147 remaining -= dstLen;
1148 }
1149
1150 if (kIOMemoryThreadSafe & _flags)
1151 UNLOCK;
1152
1153 assert(!remaining);
1154
1155 return length - remaining;
1156 }
1157
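// Illustrative sketch, for exposition only: copying through a descriptor with
// the accessors above. A virtual-range descriptor should be prepare()d first
// so its pages are wired; 'md' and 'kernelBuf' are hypothetical.
//
//     char kernelBuf[256];
//     if (kIOReturnSuccess == md->prepare()) {
//         IOByteCount got = md->readBytes(0, kernelBuf, sizeof(kernelBuf));
//         // 'got' is the number of bytes actually copied (clipped to getLength())
//         md->complete();
//     }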
1158 // osfmk/device/iokit_rpc.c
1159 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1160
1161 #ifndef __LP64__
1162 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1163 {
1164 panic("IOGMD::setPosition deprecated");
1165 }
1166 #endif /* !__LP64__ */
1167
1168 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1169
1170 uint64_t
1171 IOGeneralMemoryDescriptor::getPreparationID( void )
1172 {
1173 ioGMDData *dataP;
1174
1175 if (!_wireCount)
1176 return (kIOPreparationIDUnprepared);
1177
1178 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
1179 return (kIOPreparationIDAlwaysPrepared);
1180
1181 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1182 return (kIOPreparationIDUnprepared);
1183
1184 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1185 {
1186 #if defined(__ppc__ )
1187 dataP->fPreparationID = gIOMDPreparationID++;
1188 #else
1189 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1190 #endif
1191 }
1192 return (dataP->fPreparationID);
1193 }
1194
1195 uint64_t
1196 IOMemoryDescriptor::getPreparationID( void )
1197 {
1198 return (kIOPreparationIDUnsupported);
1199 }
1200
1201 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1202 {
1203 if (kIOMDGetCharacteristics == op) {
1204
1205 if (dataSize < sizeof(IOMDDMACharacteristics))
1206 return kIOReturnUnderrun;
1207
1208 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1209 data->fLength = _length;
1210 data->fSGCount = _rangesCount;
1211 data->fPages = _pages;
1212 data->fDirection = getDirection();
1213 if (!_wireCount)
1214 data->fIsPrepared = false;
1215 else {
1216 data->fIsPrepared = true;
1217 data->fHighestPage = _highestPage;
1218 if (_memoryEntries) {
1219 ioGMDData *gmdData = getDataP(_memoryEntries);
1220 ioPLBlock *ioplList = getIOPLList(gmdData);
1221 UInt count = getNumIOPL(_memoryEntries, gmdData);
1222
1223 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1224 && ioplList[0].fMappedBase);
1225 if (count == 1)
1226 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1227 }
1228 else
1229 data->fIsMapped = false;
1230 }
1231
1232 return kIOReturnSuccess;
1233
1234 #if IOMD_DEBUG_DMAACTIVE
1235 } else if (kIOMDSetDMAActive == op) {
1236 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1237 OSIncrementAtomic(&md->__iomd_reservedA);
1238 } else if (kIOMDSetDMAInactive == op) {
1239 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1240 if (md->__iomd_reservedA)
1241 OSDecrementAtomic(&md->__iomd_reservedA);
1242 else
1243 panic("kIOMDSetDMAInactive");
1244 #endif /* IOMD_DEBUG_DMAACTIVE */
1245
1246 } else if (!(kIOMDWalkSegments & op))
1247 return kIOReturnBadArgument;
1248
1249 // Get the next segment
1250 struct InternalState {
1251 IOMDDMAWalkSegmentArgs fIO;
1252 UInt fOffset2Index;
1253 UInt fIndex;
1254 UInt fNextOffset;
1255 } *isP;
1256
1257 // Find the next segment
1258 if (dataSize < sizeof(*isP))
1259 return kIOReturnUnderrun;
1260
1261 isP = (InternalState *) vData;
1262 UInt offset = isP->fIO.fOffset;
1263 bool mapped = isP->fIO.fMapped;
1264
1265 if (offset >= _length)
1266 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1267
1268 // Validate the previous offset
1269 UInt ind, off2Ind = isP->fOffset2Index;
1270 if ((kIOMDFirstSegment != op)
1271 && offset
1272 && (offset == isP->fNextOffset || off2Ind <= offset))
1273 ind = isP->fIndex;
1274 else
1275 ind = off2Ind = 0; // Start from beginning
1276
1277 UInt length;
1278 UInt64 address;
1279 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1280
1281 // Physical address based memory descriptor
1282 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1283
1284 // Find the range after the one that contains the offset
1285 mach_vm_size_t len;
1286 for (len = 0; off2Ind <= offset; ind++) {
1287 len = physP[ind].length;
1288 off2Ind += len;
1289 }
1290
1291 // Calculate length within range and starting address
1292 length = off2Ind - offset;
1293 address = physP[ind - 1].address + len - length;
1294
1295 // see how far we can coalesce ranges
1296 while (ind < _rangesCount && address + length == physP[ind].address) {
1297 len = physP[ind].length;
1298 length += len;
1299 off2Ind += len;
1300 ind++;
1301 }
1302
1303 // correct contiguous check overshoot
1304 ind--;
1305 off2Ind -= len;
1306 }
1307 #ifndef __LP64__
1308 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1309
1310 // Physical address based memory descriptor
1311 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1312
1313 // Find the range after the one that contains the offset
1314 mach_vm_size_t len;
1315 for (len = 0; off2Ind <= offset; ind++) {
1316 len = physP[ind].length;
1317 off2Ind += len;
1318 }
1319
1320 // Calculate length within range and starting address
1321 length = off2Ind - offset;
1322 address = physP[ind - 1].address + len - length;
1323
1324 // see how far we can coalesce ranges
1325 while (ind < _rangesCount && address + length == physP[ind].address) {
1326 len = physP[ind].length;
1327 length += len;
1328 off2Ind += len;
1329 ind++;
1330 }
1331
1332 // correct contiguous check overshoot
1333 ind--;
1334 off2Ind -= len;
1335 }
1336 #endif /* !__LP64__ */
1337 else do {
1338 if (!_wireCount)
1339 panic("IOGMD: not wired for the IODMACommand");
1340
1341 assert(_memoryEntries);
1342
1343 ioGMDData * dataP = getDataP(_memoryEntries);
1344 const ioPLBlock *ioplList = getIOPLList(dataP);
1345 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1346 upl_page_info_t *pageList = getPageList(dataP);
1347
1348 assert(numIOPLs > 0);
1349
1350 // Scan through iopl info blocks looking for block containing offset
1351 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1352 ind++;
1353
1354 // Go back to actual range as search goes past it
1355 ioPLBlock ioplInfo = ioplList[ind - 1];
1356 off2Ind = ioplInfo.fIOMDOffset;
1357
1358 if (ind < numIOPLs)
1359 length = ioplList[ind].fIOMDOffset;
1360 else
1361 length = _length;
1362 length -= offset; // Remainder within iopl
1363
1364 // Subtract offset till this iopl in total list
1365 offset -= off2Ind;
1366
1367 // If a mapped address is requested and this is a pre-mapped IOPL
1368 // then we just need to compute an offset relative to the mapped base.
1369 if (mapped && ioplInfo.fMappedBase) {
1370 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1371 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1372 continue; // Done; leave the do/while(false) now
1373 }
1374
1375 // The offset is rebased into the current iopl.
1376 // Now add the iopl 1st page offset.
1377 offset += ioplInfo.fPageOffset;
1378
1379 // For external UPLs the fPageInfo field points directly to
1380 // the upl's upl_page_info_t array.
1381 if (ioplInfo.fFlags & kIOPLExternUPL)
1382 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1383 else
1384 pageList = &pageList[ioplInfo.fPageInfo];
1385
1386 // Check for direct device non-paged memory
1387 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1388 address = ptoa_64(pageList->phys_addr) + offset;
1389 continue; // Done; leave the do/while(false) now
1390 }
1391
1392 // Now we need to compute the index into the pageList
1393 UInt pageInd = atop_32(offset);
1394 offset &= PAGE_MASK;
1395
1396 // Compute the starting address of this segment
1397 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1398 if (!pageAddr) {
1399 panic("!pageList phys_addr");
1400 }
1401
1402 address = ptoa_64(pageAddr) + offset;
1403
1404 // length is currently set to the length of the remainder of the iopl.
1405 // We need to check that the remainder of the iopl is contiguous.
1406 // This is indicated by pageList[ind].phys_addr being sequential.
1407 IOByteCount contigLength = PAGE_SIZE - offset;
1408 while (contigLength < length
1409 && ++pageAddr == pageList[++pageInd].phys_addr)
1410 {
1411 contigLength += PAGE_SIZE;
1412 }
1413
1414 if (contigLength < length)
1415 length = contigLength;
1416
1417
1418 assert(address);
1419 assert(length);
1420
1421 } while (false);
1422
1423 // Update return values and state
1424 isP->fIO.fIOVMAddr = address;
1425 isP->fIO.fLength = length;
1426 isP->fIndex = ind;
1427 isP->fOffset2Index = off2Ind;
1428 isP->fNextOffset = isP->fIO.fOffset + length;
1429
1430 return kIOReturnSuccess;
1431 }
1432
1433 addr64_t
1434 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1435 {
1436 IOReturn ret;
1437 addr64_t address = 0;
1438 IOByteCount length = 0;
1439 IOMapper * mapper = gIOSystemMapper;
1440 IOOptionBits type = _flags & kIOMemoryTypeMask;
1441
1442 if (lengthOfSegment)
1443 *lengthOfSegment = 0;
1444
1445 if (offset >= _length)
1446 return 0;
1447
1448 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1449 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1450 // map()->getVirtualAddress() to obtain the kernel pointer, since it must avoid the memory allocation that
1451 // an IOMemoryMap entails. So _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up.
1452
1453 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1454 {
1455 unsigned rangesIndex = 0;
1456 Ranges vec = _ranges;
1457 user_addr_t addr;
1458
1459 // Find starting address within the vector of ranges
1460 for (;;) {
1461 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1462 if (offset < length)
1463 break;
1464 offset -= length; // (make offset relative)
1465 rangesIndex++;
1466 }
1467
1468 // Now that we have the starting range,
1469 // let's find the last contiguous range
1470 addr += offset;
1471 length -= offset;
1472
1473 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1474 user_addr_t newAddr;
1475 IOPhysicalLength newLen;
1476
1477 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1478 if (addr + length != newAddr)
1479 break;
1480 length += newLen;
1481 }
1482 if (addr)
1483 address = (IOPhysicalAddress) addr; // Truncate address to 32 bits
1484 }
1485 else
1486 {
1487 IOMDDMAWalkSegmentState _state;
1488 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1489
1490 state->fOffset = offset;
1491 state->fLength = _length - offset;
1492 state->fMapped = (0 == (options & kIOMemoryMapperNone));
1493
1494 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1495
1496 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1497 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1498 ret, this, state->fOffset,
1499 state->fIOVMAddr, state->fLength);
1500 if (kIOReturnSuccess == ret)
1501 {
1502 address = state->fIOVMAddr;
1503 length = state->fLength;
1504 }
1505
1506 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1507 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1508
1509 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1510 {
1511 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1512 {
1513 addr64_t origAddr = address;
1514 IOByteCount origLen = length;
1515
1516 address = mapper->mapAddr(origAddr);
1517 length = page_size - (address & (page_size - 1));
1518 while ((length < origLen)
1519 && ((address + length) == mapper->mapAddr(origAddr + length)))
1520 length += page_size;
1521 if (length > origLen)
1522 length = origLen;
1523 }
1524 #ifdef __LP64__
1525 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1526 {
1527 panic("getPhysicalSegment not mapped for I/O");
1528 }
1529 #endif /* __LP64__ */
1530 }
1531 }
1532
1533 if (!address)
1534 length = 0;
1535
1536 if (lengthOfSegment)
1537 *lengthOfSegment = length;
1538
1539 return (address);
1540 }
1541
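// Illustrative sketch, for exposition only: walking every physically
// contiguous segment of a prepared descriptor with the routine above. 'md'
// stands for a hypothetical, already prepare()d descriptor.
//
//     IOByteCount offset = 0;
//     while (offset < md->getLength()) {
//         IOByteCount segLen;
//         addr64_t    phys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
//         if (!phys)
//             break;
//         // ... program the hardware with phys / segLen ...
//         offset += segLen;
//     }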
1542 #ifndef __LP64__
1543 addr64_t
1544 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1545 {
1546 addr64_t address = 0;
1547
1548 if (options & _kIOMemorySourceSegment)
1549 {
1550 address = getSourceSegment(offset, lengthOfSegment);
1551 }
1552 else if (options & kIOMemoryMapperNone)
1553 {
1554 address = getPhysicalSegment64(offset, lengthOfSegment);
1555 }
1556 else
1557 {
1558 address = getPhysicalSegment(offset, lengthOfSegment);
1559 }
1560
1561 return (address);
1562 }
1563
1564 addr64_t
1565 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1566 {
1567 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1568 }
1569
1570 IOPhysicalAddress
1571 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1572 {
1573 addr64_t address = 0;
1574 IOByteCount length = 0;
1575
1576 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1577
1578 if (lengthOfSegment)
1579 length = *lengthOfSegment;
1580
1581 if ((address + length) > 0x100000000ULL)
1582 {
1583 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1584 address, (long) length, (getMetaClass())->getClassName());
1585 }
1586
1587 return ((IOPhysicalAddress) address);
1588 }
1589
1590 addr64_t
1591 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1592 {
1593 IOPhysicalAddress phys32;
1594 IOByteCount length;
1595 addr64_t phys64;
1596 IOMapper * mapper = 0;
1597
1598 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1599 if (!phys32)
1600 return 0;
1601
1602 if (gIOSystemMapper)
1603 mapper = gIOSystemMapper;
1604
1605 if (mapper)
1606 {
1607 IOByteCount origLen;
1608
1609 phys64 = mapper->mapAddr(phys32);
1610 origLen = *lengthOfSegment;
1611 length = page_size - (phys64 & (page_size - 1));
1612 while ((length < origLen)
1613 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1614 length += page_size;
1615 if (length > origLen)
1616 length = origLen;
1617
1618 *lengthOfSegment = length;
1619 }
1620 else
1621 phys64 = (addr64_t) phys32;
1622
1623 return phys64;
1624 }
1625
1626 IOPhysicalAddress
1627 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1628 {
1629 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1630 }
1631
1632 IOPhysicalAddress
1633 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1634 {
1635 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1636 }
1637
1638 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1639 IOByteCount * lengthOfSegment)
1640 {
1641 if (_task == kernel_task)
1642 return (void *) getSourceSegment(offset, lengthOfSegment);
1643 else
1644 panic("IOGMD::getVirtualSegment deprecated");
1645
1646 return 0;
1647 }
1648 #endif /* !__LP64__ */
1649
1650 IOReturn
1651 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1652 {
1653 if (kIOMDGetCharacteristics == op) {
1654 if (dataSize < sizeof(IOMDDMACharacteristics))
1655 return kIOReturnUnderrun;
1656
1657 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1658 data->fLength = getLength();
1659 data->fSGCount = 0;
1660 data->fDirection = getDirection();
1661 if (IOMapper::gSystem)
1662 data->fIsMapped = true;
1663 data->fIsPrepared = true; // Assume prepared - fails safe
1664 }
1665 else if (kIOMDWalkSegments & op) {
1666 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1667 return kIOReturnUnderrun;
1668
1669 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1670 IOByteCount offset = (IOByteCount) data->fOffset;
1671
1672 IOPhysicalLength length;
1673 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1674 if (data->fMapped && IOMapper::gSystem)
1675 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1676 else
1677 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1678 data->fLength = length;
1679 }
1680 else
1681 return kIOReturnBadArgument;
1682
1683 return kIOReturnSuccess;
1684 }
1685
1686 static IOReturn
1687 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1688 {
1689 IOReturn err = kIOReturnSuccess;
1690
1691 *control = VM_PURGABLE_SET_STATE;
1692 switch (newState)
1693 {
1694 case kIOMemoryPurgeableKeepCurrent:
1695 *control = VM_PURGABLE_GET_STATE;
1696 break;
1697
1698 case kIOMemoryPurgeableNonVolatile:
1699 *state = VM_PURGABLE_NONVOLATILE;
1700 break;
1701 case kIOMemoryPurgeableVolatile:
1702 *state = VM_PURGABLE_VOLATILE;
1703 break;
1704 case kIOMemoryPurgeableEmpty:
1705 *state = VM_PURGABLE_EMPTY;
1706 break;
1707 default:
1708 err = kIOReturnBadArgument;
1709 break;
1710 }
1711 return (err);
1712 }
1713
1714 static IOReturn
1715 purgeableStateBits(int * state)
1716 {
1717 IOReturn err = kIOReturnSuccess;
1718
1719 switch (*state)
1720 {
1721 case VM_PURGABLE_NONVOLATILE:
1722 *state = kIOMemoryPurgeableNonVolatile;
1723 break;
1724 case VM_PURGABLE_VOLATILE:
1725 *state = kIOMemoryPurgeableVolatile;
1726 break;
1727 case VM_PURGABLE_EMPTY:
1728 *state = kIOMemoryPurgeableEmpty;
1729 break;
1730 default:
1731 *state = kIOMemoryPurgeableNonVolatile;
1732 err = kIOReturnNotReady;
1733 break;
1734 }
1735 return (err);
1736 }
1737
1738 IOReturn
1739 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1740 IOOptionBits * oldState )
1741 {
1742 IOReturn err = kIOReturnSuccess;
1743 vm_purgable_t control;
1744 int state;
1745
1746 if (_memEntry)
1747 {
1748 err = super::setPurgeable(newState, oldState);
1749 }
1750 else
1751 {
1752 if (kIOMemoryThreadSafe & _flags)
1753 LOCK;
1754 do
1755 {
1756 // Find the appropriate vm_map for the given task
1757 vm_map_t curMap;
1758 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1759 {
1760 err = kIOReturnNotReady;
1761 break;
1762 }
1763 else
1764 curMap = get_task_map(_task);
1765
1766 // can only do one range
1767 Ranges vec = _ranges;
1768 IOOptionBits type = _flags & kIOMemoryTypeMask;
1769 user_addr_t addr;
1770 IOByteCount len;
1771 getAddrLenForInd(addr, len, type, vec, 0);
1772
1773 err = purgeableControlBits(newState, &control, &state);
1774 if (kIOReturnSuccess != err)
1775 break;
1776 err = mach_vm_purgable_control(curMap, addr, control, &state);
1777 if (oldState)
1778 {
1779 if (kIOReturnSuccess == err)
1780 {
1781 err = purgeableStateBits(&state);
1782 *oldState = state;
1783 }
1784 }
1785 }
1786 while (false);
1787 if (kIOMemoryThreadSafe & _flags)
1788 UNLOCK;
1789 }
1790 return (err);
1791 }
1792
1793 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1794 IOOptionBits * oldState )
1795 {
1796 IOReturn err = kIOReturnSuccess;
1797 vm_purgable_t control;
1798 int state;
1799
1800 if (kIOMemoryThreadSafe & _flags)
1801 LOCK;
1802
1803 do
1804 {
1805 if (!_memEntry)
1806 {
1807 err = kIOReturnNotReady;
1808 break;
1809 }
1810 err = purgeableControlBits(newState, &control, &state);
1811 if (kIOReturnSuccess != err)
1812 break;
1813 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1814 if (oldState)
1815 {
1816 if (kIOReturnSuccess == err)
1817 {
1818 err = purgeableStateBits(&state);
1819 *oldState = state;
1820 }
1821 }
1822 }
1823 while (false);
1824
1825 if (kIOMemoryThreadSafe & _flags)
1826 UNLOCK;
1827
1828 return (err);
1829 }
1830
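// Illustrative sketch, for exposition only: driving the purgeable-state
// methods above. Marking a buffer volatile lets the VM discard it under
// memory pressure; when it is later marked non-volatile again, the returned
// old state shows whether the contents survived. 'md' is hypothetical.
//
//     IOOptionBits oldState;
//     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);     // allow discard
//     // ... later ...
//     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);  // reclaim
//     if (kIOMemoryPurgeableEmpty == oldState)
//         ; // contents were discarded while volatile and must be regenerated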
1831 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1832 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1833
1834 static void SetEncryptOp(addr64_t pa, unsigned int count)
1835 {
1836 ppnum_t page, end;
1837
1838 page = atop_64(round_page_64(pa));
1839 end = atop_64(trunc_page_64(pa + count));
1840 for (; page < end; page++)
1841 {
1842 pmap_clear_noencrypt(page);
1843 }
1844 }
1845
1846 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1847 {
1848 ppnum_t page, end;
1849
1850 page = atop_64(round_page_64(pa));
1851 end = atop_64(trunc_page_64(pa + count));
1852 for (; page < end; page++)
1853 {
1854 pmap_set_noencrypt(page);
1855 }
1856 }
1857
1858 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1859 IOByteCount offset, IOByteCount length )
1860 {
1861 IOByteCount remaining;
1862 void (*func)(addr64_t pa, unsigned int count) = 0;
1863
1864 switch (options)
1865 {
1866 case kIOMemoryIncoherentIOFlush:
1867 func = &dcache_incoherent_io_flush64;
1868 break;
1869 case kIOMemoryIncoherentIOStore:
1870 func = &dcache_incoherent_io_store64;
1871 break;
1872
1873 case kIOMemorySetEncrypted:
1874 func = &SetEncryptOp;
1875 break;
1876 case kIOMemoryClearEncrypted:
1877 func = &ClearEncryptOp;
1878 break;
1879 }
1880
1881 if (!func)
1882 return (kIOReturnUnsupported);
1883
1884 if (kIOMemoryThreadSafe & _flags)
1885 LOCK;
1886
1887 remaining = length = min(length, getLength() - offset);
1888 while (remaining)
1889 // (process another target segment?)
1890 {
1891 addr64_t dstAddr64;
1892 IOByteCount dstLen;
1893
1894 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1895 if (!dstAddr64)
1896 break;
1897
1898 // Clip segment length to remaining
1899 if (dstLen > remaining)
1900 dstLen = remaining;
1901
1902 (*func)(dstAddr64, dstLen);
1903
1904 offset += dstLen;
1905 remaining -= dstLen;
1906 }
1907
1908 if (kIOMemoryThreadSafe & _flags)
1909 UNLOCK;
1910
1911 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1912 }
1913
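// Illustrative sketch, for exposition only: a typical call into
// performOperation() above, flushing the data cache for a range the CPU has
// just written and a non-coherent device is about to read. 'md', 'offset'
// and 'len' are hypothetical.
//
//     IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush, offset, len);
//     if (kIOReturnSuccess != ret)
//         IOLog("cache flush failed: 0x%x\n", ret);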
1914 #if defined(__ppc__) || defined(__arm__)
1915 extern vm_offset_t static_memory_end;
1916 #define io_kernel_static_end static_memory_end
1917 #else
1918 extern vm_offset_t first_avail;
1919 #define io_kernel_static_end first_avail
1920 #endif
1921
1922 static kern_return_t
1923 io_get_kernel_static_upl(
1924 vm_map_t /* map */,
1925 uintptr_t offset,
1926 vm_size_t *upl_size,
1927 upl_t *upl,
1928 upl_page_info_array_t page_list,
1929 unsigned int *count,
1930 ppnum_t *highest_page)
1931 {
1932 unsigned int pageCount, page;
1933 ppnum_t phys;
1934 ppnum_t highestPage = 0;
1935
1936 pageCount = atop_32(*upl_size);
1937 if (pageCount > *count)
1938 pageCount = *count;
1939
1940 *upl = NULL;
1941
1942 for (page = 0; page < pageCount; page++)
1943 {
1944 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1945 if (!phys)
1946 break;
1947 page_list[page].phys_addr = phys;
1948 page_list[page].pageout = 0;
1949 page_list[page].absent = 0;
1950 page_list[page].dirty = 0;
1951 page_list[page].precious = 0;
1952 page_list[page].device = 0;
1953 if (phys > highestPage)
1954 highestPage = phys;
1955 }
1956
1957 *highest_page = highestPage;
1958
1959 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1960 }
1961
1962 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1963 {
1964 IOOptionBits type = _flags & kIOMemoryTypeMask;
1965 IOReturn error = kIOReturnCannotWire;
1966 ioGMDData *dataP;
1967 ppnum_t mapBase = 0;
1968 IOMapper *mapper;
1969 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1970
1971 assert(!_wireCount);
1972 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1973
1974 if (_pages >= gIOMaximumMappedIOPageCount)
1975 return kIOReturnNoResources;
1976
1977 dataP = getDataP(_memoryEntries);
1978 mapper = dataP->fMapper;
1979 if (mapper && _pages)
1980 mapBase = mapper->iovmAlloc(_pages);
1981
1982 // Note that appendBytes(NULL) zeros the data up to the
1983 // desired length.
1984 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1985 dataP = 0; // May no longer be valid, so let's not get tempted.
1986
1987 if (forDirection == kIODirectionNone)
1988 forDirection = getDirection();
1989
1990 int uplFlags; // This Mem Desc's default flags for upl creation
1991 switch (kIODirectionOutIn & forDirection)
1992 {
1993 case kIODirectionOut:
1994 // Pages do not need to be marked as dirty on commit
1995 uplFlags = UPL_COPYOUT_FROM;
1996 _flags |= kIOMemoryPreparedReadOnly;
1997 break;
1998
1999 case kIODirectionIn:
2000 default:
2001 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2002 break;
2003 }
2004 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2005
2006 #ifdef UPL_NEED_32BIT_ADDR
2007 if (kIODirectionPrepareToPhys32 & forDirection)
2008 uplFlags |= UPL_NEED_32BIT_ADDR;
2009 #endif
2010
2011 // Find the appropriate vm_map for the given task
2012 vm_map_t curMap;
2013 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2014 curMap = 0;
2015 else
2016 { curMap = get_task_map(_task); }
2017
2018 // Iterate over the vector of virtual ranges
2019 Ranges vec = _ranges;
2020 unsigned int pageIndex = 0;
2021 IOByteCount mdOffset = 0;
2022 ppnum_t highestPage = 0;
2023 for (UInt range = 0; range < _rangesCount; range++) {
2024 ioPLBlock iopl;
2025 user_addr_t startPage;
2026 IOByteCount numBytes;
2027 ppnum_t highPage = 0;
2028
2029 // Get the startPage address and length of vec[range]
2030 getAddrLenForInd(startPage, numBytes, type, vec, range);
2031 iopl.fPageOffset = startPage & PAGE_MASK;
2032 numBytes += iopl.fPageOffset;
2033 startPage = trunc_page_64(startPage);
2034
2035 if (mapper)
2036 iopl.fMappedBase = mapBase + pageIndex;
2037 else
2038 iopl.fMappedBase = 0;
2039
2040 // Iterate over the current range, creating UPLs
2041 while (numBytes) {
2042 dataP = getDataP(_memoryEntries);
2043 vm_address_t kernelStart = (vm_address_t) startPage;
2044 vm_map_t theMap;
2045 if (curMap)
2046 theMap = curMap;
2047 else if (!sharedMem) {
2048 assert(_task == kernel_task);
2049 theMap = IOPageableMapForAddress(kernelStart);
2050 }
2051 else
2052 theMap = NULL;
2053
2054 upl_page_info_array_t pageInfo = getPageList(dataP);
2055 int ioplFlags = uplFlags;
2056 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2057
2058 vm_size_t ioplSize = round_page(numBytes);
2059 unsigned int numPageInfo = atop_32(ioplSize);
2060
2061 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2062 error = io_get_kernel_static_upl(theMap,
2063 kernelStart,
2064 &ioplSize,
2065 &iopl.fIOPL,
2066 baseInfo,
2067 &numPageInfo,
2068 &highPage);
2069 }
2070 else if (sharedMem) {
2071 error = memory_object_iopl_request(sharedMem,
2072 ptoa_32(pageIndex),
2073 &ioplSize,
2074 &iopl.fIOPL,
2075 baseInfo,
2076 &numPageInfo,
2077 &ioplFlags);
2078 }
2079 else {
2080 assert(theMap);
2081 error = vm_map_create_upl(theMap,
2082 startPage,
2083 (upl_size_t*)&ioplSize,
2084 &iopl.fIOPL,
2085 baseInfo,
2086 &numPageInfo,
2087 &ioplFlags);
2088 }
2089
2090 assert(ioplSize);
2091 if (error != KERN_SUCCESS)
2092 goto abortExit;
2093
2094 if (iopl.fIOPL)
2095 highPage = upl_get_highest_page(iopl.fIOPL);
2096 if (highPage > highestPage)
2097 highestPage = highPage;
2098
2099 error = kIOReturnCannotWire;
2100
2101 if (baseInfo->device) {
2102 numPageInfo = 1;
2103 iopl.fFlags = kIOPLOnDevice;
2104 // Don't translate device memory at all
2105 if (mapper && mapBase) {
2106 mapper->iovmFree(mapBase, _pages);
2107 mapBase = 0;
2108 iopl.fMappedBase = 0;
2109 }
2110 }
2111 else {
2112 iopl.fFlags = 0;
2113 if (mapper)
2114 mapper->iovmInsert(mapBase, pageIndex,
2115 baseInfo, numPageInfo);
2116 }
2117
2118 iopl.fIOMDOffset = mdOffset;
2119 iopl.fPageInfo = pageIndex;
2120
2121 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2122 {
2123 upl_commit(iopl.fIOPL, 0, 0);
2124 upl_deallocate(iopl.fIOPL);
2125 iopl.fIOPL = 0;
2126 }
2127
2128 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2129 // Clean up the partially created and unsaved iopl
2130 if (iopl.fIOPL) {
2131 upl_abort(iopl.fIOPL, 0);
2132 upl_deallocate(iopl.fIOPL);
2133 }
2134 goto abortExit;
2135 }
2136
2137 // Check for multiple iopls in one virtual range
2138 pageIndex += numPageInfo;
2139 mdOffset -= iopl.fPageOffset;
2140 if (ioplSize < numBytes) {
2141 numBytes -= ioplSize;
2142 startPage += ioplSize;
2143 mdOffset += ioplSize;
2144 iopl.fPageOffset = 0;
2145 if (mapper)
2146 iopl.fMappedBase = mapBase + pageIndex;
2147 }
2148 else {
2149 mdOffset += numBytes;
2150 break;
2151 }
2152 }
2153 }
2154
2155 _highestPage = highestPage;
2156
2157 return kIOReturnSuccess;
2158
2159 abortExit:
2160 {
2161 dataP = getDataP(_memoryEntries);
2162 UInt done = getNumIOPL(_memoryEntries, dataP);
2163 ioPLBlock *ioplList = getIOPLList(dataP);
2164
2165 for (UInt range = 0; range < done; range++)
2166 {
2167 if (ioplList[range].fIOPL) {
2168 upl_abort(ioplList[range].fIOPL, 0);
2169 upl_deallocate(ioplList[range].fIOPL);
2170 }
2171 }
2172 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2173
2174 if (mapper && mapBase)
2175 mapper->iovmFree(mapBase, _pages);
2176 }
2177
2178 if (error == KERN_FAILURE)
2179 error = kIOReturnCannotWire;
2180
2181 return error;
2182 }
2183
2184 /*
2185 * prepare
2186 *
2187 * Prepare the memory for an I/O transfer. This involves paging in
2188 * the memory, if necessary, and wiring it down for the duration of
2189 * the transfer. The complete() method completes the processing of
2190 * the memory after the I/O transfer finishes. This method needn't be
2191 * called for non-pageable memory.
2192 */
2193 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2194 {
2195 IOReturn error = kIOReturnSuccess;
2196 IOOptionBits type = _flags & kIOMemoryTypeMask;
2197
2198 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2199 return kIOReturnSuccess;
2200
2201 if (_prepareLock)
2202 IOLockLock(_prepareLock);
2203
2204 if (!_wireCount
2205 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2206 error = wireVirtual(forDirection);
2207 }
2208
2209 if (kIOReturnSuccess == error)
2210 _wireCount++;
2211
2212 if (1 == _wireCount)
2213 {
2214 if (kIOMemoryClearEncrypt & _flags)
2215 {
2216 performOperation(kIOMemoryClearEncrypted, 0, _length);
2217 }
2218 }
2219
2220 if (_prepareLock)
2221 IOLockUnlock(_prepareLock);
2222
2223 return error;
2224 }
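/*
 * Illustrative driver-side sketch of the prepare()/complete() pairing
 * described above. The buffer, size, direction and task names are
 * hypothetical; the point shown is only the bracketing of a transfer
 * with prepare() before the I/O and complete() after it.
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddressRange(buffer, size,
 *                                              kIODirectionOut, task);
 *     if (md && (kIOReturnSuccess == md->prepare()))
 *     {
 *         // ... run the I/O against the now-wired pages ...
 *         md->complete();                 // balances the prepare()
 *     }
 *     if (md)
 *         md->release();
 */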
2225
2226 /*
2227 * complete
2228 *
2229 * Complete processing of the memory after an I/O transfer finishes.
2230 * This method should not be called unless a prepare was previously
2231 * issued; prepare() and complete() calls must occur in pairs, before
2232 * and after an I/O transfer involving pageable memory.
2233 */
2234
2235 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2236 {
2237 IOOptionBits type = _flags & kIOMemoryTypeMask;
2238
2239 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2240 return kIOReturnSuccess;
2241
2242 if (_prepareLock)
2243 IOLockLock(_prepareLock);
2244
2245 assert(_wireCount);
2246
2247 if (_wireCount)
2248 {
2249 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2250 {
2251 performOperation(kIOMemorySetEncrypted, 0, _length);
2252 }
2253
2254 _wireCount--;
2255 if (!_wireCount)
2256 {
2257 IOOptionBits type = _flags & kIOMemoryTypeMask;
2258 ioGMDData * dataP = getDataP(_memoryEntries);
2259 ioPLBlock *ioplList = getIOPLList(dataP);
2260 UInt count = getNumIOPL(_memoryEntries, dataP);
2261
2262 #if IOMD_DEBUG_DMAACTIVE
2263 if (__iomd_reservedA) panic("complete() while dma active");
2264 #endif /* IOMD_DEBUG_DMAACTIVE */
2265
2266 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2267 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2268
2269 // Only complete iopls that we created, which are those for TypeVirtual
2270 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2271 for (UInt ind = 0; ind < count; ind++)
2272 if (ioplList[ind].fIOPL) {
2273 upl_commit(ioplList[ind].fIOPL, 0, 0);
2274 upl_deallocate(ioplList[ind].fIOPL);
2275 }
2276 }
2277 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2278
2279 dataP->fPreparationID = kIOPreparationIDUnprepared;
2280 }
2281 }
2282
2283 if (_prepareLock)
2284 IOLockUnlock(_prepareLock);
2285
2286 return kIOReturnSuccess;
2287 }
2288
2289 IOReturn IOGeneralMemoryDescriptor::doMap(
2290 vm_map_t __addressMap,
2291 IOVirtualAddress * __address,
2292 IOOptionBits options,
2293 IOByteCount __offset,
2294 IOByteCount __length )
2295
2296 {
2297 #ifndef __LP64__
2298 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2299 #endif /* !__LP64__ */
2300
2301 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2302 mach_vm_size_t offset = mapping->fOffset + __offset;
2303 mach_vm_size_t length = mapping->fLength;
2304
2305 kern_return_t kr = kIOReturnVMError;
2306 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2307
2308 IOOptionBits type = _flags & kIOMemoryTypeMask;
2309 Ranges vec = _ranges;
2310
2311 user_addr_t range0Addr = 0;
2312 IOByteCount range0Len = 0;
2313
2314 if ((offset >= _length) || ((offset + length) > _length))
2315 return( kIOReturnBadArgument );
2316
2317 if (vec.v)
2318 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2319
2320 // mapping source == dest? (could be much better)
2321 if( _task
2322 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2323 && (1 == _rangesCount) && (0 == offset)
2324 && range0Addr && (length <= range0Len) )
2325 {
2326 mapping->fAddress = range0Addr;
2327 mapping->fOptions |= kIOMapStatic;
2328
2329 return( kIOReturnSuccess );
2330 }
2331
2332 if( 0 == sharedMem) {
2333
2334 vm_size_t size = ptoa_32(_pages);
2335
2336 if( _task) {
2337
2338 memory_object_size_t actualSize = size;
2339 vm_prot_t prot = VM_PROT_READ;
2340 if (!(kIOMapReadOnly & options))
2341 prot |= VM_PROT_WRITE;
2342 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2343 prot |= VM_PROT_WRITE;
2344
2345 if (_rangesCount == 1)
2346 {
2347 kr = mach_make_memory_entry_64(get_task_map(_task),
2348 &actualSize, range0Addr,
2349 prot, &sharedMem,
2350 NULL);
2351 }
2352 if( (_rangesCount != 1)
2353 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2354 do
2355 {
2356 #if IOASSERT
2357 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2358 _rangesCount, (UInt64)actualSize, (UInt64)size);
2359 #endif
2360 kr = kIOReturnVMError;
2361 if (sharedMem)
2362 {
2363 ipc_port_release_send(sharedMem);
2364 sharedMem = MACH_PORT_NULL;
2365 }
2366
2367 mach_vm_address_t address, segDestAddr;
2368 mach_vm_size_t mapLength;
2369 unsigned rangesIndex;
2370 IOOptionBits type = _flags & kIOMemoryTypeMask;
2371 user_addr_t srcAddr;
2372 IOPhysicalLength segLen = 0;
2373
2374 // Find starting address within the vector of ranges
2375 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2376 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2377 if (offset < segLen)
2378 break;
2379 offset -= segLen; // (make offset relative)
2380 }
2381
2382 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2383 address = trunc_page_64(mapping->fAddress);
2384
2385 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2386 {
2387 vm_map_t map = mapping->fAddressMap;
2388 kr = IOMemoryDescriptorMapCopy(&map,
2389 options,
2390 offset, &address, round_page_64(length + pageOffset));
2391 if (kr == KERN_SUCCESS)
2392 {
2393 segDestAddr = address;
2394 segLen -= offset;
2395 mapLength = length;
2396
2397 while (true)
2398 {
2399 vm_prot_t cur_prot, max_prot;
2400 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2401 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2402 get_task_map(_task), trunc_page_64(srcAddr),
2403 FALSE /* copy */,
2404 &cur_prot,
2405 &max_prot,
2406 VM_INHERIT_NONE);
2407 if (KERN_SUCCESS == kr)
2408 {
2409 if ((!(VM_PROT_READ & cur_prot))
2410 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2411 {
2412 kr = KERN_PROTECTION_FAILURE;
2413 }
2414 }
2415 if (KERN_SUCCESS != kr)
2416 break;
2417 segDestAddr += segLen;
2418 mapLength -= segLen;
2419 if (!mapLength)
2420 break;
2421 rangesIndex++;
2422 if (rangesIndex >= _rangesCount)
2423 {
2424 kr = kIOReturnBadArgument;
2425 break;
2426 }
2427 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2428 if (srcAddr & PAGE_MASK)
2429 {
2430 kr = kIOReturnBadArgument;
2431 break;
2432 }
2433 if (segLen > mapLength)
2434 segLen = mapLength;
2435 }
2436 if (KERN_SUCCESS != kr)
2437 {
2438 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2439 }
2440 }
2441
2442 if (KERN_SUCCESS == kr)
2443 mapping->fAddress = address + pageOffset;
2444 else
2445 mapping->fAddress = NULL;
2446 }
2447 }
2448 while (false);
2449 }
2450 else do
2451 { // _task == 0, must be physical
2452
2453 memory_object_t pager;
2454 unsigned int flags = 0;
2455 addr64_t pa;
2456 IOPhysicalLength segLen;
2457
2458 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2459
2460 if( !reserved) {
2461 reserved = IONew( ExpansionData, 1 );
2462 if( !reserved)
2463 continue;
2464 }
2465 reserved->pagerContig = (1 == _rangesCount);
2466 reserved->memory = this;
2467
2468 /* What cache mode do we need? */
2469 switch(options & kIOMapCacheMask ) {
2470
2471 case kIOMapDefaultCache:
2472 default:
2473 flags = IODefaultCacheBits(pa);
2474 if (DEVICE_PAGER_CACHE_INHIB & flags)
2475 {
2476 if (DEVICE_PAGER_GUARDED & flags)
2477 mapping->fOptions |= kIOMapInhibitCache;
2478 else
2479 mapping->fOptions |= kIOMapWriteCombineCache;
2480 }
2481 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2482 mapping->fOptions |= kIOMapWriteThruCache;
2483 else
2484 mapping->fOptions |= kIOMapCopybackCache;
2485 break;
2486
2487 case kIOMapInhibitCache:
2488 flags = DEVICE_PAGER_CACHE_INHIB |
2489 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2490 break;
2491
2492 case kIOMapWriteThruCache:
2493 flags = DEVICE_PAGER_WRITE_THROUGH |
2494 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2495 break;
2496
2497 case kIOMapCopybackCache:
2498 flags = DEVICE_PAGER_COHERENT;
2499 break;
2500
2501 case kIOMapWriteCombineCache:
2502 flags = DEVICE_PAGER_CACHE_INHIB |
2503 DEVICE_PAGER_COHERENT;
2504 break;
2505 }
2506
2507 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2508
2509 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2510 size, flags);
2511 assert( pager );
2512
2513 if( pager) {
2514 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2515 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2516
2517 assert( KERN_SUCCESS == kr );
2518 if( KERN_SUCCESS != kr)
2519 {
2520 device_pager_deallocate( pager );
2521 pager = MACH_PORT_NULL;
2522 sharedMem = MACH_PORT_NULL;
2523 }
2524 }
2525 if( pager && sharedMem)
2526 reserved->devicePager = pager;
2527 else {
2528 IODelete( reserved, ExpansionData, 1 );
2529 reserved = 0;
2530 }
2531
2532 } while( false );
2533
2534 _memEntry = (void *) sharedMem;
2535 }
2536
2537 IOReturn result;
2538 if (0 == sharedMem)
2539 result = kr;
2540 else
2541 result = super::doMap( __addressMap, __address,
2542 options, __offset, __length );
2543
2544 return( result );
2545 }
2546
2547 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2548 vm_map_t addressMap,
2549 IOVirtualAddress __address,
2550 IOByteCount __length )
2551 {
2552 return (super::doUnmap(addressMap, __address, __length));
2553 }
2554
2555 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2556
2557 #undef super
2558 #define super OSObject
2559
2560 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2561
2562 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2563 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2564 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2565 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2566 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2567 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2568 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2569 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2570
2571 /* ex-inline function implementation */
2572 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2573 { return( getPhysicalSegment( 0, 0 )); }
2574
2575 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2576
2577 bool IOMemoryMap::init(
2578 task_t intoTask,
2579 mach_vm_address_t toAddress,
2580 IOOptionBits _options,
2581 mach_vm_size_t _offset,
2582 mach_vm_size_t _length )
2583 {
2584 if (!intoTask)
2585 return( false);
2586
2587 if (!super::init())
2588 return(false);
2589
2590 fAddressMap = get_task_map(intoTask);
2591 if (!fAddressMap)
2592 return(false);
2593 vm_map_reference(fAddressMap);
2594
2595 fAddressTask = intoTask;
2596 fOptions = _options;
2597 fLength = _length;
2598 fOffset = _offset;
2599 fAddress = toAddress;
2600
2601 return (true);
2602 }
2603
2604 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2605 {
2606 if (!_memory)
2607 return(false);
2608
2609 if (!fSuperMap)
2610 {
2611 if( (_offset + fLength) > _memory->getLength())
2612 return( false);
2613 fOffset = _offset;
2614 }
2615
2616 _memory->retain();
2617 if (fMemory)
2618 {
2619 if (fMemory != _memory)
2620 fMemory->removeMapping(this);
2621 fMemory->release();
2622 }
2623 fMemory = _memory;
2624
2625 return( true );
2626 }
2627
2628 struct IOMemoryDescriptorMapAllocRef
2629 {
2630 ipc_port_t sharedMem;
2631 vm_map_t map;
2632 mach_vm_address_t mapped;
2633 mach_vm_size_t size;
2634 mach_vm_size_t sourceOffset;
2635 IOOptionBits options;
2636 };
2637
2638 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2639 {
2640 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2641 IOReturn err;
2642
2643 do {
2644 if( ref->sharedMem)
2645 {
2646 vm_prot_t prot = VM_PROT_READ
2647 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2648
2649 // VM system requires write access to change cache mode
2650 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2651 prot |= VM_PROT_WRITE;
2652
2653 // set memory entry cache
2654 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2655 switch (ref->options & kIOMapCacheMask)
2656 {
2657 case kIOMapInhibitCache:
2658 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2659 break;
2660
2661 case kIOMapWriteThruCache:
2662 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2663 break;
2664
2665 case kIOMapWriteCombineCache:
2666 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2667 break;
2668
2669 case kIOMapCopybackCache:
2670 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2671 break;
2672
2673 case kIOMapDefaultCache:
2674 default:
2675 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2676 break;
2677 }
2678
2679 vm_size_t unused = 0;
2680
2681 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2682 memEntryCacheMode, NULL, ref->sharedMem );
2683 if (KERN_SUCCESS != err)
2684 IOLog("MAP_MEM_ONLY failed %d\n", err);
2685
2686 err = mach_vm_map( map,
2687 &ref->mapped,
2688 ref->size, 0 /* mask */,
2689 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2690 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2691 ref->sharedMem, ref->sourceOffset,
2692 false, // copy
2693 prot, // cur
2694 prot, // max
2695 VM_INHERIT_NONE);
2696
2697 if( KERN_SUCCESS != err) {
2698 ref->mapped = 0;
2699 continue;
2700 }
2701 ref->map = map;
2702 }
2703 else
2704 {
2705 err = mach_vm_allocate(map, &ref->mapped, ref->size,
2706 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2707 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2708 if( KERN_SUCCESS != err) {
2709 ref->mapped = 0;
2710 continue;
2711 }
2712 ref->map = map;
2713 // Make sure these pages don't get copied if the task forks.
2714 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
2715 assert( KERN_SUCCESS == err );
2716 }
2717 }
2718 while( false );
2719
2720 return( err );
2721 }
2722
2723 kern_return_t
2724 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2725 mach_vm_size_t offset,
2726 mach_vm_address_t * address, mach_vm_size_t length)
2727 {
2728 IOReturn err;
2729 IOMemoryDescriptorMapAllocRef ref;
2730
2731 ref.map = *map;
2732 ref.sharedMem = entry;
2733 ref.sourceOffset = trunc_page_64(offset);
2734 ref.options = options;
2735 ref.size = length;
2736
2737 if (options & kIOMapAnywhere)
2738 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2739 ref.mapped = 0;
2740 else
2741 ref.mapped = *address;
2742
2743 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2744 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2745 else
2746 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2747
2748 *address = ref.mapped;
2749 *map = ref.map;
2750
2751 return (err);
2752 }
2753
2754 kern_return_t
2755 IOMemoryDescriptorMapCopy(vm_map_t * map,
2756 IOOptionBits options,
2757 mach_vm_size_t offset,
2758 mach_vm_address_t * address, mach_vm_size_t length)
2759 {
2760 IOReturn err;
2761 IOMemoryDescriptorMapAllocRef ref;
2762
2763 ref.map = *map;
2764 ref.sharedMem = NULL;
2765 ref.sourceOffset = trunc_page_64(offset);
2766 ref.options = options;
2767 ref.size = length;
2768
2769 if (options & kIOMapAnywhere)
2770 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2771 ref.mapped = 0;
2772 else
2773 ref.mapped = *address;
2774
2775 if (ref.map == kernel_map)
2776 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2777 else
2778 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
2779
2780 *address = ref.mapped;
2781 *map = ref.map;
2782
2783 return (err);
2784 }
2785
2786 IOReturn IOMemoryDescriptor::doMap(
2787 vm_map_t __addressMap,
2788 IOVirtualAddress * __address,
2789 IOOptionBits options,
2790 IOByteCount __offset,
2791 IOByteCount __length )
2792 {
2793 #ifndef __LP64__
2794 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2795 #endif /* !__LP64__ */
2796
2797 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2798 mach_vm_size_t offset = mapping->fOffset + __offset;
2799 mach_vm_size_t length = mapping->fLength;
2800
2801 IOReturn err = kIOReturnSuccess;
2802 memory_object_t pager;
2803 mach_vm_size_t pageOffset;
2804 IOPhysicalAddress sourceAddr;
2805 unsigned int lock_count;
2806
2807 do
2808 {
2809 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2810 pageOffset = sourceAddr - trunc_page( sourceAddr );
2811
2812 if( reserved)
2813 pager = (memory_object_t) reserved->devicePager;
2814 else
2815 pager = MACH_PORT_NULL;
2816
2817 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2818 {
2819 upl_t redirUPL2;
2820 vm_size_t size;
2821 int flags;
2822
2823 if (!_memEntry)
2824 {
2825 err = kIOReturnNotReadable;
2826 continue;
2827 }
2828
2829 size = round_page(mapping->fLength + pageOffset);
2830 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2831 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2832
2833 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2834 NULL, NULL,
2835 &flags))
2836 redirUPL2 = NULL;
2837
2838 for (lock_count = 0;
2839 IORecursiveLockHaveLock(gIOMemoryLock);
2840 lock_count++) {
2841 UNLOCK;
2842 }
2843 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2844 for (;
2845 lock_count;
2846 lock_count--) {
2847 LOCK;
2848 }
2849
2850 if (kIOReturnSuccess != err)
2851 {
2852 IOLog("upl_transpose(%x)\n", err);
2853 err = kIOReturnSuccess;
2854 }
2855
2856 if (redirUPL2)
2857 {
2858 upl_commit(redirUPL2, NULL, 0);
2859 upl_deallocate(redirUPL2);
2860 redirUPL2 = 0;
2861 }
2862 {
2863 // swap the memEntries since they now refer to different vm_objects
2864 void * me = _memEntry;
2865 _memEntry = mapping->fMemory->_memEntry;
2866 mapping->fMemory->_memEntry = me;
2867 }
2868 if (pager)
2869 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2870 }
2871 else
2872 {
2873 mach_vm_address_t address;
2874
2875 if (!(options & kIOMapAnywhere))
2876 {
2877 address = trunc_page_64(mapping->fAddress);
2878 if( (mapping->fAddress - address) != pageOffset)
2879 {
2880 err = kIOReturnVMError;
2881 continue;
2882 }
2883 }
2884
2885 vm_map_t map = mapping->fAddressMap;
2886 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2887 options, (kIOMemoryBufferPageable & _flags),
2888 offset, &address, round_page_64(length + pageOffset));
2889 if( err != KERN_SUCCESS)
2890 continue;
2891
2892 if (!_memEntry || pager)
2893 {
2894 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2895 if (err != KERN_SUCCESS)
2896 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2897 }
2898
2899 #if DEBUG
2900 if (kIOLogMapping & gIOKitDebug)
2901 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2902 err, this, sourceAddr, mapping, address, offset, length);
2903 #endif
2904
2905 if (err == KERN_SUCCESS)
2906 mapping->fAddress = address + pageOffset;
2907 else
2908 mapping->fAddress = NULL;
2909 }
2910 }
2911 while( false );
2912
2913 return (err);
2914 }
2915
2916 IOReturn IOMemoryDescriptor::handleFault(
2917 void * _pager,
2918 vm_map_t addressMap,
2919 mach_vm_address_t address,
2920 mach_vm_size_t sourceOffset,
2921 mach_vm_size_t length,
2922 IOOptionBits options )
2923 {
2924 IOReturn err = kIOReturnSuccess;
2925 memory_object_t pager = (memory_object_t) _pager;
2926 mach_vm_size_t size;
2927 mach_vm_size_t bytes;
2928 mach_vm_size_t page;
2929 mach_vm_size_t pageOffset;
2930 mach_vm_size_t pagerOffset;
2931 IOPhysicalLength segLen;
2932 addr64_t physAddr;
2933
2934 if( !addressMap)
2935 {
2936 if( kIOMemoryRedirected & _flags)
2937 {
2938 #if DEBUG
2939 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2940 #endif
2941 do {
2942 SLEEP;
2943 } while( kIOMemoryRedirected & _flags );
2944 }
2945
2946 return( kIOReturnSuccess );
2947 }
2948
2949 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
2950 assert( physAddr );
2951 pageOffset = physAddr - trunc_page_64( physAddr );
2952 pagerOffset = sourceOffset;
2953
2954 size = length + pageOffset;
2955 physAddr -= pageOffset;
2956
2957 segLen += pageOffset;
2958 bytes = size;
2959 do
2960 {
2961 // In the middle of the loop, only map whole pages
2962 if( segLen >= bytes)
2963 segLen = bytes;
2964 else if( segLen != trunc_page( segLen))
2965 err = kIOReturnVMError;
2966 if( physAddr != trunc_page_64( physAddr))
2967 err = kIOReturnBadArgument;
2968 if (kIOReturnSuccess != err)
2969 break;
2970
2971 #if DEBUG
2972 if( kIOLogMapping & gIOKitDebug)
2973 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2974 addressMap, address + pageOffset, physAddr + pageOffset,
2975 segLen - pageOffset);
2976 #endif
2977
2978
2979 if( pager) {
2980 if( reserved && reserved->pagerContig) {
2981 IOPhysicalLength allLen;
2982 addr64_t allPhys;
2983
2984 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
2985 assert( allPhys );
2986 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2987 }
2988 else
2989 {
2990
2991 for( page = 0;
2992 (page < segLen) && (KERN_SUCCESS == err);
2993 page += page_size)
2994 {
2995 err = device_pager_populate_object(pager, pagerOffset,
2996 (ppnum_t)(atop_64(physAddr + page)), page_size);
2997 pagerOffset += page_size;
2998 }
2999 }
3000 assert( KERN_SUCCESS == err );
3001 if( err)
3002 break;
3003 }
3004
3005 // This call to vm_fault causes an early pmap level resolution
3006 // of the mappings created above for kernel mappings, since
3007 // faulting in later can't take place from interrupt level.
3008 /* *** ALERT *** */
3009 /* *** Temporary Workaround *** */
3010
3011 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3012 {
3013 vm_fault(addressMap,
3014 (vm_map_offset_t)address,
3015 VM_PROT_READ|VM_PROT_WRITE,
3016 FALSE, THREAD_UNINT, NULL,
3017 (vm_map_offset_t)0);
3018 }
3019
3020 /* *** Temporary Workaround *** */
3021 /* *** ALERT *** */
3022
3023 sourceOffset += segLen - pageOffset;
3024 address += segLen;
3025 bytes -= segLen;
3026 pageOffset = 0;
3027
3028 }
3029 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3030
3031 if (bytes)
3032 err = kIOReturnBadArgument;
3033
3034 return (err);
3035 }
3036
3037 IOReturn IOMemoryDescriptor::doUnmap(
3038 vm_map_t addressMap,
3039 IOVirtualAddress __address,
3040 IOByteCount __length )
3041 {
3042 IOReturn err;
3043 mach_vm_address_t address;
3044 mach_vm_size_t length;
3045
3046 if (__length)
3047 {
3048 address = __address;
3049 length = __length;
3050 }
3051 else
3052 {
3053 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3054 address = ((IOMemoryMap *) __address)->fAddress;
3055 length = ((IOMemoryMap *) __address)->fLength;
3056 }
3057
3058 if ((addressMap == kernel_map)
3059 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3060 addressMap = IOPageableMapForAddress( address );
3061
3062 #if DEBUG
3063 if( kIOLogMapping & gIOKitDebug)
3064 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3065 addressMap, address, length );
3066 #endif
3067
3068 err = mach_vm_deallocate( addressMap, address, length );
3069
3070 return (err);
3071 }
3072
3073 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3074 {
3075 IOReturn err = kIOReturnSuccess;
3076 IOMemoryMap * mapping = 0;
3077 OSIterator * iter;
3078
3079 LOCK;
3080
3081 if( doRedirect)
3082 _flags |= kIOMemoryRedirected;
3083 else
3084 _flags &= ~kIOMemoryRedirected;
3085
3086 do {
3087 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3088 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3089 mapping->redirect( safeTask, doRedirect );
3090
3091 iter->release();
3092 }
3093 } while( false );
3094
3095 if (!doRedirect)
3096 {
3097 WAKEUP;
3098 }
3099
3100 UNLOCK;
3101
3102 #ifndef __LP64__
3103 // temporary binary compatibility
3104 IOSubMemoryDescriptor * subMem;
3105 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3106 err = subMem->redirect( safeTask, doRedirect );
3107 else
3108 err = kIOReturnSuccess;
3109 #endif /* !__LP64__ */
3110
3111 return( err );
3112 }
3113
3114 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3115 {
3116 IOReturn err = kIOReturnSuccess;
3117
3118 if( fSuperMap) {
3119 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3120 } else {
3121
3122 LOCK;
3123
3124 do
3125 {
3126 if (!fAddress)
3127 break;
3128 if (!fAddressMap)
3129 break;
3130
3131 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3132 && (0 == (fOptions & kIOMapStatic)))
3133 {
3134 IOUnmapPages( fAddressMap, fAddress, fLength );
3135 err = kIOReturnSuccess;
3136 #if DEBUG
3137 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3138 #endif
3139 }
3140 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3141 {
3142 IOOptionBits newMode;
3143 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3144 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3145 }
3146 }
3147 while (false);
3148 UNLOCK;
3149 }
3150
3151 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3152 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3153 && safeTask
3154 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3155 fMemory->redirect(safeTask, doRedirect);
3156
3157 return( err );
3158 }
3159
3160 IOReturn IOMemoryMap::unmap( void )
3161 {
3162 IOReturn err;
3163
3164 LOCK;
3165
3166 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3167 && (0 == (fOptions & kIOMapStatic))) {
3168
3169 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3170
3171 } else
3172 err = kIOReturnSuccess;
3173
3174 if (fAddressMap)
3175 {
3176 vm_map_deallocate(fAddressMap);
3177 fAddressMap = 0;
3178 }
3179
3180 fAddress = 0;
3181
3182 UNLOCK;
3183
3184 return( err );
3185 }
3186
3187 void IOMemoryMap::taskDied( void )
3188 {
3189 LOCK;
3190 if (fUserClientUnmap)
3191 unmap();
3192 if( fAddressMap) {
3193 vm_map_deallocate(fAddressMap);
3194 fAddressMap = 0;
3195 }
3196 fAddressTask = 0;
3197 fAddress = 0;
3198 UNLOCK;
3199 }
3200
3201 IOReturn IOMemoryMap::userClientUnmap( void )
3202 {
3203 fUserClientUnmap = true;
3204 return (kIOReturnSuccess);
3205 }
3206
3207 // Overload the release mechanism. Every mapping must be a member
3208 // of its memory descriptor's _mappings set, which means there are
3209 // always 2 references on a mapping. When either of these references
3210 // is released we need to free ourselves.
3211 void IOMemoryMap::taggedRelease(const void *tag) const
3212 {
3213 LOCK;
3214 super::taggedRelease(tag, 2);
3215 UNLOCK;
3216 }
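/*
 * Rough lifecycle sketch of the two-reference scheme noted above (the
 * 'md' and 'map' names are illustrative). Creating a mapping leaves one
 * reference with the caller and one in the descriptor's _mappings set,
 * so a single release() from either holder, routed through
 * taggedRelease(tag, 2), is what triggers free(); free() then drops the
 * set's reference via removeMapping().
 *
 *     IOMemoryMap * map = md->map();      // caller ref + _mappings ref
 *     // ... use map->getAddress(), map->getLength() ...
 *     map->release();                     // unmap() and free() run here
 */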
3217
3218 void IOMemoryMap::free()
3219 {
3220 unmap();
3221
3222 if (fMemory)
3223 {
3224 LOCK;
3225 fMemory->removeMapping(this);
3226 UNLOCK;
3227 fMemory->release();
3228 }
3229
3230 if (fOwner && (fOwner != fMemory))
3231 {
3232 LOCK;
3233 fOwner->removeMapping(this);
3234 UNLOCK;
3235 }
3236
3237 if (fSuperMap)
3238 fSuperMap->release();
3239
3240 if (fRedirUPL) {
3241 upl_commit(fRedirUPL, NULL, 0);
3242 upl_deallocate(fRedirUPL);
3243 }
3244
3245 super::free();
3246 }
3247
3248 IOByteCount IOMemoryMap::getLength()
3249 {
3250 return( fLength );
3251 }
3252
3253 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3254 {
3255 #ifndef __LP64__
3256 if (fSuperMap)
3257 fSuperMap->getVirtualAddress();
3258 else if (fAddressMap
3259 && vm_map_is_64bit(fAddressMap)
3260 && (sizeof(IOVirtualAddress) < 8))
3261 {
3262 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3263 }
3264 #endif /* !__LP64__ */
3265
3266 return (fAddress);
3267 }
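/*
 * Client-side sketch of the guidance in the backtrace message above
 * ('map' is an illustrative name): when the task's map is 64-bit, read
 * the mapped address through getAddress(), whose return type is wide
 * enough to hold it.
 *
 *     mach_vm_address_t addr = map->getAddress();
 */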
3268
3269 #ifndef __LP64__
3270 mach_vm_address_t IOMemoryMap::getAddress()
3271 {
3272 return( fAddress);
3273 }
3274
3275 mach_vm_size_t IOMemoryMap::getSize()
3276 {
3277 return( fLength );
3278 }
3279 #endif /* !__LP64__ */
3280
3281
3282 task_t IOMemoryMap::getAddressTask()
3283 {
3284 if( fSuperMap)
3285 return( fSuperMap->getAddressTask());
3286 else
3287 return( fAddressTask);
3288 }
3289
3290 IOOptionBits IOMemoryMap::getMapOptions()
3291 {
3292 return( fOptions);
3293 }
3294
3295 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3296 {
3297 return( fMemory );
3298 }
3299
3300 IOMemoryMap * IOMemoryMap::copyCompatible(
3301 IOMemoryMap * newMapping )
3302 {
3303 task_t task = newMapping->getAddressTask();
3304 mach_vm_address_t toAddress = newMapping->fAddress;
3305 IOOptionBits _options = newMapping->fOptions;
3306 mach_vm_size_t _offset = newMapping->fOffset;
3307 mach_vm_size_t _length = newMapping->fLength;
3308
3309 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3310 return( 0 );
3311 if( (fOptions ^ _options) & kIOMapReadOnly)
3312 return( 0 );
3313 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3314 && ((fOptions ^ _options) & kIOMapCacheMask))
3315 return( 0 );
3316
3317 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3318 return( 0 );
3319
3320 if( _offset < fOffset)
3321 return( 0 );
3322
3323 _offset -= fOffset;
3324
3325 if( (_offset + _length) > fLength)
3326 return( 0 );
3327
3328 retain();
3329 if( (fLength == _length) && (!_offset))
3330 {
3331 newMapping->release();
3332 newMapping = this;
3333 }
3334 else
3335 {
3336 newMapping->fSuperMap = this;
3337 newMapping->fOffset = _offset;
3338 newMapping->fAddress = fAddress + _offset;
3339 }
3340
3341 return( newMapping );
3342 }
3343
3344 IOPhysicalAddress
3345 #ifdef __LP64__
3346 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3347 #else /* !__LP64__ */
3348 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3349 #endif /* !__LP64__ */
3350 {
3351 IOPhysicalAddress address;
3352
3353 LOCK;
3354 #ifdef __LP64__
3355 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3356 #else /* !__LP64__ */
3357 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3358 #endif /* !__LP64__ */
3359 UNLOCK;
3360
3361 return( address );
3362 }
3363
3364 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3365
3366 #undef super
3367 #define super OSObject
3368
3369 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3370
3371 void IOMemoryDescriptor::initialize( void )
3372 {
3373 if( 0 == gIOMemoryLock)
3374 gIOMemoryLock = IORecursiveLockAlloc();
3375
3376 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3377 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3378 gIOLastPage = IOGetLastPageNumber();
3379 }
3380
3381 void IOMemoryDescriptor::free( void )
3382 {
3383 if( _mappings)
3384 _mappings->release();
3385
3386 super::free();
3387 }
3388
3389 IOMemoryMap * IOMemoryDescriptor::setMapping(
3390 task_t intoTask,
3391 IOVirtualAddress mapAddress,
3392 IOOptionBits options )
3393 {
3394 return (createMappingInTask( intoTask, mapAddress,
3395 options | kIOMapStatic,
3396 0, getLength() ));
3397 }
3398
3399 IOMemoryMap * IOMemoryDescriptor::map(
3400 IOOptionBits options )
3401 {
3402 return (createMappingInTask( kernel_task, 0,
3403 options | kIOMapAnywhere,
3404 0, getLength() ));
3405 }
3406
3407 #ifndef __LP64__
3408 IOMemoryMap * IOMemoryDescriptor::map(
3409 task_t intoTask,
3410 IOVirtualAddress atAddress,
3411 IOOptionBits options,
3412 IOByteCount offset,
3413 IOByteCount length )
3414 {
3415 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3416 {
3417 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3418 return (0);
3419 }
3420
3421 return (createMappingInTask(intoTask, atAddress,
3422 options, offset, length));
3423 }
3424 #endif /* !__LP64__ */
3425
3426 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3427 task_t intoTask,
3428 mach_vm_address_t atAddress,
3429 IOOptionBits options,
3430 mach_vm_size_t offset,
3431 mach_vm_size_t length)
3432 {
3433 IOMemoryMap * result;
3434 IOMemoryMap * mapping;
3435
3436 if (0 == length)
3437 length = getLength();
3438
3439 mapping = new IOMemoryMap;
3440
3441 if( mapping
3442 && !mapping->init( intoTask, atAddress,
3443 options, offset, length )) {
3444 mapping->release();
3445 mapping = 0;
3446 }
3447
3448 if (mapping)
3449 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3450 else
3451 result = 0;
3452
3453 #if DEBUG
3454 if (!result)
3455 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3456 this, atAddress, options, offset, length);
3457 #endif
3458
3459 return (result);
3460 }
3461
3462 #ifndef __LP64__ // there is only a 64-bit version for LP64
3463 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3464 IOOptionBits options,
3465 IOByteCount offset)
3466 {
3467 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3468 }
3469 #endif
3470
3471 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3472 IOOptionBits options,
3473 mach_vm_size_t offset)
3474 {
3475 IOReturn err = kIOReturnSuccess;
3476 IOMemoryDescriptor * physMem = 0;
3477
3478 LOCK;
3479
3480 if (fAddress && fAddressMap) do
3481 {
3482 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3483 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3484 {
3485 physMem = fMemory;
3486 physMem->retain();
3487 }
3488
3489 if (!fRedirUPL)
3490 {
3491 vm_size_t size = round_page(fLength);
3492 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3493 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3494 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3495 NULL, NULL,
3496 &flags))
3497 fRedirUPL = 0;
3498
3499 if (physMem)
3500 {
3501 IOUnmapPages( fAddressMap, fAddress, fLength );
3502 if (false)
3503 physMem->redirect(0, true);
3504 }
3505 }
3506
3507 if (newBackingMemory)
3508 {
3509 if (newBackingMemory != fMemory)
3510 {
3511 fOffset = 0;
3512 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3513 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3514 offset, fLength))
3515 err = kIOReturnError;
3516 }
3517 if (fRedirUPL)
3518 {
3519 upl_commit(fRedirUPL, NULL, 0);
3520 upl_deallocate(fRedirUPL);
3521 fRedirUPL = 0;
3522 }
3523 if (false && physMem)
3524 physMem->redirect(0, false);
3525 }
3526 }
3527 while (false);
3528
3529 UNLOCK;
3530
3531 if (physMem)
3532 physMem->release();
3533
3534 return (err);
3535 }
3536
3537 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3538 IOMemoryDescriptor * owner,
3539 task_t __intoTask,
3540 IOVirtualAddress __address,
3541 IOOptionBits options,
3542 IOByteCount __offset,
3543 IOByteCount __length )
3544 {
3545 #ifndef __LP64__
3546 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3547 #endif /* !__LP64__ */
3548
3549 IOMemoryDescriptor * mapDesc = 0;
3550 IOMemoryMap * result = 0;
3551 OSIterator * iter;
3552
3553 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3554 mach_vm_size_t offset = mapping->fOffset + __offset;
3555 mach_vm_size_t length = mapping->fLength;
3556
3557 mapping->fOffset = offset;
3558
3559 LOCK;
3560
3561 do
3562 {
3563 if (kIOMapStatic & options)
3564 {
3565 result = mapping;
3566 addMapping(mapping);
3567 mapping->setMemoryDescriptor(this, 0);
3568 continue;
3569 }
3570
3571 if (kIOMapUnique & options)
3572 {
3573 addr64_t phys;
3574 IOByteCount physLen;
3575
3576 // if (owner != this) continue;
3577
3578 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3579 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3580 {
3581 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3582 if (!phys || (physLen < length))
3583 continue;
3584
3585 mapDesc = IOMemoryDescriptor::withAddressRange(
3586 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3587 if (!mapDesc)
3588 continue;
3589 offset = 0;
3590 mapping->fOffset = offset;
3591 }
3592 }
3593 else
3594 {
3595 // look for a compatible existing mapping
3596 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3597 {
3598 IOMemoryMap * lookMapping;
3599 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3600 {
3601 if ((result = lookMapping->copyCompatible(mapping)))
3602 {
3603 addMapping(result);
3604 result->setMemoryDescriptor(this, offset);
3605 break;
3606 }
3607 }
3608 iter->release();
3609 }
3610 if (result || (options & kIOMapReference))
3611 continue;
3612 }
3613
3614 if (!mapDesc)
3615 {
3616 mapDesc = this;
3617 mapDesc->retain();
3618 }
3619 IOReturn
3620 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3621 if (kIOReturnSuccess == kr)
3622 {
3623 result = mapping;
3624 mapDesc->addMapping(result);
3625 result->setMemoryDescriptor(mapDesc, offset);
3626 }
3627 else
3628 {
3629 mapping->release();
3630 mapping = NULL;
3631 }
3632 }
3633 while( false );
3634
3635 UNLOCK;
3636
3637 if (mapDesc)
3638 mapDesc->release();
3639
3640 return (result);
3641 }
3642
3643 void IOMemoryDescriptor::addMapping(
3644 IOMemoryMap * mapping )
3645 {
3646 if( mapping)
3647 {
3648 if( 0 == _mappings)
3649 _mappings = OSSet::withCapacity(1);
3650 if( _mappings )
3651 _mappings->setObject( mapping );
3652 }
3653 }
3654
3655 void IOMemoryDescriptor::removeMapping(
3656 IOMemoryMap * mapping )
3657 {
3658 if( _mappings)
3659 _mappings->removeObject( mapping);
3660 }
3661
3662 #ifndef __LP64__
3663 // obsolete initializers
3664 // - initWithOptions is the designated initializer
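/*
 * For comparison, a minimal sketch of the preferred creation path that
 * replaces these initializers (userAddr, userLen and task are
 * illustrative names); the factory ultimately runs through
 * initWithOptions(), the designated initializer mentioned above.
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddressRange(userAddr, userLen,
 *                                              kIODirectionInOut, task);
 */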
3665 bool
3666 IOMemoryDescriptor::initWithAddress(void * address,
3667 IOByteCount length,
3668 IODirection direction)
3669 {
3670 return( false );
3671 }
3672
3673 bool
3674 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
3675 IOByteCount length,
3676 IODirection direction,
3677 task_t task)
3678 {
3679 return( false );
3680 }
3681
3682 bool
3683 IOMemoryDescriptor::initWithPhysicalAddress(
3684 IOPhysicalAddress address,
3685 IOByteCount length,
3686 IODirection direction )
3687 {
3688 return( false );
3689 }
3690
3691 bool
3692 IOMemoryDescriptor::initWithRanges(
3693 IOVirtualRange * ranges,
3694 UInt32 withCount,
3695 IODirection direction,
3696 task_t task,
3697 bool asReference)
3698 {
3699 return( false );
3700 }
3701
3702 bool
3703 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3704 UInt32 withCount,
3705 IODirection direction,
3706 bool asReference)
3707 {
3708 return( false );
3709 }
3710
3711 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3712 IOByteCount * lengthOfSegment)
3713 {
3714 return( 0 );
3715 }
3716 #endif /* !__LP64__ */
3717
3718 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3719
3720 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3721 {
3722 OSSymbol const *keys[2];
3723 OSObject *values[2];
3724 struct SerData {
3725 user_addr_t address;
3726 user_size_t length;
3727 } *vcopy;
3728 unsigned int index, nRanges;
3729 bool result;
3730
3731 IOOptionBits type = _flags & kIOMemoryTypeMask;
3732
3733 if (s == NULL) return false;
3734 if (s->previouslySerialized(this)) return true;
3735
3736 // Pretend we are an array.
3737 if (!s->addXMLStartTag(this, "array")) return false;
3738
3739 nRanges = _rangesCount;
3740 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3741 if (vcopy == 0) return false;
3742
3743 keys[0] = OSSymbol::withCString("address");
3744 keys[1] = OSSymbol::withCString("length");
3745
3746 result = false;
3747 values[0] = values[1] = 0;
3748
3749 // From this point on we can go to bail.
3750
3751 // Copy the volatile data so we don't have to allocate memory
3752 // while the lock is held.
3753 LOCK;
3754 if (nRanges == _rangesCount) {
3755 Ranges vec = _ranges;
3756 for (index = 0; index < nRanges; index++) {
3757 user_addr_t addr; IOByteCount len;
3758 getAddrLenForInd(addr, len, type, vec, index);
3759 vcopy[index].address = addr;
3760 vcopy[index].length = len;
3761 }
3762 } else {
3763 // The descriptor changed out from under us. Give up.
3764 UNLOCK;
3765 result = false;
3766 goto bail;
3767 }
3768 UNLOCK;
3769
3770 for (index = 0; index < nRanges; index++)
3771 {
3772 user_addr_t addr = vcopy[index].address;
3773 IOByteCount len = (IOByteCount) vcopy[index].length;
3774 values[0] =
3775 OSNumber::withNumber(addr, sizeof(addr) * 8);
3776 if (values[0] == 0) {
3777 result = false;
3778 goto bail;
3779 }
3780 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3781 if (values[1] == 0) {
3782 result = false;
3783 goto bail;
3784 }
3785 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3786 if (dict == 0) {
3787 result = false;
3788 goto bail;
3789 }
3790 values[0]->release();
3791 values[1]->release();
3792 values[0] = values[1] = 0;
3793
3794 result = dict->serialize(s);
3795 dict->release();
3796 if (!result) {
3797 goto bail;
3798 }
3799 }
3800 result = s->addXMLEndTag("array");
3801
3802 bail:
3803 if (values[0])
3804 values[0]->release();
3805 if (values[1])
3806 values[1]->release();
3807 if (keys[0])
3808 keys[0]->release();
3809 if (keys[1])
3810 keys[1]->release();
3811 if (vcopy)
3812 IOFree(vcopy, sizeof(SerData) * nRanges);
3813 return result;
3814 }
3815
3816 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3817
3818 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3819 #ifdef __LP64__
3820 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
3821 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
3822 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
3823 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
3824 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3825 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3826 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3827 #else /* !__LP64__ */
3828 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3829 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3830 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3831 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3832 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3833 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
3834 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
3835 #endif /* !__LP64__ */
3836 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3837 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3838 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3839 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3840 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3841 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3842 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3843 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3844
3845 /* ex-inline function implementation */
3846 IOPhysicalAddress
3847 IOMemoryDescriptor::getPhysicalAddress()
3848 { return( getPhysicalSegment( 0, 0 )); }