1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34
35
36 #include <sys/cdefs.h>
37
38 #include <IOKit/assert.h>
39 #include <IOKit/system.h>
40 #include <IOKit/IOLib.h>
41 #include <IOKit/IOMemoryDescriptor.h>
42 #include <IOKit/IOMapper.h>
43 #include <IOKit/IODMACommand.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45
46 #ifndef __LP64__
47 #include <IOKit/IOSubMemoryDescriptor.h>
48 #endif /* !__LP64__ */
49
50 #include <IOKit/IOKitDebug.h>
51 #include <libkern/OSDebug.h>
52
53 #include "IOKitKernelInternal.h"
54
55 #include <libkern/c++/OSContainers.h>
56 #include <libkern/c++/OSDictionary.h>
57 #include <libkern/c++/OSArray.h>
58 #include <libkern/c++/OSSymbol.h>
59 #include <libkern/c++/OSNumber.h>
60
61 #include <sys/uio.h>
62
63 __BEGIN_DECLS
64 #include <vm/pmap.h>
65 #include <vm/vm_pageout.h>
66 #include <mach/memory_object_types.h>
67 #include <device/device_port.h>
68
69 #include <mach/vm_prot.h>
70 #include <mach/mach_vm.h>
71 #include <vm/vm_fault.h>
72 #include <vm/vm_protos.h>
73
74 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
75 extern void ipc_port_release_send(ipc_port_t port);
76
77 kern_return_t
78 memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
86
87 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
88
89 __END_DECLS
90
91 #define kIOMapperWaitSystem ((IOMapper *) 1)
92
93 static IOMapper * gIOSystemMapper = NULL;
94
95 ppnum_t gIOLastPage;
96
97 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
98
99 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
100
101 #define super IOMemoryDescriptor
102
103 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
104
105 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
107 static IORecursiveLock * gIOMemoryLock;
108
109 #define LOCK IORecursiveLockLock( gIOMemoryLock)
110 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
111 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
112 #define WAKEUP \
113 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
114
115 #if 0
116 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
117 #else
118 #define DEBG(fmt, args...) {}
119 #endif
120
121 #define IOMD_DEBUG_DMAACTIVE 1
122
123 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
124
125 // Some data structures and accessor macros used by the initWithOptions()
126 // function.
127
128 enum ioPLBlockFlags {
129 kIOPLOnDevice = 0x00000001,
130 kIOPLExternUPL = 0x00000002,
131 };
132
133 struct typePersMDData
134 {
135 const IOGeneralMemoryDescriptor *fMD;
136 ipc_port_t fMemEntry;
137 };
138
139 struct ioPLBlock {
140 upl_t fIOPL;
141 vm_address_t fPageInfo; // Pointer to page list or index into it
142 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
143 ppnum_t fMappedPage; // Page number of first page in this iopl
144 unsigned int fPageOffset; // Offset within first page of iopl
145 unsigned int fFlags; // Flags
146 };
147
148 struct ioGMDData {
149 IOMapper * fMapper;
150 uint8_t fDMAMapNumAddressBits;
151 uint64_t fDMAMapAlignment;
152 addr64_t fMappedBase;
153 uint64_t fPreparationID;
154 unsigned int fPageCnt;
155 unsigned char fDiscontig;
156 #if __LP64__
157 // align arrays to 8 bytes so following macros work
158 unsigned char fPad[3];
159 #endif
160 upl_page_info_t fPageList[1]; /* variable length */
161 ioPLBlock fBlocks[1]; /* variable length */
162 };
163
164 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
165 #define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
166 #define getNumIOPL(osd, d) \
167 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
168 #define getPageList(d) (&(d->fPageList[0]))
169 #define computeDataSize(p, u) \
170 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
171
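// Layout sketch (illustrative comment, not original to this file): for a descriptor
// covering p pages split across u IOPLs, computeDataSize(p, u) sizes one OSData
// allocation holding the ioGMDData header, then p upl_page_info_t entries, then
// u ioPLBlock entries, e.g.
//
//     unsigned int dataSize = computeDataSize(_pages, /* upls */ count * 2);
//     ioGMDData *       dataP = getDataP(_memoryEntries);   // fixed header
//     upl_page_info_t * pages = getPageList(dataP);         // p page entries
//     ioPLBlock *       iopls = getIOPLList(dataP);         // u IOPL blocks after the pages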
172
173 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
174
175 #define next_page(a) ( trunc_page(a) + PAGE_SIZE )
176
177
178 extern "C" {
179
180 kern_return_t device_data_action(
181 uintptr_t device_handle,
182 ipc_port_t device_pager,
183 vm_prot_t protection,
184 vm_object_offset_t offset,
185 vm_size_t size)
186 {
187 kern_return_t kr;
188 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
189 IOMemoryDescriptor * memDesc;
190
191 LOCK;
192 memDesc = ref->dp.memory;
193 if( memDesc)
194 {
195 memDesc->retain();
196 kr = memDesc->handleFault( device_pager, 0, 0,
197 offset, size, kIOMapDefaultCache /*?*/);
198 memDesc->release();
199 }
200 else
201 kr = KERN_ABORTED;
202 UNLOCK;
203
204 return( kr );
205 }
206
207 kern_return_t device_close(
208 uintptr_t device_handle)
209 {
210 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
211
212 IODelete( ref, IOMemoryDescriptorReserved, 1 );
213
214 return( kIOReturnSuccess );
215 }
216 }; // end extern "C"
217
218 // Note this inline function uses C++ reference arguments to return values.
219 // This means that pointers are not passed and NULLs don't have to be
220 // checked for, as a NULL reference is illegal.
221 static inline void
222 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
223 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
224 {
225 assert(kIOMemoryTypeUIO == type
226 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
227 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
228 if (kIOMemoryTypeUIO == type) {
229 user_size_t us;
230 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
231 }
232 #ifndef __LP64__
233 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
234 IOAddressRange cur = r.v64[ind];
235 addr = cur.address;
236 len = cur.length;
237 }
238 #endif /* !__LP64__ */
239 else {
240 IOVirtualRange cur = r.v[ind];
241 addr = cur.address;
242 len = cur.length;
243 }
244 }
245
246 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
247
248 IOMemoryDescriptor *
249 IOMemoryDescriptor::withAddress(void * address,
250 IOByteCount length,
251 IODirection direction)
252 {
253 return IOMemoryDescriptor::
254 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
255 }
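#if 0
/* Usage sketch (illustrative only; ExampleWrapKernelBuffer is a hypothetical helper,
 * not part of this file): describe a kernel buffer with the convenience constructor
 * above. kIOMemoryAutoPrepare is OR'd in by withAddress(), so the memory is wired
 * as soon as the descriptor is created. */
static IOMemoryDescriptor *
ExampleWrapKernelBuffer(void * buf, IOByteCount len)
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(buf, len, kIODirectionOutIn);
    return (md);                       // caller must release() when done
}
#endif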
256
257 #ifndef __LP64__
258 IOMemoryDescriptor *
259 IOMemoryDescriptor::withAddress(IOVirtualAddress address,
260 IOByteCount length,
261 IODirection direction,
262 task_t task)
263 {
264 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
265 if (that)
266 {
267 if (that->initWithAddress(address, length, direction, task))
268 return that;
269
270 that->release();
271 }
272 return 0;
273 }
274 #endif /* !__LP64__ */
275
276 IOMemoryDescriptor *
277 IOMemoryDescriptor::withPhysicalAddress(
278 IOPhysicalAddress address,
279 IOByteCount length,
280 IODirection direction )
281 {
282 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
283 }
284
285 #ifndef __LP64__
286 IOMemoryDescriptor *
287 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
288 UInt32 withCount,
289 IODirection direction,
290 task_t task,
291 bool asReference)
292 {
293 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
294 if (that)
295 {
296 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
297 return that;
298
299 that->release();
300 }
301 return 0;
302 }
303 #endif /* !__LP64__ */
304
305 IOMemoryDescriptor *
306 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
307 mach_vm_size_t length,
308 IOOptionBits options,
309 task_t task)
310 {
311 IOAddressRange range = { address, length };
312 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
313 }
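#if 0
/* Usage sketch (illustrative only; ExampleDescribeUserBuffer is hypothetical): wrap a
 * single range of a user task's address space. The direction bits ride in the options
 * argument, just as withAddress() does above. */
static IOMemoryDescriptor *
ExampleDescribeUserBuffer(task_t userTask, mach_vm_address_t addr, mach_vm_size_t len)
{
    return (IOMemoryDescriptor::withAddressRange(addr, len, kIODirectionOut, userTask));
}
#endif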
314
315 IOMemoryDescriptor *
316 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
317 UInt32 rangeCount,
318 IOOptionBits options,
319 task_t task)
320 {
321 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
322 if (that)
323 {
324 if (task)
325 options |= kIOMemoryTypeVirtual64;
326 else
327 options |= kIOMemoryTypePhysical64;
328
329 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
330 return that;
331
332 that->release();
333 }
334
335 return 0;
336 }
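#if 0
/* Usage sketch (illustrative only; the addresses are placeholders): a two-range
 * scatter list. Per the code above, a non-NULL task selects kIOMemoryTypeVirtual64
 * and a NULL task selects kIOMemoryTypePhysical64, so the same call describes either
 * virtual or physical ranges. */
static IOMemoryDescriptor *
ExampleTwoRanges(task_t task)
{
    IOAddressRange ranges[2] = {
        { 0x1000, 0x2000 },            // address, length
        { 0x8000, 0x1000 },
    };
    return (IOMemoryDescriptor::withAddressRanges(ranges, 2, kIODirectionOutIn, task));
}
#endif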
337
338
339 /*
340 * withOptions:
341 *
342 * Create a new IOMemoryDescriptor. The buffer is made up of several
343 * virtual address ranges from a given task.
344 *
345 * Passing the ranges as a reference will avoid an extra allocation.
346 */
347 IOMemoryDescriptor *
348 IOMemoryDescriptor::withOptions(void * buffers,
349 UInt32 count,
350 UInt32 offset,
351 task_t task,
352 IOOptionBits opts,
353 IOMapper * mapper)
354 {
355 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
356
357 if (self
358 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
359 {
360 self->release();
361 return 0;
362 }
363
364 return self;
365 }
366
367 bool IOMemoryDescriptor::initWithOptions(void * buffers,
368 UInt32 count,
369 UInt32 offset,
370 task_t task,
371 IOOptionBits options,
372 IOMapper * mapper)
373 {
374 return( false );
375 }
376
377 #ifndef __LP64__
378 IOMemoryDescriptor *
379 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
380 UInt32 withCount,
381 IODirection direction,
382 bool asReference)
383 {
384 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
385 if (that)
386 {
387 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
388 return that;
389
390 that->release();
391 }
392 return 0;
393 }
394
395 IOMemoryDescriptor *
396 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
397 IOByteCount offset,
398 IOByteCount length,
399 IODirection direction)
400 {
401 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
402 }
403 #endif /* !__LP64__ */
404
405 IOMemoryDescriptor *
406 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
407 {
408 IOGeneralMemoryDescriptor *origGenMD =
409 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
410
411 if (origGenMD)
412 return IOGeneralMemoryDescriptor::
413 withPersistentMemoryDescriptor(origGenMD);
414 else
415 return 0;
416 }
417
418 IOMemoryDescriptor *
419 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
420 {
421 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
422
423 if (!sharedMem)
424 return 0;
425
426 if (sharedMem == originalMD->_memEntry) {
427 originalMD->retain(); // Add a new reference to ourselves
428 ipc_port_release_send(sharedMem); // Remove extra send right
429 return originalMD;
430 }
431
432 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
433 typePersMDData initData = { originalMD, sharedMem };
434
435 if (self
436 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
437 self->release();
438 self = 0;
439 }
440 return self;
441 }
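#if 0
/* Usage sketch (illustrative only; ExampleClonePersistent is hypothetical): clone a
 * descriptor that was created with kIOMemoryPersistent. Per the code above, if the
 * backing named entry is unchanged the original is returned with an extra reference;
 * otherwise a new descriptor wrapping the current backing store is built. */
static IOMemoryDescriptor *
ExampleClonePersistent(IOMemoryDescriptor * original)
{
    return (IOMemoryDescriptor::withPersistentMemoryDescriptor(original));
}
#endif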
442
443 void *IOGeneralMemoryDescriptor::createNamedEntry()
444 {
445 kern_return_t error;
446 ipc_port_t sharedMem;
447
448 IOOptionBits type = _flags & kIOMemoryTypeMask;
449
450 user_addr_t range0Addr;
451 IOByteCount range0Len;
452 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
453 range0Addr = trunc_page_64(range0Addr);
454
455 vm_size_t size = ptoa_32(_pages);
456 vm_address_t kernelPage = (vm_address_t) range0Addr;
457
458 vm_map_t theMap = ((_task == kernel_task)
459 && (kIOMemoryBufferPageable & _flags))
460 ? IOPageableMapForAddress(kernelPage)
461 : get_task_map(_task);
462
463 memory_object_size_t actualSize = size;
464 vm_prot_t prot = VM_PROT_READ;
465 if (kIODirectionOut != (kIODirectionOutIn & _flags))
466 prot |= VM_PROT_WRITE;
467
468 if (_memEntry)
469 prot |= MAP_MEM_NAMED_REUSE;
470
471 error = mach_make_memory_entry_64(theMap,
472 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
473
474 if (KERN_SUCCESS == error) {
475 if (actualSize == size) {
476 return sharedMem;
477 } else {
478 #if IOASSERT
479 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
480 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
481 #endif
482 ipc_port_release_send( sharedMem );
483 }
484 }
485
486 return MACH_PORT_NULL;
487 }
488
489 #ifndef __LP64__
490 bool
491 IOGeneralMemoryDescriptor::initWithAddress(void * address,
492 IOByteCount withLength,
493 IODirection withDirection)
494 {
495 _singleRange.v.address = (vm_offset_t) address;
496 _singleRange.v.length = withLength;
497
498 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
499 }
500
501 bool
502 IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
503 IOByteCount withLength,
504 IODirection withDirection,
505 task_t withTask)
506 {
507 _singleRange.v.address = address;
508 _singleRange.v.length = withLength;
509
510 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
511 }
512
513 bool
514 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
515 IOPhysicalAddress address,
516 IOByteCount withLength,
517 IODirection withDirection )
518 {
519 _singleRange.p.address = address;
520 _singleRange.p.length = withLength;
521
522 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
523 }
524
525 bool
526 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
527 IOPhysicalRange * ranges,
528 UInt32 count,
529 IODirection direction,
530 bool reference)
531 {
532 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
533
534 if (reference)
535 mdOpts |= kIOMemoryAsReference;
536
537 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
538 }
539
540 bool
541 IOGeneralMemoryDescriptor::initWithRanges(
542 IOVirtualRange * ranges,
543 UInt32 count,
544 IODirection direction,
545 task_t task,
546 bool reference)
547 {
548 IOOptionBits mdOpts = direction;
549
550 if (reference)
551 mdOpts |= kIOMemoryAsReference;
552
553 if (task) {
554 mdOpts |= kIOMemoryTypeVirtual;
555
556 // Auto-prepare if this is a kernel memory descriptor, as very few
557 // clients bother to prepare() kernel memory.
558 // But that was never enforced, so what are you going to do?
559 if (task == kernel_task)
560 mdOpts |= kIOMemoryAutoPrepare;
561 }
562 else
563 mdOpts |= kIOMemoryTypePhysical;
564
565 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
566 }
567 #endif /* !__LP64__ */
568
569 /*
570 * initWithOptions:
571 *
572 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
573 * address ranges from a given task, several physical ranges, a UPL from the ubc
574 * system, or a uio (which may be 64-bit) from the BSD subsystem.
575 *
576 * Passing the ranges as a reference will avoid an extra allocation.
577 *
578 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
579 * existing instance -- note this behavior is not commonly supported in other
580 * I/O Kit classes, although it is supported here.
581 */
582
583 bool
584 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
585 UInt32 count,
586 UInt32 offset,
587 task_t task,
588 IOOptionBits options,
589 IOMapper * mapper)
590 {
591 IOOptionBits type = options & kIOMemoryTypeMask;
592
593 #ifndef __LP64__
594 if (task
595 && (kIOMemoryTypeVirtual == type)
596 && vm_map_is_64bit(get_task_map(task))
597 && ((IOVirtualRange *) buffers)->address)
598 {
599 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
600 return false;
601 }
602 #endif /* !__LP64__ */
603
604 // Grab the original MD's configuration data to initialise the
605 // arguments to this function.
606 if (kIOMemoryTypePersistentMD == type) {
607
608 typePersMDData *initData = (typePersMDData *) buffers;
609 const IOGeneralMemoryDescriptor *orig = initData->fMD;
610 ioGMDData *dataP = getDataP(orig->_memoryEntries);
611
612 // Only accept persistent memory descriptors with valid dataP data.
613 assert(orig->_rangesCount == 1);
614 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
615 return false;
616
617 _memEntry = initData->fMemEntry; // Grab the new named entry
618 options = orig->_flags & ~kIOMemoryAsReference;
619 type = options & kIOMemoryTypeMask;
620 buffers = orig->_ranges.v;
621 count = orig->_rangesCount;
622
623 // Now grab the original task and whatever mapper was previously used
624 task = orig->_task;
625 mapper = dataP->fMapper;
626
627 // We are ready to go through the original initialisation now
628 }
629
630 switch (type) {
631 case kIOMemoryTypeUIO:
632 case kIOMemoryTypeVirtual:
633 #ifndef __LP64__
634 case kIOMemoryTypeVirtual64:
635 #endif /* !__LP64__ */
636 assert(task);
637 if (!task)
638 return false;
639 break;
640
641 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
642 #ifndef __LP64__
643 case kIOMemoryTypePhysical64:
644 #endif /* !__LP64__ */
645 case kIOMemoryTypeUPL:
646 assert(!task);
647 break;
648 default:
649 return false; /* bad argument */
650 }
651
652 assert(buffers);
653 assert(count);
654
655 /*
656 * We can check the _initialized instance variable before having ever set
657 * it to an initial value because I/O Kit guarantees that all our instance
658 * variables are zeroed on an object's allocation.
659 */
660
661 if (_initialized) {
662 /*
663 * An existing memory descriptor is being retargeted to point to
664 * somewhere else. Clean up our present state.
665 */
666 IOOptionBits type = _flags & kIOMemoryTypeMask;
667 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
668 {
669 while (_wireCount)
670 complete();
671 }
672 if (_ranges.v && !(kIOMemoryAsReference & _flags))
673 {
674 if (kIOMemoryTypeUIO == type)
675 uio_free((uio_t) _ranges.v);
676 #ifndef __LP64__
677 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
678 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
679 #endif /* !__LP64__ */
680 else
681 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
682 }
683
684 options |= (kIOMemoryRedirected & _flags);
685 if (!(kIOMemoryRedirected & options))
686 {
687 if (_memEntry)
688 {
689 ipc_port_release_send((ipc_port_t) _memEntry);
690 _memEntry = 0;
691 }
692 if (_mappings)
693 _mappings->flushCollection();
694 }
695 }
696 else {
697 if (!super::init())
698 return false;
699 _initialized = true;
700 }
701
702 // Grab the appropriate mapper
703 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
704 if (kIOMemoryMapperNone & options)
705 mapper = 0; // No Mapper
706 else if (mapper == kIOMapperSystem) {
707 IOMapper::checkForSystemMapper();
708 gIOSystemMapper = mapper = IOMapper::gSystem;
709 }
710
711 // Temp binary compatibility for kIOMemoryThreadSafe
712 if (kIOMemoryReserved6156215 & options)
713 {
714 options &= ~kIOMemoryReserved6156215;
715 options |= kIOMemoryThreadSafe;
716 }
717 // Remove the dynamic internal use flags from the initial setting
718 options &= ~(kIOMemoryPreparedReadOnly);
719 _flags = options;
720 _task = task;
721
722 #ifndef __LP64__
723 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
724 #endif /* !__LP64__ */
725
726 __iomd_reservedA = 0;
727 __iomd_reservedB = 0;
728 _highestPage = 0;
729
730 if (kIOMemoryThreadSafe & options)
731 {
732 if (!_prepareLock)
733 _prepareLock = IOLockAlloc();
734 }
735 else if (_prepareLock)
736 {
737 IOLockFree(_prepareLock);
738 _prepareLock = NULL;
739 }
740
741 if (kIOMemoryTypeUPL == type) {
742
743 ioGMDData *dataP;
744 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
745
746 if (!initMemoryEntries(dataSize, mapper)) return (false);
747 dataP = getDataP(_memoryEntries);
748 dataP->fPageCnt = 0;
749
750 // _wireCount++; // UPLs start out life wired
751
752 _length = count;
753 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
754
755 ioPLBlock iopl;
756 iopl.fIOPL = (upl_t) buffers;
757 upl_set_referenced(iopl.fIOPL, true);
758 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
759
760 if (upl_get_size(iopl.fIOPL) < (count + offset))
761 panic("short external upl");
762
763 _highestPage = upl_get_highest_page(iopl.fIOPL);
764
765 // Set the flag kIOPLOnDevice, conveniently equal to 1
766 iopl.fFlags = pageList->device | kIOPLExternUPL;
767 if (!pageList->device) {
768 // Pre-compute the offset into the UPL's page list
769 pageList = &pageList[atop_32(offset)];
770 offset &= PAGE_MASK;
771 }
772 iopl.fIOMDOffset = 0;
773 iopl.fMappedPage = 0;
774 iopl.fPageInfo = (vm_address_t) pageList;
775 iopl.fPageOffset = offset;
776 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
777 }
778 else {
779 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
780 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
781
782 // Initialize the memory descriptor
783 if (options & kIOMemoryAsReference) {
784 #ifndef __LP64__
785 _rangesIsAllocated = false;
786 #endif /* !__LP64__ */
787
788 // Hack assignment to get the buffer arg into _ranges.
789 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
790 // work, C++ sigh.
791 // This also initialises the uio & physical ranges.
792 _ranges.v = (IOVirtualRange *) buffers;
793 }
794 else {
795 #ifndef __LP64__
796 _rangesIsAllocated = true;
797 #endif /* !__LP64__ */
798 switch (type)
799 {
800 case kIOMemoryTypeUIO:
801 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
802 break;
803
804 #ifndef __LP64__
805 case kIOMemoryTypeVirtual64:
806 case kIOMemoryTypePhysical64:
807 if (count == 1
808 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
809 ) {
810 if (kIOMemoryTypeVirtual64 == type)
811 type = kIOMemoryTypeVirtual;
812 else
813 type = kIOMemoryTypePhysical;
814 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
815 _rangesIsAllocated = false;
816 _ranges.v = &_singleRange.v;
817 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
818 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
819 break;
820 }
821 _ranges.v64 = IONew(IOAddressRange, count);
822 if (!_ranges.v64)
823 return false;
824 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
825 break;
826 #endif /* !__LP64__ */
827 case kIOMemoryTypeVirtual:
828 case kIOMemoryTypePhysical:
829 if (count == 1) {
830 _flags |= kIOMemoryAsReference;
831 #ifndef __LP64__
832 _rangesIsAllocated = false;
833 #endif /* !__LP64__ */
834 _ranges.v = &_singleRange.v;
835 } else {
836 _ranges.v = IONew(IOVirtualRange, count);
837 if (!_ranges.v)
838 return false;
839 }
840 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
841 break;
842 }
843 }
844
845 // Find starting address within the vector of ranges
846 Ranges vec = _ranges;
847 UInt32 length = 0;
848 UInt32 pages = 0;
849 for (unsigned ind = 0; ind < count; ind++) {
850 user_addr_t addr;
851 IOPhysicalLength len;
852
853 // addr & len are returned by this function
854 getAddrLenForInd(addr, len, type, vec, ind);
855 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
856 len += length;
857 assert(len >= length); // Check for 32 bit wrap around
858 length = len;
859
860 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
861 {
862 ppnum_t highPage = atop_64(addr + len - 1);
863 if (highPage > _highestPage)
864 _highestPage = highPage;
865 }
866 }
867 _length = length;
868 _pages = pages;
869 _rangesCount = count;
870
871 // Auto-prepare memory at creation time.
872 // Implied completion when the descriptor is freed
873 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
874 _wireCount++; // Physical MDs are, by definition, wired
875 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
876 ioGMDData *dataP;
877 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
878
879 if (!initMemoryEntries(dataSize, mapper)) return false;
880 dataP = getDataP(_memoryEntries);
881 dataP->fPageCnt = _pages;
882
883 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
884 _memEntry = createNamedEntry();
885
886 if ((_flags & kIOMemoryAutoPrepare)
887 && prepare() != kIOReturnSuccess)
888 return false;
889 }
890 }
891
892 return true;
893 }
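#if 0
/* Usage sketch (illustrative only; ExampleRetarget is hypothetical): per the comment
 * above initWithOptions(), an existing IOGeneralMemoryDescriptor may be re-initialized
 * to point at new ranges; the code above completes any outstanding prepare()s and
 * frees the old range list before adopting the new one. */
static bool
ExampleRetarget(IOGeneralMemoryDescriptor * md,
                IOAddressRange * newRanges, UInt32 count, task_t task)
{
    return (md->initWithOptions(newRanges, count, 0, task,
                                kIOMemoryTypeVirtual64 | kIODirectionOutIn, /* mapper */ 0));
}
#endif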
894
895 /*
896 * free
897 *
898 * Free resources.
899 */
900 void IOGeneralMemoryDescriptor::free()
901 {
902 IOOptionBits type = _flags & kIOMemoryTypeMask;
903
904 if( reserved)
905 {
906 LOCK;
907 reserved->dp.memory = 0;
908 UNLOCK;
909 }
910 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
911 {
912 ioGMDData * dataP;
913 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
914 {
915 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
916 dataP->fMappedBase = 0;
917 }
918 }
919 else
920 {
921 while (_wireCount) complete();
922 }
923
924 if (_memoryEntries) _memoryEntries->release();
925
926 if (_ranges.v && !(kIOMemoryAsReference & _flags))
927 {
928 if (kIOMemoryTypeUIO == type)
929 uio_free((uio_t) _ranges.v);
930 #ifndef __LP64__
931 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
932 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
933 #endif /* !__LP64__ */
934 else
935 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
936
937 _ranges.v = NULL;
938 }
939
940 if (reserved)
941 {
942 if (reserved->dp.devicePager)
943 {
944 // memEntry holds a ref on the device pager which owns reserved
945 // (IOMemoryDescriptorReserved) so no reserved access after this point
946 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
947 }
948 else
949 IODelete(reserved, IOMemoryDescriptorReserved, 1);
950 reserved = NULL;
951 }
952
953 if (_memEntry)
954 ipc_port_release_send( (ipc_port_t) _memEntry );
955
956 if (_prepareLock)
957 IOLockFree(_prepareLock);
958
959 super::free();
960 }
961
962 #ifndef __LP64__
963 void IOGeneralMemoryDescriptor::unmapFromKernel()
964 {
965 panic("IOGMD::unmapFromKernel deprecated");
966 }
967
968 void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
969 {
970 panic("IOGMD::mapIntoKernel deprecated");
971 }
972 #endif /* !__LP64__ */
973
974 /*
975 * getDirection:
976 *
977 * Get the direction of the transfer.
978 */
979 IODirection IOMemoryDescriptor::getDirection() const
980 {
981 #ifndef __LP64__
982 if (_direction)
983 return _direction;
984 #endif /* !__LP64__ */
985 return (IODirection) (_flags & kIOMemoryDirectionMask);
986 }
987
988 /*
989 * getLength:
990 *
991 * Get the length of the transfer (over all ranges).
992 */
993 IOByteCount IOMemoryDescriptor::getLength() const
994 {
995 return _length;
996 }
997
998 void IOMemoryDescriptor::setTag( IOOptionBits tag )
999 {
1000 _tag = tag;
1001 }
1002
1003 IOOptionBits IOMemoryDescriptor::getTag( void )
1004 {
1005 return( _tag);
1006 }
1007
1008 #ifndef __LP64__
1009 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1010 IOPhysicalAddress
1011 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1012 {
1013 addr64_t physAddr = 0;
1014
1015 if( prepare() == kIOReturnSuccess) {
1016 physAddr = getPhysicalSegment64( offset, length );
1017 complete();
1018 }
1019
1020 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1021 }
1022 #endif /* !__LP64__ */
1023
1024 IOByteCount IOMemoryDescriptor::readBytes
1025 (IOByteCount offset, void *bytes, IOByteCount length)
1026 {
1027 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
1028 IOByteCount remaining;
1029
1030 // Assert that this entire I/O is within the available range
1031 assert(offset < _length);
1032 assert(offset + length <= _length);
1033 if (offset >= _length) {
1034 return 0;
1035 }
1036
1037 if (kIOMemoryThreadSafe & _flags)
1038 LOCK;
1039
1040 remaining = length = min(length, _length - offset);
1041 while (remaining) { // (process another target segment?)
1042 addr64_t srcAddr64;
1043 IOByteCount srcLen;
1044
1045 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
1046 if (!srcAddr64)
1047 break;
1048
1049 // Clip segment length to remaining
1050 if (srcLen > remaining)
1051 srcLen = remaining;
1052
1053 copypv(srcAddr64, dstAddr, srcLen,
1054 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1055
1056 dstAddr += srcLen;
1057 offset += srcLen;
1058 remaining -= srcLen;
1059 }
1060
1061 if (kIOMemoryThreadSafe & _flags)
1062 UNLOCK;
1063
1064 assert(!remaining);
1065
1066 return length - remaining;
1067 }
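#if 0
/* Usage sketch (illustrative only; ExamplePeek is hypothetical): copy the start of a
 * descriptor into a kernel buffer. readBytes() walks physical segments with copypv(),
 * so non-physical descriptors should be prepare()d (wired) around the copy. */
static IOByteCount
ExamplePeek(IOMemoryDescriptor * md, void * out, IOByteCount len)
{
    IOByteCount copied = 0;
    if (kIOReturnSuccess == md->prepare())
    {
        copied = md->readBytes(0, out, len);
        md->complete();
    }
    return (copied);
}
#endif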
1068
1069 IOByteCount IOMemoryDescriptor::writeBytes
1070 (IOByteCount offset, const void *bytes, IOByteCount length)
1071 {
1072 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
1073 IOByteCount remaining;
1074
1075 // Assert that this entire I/O is within the available range
1076 assert(offset < _length);
1077 assert(offset + length <= _length);
1078
1079 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1080
1081 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1082 return 0;
1083 }
1084
1085 if (kIOMemoryThreadSafe & _flags)
1086 LOCK;
1087
1088 remaining = length = min(length, _length - offset);
1089 while (remaining) { // (process another target segment?)
1090 addr64_t dstAddr64;
1091 IOByteCount dstLen;
1092
1093 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1094 if (!dstAddr64)
1095 break;
1096
1097 // Clip segment length to remaining
1098 if (dstLen > remaining)
1099 dstLen = remaining;
1100
1101 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1102 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1103
1104 srcAddr += dstLen;
1105 offset += dstLen;
1106 remaining -= dstLen;
1107 }
1108
1109 if (kIOMemoryThreadSafe & _flags)
1110 UNLOCK;
1111
1112 assert(!remaining);
1113
1114 return length - remaining;
1115 }
1116
1117 // osfmk/device/iokit_rpc.c
1118 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1119
1120 #ifndef __LP64__
1121 void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1122 {
1123 panic("IOGMD::setPosition deprecated");
1124 }
1125 #endif /* !__LP64__ */
1126
1127 static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1128
1129 uint64_t
1130 IOGeneralMemoryDescriptor::getPreparationID( void )
1131 {
1132 ioGMDData *dataP;
1133
1134 if (!_wireCount)
1135 return (kIOPreparationIDUnprepared);
1136
1137 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1138 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
1139 {
1140 IOMemoryDescriptor::setPreparationID();
1141 return (IOMemoryDescriptor::getPreparationID());
1142 }
1143
1144 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1145 return (kIOPreparationIDUnprepared);
1146
1147 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1148 {
1149 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1150 }
1151 return (dataP->fPreparationID);
1152 }
1153
1154 IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
1155 {
1156 if (!reserved)
1157 {
1158 reserved = IONew(IOMemoryDescriptorReserved, 1);
1159 if (reserved)
1160 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1161 }
1162 return (reserved);
1163 }
1164
1165 void IOMemoryDescriptor::setPreparationID( void )
1166 {
1167 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1168 {
1169 #if defined(__ppc__ )
1170 reserved->preparationID = gIOMDPreparationID++;
1171 #else
1172 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1173 #endif
1174 }
1175 }
1176
1177 uint64_t IOMemoryDescriptor::getPreparationID( void )
1178 {
1179 if (reserved)
1180 return (reserved->preparationID);
1181 else
1182 return (kIOPreparationIDUnsupported);
1183 }
1184
1185 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1186 {
1187 IOReturn err = kIOReturnSuccess;
1188 DMACommandOps params;
1189 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1190 ioGMDData *dataP;
1191
1192 params = (op & ~kIOMDDMACommandOperationMask & op);
1193 op &= kIOMDDMACommandOperationMask;
1194
1195 if (kIOMDDMAMap == op)
1196 {
1197 if (dataSize < sizeof(IOMDDMAMapArgs))
1198 return kIOReturnUnderrun;
1199
1200 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1201
1202 if (!_memoryEntries
1203 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1204
1205 if (_memoryEntries && data->fMapper)
1206 {
1207 bool remap;
1208 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1209 dataP = getDataP(_memoryEntries);
1210
1211 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1212 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1213
1214 remap = (dataP->fDMAMapNumAddressBits < 64)
1215 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1216 remap |= (dataP->fDMAMapAlignment > page_size);
1217 remap |= (!whole);
1218 if (remap || !dataP->fMappedBase)
1219 {
1220 // if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1221 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1222 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1223 {
1224 dataP->fMappedBase = data->fAlloc;
1225 data->fAllocCount = 0; // IOMD owns the alloc now
1226 }
1227 }
1228 else
1229 {
1230 data->fAlloc = dataP->fMappedBase;
1231 data->fAllocCount = 0; // IOMD owns the alloc
1232 }
1233 data->fMapContig = !dataP->fDiscontig;
1234 }
1235
1236 return (err);
1237 }
1238
1239 if (kIOMDAddDMAMapSpec == op)
1240 {
1241 if (dataSize < sizeof(IODMAMapSpecification))
1242 return kIOReturnUnderrun;
1243
1244 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1245
1246 if (!_memoryEntries
1247 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1248
1249 if (_memoryEntries)
1250 {
1251 dataP = getDataP(_memoryEntries);
1252 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1253 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1254 if (data->alignment > dataP->fDMAMapAlignment)
1255 dataP->fDMAMapAlignment = data->alignment;
1256 }
1257 return kIOReturnSuccess;
1258 }
1259
1260 if (kIOMDGetCharacteristics == op) {
1261
1262 if (dataSize < sizeof(IOMDDMACharacteristics))
1263 return kIOReturnUnderrun;
1264
1265 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1266 data->fLength = _length;
1267 data->fSGCount = _rangesCount;
1268 data->fPages = _pages;
1269 data->fDirection = getDirection();
1270 if (!_wireCount)
1271 data->fIsPrepared = false;
1272 else {
1273 data->fIsPrepared = true;
1274 data->fHighestPage = _highestPage;
1275 if (_memoryEntries)
1276 {
1277 dataP = getDataP(_memoryEntries);
1278 ioPLBlock *ioplList = getIOPLList(dataP);
1279 UInt count = getNumIOPL(_memoryEntries, dataP);
1280 if (count == 1)
1281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1282 }
1283 }
1284
1285 return kIOReturnSuccess;
1286
1287 #if IOMD_DEBUG_DMAACTIVE
1288 } else if (kIOMDDMAActive == op) {
1289 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1290 else {
1291 if (md->__iomd_reservedA)
1292 OSDecrementAtomic(&md->__iomd_reservedA);
1293 else
1294 panic("kIOMDSetDMAInactive");
1295 }
1296 #endif /* IOMD_DEBUG_DMAACTIVE */
1297
1298 } else if (kIOMDWalkSegments != op)
1299 return kIOReturnBadArgument;
1300
1301 // Get the next segment
1302 struct InternalState {
1303 IOMDDMAWalkSegmentArgs fIO;
1304 UInt fOffset2Index;
1305 UInt fIndex;
1306 UInt fNextOffset;
1307 } *isP;
1308
1309 // Find the next segment
1310 if (dataSize < sizeof(*isP))
1311 return kIOReturnUnderrun;
1312
1313 isP = (InternalState *) vData;
1314 UInt offset = isP->fIO.fOffset;
1315 bool mapped = isP->fIO.fMapped;
1316
1317 if (IOMapper::gSystem && mapped
1318 && (!(kIOMemoryHostOnly & _flags))
1319 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1320 // && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1321 {
1322 if (!_memoryEntries
1323 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1324
1325 dataP = getDataP(_memoryEntries);
1326 if (dataP->fMapper)
1327 {
1328 IODMAMapSpecification mapSpec;
1329 bzero(&mapSpec, sizeof(mapSpec));
1330 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1331 mapSpec.alignment = dataP->fDMAMapAlignment;
1332 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1333 if (kIOReturnSuccess != err) return (err);
1334 }
1335 }
1336
1337 if (offset >= _length)
1338 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1339
1340 // Validate the previous offset
1341 UInt ind, off2Ind = isP->fOffset2Index;
1342 if (!params
1343 && offset
1344 && (offset == isP->fNextOffset || off2Ind <= offset))
1345 ind = isP->fIndex;
1346 else
1347 ind = off2Ind = 0; // Start from beginning
1348
1349 UInt length;
1350 UInt64 address;
1351
1352
1353 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1354
1355 // Physical address based memory descriptor
1356 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1357
1358 // Find the range after the one that contains the offset
1359 mach_vm_size_t len;
1360 for (len = 0; off2Ind <= offset; ind++) {
1361 len = physP[ind].length;
1362 off2Ind += len;
1363 }
1364
1365 // Calculate length within range and starting address
1366 length = off2Ind - offset;
1367 address = physP[ind - 1].address + len - length;
1368
1369 if (true && mapped && _memoryEntries
1370 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1371 {
1372 address = dataP->fMappedBase + offset;
1373 }
1374 else
1375 {
1376 // see how far we can coalesce ranges
1377 while (ind < _rangesCount && address + length == physP[ind].address) {
1378 len = physP[ind].length;
1379 length += len;
1380 off2Ind += len;
1381 ind++;
1382 }
1383 }
1384
1385 // correct contiguous check overshoot
1386 ind--;
1387 off2Ind -= len;
1388 }
1389 #ifndef __LP64__
1390 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1391
1392 // Physical address based memory descriptor
1393 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1394
1395 // Find the range after the one that contains the offset
1396 mach_vm_size_t len;
1397 for (len = 0; off2Ind <= offset; ind++) {
1398 len = physP[ind].length;
1399 off2Ind += len;
1400 }
1401
1402 // Calculate length within range and starting address
1403 length = off2Ind - offset;
1404 address = physP[ind - 1].address + len - length;
1405
1406 if (true && mapped && _memoryEntries
1407 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1408 {
1409 address = dataP->fMappedBase + offset;
1410 }
1411 else
1412 {
1413 // see how far we can coalesce ranges
1414 while (ind < _rangesCount && address + length == physP[ind].address) {
1415 len = physP[ind].length;
1416 length += len;
1417 off2Ind += len;
1418 ind++;
1419 }
1420 }
1421 // correct contiguous check overshoot
1422 ind--;
1423 off2Ind -= len;
1424 }
1425 #endif /* !__LP64__ */
1426 else do {
1427 if (!_wireCount)
1428 panic("IOGMD: not wired for the IODMACommand");
1429
1430 assert(_memoryEntries);
1431
1432 dataP = getDataP(_memoryEntries);
1433 const ioPLBlock *ioplList = getIOPLList(dataP);
1434 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1435 upl_page_info_t *pageList = getPageList(dataP);
1436
1437 assert(numIOPLs > 0);
1438
1439 // Scan through iopl info blocks looking for block containing offset
1440 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1441 ind++;
1442
1443 // Go back to actual range as search goes past it
1444 ioPLBlock ioplInfo = ioplList[ind - 1];
1445 off2Ind = ioplInfo.fIOMDOffset;
1446
1447 if (ind < numIOPLs)
1448 length = ioplList[ind].fIOMDOffset;
1449 else
1450 length = _length;
1451 length -= offset; // Remainder within iopl
1452
1453 // Subtract offset till this iopl in total list
1454 offset -= off2Ind;
1455
1456 // If a mapped address is requested and this is a pre-mapped IOPL
1457 // then we just need to compute an offset relative to the mapped base.
1458 if (mapped && dataP->fMappedBase) {
1459 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1460 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
1461 continue; // Done; leave the do/while(false) now
1462 }
1463
1464 // The offset is rebased into the current iopl.
1465 // Now add the iopl 1st page offset.
1466 offset += ioplInfo.fPageOffset;
1467
1468 // For external UPLs the fPageInfo field points directly to
1469 // the upl's upl_page_info_t array.
1470 if (ioplInfo.fFlags & kIOPLExternUPL)
1471 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1472 else
1473 pageList = &pageList[ioplInfo.fPageInfo];
1474
1475 // Check for direct device non-paged memory
1476 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1477 address = ptoa_64(pageList->phys_addr) + offset;
1478 continue; // Done leave do/while(false) now
1479 }
1480
1481 // Now we need to compute the index into the pageList
1482 UInt pageInd = atop_32(offset);
1483 offset &= PAGE_MASK;
1484
1485 // Compute the starting address of this segment
1486 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1487 if (!pageAddr) {
1488 panic("!pageList phys_addr");
1489 }
1490
1491 address = ptoa_64(pageAddr) + offset;
1492
1493 // length is currently set to the length of the remainder of the iopl.
1494 // We need to check that the remainder of the iopl is contiguous.
1495 // This is indicated by pageList[ind].phys_addr being sequential.
1496 IOByteCount contigLength = PAGE_SIZE - offset;
1497 while (contigLength < length
1498 && ++pageAddr == pageList[++pageInd].phys_addr)
1499 {
1500 contigLength += PAGE_SIZE;
1501 }
1502
1503 if (contigLength < length)
1504 length = contigLength;
1505
1506
1507 assert(address);
1508 assert(length);
1509
1510 } while (false);
1511
1512 // Update return values and state
1513 isP->fIO.fIOVMAddr = address;
1514 isP->fIO.fLength = length;
1515 isP->fIndex = ind;
1516 isP->fOffset2Index = off2Ind;
1517 isP->fNextOffset = isP->fIO.fOffset + length;
1518
1519 return kIOReturnSuccess;
1520 }
1521
1522 addr64_t
1523 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1524 {
1525 IOReturn ret;
1526 addr64_t address = 0;
1527 IOByteCount length = 0;
1528 IOMapper * mapper = gIOSystemMapper;
1529 IOOptionBits type = _flags & kIOMemoryTypeMask;
1530
1531 if (lengthOfSegment)
1532 *lengthOfSegment = 0;
1533
1534 if (offset >= _length)
1535 return 0;
1536
1537 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1538 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1539 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1540 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
1541
1542 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1543 {
1544 unsigned rangesIndex = 0;
1545 Ranges vec = _ranges;
1546 user_addr_t addr;
1547
1548 // Find starting address within the vector of ranges
1549 for (;;) {
1550 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1551 if (offset < length)
1552 break;
1553 offset -= length; // (make offset relative)
1554 rangesIndex++;
1555 }
1556
1557 // Now that we have the starting range,
1558 // lets find the last contiguous range
1559 addr += offset;
1560 length -= offset;
1561
1562 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1563 user_addr_t newAddr;
1564 IOPhysicalLength newLen;
1565
1566 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1567 if (addr + length != newAddr)
1568 break;
1569 length += newLen;
1570 }
1571 if (addr)
1572 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1573 }
1574 else
1575 {
1576 IOMDDMAWalkSegmentState _state;
1577 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
1578
1579 state->fOffset = offset;
1580 state->fLength = _length - offset;
1581 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
1582
1583 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1584
1585 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1586 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1587 ret, this, state->fOffset,
1588 state->fIOVMAddr, state->fLength);
1589 if (kIOReturnSuccess == ret)
1590 {
1591 address = state->fIOVMAddr;
1592 length = state->fLength;
1593 }
1594
1595 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1596 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1597
1598 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1599 {
1600 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1601 {
1602 addr64_t origAddr = address;
1603 IOByteCount origLen = length;
1604
1605 address = mapper->mapAddr(origAddr);
1606 length = page_size - (address & (page_size - 1));
1607 while ((length < origLen)
1608 && ((address + length) == mapper->mapAddr(origAddr + length)))
1609 length += page_size;
1610 if (length > origLen)
1611 length = origLen;
1612 }
1613 }
1614 }
1615
1616 if (!address)
1617 length = 0;
1618
1619 if (lengthOfSegment)
1620 *lengthOfSegment = length;
1621
1622 return (address);
1623 }
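#if 0
/* Usage sketch (illustrative only; ExampleDumpSegments is hypothetical): walk a
 * prepared descriptor's physical segments. Passing kIOMemoryMapperNone asks for CPU
 * physical addresses rather than system-mapper (IOMMU) addresses, per the code above. */
static void
ExampleDumpSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0, segLen = 0;
    while (offset < md->getLength())
    {
        addr64_t phys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!phys || !segLen)
            break;
        DEBG("segment 0x%qx len 0x%qx\n", phys, (uint64_t) segLen);
        offset += segLen;
    }
}
#endif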
1624
1625 #ifndef __LP64__
1626 addr64_t
1627 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
1628 {
1629 addr64_t address = 0;
1630
1631 if (options & _kIOMemorySourceSegment)
1632 {
1633 address = getSourceSegment(offset, lengthOfSegment);
1634 }
1635 else if (options & kIOMemoryMapperNone)
1636 {
1637 address = getPhysicalSegment64(offset, lengthOfSegment);
1638 }
1639 else
1640 {
1641 address = getPhysicalSegment(offset, lengthOfSegment);
1642 }
1643
1644 return (address);
1645 }
1646
1647 addr64_t
1648 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1649 {
1650 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1651 }
1652
1653 IOPhysicalAddress
1654 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1655 {
1656 addr64_t address = 0;
1657 IOByteCount length = 0;
1658
1659 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1660
1661 if (lengthOfSegment)
1662 length = *lengthOfSegment;
1663
1664 if ((address + length) > 0x100000000ULL)
1665 {
1666 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1667 address, (long) length, (getMetaClass())->getClassName());
1668 }
1669
1670 return ((IOPhysicalAddress) address);
1671 }
1672
1673 addr64_t
1674 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1675 {
1676 IOPhysicalAddress phys32;
1677 IOByteCount length;
1678 addr64_t phys64;
1679 IOMapper * mapper = 0;
1680
1681 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1682 if (!phys32)
1683 return 0;
1684
1685 if (gIOSystemMapper)
1686 mapper = gIOSystemMapper;
1687
1688 if (mapper)
1689 {
1690 IOByteCount origLen;
1691
1692 phys64 = mapper->mapAddr(phys32);
1693 origLen = *lengthOfSegment;
1694 length = page_size - (phys64 & (page_size - 1));
1695 while ((length < origLen)
1696 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1697 length += page_size;
1698 if (length > origLen)
1699 length = origLen;
1700
1701 *lengthOfSegment = length;
1702 }
1703 else
1704 phys64 = (addr64_t) phys32;
1705
1706 return phys64;
1707 }
1708
1709 IOPhysicalAddress
1710 IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1711 {
1712 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
1713 }
1714
1715 IOPhysicalAddress
1716 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1717 {
1718 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1719 }
1720
1721 void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1722 IOByteCount * lengthOfSegment)
1723 {
1724 if (_task == kernel_task)
1725 return (void *) getSourceSegment(offset, lengthOfSegment);
1726 else
1727 panic("IOGMD::getVirtualSegment deprecated");
1728
1729 return 0;
1730 }
1731 #endif /* !__LP64__ */
1732
1733 IOReturn
1734 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1735 {
1736 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1737 DMACommandOps params;
1738 IOReturn err;
1739
1740 params = (op & ~kIOMDDMACommandOperationMask & op);
1741 op &= kIOMDDMACommandOperationMask;
1742
1743 if (kIOMDGetCharacteristics == op) {
1744 if (dataSize < sizeof(IOMDDMACharacteristics))
1745 return kIOReturnUnderrun;
1746
1747 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1748 data->fLength = getLength();
1749 data->fSGCount = 0;
1750 data->fDirection = getDirection();
1751 data->fIsPrepared = true; // Assume prepared - fails safe
1752 }
1753 else if (kIOMDWalkSegments == op) {
1754 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1755 return kIOReturnUnderrun;
1756
1757 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1758 IOByteCount offset = (IOByteCount) data->fOffset;
1759
1760 IOPhysicalLength length;
1761 if (data->fMapped && IOMapper::gSystem)
1762 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
1763 else
1764 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
1765 data->fLength = length;
1766 }
1767 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1768 else if (kIOMDDMAMap == op)
1769 {
1770 if (dataSize < sizeof(IOMDDMAMapArgs))
1771 return kIOReturnUnderrun;
1772 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1773
1774 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1775
1776 data->fMapContig = true;
1777 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1778 return (err);
1779 }
1780 else return kIOReturnBadArgument;
1781
1782 return kIOReturnSuccess;
1783 }
1784
1785 static IOReturn
1786 purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1787 {
1788 IOReturn err = kIOReturnSuccess;
1789
1790 *control = VM_PURGABLE_SET_STATE;
1791
1792 enum { kIOMemoryPurgeableControlMask = 15 };
1793
1794 switch (kIOMemoryPurgeableControlMask & newState)
1795 {
1796 case kIOMemoryPurgeableKeepCurrent:
1797 *control = VM_PURGABLE_GET_STATE;
1798 break;
1799
1800 case kIOMemoryPurgeableNonVolatile:
1801 *state = VM_PURGABLE_NONVOLATILE;
1802 break;
1803 case kIOMemoryPurgeableVolatile:
1804 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
1805 break;
1806 case kIOMemoryPurgeableEmpty:
1807 *state = VM_PURGABLE_EMPTY;
1808 break;
1809 default:
1810 err = kIOReturnBadArgument;
1811 break;
1812 }
1813 return (err);
1814 }
1815
1816 static IOReturn
1817 purgeableStateBits(int * state)
1818 {
1819 IOReturn err = kIOReturnSuccess;
1820
1821 switch (VM_PURGABLE_STATE_MASK & *state)
1822 {
1823 case VM_PURGABLE_NONVOLATILE:
1824 *state = kIOMemoryPurgeableNonVolatile;
1825 break;
1826 case VM_PURGABLE_VOLATILE:
1827 *state = kIOMemoryPurgeableVolatile;
1828 break;
1829 case VM_PURGABLE_EMPTY:
1830 *state = kIOMemoryPurgeableEmpty;
1831 break;
1832 default:
1833 *state = kIOMemoryPurgeableNonVolatile;
1834 err = kIOReturnNotReady;
1835 break;
1836 }
1837 return (err);
1838 }
1839
1840 IOReturn
1841 IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1842 IOOptionBits * oldState )
1843 {
1844 IOReturn err = kIOReturnSuccess;
1845 vm_purgable_t control;
1846 int state;
1847
1848 if (_memEntry)
1849 {
1850 err = super::setPurgeable(newState, oldState);
1851 }
1852 else
1853 {
1854 if (kIOMemoryThreadSafe & _flags)
1855 LOCK;
1856 do
1857 {
1858 // Find the appropriate vm_map for the given task
1859 vm_map_t curMap;
1860 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1861 {
1862 err = kIOReturnNotReady;
1863 break;
1864 }
1865 else if (!_task)
1866 {
1867 err = kIOReturnUnsupported;
1868 break;
1869 }
1870 else
1871 curMap = get_task_map(_task);
1872
1873 // can only do one range
1874 Ranges vec = _ranges;
1875 IOOptionBits type = _flags & kIOMemoryTypeMask;
1876 user_addr_t addr;
1877 IOByteCount len;
1878 getAddrLenForInd(addr, len, type, vec, 0);
1879
1880 err = purgeableControlBits(newState, &control, &state);
1881 if (kIOReturnSuccess != err)
1882 break;
1883 err = mach_vm_purgable_control(curMap, addr, control, &state);
1884 if (oldState)
1885 {
1886 if (kIOReturnSuccess == err)
1887 {
1888 err = purgeableStateBits(&state);
1889 *oldState = state;
1890 }
1891 }
1892 }
1893 while (false);
1894 if (kIOMemoryThreadSafe & _flags)
1895 UNLOCK;
1896 }
1897 return (err);
1898 }
1899
1900 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1901 IOOptionBits * oldState )
1902 {
1903 IOReturn err = kIOReturnSuccess;
1904 vm_purgable_t control;
1905 int state;
1906
1907 if (kIOMemoryThreadSafe & _flags)
1908 LOCK;
1909
1910 do
1911 {
1912 if (!_memEntry)
1913 {
1914 err = kIOReturnNotReady;
1915 break;
1916 }
1917 err = purgeableControlBits(newState, &control, &state);
1918 if (kIOReturnSuccess != err)
1919 break;
1920 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1921 if (oldState)
1922 {
1923 if (kIOReturnSuccess == err)
1924 {
1925 err = purgeableStateBits(&state);
1926 *oldState = state;
1927 }
1928 }
1929 }
1930 while (false);
1931
1932 if (kIOMemoryThreadSafe & _flags)
1933 UNLOCK;
1934
1935 return (err);
1936 }
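#if 0
/* Usage sketch (illustrative only; ExampleMarkVolatile is hypothetical): mark a
 * descriptor's backing memory volatile and later find out whether the VM reclaimed
 * it, using the purgeable control implemented above. */
static bool
ExampleMarkVolatile(IOMemoryDescriptor * md)
{
    IOOptionBits oldState = 0;
    if (kIOReturnSuccess != md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState))
        return (false);
    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    return (kIOMemoryPurgeableEmpty != oldState);   // false if the pages were emptied
}
#endif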
1937
1938
1939 IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
1940 IOByteCount * dirtyPageCount )
1941 {
1942 IOReturn err = kIOReturnSuccess;
1943 unsigned int _residentPageCount, _dirtyPageCount;
1944
1945 if (kIOMemoryThreadSafe & _flags) LOCK;
1946
1947 do
1948 {
1949 if (!_memEntry)
1950 {
1951 err = kIOReturnNotReady;
1952 break;
1953 }
1954 if ((residentPageCount == NULL) && (dirtyPageCount == NULL))
1955 {
1956 err = kIOReturnBadArgument;
1957 break;
1958 }
1959
1960 err = mach_memory_entry_get_page_counts((ipc_port_t) _memEntry,
1961 residentPageCount ? &_residentPageCount : NULL,
1962 dirtyPageCount ? &_dirtyPageCount : NULL);
1963 if (kIOReturnSuccess != err) break;
1964 if (residentPageCount) *residentPageCount = _residentPageCount;
1965 if (dirtyPageCount) *dirtyPageCount = _dirtyPageCount;
1966 }
1967 while (false);
1968
1969 if (kIOMemoryThreadSafe & _flags) UNLOCK;
1970
1971 return (err);
1972 }
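/*
 * Illustrative sketch (editor's addition): querying how much of a descriptor
 * is resident or dirty via getPageCounts(). Note that despite the IOByteCount
 * type of the out parameters, the values stored above are page counts. The
 * descriptor `md` is hypothetical.
 */
#if 0 /* example only */
IOByteCount residentPages, dirtyPages;
if (kIOReturnSuccess == md->getPageCounts(&residentPages, &dirtyPages))
{
    IOLog("resident %llu dirty %llu pages\n",
          (uint64_t) residentPages, (uint64_t) dirtyPages);
}
#endif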
1973
1974
1975 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1976 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1977
1978 static void SetEncryptOp(addr64_t pa, unsigned int count)
1979 {
1980 ppnum_t page, end;
1981
1982 page = atop_64(round_page_64(pa));
1983 end = atop_64(trunc_page_64(pa + count));
1984 for (; page < end; page++)
1985 {
1986 pmap_clear_noencrypt(page);
1987 }
1988 }
1989
1990 static void ClearEncryptOp(addr64_t pa, unsigned int count)
1991 {
1992 ppnum_t page, end;
1993
1994 page = atop_64(round_page_64(pa));
1995 end = atop_64(trunc_page_64(pa + count));
1996 for (; page < end; page++)
1997 {
1998 pmap_set_noencrypt(page);
1999 }
2000 }
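/*
 * Worked example (editor's addition) for the rounding above: with 4 KB pages,
 * a call with pa = 0x1800 and count = 0x3000 covers bytes [0x1800, 0x4800).
 * round_page_64(0x1800) = 0x2000 and trunc_page_64(0x4800) = 0x4000, so only
 * pages 2 and 3 - the pages wholly contained in the range - have their
 * no-encrypt attribute changed; the partial pages at either end are untouched.
 */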
2001
2002 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2003 IOByteCount offset, IOByteCount length )
2004 {
2005 IOByteCount remaining;
2006 unsigned int res;
2007 void (*func)(addr64_t pa, unsigned int count) = 0;
2008
2009 switch (options)
2010 {
2011 case kIOMemoryIncoherentIOFlush:
2012 func = &dcache_incoherent_io_flush64;
2013 break;
2014 case kIOMemoryIncoherentIOStore:
2015 func = &dcache_incoherent_io_store64;
2016 break;
2017
2018 case kIOMemorySetEncrypted:
2019 func = &SetEncryptOp;
2020 break;
2021 case kIOMemoryClearEncrypted:
2022 func = &ClearEncryptOp;
2023 break;
2024 }
2025
2026 if (!func)
2027 return (kIOReturnUnsupported);
2028
2029 if (kIOMemoryThreadSafe & _flags)
2030 LOCK;
2031
2032 res = 0x0UL;
2033 remaining = length = min(length, getLength() - offset);
2034 while (remaining)
2035 // (process another target segment?)
2036 {
2037 addr64_t dstAddr64;
2038 IOByteCount dstLen;
2039
2040 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
2041 if (!dstAddr64)
2042 break;
2043
2044 // Clip segment length to remaining
2045 if (dstLen > remaining)
2046 dstLen = remaining;
2047
2048 (*func)(dstAddr64, dstLen);
2049
2050 offset += dstLen;
2051 remaining -= dstLen;
2052 }
2053
2054 if (kIOMemoryThreadSafe & _flags)
2055 UNLOCK;
2056
2057 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2058 }
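/*
 * Illustrative sketch (editor's addition): flushing a descriptor's backing
 * pages after a device has written into them via incoherent DMA, using
 * performOperation() above. The descriptor `md` is hypothetical and assumed
 * to be prepared.
 */
#if 0 /* example only */
IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
if (kIOReturnSuccess != ret)
    IOLog("incoherent flush failed 0x%x\n", ret);
#endif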
2059
2060 #if defined(__i386__) || defined(__x86_64__)
2061 extern vm_offset_t first_avail;
2062 #define io_kernel_static_end first_avail
2063 #else
2064 #error io_kernel_static_end is undefined for this architecture
2065 #endif
2066
2067 static kern_return_t
2068 io_get_kernel_static_upl(
2069 vm_map_t /* map */,
2070 uintptr_t offset,
2071 vm_size_t *upl_size,
2072 upl_t *upl,
2073 upl_page_info_array_t page_list,
2074 unsigned int *count,
2075 ppnum_t *highest_page)
2076 {
2077 unsigned int pageCount, page;
2078 ppnum_t phys;
2079 ppnum_t highestPage = 0;
2080
2081 pageCount = atop_32(*upl_size);
2082 if (pageCount > *count)
2083 pageCount = *count;
2084
2085 *upl = NULL;
2086
2087 for (page = 0; page < pageCount; page++)
2088 {
2089 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2090 if (!phys)
2091 break;
2092 page_list[page].phys_addr = phys;
2093 page_list[page].pageout = 0;
2094 page_list[page].absent = 0;
2095 page_list[page].dirty = 0;
2096 page_list[page].precious = 0;
2097 page_list[page].device = 0;
2098 if (phys > highestPage)
2099 highestPage = phys;
2100 }
2101
2102 *highest_page = highestPage;
2103
2104 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2105 }
2106
2107 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2108 {
2109 IOOptionBits type = _flags & kIOMemoryTypeMask;
2110 IOReturn error = kIOReturnCannotWire;
2111 ioGMDData *dataP;
2112 upl_page_info_array_t pageInfo;
2113 ppnum_t mapBase;
2114 ipc_port_t sharedMem;
2115
2116 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
2117
2118 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2119 forDirection = (IODirection) (forDirection | getDirection());
2120
2121 int uplFlags; // This Mem Desc's default flags for upl creation
2122 switch (kIODirectionOutIn & forDirection)
2123 {
2124 case kIODirectionOut:
2125 // Pages do not need to be marked as dirty on commit
2126 uplFlags = UPL_COPYOUT_FROM;
2127 break;
2128
2129 case kIODirectionIn:
2130 default:
2131 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2132 break;
2133 }
2134
2135 if (_wireCount)
2136 {
2137 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2138 {
2139 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2140 error = kIOReturnNotWritable;
2141 }
2142 else error = kIOReturnSuccess;
2143 return (error);
2144 }
2145
2146 dataP = getDataP(_memoryEntries);
2147 IOMapper *mapper;
2148 mapper = dataP->fMapper;
2149 dataP->fMappedBase = 0;
2150
2151 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2152 if (kIODirectionPrepareToPhys32 & forDirection)
2153 {
2154 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2155 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2156 }
2157 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2158 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2159
2160 mapBase = 0;
2161 sharedMem = (ipc_port_t) _memEntry;
2162
2163 // Note that appendBytes(NULL) zeros the data up to the desired length.
2164 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2165 dataP = 0;
2166
2167 // Find the appropriate vm_map for the given task
2168 vm_map_t curMap;
2169 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2170 curMap = 0;
2171 else
2172 { curMap = get_task_map(_task); }
2173
2174 // Iterate over the vector of virtual ranges
2175 Ranges vec = _ranges;
2176 unsigned int pageIndex = 0;
2177 IOByteCount mdOffset = 0;
2178 ppnum_t highestPage = 0;
2179
2180 for (UInt range = 0; range < _rangesCount; range++) {
2181 ioPLBlock iopl;
2182 user_addr_t startPage;
2183 IOByteCount numBytes;
2184 ppnum_t highPage = 0;
2185
2186 // Get the startPage address and length of vec[range]
2187 getAddrLenForInd(startPage, numBytes, type, vec, range);
2188 iopl.fPageOffset = startPage & PAGE_MASK;
2189 numBytes += iopl.fPageOffset;
2190 startPage = trunc_page_64(startPage);
2191
2192 if (mapper)
2193 iopl.fMappedPage = mapBase + pageIndex;
2194 else
2195 iopl.fMappedPage = 0;
2196
2197 // Iterate over the current range, creating UPLs
2198 while (numBytes) {
2199 vm_address_t kernelStart = (vm_address_t) startPage;
2200 vm_map_t theMap;
2201 if (curMap)
2202 theMap = curMap;
2203 else if (!sharedMem) {
2204 assert(_task == kernel_task);
2205 theMap = IOPageableMapForAddress(kernelStart);
2206 }
2207 else
2208 theMap = NULL;
2209
2210 int ioplFlags = uplFlags;
2211 dataP = getDataP(_memoryEntries);
2212 pageInfo = getPageList(dataP);
2213 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2214
2215 vm_size_t ioplSize = round_page(numBytes);
2216 unsigned int numPageInfo = atop_32(ioplSize);
2217
2218 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
2219 error = io_get_kernel_static_upl(theMap,
2220 kernelStart,
2221 &ioplSize,
2222 &iopl.fIOPL,
2223 baseInfo,
2224 &numPageInfo,
2225 &highPage);
2226 }
2227 else if (sharedMem) {
2228 error = memory_object_iopl_request(sharedMem,
2229 ptoa_32(pageIndex),
2230 &ioplSize,
2231 &iopl.fIOPL,
2232 baseInfo,
2233 &numPageInfo,
2234 &ioplFlags);
2235 }
2236 else {
2237 assert(theMap);
2238 error = vm_map_create_upl(theMap,
2239 startPage,
2240 (upl_size_t*)&ioplSize,
2241 &iopl.fIOPL,
2242 baseInfo,
2243 &numPageInfo,
2244 &ioplFlags);
2245 }
2246
2247 assert(ioplSize);
2248 if (error != KERN_SUCCESS)
2249 goto abortExit;
2250
2251 if (iopl.fIOPL)
2252 highPage = upl_get_highest_page(iopl.fIOPL);
2253 if (highPage > highestPage)
2254 highestPage = highPage;
2255
2256 error = kIOReturnCannotWire;
2257
2258 if (baseInfo->device) {
2259 numPageInfo = 1;
2260 iopl.fFlags = kIOPLOnDevice;
2261 }
2262 else {
2263 iopl.fFlags = 0;
2264 }
2265
2266 iopl.fIOMDOffset = mdOffset;
2267 iopl.fPageInfo = pageIndex;
2268 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
2269
2270 #if 0
2271 // previously removed the UPL for auto prepares here, to cope with errant code
2272 // that freed the memory before releasing the descriptor pointing at it
2273 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2274 {
2275 upl_commit(iopl.fIOPL, 0, 0);
2276 upl_deallocate(iopl.fIOPL);
2277 iopl.fIOPL = 0;
2278 }
2279 #endif
2280
2281 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
2282 // Clean up the partially created and unsaved IOPL
2283 if (iopl.fIOPL) {
2284 upl_abort(iopl.fIOPL, 0);
2285 upl_deallocate(iopl.fIOPL);
2286 }
2287 goto abortExit;
2288 }
2289 dataP = 0;
2290
2291 // Check for multiple IOPLs in one virtual range
2292 pageIndex += numPageInfo;
2293 mdOffset -= iopl.fPageOffset;
2294 if (ioplSize < numBytes) {
2295 numBytes -= ioplSize;
2296 startPage += ioplSize;
2297 mdOffset += ioplSize;
2298 iopl.fPageOffset = 0;
2299 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
2300 }
2301 else {
2302 mdOffset += numBytes;
2303 break;
2304 }
2305 }
2306 }
2307
2308 _highestPage = highestPage;
2309
2310 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2311
2312 return kIOReturnSuccess;
2313
2314 abortExit:
2315 {
2316 dataP = getDataP(_memoryEntries);
2317 UInt done = getNumIOPL(_memoryEntries, dataP);
2318 ioPLBlock *ioplList = getIOPLList(dataP);
2319
2320 for (UInt range = 0; range < done; range++)
2321 {
2322 if (ioplList[range].fIOPL) {
2323 upl_abort(ioplList[range].fIOPL, 0);
2324 upl_deallocate(ioplList[range].fIOPL);
2325 }
2326 }
2327 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2328 }
2329
2330 if (error == KERN_FAILURE)
2331 error = kIOReturnCannotWire;
2332 else if (error == KERN_MEMORY_ERROR)
2333 error = kIOReturnNoResources;
2334
2335 return error;
2336 }
2337
2338 bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2339 {
2340 ioGMDData * dataP;
2341 unsigned dataSize = size;
2342
2343 if (!_memoryEntries) {
2344 _memoryEntries = OSData::withCapacity(dataSize);
2345 if (!_memoryEntries)
2346 return false;
2347 }
2348 else if (!_memoryEntries->initWithCapacity(dataSize))
2349 return false;
2350
2351 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2352 dataP = getDataP(_memoryEntries);
2353
2354 if (mapper == kIOMapperWaitSystem) {
2355 IOMapper::checkForSystemMapper();
2356 mapper = IOMapper::gSystem;
2357 }
2358 dataP->fMapper = mapper;
2359 dataP->fPageCnt = 0;
2360 dataP->fMappedBase = 0;
2361 dataP->fDMAMapNumAddressBits = 64;
2362 dataP->fDMAMapAlignment = 0;
2363 dataP->fPreparationID = kIOPreparationIDUnprepared;
2364 dataP->fDiscontig = false;
2365
2366 return (true);
2367 }
2368
2369 IOReturn IOMemoryDescriptor::dmaMap(
2370 IOMapper * mapper,
2371 const IODMAMapSpecification * mapSpec,
2372 uint64_t offset,
2373 uint64_t length,
2374 uint64_t * address,
2375 ppnum_t * mapPages)
2376 {
2377 IOMDDMAWalkSegmentState walkState;
2378 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2379 IOOptionBits mdOp;
2380 IOReturn ret;
2381 IOPhysicalLength segLen;
2382 addr64_t phys, align, pageOffset;
2383 ppnum_t base, pageIndex, pageCount;
2384 uint64_t index;
2385 uint32_t mapOptions = 0;
2386
2387 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2388
2389 walkArgs->fMapped = false;
2390 mdOp = kIOMDFirstSegment;
2391 pageCount = 0;
2392 for (index = 0; index < length; )
2393 {
2394 if (index && (page_mask & (index + pageOffset))) break;
2395
2396 walkArgs->fOffset = offset + index;
2397 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2398 mdOp = kIOMDWalkSegments;
2399 if (ret != kIOReturnSuccess) break;
2400 phys = walkArgs->fIOVMAddr;
2401 segLen = walkArgs->fLength;
2402
2403 align = (phys & page_mask);
2404 if (!index) pageOffset = align;
2405 else if (align) break;
2406 pageCount += atop_64(round_page_64(align + segLen));
2407 index += segLen;
2408 }
2409
2410 if (index < length) return (kIOReturnVMError);
2411
2412 base = mapper->iovmMapMemory(this, offset, pageCount,
2413 mapOptions, NULL, mapSpec);
2414
2415 if (!base) return (kIOReturnNoResources);
2416
2417 mdOp = kIOMDFirstSegment;
2418 for (pageIndex = 0, index = 0; index < length; )
2419 {
2420 walkArgs->fOffset = offset + index;
2421 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2422 mdOp = kIOMDWalkSegments;
2423 if (ret != kIOReturnSuccess) break;
2424 phys = walkArgs->fIOVMAddr;
2425 segLen = walkArgs->fLength;
2426
2427 ppnum_t page = atop_64(phys);
2428 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2429 while (count--)
2430 {
2431 mapper->iovmInsert(base, pageIndex, page);
2432 page++;
2433 pageIndex++;
2434 }
2435 index += segLen;
2436 }
2437 if (pageIndex != pageCount) panic("pageIndex");
2438
2439 *address = ptoa_64(base) + pageOffset;
2440 if (mapPages) *mapPages = pageCount;
2441
2442 return (kIOReturnSuccess);
2443 }
2444
2445 IOReturn IOGeneralMemoryDescriptor::dmaMap(
2446 IOMapper * mapper,
2447 const IODMAMapSpecification * mapSpec,
2448 uint64_t offset,
2449 uint64_t length,
2450 uint64_t * address,
2451 ppnum_t * mapPages)
2452 {
2453 IOReturn err = kIOReturnSuccess;
2454 ioGMDData * dataP;
2455 IOOptionBits type = _flags & kIOMemoryTypeMask;
2456
2457 *address = 0;
2458 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2459
2460 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2461 || offset || (length != _length))
2462 {
2463 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2464 }
2465 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2466 {
2467 const ioPLBlock * ioplList = getIOPLList(dataP);
2468 upl_page_info_t * pageList;
2469 uint32_t mapOptions = 0;
2470 ppnum_t base;
2471
2472 IODMAMapSpecification mapSpec;
2473 bzero(&mapSpec, sizeof(mapSpec));
2474 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2475 mapSpec.alignment = dataP->fDMAMapAlignment;
2476
2477 // For external UPLs the fPageInfo field points directly to
2478 // the upl's upl_page_info_t array.
2479 if (ioplList->fFlags & kIOPLExternUPL)
2480 {
2481 pageList = (upl_page_info_t *) ioplList->fPageInfo;
2482 mapOptions |= kIODMAMapPagingPath;
2483 }
2484 else
2485 pageList = getPageList(dataP);
2486
2487 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2488
2489 // Check for direct device non-paged memory
2490 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2491
2492 base = mapper->iovmMapMemory(
2493 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2494 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2495 if (mapPages) *mapPages = _pages;
2496 }
2497
2498 return (err);
2499 }
2500
2501 /*
2502 * prepare
2503 *
2504 * Prepare the memory for an I/O transfer. This involves paging in
2505 * the memory, if necessary, and wiring it down for the duration of
2506 * the transfer. The complete() method completes the processing of
2507 * the memory after the I/O transfer finishes. This method needn't
2508 * be called for non-pageable memory.
2509 */
2510
2511 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2512 {
2513 IOReturn error = kIOReturnSuccess;
2514 IOOptionBits type = _flags & kIOMemoryTypeMask;
2515
2516 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2517 return kIOReturnSuccess;
2518
2519 if (_prepareLock)
2520 IOLockLock(_prepareLock);
2521
2522 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
2523 {
2524 error = wireVirtual(forDirection);
2525 }
2526
2527 if (kIOReturnSuccess == error)
2528 {
2529 if (1 == ++_wireCount)
2530 {
2531 if (kIOMemoryClearEncrypt & _flags)
2532 {
2533 performOperation(kIOMemoryClearEncrypted, 0, _length);
2534 }
2535 }
2536 }
2537
2538 if (_prepareLock)
2539 IOLockUnlock(_prepareLock);
2540
2541 return error;
2542 }
2543
2544 /*
2545 * complete
2546 *
2547 * Complete processing of the memory after an I/O transfer finishes.
2548 * This method should not be called unless a prepare was previously
2549 * issued; the prepare() and complete() must occur in pairs, before
2550 * and after an I/O transfer involving pageable memory.
2551 */
2552
2553 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2554 {
2555 IOOptionBits type = _flags & kIOMemoryTypeMask;
2556
2557 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2558 return kIOReturnSuccess;
2559
2560 if (_prepareLock)
2561 IOLockLock(_prepareLock);
2562
2563 assert(_wireCount);
2564
2565 if (_wireCount)
2566 {
2567 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2568 {
2569 performOperation(kIOMemorySetEncrypted, 0, _length);
2570 }
2571
2572 _wireCount--;
2573 if (!_wireCount)
2574 {
2575 IOOptionBits type = _flags & kIOMemoryTypeMask;
2576 ioGMDData * dataP = getDataP(_memoryEntries);
2577 ioPLBlock *ioplList = getIOPLList(dataP);
2578 UInt count = getNumIOPL(_memoryEntries, dataP);
2579
2580 #if IOMD_DEBUG_DMAACTIVE
2581 if (__iomd_reservedA) panic("complete() while dma active");
2582 #endif /* IOMD_DEBUG_DMAACTIVE */
2583
2584 if (dataP->fMappedBase) {
2585 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2586 dataP->fMappedBase = 0;
2587 }
2588 // Only complete IOPLs that we created, i.e. those for the virtual types
2589 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2590 for (UInt ind = 0; ind < count; ind++)
2591 if (ioplList[ind].fIOPL) {
2592 upl_commit(ioplList[ind].fIOPL, 0, 0);
2593 upl_deallocate(ioplList[ind].fIOPL);
2594 }
2595 } else if (kIOMemoryTypeUPL == type) {
2596 upl_set_referenced(ioplList[0].fIOPL, false);
2597 }
2598
2599 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
2600
2601 dataP->fPreparationID = kIOPreparationIDUnprepared;
2602 }
2603 }
2604
2605 if (_prepareLock)
2606 IOLockUnlock(_prepareLock);
2607
2608 return kIOReturnSuccess;
2609 }
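/*
 * Illustrative sketch (editor's addition): the prepare()/complete() pairing
 * described in the comments above, as a driver would typically use it for a
 * pageable user buffer. The names userBuffer and userLength are hypothetical.
 */
#if 0 /* example only */
IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                              userBuffer, userLength, kIODirectionOut, current_task());
if (md)
{
    if (kIOReturnSuccess == md->prepare())
    {
        // ... program the DMA transfer against the wired pages ...
        md->complete();
    }
    md->release();
}
#endif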
2610
2611 IOReturn IOGeneralMemoryDescriptor::doMap(
2612 vm_map_t __addressMap,
2613 IOVirtualAddress * __address,
2614 IOOptionBits options,
2615 IOByteCount __offset,
2616 IOByteCount __length )
2617
2618 {
2619 #ifndef __LP64__
2620 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2621 #endif /* !__LP64__ */
2622
2623 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2624 mach_vm_size_t offset = mapping->fOffset + __offset;
2625 mach_vm_size_t length = mapping->fLength;
2626
2627 kern_return_t kr = kIOReturnVMError;
2628 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2629
2630 IOOptionBits type = _flags & kIOMemoryTypeMask;
2631 Ranges vec = _ranges;
2632
2633 user_addr_t range0Addr = 0;
2634 IOByteCount range0Len = 0;
2635
2636 if ((offset >= _length) || ((offset + length) > _length))
2637 return( kIOReturnBadArgument );
2638
2639 if (vec.v)
2640 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2641
2642 // mapping source == dest? (could be much better)
2643 if( _task
2644 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2645 && (1 == _rangesCount) && (0 == offset)
2646 && range0Addr && (length <= range0Len) )
2647 {
2648 mapping->fAddress = range0Addr;
2649 mapping->fOptions |= kIOMapStatic;
2650
2651 return( kIOReturnSuccess );
2652 }
2653
2654 if( 0 == sharedMem) {
2655
2656 vm_size_t size = ptoa_32(_pages);
2657
2658 if( _task) {
2659
2660 memory_object_size_t actualSize = size;
2661 vm_prot_t prot = VM_PROT_READ;
2662 if (!(kIOMapReadOnly & options))
2663 prot |= VM_PROT_WRITE;
2664 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2665 prot |= VM_PROT_WRITE;
2666
2667 if (_rangesCount == 1)
2668 {
2669 kr = mach_make_memory_entry_64(get_task_map(_task),
2670 &actualSize, range0Addr,
2671 prot, &sharedMem,
2672 NULL);
2673 }
2674 if( (_rangesCount != 1)
2675 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2676 do
2677 {
2678 #if IOASSERT
2679 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2680 _rangesCount, (UInt64)actualSize, (UInt64)size);
2681 #endif
2682 kr = kIOReturnVMError;
2683 if (sharedMem)
2684 {
2685 ipc_port_release_send(sharedMem);
2686 sharedMem = MACH_PORT_NULL;
2687 }
2688
2689 mach_vm_address_t address, segDestAddr;
2690 mach_vm_size_t mapLength;
2691 unsigned rangesIndex;
2692 IOOptionBits type = _flags & kIOMemoryTypeMask;
2693 user_addr_t srcAddr;
2694 IOPhysicalLength segLen = 0;
2695
2696 // Find starting address within the vector of ranges
2697 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2698 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2699 if (offset < segLen)
2700 break;
2701 offset -= segLen; // (make offset relative)
2702 }
2703
2704 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
2705 address = trunc_page_64(mapping->fAddress);
2706
2707 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2708 {
2709 vm_map_t map = mapping->fAddressMap;
2710 kr = IOMemoryDescriptorMapCopy(&map,
2711 options,
2712 offset, &address, round_page_64(length + pageOffset));
2713 if (kr == KERN_SUCCESS)
2714 {
2715 segDestAddr = address;
2716 segLen -= offset;
2717 srcAddr += offset;
2718 mapLength = length;
2719
2720 while (true)
2721 {
2722 vm_prot_t cur_prot, max_prot;
2723
2724 if (segLen > length) segLen = length;
2725 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2726 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2727 get_task_map(_task), trunc_page_64(srcAddr),
2728 FALSE /* copy */,
2729 &cur_prot,
2730 &max_prot,
2731 VM_INHERIT_NONE);
2732 if (KERN_SUCCESS == kr)
2733 {
2734 if ((!(VM_PROT_READ & cur_prot))
2735 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2736 {
2737 kr = KERN_PROTECTION_FAILURE;
2738 }
2739 }
2740 if (KERN_SUCCESS != kr)
2741 break;
2742 segDestAddr += segLen;
2743 mapLength -= segLen;
2744 if (!mapLength)
2745 break;
2746 rangesIndex++;
2747 if (rangesIndex >= _rangesCount)
2748 {
2749 kr = kIOReturnBadArgument;
2750 break;
2751 }
2752 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2753 if (srcAddr & PAGE_MASK)
2754 {
2755 kr = kIOReturnBadArgument;
2756 break;
2757 }
2758 if (segLen > mapLength)
2759 segLen = mapLength;
2760 }
2761 if (KERN_SUCCESS != kr)
2762 {
2763 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2764 }
2765 }
2766
2767 if (KERN_SUCCESS == kr)
2768 mapping->fAddress = address + pageOffset;
2769 else
2770 mapping->fAddress = NULL;
2771 }
2772 }
2773 while (false);
2774 }
2775 else do
2776 { // _task == 0, must be physical
2777
2778 memory_object_t pager;
2779 unsigned int flags = 0;
2780 addr64_t pa;
2781 IOPhysicalLength segLen;
2782
2783 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
2784
2785 if( !getKernelReserved())
2786 continue;
2787 reserved->dp.pagerContig = (1 == _rangesCount);
2788 reserved->dp.memory = this;
2789
2790 /* What cache mode do we need? */
2791 switch(options & kIOMapCacheMask ) {
2792
2793 case kIOMapDefaultCache:
2794 default:
2795 flags = IODefaultCacheBits(pa);
2796 if (DEVICE_PAGER_CACHE_INHIB & flags)
2797 {
2798 if (DEVICE_PAGER_GUARDED & flags)
2799 mapping->fOptions |= kIOMapInhibitCache;
2800 else
2801 mapping->fOptions |= kIOMapWriteCombineCache;
2802 }
2803 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2804 mapping->fOptions |= kIOMapWriteThruCache;
2805 else
2806 mapping->fOptions |= kIOMapCopybackCache;
2807 break;
2808
2809 case kIOMapInhibitCache:
2810 flags = DEVICE_PAGER_CACHE_INHIB |
2811 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2812 break;
2813
2814 case kIOMapWriteThruCache:
2815 flags = DEVICE_PAGER_WRITE_THROUGH |
2816 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2817 break;
2818
2819 case kIOMapCopybackCache:
2820 flags = DEVICE_PAGER_COHERENT;
2821 break;
2822
2823 case kIOMapWriteCombineCache:
2824 flags = DEVICE_PAGER_CACHE_INHIB |
2825 DEVICE_PAGER_COHERENT;
2826 break;
2827 }
2828
2829 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2830
2831 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
2832 size, flags);
2833 assert( pager );
2834
2835 if( pager) {
2836 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2837 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2838
2839 assert( KERN_SUCCESS == kr );
2840 if( KERN_SUCCESS != kr)
2841 {
2842 device_pager_deallocate( pager );
2843 pager = MACH_PORT_NULL;
2844 sharedMem = MACH_PORT_NULL;
2845 }
2846 }
2847 if( pager && sharedMem)
2848 reserved->dp.devicePager = pager;
2849
2850 } while( false );
2851
2852 _memEntry = (void *) sharedMem;
2853 }
2854
2855 IOReturn result;
2856 if (0 == sharedMem)
2857 result = kr;
2858 else
2859 result = super::doMap( __addressMap, __address,
2860 options, __offset, __length );
2861
2862 return( result );
2863 }
2864
2865 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2866 vm_map_t addressMap,
2867 IOVirtualAddress __address,
2868 IOByteCount __length )
2869 {
2870 return (super::doUnmap(addressMap, __address, __length));
2871 }
2872
2873 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2874
2875 #undef super
2876 #define super OSObject
2877
2878 OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
2879
2880 OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2881 OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2882 OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2883 OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2884 OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2885 OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2886 OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2887 OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
2888
2889 /* ex-inline function implementation */
2890 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2891 { return( getPhysicalSegment( 0, 0 )); }
2892
2893 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2894
2895 bool IOMemoryMap::init(
2896 task_t intoTask,
2897 mach_vm_address_t toAddress,
2898 IOOptionBits _options,
2899 mach_vm_size_t _offset,
2900 mach_vm_size_t _length )
2901 {
2902 if (!intoTask)
2903 return( false);
2904
2905 if (!super::init())
2906 return(false);
2907
2908 fAddressMap = get_task_map(intoTask);
2909 if (!fAddressMap)
2910 return(false);
2911 vm_map_reference(fAddressMap);
2912
2913 fAddressTask = intoTask;
2914 fOptions = _options;
2915 fLength = _length;
2916 fOffset = _offset;
2917 fAddress = toAddress;
2918
2919 return (true);
2920 }
2921
2922 bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2923 {
2924 if (!_memory)
2925 return(false);
2926
2927 if (!fSuperMap)
2928 {
2929 if( (_offset + fLength) > _memory->getLength())
2930 return( false);
2931 fOffset = _offset;
2932 }
2933
2934 _memory->retain();
2935 if (fMemory)
2936 {
2937 if (fMemory != _memory)
2938 fMemory->removeMapping(this);
2939 fMemory->release();
2940 }
2941 fMemory = _memory;
2942
2943 return( true );
2944 }
2945
2946 struct IOMemoryDescriptorMapAllocRef
2947 {
2948 ipc_port_t sharedMem;
2949 vm_map_t map;
2950 mach_vm_address_t mapped;
2951 mach_vm_size_t size;
2952 mach_vm_size_t sourceOffset;
2953 IOOptionBits options;
2954 };
2955
2956 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2957 {
2958 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2959 IOReturn err;
2960
2961 do {
2962 if( ref->sharedMem)
2963 {
2964 vm_prot_t prot = VM_PROT_READ
2965 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2966
2967 // VM system requires write access to change cache mode
2968 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2969 prot |= VM_PROT_WRITE;
2970
2971 // set memory entry cache
2972 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2973 switch (ref->options & kIOMapCacheMask)
2974 {
2975 case kIOMapInhibitCache:
2976 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2977 break;
2978
2979 case kIOMapWriteThruCache:
2980 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2981 break;
2982
2983 case kIOMapWriteCombineCache:
2984 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2985 break;
2986
2987 case kIOMapCopybackCache:
2988 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2989 break;
2990
2991 case kIOMapCopybackInnerCache:
2992 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2993 break;
2994
2995 case kIOMapDefaultCache:
2996 default:
2997 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2998 break;
2999 }
3000
3001 vm_size_t unused = 0;
3002
3003 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
3004 memEntryCacheMode, NULL, ref->sharedMem );
3005 if (KERN_SUCCESS != err)
3006 IOLog("MAP_MEM_ONLY failed %d\n", err);
3007
3008 err = mach_vm_map( map,
3009 &ref->mapped,
3010 ref->size, 0 /* mask */,
3011 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3012 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
3013 ref->sharedMem, ref->sourceOffset,
3014 false, // copy
3015 prot, // cur
3016 prot, // max
3017 VM_INHERIT_NONE);
3018
3019 if( KERN_SUCCESS != err) {
3020 ref->mapped = 0;
3021 continue;
3022 }
3023 ref->map = map;
3024 }
3025 else
3026 {
3027 err = mach_vm_allocate(map, &ref->mapped, ref->size,
3028 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3029 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
3030 if( KERN_SUCCESS != err) {
3031 ref->mapped = 0;
3032 continue;
3033 }
3034 ref->map = map;
3035 // make sure these pages are not copied if the task forks.
3036 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
3037 assert( KERN_SUCCESS == err );
3038 }
3039 }
3040 while( false );
3041
3042 return( err );
3043 }
3044
3045 kern_return_t
3046 IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
3047 mach_vm_size_t offset,
3048 mach_vm_address_t * address, mach_vm_size_t length)
3049 {
3050 IOReturn err;
3051 IOMemoryDescriptorMapAllocRef ref;
3052
3053 ref.map = *map;
3054 ref.sharedMem = entry;
3055 ref.sourceOffset = trunc_page_64(offset);
3056 ref.options = options;
3057 ref.size = length;
3058
3059 if (options & kIOMapAnywhere)
3060 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3061 ref.mapped = 0;
3062 else
3063 ref.mapped = *address;
3064
3065 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
3066 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3067 else
3068 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
3069
3070 *address = ref.mapped;
3071 *map = ref.map;
3072
3073 return (err);
3074 }
3075
3076 kern_return_t
3077 IOMemoryDescriptorMapCopy(vm_map_t * map,
3078 IOOptionBits options,
3079 mach_vm_size_t offset,
3080 mach_vm_address_t * address, mach_vm_size_t length)
3081 {
3082 IOReturn err;
3083 IOMemoryDescriptorMapAllocRef ref;
3084
3085 ref.map = *map;
3086 ref.sharedMem = NULL;
3087 ref.sourceOffset = trunc_page_64(offset);
3088 ref.options = options;
3089 ref.size = length;
3090
3091 if (options & kIOMapAnywhere)
3092 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3093 ref.mapped = 0;
3094 else
3095 ref.mapped = *address;
3096
3097 if (ref.map == kernel_map)
3098 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3099 else
3100 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
3101
3102 *address = ref.mapped;
3103 *map = ref.map;
3104
3105 return (err);
3106 }
3107
3108 IOReturn IOMemoryDescriptor::doMap(
3109 vm_map_t __addressMap,
3110 IOVirtualAddress * __address,
3111 IOOptionBits options,
3112 IOByteCount __offset,
3113 IOByteCount __length )
3114 {
3115 #ifndef __LP64__
3116 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
3117 #endif /* !__LP64__ */
3118
3119 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
3120 mach_vm_size_t offset = mapping->fOffset + __offset;
3121 mach_vm_size_t length = mapping->fLength;
3122
3123 IOReturn err = kIOReturnSuccess;
3124 memory_object_t pager;
3125 mach_vm_size_t pageOffset;
3126 IOPhysicalAddress sourceAddr;
3127 unsigned int lock_count;
3128
3129 do
3130 {
3131 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3132 pageOffset = sourceAddr - trunc_page( sourceAddr );
3133
3134 if( reserved)
3135 pager = (memory_object_t) reserved->dp.devicePager;
3136 else
3137 pager = MACH_PORT_NULL;
3138
3139 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3140 {
3141 upl_t redirUPL2;
3142 vm_size_t size;
3143 int flags;
3144
3145 if (!_memEntry)
3146 {
3147 err = kIOReturnNotReadable;
3148 continue;
3149 }
3150
3151 size = round_page(mapping->fLength + pageOffset);
3152 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3153 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3154
3155 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3156 NULL, NULL,
3157 &flags))
3158 redirUPL2 = NULL;
3159
3160 for (lock_count = 0;
3161 IORecursiveLockHaveLock(gIOMemoryLock);
3162 lock_count++) {
3163 UNLOCK;
3164 }
3165 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
3166 for (;
3167 lock_count;
3168 lock_count--) {
3169 LOCK;
3170 }
3171
3172 if (kIOReturnSuccess != err)
3173 {
3174 IOLog("upl_transpose(%x)\n", err);
3175 err = kIOReturnSuccess;
3176 }
3177
3178 if (redirUPL2)
3179 {
3180 upl_commit(redirUPL2, NULL, 0);
3181 upl_deallocate(redirUPL2);
3182 redirUPL2 = 0;
3183 }
3184 {
3185 // swap the memEntries since they now refer to different vm_objects
3186 void * me = _memEntry;
3187 _memEntry = mapping->fMemory->_memEntry;
3188 mapping->fMemory->_memEntry = me;
3189 }
3190 if (pager)
3191 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
3192 }
3193 else
3194 {
3195 mach_vm_address_t address;
3196
3197 if (!(options & kIOMapAnywhere))
3198 {
3199 address = trunc_page_64(mapping->fAddress);
3200 if( (mapping->fAddress - address) != pageOffset)
3201 {
3202 err = kIOReturnVMError;
3203 continue;
3204 }
3205 }
3206
3207 vm_map_t map = mapping->fAddressMap;
3208 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
3209 options, (kIOMemoryBufferPageable & _flags),
3210 offset, &address, round_page_64(length + pageOffset));
3211 if( err != KERN_SUCCESS)
3212 continue;
3213
3214 if (!_memEntry || pager)
3215 {
3216 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3217 if (err != KERN_SUCCESS)
3218 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3219 }
3220
3221 #if DEBUG
3222 if (kIOLogMapping & gIOKitDebug)
3223 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3224 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
3225 #endif
3226
3227 if (err == KERN_SUCCESS)
3228 mapping->fAddress = address + pageOffset;
3229 else
3230 mapping->fAddress = NULL;
3231 }
3232 }
3233 while( false );
3234
3235 return (err);
3236 }
3237
3238 IOReturn IOMemoryDescriptor::handleFault(
3239 void * _pager,
3240 vm_map_t addressMap,
3241 mach_vm_address_t address,
3242 mach_vm_size_t sourceOffset,
3243 mach_vm_size_t length,
3244 IOOptionBits options )
3245 {
3246 IOReturn err = kIOReturnSuccess;
3247 memory_object_t pager = (memory_object_t) _pager;
3248 mach_vm_size_t size;
3249 mach_vm_size_t bytes;
3250 mach_vm_size_t page;
3251 mach_vm_size_t pageOffset;
3252 mach_vm_size_t pagerOffset;
3253 IOPhysicalLength segLen;
3254 addr64_t physAddr;
3255
3256 if( !addressMap)
3257 {
3258 if( kIOMemoryRedirected & _flags)
3259 {
3260 #if DEBUG
3261 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
3262 #endif
3263 do {
3264 SLEEP;
3265 } while( kIOMemoryRedirected & _flags );
3266 }
3267
3268 return( kIOReturnSuccess );
3269 }
3270
3271 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
3272 assert( physAddr );
3273 pageOffset = physAddr - trunc_page_64( physAddr );
3274 pagerOffset = sourceOffset;
3275
3276 size = length + pageOffset;
3277 physAddr -= pageOffset;
3278
3279 segLen += pageOffset;
3280 bytes = size;
3281 do
3282 {
3283 // in the middle of the loop only map whole pages
3284 if( segLen >= bytes)
3285 segLen = bytes;
3286 else if( segLen != trunc_page( segLen))
3287 err = kIOReturnVMError;
3288 if( physAddr != trunc_page_64( physAddr))
3289 err = kIOReturnBadArgument;
3290 if (kIOReturnSuccess != err)
3291 break;
3292
3293 #if DEBUG
3294 if( kIOLogMapping & gIOKitDebug)
3295 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
3296 addressMap, address + pageOffset, physAddr + pageOffset,
3297 segLen - pageOffset);
3298 #endif
3299
3300
3301 if( pager) {
3302 if( reserved && reserved->dp.pagerContig) {
3303 IOPhysicalLength allLen;
3304 addr64_t allPhys;
3305
3306 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
3307 assert( allPhys );
3308 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
3309 }
3310 else
3311 {
3312
3313 for( page = 0;
3314 (page < segLen) && (KERN_SUCCESS == err);
3315 page += page_size)
3316 {
3317 err = device_pager_populate_object(pager, pagerOffset,
3318 (ppnum_t)(atop_64(physAddr + page)), page_size);
3319 pagerOffset += page_size;
3320 }
3321 }
3322 assert( KERN_SUCCESS == err );
3323 if( err)
3324 break;
3325 }
3326
3327 // This call to vm_fault causes an early pmap-level resolution of the
3328 // kernel mappings created above, since faulting them in later cannot
3329 // be done from interrupt level.
3330 /* *** ALERT *** */
3331 /* *** Temporary Workaround *** */
3332
3333 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3334 {
3335 vm_fault(addressMap,
3336 (vm_map_offset_t)address,
3337 VM_PROT_READ|VM_PROT_WRITE,
3338 FALSE, THREAD_UNINT, NULL,
3339 (vm_map_offset_t)0);
3340 }
3341
3342 /* *** Temporary Workaround *** */
3343 /* *** ALERT *** */
3344
3345 sourceOffset += segLen - pageOffset;
3346 address += segLen;
3347 bytes -= segLen;
3348 pageOffset = 0;
3349
3350 }
3351 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
3352
3353 if (bytes)
3354 err = kIOReturnBadArgument;
3355
3356 return (err);
3357 }
3358
3359 IOReturn IOMemoryDescriptor::doUnmap(
3360 vm_map_t addressMap,
3361 IOVirtualAddress __address,
3362 IOByteCount __length )
3363 {
3364 IOReturn err;
3365 mach_vm_address_t address;
3366 mach_vm_size_t length;
3367
3368 if (__length)
3369 {
3370 address = __address;
3371 length = __length;
3372 }
3373 else
3374 {
3375 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3376 address = ((IOMemoryMap *) __address)->fAddress;
3377 length = ((IOMemoryMap *) __address)->fLength;
3378 }
3379
3380 if ((addressMap == kernel_map)
3381 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
3382 addressMap = IOPageableMapForAddress( address );
3383
3384 #if DEBUG
3385 if( kIOLogMapping & gIOKitDebug)
3386 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3387 addressMap, address, length );
3388 #endif
3389
3390 err = mach_vm_deallocate( addressMap, address, length );
3391
3392 return (err);
3393 }
3394
3395 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
3396 {
3397 IOReturn err = kIOReturnSuccess;
3398 IOMemoryMap * mapping = 0;
3399 OSIterator * iter;
3400
3401 LOCK;
3402
3403 if( doRedirect)
3404 _flags |= kIOMemoryRedirected;
3405 else
3406 _flags &= ~kIOMemoryRedirected;
3407
3408 do {
3409 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3410
3411 memory_object_t pager;
3412
3413 if( reserved)
3414 pager = (memory_object_t) reserved->dp.devicePager;
3415 else
3416 pager = MACH_PORT_NULL;
3417
3418 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
3419 {
3420 mapping->redirect( safeTask, doRedirect );
3421 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3422 {
3423 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3424 }
3425 }
3426
3427 iter->release();
3428 }
3429 } while( false );
3430
3431 if (!doRedirect)
3432 {
3433 WAKEUP;
3434 }
3435
3436 UNLOCK;
3437
3438 #ifndef __LP64__
3439 // temporary binary compatibility
3440 IOSubMemoryDescriptor * subMem;
3441 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
3442 err = subMem->redirect( safeTask, doRedirect );
3443 else
3444 err = kIOReturnSuccess;
3445 #endif /* !__LP64__ */
3446
3447 return( err );
3448 }
3449
3450 IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
3451 {
3452 IOReturn err = kIOReturnSuccess;
3453
3454 if( fSuperMap) {
3455 // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
3456 } else {
3457
3458 LOCK;
3459
3460 do
3461 {
3462 if (!fAddress)
3463 break;
3464 if (!fAddressMap)
3465 break;
3466
3467 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3468 && (0 == (fOptions & kIOMapStatic)))
3469 {
3470 IOUnmapPages( fAddressMap, fAddress, fLength );
3471 err = kIOReturnSuccess;
3472 #if DEBUG
3473 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
3474 #endif
3475 }
3476 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
3477 {
3478 IOOptionBits newMode;
3479 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3480 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
3481 }
3482 }
3483 while (false);
3484 UNLOCK;
3485 }
3486
3487 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3488 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3489 && safeTask
3490 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3491 fMemory->redirect(safeTask, doRedirect);
3492
3493 return( err );
3494 }
3495
3496 IOReturn IOMemoryMap::unmap( void )
3497 {
3498 IOReturn err;
3499
3500 LOCK;
3501
3502 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3503 && (0 == (fOptions & kIOMapStatic))) {
3504
3505 vm_map_iokit_unmapped_region(fAddressMap, fLength);
3506
3507 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
3508
3509 } else
3510 err = kIOReturnSuccess;
3511
3512 if (fAddressMap)
3513 {
3514 vm_map_deallocate(fAddressMap);
3515 fAddressMap = 0;
3516 }
3517
3518 fAddress = 0;
3519
3520 UNLOCK;
3521
3522 return( err );
3523 }
3524
3525 void IOMemoryMap::taskDied( void )
3526 {
3527 LOCK;
3528 if (fUserClientUnmap)
3529 unmap();
3530 if( fAddressMap) {
3531 vm_map_deallocate(fAddressMap);
3532 fAddressMap = 0;
3533 }
3534 fAddressTask = 0;
3535 fAddress = 0;
3536 UNLOCK;
3537 }
3538
3539 IOReturn IOMemoryMap::userClientUnmap( void )
3540 {
3541 fUserClientUnmap = true;
3542 return (kIOReturnSuccess);
3543 }
3544
3545 // Overload the release mechanism. Every mapping must be a member of
3546 // its memory descriptor's _mappings set, which means there are always
3547 // 2 references on a mapping. When either of these references is
3548 // released we need to free ourselves.
3549 void IOMemoryMap::taggedRelease(const void *tag) const
3550 {
3551 LOCK;
3552 super::taggedRelease(tag, 2);
3553 UNLOCK;
3554 }
3555
3556 void IOMemoryMap::free()
3557 {
3558 unmap();
3559
3560 if (fMemory)
3561 {
3562 LOCK;
3563 fMemory->removeMapping(this);
3564 UNLOCK;
3565 fMemory->release();
3566 }
3567
3568 if (fOwner && (fOwner != fMemory))
3569 {
3570 LOCK;
3571 fOwner->removeMapping(this);
3572 UNLOCK;
3573 }
3574
3575 if (fSuperMap)
3576 fSuperMap->release();
3577
3578 if (fRedirUPL) {
3579 upl_commit(fRedirUPL, NULL, 0);
3580 upl_deallocate(fRedirUPL);
3581 }
3582
3583 super::free();
3584 }
3585
3586 IOByteCount IOMemoryMap::getLength()
3587 {
3588 return( fLength );
3589 }
3590
3591 IOVirtualAddress IOMemoryMap::getVirtualAddress()
3592 {
3593 #ifndef __LP64__
3594 if (fSuperMap)
3595 fSuperMap->getVirtualAddress();
3596 else if (fAddressMap
3597 && vm_map_is_64bit(fAddressMap)
3598 && (sizeof(IOVirtualAddress) < 8))
3599 {
3600 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3601 }
3602 #endif /* !__LP64__ */
3603
3604 return (fAddress);
3605 }
3606
3607 #ifndef __LP64__
3608 mach_vm_address_t IOMemoryMap::getAddress()
3609 {
3610 return( fAddress);
3611 }
3612
3613 mach_vm_size_t IOMemoryMap::getSize()
3614 {
3615 return( fLength );
3616 }
3617 #endif /* !__LP64__ */
3618
3619
3620 task_t IOMemoryMap::getAddressTask()
3621 {
3622 if( fSuperMap)
3623 return( fSuperMap->getAddressTask());
3624 else
3625 return( fAddressTask);
3626 }
3627
3628 IOOptionBits IOMemoryMap::getMapOptions()
3629 {
3630 return( fOptions);
3631 }
3632
3633 IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
3634 {
3635 return( fMemory );
3636 }
3637
3638 IOMemoryMap * IOMemoryMap::copyCompatible(
3639 IOMemoryMap * newMapping )
3640 {
3641 task_t task = newMapping->getAddressTask();
3642 mach_vm_address_t toAddress = newMapping->fAddress;
3643 IOOptionBits _options = newMapping->fOptions;
3644 mach_vm_size_t _offset = newMapping->fOffset;
3645 mach_vm_size_t _length = newMapping->fLength;
3646
3647 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
3648 return( 0 );
3649 if( (fOptions ^ _options) & kIOMapReadOnly)
3650 return( 0 );
3651 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
3652 && ((fOptions ^ _options) & kIOMapCacheMask))
3653 return( 0 );
3654
3655 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
3656 return( 0 );
3657
3658 if( _offset < fOffset)
3659 return( 0 );
3660
3661 _offset -= fOffset;
3662
3663 if( (_offset + _length) > fLength)
3664 return( 0 );
3665
3666 retain();
3667 if( (fLength == _length) && (!_offset))
3668 {
3669 newMapping = this;
3670 }
3671 else
3672 {
3673 newMapping->fSuperMap = this;
3674 newMapping->fOffset = fOffset + _offset;
3675 newMapping->fAddress = fAddress + _offset;
3676 }
3677
3678 return( newMapping );
3679 }
3680
3681 IOReturn IOMemoryMap::wireRange(
3682 uint32_t options,
3683 mach_vm_size_t offset,
3684 mach_vm_size_t length)
3685 {
3686 IOReturn kr;
3687 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3688 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3689
3690 if (kIODirectionOutIn & options)
3691 {
3692 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3693 }
3694 else
3695 {
3696 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3697 }
3698
3699 return (kr);
3700 }
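/*
 * Illustrative sketch (editor's addition): wiring and later unwiring the
 * pages behind an existing mapping with wireRange() above. Passing direction
 * bits wires; passing none unwires, per the code above. The mapping `map`
 * is hypothetical.
 */
#if 0 /* example only */
map->wireRange(kIODirectionOutIn, 0, map->getLength());   // wire resident
// ... access the wired range ...
map->wireRange(0, 0, map->getLength());                   // unwire
#endif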
3701
3702
3703 IOPhysicalAddress
3704 #ifdef __LP64__
3705 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3706 #else /* !__LP64__ */
3707 IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3708 #endif /* !__LP64__ */
3709 {
3710 IOPhysicalAddress address;
3711
3712 LOCK;
3713 #ifdef __LP64__
3714 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3715 #else /* !__LP64__ */
3716 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3717 #endif /* !__LP64__ */
3718 UNLOCK;
3719
3720 return( address );
3721 }
3722
3723 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3724
3725 #undef super
3726 #define super OSObject
3727
3728 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3729
3730 void IOMemoryDescriptor::initialize( void )
3731 {
3732 if( 0 == gIOMemoryLock)
3733 gIOMemoryLock = IORecursiveLockAlloc();
3734
3735 gIOLastPage = IOGetLastPageNumber();
3736 }
3737
3738 void IOMemoryDescriptor::free( void )
3739 {
3740 if( _mappings)
3741 _mappings->release();
3742
3743 super::free();
3744 }
3745
3746 IOMemoryMap * IOMemoryDescriptor::setMapping(
3747 task_t intoTask,
3748 IOVirtualAddress mapAddress,
3749 IOOptionBits options )
3750 {
3751 return (createMappingInTask( intoTask, mapAddress,
3752 options | kIOMapStatic,
3753 0, getLength() ));
3754 }
3755
3756 IOMemoryMap * IOMemoryDescriptor::map(
3757 IOOptionBits options )
3758 {
3759 return (createMappingInTask( kernel_task, 0,
3760 options | kIOMapAnywhere,
3761 0, getLength() ));
3762 }
3763
3764 #ifndef __LP64__
3765 IOMemoryMap * IOMemoryDescriptor::map(
3766 task_t intoTask,
3767 IOVirtualAddress atAddress,
3768 IOOptionBits options,
3769 IOByteCount offset,
3770 IOByteCount length )
3771 {
3772 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3773 {
3774 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3775 return (0);
3776 }
3777
3778 return (createMappingInTask(intoTask, atAddress,
3779 options, offset, length));
3780 }
3781 #endif /* !__LP64__ */
3782
3783 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3784 task_t intoTask,
3785 mach_vm_address_t atAddress,
3786 IOOptionBits options,
3787 mach_vm_size_t offset,
3788 mach_vm_size_t length)
3789 {
3790 IOMemoryMap * result;
3791 IOMemoryMap * mapping;
3792
3793 if (0 == length)
3794 length = getLength();
3795
3796 mapping = new IOMemoryMap;
3797
3798 if( mapping
3799 && !mapping->init( intoTask, atAddress,
3800 options, offset, length )) {
3801 mapping->release();
3802 mapping = 0;
3803 }
3804
3805 if (mapping)
3806 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3807 else
3808 result = 0;
3809
3810 #if DEBUG
3811 if (!result)
3812 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3813 this, atAddress, (uint32_t) options, offset, length);
3814 #endif
3815
3816 return (result);
3817 }
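/*
 * Illustrative sketch (editor's addition): creating a kernel mapping of a
 * descriptor with createMappingInTask() above and releasing it when done.
 * The descriptor `md` is hypothetical.
 */
#if 0 /* example only */
IOMemoryMap * map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, 0);
if (map)
{
    void * kva = (void *) map->getVirtualAddress();
    // ... use kva for map->getLength() bytes ...
    map->release();     // releasing the map removes the mapping
}
#endif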
3818
3819 #ifndef __LP64__ // there is only a 64 bit version for LP64
3820 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3821 IOOptionBits options,
3822 IOByteCount offset)
3823 {
3824 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3825 }
3826 #endif
3827
3828 IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3829 IOOptionBits options,
3830 mach_vm_size_t offset)
3831 {
3832 IOReturn err = kIOReturnSuccess;
3833 IOMemoryDescriptor * physMem = 0;
3834
3835 LOCK;
3836
3837 if (fAddress && fAddressMap) do
3838 {
3839 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3840 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3841 {
3842 physMem = fMemory;
3843 physMem->retain();
3844 }
3845
3846 if (!fRedirUPL)
3847 {
3848 vm_size_t size = round_page(fLength);
3849 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3850 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3851 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3852 NULL, NULL,
3853 &flags))
3854 fRedirUPL = 0;
3855
3856 if (physMem)
3857 {
3858 IOUnmapPages( fAddressMap, fAddress, fLength );
3859 if (false)
3860 physMem->redirect(0, true);
3861 }
3862 }
3863
3864 if (newBackingMemory)
3865 {
3866 if (newBackingMemory != fMemory)
3867 {
3868 fOffset = 0;
3869 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3870 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3871 offset, fLength))
3872 err = kIOReturnError;
3873 }
3874 if (fRedirUPL)
3875 {
3876 upl_commit(fRedirUPL, NULL, 0);
3877 upl_deallocate(fRedirUPL);
3878 fRedirUPL = 0;
3879 }
3880 if (false && physMem)
3881 physMem->redirect(0, false);
3882 }
3883 }
3884 while (false);
3885
3886 UNLOCK;
3887
3888 if (physMem)
3889 physMem->release();
3890
3891 return (err);
3892 }
3893
3894 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3895 IOMemoryDescriptor * owner,
3896 task_t __intoTask,
3897 IOVirtualAddress __address,
3898 IOOptionBits options,
3899 IOByteCount __offset,
3900 IOByteCount __length )
3901 {
3902 #ifndef __LP64__
3903 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3904 #endif /* !__LP64__ */
3905
3906 IOMemoryDescriptor * mapDesc = 0;
3907 IOMemoryMap * result = 0;
3908 OSIterator * iter;
3909
3910 IOMemoryMap * mapping = (IOMemoryMap *) __address;
3911 mach_vm_size_t offset = mapping->fOffset + __offset;
3912 mach_vm_size_t length = mapping->fLength;
3913
3914 mapping->fOffset = offset;
3915
3916 LOCK;
3917
3918 do
3919 {
3920 if (kIOMapStatic & options)
3921 {
3922 result = mapping;
3923 addMapping(mapping);
3924 mapping->setMemoryDescriptor(this, 0);
3925 continue;
3926 }
3927
3928 if (kIOMapUnique & options)
3929 {
3930 addr64_t phys;
3931 IOByteCount physLen;
3932
3933 // if (owner != this) continue;
3934
3935 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3936 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3937 {
3938 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
3939 if (!phys || (physLen < length))
3940 continue;
3941
3942 mapDesc = IOMemoryDescriptor::withAddressRange(
3943 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
3944 if (!mapDesc)
3945 continue;
3946 offset = 0;
3947 mapping->fOffset = offset;
3948 }
3949 }
3950 else
3951 {
3952 // look for a compatible existing mapping
3953 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3954 {
3955 IOMemoryMap * lookMapping;
3956 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
3957 {
3958 if ((result = lookMapping->copyCompatible(mapping)))
3959 {
3960 addMapping(result);
3961 result->setMemoryDescriptor(this, offset);
3962 break;
3963 }
3964 }
3965 iter->release();
3966 }
3967 if (result || (options & kIOMapReference))
3968 {
3969 if (result != mapping)
3970 {
3971 mapping->release();
3972 mapping = NULL;
3973 }
3974 continue;
3975 }
3976 }
3977
3978 if (!mapDesc)
3979 {
3980 mapDesc = this;
3981 mapDesc->retain();
3982 }
3983 IOReturn
3984 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3985 if (kIOReturnSuccess == kr)
3986 {
3987 if (0 == (mapping->fOptions & kIOMapStatic)) {
3988 vm_map_iokit_mapped_region(mapping->fAddressMap, length);
3989 }
3990
3991 result = mapping;
3992 mapDesc->addMapping(result);
3993 result->setMemoryDescriptor(mapDesc, offset);
3994 }
3995 else
3996 {
3997 mapping->release();
3998 mapping = NULL;
3999 }
4000 }
4001 while( false );
4002
4003 UNLOCK;
4004
4005 if (mapDesc)
4006 mapDesc->release();
4007
4008 return (result);
4009 }
4010
4011 void IOMemoryDescriptor::addMapping(
4012 IOMemoryMap * mapping )
4013 {
4014 if( mapping)
4015 {
4016 if( 0 == _mappings)
4017 _mappings = OSSet::withCapacity(1);
4018 if( _mappings )
4019 _mappings->setObject( mapping );
4020 }
4021 }
4022
4023 void IOMemoryDescriptor::removeMapping(
4024 IOMemoryMap * mapping )
4025 {
4026 if( _mappings)
4027 _mappings->removeObject( mapping);
4028 }
4029
4030 #ifndef __LP64__
4031 // obsolete initializers
4032 // - initWithOptions is the designated initializer
4033 bool
4034 IOMemoryDescriptor::initWithAddress(void * address,
4035 IOByteCount length,
4036 IODirection direction)
4037 {
4038 return( false );
4039 }
4040
4041 bool
4042 IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
4043 IOByteCount length,
4044 IODirection direction,
4045 task_t task)
4046 {
4047 return( false );
4048 }
4049
4050 bool
4051 IOMemoryDescriptor::initWithPhysicalAddress(
4052 IOPhysicalAddress address,
4053 IOByteCount length,
4054 IODirection direction )
4055 {
4056 return( false );
4057 }
4058
4059 bool
4060 IOMemoryDescriptor::initWithRanges(
4061 IOVirtualRange * ranges,
4062 UInt32 withCount,
4063 IODirection direction,
4064 task_t task,
4065 bool asReference)
4066 {
4067 return( false );
4068 }
4069
4070 bool
4071 IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
4072 UInt32 withCount,
4073 IODirection direction,
4074 bool asReference)
4075 {
4076 return( false );
4077 }
4078
4079 void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4080 IOByteCount * lengthOfSegment)
4081 {
4082 return( 0 );
4083 }
4084 #endif /* !__LP64__ */
4085
4086 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4087
4088 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4089 {
4090 OSSymbol const *keys[2];
4091 OSObject *values[2];
4092 struct SerData {
4093 user_addr_t address;
4094 user_size_t length;
4095 } *vcopy;
4096 unsigned int index, nRanges;
4097 bool result;
4098
4099 IOOptionBits type = _flags & kIOMemoryTypeMask;
4100
4101 if (s == NULL) return false;
4102 if (s->previouslySerialized(this)) return true;
4103
4104 // Pretend we are an array.
4105 if (!s->addXMLStartTag(this, "array")) return false;
4106
4107 nRanges = _rangesCount;
4108 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
4109 if (vcopy == 0) return false;
4110
4111 keys[0] = OSSymbol::withCString("address");
4112 keys[1] = OSSymbol::withCString("length");
4113
4114 result = false;
4115 values[0] = values[1] = 0;
4116
4117 // From this point on, every error path must exit through the bail: label below.
4118
4119 // Copy the volatile data so we don't have to allocate memory
4120 // while the lock is held.
4121 LOCK;
4122 if (nRanges == _rangesCount) {
4123 Ranges vec = _ranges;
4124 for (index = 0; index < nRanges; index++) {
4125 user_addr_t addr; IOByteCount len;
4126 getAddrLenForInd(addr, len, type, vec, index);
4127 vcopy[index].address = addr;
4128 vcopy[index].length = len;
4129 }
4130 } else {
4131 // The descriptor changed out from under us. Give up.
4132 UNLOCK;
4133 result = false;
4134 goto bail;
4135 }
4136 UNLOCK;
4137
4138 for (index = 0; index < nRanges; index++)
4139 {
4140 user_addr_t addr = vcopy[index].address;
4141 IOByteCount len = (IOByteCount) vcopy[index].length;
4142 values[0] =
4143 OSNumber::withNumber(addr, sizeof(addr) * 8);
4144 if (values[0] == 0) {
4145 result = false;
4146 goto bail;
4147 }
4148 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
4149 if (values[1] == 0) {
4150 result = false;
4151 goto bail;
4152 }
4153 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4154 if (dict == 0) {
4155 result = false;
4156 goto bail;
4157 }
4158 values[0]->release();
4159 values[1]->release();
4160 values[0] = values[1] = 0;
4161
4162 result = dict->serialize(s);
4163 dict->release();
4164 if (!result) {
4165 goto bail;
4166 }
4167 }
4168 result = s->addXMLEndTag("array");
4169
4170 bail:
4171 if (values[0])
4172 values[0]->release();
4173 if (values[1])
4174 values[1]->release();
4175 if (keys[0])
4176 keys[0]->release();
4177 if (keys[1])
4178 keys[1]->release();
4179 if (vcopy)
4180 IOFree(vcopy, sizeof(SerData) * nRanges);
4181 return result;
4182 }
4183
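/*
 * Usage sketch for the serializer above (illustrative, hypothetical helper
 * name): the output is an XML <array> of <dict> entries, each carrying an
 * "address" and a "length" OSNumber, one dictionary per range.
 */
#if 0
static void
DumpDescriptorRanges(IOGeneralMemoryDescriptor * md)
{
    OSSerialize * s = OSSerialize::withCapacity(4096);
    if (!s)
        return;
    if (md->serialize(s))
        IOLog("%s\n", s->text());   // e.g. <array><dict>...</dict>...</array>
    s->release();
}
#endif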
4184 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4185
4186 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
4187 #ifdef __LP64__
4188 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4189 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4190 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4191 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4192 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4193 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4194 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4195 #else /* !__LP64__ */
4196 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4197 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
4198 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4199 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
4200 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
4201 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4202 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4203 #endif /* !__LP64__ */
4204 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4205 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4206 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4207 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4208 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4209 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4210 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4211 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
4212
4213 /* ex-inline function implementation */
4214 IOPhysicalAddress
4215 IOMemoryDescriptor::getPhysicalAddress()
4216 { return( getPhysicalSegment( 0, 0 )); }
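/*
 * Note on the ex-inline above: getPhysicalAddress() reports only the physical
 * address of the byte at offset 0, i.e. the start of the first physically
 * contiguous segment. A descriptor spanning several pages or ranges should be
 * walked with getPhysicalSegment() instead; hypothetical sketch below, which
 * assumes the descriptor has already been prepare()d.
 */
#if 0
static void
WalkPhysicalSegments(IOMemoryDescriptor * md)
{
    IOByteCount offset = 0;
    IOByteCount segLen = 0;
    addr64_t    seg;

    // getPhysicalSegment() returns 0 once offset reaches the descriptor's length.
    while ((seg = md->getPhysicalSegment(offset, &segLen)))
    {
        IOLog("segment 0x%llx, length 0x%llx\n",
              (unsigned long long) seg, (unsigned long long) segLen);
        offset += segLen;
    }
}
#endif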