[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp (xnu-2422.1.72)
1c79356b 1/*
2d21ac55 2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
b0d623f7
A
34
35
55e303ae 36#include <sys/cdefs.h>
1c79356b
A
37
38#include <IOKit/assert.h>
39#include <IOKit/system.h>
40#include <IOKit/IOLib.h>
41#include <IOKit/IOMemoryDescriptor.h>
55e303ae 42#include <IOKit/IOMapper.h>
99c3a104 43#include <IOKit/IODMACommand.h>
55e303ae 44#include <IOKit/IOKitKeysPrivate.h>
1c79356b 45
b0d623f7
A
46#ifndef __LP64__
47#include <IOKit/IOSubMemoryDescriptor.h>
48#endif /* !__LP64__ */
49
1c79356b 50#include <IOKit/IOKitDebug.h>
2d21ac55 51#include <libkern/OSDebug.h>
1c79356b 52
91447636
A
53#include "IOKitKernelInternal.h"
54
1c79356b 55#include <libkern/c++/OSContainers.h>
9bccf70c
A
56#include <libkern/c++/OSDictionary.h>
57#include <libkern/c++/OSArray.h>
58#include <libkern/c++/OSSymbol.h>
59#include <libkern/c++/OSNumber.h>
91447636
A
60
61#include <sys/uio.h>
1c79356b
A
62
63__BEGIN_DECLS
64#include <vm/pmap.h>
91447636 65#include <vm/vm_pageout.h>
55e303ae 66#include <mach/memory_object_types.h>
0b4e3aa0 67#include <device/device_port.h>
55e303ae 68
91447636 69#include <mach/vm_prot.h>
2d21ac55 70#include <mach/mach_vm.h>
91447636 71#include <vm/vm_fault.h>
2d21ac55 72#include <vm/vm_protos.h>
91447636 73
55e303ae 74extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
6d2010ae
A
75extern void ipc_port_release_send(ipc_port_t port);
76
55e303ae
A
77kern_return_t
78memory_object_iopl_request(
79 ipc_port_t port,
80 memory_object_offset_t offset,
81 vm_size_t *upl_size,
82 upl_t *upl_ptr,
83 upl_page_info_array_t user_page_list,
84 unsigned int *page_list_count,
85 int *flags);
0b4e3aa0 86
55e303ae 87unsigned int IOTranslateCacheBits(struct phys_entry *pp);
1c79356b 88
55e303ae 89__END_DECLS
1c79356b 90
99c3a104
A
91#define kIOMapperWaitSystem ((IOMapper *) 1)
92
0c530ab8
A
93static IOMapper * gIOSystemMapper = NULL;
94
0c530ab8
A
95ppnum_t gIOLastPage;
96
55e303ae 97/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
de355530 98
55e303ae 99OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
de355530 100
55e303ae 101#define super IOMemoryDescriptor
de355530 102
55e303ae 103OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
de355530 104
1c79356b
A
105/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
106
9bccf70c
A
107static IORecursiveLock * gIOMemoryLock;
108
109#define LOCK IORecursiveLockLock( gIOMemoryLock)
110#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
111#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
112#define WAKEUP \
113 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
114
0c530ab8
A
115#if 0
116#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
117#else
118#define DEBG(fmt, args...) {}
119#endif
120
b0d623f7 121#define IOMD_DEBUG_DMAACTIVE 1
91447636
A
122
123/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
124
125// Some data structures and accessor macros used by the initWithOptions
126// Function
127
128enum ioPLBlockFlags {
129 kIOPLOnDevice = 0x00000001,
130 kIOPLExternUPL = 0x00000002,
131};
132
133struct typePersMDData
134{
135 const IOGeneralMemoryDescriptor *fMD;
136 ipc_port_t fMemEntry;
137};
138
139struct ioPLBlock {
140 upl_t fIOPL;
b0d623f7
A
141 vm_address_t fPageInfo; // Pointer to page list or index into it
142 uint32_t fIOMDOffset; // The offset of this iopl in descriptor
99c3a104 143 ppnum_t fMappedPage; // Page number of first page in this iopl
b0d623f7
A
144 unsigned int fPageOffset; // Offset within first page of iopl
145 unsigned int fFlags; // Flags
91447636
A
146};
147
148struct ioGMDData {
99c3a104
A
149 IOMapper * fMapper;
150 uint8_t fDMAMapNumAddressBits;
151 uint64_t fDMAMapAlignment;
152 addr64_t fMappedBase;
b0d623f7 153 uint64_t fPreparationID;
91447636 154 unsigned int fPageCnt;
39236c6e 155 unsigned char fDiscontig;
b0d623f7
A
156#if __LP64__
157 // align arrays to 8 bytes so following macros work
39236c6e 158 unsigned char fPad[3];
b0d623f7 159#endif
6d2010ae
A
160 upl_page_info_t fPageList[1]; /* variable length */
161 ioPLBlock fBlocks[1]; /* variable length */
91447636
A
162};
163
164#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
99c3a104 165#define getIOPLList(d) ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
91447636
A
166#define getNumIOPL(osd, d) \
167 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
168#define getPageList(d) (&(d->fPageList[0]))
169#define computeDataSize(p, u) \
6d2010ae 170 (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
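// Illustrative sketch (not part of the original file): how the variable-length
// ioGMDData blob stored in an OSData is typically walked with the accessor macros
// above. Assumes a populated _memoryEntries OSData, as built by initWithOptions().
static __unused void
dumpIOPLBlocks(OSData * memoryEntries)
{
    ioGMDData * dataP = getDataP(memoryEntries);           // header at the start of the blob
    ioPLBlock * iopls = getIOPLList(dataP);                // blocks follow fPageList[fPageCnt]
    UInt        count = getNumIOPL(memoryEntries, dataP);  // derived from the OSData length
    for (UInt ind = 0; ind < count; ind++)
        DEBG("iopl %d: mdOffset %u pageOffset %u flags 0x%x\n", (int) ind,
             iopls[ind].fIOMDOffset, iopls[ind].fPageOffset, iopls[ind].fFlags);
}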
91447636
A
171
172
173/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
174
b0d623f7 175#define next_page(a) ( trunc_page(a) + PAGE_SIZE )
0b4e3aa0
A
176
177
178extern "C" {
179
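// Upcall from the VM device pager when a fault needs data for memory backed by an
// IOMemoryDescriptor: the pager's device_handle is the descriptor's reserved block,
// and the request is forwarded to handleFault() while holding a retain.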
180kern_return_t device_data_action(
b0d623f7 181 uintptr_t device_handle,
0b4e3aa0
A
182 ipc_port_t device_pager,
183 vm_prot_t protection,
184 vm_object_offset_t offset,
185 vm_size_t size)
186{
9bccf70c 187 kern_return_t kr;
316670eb 188 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
9bccf70c 189 IOMemoryDescriptor * memDesc;
0b4e3aa0 190
9bccf70c 191 LOCK;
316670eb 192 memDesc = ref->dp.memory;
9bccf70c 193 if( memDesc)
91447636
A
194 {
195 memDesc->retain();
9bccf70c
A
196 kr = memDesc->handleFault( device_pager, 0, 0,
197 offset, size, kIOMapDefaultCache /*?*/);
91447636
A
198 memDesc->release();
199 }
9bccf70c
A
200 else
201 kr = KERN_ABORTED;
202 UNLOCK;
0b4e3aa0 203
9bccf70c 204 return( kr );
0b4e3aa0
A
205}
206
207kern_return_t device_close(
b0d623f7 208 uintptr_t device_handle)
0b4e3aa0 209{
316670eb 210 IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
0b4e3aa0 211
316670eb 212 IODelete( ref, IOMemoryDescriptorReserved, 1 );
0b4e3aa0
A
213
214 return( kIOReturnSuccess );
215}
91447636 216}; // end extern "C"
0b4e3aa0 217
91447636
A
218// Note this inline function uses C++ reference arguments to return values
219// This means that pointers are not passed and NULLs don't have to be
220// checked for as a NULL reference is illegal.
221static inline void
2d21ac55 222getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
91447636
A
223 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
224{
0c530ab8
A
225 assert(kIOMemoryTypeUIO == type
226 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
227 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
91447636
A
228 if (kIOMemoryTypeUIO == type) {
229 user_size_t us;
230 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
231 }
b0d623f7 232#ifndef __LP64__
0c530ab8
A
233 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
234 IOAddressRange cur = r.v64[ind];
235 addr = cur.address;
236 len = cur.length;
237 }
b0d623f7 238#endif /* !__LP64__ */
91447636
A
239 else {
240 IOVirtualRange cur = r.v[ind];
241 addr = cur.address;
242 len = cur.length;
243 }
0b4e3aa0
A
244}
245
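// Illustrative sketch (not part of the original file): fetching the first range of a
// descriptor the way createNamedEntry() and initWithOptions() do below:
//
//     user_addr_t      addr;
//     IOPhysicalLength len;
//     getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, /* ind */ 0);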
1c79356b
A
246/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
247
1c79356b
A
248IOMemoryDescriptor *
249IOMemoryDescriptor::withAddress(void * address,
55e303ae
A
250 IOByteCount length,
251 IODirection direction)
252{
253 return IOMemoryDescriptor::
b0d623f7 254 withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
55e303ae
A
255}
256
b0d623f7 257#ifndef __LP64__
55e303ae 258IOMemoryDescriptor *
b0d623f7 259IOMemoryDescriptor::withAddress(IOVirtualAddress address,
55e303ae
A
260 IOByteCount length,
261 IODirection direction,
262 task_t task)
1c79356b
A
263{
264 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
265 if (that)
266 {
55e303ae 267 if (that->initWithAddress(address, length, direction, task))
1c79356b
A
268 return that;
269
270 that->release();
271 }
272 return 0;
273}
b0d623f7 274#endif /* !__LP64__ */
1c79356b
A
275
276IOMemoryDescriptor *
55e303ae
A
277IOMemoryDescriptor::withPhysicalAddress(
278 IOPhysicalAddress address,
279 IOByteCount length,
280 IODirection direction )
281{
b0d623f7 282 return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
55e303ae
A
283}
284
b0d623f7 285#ifndef __LP64__
55e303ae
A
286IOMemoryDescriptor *
287IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
288 UInt32 withCount,
289 IODirection direction,
290 task_t task,
291 bool asReference)
1c79356b
A
292{
293 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
294 if (that)
295 {
55e303ae 296 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1c79356b
A
297 return that;
298
299 that->release();
300 }
301 return 0;
302}
b0d623f7 303#endif /* !__LP64__ */
1c79356b 304
0c530ab8
A
305IOMemoryDescriptor *
306IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
2d21ac55
A
307 mach_vm_size_t length,
308 IOOptionBits options,
309 task_t task)
0c530ab8
A
310{
311 IOAddressRange range = { address, length };
312 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
313}
314
315IOMemoryDescriptor *
316IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
2d21ac55
A
317 UInt32 rangeCount,
318 IOOptionBits options,
319 task_t task)
0c530ab8
A
320{
321 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
322 if (that)
323 {
324 if (task)
325 options |= kIOMemoryTypeVirtual64;
326 else
327 options |= kIOMemoryTypePhysical64;
328
2d21ac55
A
329 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
330 return that;
0c530ab8 331
2d21ac55 332 that->release();
0c530ab8
A
333 }
334
335 return 0;
336}
337
1c79356b
A
338
339/*
b0d623f7 340 * withOptions:
1c79356b
A
341 *
342 * Create a new IOMemoryDescriptor. The buffer is made up of several
343 * virtual address ranges, from a given task.
344 *
345 * Passing the ranges as a reference will avoid an extra allocation.
346 */
347IOMemoryDescriptor *
55e303ae
A
348IOMemoryDescriptor::withOptions(void * buffers,
349 UInt32 count,
350 UInt32 offset,
351 task_t task,
352 IOOptionBits opts,
353 IOMapper * mapper)
1c79356b 354{
55e303ae 355 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 356
55e303ae
A
357 if (self
358 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
359 {
360 self->release();
361 return 0;
de355530 362 }
55e303ae
A
363
364 return self;
365}
366
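// Illustrative usage sketch (not part of the original file): building a descriptor
// over two ranges of a user task with withAddressRanges(). The range addresses and
// lengths are placeholders supplied by a hypothetical caller.
static __unused IOMemoryDescriptor *
exampleDescriptorForTask(task_t userTask, mach_vm_address_t a0, mach_vm_address_t a1)
{
    IOAddressRange ranges[2] = { { a0, 0x1000 }, { a1, 0x2000 } };
    return (IOMemoryDescriptor::withAddressRanges(ranges, 2, kIODirectionOutIn, userTask));
}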
55e303ae
A
367bool IOMemoryDescriptor::initWithOptions(void * buffers,
368 UInt32 count,
369 UInt32 offset,
370 task_t task,
371 IOOptionBits options,
372 IOMapper * mapper)
373{
b0d623f7 374 return( false );
1c79356b
A
375}
376
b0d623f7 377#ifndef __LP64__
1c79356b
A
378IOMemoryDescriptor *
379IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
380 UInt32 withCount,
55e303ae
A
381 IODirection direction,
382 bool asReference)
1c79356b
A
383{
384 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
385 if (that)
386 {
55e303ae 387 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1c79356b
A
388 return that;
389
390 that->release();
391 }
392 return 0;
393}
394
395IOMemoryDescriptor *
396IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
397 IOByteCount offset,
398 IOByteCount length,
55e303ae 399 IODirection direction)
1c79356b 400{
b0d623f7 401 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
1c79356b 402}
b0d623f7 403#endif /* !__LP64__ */
1c79356b 404
0c530ab8
A
405IOMemoryDescriptor *
406IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
91447636
A
407{
408 IOGeneralMemoryDescriptor *origGenMD =
409 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
410
411 if (origGenMD)
412 return IOGeneralMemoryDescriptor::
413 withPersistentMemoryDescriptor(origGenMD);
414 else
415 return 0;
416}
417
0c530ab8
A
418IOMemoryDescriptor *
419IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
91447636
A
420{
421 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
422
423 if (!sharedMem)
424 return 0;
425
426 if (sharedMem == originalMD->_memEntry) {
427 originalMD->retain(); // Add a new reference to ourselves
428 ipc_port_release_send(sharedMem); // Remove extra send right
429 return originalMD;
430 }
431
432 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
433 typePersMDData initData = { originalMD, sharedMem };
434
435 if (self
436 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
437 self->release();
438 self = 0;
439 }
440 return self;
441}
442
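// Creates (or, with MAP_MEM_NAMED_REUSE, re-uses) a Mach named memory entry covering
// this descriptor's pages so that the same backing memory can be shared with another
// descriptor; see withPersistentMemoryDescriptor() above.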
443void *IOGeneralMemoryDescriptor::createNamedEntry()
444{
445 kern_return_t error;
446 ipc_port_t sharedMem;
447
448 IOOptionBits type = _flags & kIOMemoryTypeMask;
449
450 user_addr_t range0Addr;
451 IOByteCount range0Len;
452 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
453 range0Addr = trunc_page_64(range0Addr);
454
455 vm_size_t size = ptoa_32(_pages);
456 vm_address_t kernelPage = (vm_address_t) range0Addr;
457
458 vm_map_t theMap = ((_task == kernel_task)
459 && (kIOMemoryBufferPageable & _flags))
460 ? IOPageableMapForAddress(kernelPage)
461 : get_task_map(_task);
462
463 memory_object_size_t actualSize = size;
2d21ac55 464 vm_prot_t prot = VM_PROT_READ;
2d21ac55 465 if (kIODirectionOut != (kIODirectionOutIn & _flags))
2d21ac55
A
466 prot |= VM_PROT_WRITE;
467
91447636
A
468 if (_memEntry)
469 prot |= MAP_MEM_NAMED_REUSE;
470
471 error = mach_make_memory_entry_64(theMap,
472 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
473
474 if (KERN_SUCCESS == error) {
475 if (actualSize == size) {
476 return sharedMem;
477 } else {
478#if IOASSERT
b0d623f7
A
479 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
480 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
91447636
A
481#endif
482 ipc_port_release_send( sharedMem );
483 }
484 }
485
486 return MACH_PORT_NULL;
487}
488
b0d623f7 489#ifndef __LP64__
1c79356b
A
490bool
491IOGeneralMemoryDescriptor::initWithAddress(void * address,
492 IOByteCount withLength,
493 IODirection withDirection)
494{
b0d623f7 495 _singleRange.v.address = (vm_offset_t) address;
1c79356b
A
496 _singleRange.v.length = withLength;
497
498 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
499}
500
501bool
b0d623f7 502IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1c79356b
A
503 IOByteCount withLength,
504 IODirection withDirection,
505 task_t withTask)
506{
507 _singleRange.v.address = address;
508 _singleRange.v.length = withLength;
509
510 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
511}
512
513bool
514IOGeneralMemoryDescriptor::initWithPhysicalAddress(
515 IOPhysicalAddress address,
516 IOByteCount withLength,
517 IODirection withDirection )
518{
519 _singleRange.p.address = address;
520 _singleRange.p.length = withLength;
521
522 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
523}
524
55e303ae
A
525bool
526IOGeneralMemoryDescriptor::initWithPhysicalRanges(
527 IOPhysicalRange * ranges,
528 UInt32 count,
529 IODirection direction,
530 bool reference)
531{
532 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
533
534 if (reference)
535 mdOpts |= kIOMemoryAsReference;
536
537 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
538}
539
540bool
541IOGeneralMemoryDescriptor::initWithRanges(
542 IOVirtualRange * ranges,
543 UInt32 count,
544 IODirection direction,
545 task_t task,
546 bool reference)
547{
548 IOOptionBits mdOpts = direction;
549
550 if (reference)
551 mdOpts |= kIOMemoryAsReference;
552
553 if (task) {
554 mdOpts |= kIOMemoryTypeVirtual;
91447636
A
555
556 // Auto-prepare if this is a kernel memory descriptor as very few
557 // clients bother to prepare() kernel memory.
2d21ac55 558 // But it was not enforced so what are you going to do?
55e303ae
A
559 if (task == kernel_task)
560 mdOpts |= kIOMemoryAutoPrepare;
561 }
562 else
563 mdOpts |= kIOMemoryTypePhysical;
55e303ae
A
564
565 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
566}
b0d623f7 567#endif /* !__LP64__ */
55e303ae 568
1c79356b 569/*
55e303ae 570 * initWithOptions:
1c79356b 571 *
55e303ae 572 * (Re)initialize an IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
91447636
A
91447636 573 * from a given task, several physical ranges, a UPL from the ubc
574 * system or a uio (may be 64bit) from the BSD subsystem.
1c79356b
A
575 *
576 * Passing the ranges as a reference will avoid an extra allocation.
577 *
55e303ae
A
578 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
579 * existing instance -- note this behavior is not commonly supported in other
580 * I/O Kit classes, although it is supported here.
1c79356b 581 */
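// Hedged usage sketch (not part of the original file) of the re-use behaviour
// described above; 'gmd', the buffers and lengths are assumed placeholders:
//
//     IOVirtualRange r1 = { (IOVirtualAddress) buf1, len };
//     gmd->initWithOptions(&r1, 1, 0, kernel_task,
//                          kIOMemoryTypeVirtual | kIODirectionOutIn, /* mapper */ 0);
//     // ... use the descriptor, then retarget the same instance at a new range;
//     // initWithOptions() cleans up the previous state first.
//     IOVirtualRange r2 = { (IOVirtualAddress) buf2, len };
//     gmd->initWithOptions(&r2, 1, 0, kernel_task,
//                          kIOMemoryTypeVirtual | kIODirectionOutIn, /* mapper */ 0);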
55e303ae 582
1c79356b 583bool
55e303ae
A
584IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
585 UInt32 count,
586 UInt32 offset,
587 task_t task,
588 IOOptionBits options,
589 IOMapper * mapper)
590{
91447636
A
591 IOOptionBits type = options & kIOMemoryTypeMask;
592
6d2010ae
A
593#ifndef __LP64__
594 if (task
595 && (kIOMemoryTypeVirtual == type)
596 && vm_map_is_64bit(get_task_map(task))
597 && ((IOVirtualRange *) buffers)->address)
598 {
599 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
600 return false;
601 }
602#endif /* !__LP64__ */
603
91447636
A
 604 // Grab the original MD's configuration data to initialise the
605 // arguments to this function.
606 if (kIOMemoryTypePersistentMD == type) {
607
608 typePersMDData *initData = (typePersMDData *) buffers;
609 const IOGeneralMemoryDescriptor *orig = initData->fMD;
610 ioGMDData *dataP = getDataP(orig->_memoryEntries);
611
612 // Only accept persistent memory descriptors with valid dataP data.
613 assert(orig->_rangesCount == 1);
614 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
615 return false;
616
617 _memEntry = initData->fMemEntry; // Grab the new named entry
6d2010ae
A
618 options = orig->_flags & ~kIOMemoryAsReference;
619 type = options & kIOMemoryTypeMask;
620 buffers = orig->_ranges.v;
621 count = orig->_rangesCount;
55e303ae 622
91447636
A
623 // Now grab the original task and whatever mapper was previously used
624 task = orig->_task;
625 mapper = dataP->fMapper;
626
627 // We are ready to go through the original initialisation now
628 }
629
630 switch (type) {
631 case kIOMemoryTypeUIO:
55e303ae 632 case kIOMemoryTypeVirtual:
b0d623f7 633#ifndef __LP64__
0c530ab8 634 case kIOMemoryTypeVirtual64:
b0d623f7 635#endif /* !__LP64__ */
55e303ae
A
636 assert(task);
637 if (!task)
638 return false;
2d21ac55 639 break;
55e303ae
A
640
641 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
b0d623f7 642#ifndef __LP64__
0c530ab8 643 case kIOMemoryTypePhysical64:
b0d623f7 644#endif /* !__LP64__ */
55e303ae
A
645 case kIOMemoryTypeUPL:
646 assert(!task);
647 break;
648 default:
55e303ae
A
649 return false; /* bad argument */
650 }
651
652 assert(buffers);
653 assert(count);
1c79356b
A
654
655 /*
656 * We can check the _initialized instance variable before having ever set
657 * it to an initial value because I/O Kit guarantees that all our instance
658 * variables are zeroed on an object's allocation.
659 */
660
55e303ae 661 if (_initialized) {
1c79356b
A
662 /*
663 * An existing memory descriptor is being retargeted to point to
664 * somewhere else. Clean up our present state.
665 */
2d21ac55
A
666 IOOptionBits type = _flags & kIOMemoryTypeMask;
667 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
668 {
669 while (_wireCount)
670 complete();
671 }
b0d623f7 672 if (_ranges.v && !(kIOMemoryAsReference & _flags))
0c530ab8
A
673 {
674 if (kIOMemoryTypeUIO == type)
675 uio_free((uio_t) _ranges.v);
b0d623f7 676#ifndef __LP64__
0c530ab8
A
677 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
678 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 679#endif /* !__LP64__ */
0c530ab8
A
680 else
681 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
682 }
2d21ac55 683
39236c6e
A
684 options |= (kIOMemoryRedirected & _flags);
685 if (!(kIOMemoryRedirected & options))
6d2010ae 686 {
39236c6e
A
687 if (_memEntry)
688 {
689 ipc_port_release_send((ipc_port_t) _memEntry);
690 _memEntry = 0;
691 }
692 if (_mappings)
693 _mappings->flushCollection();
6d2010ae 694 }
1c79356b 695 }
55e303ae
A
696 else {
697 if (!super::init())
698 return false;
699 _initialized = true;
700 }
d7e50217 701
55e303ae 702 // Grab the appropriate mapper
99c3a104 703 if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
b0d623f7 704 if (kIOMemoryMapperNone & options)
55e303ae 705 mapper = 0; // No Mapper
0c530ab8 706 else if (mapper == kIOMapperSystem) {
55e303ae
A
707 IOMapper::checkForSystemMapper();
708 gIOSystemMapper = mapper = IOMapper::gSystem;
709 }
1c79356b 710
c910b4d9
A
711 // Temp binary compatibility for kIOMemoryThreadSafe
712 if (kIOMemoryReserved6156215 & options)
713 {
714 options &= ~kIOMemoryReserved6156215;
715 options |= kIOMemoryThreadSafe;
716 }
91447636
A
717 // Remove the dynamic internal use flags from the initial setting
718 options &= ~(kIOMemoryPreparedReadOnly);
55e303ae
A
719 _flags = options;
720 _task = task;
721
b0d623f7 722#ifndef __LP64__
55e303ae 723 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
b0d623f7 724#endif /* !__LP64__ */
0c530ab8
A
725
726 __iomd_reservedA = 0;
727 __iomd_reservedB = 0;
0c530ab8 728 _highestPage = 0;
1c79356b 729
2d21ac55
A
730 if (kIOMemoryThreadSafe & options)
731 {
732 if (!_prepareLock)
733 _prepareLock = IOLockAlloc();
734 }
735 else if (_prepareLock)
736 {
737 IOLockFree(_prepareLock);
738 _prepareLock = NULL;
739 }
740
91447636 741 if (kIOMemoryTypeUPL == type) {
1c79356b 742
55e303ae
A
743 ioGMDData *dataP;
744 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
d7e50217 745
99c3a104 746 if (!initMemoryEntries(dataSize, mapper)) return (false);
55e303ae 747 dataP = getDataP(_memoryEntries);
55e303ae
A
748 dataP->fPageCnt = 0;
749
0c530ab8 750 // _wireCount++; // UPLs start out life wired
55e303ae
A
751
752 _length = count;
753 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
754
755 ioPLBlock iopl;
55e303ae 756 iopl.fIOPL = (upl_t) buffers;
6d2010ae 757 upl_set_referenced(iopl.fIOPL, true);
b0d623f7
A
758 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
759
760 if (upl_get_size(iopl.fIOPL) < (count + offset))
761 panic("short external upl");
762
0c530ab8
A
763 _highestPage = upl_get_highest_page(iopl.fIOPL);
764
99c3a104
A
 765 // Set the flag kIOPLOnDevice, conveniently equal to 1
766 iopl.fFlags = pageList->device | kIOPLExternUPL;
55e303ae 767 if (!pageList->device) {
55e303ae
A
768 // Pre-compute the offset into the UPL's page list
769 pageList = &pageList[atop_32(offset)];
770 offset &= PAGE_MASK;
55e303ae 771 }
99c3a104
A
772 iopl.fIOMDOffset = 0;
773 iopl.fMappedPage = 0;
55e303ae
A
774 iopl.fPageInfo = (vm_address_t) pageList;
775 iopl.fPageOffset = offset;
55e303ae 776 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
d7e50217 777 }
91447636 778 else {
0c530ab8
A
779 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
780 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
91447636
A
781
782 // Initialize the memory descriptor
783 if (options & kIOMemoryAsReference) {
b0d623f7 784#ifndef __LP64__
91447636 785 _rangesIsAllocated = false;
b0d623f7 786#endif /* !__LP64__ */
91447636
A
787
788 // Hack assignment to get the buffer arg into _ranges.
789 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
790 // work, C++ sigh.
791 // This also initialises the uio & physical ranges.
792 _ranges.v = (IOVirtualRange *) buffers;
793 }
794 else {
b0d623f7 795#ifndef __LP64__
6601e61a 796 _rangesIsAllocated = true;
b0d623f7
A
797#endif /* !__LP64__ */
798 switch (type)
0c530ab8
A
799 {
800 case kIOMemoryTypeUIO:
801 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
802 break;
803
b0d623f7 804#ifndef __LP64__
0c530ab8
A
805 case kIOMemoryTypeVirtual64:
806 case kIOMemoryTypePhysical64:
b0d623f7 807 if (count == 1
6d2010ae
A
808 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
809 ) {
b0d623f7
A
810 if (kIOMemoryTypeVirtual64 == type)
811 type = kIOMemoryTypeVirtual;
812 else
813 type = kIOMemoryTypePhysical;
814 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
815 _rangesIsAllocated = false;
816 _ranges.v = &_singleRange.v;
817 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
818 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
819 break;
820 }
0c530ab8
A
821 _ranges.v64 = IONew(IOAddressRange, count);
822 if (!_ranges.v64)
823 return false;
824 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
825 break;
b0d623f7 826#endif /* !__LP64__ */
0c530ab8 827 case kIOMemoryTypeVirtual:
2d21ac55 828 case kIOMemoryTypePhysical:
b0d623f7
A
829 if (count == 1) {
830 _flags |= kIOMemoryAsReference;
831#ifndef __LP64__
832 _rangesIsAllocated = false;
833#endif /* !__LP64__ */
834 _ranges.v = &_singleRange.v;
835 } else {
836 _ranges.v = IONew(IOVirtualRange, count);
837 if (!_ranges.v)
838 return false;
839 }
0c530ab8
A
840 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
841 break;
842 }
91447636
A
843 }
844
845 // Find starting address within the vector of ranges
846 Ranges vec = _ranges;
847 UInt32 length = 0;
848 UInt32 pages = 0;
849 for (unsigned ind = 0; ind < count; ind++) {
850 user_addr_t addr;
b0d623f7 851 IOPhysicalLength len;
91447636
A
852
853 // addr & len are returned by this function
854 getAddrLenForInd(addr, len, type, vec, ind);
855 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
856 len += length;
0c530ab8 857 assert(len >= length); // Check for 32 bit wrap around
91447636 858 length = len;
0c530ab8
A
859
860 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
861 {
862 ppnum_t highPage = atop_64(addr + len - 1);
863 if (highPage > _highestPage)
864 _highestPage = highPage;
865 }
91447636
A
866 }
867 _length = length;
868 _pages = pages;
869 _rangesCount = count;
55e303ae
A
870
871 // Auto-prepare memory at creation time.
 872 // Implied completion when descriptor is freed
0c530ab8 873 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
91447636 874 _wireCount++; // Physical MDs are, by definition, wired
0c530ab8 875 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
55e303ae 876 ioGMDData *dataP;
91447636 877 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
55e303ae 878
99c3a104 879 if (!initMemoryEntries(dataSize, mapper)) return false;
55e303ae 880 dataP = getDataP(_memoryEntries);
55e303ae
A
881 dataP->fPageCnt = _pages;
882
91447636
A
883 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
884 _memEntry = createNamedEntry();
55e303ae
A
885
886 if ((_flags & kIOMemoryAutoPrepare)
887 && prepare() != kIOReturnSuccess)
888 return false;
889 }
890 }
891
892 return true;
de355530
A
893}
894
1c79356b
A
895/*
896 * free
897 *
898 * Free resources.
899 */
900void IOGeneralMemoryDescriptor::free()
901{
2d21ac55
A
902 IOOptionBits type = _flags & kIOMemoryTypeMask;
903
9bccf70c 904 if( reserved)
2d21ac55
A
905 {
906 LOCK;
316670eb 907 reserved->dp.memory = 0;
2d21ac55
A
908 UNLOCK;
909 }
bd504ef0 910 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2d21ac55 911 {
bd504ef0
A
912 ioGMDData * dataP;
913 if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
914 {
915 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
916 dataP->fMappedBase = 0;
917 }
2d21ac55 918 }
bd504ef0
A
919 else
920 {
921 while (_wireCount) complete();
922 }
923
924 if (_memoryEntries) _memoryEntries->release();
55e303ae 925
b0d623f7 926 if (_ranges.v && !(kIOMemoryAsReference & _flags))
0c530ab8 927 {
0c530ab8
A
928 if (kIOMemoryTypeUIO == type)
929 uio_free((uio_t) _ranges.v);
b0d623f7 930#ifndef __LP64__
0c530ab8
A
931 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
932 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 933#endif /* !__LP64__ */
0c530ab8
A
934 else
935 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
4a3eedf9
A
936
937 _ranges.v = NULL;
0c530ab8 938 }
9bccf70c 939
316670eb
A
940 if (reserved)
941 {
942 if (reserved->dp.devicePager)
943 {
944 // memEntry holds a ref on the device pager which owns reserved
945 // (IOMemoryDescriptorReserved) so no reserved access after this point
946 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
947 }
948 else
949 IODelete(reserved, IOMemoryDescriptorReserved, 1);
950 reserved = NULL;
951 }
9bccf70c 952
55e303ae 953 if (_memEntry)
1c79356b 954 ipc_port_release_send( (ipc_port_t) _memEntry );
55e303ae 955
2d21ac55
A
956 if (_prepareLock)
957 IOLockFree(_prepareLock);
958
1c79356b
A
959 super::free();
960}
961
b0d623f7
A
962#ifndef __LP64__
963void IOGeneralMemoryDescriptor::unmapFromKernel()
964{
965 panic("IOGMD::unmapFromKernel deprecated");
966}
967
968void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
969{
970 panic("IOGMD::mapIntoKernel deprecated");
971}
972#endif /* !__LP64__ */
1c79356b
A
973
974/*
975 * getDirection:
976 *
977 * Get the direction of the transfer.
978 */
979IODirection IOMemoryDescriptor::getDirection() const
980{
b0d623f7
A
981#ifndef __LP64__
982 if (_direction)
983 return _direction;
984#endif /* !__LP64__ */
985 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
986}
987
988/*
989 * getLength:
990 *
991 * Get the length of the transfer (over all ranges).
992 */
993IOByteCount IOMemoryDescriptor::getLength() const
994{
995 return _length;
996}
997
55e303ae 998void IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b
A
999{
1000 _tag = tag;
1001}
1002
1003IOOptionBits IOMemoryDescriptor::getTag( void )
1004{
1005 return( _tag);
1006}
1007
b0d623f7 1008#ifndef __LP64__
55e303ae 1009// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
0c530ab8
A
1010IOPhysicalAddress
1011IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1012{
0c530ab8 1013 addr64_t physAddr = 0;
1c79356b 1014
9bccf70c 1015 if( prepare() == kIOReturnSuccess) {
0c530ab8 1016 physAddr = getPhysicalSegment64( offset, length );
9bccf70c
A
1017 complete();
1018 }
0b4e3aa0 1019
0c530ab8 1020 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
0b4e3aa0 1021}
b0d623f7 1022#endif /* !__LP64__ */
0b4e3aa0 1023
55e303ae
A
1024IOByteCount IOMemoryDescriptor::readBytes
1025 (IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1026{
b0d623f7 1027 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1028 IOByteCount remaining;
1c79356b 1029
55e303ae
A
 1030 // Assert that this entire I/O is within the available range
1031 assert(offset < _length);
1032 assert(offset + length <= _length);
1033 if (offset >= _length) {
55e303ae
A
1034 return 0;
1035 }
1c79356b 1036
b0d623f7
A
1037 if (kIOMemoryThreadSafe & _flags)
1038 LOCK;
1039
55e303ae
A
1040 remaining = length = min(length, _length - offset);
1041 while (remaining) { // (process another target segment?)
1042 addr64_t srcAddr64;
1043 IOByteCount srcLen;
1c79356b 1044
b0d623f7 1045 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
55e303ae
A
1046 if (!srcAddr64)
1047 break;
1c79356b 1048
55e303ae
A
1049 // Clip segment length to remaining
1050 if (srcLen > remaining)
1051 srcLen = remaining;
1c79356b 1052
55e303ae
A
1053 copypv(srcAddr64, dstAddr, srcLen,
1054 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1055
55e303ae
A
1056 dstAddr += srcLen;
1057 offset += srcLen;
1058 remaining -= srcLen;
1059 }
1c79356b 1060
b0d623f7
A
1061 if (kIOMemoryThreadSafe & _flags)
1062 UNLOCK;
1063
55e303ae 1064 assert(!remaining);
1c79356b 1065
55e303ae
A
1066 return length - remaining;
1067}
0b4e3aa0 1068
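// Illustrative usage (not part of the original file): copying the head of a prepared
// descriptor into a caller-supplied kernel buffer with readBytes().
static __unused IOByteCount
exampleReadHead(IOMemoryDescriptor * md, void * dst, IOByteCount dstLen)
{
    // Returns the number of bytes actually copied, clipped to the descriptor's length.
    return (md->readBytes(/* offset */ 0, dst, dstLen));
}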
55e303ae
A
1069IOByteCount IOMemoryDescriptor::writeBytes
1070 (IOByteCount offset, const void *bytes, IOByteCount length)
1071{
b0d623f7 1072 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1073 IOByteCount remaining;
0b4e3aa0 1074
55e303ae
A
 1075 // Assert that this entire I/O is within the available range
1076 assert(offset < _length);
1077 assert(offset + length <= _length);
0b4e3aa0 1078
55e303ae 1079 assert( !(kIOMemoryPreparedReadOnly & _flags) );
0b4e3aa0 1080
55e303ae 1081 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
55e303ae
A
1082 return 0;
1083 }
0b4e3aa0 1084
b0d623f7
A
1085 if (kIOMemoryThreadSafe & _flags)
1086 LOCK;
1087
55e303ae
A
1088 remaining = length = min(length, _length - offset);
1089 while (remaining) { // (process another target segment?)
1090 addr64_t dstAddr64;
1091 IOByteCount dstLen;
0b4e3aa0 1092
b0d623f7 1093 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
55e303ae
A
1094 if (!dstAddr64)
1095 break;
0b4e3aa0 1096
55e303ae
A
1097 // Clip segment length to remaining
1098 if (dstLen > remaining)
1099 dstLen = remaining;
0b4e3aa0 1100
55e303ae
A
1101 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1102 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
0b4e3aa0 1103
55e303ae
A
1104 srcAddr += dstLen;
1105 offset += dstLen;
1106 remaining -= dstLen;
1c79356b 1107 }
1c79356b 1108
b0d623f7
A
1109 if (kIOMemoryThreadSafe & _flags)
1110 UNLOCK;
1111
55e303ae
A
1112 assert(!remaining);
1113
1114 return length - remaining;
1c79356b
A
1115}
1116
55e303ae
A
1117// osfmk/device/iokit_rpc.c
1118extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1c79356b 1119
b0d623f7
A
1120#ifndef __LP64__
1121void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1122{
1123 panic("IOGMD::setPosition deprecated");
1124}
1125#endif /* !__LP64__ */
1126
1127static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1128
1129uint64_t
1130IOGeneralMemoryDescriptor::getPreparationID( void )
1131{
1132 ioGMDData *dataP;
7e4a7d39
A
1133
1134 if (!_wireCount)
b0d623f7 1135 return (kIOPreparationIDUnprepared);
7e4a7d39 1136
99c3a104
A
1137 if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
1138 || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
316670eb
A
1139 {
1140 IOMemoryDescriptor::setPreparationID();
1141 return (IOMemoryDescriptor::getPreparationID());
1142 }
7e4a7d39
A
1143
1144 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1145 return (kIOPreparationIDUnprepared);
1146
b0d623f7
A
1147 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1148 {
b0d623f7 1149 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
b0d623f7
A
1150 }
1151 return (dataP->fPreparationID);
1152}
1153
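// Lazily allocates the zero-filled IOMemoryDescriptorReserved side-band storage that
// holds the preparation ID and the device pager bookkeeping.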
316670eb 1154IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 1155{
316670eb
A
1156 if (!reserved)
1157 {
1158 reserved = IONew(IOMemoryDescriptorReserved, 1);
1159 if (reserved)
1160 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1161 }
1162 return (reserved);
1163}
1164
1165void IOMemoryDescriptor::setPreparationID( void )
1166{
1167 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1168 {
1169#if defined(__ppc__ )
1170 reserved->preparationID = gIOMDPreparationID++;
1171#else
1172 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1173#endif
1174 }
1175}
1176
1177uint64_t IOMemoryDescriptor::getPreparationID( void )
1178{
1179 if (reserved)
1180 return (reserved->preparationID);
1181 else
1182 return (kIOPreparationIDUnsupported);
b0d623f7 1183}
de355530 1184
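// Worker behind IODMACommand for general memory descriptors: kIOMDDMAMap maps the
// memory through an IOMapper, kIOMDGetCharacteristics reports length/page counts,
// and kIOMDWalkSegments returns the next physical (or DMA-mapped) segment.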
0c530ab8 1185IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 1186{
99c3a104
A
1187 IOReturn err = kIOReturnSuccess;
1188 DMACommandOps params;
1189 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1190 ioGMDData *dataP;
1191
1192 params = (op & ~kIOMDDMACommandOperationMask & op);
1193 op &= kIOMDDMACommandOperationMask;
1194
1195 if (kIOMDDMAMap == op)
1196 {
1197 if (dataSize < sizeof(IOMDDMAMapArgs))
1198 return kIOReturnUnderrun;
1199
1200 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1201
1202 if (!_memoryEntries
1203 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1204
1205 if (_memoryEntries && data->fMapper)
1206 {
39236c6e 1207 bool remap;
99c3a104
A
1208 bool whole = ((data->fOffset == 0) && (data->fLength == _length));
1209 dataP = getDataP(_memoryEntries);
39236c6e
A
1210
1211 if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
1212 if (data->fMapSpec.alignment > dataP->fDMAMapAlignment) dataP->fDMAMapAlignment = data->fMapSpec.alignment;
1213
1214 remap = (dataP->fDMAMapNumAddressBits < 64)
1215 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
1216 remap |= (dataP->fDMAMapAlignment > page_size);
99c3a104
A
1217 remap |= (!whole);
1218 if (remap || !dataP->fMappedBase)
1219 {
1220// if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
1221 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1222 if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
1223 {
1224 dataP->fMappedBase = data->fAlloc;
1225 data->fAllocCount = 0; // IOMD owns the alloc now
1226 }
1227 }
1228 else
1229 {
1230 data->fAlloc = dataP->fMappedBase;
1231 data->fAllocCount = 0; // IOMD owns the alloc
1232 }
39236c6e 1233 data->fMapContig = !dataP->fDiscontig;
99c3a104
A
1234 }
1235
1236 return (err);
1237 }
1238
1239 if (kIOMDAddDMAMapSpec == op)
1240 {
1241 if (dataSize < sizeof(IODMAMapSpecification))
1242 return kIOReturnUnderrun;
1243
1244 IODMAMapSpecification * data = (IODMAMapSpecification *) vData;
1245
1246 if (!_memoryEntries
1247 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1248
1249 if (_memoryEntries)
1250 {
1251 dataP = getDataP(_memoryEntries);
1252 if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
1253 dataP->fDMAMapNumAddressBits = data->numAddressBits;
1254 if (data->alignment > dataP->fDMAMapAlignment)
1255 dataP->fDMAMapAlignment = data->alignment;
1256 }
1257 return kIOReturnSuccess;
1258 }
1259
0c530ab8 1260 if (kIOMDGetCharacteristics == op) {
4452a7af 1261
0c530ab8
A
1262 if (dataSize < sizeof(IOMDDMACharacteristics))
1263 return kIOReturnUnderrun;
4452a7af 1264
0c530ab8
A
1265 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1266 data->fLength = _length;
1267 data->fSGCount = _rangesCount;
1268 data->fPages = _pages;
b0d623f7 1269 data->fDirection = getDirection();
0c530ab8
A
1270 if (!_wireCount)
1271 data->fIsPrepared = false;
1272 else {
1273 data->fIsPrepared = true;
1274 data->fHighestPage = _highestPage;
99c3a104
A
1275 if (_memoryEntries)
1276 {
1277 dataP = getDataP(_memoryEntries);
1278 ioPLBlock *ioplList = getIOPLList(dataP);
1279 UInt count = getNumIOPL(_memoryEntries, dataP);
0c530ab8
A
1280 if (count == 1)
1281 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1282 }
0c530ab8 1283 }
4452a7af 1284
0c530ab8 1285 return kIOReturnSuccess;
b0d623f7
A
1286
1287#if IOMD_DEBUG_DMAACTIVE
99c3a104
A
1288 } else if (kIOMDDMAActive == op) {
1289 if (params) OSIncrementAtomic(&md->__iomd_reservedA);
1290 else {
1291 if (md->__iomd_reservedA)
1292 OSDecrementAtomic(&md->__iomd_reservedA);
1293 else
1294 panic("kIOMDSetDMAInactive");
1295 }
b0d623f7
A
1296#endif /* IOMD_DEBUG_DMAACTIVE */
1297
99c3a104 1298 } else if (kIOMDWalkSegments != op)
0c530ab8
A
1299 return kIOReturnBadArgument;
1300
1301 // Get the next segment
1302 struct InternalState {
1303 IOMDDMAWalkSegmentArgs fIO;
1304 UInt fOffset2Index;
1305 UInt fIndex;
1306 UInt fNextOffset;
1307 } *isP;
1308
1309 // Find the next segment
1310 if (dataSize < sizeof(*isP))
1311 return kIOReturnUnderrun;
1312
1313 isP = (InternalState *) vData;
1314 UInt offset = isP->fIO.fOffset;
1315 bool mapped = isP->fIO.fMapped;
1316
99c3a104
A
1317 if (IOMapper::gSystem && mapped
1318 && (!(kIOMemoryHostOnly & _flags))
1319 && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
1320// && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
1321 {
1322 if (!_memoryEntries
1323 && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);
1324
1325 dataP = getDataP(_memoryEntries);
1326 if (dataP->fMapper)
1327 {
1328 IODMAMapSpecification mapSpec;
1329 bzero(&mapSpec, sizeof(mapSpec));
1330 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
1331 mapSpec.alignment = dataP->fDMAMapAlignment;
1332 err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
1333 if (kIOReturnSuccess != err) return (err);
1334 }
1335 }
1336
0c530ab8
A
1337 if (offset >= _length)
1338 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1339
1340 // Validate the previous offset
1341 UInt ind, off2Ind = isP->fOffset2Index;
99c3a104 1342 if (!params
0c530ab8
A
1343 && offset
1344 && (offset == isP->fNextOffset || off2Ind <= offset))
1345 ind = isP->fIndex;
1346 else
1347 ind = off2Ind = 0; // Start from beginning
4452a7af 1348
0c530ab8
A
1349 UInt length;
1350 UInt64 address;
99c3a104
A
1351
1352
0c530ab8 1353 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
4452a7af 1354
0c530ab8
A
1355 // Physical address based memory descriptor
1356 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
4452a7af 1357
0c530ab8 1358 // Find the range after the one that contains the offset
b0d623f7 1359 mach_vm_size_t len;
0c530ab8
A
1360 for (len = 0; off2Ind <= offset; ind++) {
1361 len = physP[ind].length;
1362 off2Ind += len;
1363 }
4452a7af 1364
0c530ab8
A
1365 // Calculate length within range and starting address
1366 length = off2Ind - offset;
1367 address = physP[ind - 1].address + len - length;
89b3af67 1368
99c3a104
A
1369 if (true && mapped && _memoryEntries
1370 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1371 {
1372 address = dataP->fMappedBase + offset;
1373 }
1374 else
1375 {
1376 // see how far we can coalesce ranges
1377 while (ind < _rangesCount && address + length == physP[ind].address) {
1378 len = physP[ind].length;
1379 length += len;
1380 off2Ind += len;
1381 ind++;
1382 }
0c530ab8 1383 }
4452a7af 1384
0c530ab8
A
1385 // correct contiguous check overshoot
1386 ind--;
1387 off2Ind -= len;
1388 }
b0d623f7 1389#ifndef __LP64__
0c530ab8 1390 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
4452a7af 1391
0c530ab8
A
1392 // Physical address based memory descriptor
1393 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
4452a7af 1394
0c530ab8
A
1395 // Find the range after the one that contains the offset
1396 mach_vm_size_t len;
1397 for (len = 0; off2Ind <= offset; ind++) {
1398 len = physP[ind].length;
1399 off2Ind += len;
1400 }
89b3af67 1401
0c530ab8
A
1402 // Calculate length within range and starting address
1403 length = off2Ind - offset;
1404 address = physP[ind - 1].address + len - length;
89b3af67 1405
99c3a104
A
1406 if (true && mapped && _memoryEntries
1407 && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
1408 {
1409 address = dataP->fMappedBase + offset;
1410 }
1411 else
1412 {
1413 // see how far we can coalesce ranges
1414 while (ind < _rangesCount && address + length == physP[ind].address) {
1415 len = physP[ind].length;
1416 length += len;
1417 off2Ind += len;
1418 ind++;
1419 }
0c530ab8 1420 }
0c530ab8
A
1421 // correct contiguous check overshoot
1422 ind--;
1423 off2Ind -= len;
99c3a104 1424 }
b0d623f7 1425#endif /* !__LP64__ */
0c530ab8
A
1426 else do {
1427 if (!_wireCount)
1428 panic("IOGMD: not wired for the IODMACommand");
4452a7af 1429
0c530ab8 1430 assert(_memoryEntries);
4452a7af 1431
99c3a104 1432 dataP = getDataP(_memoryEntries);
0c530ab8
A
1433 const ioPLBlock *ioplList = getIOPLList(dataP);
1434 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1435 upl_page_info_t *pageList = getPageList(dataP);
4452a7af 1436
0c530ab8 1437 assert(numIOPLs > 0);
4452a7af 1438
0c530ab8
A
1439 // Scan through iopl info blocks looking for block containing offset
1440 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1441 ind++;
4452a7af 1442
0c530ab8
A
1443 // Go back to actual range as search goes past it
1444 ioPLBlock ioplInfo = ioplList[ind - 1];
1445 off2Ind = ioplInfo.fIOMDOffset;
1446
1447 if (ind < numIOPLs)
1448 length = ioplList[ind].fIOMDOffset;
1449 else
1450 length = _length;
1451 length -= offset; // Remainder within iopl
1452
1453 // Subtract offset till this iopl in total list
1454 offset -= off2Ind;
1455
1456 // If a mapped address is requested and this is a pre-mapped IOPL
 1457 // then we just need to compute an offset relative to the mapped base.
99c3a104 1458 if (mapped && dataP->fMappedBase) {
0c530ab8 1459 offset += (ioplInfo.fPageOffset & PAGE_MASK);
99c3a104 1460 address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
0c530ab8
A
1461 continue; // Done leave do/while(false) now
1462 }
1463
1464 // The offset is rebased into the current iopl.
1465 // Now add the iopl 1st page offset.
1466 offset += ioplInfo.fPageOffset;
1467
1468 // For external UPLs the fPageInfo field points directly to
1469 // the upl's upl_page_info_t array.
1470 if (ioplInfo.fFlags & kIOPLExternUPL)
1471 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1472 else
1473 pageList = &pageList[ioplInfo.fPageInfo];
1474
1475 // Check for direct device non-paged memory
1476 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1477 address = ptoa_64(pageList->phys_addr) + offset;
1478 continue; // Done leave do/while(false) now
1479 }
4452a7af 1480
0c530ab8
A
 1481 // Now we need to compute the index into the pageList
1482 UInt pageInd = atop_32(offset);
1483 offset &= PAGE_MASK;
1484
1485 // Compute the starting address of this segment
1486 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
b0d623f7
A
1487 if (!pageAddr) {
1488 panic("!pageList phys_addr");
6d2010ae 1489 }
b0d623f7 1490
0c530ab8
A
1491 address = ptoa_64(pageAddr) + offset;
1492
 1493 // length is currently set to the length of the remainder of the iopl.
1494 // We need to check that the remainder of the iopl is contiguous.
1495 // This is indicated by pageList[ind].phys_addr being sequential.
1496 IOByteCount contigLength = PAGE_SIZE - offset;
1497 while (contigLength < length
1498 && ++pageAddr == pageList[++pageInd].phys_addr)
1499 {
1500 contigLength += PAGE_SIZE;
1501 }
1502
1503 if (contigLength < length)
1504 length = contigLength;
1505
1506
1507 assert(address);
1508 assert(length);
1509
1510 } while (false);
1511
1512 // Update return values and state
1513 isP->fIO.fIOVMAddr = address;
1514 isP->fIO.fLength = length;
1515 isP->fIndex = ind;
1516 isP->fOffset2Index = off2Ind;
1517 isP->fNextOffset = isP->fIO.fOffset + length;
1518
1519 return kIOReturnSuccess;
1520}
1521
1522addr64_t
b0d623f7 1523IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 1524{
b0d623f7
A
1525 IOReturn ret;
1526 addr64_t address = 0;
1527 IOByteCount length = 0;
1528 IOMapper * mapper = gIOSystemMapper;
1529 IOOptionBits type = _flags & kIOMemoryTypeMask;
1530
1531 if (lengthOfSegment)
1532 *lengthOfSegment = 0;
1533
1534 if (offset >= _length)
1535 return 0;
4452a7af 1536
b0d623f7
A
1537 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1538 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1539 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1540 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2d21ac55 1541
b0d623f7
A
1542 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1543 {
1544 unsigned rangesIndex = 0;
1545 Ranges vec = _ranges;
1546 user_addr_t addr;
1547
1548 // Find starting address within the vector of ranges
1549 for (;;) {
1550 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1551 if (offset < length)
1552 break;
1553 offset -= length; // (make offset relative)
1554 rangesIndex++;
1555 }
1556
1557 // Now that we have the starting range,
1558 // lets find the last contiguous range
1559 addr += offset;
1560 length -= offset;
1561
1562 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1563 user_addr_t newAddr;
1564 IOPhysicalLength newLen;
1565
1566 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1567 if (addr + length != newAddr)
1568 break;
1569 length += newLen;
1570 }
1571 if (addr)
1572 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1573 }
1574 else
0c530ab8
A
1575 {
1576 IOMDDMAWalkSegmentState _state;
99c3a104 1577 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;
0c530ab8
A
1578
1579 state->fOffset = offset;
1580 state->fLength = _length - offset;
99c3a104 1581 state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);
0c530ab8
A
1582
1583 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1584
1585 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
b0d623f7 1586 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
0c530ab8
A
1587 ret, this, state->fOffset,
1588 state->fIOVMAddr, state->fLength);
1589 if (kIOReturnSuccess == ret)
1590 {
1591 address = state->fIOVMAddr;
1592 length = state->fLength;
1593 }
b0d623f7
A
1594
1595 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1596 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1597
1598 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1599 {
1600 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1601 {
1602 addr64_t origAddr = address;
1603 IOByteCount origLen = length;
1604
1605 address = mapper->mapAddr(origAddr);
1606 length = page_size - (address & (page_size - 1));
1607 while ((length < origLen)
1608 && ((address + length) == mapper->mapAddr(origAddr + length)))
1609 length += page_size;
1610 if (length > origLen)
1611 length = origLen;
1612 }
b0d623f7 1613 }
4452a7af
A
1614 }
1615
b0d623f7
A
1616 if (!address)
1617 length = 0;
1618
4452a7af
A
1619 if (lengthOfSegment)
1620 *lengthOfSegment = length;
1621
0c530ab8
A
1622 return (address);
1623}
1624
b0d623f7
A
1625#ifndef __LP64__
1626addr64_t
1627IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 1628{
b0d623f7 1629 addr64_t address = 0;
0c530ab8 1630
b0d623f7 1631 if (options & _kIOMemorySourceSegment)
0c530ab8 1632 {
b0d623f7
A
1633 address = getSourceSegment(offset, lengthOfSegment);
1634 }
1635 else if (options & kIOMemoryMapperNone)
1636 {
1637 address = getPhysicalSegment64(offset, lengthOfSegment);
1638 }
1639 else
1640 {
1641 address = getPhysicalSegment(offset, lengthOfSegment);
1642 }
0c530ab8 1643
b0d623f7
A
1644 return (address);
1645}
0c530ab8 1646
b0d623f7
A
1647addr64_t
1648IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1649{
1650 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1651}
0c530ab8 1652
b0d623f7
A
1653IOPhysicalAddress
1654IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1655{
1656 addr64_t address = 0;
1657 IOByteCount length = 0;
0c530ab8 1658
b0d623f7
A
1659 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1660
1661 if (lengthOfSegment)
1662 length = *lengthOfSegment;
0c530ab8
A
1663
1664 if ((address + length) > 0x100000000ULL)
1665 {
2d21ac55 1666 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 1667 address, (long) length, (getMetaClass())->getClassName());
0c530ab8
A
1668 }
1669
0c530ab8 1670 return ((IOPhysicalAddress) address);
55e303ae 1671}
de355530 1672
0c530ab8
A
1673addr64_t
1674IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae
A
1675{
1676 IOPhysicalAddress phys32;
1677 IOByteCount length;
1678 addr64_t phys64;
0c530ab8 1679 IOMapper * mapper = 0;
0b4e3aa0 1680
55e303ae
A
1681 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1682 if (!phys32)
1683 return 0;
0b4e3aa0 1684
55e303ae 1685 if (gIOSystemMapper)
0c530ab8
A
1686 mapper = gIOSystemMapper;
1687
1688 if (mapper)
1c79356b 1689 {
55e303ae
A
1690 IOByteCount origLen;
1691
0c530ab8 1692 phys64 = mapper->mapAddr(phys32);
55e303ae
A
1693 origLen = *lengthOfSegment;
1694 length = page_size - (phys64 & (page_size - 1));
1695 while ((length < origLen)
0c530ab8 1696 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
55e303ae
A
1697 length += page_size;
1698 if (length > origLen)
1699 length = origLen;
1700
1701 *lengthOfSegment = length;
0b4e3aa0 1702 }
55e303ae
A
1703 else
1704 phys64 = (addr64_t) phys32;
1c79356b 1705
55e303ae 1706 return phys64;
0b4e3aa0
A
1707}
1708
0c530ab8 1709IOPhysicalAddress
b0d623f7 1710IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 1711{
b0d623f7 1712 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
0b4e3aa0
A
1713}
1714
b0d623f7
A
1715IOPhysicalAddress
1716IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1717{
1718 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1719}
1c79356b 1720
b0d623f7
A
1721void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1722 IOByteCount * lengthOfSegment)
1723{
1724 if (_task == kernel_task)
1725 return (void *) getSourceSegment(offset, lengthOfSegment);
1726 else
1727 panic("IOGMD::getVirtualSegment deprecated");
91447636 1728
b0d623f7
A
1729 return 0;
1730}
1731#endif /* !__LP64__ */
91447636 1732
0c530ab8
A
1733IOReturn
1734IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1735{
99c3a104
A
1736 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1737 DMACommandOps params;
1738 IOReturn err;
1739
1740 params = (op & ~kIOMDDMACommandOperationMask & op);
1741 op &= kIOMDDMACommandOperationMask;
1742
0c530ab8
A
1743 if (kIOMDGetCharacteristics == op) {
1744 if (dataSize < sizeof(IOMDDMACharacteristics))
1745 return kIOReturnUnderrun;
1746
1747 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1748 data->fLength = getLength();
1749 data->fSGCount = 0;
b0d623f7 1750 data->fDirection = getDirection();
0c530ab8
A
1751 data->fIsPrepared = true; // Assume prepared - fails safe
1752 }
99c3a104 1753 else if (kIOMDWalkSegments == op) {
0c530ab8
A
1754 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1755 return kIOReturnUnderrun;
1756
1757 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1758 IOByteCount offset = (IOByteCount) data->fOffset;
1759
1760 IOPhysicalLength length;
0c530ab8 1761 if (data->fMapped && IOMapper::gSystem)
99c3a104 1762 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
0c530ab8 1763 else
99c3a104 1764 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
0c530ab8
A
1765 data->fLength = length;
1766 }
99c3a104
A
1767 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1768 else if (kIOMDDMAMap == op)
1769 {
1770 if (dataSize < sizeof(IOMDDMAMapArgs))
1771 return kIOReturnUnderrun;
1772 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1773
1774 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1775
39236c6e 1776 data->fMapContig = true;
99c3a104
A
1777 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1778 return (err);
1779 }
1780 else return kIOReturnBadArgument;
0c530ab8
A
1781
1782 return kIOReturnSuccess;
1783}
1784
b0d623f7
A
1785static IOReturn
1786purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1787{
1788 IOReturn err = kIOReturnSuccess;
1789
1790 *control = VM_PURGABLE_SET_STATE;
39236c6e
A
1791
1792 enum { kIOMemoryPurgeableControlMask = 15 };
1793
1794 switch (kIOMemoryPurgeableControlMask & newState)
b0d623f7
A
1795 {
1796 case kIOMemoryPurgeableKeepCurrent:
1797 *control = VM_PURGABLE_GET_STATE;
1798 break;
1799
1800 case kIOMemoryPurgeableNonVolatile:
1801 *state = VM_PURGABLE_NONVOLATILE;
1802 break;
1803 case kIOMemoryPurgeableVolatile:
39236c6e 1804 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
b0d623f7
A
1805 break;
1806 case kIOMemoryPurgeableEmpty:
1807 *state = VM_PURGABLE_EMPTY;
1808 break;
1809 default:
1810 err = kIOReturnBadArgument;
1811 break;
1812 }
1813 return (err);
1814}
1815
1816static IOReturn
1817purgeableStateBits(int * state)
1818{
1819 IOReturn err = kIOReturnSuccess;
1820
39236c6e 1821 switch (VM_PURGABLE_STATE_MASK & *state)
b0d623f7
A
1822 {
1823 case VM_PURGABLE_NONVOLATILE:
1824 *state = kIOMemoryPurgeableNonVolatile;
1825 break;
1826 case VM_PURGABLE_VOLATILE:
1827 *state = kIOMemoryPurgeableVolatile;
1828 break;
1829 case VM_PURGABLE_EMPTY:
1830 *state = kIOMemoryPurgeableEmpty;
1831 break;
1832 default:
1833 *state = kIOMemoryPurgeableNonVolatile;
1834 err = kIOReturnNotReady;
1835 break;
1836 }
1837 return (err);
1838}
1839
1840IOReturn
1841IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1842 IOOptionBits * oldState )
1843{
1844 IOReturn err = kIOReturnSuccess;
1845 vm_purgable_t control;
1846 int state;
1847
1848 if (_memEntry)
1849 {
1850 err = super::setPurgeable(newState, oldState);
1851 }
1852 else
1853 {
1854 if (kIOMemoryThreadSafe & _flags)
1855 LOCK;
1856 do
1857 {
1858 // Find the appropriate vm_map for the given task
1859 vm_map_t curMap;
1860 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1861 {
1862 err = kIOReturnNotReady;
1863 break;
1864 }
39236c6e
A
1865 else if (!_task)
1866 {
1867 err = kIOReturnUnsupported;
1868 break;
1869 }
b0d623f7
A
1870 else
1871 curMap = get_task_map(_task);
1872
1873 // can only do one range
1874 Ranges vec = _ranges;
1875 IOOptionBits type = _flags & kIOMemoryTypeMask;
1876 user_addr_t addr;
1877 IOByteCount len;
1878 getAddrLenForInd(addr, len, type, vec, 0);
1879
1880 err = purgeableControlBits(newState, &control, &state);
1881 if (kIOReturnSuccess != err)
1882 break;
1883 err = mach_vm_purgable_control(curMap, addr, control, &state);
1884 if (oldState)
1885 {
1886 if (kIOReturnSuccess == err)
1887 {
1888 err = purgeableStateBits(&state);
1889 *oldState = state;
1890 }
1891 }
1892 }
1893 while (false);
1894 if (kIOMemoryThreadSafe & _flags)
1895 UNLOCK;
1896 }
1897 return (err);
1898}
1899
91447636
A
1900IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1901 IOOptionBits * oldState )
1902{
1903 IOReturn err = kIOReturnSuccess;
1904 vm_purgable_t control;
1905 int state;
1906
b0d623f7
A
1907 if (kIOMemoryThreadSafe & _flags)
1908 LOCK;
1909
91447636
A
1910 do
1911 {
1912 if (!_memEntry)
1913 {
1914 err = kIOReturnNotReady;
1915 break;
1916 }
b0d623f7
A
1917 err = purgeableControlBits(newState, &control, &state);
1918 if (kIOReturnSuccess != err)
1919 break;
91447636 1920 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
b0d623f7
A
1921 if (oldState)
1922 {
1923 if (kIOReturnSuccess == err)
1924 {
1925 err = purgeableStateBits(&state);
1926 *oldState = state;
1927 }
1928 }
91447636
A
1929 }
1930 while (false);
1931
b0d623f7
A
1932 if (kIOMemoryThreadSafe & _flags)
1933 UNLOCK;
1934
91447636
A
1935 return (err);
1936}
1937
39236c6e
A
1938
1939IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
1940 IOByteCount * dirtyPageCount )
1941{
1942 IOReturn err = kIOReturnSuccess;
1943 unsigned int _residentPageCount, _dirtyPageCount;
1944
1945 if (kIOMemoryThreadSafe & _flags) LOCK;
1946
1947 do
1948 {
1949 if (!_memEntry)
1950 {
1951 err = kIOReturnNotReady;
1952 break;
1953 }
1954 if ((residentPageCount == NULL) && (dirtyPageCount == NULL))
1955 {
1956 err = kIOReturnBadArgument;
1957 break;
1958 }
1959
1960 err = mach_memory_entry_get_page_counts((ipc_port_t) _memEntry,
1961 residentPageCount ? &_residentPageCount : NULL,
1962 dirtyPageCount ? &_dirtyPageCount : NULL);
1963 if (kIOReturnSuccess != err) break;
1964 if (residentPageCount) *residentPageCount = _residentPageCount;
1965 if (dirtyPageCount) *dirtyPageCount = _dirtyPageCount;
1966 }
1967 while (false);
1968
1969 if (kIOMemoryThreadSafe & _flags) UNLOCK;
1970
1971 return (err);
1972}
1973
1974
91447636
A
1975extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1976extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1977
0b4c1975
A
1978static void SetEncryptOp(addr64_t pa, unsigned int count)
1979{
1980 ppnum_t page, end;
1981
1982 page = atop_64(round_page_64(pa));
1983 end = atop_64(trunc_page_64(pa + count));
1984 for (; page < end; page++)
1985 {
1986 pmap_clear_noencrypt(page);
1987 }
1988}
1989
1990static void ClearEncryptOp(addr64_t pa, unsigned int count)
1991{
1992 ppnum_t page, end;
1993
1994 page = atop_64(round_page_64(pa));
1995 end = atop_64(trunc_page_64(pa + count));
1996 for (; page < end; page++)
1997 {
1998 pmap_set_noencrypt(page);
1999 }
2000}
2001
91447636
A
2002IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2003 IOByteCount offset, IOByteCount length )
2004{
2005 IOByteCount remaining;
316670eb 2006 unsigned int res;
91447636
A
2007 void (*func)(addr64_t pa, unsigned int count) = 0;
2008
2009 switch (options)
2010 {
2011 case kIOMemoryIncoherentIOFlush:
2012 func = &dcache_incoherent_io_flush64;
2013 break;
2014 case kIOMemoryIncoherentIOStore:
2015 func = &dcache_incoherent_io_store64;
2016 break;
0b4c1975
A
2017
2018 case kIOMemorySetEncrypted:
2019 func = &SetEncryptOp;
2020 break;
2021 case kIOMemoryClearEncrypted:
2022 func = &ClearEncryptOp;
2023 break;
91447636
A
2024 }
2025
2026 if (!func)
2027 return (kIOReturnUnsupported);
2028
b0d623f7
A
2029 if (kIOMemoryThreadSafe & _flags)
2030 LOCK;
2031
316670eb 2032 res = 0x0UL;
91447636
A
2033 remaining = length = min(length, getLength() - offset);
2034 while (remaining)
2035 // (process another target segment?)
2036 {
2037 addr64_t dstAddr64;
2038 IOByteCount dstLen;
2039
b0d623f7 2040 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
91447636
A
2041 if (!dstAddr64)
2042 break;
2043
2044 // Clip segment length to remaining
2045 if (dstLen > remaining)
2046 dstLen = remaining;
2047
2048 (*func)(dstAddr64, dstLen);
2049
2050 offset += dstLen;
2051 remaining -= dstLen;
2052 }
2053
b0d623f7
A
2054 if (kIOMemoryThreadSafe & _flags)
2055 UNLOCK;
2056
91447636
A
2057 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2058}
2059
316670eb 2060#if defined(__i386__) || defined(__x86_64__)
55e303ae
A
2061extern vm_offset_t first_avail;
2062#define io_kernel_static_end first_avail
316670eb
A
2063#else
2064#error io_kernel_static_end is undefined for this architecture
2065#endif
55e303ae
A
2066
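// io_get_kernel_static_upl
//
// Build a page list for memory that lives in the static kernel mapping
// (below io_kernel_static_end) by looking up each page with
// pmap_find_phys(); no UPL object is created (*upl is left NULL). The
// highest physical page seen is reported, and kIOReturnVMError is
// returned if any page in the range is not resident.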
2067static kern_return_t
2068io_get_kernel_static_upl(
91447636 2069 vm_map_t /* map */,
b0d623f7 2070 uintptr_t offset,
55e303ae
A
2071 vm_size_t *upl_size,
2072 upl_t *upl,
2073 upl_page_info_array_t page_list,
0c530ab8
A
2074 unsigned int *count,
2075 ppnum_t *highest_page)
1c79356b 2076{
55e303ae
A
2077 unsigned int pageCount, page;
2078 ppnum_t phys;
0c530ab8 2079 ppnum_t highestPage = 0;
1c79356b 2080
55e303ae
A
2081 pageCount = atop_32(*upl_size);
2082 if (pageCount > *count)
2083 pageCount = *count;
1c79356b 2084
55e303ae 2085 *upl = NULL;
1c79356b 2086
55e303ae
A
2087 for (page = 0; page < pageCount; page++)
2088 {
2089 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2090 if (!phys)
2091 break;
2092 page_list[page].phys_addr = phys;
2093 page_list[page].pageout = 0;
2094 page_list[page].absent = 0;
2095 page_list[page].dirty = 0;
2096 page_list[page].precious = 0;
2097 page_list[page].device = 0;
0c530ab8 2098 if (phys > highestPage)
b0d623f7 2099 highestPage = phys;
55e303ae 2100 }
0b4e3aa0 2101
0c530ab8
A
2102 *highest_page = highestPage;
2103
55e303ae
A
2104 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2105}
0b4e3aa0 2106
55e303ae
A
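// wireVirtual
//
// Wire down the pages backing this descriptor's virtual ranges for I/O.
// Each range is covered by one or more UPLs (from the static kernel map,
// the named memory entry, or vm_map_create_upl), and one ioPLBlock per
// UPL is appended to _memoryEntries. The highest physical page touched
// is recorded in _highestPage; on failure the UPLs created so far are
// aborted and deallocated. If the descriptor is already wired, only the
// read-only versus writable-wire check is performed.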
2107IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2108{
91447636 2109 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 2110 IOReturn error = kIOReturnCannotWire;
55e303ae 2111 ioGMDData *dataP;
99c3a104 2112 upl_page_info_array_t pageInfo;
39236c6e
A
2113 ppnum_t mapBase;
2114 ipc_port_t sharedMem;
1c79356b 2115
0c530ab8 2116 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 2117
39236c6e
A
2118 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2119 forDirection = (IODirection) (forDirection | getDirection());
55e303ae
A
2120
2121 int uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 2122 switch (kIODirectionOutIn & forDirection)
55e303ae
A
2123 {
2124 case kIODirectionOut:
2125 // Pages do not need to be marked as dirty on commit
2126 uplFlags = UPL_COPYOUT_FROM;
55e303ae
A
2127 break;
2128
2129 case kIODirectionIn:
2130 default:
2131 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2132 break;
2133 }
55e303ae 2134
39236c6e
A
2135 if (_wireCount)
2136 {
2137 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2138 {
2139 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2140 error = kIOReturnNotWritable;
2141 }
2142 else error = kIOReturnSuccess;
2143 return (error);
2144 }
2145
2146 dataP = getDataP(_memoryEntries);
2147 IOMapper *mapper;
2148 mapper = dataP->fMapper;
2149 dataP->fMappedBase = 0;
2150
2151 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
0c530ab8 2152 if (kIODirectionPrepareToPhys32 & forDirection)
99c3a104
A
2153 {
2154 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2155 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2156 }
39236c6e
A
2157 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2158 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2159
2160 mapBase = 0;
2161 sharedMem = (ipc_port_t) _memEntry;
0c530ab8 2162
99c3a104
A
2163 // Note that appendBytes(NULL) zeros the data up to the desired length.
2164 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2165 dataP = 0;
2166
91447636 2167 // Find the appropriate vm_map for the given task
55e303ae
A
2168 vm_map_t curMap;
2169 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2170 curMap = 0;
2171 else
2172 { curMap = get_task_map(_task); }
2173
91447636
A
2174 // Iterate over the vector of virtual ranges
2175 Ranges vec = _ranges;
39236c6e
A
2176 unsigned int pageIndex = 0;
2177 IOByteCount mdOffset = 0;
2178 ppnum_t highestPage = 0;
99c3a104 2179
55e303ae
A
2180 for (UInt range = 0; range < _rangesCount; range++) {
2181 ioPLBlock iopl;
91447636 2182 user_addr_t startPage;
55e303ae 2183 IOByteCount numBytes;
0c530ab8 2184 ppnum_t highPage = 0;
55e303ae 2185
91447636
A
2186 // Get the startPage address and length of vec[range]
2187 getAddrLenForInd(startPage, numBytes, type, vec, range);
b0d623f7 2188 iopl.fPageOffset = startPage & PAGE_MASK;
91447636
A
2189 numBytes += iopl.fPageOffset;
2190 startPage = trunc_page_64(startPage);
2191
55e303ae 2192 if (mapper)
99c3a104 2193 iopl.fMappedPage = mapBase + pageIndex;
55e303ae 2194 else
99c3a104 2195 iopl.fMappedPage = 0;
55e303ae 2196
91447636 2197 // Iterate over the current range, creating UPLs
55e303ae 2198 while (numBytes) {
91447636
A
2199 vm_address_t kernelStart = (vm_address_t) startPage;
2200 vm_map_t theMap;
2201 if (curMap)
2202 theMap = curMap;
2203 else if (!sharedMem) {
2204 assert(_task == kernel_task);
2205 theMap = IOPageableMapForAddress(kernelStart);
2206 }
2207 else
2208 theMap = NULL;
2209
55e303ae 2210 int ioplFlags = uplFlags;
99c3a104
A
2211 dataP = getDataP(_memoryEntries);
2212 pageInfo = getPageList(dataP);
55e303ae
A
2213 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2214
b0d623f7 2215 vm_size_t ioplSize = round_page(numBytes);
55e303ae
A
2216 unsigned int numPageInfo = atop_32(ioplSize);
2217
91447636 2218 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
55e303ae 2219 error = io_get_kernel_static_upl(theMap,
91447636
A
2220 kernelStart,
2221 &ioplSize,
2222 &iopl.fIOPL,
2223 baseInfo,
0c530ab8
A
2224 &numPageInfo,
2225 &highPage);
91447636
A
2226 }
2227 else if (sharedMem) {
55e303ae 2228 error = memory_object_iopl_request(sharedMem,
91447636
A
2229 ptoa_32(pageIndex),
2230 &ioplSize,
2231 &iopl.fIOPL,
2232 baseInfo,
2233 &numPageInfo,
2234 &ioplFlags);
2235 }
2236 else {
2237 assert(theMap);
2238 error = vm_map_create_upl(theMap,
2239 startPage,
b0d623f7 2240 (upl_size_t*)&ioplSize,
91447636
A
2241 &iopl.fIOPL,
2242 baseInfo,
2243 &numPageInfo,
2244 &ioplFlags);
de355530
A
2245 }
2246
55e303ae
A
2247 assert(ioplSize);
2248 if (error != KERN_SUCCESS)
2249 goto abortExit;
2250
0c530ab8
A
2251 if (iopl.fIOPL)
2252 highPage = upl_get_highest_page(iopl.fIOPL);
2253 if (highPage > highestPage)
2254 highestPage = highPage;
2255
2d21ac55 2256 error = kIOReturnCannotWire;
55e303ae
A
2257
2258 if (baseInfo->device) {
2259 numPageInfo = 1;
39236c6e 2260 iopl.fFlags = kIOPLOnDevice;
55e303ae
A
2261 }
2262 else {
2263 iopl.fFlags = 0;
55e303ae
A
2264 }
2265
2266 iopl.fIOMDOffset = mdOffset;
2267 iopl.fPageInfo = pageIndex;
39236c6e 2268 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
55e303ae 2269
6d2010ae
A
2270#if 0
 2271 // used to remove the upl for auto prepares here, for some errant code
 2272 // that freed memory before releasing the descriptor pointing at it
55e303ae
A
2273 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2274 {
91447636
A
2275 upl_commit(iopl.fIOPL, 0, 0);
2276 upl_deallocate(iopl.fIOPL);
55e303ae 2277 iopl.fIOPL = 0;
de355530 2278 }
6d2010ae 2279#endif
55e303ae
A
2280
2281 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
 2282 // Clean up partially created and unsaved iopl
91447636
A
2283 if (iopl.fIOPL) {
2284 upl_abort(iopl.fIOPL, 0);
2285 upl_deallocate(iopl.fIOPL);
2286 }
55e303ae
A
2287 goto abortExit;
2288 }
99c3a104 2289 dataP = 0;
55e303ae
A
2290
 2291 // Check for multiple iopls in one virtual range
2292 pageIndex += numPageInfo;
2293 mdOffset -= iopl.fPageOffset;
2294 if (ioplSize < numBytes) {
2295 numBytes -= ioplSize;
2296 startPage += ioplSize;
2297 mdOffset += ioplSize;
2298 iopl.fPageOffset = 0;
99c3a104 2299 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
55e303ae
A
2300 }
2301 else {
2302 mdOffset += numBytes;
2303 break;
2304 }
1c79356b
A
2305 }
2306 }
55e303ae 2307
0c530ab8
A
2308 _highestPage = highestPage;
2309
39236c6e
A
2310 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2311
1c79356b
A
2312 return kIOReturnSuccess;
2313
2314abortExit:
55e303ae
A
2315 {
2316 dataP = getDataP(_memoryEntries);
91447636 2317 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
2318 ioPLBlock *ioplList = getIOPLList(dataP);
2319
2320 for (UInt range = 0; range < done; range++)
2321 {
91447636
A
2322 if (ioplList[range].fIOPL) {
2323 upl_abort(ioplList[range].fIOPL, 0);
2324 upl_deallocate(ioplList[range].fIOPL);
2325 }
55e303ae 2326 }
6d2010ae 2327 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
1c79356b
A
2328 }
2329
2d21ac55
A
2330 if (error == KERN_FAILURE)
2331 error = kIOReturnCannotWire;
39236c6e
A
2332 else if (error == KERN_MEMORY_ERROR)
2333 error = kIOReturnNoResources;
2d21ac55 2334
55e303ae
A
2335 return error;
2336}
d7e50217 2337
99c3a104
A
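// initMemoryEntries
//
// (Re)initialize the _memoryEntries OSData that holds the ioGMDData
// bookkeeping: reserve capacity, zero the header, resolve the system
// mapper when kIOMapperWaitSystem was requested, and reset the DMA map
// defaults (64-bit addressing, no alignment, unprepared preparation ID).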
2338bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2339{
2340 ioGMDData * dataP;
2341 unsigned dataSize = size;
2342
2343 if (!_memoryEntries) {
2344 _memoryEntries = OSData::withCapacity(dataSize);
2345 if (!_memoryEntries)
2346 return false;
2347 }
2348 else if (!_memoryEntries->initWithCapacity(dataSize))
2349 return false;
2350
2351 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2352 dataP = getDataP(_memoryEntries);
2353
2354 if (mapper == kIOMapperWaitSystem) {
2355 IOMapper::checkForSystemMapper();
2356 mapper = IOMapper::gSystem;
2357 }
2358 dataP->fMapper = mapper;
2359 dataP->fPageCnt = 0;
2360 dataP->fMappedBase = 0;
2361 dataP->fDMAMapNumAddressBits = 64;
2362 dataP->fDMAMapAlignment = 0;
2363 dataP->fPreparationID = kIOPreparationIDUnprepared;
39236c6e 2364 dataP->fDiscontig = false;
99c3a104
A
2365
2366 return (true);
2367}
2368
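// dmaMap
//
// Establish a DMA mapping through an IOMapper. A first pass over the
// segments counts the pages (and captures the initial page offset;
// segments after the first must be page aligned), the mapper then
// allocates IOVM space with iovmMapMemory(), and a second pass inserts
// each physical page with iovmInsert(). The mapped DMA address
// (allocation base plus page offset) is returned in *address.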
2369IOReturn IOMemoryDescriptor::dmaMap(
2370 IOMapper * mapper,
2371 const IODMAMapSpecification * mapSpec,
2372 uint64_t offset,
2373 uint64_t length,
2374 uint64_t * address,
2375 ppnum_t * mapPages)
2376{
2377 IOMDDMAWalkSegmentState walkState;
2378 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2379 IOOptionBits mdOp;
2380 IOReturn ret;
2381 IOPhysicalLength segLen;
2382 addr64_t phys, align, pageOffset;
2383 ppnum_t base, pageIndex, pageCount;
2384 uint64_t index;
2385 uint32_t mapOptions = 0;
2386
2387 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2388
2389 walkArgs->fMapped = false;
2390 mdOp = kIOMDFirstSegment;
2391 pageCount = 0;
2392 for (index = 0; index < length; )
2393 {
2394 if (index && (page_mask & (index + pageOffset))) break;
2395
2396 walkArgs->fOffset = offset + index;
2397 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2398 mdOp = kIOMDWalkSegments;
2399 if (ret != kIOReturnSuccess) break;
2400 phys = walkArgs->fIOVMAddr;
2401 segLen = walkArgs->fLength;
2402
2403 align = (phys & page_mask);
2404 if (!index) pageOffset = align;
2405 else if (align) break;
2406 pageCount += atop_64(round_page_64(align + segLen));
2407 index += segLen;
2408 }
2409
2410 if (index < length) return (kIOReturnVMError);
2411
2412 base = mapper->iovmMapMemory(this, offset, pageCount,
2413 mapOptions, NULL, mapSpec);
2414
2415 if (!base) return (kIOReturnNoResources);
2416
2417 mdOp = kIOMDFirstSegment;
2418 for (pageIndex = 0, index = 0; index < length; )
2419 {
2420 walkArgs->fOffset = offset + index;
2421 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2422 mdOp = kIOMDWalkSegments;
2423 if (ret != kIOReturnSuccess) break;
2424 phys = walkArgs->fIOVMAddr;
2425 segLen = walkArgs->fLength;
2426
2427 ppnum_t page = atop_64(phys);
2428 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2429 while (count--)
2430 {
2431 mapper->iovmInsert(base, pageIndex, page);
2432 page++;
2433 pageIndex++;
2434 }
2435 index += segLen;
2436 }
2437 if (pageIndex != pageCount) panic("pageIndex");
2438
2439 *address = ptoa_64(base) + pageOffset;
2440 if (mapPages) *mapPages = pageCount;
2441
2442 return (kIOReturnSuccess);
2443}
2444
2445IOReturn IOGeneralMemoryDescriptor::dmaMap(
2446 IOMapper * mapper,
2447 const IODMAMapSpecification * mapSpec,
2448 uint64_t offset,
2449 uint64_t length,
2450 uint64_t * address,
2451 ppnum_t * mapPages)
2452{
2453 IOReturn err = kIOReturnSuccess;
2454 ioGMDData * dataP;
2455 IOOptionBits type = _flags & kIOMemoryTypeMask;
2456
2457 *address = 0;
2458 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2459
2460 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2461 || offset || (length != _length))
2462 {
2463 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2464 }
2465 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2466 {
2467 const ioPLBlock * ioplList = getIOPLList(dataP);
2468 upl_page_info_t * pageList;
2469 uint32_t mapOptions = 0;
2470 ppnum_t base;
2471
2472 IODMAMapSpecification mapSpec;
2473 bzero(&mapSpec, sizeof(mapSpec));
2474 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2475 mapSpec.alignment = dataP->fDMAMapAlignment;
2476
2477 // For external UPLs the fPageInfo field points directly to
2478 // the upl's upl_page_info_t array.
2479 if (ioplList->fFlags & kIOPLExternUPL)
2480 {
2481 pageList = (upl_page_info_t *) ioplList->fPageInfo;
2482 mapOptions |= kIODMAMapPagingPath;
2483 }
2484 else
2485 pageList = getPageList(dataP);
2486
2487 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2488
2489 // Check for direct device non-paged memory
2490 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2491
2492 base = mapper->iovmMapMemory(
2493 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2494 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2495 if (mapPages) *mapPages = _pages;
2496 }
2497
2498 return (err);
2499}
2500
55e303ae
A
2501/*
2502 * prepare
2503 *
2504 * Prepare the memory for an I/O transfer. This involves paging in
2505 * the memory, if necessary, and wiring it down for the duration of
2506 * the transfer. The complete() method completes the processing of
 2507 * the memory after the I/O transfer finishes. This method needn't
 2508 * be called for non-pageable memory.
2509 */
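/*
 * Illustrative usage only (not part of this file): a minimal sketch of the
 * prepare()/complete() pairing a driver would use around an I/O transfer.
 * The buffer address, length and task names below are assumptions for the
 * example.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                  userAddr, userLen, kIODirectionOut, task);
 *   if (md && (kIOReturnSuccess == md->prepare()))
 *   {
 *       // ... program the hardware / run the DMA against the wired pages ...
 *       md->complete();   // always paired with the successful prepare()
 *   }
 *   if (md) md->release();
 */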
99c3a104 2510
55e303ae
A
2511IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2512{
91447636
A
2513 IOReturn error = kIOReturnSuccess;
2514 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 2515
2d21ac55
A
2516 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2517 return kIOReturnSuccess;
2518
2519 if (_prepareLock)
2520 IOLockLock(_prepareLock);
2521
39236c6e
A
2522 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
2523 {
2524 error = wireVirtual(forDirection);
de355530
A
2525 }
2526
2d21ac55 2527 if (kIOReturnSuccess == error)
0b4c1975 2528 {
99c3a104
A
2529 if (1 == ++_wireCount)
2530 {
2531 if (kIOMemoryClearEncrypt & _flags)
2532 {
2533 performOperation(kIOMemoryClearEncrypted, 0, _length);
2534 }
2535 }
0b4c1975
A
2536 }
2537
2d21ac55
A
2538 if (_prepareLock)
2539 IOLockUnlock(_prepareLock);
2540
2541 return error;
1c79356b
A
2542}
2543
2544/*
2545 * complete
2546 *
2547 * Complete processing of the memory after an I/O transfer finishes.
2548 * This method should not be called unless a prepare was previously
 2549 * issued; the prepare() and complete() must occur in pairs, before
 2550 * and after an I/O transfer involving pageable memory.
2551 */
6d2010ae 2552
55e303ae 2553IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1c79356b 2554{
2d21ac55 2555 IOOptionBits type = _flags & kIOMemoryTypeMask;
1c79356b 2556
2d21ac55
A
2557 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2558 return kIOReturnSuccess;
1c79356b 2559
2d21ac55
A
2560 if (_prepareLock)
2561 IOLockLock(_prepareLock);
91447636 2562
2d21ac55
A
2563 assert(_wireCount);
2564
2565 if (_wireCount)
2566 {
0b4c1975
A
2567 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2568 {
2569 performOperation(kIOMemorySetEncrypted, 0, _length);
2570 }
2571
2d21ac55
A
2572 _wireCount--;
2573 if (!_wireCount)
2574 {
2575 IOOptionBits type = _flags & kIOMemoryTypeMask;
2576 ioGMDData * dataP = getDataP(_memoryEntries);
2577 ioPLBlock *ioplList = getIOPLList(dataP);
91447636 2578 UInt count = getNumIOPL(_memoryEntries, dataP);
55e303ae 2579
b0d623f7
A
2580#if IOMD_DEBUG_DMAACTIVE
2581 if (__iomd_reservedA) panic("complete() while dma active");
2582#endif /* IOMD_DEBUG_DMAACTIVE */
2583
99c3a104
A
2584 if (dataP->fMappedBase) {
2585 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2586 dataP->fMappedBase = 0;
2587 }
2d21ac55
A
2588 // Only complete iopls that we created which are for TypeVirtual
2589 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2590 for (UInt ind = 0; ind < count; ind++)
91447636
A
2591 if (ioplList[ind].fIOPL) {
2592 upl_commit(ioplList[ind].fIOPL, 0, 0);
2593 upl_deallocate(ioplList[ind].fIOPL);
2594 }
6d2010ae
A
2595 } else if (kIOMemoryTypeUPL == type) {
2596 upl_set_referenced(ioplList[0].fIOPL, false);
2d21ac55 2597 }
6d2010ae
A
2598
2599 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
b0d623f7
A
2600
2601 dataP->fPreparationID = kIOPreparationIDUnprepared;
2d21ac55 2602 }
1c79356b 2603 }
2d21ac55
A
2604
2605 if (_prepareLock)
2606 IOLockUnlock(_prepareLock);
2607
1c79356b
A
2608 return kIOReturnSuccess;
2609}
2610
2611IOReturn IOGeneralMemoryDescriptor::doMap(
2d21ac55
A
2612 vm_map_t __addressMap,
2613 IOVirtualAddress * __address,
1c79356b 2614 IOOptionBits options,
2d21ac55
A
2615 IOByteCount __offset,
2616 IOByteCount __length )
2617
1c79356b 2618{
b0d623f7 2619#ifndef __LP64__
2d21ac55 2620 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
b0d623f7 2621#endif /* !__LP64__ */
2d21ac55 2622
b0d623f7 2623 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
2624 mach_vm_size_t offset = mapping->fOffset + __offset;
2625 mach_vm_size_t length = mapping->fLength;
2626
b0d623f7 2627 kern_return_t kr = kIOReturnVMError;
0b4e3aa0 2628 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 2629
91447636
A
2630 IOOptionBits type = _flags & kIOMemoryTypeMask;
2631 Ranges vec = _ranges;
2632
2633 user_addr_t range0Addr = 0;
2634 IOByteCount range0Len = 0;
2635
060df5ea
A
2636 if ((offset >= _length) || ((offset + length) > _length))
2637 return( kIOReturnBadArgument );
2638
91447636
A
2639 if (vec.v)
2640 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2641
1c79356b 2642 // mapping source == dest? (could be much better)
91447636 2643 if( _task
2d21ac55
A
2644 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2645 && (1 == _rangesCount) && (0 == offset)
2646 && range0Addr && (length <= range0Len) )
2647 {
2648 mapping->fAddress = range0Addr;
2649 mapping->fOptions |= kIOMapStatic;
2650
2651 return( kIOReturnSuccess );
1c79356b
A
2652 }
2653
0b4e3aa0 2654 if( 0 == sharedMem) {
1c79356b 2655
91447636 2656 vm_size_t size = ptoa_32(_pages);
1c79356b 2657
0b4e3aa0 2658 if( _task) {
0c530ab8 2659
91447636 2660 memory_object_size_t actualSize = size;
2d21ac55
A
2661 vm_prot_t prot = VM_PROT_READ;
2662 if (!(kIOMapReadOnly & options))
2663 prot |= VM_PROT_WRITE;
2664 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2665 prot |= VM_PROT_WRITE;
2666
060df5ea
A
2667 if (_rangesCount == 1)
2668 {
2669 kr = mach_make_memory_entry_64(get_task_map(_task),
2670 &actualSize, range0Addr,
2671 prot, &sharedMem,
2672 NULL);
2673 }
2674 if( (_rangesCount != 1)
2675 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2676 do
b0d623f7 2677 {
0b4e3aa0 2678#if IOASSERT
060df5ea
A
2679 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2680 _rangesCount, (UInt64)actualSize, (UInt64)size);
0b4e3aa0
A
2681#endif
2682 kr = kIOReturnVMError;
060df5ea
A
2683 if (sharedMem)
2684 {
2685 ipc_port_release_send(sharedMem);
2686 sharedMem = MACH_PORT_NULL;
2687 }
b0d623f7 2688
060df5ea
A
2689 mach_vm_address_t address, segDestAddr;
2690 mach_vm_size_t mapLength;
2691 unsigned rangesIndex;
2692 IOOptionBits type = _flags & kIOMemoryTypeMask;
2693 user_addr_t srcAddr;
2694 IOPhysicalLength segLen = 0;
2695
2696 // Find starting address within the vector of ranges
2697 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2698 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2699 if (offset < segLen)
2700 break;
2701 offset -= segLen; // (make offset relative)
2702 }
2703
2704 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
b0d623f7 2705 address = trunc_page_64(mapping->fAddress);
060df5ea 2706
b0d623f7
A
2707 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2708 {
060df5ea
A
2709 vm_map_t map = mapping->fAddressMap;
2710 kr = IOMemoryDescriptorMapCopy(&map,
b0d623f7
A
2711 options,
2712 offset, &address, round_page_64(length + pageOffset));
060df5ea
A
2713 if (kr == KERN_SUCCESS)
2714 {
2715 segDestAddr = address;
2716 segLen -= offset;
316670eb 2717 srcAddr += offset;
060df5ea
A
2718 mapLength = length;
2719
2720 while (true)
2721 {
2722 vm_prot_t cur_prot, max_prot;
316670eb
A
2723
2724 if (segLen > length) segLen = length;
060df5ea
A
2725 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2726 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2727 get_task_map(_task), trunc_page_64(srcAddr),
2728 FALSE /* copy */,
2729 &cur_prot,
2730 &max_prot,
2731 VM_INHERIT_NONE);
2732 if (KERN_SUCCESS == kr)
2733 {
2734 if ((!(VM_PROT_READ & cur_prot))
2735 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2736 {
2737 kr = KERN_PROTECTION_FAILURE;
2738 }
2739 }
2740 if (KERN_SUCCESS != kr)
2741 break;
2742 segDestAddr += segLen;
2743 mapLength -= segLen;
2744 if (!mapLength)
2745 break;
2746 rangesIndex++;
2747 if (rangesIndex >= _rangesCount)
2748 {
2749 kr = kIOReturnBadArgument;
2750 break;
2751 }
2752 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2753 if (srcAddr & PAGE_MASK)
2754 {
2755 kr = kIOReturnBadArgument;
2756 break;
2757 }
2758 if (segLen > mapLength)
2759 segLen = mapLength;
2760 }
2761 if (KERN_SUCCESS != kr)
2762 {
2763 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2764 }
2765 }
2766
2767 if (KERN_SUCCESS == kr)
b0d623f7
A
2768 mapping->fAddress = address + pageOffset;
2769 else
2770 mapping->fAddress = NULL;
2771 }
2772 }
060df5ea 2773 while (false);
b0d623f7
A
2774 }
2775 else do
2776 { // _task == 0, must be physical
0b4e3aa0 2777
55e303ae
A
2778 memory_object_t pager;
2779 unsigned int flags = 0;
2780 addr64_t pa;
9bccf70c
A
2781 IOPhysicalLength segLen;
2782
b0d623f7 2783 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 2784
316670eb
A
2785 if( !getKernelReserved())
2786 continue;
2787 reserved->dp.pagerContig = (1 == _rangesCount);
2788 reserved->dp.memory = this;
9bccf70c 2789
55e303ae
A
 2790 /* What cache mode do we need? */
2791 switch(options & kIOMapCacheMask ) {
9bccf70c
A
2792
2793 case kIOMapDefaultCache:
2794 default:
55e303ae 2795 flags = IODefaultCacheBits(pa);
2d21ac55
A
2796 if (DEVICE_PAGER_CACHE_INHIB & flags)
2797 {
2798 if (DEVICE_PAGER_GUARDED & flags)
2799 mapping->fOptions |= kIOMapInhibitCache;
2800 else
2801 mapping->fOptions |= kIOMapWriteCombineCache;
2802 }
2803 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2804 mapping->fOptions |= kIOMapWriteThruCache;
2805 else
2806 mapping->fOptions |= kIOMapCopybackCache;
55e303ae 2807 break;
9bccf70c
A
2808
2809 case kIOMapInhibitCache:
55e303ae
A
2810 flags = DEVICE_PAGER_CACHE_INHIB |
2811 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2812 break;
9bccf70c
A
2813
2814 case kIOMapWriteThruCache:
55e303ae
A
2815 flags = DEVICE_PAGER_WRITE_THROUGH |
2816 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2817 break;
9bccf70c
A
2818
2819 case kIOMapCopybackCache:
55e303ae
A
2820 flags = DEVICE_PAGER_COHERENT;
2821 break;
2822
2823 case kIOMapWriteCombineCache:
2824 flags = DEVICE_PAGER_CACHE_INHIB |
2825 DEVICE_PAGER_COHERENT;
2826 break;
9bccf70c
A
2827 }
2828
316670eb 2829 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
9bccf70c 2830
b0d623f7 2831 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
9bccf70c 2832 size, flags);
0b4e3aa0
A
2833 assert( pager );
2834
2835 if( pager) {
0b4e3aa0
A
2836 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2837 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2838
2839 assert( KERN_SUCCESS == kr );
2d21ac55
A
2840 if( KERN_SUCCESS != kr)
2841 {
9bccf70c 2842 device_pager_deallocate( pager );
0b4e3aa0
A
2843 pager = MACH_PORT_NULL;
2844 sharedMem = MACH_PORT_NULL;
2845 }
2846 }
9bccf70c 2847 if( pager && sharedMem)
316670eb 2848 reserved->dp.devicePager = pager;
1c79356b 2849
1c79356b
A
2850 } while( false );
2851
0b4e3aa0
A
2852 _memEntry = (void *) sharedMem;
2853 }
2854
2d21ac55
A
2855 IOReturn result;
2856 if (0 == sharedMem)
b0d623f7 2857 result = kr;
9bccf70c 2858 else
2d21ac55
A
2859 result = super::doMap( __addressMap, __address,
2860 options, __offset, __length );
0b4e3aa0 2861
2d21ac55 2862 return( result );
1c79356b
A
2863}
2864
2865IOReturn IOGeneralMemoryDescriptor::doUnmap(
2866 vm_map_t addressMap,
2d21ac55
A
2867 IOVirtualAddress __address,
2868 IOByteCount __length )
1c79356b 2869{
2d21ac55 2870 return (super::doUnmap(addressMap, __address, __length));
1c79356b
A
2871}
2872
2873/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2874
b0d623f7
A
2875#undef super
2876#define super OSObject
1c79356b 2877
b0d623f7 2878OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 2879
b0d623f7
A
2880OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2881OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2882OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2883OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2884OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2885OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2886OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2887OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 2888
b0d623f7
A
2889/* ex-inline function implementation */
2890IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2891 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
2892
2893/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2894
b0d623f7 2895bool IOMemoryMap::init(
2d21ac55
A
2896 task_t intoTask,
2897 mach_vm_address_t toAddress,
2898 IOOptionBits _options,
2899 mach_vm_size_t _offset,
2900 mach_vm_size_t _length )
1c79356b 2901{
2d21ac55 2902 if (!intoTask)
1c79356b
A
2903 return( false);
2904
2d21ac55
A
2905 if (!super::init())
2906 return(false);
1c79356b 2907
2d21ac55
A
2908 fAddressMap = get_task_map(intoTask);
2909 if (!fAddressMap)
2910 return(false);
2911 vm_map_reference(fAddressMap);
1c79356b 2912
2d21ac55
A
2913 fAddressTask = intoTask;
2914 fOptions = _options;
2915 fLength = _length;
2916 fOffset = _offset;
2917 fAddress = toAddress;
1c79356b 2918
2d21ac55 2919 return (true);
1c79356b
A
2920}
2921
b0d623f7 2922bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 2923{
2d21ac55
A
2924 if (!_memory)
2925 return(false);
1c79356b 2926
2d21ac55 2927 if (!fSuperMap)
91447636 2928 {
2d21ac55 2929 if( (_offset + fLength) > _memory->getLength())
91447636 2930 return( false);
2d21ac55 2931 fOffset = _offset;
91447636 2932 }
1c79356b
A
2933
2934 _memory->retain();
2d21ac55 2935 if (fMemory)
91447636 2936 {
2d21ac55
A
2937 if (fMemory != _memory)
2938 fMemory->removeMapping(this);
2939 fMemory->release();
1c79356b 2940 }
2d21ac55 2941 fMemory = _memory;
91447636 2942
2d21ac55 2943 return( true );
1c79356b
A
2944}
2945
0b4e3aa0
A
2946struct IOMemoryDescriptorMapAllocRef
2947{
2948 ipc_port_t sharedMem;
060df5ea 2949 vm_map_t map;
2d21ac55
A
2950 mach_vm_address_t mapped;
2951 mach_vm_size_t size;
2952 mach_vm_size_t sourceOffset;
0b4e3aa0
A
2953 IOOptionBits options;
2954};
2955
2956static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2957{
2958 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2959 IOReturn err;
2960
2961 do {
2d21ac55
A
2962 if( ref->sharedMem)
2963 {
0b4e3aa0
A
2964 vm_prot_t prot = VM_PROT_READ
2965 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
55e303ae 2966
2d21ac55
A
2967 // VM system requires write access to change cache mode
2968 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2969 prot |= VM_PROT_WRITE;
2970
55e303ae
A
2971 // set memory entry cache
2972 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2973 switch (ref->options & kIOMapCacheMask)
2974 {
2975 case kIOMapInhibitCache:
2976 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2977 break;
2978
2979 case kIOMapWriteThruCache:
2980 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2981 break;
2982
2983 case kIOMapWriteCombineCache:
2984 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2985 break;
2986
2987 case kIOMapCopybackCache:
2988 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2989 break;
2990
316670eb
A
2991 case kIOMapCopybackInnerCache:
2992 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2993 break;
2994
55e303ae
A
2995 case kIOMapDefaultCache:
2996 default:
2997 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2998 break;
2999 }
3000
3001 vm_size_t unused = 0;
3002
3003 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
3004 memEntryCacheMode, NULL, ref->sharedMem );
3005 if (KERN_SUCCESS != err)
3006 IOLog("MAP_MEM_ONLY failed %d\n", err);
3007
2d21ac55 3008 err = mach_vm_map( map,
0b4e3aa0
A
3009 &ref->mapped,
3010 ref->size, 0 /* mask */,
3011 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3012 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
3013 ref->sharedMem, ref->sourceOffset,
3014 false, // copy
3015 prot, // cur
3016 prot, // max
3017 VM_INHERIT_NONE);
55e303ae 3018
0b4e3aa0
A
3019 if( KERN_SUCCESS != err) {
3020 ref->mapped = 0;
3021 continue;
3022 }
060df5ea 3023 ref->map = map;
2d21ac55
A
3024 }
3025 else
3026 {
060df5ea 3027 err = mach_vm_allocate(map, &ref->mapped, ref->size,
0b4e3aa0
A
3028 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3029 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
0b4e3aa0
A
3030 if( KERN_SUCCESS != err) {
3031 ref->mapped = 0;
3032 continue;
3033 }
060df5ea 3034 ref->map = map;
0b4e3aa0 3035 // we have to make sure that these guys don't get copied if we fork.
060df5ea 3036 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
0b4e3aa0
A
3037 assert( KERN_SUCCESS == err );
3038 }
2d21ac55
A
3039 }
3040 while( false );
0b4e3aa0
A
3041
3042 return( err );
3043}
3044
2d21ac55 3045kern_return_t
060df5ea 3046IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2d21ac55
A
3047 mach_vm_size_t offset,
3048 mach_vm_address_t * address, mach_vm_size_t length)
3049{
3050 IOReturn err;
3051 IOMemoryDescriptorMapAllocRef ref;
3052
060df5ea 3053 ref.map = *map;
b0d623f7 3054 ref.sharedMem = entry;
cf7d32b8 3055 ref.sourceOffset = trunc_page_64(offset);
b0d623f7
A
3056 ref.options = options;
3057 ref.size = length;
2d21ac55
A
3058
3059 if (options & kIOMapAnywhere)
3060 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3061 ref.mapped = 0;
3062 else
3063 ref.mapped = *address;
3064
060df5ea 3065 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2d21ac55
A
3066 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3067 else
060df5ea 3068 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2d21ac55
A
3069
3070 *address = ref.mapped;
060df5ea
A
3071 *map = ref.map;
3072
2d21ac55
A
3073 return (err);
3074}
3075
b0d623f7 3076kern_return_t
060df5ea 3077IOMemoryDescriptorMapCopy(vm_map_t * map,
b0d623f7
A
3078 IOOptionBits options,
3079 mach_vm_size_t offset,
3080 mach_vm_address_t * address, mach_vm_size_t length)
3081{
3082 IOReturn err;
3083 IOMemoryDescriptorMapAllocRef ref;
3084
060df5ea 3085 ref.map = *map;
b0d623f7 3086 ref.sharedMem = NULL;
b0d623f7
A
3087 ref.sourceOffset = trunc_page_64(offset);
3088 ref.options = options;
3089 ref.size = length;
3090
3091 if (options & kIOMapAnywhere)
3092 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3093 ref.mapped = 0;
3094 else
3095 ref.mapped = *address;
3096
060df5ea 3097 if (ref.map == kernel_map)
b0d623f7
A
3098 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3099 else
060df5ea 3100 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
b0d623f7
A
3101
3102 *address = ref.mapped;
060df5ea
A
3103 *map = ref.map;
3104
b0d623f7
A
3105 return (err);
3106}
9bccf70c 3107
1c79356b 3108IOReturn IOMemoryDescriptor::doMap(
2d21ac55
A
3109 vm_map_t __addressMap,
3110 IOVirtualAddress * __address,
1c79356b 3111 IOOptionBits options,
2d21ac55
A
3112 IOByteCount __offset,
3113 IOByteCount __length )
1c79356b 3114{
b0d623f7 3115#ifndef __LP64__
2d21ac55 3116 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
b0d623f7 3117#endif /* !__LP64__ */
1c79356b 3118
b0d623f7 3119 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
3120 mach_vm_size_t offset = mapping->fOffset + __offset;
3121 mach_vm_size_t length = mapping->fLength;
1c79356b 3122
2d21ac55
A
3123 IOReturn err = kIOReturnSuccess;
3124 memory_object_t pager;
3125 mach_vm_size_t pageOffset;
3126 IOPhysicalAddress sourceAddr;
b0d623f7 3127 unsigned int lock_count;
1c79356b 3128
2d21ac55
A
3129 do
3130 {
b0d623f7
A
3131 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3132 pageOffset = sourceAddr - trunc_page( sourceAddr );
1c79356b 3133
2d21ac55 3134 if( reserved)
316670eb 3135 pager = (memory_object_t) reserved->dp.devicePager;
2d21ac55
A
3136 else
3137 pager = MACH_PORT_NULL;
0b4e3aa0 3138
91447636
A
3139 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3140 {
2d21ac55
A
3141 upl_t redirUPL2;
3142 vm_size_t size;
3143 int flags;
0b4e3aa0 3144
91447636
A
3145 if (!_memEntry)
3146 {
3147 err = kIOReturnNotReadable;
3148 continue;
3149 }
3150
b0d623f7 3151 size = round_page(mapping->fLength + pageOffset);
91447636
A
3152 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3153 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3154
3155 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3156 NULL, NULL,
3157 &flags))
3158 redirUPL2 = NULL;
3159
b0d623f7
A
3160 for (lock_count = 0;
3161 IORecursiveLockHaveLock(gIOMemoryLock);
3162 lock_count++) {
3163 UNLOCK;
3164 }
2d21ac55 3165 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
b0d623f7
A
3166 for (;
3167 lock_count;
3168 lock_count--) {
3169 LOCK;
3170 }
3171
91447636
A
3172 if (kIOReturnSuccess != err)
3173 {
3174 IOLog("upl_transpose(%x)\n", err);
3175 err = kIOReturnSuccess;
3176 }
3177
3178 if (redirUPL2)
3179 {
3180 upl_commit(redirUPL2, NULL, 0);
3181 upl_deallocate(redirUPL2);
3182 redirUPL2 = 0;
3183 }
3184 {
3185 // swap the memEntries since they now refer to different vm_objects
3186 void * me = _memEntry;
2d21ac55
A
3187 _memEntry = mapping->fMemory->_memEntry;
3188 mapping->fMemory->_memEntry = me;
91447636 3189 }
2d21ac55 3190 if (pager)
316670eb 3191 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
91447636
A
3192 }
3193 else
3194 {
2d21ac55
A
3195 mach_vm_address_t address;
3196
3197 if (!(options & kIOMapAnywhere))
3198 {
3199 address = trunc_page_64(mapping->fAddress);
3200 if( (mapping->fAddress - address) != pageOffset)
3201 {
91447636
A
3202 err = kIOReturnVMError;
3203 continue;
3204 }
3205 }
0b4e3aa0 3206
060df5ea
A
3207 vm_map_t map = mapping->fAddressMap;
3208 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2d21ac55
A
3209 options, (kIOMemoryBufferPageable & _flags),
3210 offset, &address, round_page_64(length + pageOffset));
3211 if( err != KERN_SUCCESS)
3212 continue;
0b4e3aa0 3213
2d21ac55
A
3214 if (!_memEntry || pager)
3215 {
3216 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3217 if (err != KERN_SUCCESS)
3218 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3219 }
0b4e3aa0 3220
b0d623f7 3221#if DEBUG
2d21ac55 3222 if (kIOLogMapping & gIOKitDebug)
316670eb
A
3223 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3224 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
2d21ac55 3225#endif
0b4e3aa0 3226
2d21ac55
A
3227 if (err == KERN_SUCCESS)
3228 mapping->fAddress = address + pageOffset;
3229 else
3230 mapping->fAddress = NULL;
3231 }
3232 }
3233 while( false );
0b4e3aa0 3234
2d21ac55 3235 return (err);
0b4e3aa0
A
3236}
3237
0b4e3aa0
A
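// handleFault
//
// Back a mapping with physical pages. With no target map the caller is
// simply blocked while the descriptor is redirected, then returns.
// Otherwise each physical segment is pushed into the device pager
// (whole-range for a contiguous pager, else page by page), and kernel
// mappings are touched with vm_fault() so they are resolved before any
// access from interrupt level.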
3238IOReturn IOMemoryDescriptor::handleFault(
3239 void * _pager,
3240 vm_map_t addressMap,
2d21ac55
A
3241 mach_vm_address_t address,
3242 mach_vm_size_t sourceOffset,
3243 mach_vm_size_t length,
0b4e3aa0
A
3244 IOOptionBits options )
3245{
3246 IOReturn err = kIOReturnSuccess;
3247 memory_object_t pager = (memory_object_t) _pager;
2d21ac55
A
3248 mach_vm_size_t size;
3249 mach_vm_size_t bytes;
3250 mach_vm_size_t page;
3251 mach_vm_size_t pageOffset;
3252 mach_vm_size_t pagerOffset;
0b4e3aa0 3253 IOPhysicalLength segLen;
55e303ae 3254 addr64_t physAddr;
0b4e3aa0 3255
2d21ac55
A
3256 if( !addressMap)
3257 {
3258 if( kIOMemoryRedirected & _flags)
3259 {
b0d623f7 3260#if DEBUG
2d21ac55 3261 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
1c79356b 3262#endif
0b4e3aa0 3263 do {
9bccf70c 3264 SLEEP;
0b4e3aa0
A
3265 } while( kIOMemoryRedirected & _flags );
3266 }
1c79356b 3267
0b4e3aa0 3268 return( kIOReturnSuccess );
1c79356b
A
3269 }
3270
b0d623f7 3271 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 3272 assert( physAddr );
55e303ae
A
3273 pageOffset = physAddr - trunc_page_64( physAddr );
3274 pagerOffset = sourceOffset;
0b4e3aa0
A
3275
3276 size = length + pageOffset;
3277 physAddr -= pageOffset;
1c79356b
A
3278
3279 segLen += pageOffset;
0b4e3aa0 3280 bytes = size;
2d21ac55
A
3281 do
3282 {
1c79356b
A
3283 // in the middle of the loop only map whole pages
3284 if( segLen >= bytes)
3285 segLen = bytes;
b0d623f7 3286 else if( segLen != trunc_page( segLen))
1c79356b 3287 err = kIOReturnVMError;
55e303ae 3288 if( physAddr != trunc_page_64( physAddr))
1c79356b 3289 err = kIOReturnBadArgument;
8f6c56a5
A
3290 if (kIOReturnSuccess != err)
3291 break;
1c79356b 3292
b0d623f7 3293#if DEBUG
1c79356b 3294 if( kIOLogMapping & gIOKitDebug)
b0d623f7 3295 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
0b4e3aa0 3296 addressMap, address + pageOffset, physAddr + pageOffset,
1c79356b
A
3297 segLen - pageOffset);
3298#endif
3299
2d21ac55 3300
0b4e3aa0 3301 if( pager) {
316670eb 3302 if( reserved && reserved->dp.pagerContig) {
0b4e3aa0 3303 IOPhysicalLength allLen;
55e303ae 3304 addr64_t allPhys;
0b4e3aa0 3305
b0d623f7 3306 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
0b4e3aa0 3307 assert( allPhys );
b0d623f7 3308 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2d21ac55
A
3309 }
3310 else
3311 {
0b4e3aa0 3312
2d21ac55 3313 for( page = 0;
0b4e3aa0 3314 (page < segLen) && (KERN_SUCCESS == err);
2d21ac55
A
3315 page += page_size)
3316 {
3317 err = device_pager_populate_object(pager, pagerOffset,
3318 (ppnum_t)(atop_64(physAddr + page)), page_size);
3319 pagerOffset += page_size;
0b4e3aa0
A
3320 }
3321 }
3322 assert( KERN_SUCCESS == err );
3323 if( err)
3324 break;
3325 }
0c530ab8 3326
2d21ac55
A
3327 // This call to vm_fault causes an early pmap level resolution
3328 // of the mappings created above for kernel mappings, since
3329 // faulting in later can't take place from interrupt level.
9bccf70c
A
3330 /* *** ALERT *** */
3331 /* *** Temporary Workaround *** */
3332
2d21ac55
A
3333 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3334 {
91447636
A
3335 vm_fault(addressMap,
3336 (vm_map_offset_t)address,
3337 VM_PROT_READ|VM_PROT_WRITE,
3338 FALSE, THREAD_UNINT, NULL,
3339 (vm_map_offset_t)0);
9bccf70c
A
3340 }
3341
3342 /* *** Temporary Workaround *** */
3343 /* *** ALERT *** */
0c530ab8 3344
1c79356b 3345 sourceOffset += segLen - pageOffset;
0b4e3aa0 3346 address += segLen;
1c79356b
A
3347 bytes -= segLen;
3348 pageOffset = 0;
3349
2d21ac55 3350 }
b0d623f7 3351 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 3352
2d21ac55 3353 if (bytes)
1c79356b 3354 err = kIOReturnBadArgument;
1c79356b 3355
2d21ac55 3356 return (err);
1c79356b
A
3357}
3358
3359IOReturn IOMemoryDescriptor::doUnmap(
3360 vm_map_t addressMap,
2d21ac55
A
3361 IOVirtualAddress __address,
3362 IOByteCount __length )
1c79356b 3363{
2d21ac55
A
3364 IOReturn err;
3365 mach_vm_address_t address;
3366 mach_vm_size_t length;
3367
3368 if (__length)
3369 {
3370 address = __address;
3371 length = __length;
3372 }
3373 else
3374 {
b0d623f7
A
3375 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3376 address = ((IOMemoryMap *) __address)->fAddress;
3377 length = ((IOMemoryMap *) __address)->fLength;
2d21ac55
A
3378 }
3379
7e4a7d39
A
3380 if ((addressMap == kernel_map)
3381 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
2d21ac55 3382 addressMap = IOPageableMapForAddress( address );
1c79356b 3383
b0d623f7 3384#if DEBUG
1c79356b 3385 if( kIOLogMapping & gIOKitDebug)
2d21ac55
A
3386 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3387 addressMap, address, length );
1c79356b
A
3388#endif
3389
2d21ac55 3390 err = mach_vm_deallocate( addressMap, address, length );
1c79356b 3391
2d21ac55 3392 return (err);
1c79356b
A
3393}
3394
91447636 3395IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 3396{
91447636 3397 IOReturn err = kIOReturnSuccess;
b0d623f7 3398 IOMemoryMap * mapping = 0;
e3027f41
A
3399 OSIterator * iter;
3400
3401 LOCK;
3402
91447636
A
3403 if( doRedirect)
3404 _flags |= kIOMemoryRedirected;
3405 else
3406 _flags &= ~kIOMemoryRedirected;
3407
e3027f41
A
3408 do {
3409 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
39236c6e
A
3410
3411 memory_object_t pager;
3412
3413 if( reserved)
3414 pager = (memory_object_t) reserved->dp.devicePager;
3415 else
3416 pager = MACH_PORT_NULL;
3417
b0d623f7 3418 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
39236c6e 3419 {
91447636 3420 mapping->redirect( safeTask, doRedirect );
39236c6e
A
3421 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3422 {
3423 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3424 }
3425 }
e3027f41 3426
91447636
A
3427 iter->release();
3428 }
e3027f41
A
3429 } while( false );
3430
91447636
A
3431 if (!doRedirect)
3432 {
9bccf70c 3433 WAKEUP;
0b4e3aa0
A
3434 }
3435
e3027f41
A
3436 UNLOCK;
3437
b0d623f7 3438#ifndef __LP64__
e3027f41
A
3439 // temporary binary compatibility
3440 IOSubMemoryDescriptor * subMem;
3441 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
91447636 3442 err = subMem->redirect( safeTask, doRedirect );
e3027f41 3443 else
91447636 3444 err = kIOReturnSuccess;
b0d623f7 3445#endif /* !__LP64__ */
e3027f41
A
3446
3447 return( err );
3448}
3449
b0d623f7 3450IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41
A
3451{
3452 IOReturn err = kIOReturnSuccess;
3453
2d21ac55 3454 if( fSuperMap) {
b0d623f7 3455// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
e3027f41
A
3456 } else {
3457
3458 LOCK;
0c530ab8
A
3459
3460 do
91447636 3461 {
2d21ac55 3462 if (!fAddress)
0c530ab8 3463 break;
2d21ac55 3464 if (!fAddressMap)
0c530ab8
A
3465 break;
3466
2d21ac55
A
3467 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3468 && (0 == (fOptions & kIOMapStatic)))
0c530ab8 3469 {
2d21ac55 3470 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3471 err = kIOReturnSuccess;
3472#if DEBUG
2d21ac55 3473 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 3474#endif
0c530ab8 3475 }
2d21ac55 3476 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
0c530ab8
A
3477 {
3478 IOOptionBits newMode;
2d21ac55
A
3479 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3480 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
0c530ab8
A
3481 }
3482 }
3483 while (false);
0c530ab8 3484 UNLOCK;
e3027f41
A
3485 }
3486
2d21ac55
A
3487 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3488 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3489 && safeTask
2d21ac55
A
3490 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3491 fMemory->redirect(safeTask, doRedirect);
91447636 3492
e3027f41
A
3493 return( err );
3494}
3495
b0d623f7 3496IOReturn IOMemoryMap::unmap( void )
1c79356b
A
3497{
3498 IOReturn err;
3499
3500 LOCK;
3501
2d21ac55
A
3502 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3503 && (0 == (fOptions & kIOMapStatic))) {
1c79356b 3504
39236c6e
A
3505 vm_map_iokit_unmapped_region(fAddressMap, fLength);
3506
2d21ac55 3507 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
1c79356b
A
3508
3509 } else
3510 err = kIOReturnSuccess;
3511
2d21ac55
A
3512 if (fAddressMap)
3513 {
3514 vm_map_deallocate(fAddressMap);
3515 fAddressMap = 0;
3516 }
3517
3518 fAddress = 0;
1c79356b
A
3519
3520 UNLOCK;
3521
3522 return( err );
3523}
3524
b0d623f7 3525void IOMemoryMap::taskDied( void )
1c79356b
A
3526{
3527 LOCK;
b0d623f7
A
3528 if (fUserClientUnmap)
3529 unmap();
2d21ac55
A
3530 if( fAddressMap) {
3531 vm_map_deallocate(fAddressMap);
3532 fAddressMap = 0;
1c79356b 3533 }
2d21ac55
A
3534 fAddressTask = 0;
3535 fAddress = 0;
1c79356b
A
3536 UNLOCK;
3537}
3538
b0d623f7
A
3539IOReturn IOMemoryMap::userClientUnmap( void )
3540{
3541 fUserClientUnmap = true;
3542 return (kIOReturnSuccess);
3543}
3544
9bccf70c
A
3545// Overload the release mechanism. All mappings must be a member
3546// of a memory descriptor's _mappings set. This means that we
3547// always have 2 references on a mapping. When either of these references
3548// is released we need to free ourselves.
b0d623f7 3549void IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 3550{
55e303ae 3551 LOCK;
9bccf70c 3552 super::taggedRelease(tag, 2);
55e303ae 3553 UNLOCK;
9bccf70c
A
3554}
3555
b0d623f7 3556void IOMemoryMap::free()
1c79356b
A
3557{
3558 unmap();
3559
2d21ac55
A
3560 if (fMemory)
3561 {
1c79356b 3562 LOCK;
2d21ac55 3563 fMemory->removeMapping(this);
1c79356b 3564 UNLOCK;
2d21ac55 3565 fMemory->release();
1c79356b
A
3566 }
3567
2d21ac55 3568 if (fOwner && (fOwner != fMemory))
91447636
A
3569 {
3570 LOCK;
2d21ac55 3571 fOwner->removeMapping(this);
91447636
A
3572 UNLOCK;
3573 }
3574
2d21ac55
A
3575 if (fSuperMap)
3576 fSuperMap->release();
1c79356b 3577
2d21ac55
A
3578 if (fRedirUPL) {
3579 upl_commit(fRedirUPL, NULL, 0);
3580 upl_deallocate(fRedirUPL);
91447636
A
3581 }
3582
1c79356b
A
3583 super::free();
3584}
3585
b0d623f7 3586IOByteCount IOMemoryMap::getLength()
1c79356b 3587{
2d21ac55 3588 return( fLength );
1c79356b
A
3589}
3590
b0d623f7 3591IOVirtualAddress IOMemoryMap::getVirtualAddress()
1c79356b 3592{
b0d623f7 3593#ifndef __LP64__
2d21ac55
A
3594 if (fSuperMap)
3595 fSuperMap->getVirtualAddress();
b0d623f7
A
3596 else if (fAddressMap
3597 && vm_map_is_64bit(fAddressMap)
3598 && (sizeof(IOVirtualAddress) < 8))
2d21ac55
A
3599 {
3600 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3601 }
b0d623f7 3602#endif /* !__LP64__ */
2d21ac55
A
3603
3604 return (fAddress);
3605}
3606
b0d623f7
A
3607#ifndef __LP64__
3608mach_vm_address_t IOMemoryMap::getAddress()
2d21ac55
A
3609{
3610 return( fAddress);
3611}
3612
b0d623f7 3613mach_vm_size_t IOMemoryMap::getSize()
2d21ac55
A
3614{
3615 return( fLength );
1c79356b 3616}
b0d623f7 3617#endif /* !__LP64__ */
1c79356b 3618
2d21ac55 3619
b0d623f7 3620task_t IOMemoryMap::getAddressTask()
1c79356b 3621{
2d21ac55
A
3622 if( fSuperMap)
3623 return( fSuperMap->getAddressTask());
1c79356b 3624 else
2d21ac55 3625 return( fAddressTask);
1c79356b
A
3626}
3627
b0d623f7 3628IOOptionBits IOMemoryMap::getMapOptions()
1c79356b 3629{
2d21ac55 3630 return( fOptions);
1c79356b
A
3631}
3632
b0d623f7 3633IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
1c79356b 3634{
2d21ac55 3635 return( fMemory );
1c79356b
A
3636}
3637
b0d623f7
A
3638IOMemoryMap * IOMemoryMap::copyCompatible(
3639 IOMemoryMap * newMapping )
1c79356b 3640{
2d21ac55
A
3641 task_t task = newMapping->getAddressTask();
3642 mach_vm_address_t toAddress = newMapping->fAddress;
3643 IOOptionBits _options = newMapping->fOptions;
3644 mach_vm_size_t _offset = newMapping->fOffset;
3645 mach_vm_size_t _length = newMapping->fLength;
1c79356b 3646
2d21ac55 3647 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
1c79356b 3648 return( 0 );
2d21ac55 3649 if( (fOptions ^ _options) & kIOMapReadOnly)
9bccf70c
A
3650 return( 0 );
3651 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2d21ac55 3652 && ((fOptions ^ _options) & kIOMapCacheMask))
1c79356b
A
3653 return( 0 );
3654
2d21ac55 3655 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
1c79356b
A
3656 return( 0 );
3657
2d21ac55 3658 if( _offset < fOffset)
1c79356b
A
3659 return( 0 );
3660
2d21ac55 3661 _offset -= fOffset;
1c79356b 3662
2d21ac55 3663 if( (_offset + _length) > fLength)
1c79356b
A
3664 return( 0 );
3665
2d21ac55
A
3666 retain();
3667 if( (fLength == _length) && (!_offset))
3668 {
2d21ac55
A
3669 newMapping = this;
3670 }
3671 else
3672 {
3673 newMapping->fSuperMap = this;
6d2010ae 3674 newMapping->fOffset = fOffset + _offset;
2d21ac55 3675 newMapping->fAddress = fAddress + _offset;
1c79356b
A
3676 }
3677
2d21ac55 3678 return( newMapping );
1c79356b
A
3679}
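/*
 * Effect of copyCompatible(), as a sketch (`desc` is a hypothetical
 * IOMemoryDescriptor): two compatible map requests from the same task may be
 * satisfied by the same IOMemoryMap object (or by a sub-mapping whose
 * fSuperMap points at it), so address space is shared rather than duplicated.
 * Each caller still releases the map it received.
 *
 *     IOMemoryMap * a = desc->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, 0);
 *     IOMemoryMap * b = desc->createMappingInTask(kernel_task, 0, kIOMapAnywhere, 0, 0);
 *     // a and b may be the same object, retained once per caller
 *     b->release();
 *     a->release();
 */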
3680
99c3a104
A
3681IOReturn IOMemoryMap::wireRange(
3682 uint32_t options,
3683 mach_vm_size_t offset,
3684 mach_vm_size_t length)
3685{
3686 IOReturn kr;
3687 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3688 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3689
3690 if (kIODirectionOutIn & options)
3691 {
3692 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3693 }
3694 else
3695 {
3696 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3697 }
3698
3699 return (kr);
3700}
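/*
 * Intended use of wireRange(), as a sketch (`map` is a hypothetical
 * IOMemoryMap the caller owns): passing a direction in `options` wires the
 * pages backing the range; passing 0 unwires them again.
 *
 *     IOReturn kr;
 *     kr = map->wireRange(kIODirectionOutIn, 0, map->getLength());   // wire
 *     // ... fault-free access ...
 *     kr = map->wireRange(0, 0, map->getLength());                   // unwire
 */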
3701
3702
0c530ab8 3703IOPhysicalAddress
b0d623f7
A
3704#ifdef __LP64__
3705IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3706#else /* !__LP64__ */
3707IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3708#endif /* !__LP64__ */
1c79356b
A
3709{
3710 IOPhysicalAddress address;
3711
3712 LOCK;
b0d623f7
A
3713#ifdef __LP64__
3714 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3715#else /* !__LP64__ */
2d21ac55 3716 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 3717#endif /* !__LP64__ */
1c79356b
A
3718 UNLOCK;
3719
3720 return( address );
3721}
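/*
 * Sketch of a lookup through the map (`map` is a hypothetical IOMemoryMap;
 * on LP64 builds the method also takes an options argument, assumed here to
 * default to 0): returns the physical address backing a byte offset within
 * the mapping, plus the length of the physically contiguous run starting there.
 *
 *     IOByteCount       segLen = 0;
 *     IOPhysicalAddress phys   = map->getPhysicalSegment(0, &segLen);
 */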
3722
3723/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3724
3725#undef super
3726#define super OSObject
3727
3728/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3729
3730void IOMemoryDescriptor::initialize( void )
3731{
3732 if( 0 == gIOMemoryLock)
3733 gIOMemoryLock = IORecursiveLockAlloc();
55e303ae 3734
0c530ab8 3735 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
3736}
3737
3738void IOMemoryDescriptor::free( void )
3739{
3740 if( _mappings)
3741 _mappings->release();
3742
3743 super::free();
3744}
3745
3746IOMemoryMap * IOMemoryDescriptor::setMapping(
3747 task_t intoTask,
3748 IOVirtualAddress mapAddress,
55e303ae 3749 IOOptionBits options )
1c79356b 3750{
2d21ac55
A
3751 return (createMappingInTask( intoTask, mapAddress,
3752 options | kIOMapStatic,
3753 0, getLength() ));
1c79356b
A
3754}
3755
3756IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 3757 IOOptionBits options )
1c79356b 3758{
2d21ac55
A
3759 return (createMappingInTask( kernel_task, 0,
3760 options | kIOMapAnywhere,
3761 0, getLength() ));
1c79356b
A
3762}
3763
b0d623f7 3764#ifndef __LP64__
2d21ac55
A
3765IOMemoryMap * IOMemoryDescriptor::map(
3766 task_t intoTask,
3767 IOVirtualAddress atAddress,
1c79356b 3768 IOOptionBits options,
55e303ae
A
3769 IOByteCount offset,
3770 IOByteCount length )
1c79356b 3771{
2d21ac55
A
3772 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3773 {
3774 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3775 return (0);
3776 }
3777
3778 return (createMappingInTask(intoTask, atAddress,
3779 options, offset, length));
3780}
b0d623f7 3781#endif /* !__LP64__ */
2d21ac55
A
3782
3783IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3784 task_t intoTask,
3785 mach_vm_address_t atAddress,
3786 IOOptionBits options,
3787 mach_vm_size_t offset,
3788 mach_vm_size_t length)
3789{
b0d623f7
A
3790 IOMemoryMap * result;
3791 IOMemoryMap * mapping;
2d21ac55
A
3792
3793 if (0 == length)
1c79356b
A
3794 length = getLength();
3795
b0d623f7 3796 mapping = new IOMemoryMap;
2d21ac55
A
3797
3798 if( mapping
3799 && !mapping->init( intoTask, atAddress,
3800 options, offset, length )) {
3801 mapping->release();
3802 mapping = 0;
3803 }
3804
3805 if (mapping)
3806 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3807 else
3808 result = 0;
3809
b0d623f7 3810#if DEBUG
2d21ac55 3811 if (!result)
316670eb
A
3812 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3813 this, atAddress, (uint32_t) options, offset, length);
2d21ac55
A
3814#endif
3815
3816 return (result);
1c79356b
A
3817}
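/*
 * Typical client usage, as a sketch (`desc` is a hypothetical
 * IOMemoryDescriptor the caller already holds; error handling elided):
 *
 *     IOMemoryMap * map = desc->createMappingInTask(kernel_task,
 *                                                   0,              // let the kernel pick the address
 *                                                   kIOMapAnywhere,
 *                                                   0, 0);          // map the whole descriptor
 *     if (map) {
 *         mach_vm_address_t va = map->getAddress();
 *         // ... access [va, va + map->getSize()) ...
 *         map->release();
 *     }
 */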
3818
b0d623f7
A
3819#ifndef __LP64__ // there is only a 64 bit version for LP64
3820IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
91447636
A
3821 IOOptionBits options,
3822 IOByteCount offset)
2d21ac55
A
3823{
3824 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3825}
b0d623f7 3826#endif
2d21ac55 3827
b0d623f7 3828IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2d21ac55
A
3829 IOOptionBits options,
3830 mach_vm_size_t offset)
91447636
A
3831{
3832 IOReturn err = kIOReturnSuccess;
3833 IOMemoryDescriptor * physMem = 0;
3834
3835 LOCK;
3836
2d21ac55 3837 if (fAddress && fAddressMap) do
91447636 3838 {
2d21ac55
A
3839 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3840 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3841 {
2d21ac55 3842 physMem = fMemory;
91447636
A
3843 physMem->retain();
3844 }
3845
2d21ac55 3846 if (!fRedirUPL)
91447636 3847 {
b0d623f7 3848 vm_size_t size = round_page(fLength);
91447636
A
3849 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3850 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2d21ac55 3851 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
91447636
A
3852 NULL, NULL,
3853 &flags))
2d21ac55 3854 fRedirUPL = 0;
91447636
A
3855
3856 if (physMem)
3857 {
2d21ac55 3858 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3859 if (false)
3860 physMem->redirect(0, true);
91447636
A
3861 }
3862 }
3863
3864 if (newBackingMemory)
3865 {
2d21ac55 3866 if (newBackingMemory != fMemory)
91447636 3867 {
2d21ac55
A
3868 fOffset = 0;
3869 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3870 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3871 offset, fLength))
91447636
A
3872 err = kIOReturnError;
3873 }
2d21ac55 3874 if (fRedirUPL)
91447636 3875 {
2d21ac55
A
3876 upl_commit(fRedirUPL, NULL, 0);
3877 upl_deallocate(fRedirUPL);
3878 fRedirUPL = 0;
91447636 3879 }
b0d623f7 3880 if (false && physMem)
91447636
A
3881 physMem->redirect(0, false);
3882 }
3883 }
3884 while (false);
3885
3886 UNLOCK;
3887
3888 if (physMem)
3889 physMem->release();
3890
3891 return (err);
3892}
3893
1c79356b
A
3894IOMemoryMap * IOMemoryDescriptor::makeMapping(
3895 IOMemoryDescriptor * owner,
2d21ac55
A
3896 task_t __intoTask,
3897 IOVirtualAddress __address,
1c79356b 3898 IOOptionBits options,
2d21ac55
A
3899 IOByteCount __offset,
3900 IOByteCount __length )
1c79356b 3901{
b0d623f7 3902#ifndef __LP64__
2d21ac55 3903 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
b0d623f7 3904#endif /* !__LP64__ */
2d21ac55 3905
91447636 3906 IOMemoryDescriptor * mapDesc = 0;
b0d623f7 3907 IOMemoryMap * result = 0;
2d21ac55
A
3908 OSIterator * iter;
3909
b0d623f7 3910 IOMemoryMap * mapping = (IOMemoryMap *) __address;
2d21ac55
A
3911 mach_vm_size_t offset = mapping->fOffset + __offset;
3912 mach_vm_size_t length = mapping->fLength;
3913
3914 mapping->fOffset = offset;
1c79356b
A
3915
3916 LOCK;
3917
91447636
A
3918 do
3919 {
2d21ac55
A
3920 if (kIOMapStatic & options)
3921 {
3922 result = mapping;
3923 addMapping(mapping);
3924 mapping->setMemoryDescriptor(this, 0);
3925 continue;
3926 }
3927
91447636
A
3928 if (kIOMapUnique & options)
3929 {
060df5ea 3930 addr64_t phys;
91447636 3931 IOByteCount physLen;
1c79356b 3932
2d21ac55 3933// if (owner != this) continue;
1c79356b 3934
0c530ab8
A
3935 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3936 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3937 {
b0d623f7 3938 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
91447636
A
3939 if (!phys || (physLen < length))
3940 continue;
3941
b0d623f7
A
3942 mapDesc = IOMemoryDescriptor::withAddressRange(
3943 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
91447636
A
3944 if (!mapDesc)
3945 continue;
3946 offset = 0;
2d21ac55 3947 mapping->fOffset = offset;
91447636
A
3948 }
3949 }
3950 else
3951 {
2d21ac55
A
3952 // look for a compatible existing mapping
3953 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3954 {
b0d623f7
A
3955 IOMemoryMap * lookMapping;
3956 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
2d21ac55
A
3957 {
3958 if ((result = lookMapping->copyCompatible(mapping)))
3959 {
3960 addMapping(result);
3961 result->setMemoryDescriptor(this, offset);
91447636 3962 break;
2d21ac55 3963 }
91447636
A
3964 }
3965 iter->release();
3966 }
2d21ac55 3967 if (result || (options & kIOMapReference))
6d2010ae
A
3968 {
3969 if (result != mapping)
3970 {
3971 mapping->release();
3972 mapping = NULL;
3973 }
91447636 3974 continue;
6d2010ae 3975 }
2d21ac55 3976 }
91447636 3977
2d21ac55
A
3978 if (!mapDesc)
3979 {
3980 mapDesc = this;
91447636
A
3981 mapDesc->retain();
3982 }
2d21ac55
A
3983 IOReturn
3984 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3985 if (kIOReturnSuccess == kr)
3986 {
39236c6e
A
3987 if (0 == (mapping->fOptions & kIOMapStatic)) {
3988 vm_map_iokit_mapped_region(mapping->fAddressMap, length);
3989 }
3990
2d21ac55
A
3991 result = mapping;
3992 mapDesc->addMapping(result);
3993 result->setMemoryDescriptor(mapDesc, offset);
3994 }
3995 else
3996 {
1c79356b 3997 mapping->release();
2d21ac55 3998 mapping = NULL;
1c79356b 3999 }
91447636 4000 }
2d21ac55 4001 while( false );
1c79356b
A
4002
4003 UNLOCK;
4004
91447636
A
4005 if (mapDesc)
4006 mapDesc->release();
4007
2d21ac55 4008 return (result);
1c79356b
A
4009}
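/*
 * Summary of the paths above: a kIOMapStatic request simply adopts the
 * passed-in mapping; kIOMapUnique over physical memory substitutes a private
 * physical-range descriptor as the backing object; otherwise an existing
 * compatible mapping is reused via copyCompatible() (or a kIOMapReference
 * request fails), and only when nothing can be reused does doMap() create a
 * fresh mapping, which is then accounted via vm_map_iokit_mapped_region().
 */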
4010
4011void IOMemoryDescriptor::addMapping(
4012 IOMemoryMap * mapping )
4013{
2d21ac55
A
4014 if( mapping)
4015 {
1c79356b
A
4016 if( 0 == _mappings)
4017 _mappings = OSSet::withCapacity(1);
9bccf70c
A
4018 if( _mappings )
4019 _mappings->setObject( mapping );
1c79356b
A
4020 }
4021}
4022
4023void IOMemoryDescriptor::removeMapping(
4024 IOMemoryMap * mapping )
4025{
9bccf70c 4026 if( _mappings)
1c79356b 4027 _mappings->removeObject( mapping);
1c79356b
A
4028}
4029
b0d623f7
A
4030#ifndef __LP64__
4031// obsolete initializers
4032// - initWithOptions is the designated initializer
1c79356b 4033bool
b0d623f7 4034IOMemoryDescriptor::initWithAddress(void * address,
55e303ae
A
4035 IOByteCount length,
4036 IODirection direction)
1c79356b
A
4037{
4038 return( false );
4039}
4040
4041bool
b0d623f7 4042IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
55e303ae
A
4043 IOByteCount length,
4044 IODirection direction,
4045 task_t task)
1c79356b
A
4046{
4047 return( false );
4048}
4049
4050bool
b0d623f7 4051IOMemoryDescriptor::initWithPhysicalAddress(
1c79356b 4052 IOPhysicalAddress address,
55e303ae
A
4053 IOByteCount length,
4054 IODirection direction )
1c79356b
A
4055{
4056 return( false );
4057}
4058
4059bool
b0d623f7 4060IOMemoryDescriptor::initWithRanges(
1c79356b
A
4061 IOVirtualRange * ranges,
4062 UInt32 withCount,
55e303ae
A
4063 IODirection direction,
4064 task_t task,
4065 bool asReference)
1c79356b
A
4066{
4067 return( false );
4068}
4069
4070bool
b0d623f7 4071IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1c79356b 4072 UInt32 withCount,
55e303ae
A
4073 IODirection direction,
4074 bool asReference)
1c79356b
A
4075{
4076 return( false );
4077}
4078
b0d623f7
A
4079void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4080 IOByteCount * lengthOfSegment)
4081{
4082 return( 0 );
4083}
4084#endif /* !__LP64__ */
4085
1c79356b
A
4086/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4087
9bccf70c
A
4088bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4089{
4090 OSSymbol const *keys[2];
4091 OSObject *values[2];
91447636
A
4092 struct SerData {
4093 user_addr_t address;
4094 user_size_t length;
4095 } *vcopy;
9bccf70c
A
4096 unsigned int index, nRanges;
4097 bool result;
4098
91447636
A
4099 IOOptionBits type = _flags & kIOMemoryTypeMask;
4100
9bccf70c
A
4101 if (s == NULL) return false;
4102 if (s->previouslySerialized(this)) return true;
4103
4104 // Pretend we are an array.
4105 if (!s->addXMLStartTag(this, "array")) return false;
4106
4107 nRanges = _rangesCount;
91447636 4108 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
9bccf70c
A
4109 if (vcopy == 0) return false;
4110
4111 keys[0] = OSSymbol::withCString("address");
4112 keys[1] = OSSymbol::withCString("length");
4113
4114 result = false;
4115 values[0] = values[1] = 0;
4116
4117 // From this point on, failure paths jump to the bail: label.
4118
4119 // Copy the volatile data so we don't have to allocate memory
4120 // while the lock is held.
4121 LOCK;
4122 if (nRanges == _rangesCount) {
91447636 4123 Ranges vec = _ranges;
9bccf70c 4124 for (index = 0; index < nRanges; index++) {
91447636
A
4125 user_addr_t addr; IOByteCount len;
4126 getAddrLenForInd(addr, len, type, vec, index);
4127 vcopy[index].address = addr;
4128 vcopy[index].length = len;
9bccf70c
A
4129 }
4130 } else {
4131 // The descriptor changed out from under us. Give up.
4132 UNLOCK;
4133 result = false;
4134 goto bail;
4135 }
4136 UNLOCK;
4137
4138 for (index = 0; index < nRanges; index++)
4139 {
91447636
A
4140 user_addr_t addr = vcopy[index].address;
4141 IOByteCount len = (IOByteCount) vcopy[index].length;
4142 values[0] =
060df5ea 4143 OSNumber::withNumber(addr, sizeof(addr) * 8);
9bccf70c
A
4144 if (values[0] == 0) {
4145 result = false;
4146 goto bail;
4147 }
91447636 4148 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
9bccf70c
A
4149 if (values[1] == 0) {
4150 result = false;
4151 goto bail;
4152 }
4153 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4154 if (dict == 0) {
4155 result = false;
4156 goto bail;
4157 }
4158 values[0]->release();
4159 values[1]->release();
4160 values[0] = values[1] = 0;
4161
4162 result = dict->serialize(s);
4163 dict->release();
4164 if (!result) {
4165 goto bail;
4166 }
4167 }
4168 result = s->addXMLEndTag("array");
4169
4170 bail:
4171 if (values[0])
4172 values[0]->release();
4173 if (values[1])
4174 values[1]->release();
4175 if (keys[0])
4176 keys[0]->release();
4177 if (keys[1])
4178 keys[1]->release();
4179 if (vcopy)
2d21ac55 4180 IOFree(vcopy, sizeof(SerData) * nRanges);
9bccf70c
A
4181 return result;
4182}
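/*
 * Rough shape of what serialize() emits, sketched for a two-range descriptor
 * (values elided; the exact integer formatting is up to OSSerialize):
 *
 *     <array>
 *         <dict><key>address</key><integer>...</integer>
 *               <key>length</key><integer>...</integer></dict>
 *         <dict><key>address</key><integer>...</integer>
 *               <key>length</key><integer>...</integer></dict>
 *     </array>
 */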
4183
9bccf70c
A
4184/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4185
0b4e3aa0 4186OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
4187#ifdef __LP64__
4188OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4189OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4190OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4191OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4192OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4193OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4194OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4195#else /* !__LP64__ */
55e303ae
A
4196OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4197OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
4198OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4199OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 4200OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
4201OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4202OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4203#endif /* !__LP64__ */
1c79356b
A
4204OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4205OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4206OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4207OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4208OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4209OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4210OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4211OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 4212
55e303ae 4213/* ex-inline function implementation */
0c530ab8
A
4214IOPhysicalAddress
4215IOMemoryDescriptor::getPhysicalAddress()
9bccf70c 4216 { return( getPhysicalSegment( 0, 0 )); }