/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

kern_return_t
memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    vm_size_t               *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMapperWaitSystem     ((IOMapper *) 1)

static IOMapper * gIOSystemMapper = NULL;

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;     // Pointer to page list or index into it
    uint32_t fIOMDOffset;       // The offset of this iopl in descriptor
    ppnum_t fMappedPage;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *      fMapper;
    uint8_t         fDMAMapNumAddressBits;
    uint64_t        fDMAMapAlignment;
    addr64_t        fMappedBase;
    uint64_t        fPreparationID;
    unsigned int    fPageCnt;
    unsigned char   fDiscontig;
#if __LP64__
    // align arrays to 8 bytes so following macros work
    unsigned char   fPad[3];
#endif
    upl_page_info_t fPageList[1];   /* variable length */
    ioPLBlock       fBlocks[1];     /* variable length */
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) (void *)&(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)  \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u)   \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

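// Layout note (illustrative, derived from the macros above): the ioGMDData
// stored in _memoryEntries is a variable-length blob -- fPageCnt
// upl_page_info_t entries starting at fPageList, immediately followed by the
// ioPLBlock array that getIOPLList() returns.  So a descriptor expected to
// cover 3 pages with room for 1 IOPL would be sized as
//   computeDataSize(3, 1) == offsetof(ioGMDData, fPageList)
//                            + 3 * sizeof(upl_page_info_t)
//                            + 1 * sizeof(ioPLBlock)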
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )


extern "C" {

kern_return_t device_data_action(
    uintptr_t               device_handle,
    ipc_port_t              device_pager,
    vm_prot_t               protection,
    vm_object_offset_t      offset,
    vm_size_t               size)
{
    kern_return_t                kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor *         memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
    uintptr_t     device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};  // end extern "C"

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
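
// Illustrative use (a sketch, not taken from this file; variable names are
// hypothetical): a driver describing a client task's buffer would typically do
//   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//       userVA, userLen, kIODirectionOutIn, userTask);
//   if (md && (kIOReturnSuccess == md->prepare())) {
//       /* ... program I/O against the descriptor ... */
//       md->complete();
//   }
//   if (md) md->release();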


/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ;
    if (kIODirectionOut != (kIODirectionOutIn & _flags))
        prot |= VM_PROT_WRITE;

    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
                &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
#if IOASSERT
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
                  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
#endif
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
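
// Note (a reading of the code above, not an official comment): when _memEntry
// already holds a named entry, MAP_MEM_NAMED_REUSE asks the VM to hand back
// that same entry if the range still matches. withPersistentMemoryDescriptor()
// relies on this to detect "same backing memory" -- if the returned port equals
// the original _memEntry it simply retains the original descriptor instead of
// constructing a new one.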

#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges, from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        options |= (kIOMemoryRedirected & _flags);
        if (!(kIOMemoryRedirected & options))
        {
            if (_memEntry)
            {
                ipc_port_release_send((ipc_port_t) _memEntry);
                _memEntry = 0;
            }
            if (_mappings)
                _mappings->flushCollection();
        }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryHostOnly & options) options |= kIOMemoryMapperNone;
    if (kIOMemoryMapperNone & options)
        mapper = 0; // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Temp binary compatibility for kIOMemoryThreadSafe
    if (kIOMemoryReserved6156215 & options)
    {
        options &= ~kIOMemoryReserved6156215;
        options |= kIOMemoryThreadSafe;
    }
    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!initMemoryEntries(dataSize, mapper)) return (false);
        dataP = getDataP(_memoryEntries);
        dataP->fPageCnt = 0;

        // _wireCount++;    // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
        }
        iopl.fIOMDOffset = 0;
        iopl.fMappedPage = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;
        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            IOPhysicalLength len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);  // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;   // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!initMemoryEntries(dataSize, mapper)) return false;
            dataP = getDataP(_memoryEntries);
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
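
// Re-use sketch (illustrative, per the header comment above initWithOptions;
// variable names are hypothetical): an already-constructed general descriptor
// may be retargeted at a new range instead of allocating a fresh object:
//   IOAddressRange range = { newVA, newLen };
//   bool ok = gmd->initWithOptions(&range, 1, 0, clientTask,
//                 kIOMemoryTypeVirtual64 | kIODirectionOutIn, /* mapper */ 0);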

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->dp.memory = 0;
        UNLOCK;
    }
    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
    {
        ioGMDData * dataP;
        if (_memoryEntries && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
            dataP->fMappedBase = 0;
        }
    }
    else
    {
        while (_wireCount) complete();
    }

    if (_memoryEntries) _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved)
    {
        if (reserved->dp.devicePager)
        {
            // memEntry holds a ref on the device pager which owns reserved
            // (IOMemoryDescriptorReserved) so no reserved access after this point
            device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
        }
        else
            IODelete(reserved, IOMemoryDescriptorReserved, 1);
        reserved = NULL;
    }

    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    if (_prepareLock)
        IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

#ifndef __LP64__
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */

IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}
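
// Illustrative use (a sketch; the buffer name is hypothetical): copy the first
// bytes of a descriptor into a kernel buffer, or push data back out:
//   char buf[256];
//   IOByteCount got = md->readBytes(0, buf, sizeof(buf));
//   IOByteCount put = md->writeBytes(0, buf, got);
// Both return the number of bytes actually moved, which may be less than
// requested when the offset/length fall outside the descriptor's range.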

// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical)
      || ((kIOMemoryTypeMask & _flags) == kIOMemoryTypePhysical64))
    {
        IOMemoryDescriptor::setPreparationID();
        return (IOMemoryDescriptor::getPreparationID());
    }

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}

IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
{
    if (!reserved)
    {
        reserved = IONew(IOMemoryDescriptorReserved, 1);
        if (reserved)
            bzero(reserved, sizeof(IOMemoryDescriptorReserved));
    }
    return (reserved);
}

void IOMemoryDescriptor::setPreparationID( void )
{
    if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
    {
#if defined(__ppc__ )
        reserved->preparationID = gIOMDPreparationID++;
#else
        reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
#endif
    }
}

uint64_t IOMemoryDescriptor::getPreparationID( void )
{
    if (reserved)
        return (reserved->preparationID);
    else
        return (kIOPreparationIDUnsupported);
}
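
// Note (a reading of the code above): while a general memory descriptor is not
// wired, getPreparationID() reports kIOPreparationIDUnprepared; the first query
// after it has been prepared draws a fresh 64-bit value from the global
// gIOMDPreparationID counter, which only ever increments.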

IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn err = kIOReturnSuccess;
    DMACommandOps params;
    IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
    ioGMDData *dataP;

    params = (op & ~kIOMDDMACommandOperationMask & op);
    op &= kIOMDDMACommandOperationMask;

    if (kIOMDDMAMap == op)
    {
        if (dataSize < sizeof(IOMDDMAMapArgs))
            return kIOReturnUnderrun;

        IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries && data->fMapper)
        {
            bool remap;
            bool whole = ((data->fOffset == 0) && (data->fLength == _length));
            dataP = getDataP(_memoryEntries);

            if (data->fMapSpec.numAddressBits < dataP->fDMAMapNumAddressBits) dataP->fDMAMapNumAddressBits = data->fMapSpec.numAddressBits;
            if (data->fMapSpec.alignment      > dataP->fDMAMapAlignment)      dataP->fDMAMapAlignment      = data->fMapSpec.alignment;

            remap = (dataP->fDMAMapNumAddressBits < 64)
                 && ((dataP->fMappedBase + _length) > (1ULL << dataP->fDMAMapNumAddressBits));
            remap |= (dataP->fDMAMapAlignment > page_size);
            remap |= (!whole);
            if (remap || !dataP->fMappedBase)
            {
//              if (dataP->fMappedBase) OSReportWithBacktrace("kIOMDDMAMap whole %d remap %d params %d\n", whole, remap, params);
                err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
                if ((kIOReturnSuccess == err) && whole && !dataP->fMappedBase)
                {
                    dataP->fMappedBase = data->fAlloc;
                    data->fAllocCount = 0;  // IOMD owns the alloc now
                }
            }
            else
            {
                data->fAlloc = dataP->fMappedBase;
                data->fAllocCount = 0;      // IOMD owns the alloc
            }
            data->fMapContig = !dataP->fDiscontig;
        }

        return (err);
    }

    if (kIOMDAddDMAMapSpec == op)
    {
        if (dataSize < sizeof(IODMAMapSpecification))
            return kIOReturnUnderrun;

        IODMAMapSpecification * data = (IODMAMapSpecification *) vData;

        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        if (_memoryEntries)
        {
            dataP = getDataP(_memoryEntries);
            if (data->numAddressBits < dataP->fDMAMapNumAddressBits)
                dataP->fDMAMapNumAddressBits = data->numAddressBits;
            if (data->alignment > dataP->fDMAMapAlignment)
                dataP->fDMAMapAlignment = data->alignment;
        }
        return kIOReturnSuccess;
    }

    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries)
            {
                dataP = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(dataP);
                UInt count = getNumIOPL(_memoryEntries, dataP);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
        }

        return kIOReturnSuccess;

#if IOMD_DEBUG_DMAACTIVE
    } else if (kIOMDDMAActive == op) {
        if (params) OSIncrementAtomic(&md->__iomd_reservedA);
        else {
            if (md->__iomd_reservedA)
                OSDecrementAtomic(&md->__iomd_reservedA);
            else
                panic("kIOMDSetDMAInactive");
        }
#endif /* IOMD_DEBUG_DMAACTIVE */

    } else if (kIOMDWalkSegments != op)
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (IOMapper::gSystem && mapped
        && (!(kIOMemoryHostOnly & _flags))
        && (!_memoryEntries || !getDataP(_memoryEntries)->fMappedBase))
//      && (_memoryEntries && !getDataP(_memoryEntries)->fMappedBase))
    {
        if (!_memoryEntries
            && !md->initMemoryEntries(computeDataSize(0, 0), kIOMapperWaitSystem)) return (kIOReturnNoMemory);

        dataP = getDataP(_memoryEntries);
        if (dataP->fMapper)
        {
            IODMAMapSpecification mapSpec;
            bzero(&mapSpec, sizeof(mapSpec));
            mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
            mapSpec.alignment = dataP->fDMAMapAlignment;
            err = md->dmaMap(dataP->fMapper, &mapSpec, 0, _length, &dataP->fMappedBase, NULL);
            if (kIOReturnSuccess != err) return (err);
        }
    }

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if (!params
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;  // Start from beginning

    UInt length;
    UInt64 address;


    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        if (true && mapped && _memoryEntries
                && (dataP = getDataP(_memoryEntries)) && dataP->fMappedBase)
        {
            address = dataP->fMappedBase + offset;
        }
        else
        {
            // see how far we can coalesce ranges
            while (ind < _rangesCount && address + length == physP[ind].address) {
                len = physP[ind].length;
                length += len;
                off2Ind += len;
                ind++;
            }
        }
        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;           // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && dataP->fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = trunc_page_64(dataP->fMappedBase) + ptoa_64(ioplInfo.fMappedPage) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;


        assert(address);
        assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    IOReturn     ret;
    addr64_t     address = 0;
    IOByteCount  length  = 0;
    IOMapper *   mapper  = gIOSystemMapper;
    IOOptionBits type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        user_addr_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // lets find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr; // Truncate address to 32bit
    }
    else
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) (void *)&_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone)) && !(_flags & kIOMemoryHostOnly);

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapAddr(origAddr);
                length = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapAddr(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}

#ifndef __LP64__
addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);

    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
              address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#endif /* !__LP64__ */

0c530ab8
A
1733IOReturn
1734IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1735{
99c3a104
A
1736 IOMemoryDescriptor *md = const_cast<IOMemoryDescriptor *>(this);
1737 DMACommandOps params;
1738 IOReturn err;
1739
1740 params = (op & ~kIOMDDMACommandOperationMask & op);
1741 op &= kIOMDDMACommandOperationMask;
1742
0c530ab8
A
1743 if (kIOMDGetCharacteristics == op) {
1744 if (dataSize < sizeof(IOMDDMACharacteristics))
1745 return kIOReturnUnderrun;
1746
1747 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1748 data->fLength = getLength();
1749 data->fSGCount = 0;
b0d623f7 1750 data->fDirection = getDirection();
0c530ab8
A
1751 data->fIsPrepared = true; // Assume prepared - fails safe
1752 }
99c3a104 1753 else if (kIOMDWalkSegments == op) {
0c530ab8
A
1754 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1755 return kIOReturnUnderrun;
1756
1757 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1758 IOByteCount offset = (IOByteCount) data->fOffset;
1759
1760 IOPhysicalLength length;
0c530ab8 1761 if (data->fMapped && IOMapper::gSystem)
99c3a104 1762 data->fIOVMAddr = md->getPhysicalSegment(offset, &length);
0c530ab8 1763 else
99c3a104 1764 data->fIOVMAddr = md->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
0c530ab8
A
1765 data->fLength = length;
1766 }
99c3a104
A
1767 else if (kIOMDAddDMAMapSpec == op) return kIOReturnUnsupported;
1768 else if (kIOMDDMAMap == op)
1769 {
1770 if (dataSize < sizeof(IOMDDMAMapArgs))
1771 return kIOReturnUnderrun;
1772 IOMDDMAMapArgs * data = (IOMDDMAMapArgs *) vData;
1773
1774 if (params) panic("class %s does not support IODMACommand::kIterateOnly", getMetaClass()->getClassName());
1775
39236c6e 1776 data->fMapContig = true;
99c3a104
A
1777 err = md->dmaMap(data->fMapper, &data->fMapSpec, data->fOffset, data->fLength, &data->fAlloc, &data->fAllocCount);
1778 return (err);
1779 }
1780 else return kIOReturnBadArgument;
0c530ab8
A
1781
1782 return kIOReturnSuccess;
1783}
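// A minimal sketch (illustrative only) of driving dmaCommandOperation() the
// way dmaMap() later in this file does: fetch the characteristics once, then
// walk segments with kIOMDFirstSegment/kIOMDWalkSegments. The helper name and
// the logging are assumptions; the struct and op names are from this file.
#if 0
static IOReturn WalkWithDMACommandOps(IOMemoryDescriptor * md)
{
    IOMDDMACharacteristics   chars;
    IOMDDMAWalkSegmentState  walkState;
    IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *) &walkState;
    IOOptionBits             mdOp;
    IOReturn                 ret;
    uint64_t                 index;

    ret = md->dmaCommandOperation(kIOMDGetCharacteristics, &chars, sizeof(chars));
    if (kIOReturnSuccess != ret) return (ret);

    walkArgs->fMapped = false;            // report CPU physical addresses
    mdOp = kIOMDFirstSegment;
    for (index = 0; index < chars.fLength; )
    {
        walkArgs->fOffset = index;
        ret = md->dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
        mdOp = kIOMDWalkSegments;
        if (kIOReturnSuccess != ret) break;
        IOLog("seg 0x%qx len 0x%qx\n",
              (uint64_t) walkArgs->fIOVMAddr, (uint64_t) walkArgs->fLength);
        index += walkArgs->fLength;
    }
    return (ret);
}
#endif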
1784
b0d623f7
A
1785static IOReturn
1786purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1787{
1788 IOReturn err = kIOReturnSuccess;
1789
1790 *control = VM_PURGABLE_SET_STATE;
39236c6e
A
1791
1792 enum { kIOMemoryPurgeableControlMask = 15 };
1793
1794 switch (kIOMemoryPurgeableControlMask & newState)
b0d623f7
A
1795 {
1796 case kIOMemoryPurgeableKeepCurrent:
1797 *control = VM_PURGABLE_GET_STATE;
1798 break;
1799
1800 case kIOMemoryPurgeableNonVolatile:
1801 *state = VM_PURGABLE_NONVOLATILE;
1802 break;
1803 case kIOMemoryPurgeableVolatile:
39236c6e 1804 *state = VM_PURGABLE_VOLATILE | (newState & ~kIOMemoryPurgeableControlMask);
b0d623f7
A
1805 break;
1806 case kIOMemoryPurgeableEmpty:
1807 *state = VM_PURGABLE_EMPTY;
1808 break;
1809 default:
1810 err = kIOReturnBadArgument;
1811 break;
1812 }
1813 return (err);
1814}
1815
1816static IOReturn
1817purgeableStateBits(int * state)
1818{
1819 IOReturn err = kIOReturnSuccess;
1820
39236c6e 1821 switch (VM_PURGABLE_STATE_MASK & *state)
b0d623f7
A
1822 {
1823 case VM_PURGABLE_NONVOLATILE:
1824 *state = kIOMemoryPurgeableNonVolatile;
1825 break;
1826 case VM_PURGABLE_VOLATILE:
1827 *state = kIOMemoryPurgeableVolatile;
1828 break;
1829 case VM_PURGABLE_EMPTY:
1830 *state = kIOMemoryPurgeableEmpty;
1831 break;
1832 default:
1833 *state = kIOMemoryPurgeableNonVolatile;
1834 err = kIOReturnNotReady;
1835 break;
1836 }
1837 return (err);
1838}
1839
1840IOReturn
1841IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1842 IOOptionBits * oldState )
1843{
1844 IOReturn err = kIOReturnSuccess;
1845 vm_purgable_t control;
1846 int state;
1847
1848 if (_memEntry)
1849 {
1850 err = super::setPurgeable(newState, oldState);
1851 }
1852 else
1853 {
1854 if (kIOMemoryThreadSafe & _flags)
1855 LOCK;
1856 do
1857 {
1858 // Find the appropriate vm_map for the given task
1859 vm_map_t curMap;
1860 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1861 {
1862 err = kIOReturnNotReady;
1863 break;
1864 }
39236c6e
A
1865 else if (!_task)
1866 {
1867 err = kIOReturnUnsupported;
1868 break;
1869 }
b0d623f7
A
1870 else
1871 curMap = get_task_map(_task);
1872
1873 // can only do one range
1874 Ranges vec = _ranges;
1875 IOOptionBits type = _flags & kIOMemoryTypeMask;
1876 user_addr_t addr;
1877 IOByteCount len;
1878 getAddrLenForInd(addr, len, type, vec, 0);
1879
1880 err = purgeableControlBits(newState, &control, &state);
1881 if (kIOReturnSuccess != err)
1882 break;
1883 err = mach_vm_purgable_control(curMap, addr, control, &state);
1884 if (oldState)
1885 {
1886 if (kIOReturnSuccess == err)
1887 {
1888 err = purgeableStateBits(&state);
1889 *oldState = state;
1890 }
1891 }
1892 }
1893 while (false);
1894 if (kIOMemoryThreadSafe & _flags)
1895 UNLOCK;
1896 }
1897 return (err);
1898}
1899
91447636
A
1900IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1901 IOOptionBits * oldState )
1902{
1903 IOReturn err = kIOReturnSuccess;
1904 vm_purgable_t control;
1905 int state;
1906
b0d623f7
A
1907 if (kIOMemoryThreadSafe & _flags)
1908 LOCK;
1909
91447636
A
1910 do
1911 {
1912 if (!_memEntry)
1913 {
1914 err = kIOReturnNotReady;
1915 break;
1916 }
b0d623f7
A
1917 err = purgeableControlBits(newState, &control, &state);
1918 if (kIOReturnSuccess != err)
1919 break;
91447636 1920 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
b0d623f7
A
1921 if (oldState)
1922 {
1923 if (kIOReturnSuccess == err)
1924 {
1925 err = purgeableStateBits(&state);
1926 *oldState = state;
1927 }
1928 }
91447636
A
1929 }
1930 while (false);
1931
b0d623f7
A
1932 if (kIOMemoryThreadSafe & _flags)
1933 UNLOCK;
1934
91447636
A
1935 return (err);
1936}
1937
39236c6e
A
1938
1939IOReturn IOMemoryDescriptor::getPageCounts( IOByteCount * residentPageCount,
1940 IOByteCount * dirtyPageCount )
1941{
1942 IOReturn err = kIOReturnSuccess;
1943 unsigned int _residentPageCount, _dirtyPageCount;
1944
1945 if (kIOMemoryThreadSafe & _flags) LOCK;
1946
1947 do
1948 {
1949 if (!_memEntry)
1950 {
1951 err = kIOReturnNotReady;
1952 break;
1953 }
1954 if ((residentPageCount == NULL) && (dirtyPageCount == NULL))
1955 {
1956 err = kIOReturnBadArgument;
1957 break;
1958 }
1959
1960 err = mach_memory_entry_get_page_counts((ipc_port_t) _memEntry,
1961 residentPageCount ? &_residentPageCount : NULL,
1962 dirtyPageCount ? &_dirtyPageCount : NULL);
1963 if (kIOReturnSuccess != err) break;
1964 if (residentPageCount) *residentPageCount = _residentPageCount;
1965 if (dirtyPageCount) *dirtyPageCount = _dirtyPageCount;
1966 }
1967 while (false);
1968
1969 if (kIOMemoryThreadSafe & _flags) UNLOCK;
1970
1971 return (err);
1972}
1973
1974
91447636
A
1975extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1976extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1977
0b4c1975
A
1978static void SetEncryptOp(addr64_t pa, unsigned int count)
1979{
1980 ppnum_t page, end;
1981
1982 page = atop_64(round_page_64(pa));
1983 end = atop_64(trunc_page_64(pa + count));
1984 for (; page < end; page++)
1985 {
1986 pmap_clear_noencrypt(page);
1987 }
1988}
1989
1990static void ClearEncryptOp(addr64_t pa, unsigned int count)
1991{
1992 ppnum_t page, end;
1993
1994 page = atop_64(round_page_64(pa));
1995 end = atop_64(trunc_page_64(pa + count));
1996 for (; page < end; page++)
1997 {
1998 pmap_set_noencrypt(page);
1999 }
2000}
2001
91447636
A
2002IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
2003 IOByteCount offset, IOByteCount length )
2004{
2005 IOByteCount remaining;
316670eb 2006 unsigned int res;
91447636
A
2007 void (*func)(addr64_t pa, unsigned int count) = 0;
2008
2009 switch (options)
2010 {
2011 case kIOMemoryIncoherentIOFlush:
2012 func = &dcache_incoherent_io_flush64;
2013 break;
2014 case kIOMemoryIncoherentIOStore:
2015 func = &dcache_incoherent_io_store64;
2016 break;
0b4c1975
A
2017
2018 case kIOMemorySetEncrypted:
2019 func = &SetEncryptOp;
2020 break;
2021 case kIOMemoryClearEncrypted:
2022 func = &ClearEncryptOp;
2023 break;
91447636
A
2024 }
2025
2026 if (!func)
2027 return (kIOReturnUnsupported);
2028
b0d623f7
A
2029 if (kIOMemoryThreadSafe & _flags)
2030 LOCK;
2031
316670eb 2032 res = 0x0UL;
91447636
A
2033 remaining = length = min(length, getLength() - offset);
2034 while (remaining)
2035 // (process another target segment?)
2036 {
2037 addr64_t dstAddr64;
2038 IOByteCount dstLen;
2039
b0d623f7 2040 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
91447636
A
2041 if (!dstAddr64)
2042 break;
2043
2044 // Clip segment length to remaining
2045 if (dstLen > remaining)
2046 dstLen = remaining;
2047
2048 (*func)(dstAddr64, dstLen);
2049
2050 offset += dstLen;
2051 remaining -= dstLen;
2052 }
2053
b0d623f7
A
2054 if (kIOMemoryThreadSafe & _flags)
2055 UNLOCK;
2056
91447636
A
2057 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
2058}
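// A minimal sketch (illustrative only) of performOperation(): push a
// descriptor's contents out of an incoherent data cache before handing the
// buffer to a device. The helper name is an assumption; the descriptor is
// assumed to be prepared so each physical segment can be resolved above.
#if 0
static IOReturn FlushForDMA(IOMemoryDescriptor * md)
{
    // kIOMemoryIncoherentIOStore walks each physical segment and calls
    // dcache_incoherent_io_store64() on it, as implemented above.
    return (md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength()));
}
#endif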
2059
316670eb 2060#if defined(__i386__) || defined(__x86_64__)
55e303ae
A
2061extern vm_offset_t first_avail;
2062#define io_kernel_static_end first_avail
316670eb
A
2063#else
2064#error io_kernel_static_end is undefined for this architecture
2065#endif
55e303ae
A
2066
2067static kern_return_t
2068io_get_kernel_static_upl(
91447636 2069 vm_map_t /* map */,
b0d623f7 2070 uintptr_t offset,
55e303ae
A
2071 vm_size_t *upl_size,
2072 upl_t *upl,
2073 upl_page_info_array_t page_list,
0c530ab8
A
2074 unsigned int *count,
2075 ppnum_t *highest_page)
1c79356b 2076{
55e303ae
A
2077 unsigned int pageCount, page;
2078 ppnum_t phys;
0c530ab8 2079 ppnum_t highestPage = 0;
1c79356b 2080
55e303ae
A
2081 pageCount = atop_32(*upl_size);
2082 if (pageCount > *count)
2083 pageCount = *count;
1c79356b 2084
55e303ae 2085 *upl = NULL;
1c79356b 2086
55e303ae
A
2087 for (page = 0; page < pageCount; page++)
2088 {
2089 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
2090 if (!phys)
2091 break;
2092 page_list[page].phys_addr = phys;
2093 page_list[page].pageout = 0;
2094 page_list[page].absent = 0;
2095 page_list[page].dirty = 0;
2096 page_list[page].precious = 0;
2097 page_list[page].device = 0;
0c530ab8 2098 if (phys > highestPage)
b0d623f7 2099 highestPage = phys;
55e303ae 2100 }
0b4e3aa0 2101
0c530ab8
A
2102 *highest_page = highestPage;
2103
55e303ae
A
2104 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
2105}
0b4e3aa0 2106
55e303ae
A
2107IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
2108{
91447636 2109 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 2110 IOReturn error = kIOReturnCannotWire;
55e303ae 2111 ioGMDData *dataP;
99c3a104 2112 upl_page_info_array_t pageInfo;
39236c6e
A
2113 ppnum_t mapBase;
2114 ipc_port_t sharedMem;
1c79356b 2115
0c530ab8 2116 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 2117
39236c6e
A
2118 if ((kIODirectionOutIn & forDirection) == kIODirectionNone)
2119 forDirection = (IODirection) (forDirection | getDirection());
55e303ae
A
2120
2121 int uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 2122 switch (kIODirectionOutIn & forDirection)
55e303ae
A
2123 {
2124 case kIODirectionOut:
2125 // Pages do not need to be marked as dirty on commit
2126 uplFlags = UPL_COPYOUT_FROM;
55e303ae
A
2127 break;
2128
2129 case kIODirectionIn:
2130 default:
2131 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
2132 break;
2133 }
55e303ae 2134
39236c6e
A
2135 if (_wireCount)
2136 {
2137 if ((kIOMemoryPreparedReadOnly & _flags) && !(UPL_COPYOUT_FROM & uplFlags))
2138 {
2139 OSReportWithBacktrace("IOMemoryDescriptor 0x%lx prepared read only", VM_KERNEL_ADDRPERM(this));
2140 error = kIOReturnNotWritable;
2141 }
2142 else error = kIOReturnSuccess;
2143 return (error);
2144 }
2145
2146 dataP = getDataP(_memoryEntries);
2147 IOMapper *mapper;
2148 mapper = dataP->fMapper;
2149 dataP->fMappedBase = 0;
2150
2151 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
0c530ab8 2152 if (kIODirectionPrepareToPhys32 & forDirection)
99c3a104
A
2153 {
2154 if (!mapper) uplFlags |= UPL_NEED_32BIT_ADDR;
2155 if (dataP->fDMAMapNumAddressBits > 32) dataP->fDMAMapNumAddressBits = 32;
2156 }
15129b1c
A
2157 if (kIODirectionPrepareNoFault & forDirection) uplFlags |= UPL_REQUEST_NO_FAULT;
2158 if (kIODirectionPrepareNoZeroFill & forDirection) uplFlags |= UPL_NOZEROFILLIO;
2159 if (kIODirectionPrepareNonCoherent & forDirection) uplFlags |= UPL_REQUEST_FORCE_COHERENCY;
39236c6e
A
2160
2161 mapBase = 0;
2162 sharedMem = (ipc_port_t) _memEntry;
0c530ab8 2163
99c3a104
A
2164 // Note that appendBytes(NULL) zeros the data up to the desired length.
2165 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
2166 dataP = 0;
2167
91447636 2168 // Find the appropriate vm_map for the given task
55e303ae
A
2169 vm_map_t curMap;
2170 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2171 curMap = 0;
2172 else
2173 { curMap = get_task_map(_task); }
2174
91447636
A
2175 // Iterate over the vector of virtual ranges
2176 Ranges vec = _ranges;
39236c6e
A
2177 unsigned int pageIndex = 0;
2178 IOByteCount mdOffset = 0;
2179 ppnum_t highestPage = 0;
99c3a104 2180
55e303ae
A
2181 for (UInt range = 0; range < _rangesCount; range++) {
2182 ioPLBlock iopl;
91447636 2183 user_addr_t startPage;
55e303ae 2184 IOByteCount numBytes;
0c530ab8 2185 ppnum_t highPage = 0;
55e303ae 2186
91447636
A
2187 // Get the startPage address and length of vec[range]
2188 getAddrLenForInd(startPage, numBytes, type, vec, range);
b0d623f7 2189 iopl.fPageOffset = startPage & PAGE_MASK;
91447636
A
2190 numBytes += iopl.fPageOffset;
2191 startPage = trunc_page_64(startPage);
2192
55e303ae 2193 if (mapper)
99c3a104 2194 iopl.fMappedPage = mapBase + pageIndex;
55e303ae 2195 else
99c3a104 2196 iopl.fMappedPage = 0;
55e303ae 2197
91447636 2198 // Iterate over the current range, creating UPLs
55e303ae 2199 while (numBytes) {
91447636
A
2200 vm_address_t kernelStart = (vm_address_t) startPage;
2201 vm_map_t theMap;
2202 if (curMap)
2203 theMap = curMap;
2204 else if (!sharedMem) {
2205 assert(_task == kernel_task);
2206 theMap = IOPageableMapForAddress(kernelStart);
2207 }
2208 else
2209 theMap = NULL;
2210
55e303ae 2211 int ioplFlags = uplFlags;
99c3a104
A
2212 dataP = getDataP(_memoryEntries);
2213 pageInfo = getPageList(dataP);
55e303ae
A
2214 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2215
b0d623f7 2216 vm_size_t ioplSize = round_page(numBytes);
55e303ae
A
2217 unsigned int numPageInfo = atop_32(ioplSize);
2218
91447636 2219 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
55e303ae 2220 error = io_get_kernel_static_upl(theMap,
91447636
A
2221 kernelStart,
2222 &ioplSize,
2223 &iopl.fIOPL,
2224 baseInfo,
0c530ab8
A
2225 &numPageInfo,
2226 &highPage);
91447636
A
2227 }
2228 else if (sharedMem) {
55e303ae 2229 error = memory_object_iopl_request(sharedMem,
91447636
A
2230 ptoa_32(pageIndex),
2231 &ioplSize,
2232 &iopl.fIOPL,
2233 baseInfo,
2234 &numPageInfo,
2235 &ioplFlags);
2236 }
2237 else {
2238 assert(theMap);
2239 error = vm_map_create_upl(theMap,
2240 startPage,
b0d623f7 2241 (upl_size_t*)&ioplSize,
91447636
A
2242 &iopl.fIOPL,
2243 baseInfo,
2244 &numPageInfo,
2245 &ioplFlags);
de355530
A
2246 }
2247
55e303ae
A
2248 assert(ioplSize);
2249 if (error != KERN_SUCCESS)
2250 goto abortExit;
2251
0c530ab8
A
2252 if (iopl.fIOPL)
2253 highPage = upl_get_highest_page(iopl.fIOPL);
2254 if (highPage > highestPage)
2255 highestPage = highPage;
2256
2d21ac55 2257 error = kIOReturnCannotWire;
55e303ae
A
2258
2259 if (baseInfo->device) {
2260 numPageInfo = 1;
39236c6e 2261 iopl.fFlags = kIOPLOnDevice;
55e303ae
A
2262 }
2263 else {
2264 iopl.fFlags = 0;
55e303ae
A
2265 }
2266
2267 iopl.fIOMDOffset = mdOffset;
2268 iopl.fPageInfo = pageIndex;
39236c6e 2269 if (mapper && pageIndex && (page_mask & (mdOffset + iopl.fPageOffset))) dataP->fDiscontig = true;
55e303ae 2270
6d2010ae
A
2271#if 0
2272 // used to remove the upl for auto prepares here, for some errant code
 2273 // that freed the memory before freeing the descriptor pointing at it
55e303ae
A
2274 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2275 {
91447636
A
2276 upl_commit(iopl.fIOPL, 0, 0);
2277 upl_deallocate(iopl.fIOPL);
55e303ae 2278 iopl.fIOPL = 0;
de355530 2279 }
6d2010ae 2280#endif
55e303ae
A
2281
2282 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
 2283 // Clean up the partially created and unsaved iopl
91447636
A
2284 if (iopl.fIOPL) {
2285 upl_abort(iopl.fIOPL, 0);
2286 upl_deallocate(iopl.fIOPL);
2287 }
55e303ae
A
2288 goto abortExit;
2289 }
99c3a104 2290 dataP = 0;
55e303ae
A
2291
 2292 // Check for multiple iopls in one virtual range
2293 pageIndex += numPageInfo;
2294 mdOffset -= iopl.fPageOffset;
2295 if (ioplSize < numBytes) {
2296 numBytes -= ioplSize;
2297 startPage += ioplSize;
2298 mdOffset += ioplSize;
2299 iopl.fPageOffset = 0;
99c3a104 2300 if (mapper) iopl.fMappedPage = mapBase + pageIndex;
55e303ae
A
2301 }
2302 else {
2303 mdOffset += numBytes;
2304 break;
2305 }
1c79356b
A
2306 }
2307 }
55e303ae 2308
0c530ab8
A
2309 _highestPage = highestPage;
2310
39236c6e
A
2311 if (UPL_COPYOUT_FROM & uplFlags) _flags |= kIOMemoryPreparedReadOnly;
2312
1c79356b
A
2313 return kIOReturnSuccess;
2314
2315abortExit:
55e303ae
A
2316 {
2317 dataP = getDataP(_memoryEntries);
91447636 2318 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
2319 ioPLBlock *ioplList = getIOPLList(dataP);
2320
2321 for (UInt range = 0; range < done; range++)
2322 {
91447636
A
2323 if (ioplList[range].fIOPL) {
2324 upl_abort(ioplList[range].fIOPL, 0);
2325 upl_deallocate(ioplList[range].fIOPL);
2326 }
55e303ae 2327 }
6d2010ae 2328 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
1c79356b
A
2329 }
2330
2d21ac55
A
2331 if (error == KERN_FAILURE)
2332 error = kIOReturnCannotWire;
39236c6e
A
2333 else if (error == KERN_MEMORY_ERROR)
2334 error = kIOReturnNoResources;
2d21ac55 2335
55e303ae
A
2336 return error;
2337}
d7e50217 2338
99c3a104
A
2339bool IOGeneralMemoryDescriptor::initMemoryEntries(size_t size, IOMapper * mapper)
2340{
2341 ioGMDData * dataP;
2342 unsigned dataSize = size;
2343
2344 if (!_memoryEntries) {
2345 _memoryEntries = OSData::withCapacity(dataSize);
2346 if (!_memoryEntries)
2347 return false;
2348 }
2349 else if (!_memoryEntries->initWithCapacity(dataSize))
2350 return false;
2351
2352 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
2353 dataP = getDataP(_memoryEntries);
2354
2355 if (mapper == kIOMapperWaitSystem) {
2356 IOMapper::checkForSystemMapper();
2357 mapper = IOMapper::gSystem;
2358 }
2359 dataP->fMapper = mapper;
2360 dataP->fPageCnt = 0;
2361 dataP->fMappedBase = 0;
2362 dataP->fDMAMapNumAddressBits = 64;
2363 dataP->fDMAMapAlignment = 0;
2364 dataP->fPreparationID = kIOPreparationIDUnprepared;
39236c6e 2365 dataP->fDiscontig = false;
99c3a104
A
2366
2367 return (true);
2368}
2369
2370IOReturn IOMemoryDescriptor::dmaMap(
2371 IOMapper * mapper,
2372 const IODMAMapSpecification * mapSpec,
2373 uint64_t offset,
2374 uint64_t length,
2375 uint64_t * address,
2376 ppnum_t * mapPages)
2377{
2378 IOMDDMAWalkSegmentState walkState;
2379 IOMDDMAWalkSegmentArgs * walkArgs = (IOMDDMAWalkSegmentArgs *) (void *)&walkState;
2380 IOOptionBits mdOp;
2381 IOReturn ret;
2382 IOPhysicalLength segLen;
2383 addr64_t phys, align, pageOffset;
2384 ppnum_t base, pageIndex, pageCount;
2385 uint64_t index;
2386 uint32_t mapOptions = 0;
2387
2388 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2389
2390 walkArgs->fMapped = false;
2391 mdOp = kIOMDFirstSegment;
2392 pageCount = 0;
2393 for (index = 0; index < length; )
2394 {
2395 if (index && (page_mask & (index + pageOffset))) break;
2396
2397 walkArgs->fOffset = offset + index;
2398 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2399 mdOp = kIOMDWalkSegments;
2400 if (ret != kIOReturnSuccess) break;
2401 phys = walkArgs->fIOVMAddr;
2402 segLen = walkArgs->fLength;
2403
2404 align = (phys & page_mask);
2405 if (!index) pageOffset = align;
2406 else if (align) break;
2407 pageCount += atop_64(round_page_64(align + segLen));
2408 index += segLen;
2409 }
2410
2411 if (index < length) return (kIOReturnVMError);
2412
2413 base = mapper->iovmMapMemory(this, offset, pageCount,
2414 mapOptions, NULL, mapSpec);
2415
2416 if (!base) return (kIOReturnNoResources);
2417
2418 mdOp = kIOMDFirstSegment;
2419 for (pageIndex = 0, index = 0; index < length; )
2420 {
2421 walkArgs->fOffset = offset + index;
2422 ret = dmaCommandOperation(mdOp, &walkState, sizeof(walkState));
2423 mdOp = kIOMDWalkSegments;
2424 if (ret != kIOReturnSuccess) break;
2425 phys = walkArgs->fIOVMAddr;
2426 segLen = walkArgs->fLength;
2427
2428 ppnum_t page = atop_64(phys);
2429 ppnum_t count = atop_64(round_page_64(phys + segLen)) - page;
2430 while (count--)
2431 {
2432 mapper->iovmInsert(base, pageIndex, page);
2433 page++;
2434 pageIndex++;
2435 }
2436 index += segLen;
2437 }
2438 if (pageIndex != pageCount) panic("pageIndex");
2439
2440 *address = ptoa_64(base) + pageOffset;
2441 if (mapPages) *mapPages = pageCount;
2442
2443 return (kIOReturnSuccess);
2444}
2445
2446IOReturn IOGeneralMemoryDescriptor::dmaMap(
2447 IOMapper * mapper,
2448 const IODMAMapSpecification * mapSpec,
2449 uint64_t offset,
2450 uint64_t length,
2451 uint64_t * address,
2452 ppnum_t * mapPages)
2453{
2454 IOReturn err = kIOReturnSuccess;
2455 ioGMDData * dataP;
2456 IOOptionBits type = _flags & kIOMemoryTypeMask;
2457
2458 *address = 0;
2459 if (kIOMemoryHostOnly & _flags) return (kIOReturnSuccess);
2460
2461 if ((type == kIOMemoryTypePhysical) || (type == kIOMemoryTypePhysical64)
2462 || offset || (length != _length))
2463 {
2464 err = super::dmaMap(mapper, mapSpec, offset, length, address, mapPages);
2465 }
2466 else if (_memoryEntries && _pages && (dataP = getDataP(_memoryEntries)))
2467 {
2468 const ioPLBlock * ioplList = getIOPLList(dataP);
2469 upl_page_info_t * pageList;
2470 uint32_t mapOptions = 0;
2471 ppnum_t base;
2472
2473 IODMAMapSpecification mapSpec;
2474 bzero(&mapSpec, sizeof(mapSpec));
2475 mapSpec.numAddressBits = dataP->fDMAMapNumAddressBits;
2476 mapSpec.alignment = dataP->fDMAMapAlignment;
2477
2478 // For external UPLs the fPageInfo field points directly to
2479 // the upl's upl_page_info_t array.
2480 if (ioplList->fFlags & kIOPLExternUPL)
2481 {
2482 pageList = (upl_page_info_t *) ioplList->fPageInfo;
2483 mapOptions |= kIODMAMapPagingPath;
2484 }
2485 else
2486 pageList = getPageList(dataP);
2487
2488 if (!(kIOMemoryPreparedReadOnly & _flags)) mapOptions |= kIODMAMapWriteAccess;
2489
2490 // Check for direct device non-paged memory
2491 if (ioplList->fFlags & kIOPLOnDevice) mapOptions |= kIODMAMapPhysicallyContiguous;
2492
2493 base = mapper->iovmMapMemory(
2494 this, offset, _pages, mapOptions, &pageList[0], &mapSpec);
2495 *address = ptoa_64(base) + (ioplList->fPageOffset & PAGE_MASK);
2496 if (mapPages) *mapPages = _pages;
2497 }
2498
2499 return (err);
2500}
2501
55e303ae
A
2502/*
2503 * prepare
2504 *
2505 * Prepare the memory for an I/O transfer. This involves paging in
2506 * the memory, if necessary, and wiring it down for the duration of
2507 * the transfer. The complete() method completes the processing of
 2508 * the memory after the I/O transfer finishes. This method need not
 2509 * be called for non-pageable memory.
2510 */
99c3a104 2511
55e303ae
A
2512IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2513{
91447636
A
2514 IOReturn error = kIOReturnSuccess;
2515 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 2516
2d21ac55
A
2517 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2518 return kIOReturnSuccess;
2519
2520 if (_prepareLock)
2521 IOLockLock(_prepareLock);
2522
39236c6e
A
2523 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)
2524 {
2525 error = wireVirtual(forDirection);
de355530
A
2526 }
2527
2d21ac55 2528 if (kIOReturnSuccess == error)
0b4c1975 2529 {
99c3a104
A
2530 if (1 == ++_wireCount)
2531 {
2532 if (kIOMemoryClearEncrypt & _flags)
2533 {
2534 performOperation(kIOMemoryClearEncrypted, 0, _length);
2535 }
2536 }
0b4c1975
A
2537 }
2538
2d21ac55
A
2539 if (_prepareLock)
2540 IOLockUnlock(_prepareLock);
2541
2542 return error;
1c79356b
A
2543}
2544
2545/*
2546 * complete
2547 *
2548 * Complete processing of the memory after an I/O transfer finishes.
2549 * This method should not be called unless a prepare was previously
 2550 * issued; the prepare() and complete() calls must occur in pairs,
 2551 * before and after an I/O transfer involving pageable memory.
2552 */
6d2010ae 2553
55e303ae 2554IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1c79356b 2555{
2d21ac55 2556 IOOptionBits type = _flags & kIOMemoryTypeMask;
1c79356b 2557
2d21ac55
A
2558 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2559 return kIOReturnSuccess;
1c79356b 2560
2d21ac55
A
2561 if (_prepareLock)
2562 IOLockLock(_prepareLock);
91447636 2563
2d21ac55
A
2564 assert(_wireCount);
2565
2566 if (_wireCount)
2567 {
0b4c1975
A
2568 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2569 {
2570 performOperation(kIOMemorySetEncrypted, 0, _length);
2571 }
2572
2d21ac55
A
2573 _wireCount--;
2574 if (!_wireCount)
2575 {
2576 IOOptionBits type = _flags & kIOMemoryTypeMask;
2577 ioGMDData * dataP = getDataP(_memoryEntries);
2578 ioPLBlock *ioplList = getIOPLList(dataP);
91447636 2579 UInt count = getNumIOPL(_memoryEntries, dataP);
55e303ae 2580
b0d623f7
A
2581#if IOMD_DEBUG_DMAACTIVE
2582 if (__iomd_reservedA) panic("complete() while dma active");
2583#endif /* IOMD_DEBUG_DMAACTIVE */
2584
99c3a104
A
2585 if (dataP->fMappedBase) {
2586 dataP->fMapper->iovmFree(atop_64(dataP->fMappedBase), _pages);
2587 dataP->fMappedBase = 0;
2588 }
2d21ac55
A
2589 // Only complete iopls that we created which are for TypeVirtual
2590 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2591 for (UInt ind = 0; ind < count; ind++)
91447636
A
2592 if (ioplList[ind].fIOPL) {
2593 upl_commit(ioplList[ind].fIOPL, 0, 0);
2594 upl_deallocate(ioplList[ind].fIOPL);
2595 }
6d2010ae
A
2596 } else if (kIOMemoryTypeUPL == type) {
2597 upl_set_referenced(ioplList[0].fIOPL, false);
2d21ac55 2598 }
6d2010ae
A
2599
2600 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
b0d623f7
A
2601
2602 dataP->fPreparationID = kIOPreparationIDUnprepared;
2d21ac55 2603 }
1c79356b 2604 }
2d21ac55
A
2605
2606 if (_prepareLock)
2607 IOLockUnlock(_prepareLock);
2608
1c79356b
A
2609 return kIOReturnSuccess;
2610}
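// A minimal sketch (not part of the original source) of the prepare()/
// complete() pairing described in the comments above: wire a user buffer for
// the duration of a transfer and unwire it afterwards. The helper name, task,
// address and length are assumptions.
#if 0
static IOReturn DoWiredTransfer(task_t task, mach_vm_address_t buf, mach_vm_size_t len)
{
    IOReturn             err;
    IOMemoryDescriptor * md;

    md = IOMemoryDescriptor::withAddressRange(buf, len, kIODirectionOut, task);
    if (!md) return (kIOReturnNoMemory);

    err = md->prepare(kIODirectionOut);          // page in and wire the buffer
    if (kIOReturnSuccess == err)
    {
        // ... program the hardware against the wired pages ...
        err = md->complete(kIODirectionOut);     // balance the prepare
    }
    md->release();
    return (err);
}
#endif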
2611
2612IOReturn IOGeneralMemoryDescriptor::doMap(
2d21ac55
A
2613 vm_map_t __addressMap,
2614 IOVirtualAddress * __address,
1c79356b 2615 IOOptionBits options,
2d21ac55
A
2616 IOByteCount __offset,
2617 IOByteCount __length )
2618
1c79356b 2619{
b0d623f7 2620#ifndef __LP64__
2d21ac55 2621 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
b0d623f7 2622#endif /* !__LP64__ */
2d21ac55 2623
b0d623f7 2624 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
2625 mach_vm_size_t offset = mapping->fOffset + __offset;
2626 mach_vm_size_t length = mapping->fLength;
2627
b0d623f7 2628 kern_return_t kr = kIOReturnVMError;
0b4e3aa0 2629 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 2630
91447636
A
2631 IOOptionBits type = _flags & kIOMemoryTypeMask;
2632 Ranges vec = _ranges;
2633
2634 user_addr_t range0Addr = 0;
2635 IOByteCount range0Len = 0;
2636
060df5ea
A
2637 if ((offset >= _length) || ((offset + length) > _length))
2638 return( kIOReturnBadArgument );
2639
91447636
A
2640 if (vec.v)
2641 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2642
1c79356b 2643 // mapping source == dest? (could be much better)
91447636 2644 if( _task
2d21ac55
A
2645 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2646 && (1 == _rangesCount) && (0 == offset)
2647 && range0Addr && (length <= range0Len) )
2648 {
2649 mapping->fAddress = range0Addr;
2650 mapping->fOptions |= kIOMapStatic;
2651
2652 return( kIOReturnSuccess );
1c79356b
A
2653 }
2654
0b4e3aa0 2655 if( 0 == sharedMem) {
1c79356b 2656
91447636 2657 vm_size_t size = ptoa_32(_pages);
1c79356b 2658
0b4e3aa0 2659 if( _task) {
0c530ab8 2660
91447636 2661 memory_object_size_t actualSize = size;
2d21ac55
A
2662 vm_prot_t prot = VM_PROT_READ;
2663 if (!(kIOMapReadOnly & options))
2664 prot |= VM_PROT_WRITE;
2665 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2666 prot |= VM_PROT_WRITE;
2667
060df5ea
A
2668 if (_rangesCount == 1)
2669 {
2670 kr = mach_make_memory_entry_64(get_task_map(_task),
2671 &actualSize, range0Addr,
2672 prot, &sharedMem,
2673 NULL);
2674 }
2675 if( (_rangesCount != 1)
2676 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2677 do
b0d623f7 2678 {
0b4e3aa0 2679#if IOASSERT
060df5ea
A
2680 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2681 _rangesCount, (UInt64)actualSize, (UInt64)size);
0b4e3aa0
A
2682#endif
2683 kr = kIOReturnVMError;
060df5ea
A
2684 if (sharedMem)
2685 {
2686 ipc_port_release_send(sharedMem);
2687 sharedMem = MACH_PORT_NULL;
2688 }
b0d623f7 2689
060df5ea
A
2690 mach_vm_address_t address, segDestAddr;
2691 mach_vm_size_t mapLength;
2692 unsigned rangesIndex;
2693 IOOptionBits type = _flags & kIOMemoryTypeMask;
2694 user_addr_t srcAddr;
2695 IOPhysicalLength segLen = 0;
2696
2697 // Find starting address within the vector of ranges
2698 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2699 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2700 if (offset < segLen)
2701 break;
2702 offset -= segLen; // (make offset relative)
2703 }
2704
2705 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
b0d623f7 2706 address = trunc_page_64(mapping->fAddress);
060df5ea 2707
b0d623f7
A
2708 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2709 {
060df5ea
A
2710 vm_map_t map = mapping->fAddressMap;
2711 kr = IOMemoryDescriptorMapCopy(&map,
b0d623f7
A
2712 options,
2713 offset, &address, round_page_64(length + pageOffset));
060df5ea
A
2714 if (kr == KERN_SUCCESS)
2715 {
2716 segDestAddr = address;
2717 segLen -= offset;
316670eb 2718 srcAddr += offset;
060df5ea
A
2719 mapLength = length;
2720
2721 while (true)
2722 {
2723 vm_prot_t cur_prot, max_prot;
316670eb
A
2724
2725 if (segLen > length) segLen = length;
060df5ea
A
2726 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2727 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2728 get_task_map(_task), trunc_page_64(srcAddr),
2729 FALSE /* copy */,
2730 &cur_prot,
2731 &max_prot,
2732 VM_INHERIT_NONE);
2733 if (KERN_SUCCESS == kr)
2734 {
2735 if ((!(VM_PROT_READ & cur_prot))
2736 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2737 {
2738 kr = KERN_PROTECTION_FAILURE;
2739 }
2740 }
2741 if (KERN_SUCCESS != kr)
2742 break;
2743 segDestAddr += segLen;
2744 mapLength -= segLen;
2745 if (!mapLength)
2746 break;
2747 rangesIndex++;
2748 if (rangesIndex >= _rangesCount)
2749 {
2750 kr = kIOReturnBadArgument;
2751 break;
2752 }
2753 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2754 if (srcAddr & PAGE_MASK)
2755 {
2756 kr = kIOReturnBadArgument;
2757 break;
2758 }
2759 if (segLen > mapLength)
2760 segLen = mapLength;
2761 }
2762 if (KERN_SUCCESS != kr)
2763 {
2764 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2765 }
2766 }
2767
2768 if (KERN_SUCCESS == kr)
b0d623f7
A
2769 mapping->fAddress = address + pageOffset;
2770 else
2771 mapping->fAddress = NULL;
2772 }
2773 }
060df5ea 2774 while (false);
b0d623f7
A
2775 }
2776 else do
2777 { // _task == 0, must be physical
0b4e3aa0 2778
55e303ae
A
2779 memory_object_t pager;
2780 unsigned int flags = 0;
2781 addr64_t pa;
9bccf70c
A
2782 IOPhysicalLength segLen;
2783
b0d623f7 2784 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 2785
316670eb
A
2786 if( !getKernelReserved())
2787 continue;
2788 reserved->dp.pagerContig = (1 == _rangesCount);
2789 reserved->dp.memory = this;
9bccf70c 2790
55e303ae
A
 2791 /* What cache mode do we need? */
2792 switch(options & kIOMapCacheMask ) {
9bccf70c
A
2793
2794 case kIOMapDefaultCache:
2795 default:
55e303ae 2796 flags = IODefaultCacheBits(pa);
2d21ac55
A
2797 if (DEVICE_PAGER_CACHE_INHIB & flags)
2798 {
2799 if (DEVICE_PAGER_GUARDED & flags)
2800 mapping->fOptions |= kIOMapInhibitCache;
2801 else
2802 mapping->fOptions |= kIOMapWriteCombineCache;
2803 }
2804 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2805 mapping->fOptions |= kIOMapWriteThruCache;
2806 else
2807 mapping->fOptions |= kIOMapCopybackCache;
55e303ae 2808 break;
9bccf70c
A
2809
2810 case kIOMapInhibitCache:
55e303ae
A
2811 flags = DEVICE_PAGER_CACHE_INHIB |
2812 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2813 break;
9bccf70c
A
2814
2815 case kIOMapWriteThruCache:
55e303ae
A
2816 flags = DEVICE_PAGER_WRITE_THROUGH |
2817 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2818 break;
9bccf70c
A
2819
2820 case kIOMapCopybackCache:
55e303ae
A
2821 flags = DEVICE_PAGER_COHERENT;
2822 break;
2823
2824 case kIOMapWriteCombineCache:
2825 flags = DEVICE_PAGER_CACHE_INHIB |
2826 DEVICE_PAGER_COHERENT;
2827 break;
9bccf70c
A
2828 }
2829
316670eb 2830 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
9bccf70c 2831
b0d623f7 2832 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
9bccf70c 2833 size, flags);
0b4e3aa0
A
2834 assert( pager );
2835
2836 if( pager) {
0b4e3aa0
A
2837 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2838 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2839
2840 assert( KERN_SUCCESS == kr );
2d21ac55
A
2841 if( KERN_SUCCESS != kr)
2842 {
9bccf70c 2843 device_pager_deallocate( pager );
0b4e3aa0
A
2844 pager = MACH_PORT_NULL;
2845 sharedMem = MACH_PORT_NULL;
2846 }
2847 }
9bccf70c 2848 if( pager && sharedMem)
316670eb 2849 reserved->dp.devicePager = pager;
1c79356b 2850
1c79356b
A
2851 } while( false );
2852
0b4e3aa0
A
2853 _memEntry = (void *) sharedMem;
2854 }
2855
2d21ac55
A
2856 IOReturn result;
2857 if (0 == sharedMem)
b0d623f7 2858 result = kr;
9bccf70c 2859 else
2d21ac55
A
2860 result = super::doMap( __addressMap, __address,
2861 options, __offset, __length );
0b4e3aa0 2862
2d21ac55 2863 return( result );
1c79356b
A
2864}
2865
2866IOReturn IOGeneralMemoryDescriptor::doUnmap(
2867 vm_map_t addressMap,
2d21ac55
A
2868 IOVirtualAddress __address,
2869 IOByteCount __length )
1c79356b 2870{
2d21ac55 2871 return (super::doUnmap(addressMap, __address, __length));
1c79356b
A
2872}
2873
2874/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2875
b0d623f7
A
2876#undef super
2877#define super OSObject
1c79356b 2878
b0d623f7 2879OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 2880
b0d623f7
A
2881OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2882OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2883OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2884OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2885OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2886OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2887OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2888OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 2889
b0d623f7
A
2890/* ex-inline function implementation */
2891IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2892 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
2893
2894/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2895
b0d623f7 2896bool IOMemoryMap::init(
2d21ac55
A
2897 task_t intoTask,
2898 mach_vm_address_t toAddress,
2899 IOOptionBits _options,
2900 mach_vm_size_t _offset,
2901 mach_vm_size_t _length )
1c79356b 2902{
2d21ac55 2903 if (!intoTask)
1c79356b
A
2904 return( false);
2905
2d21ac55
A
2906 if (!super::init())
2907 return(false);
1c79356b 2908
2d21ac55
A
2909 fAddressMap = get_task_map(intoTask);
2910 if (!fAddressMap)
2911 return(false);
2912 vm_map_reference(fAddressMap);
1c79356b 2913
2d21ac55
A
2914 fAddressTask = intoTask;
2915 fOptions = _options;
2916 fLength = _length;
2917 fOffset = _offset;
2918 fAddress = toAddress;
1c79356b 2919
2d21ac55 2920 return (true);
1c79356b
A
2921}
2922
b0d623f7 2923bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 2924{
2d21ac55
A
2925 if (!_memory)
2926 return(false);
1c79356b 2927
2d21ac55 2928 if (!fSuperMap)
91447636 2929 {
2d21ac55 2930 if( (_offset + fLength) > _memory->getLength())
91447636 2931 return( false);
2d21ac55 2932 fOffset = _offset;
91447636 2933 }
1c79356b
A
2934
2935 _memory->retain();
2d21ac55 2936 if (fMemory)
91447636 2937 {
2d21ac55
A
2938 if (fMemory != _memory)
2939 fMemory->removeMapping(this);
2940 fMemory->release();
1c79356b 2941 }
2d21ac55 2942 fMemory = _memory;
91447636 2943
2d21ac55 2944 return( true );
1c79356b
A
2945}
2946
0b4e3aa0
A
2947struct IOMemoryDescriptorMapAllocRef
2948{
2949 ipc_port_t sharedMem;
060df5ea 2950 vm_map_t map;
2d21ac55
A
2951 mach_vm_address_t mapped;
2952 mach_vm_size_t size;
2953 mach_vm_size_t sourceOffset;
0b4e3aa0
A
2954 IOOptionBits options;
2955};
2956
2957static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2958{
2959 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2960 IOReturn err;
2961
2962 do {
2d21ac55
A
2963 if( ref->sharedMem)
2964 {
0b4e3aa0
A
2965 vm_prot_t prot = VM_PROT_READ
2966 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
55e303ae 2967
2d21ac55
A
2968 // VM system requires write access to change cache mode
2969 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2970 prot |= VM_PROT_WRITE;
2971
55e303ae
A
2972 // set memory entry cache
2973 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2974 switch (ref->options & kIOMapCacheMask)
2975 {
2976 case kIOMapInhibitCache:
2977 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2978 break;
2979
2980 case kIOMapWriteThruCache:
2981 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2982 break;
2983
2984 case kIOMapWriteCombineCache:
2985 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2986 break;
2987
2988 case kIOMapCopybackCache:
2989 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2990 break;
2991
316670eb
A
2992 case kIOMapCopybackInnerCache:
2993 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2994 break;
2995
55e303ae
A
2996 case kIOMapDefaultCache:
2997 default:
2998 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2999 break;
3000 }
3001
3002 vm_size_t unused = 0;
3003
3004 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
3005 memEntryCacheMode, NULL, ref->sharedMem );
3006 if (KERN_SUCCESS != err)
3007 IOLog("MAP_MEM_ONLY failed %d\n", err);
3008
2d21ac55 3009 err = mach_vm_map( map,
0b4e3aa0
A
3010 &ref->mapped,
3011 ref->size, 0 /* mask */,
3012 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3013 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
3014 ref->sharedMem, ref->sourceOffset,
3015 false, // copy
3016 prot, // cur
3017 prot, // max
3018 VM_INHERIT_NONE);
55e303ae 3019
0b4e3aa0
A
3020 if( KERN_SUCCESS != err) {
3021 ref->mapped = 0;
3022 continue;
3023 }
060df5ea 3024 ref->map = map;
2d21ac55
A
3025 }
3026 else
3027 {
060df5ea 3028 err = mach_vm_allocate(map, &ref->mapped, ref->size,
0b4e3aa0
A
3029 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
3030 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
0b4e3aa0
A
3031 if( KERN_SUCCESS != err) {
3032 ref->mapped = 0;
3033 continue;
3034 }
060df5ea 3035 ref->map = map;
0b4e3aa0 3036 // we have to make sure that these guys don't get copied if we fork.
060df5ea 3037 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
0b4e3aa0
A
3038 assert( KERN_SUCCESS == err );
3039 }
2d21ac55
A
3040 }
3041 while( false );
0b4e3aa0
A
3042
3043 return( err );
3044}
3045
2d21ac55 3046kern_return_t
060df5ea 3047IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2d21ac55
A
3048 mach_vm_size_t offset,
3049 mach_vm_address_t * address, mach_vm_size_t length)
3050{
3051 IOReturn err;
3052 IOMemoryDescriptorMapAllocRef ref;
3053
060df5ea 3054 ref.map = *map;
b0d623f7 3055 ref.sharedMem = entry;
cf7d32b8 3056 ref.sourceOffset = trunc_page_64(offset);
b0d623f7
A
3057 ref.options = options;
3058 ref.size = length;
2d21ac55
A
3059
3060 if (options & kIOMapAnywhere)
3061 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3062 ref.mapped = 0;
3063 else
3064 ref.mapped = *address;
3065
060df5ea 3066 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2d21ac55
A
3067 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
3068 else
060df5ea 3069 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2d21ac55
A
3070
3071 *address = ref.mapped;
060df5ea
A
3072 *map = ref.map;
3073
2d21ac55
A
3074 return (err);
3075}
3076
b0d623f7 3077kern_return_t
060df5ea 3078IOMemoryDescriptorMapCopy(vm_map_t * map,
b0d623f7
A
3079 IOOptionBits options,
3080 mach_vm_size_t offset,
3081 mach_vm_address_t * address, mach_vm_size_t length)
3082{
3083 IOReturn err;
3084 IOMemoryDescriptorMapAllocRef ref;
3085
060df5ea 3086 ref.map = *map;
b0d623f7 3087 ref.sharedMem = NULL;
b0d623f7
A
3088 ref.sourceOffset = trunc_page_64(offset);
3089 ref.options = options;
3090 ref.size = length;
3091
3092 if (options & kIOMapAnywhere)
3093 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
3094 ref.mapped = 0;
3095 else
3096 ref.mapped = *address;
3097
060df5ea 3098 if (ref.map == kernel_map)
b0d623f7
A
3099 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
3100 else
060df5ea 3101 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
b0d623f7
A
3102
3103 *address = ref.mapped;
060df5ea
A
3104 *map = ref.map;
3105
b0d623f7
A
3106 return (err);
3107}
9bccf70c 3108
1c79356b 3109IOReturn IOMemoryDescriptor::doMap(
2d21ac55
A
3110 vm_map_t __addressMap,
3111 IOVirtualAddress * __address,
1c79356b 3112 IOOptionBits options,
2d21ac55
A
3113 IOByteCount __offset,
3114 IOByteCount __length )
1c79356b 3115{
b0d623f7 3116#ifndef __LP64__
2d21ac55 3117 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
b0d623f7 3118#endif /* !__LP64__ */
1c79356b 3119
b0d623f7 3120 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
3121 mach_vm_size_t offset = mapping->fOffset + __offset;
3122 mach_vm_size_t length = mapping->fLength;
1c79356b 3123
2d21ac55
A
3124 IOReturn err = kIOReturnSuccess;
3125 memory_object_t pager;
3126 mach_vm_size_t pageOffset;
3127 IOPhysicalAddress sourceAddr;
b0d623f7 3128 unsigned int lock_count;
1c79356b 3129
2d21ac55
A
3130 do
3131 {
b0d623f7
A
3132 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
3133 pageOffset = sourceAddr - trunc_page( sourceAddr );
1c79356b 3134
2d21ac55 3135 if( reserved)
316670eb 3136 pager = (memory_object_t) reserved->dp.devicePager;
2d21ac55
A
3137 else
3138 pager = MACH_PORT_NULL;
0b4e3aa0 3139
91447636
A
3140 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
3141 {
2d21ac55
A
3142 upl_t redirUPL2;
3143 vm_size_t size;
3144 int flags;
0b4e3aa0 3145
91447636
A
3146 if (!_memEntry)
3147 {
3148 err = kIOReturnNotReadable;
3149 continue;
3150 }
3151
b0d623f7 3152 size = round_page(mapping->fLength + pageOffset);
91447636
A
3153 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3154 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3155
3156 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
3157 NULL, NULL,
3158 &flags))
3159 redirUPL2 = NULL;
3160
b0d623f7
A
3161 for (lock_count = 0;
3162 IORecursiveLockHaveLock(gIOMemoryLock);
3163 lock_count++) {
3164 UNLOCK;
3165 }
2d21ac55 3166 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
b0d623f7
A
3167 for (;
3168 lock_count;
3169 lock_count--) {
3170 LOCK;
3171 }
3172
91447636
A
3173 if (kIOReturnSuccess != err)
3174 {
3175 IOLog("upl_transpose(%x)\n", err);
3176 err = kIOReturnSuccess;
3177 }
3178
3179 if (redirUPL2)
3180 {
3181 upl_commit(redirUPL2, NULL, 0);
3182 upl_deallocate(redirUPL2);
3183 redirUPL2 = 0;
3184 }
3185 {
3186 // swap the memEntries since they now refer to different vm_objects
3187 void * me = _memEntry;
2d21ac55
A
3188 _memEntry = mapping->fMemory->_memEntry;
3189 mapping->fMemory->_memEntry = me;
91447636 3190 }
2d21ac55 3191 if (pager)
316670eb 3192 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
91447636
A
3193 }
3194 else
3195 {
2d21ac55
A
3196 mach_vm_address_t address;
3197
3198 if (!(options & kIOMapAnywhere))
3199 {
3200 address = trunc_page_64(mapping->fAddress);
3201 if( (mapping->fAddress - address) != pageOffset)
3202 {
91447636
A
3203 err = kIOReturnVMError;
3204 continue;
3205 }
3206 }
0b4e3aa0 3207
060df5ea
A
3208 vm_map_t map = mapping->fAddressMap;
3209 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2d21ac55
A
3210 options, (kIOMemoryBufferPageable & _flags),
3211 offset, &address, round_page_64(length + pageOffset));
3212 if( err != KERN_SUCCESS)
3213 continue;
0b4e3aa0 3214
2d21ac55
A
3215 if (!_memEntry || pager)
3216 {
3217 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
3218 if (err != KERN_SUCCESS)
3219 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
3220 }
0b4e3aa0 3221
b0d623f7 3222#if DEBUG
2d21ac55 3223 if (kIOLogMapping & gIOKitDebug)
316670eb
A
3224 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
3225 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
2d21ac55 3226#endif
0b4e3aa0 3227
2d21ac55
A
3228 if (err == KERN_SUCCESS)
3229 mapping->fAddress = address + pageOffset;
3230 else
3231 mapping->fAddress = NULL;
3232 }
3233 }
3234 while( false );
0b4e3aa0 3235
2d21ac55 3236 return (err);
0b4e3aa0
A
3237}
3238
0b4e3aa0
A
3239IOReturn IOMemoryDescriptor::handleFault(
3240 void * _pager,
3241 vm_map_t addressMap,
2d21ac55
A
3242 mach_vm_address_t address,
3243 mach_vm_size_t sourceOffset,
3244 mach_vm_size_t length,
0b4e3aa0
A
3245 IOOptionBits options )
3246{
3247 IOReturn err = kIOReturnSuccess;
3248 memory_object_t pager = (memory_object_t) _pager;
2d21ac55
A
3249 mach_vm_size_t size;
3250 mach_vm_size_t bytes;
3251 mach_vm_size_t page;
3252 mach_vm_size_t pageOffset;
3253 mach_vm_size_t pagerOffset;
0b4e3aa0 3254 IOPhysicalLength segLen;
55e303ae 3255 addr64_t physAddr;
0b4e3aa0 3256
2d21ac55
A
3257 if( !addressMap)
3258 {
3259 if( kIOMemoryRedirected & _flags)
3260 {
b0d623f7 3261#if DEBUG
2d21ac55 3262 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
1c79356b 3263#endif
0b4e3aa0 3264 do {
9bccf70c 3265 SLEEP;
0b4e3aa0
A
3266 } while( kIOMemoryRedirected & _flags );
3267 }
1c79356b 3268
0b4e3aa0 3269 return( kIOReturnSuccess );
1c79356b
A
3270 }
3271
b0d623f7 3272 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 3273 assert( physAddr );
55e303ae
A
3274 pageOffset = physAddr - trunc_page_64( physAddr );
3275 pagerOffset = sourceOffset;
0b4e3aa0
A
3276
3277 size = length + pageOffset;
3278 physAddr -= pageOffset;
1c79356b
A
3279
3280 segLen += pageOffset;
0b4e3aa0 3281 bytes = size;
2d21ac55
A
3282 do
3283 {
1c79356b
A
3284 // in the middle of the loop only map whole pages
3285 if( segLen >= bytes)
3286 segLen = bytes;
b0d623f7 3287 else if( segLen != trunc_page( segLen))
1c79356b 3288 err = kIOReturnVMError;
55e303ae 3289 if( physAddr != trunc_page_64( physAddr))
1c79356b 3290 err = kIOReturnBadArgument;
8f6c56a5
A
3291 if (kIOReturnSuccess != err)
3292 break;
1c79356b 3293
b0d623f7 3294#if DEBUG
1c79356b 3295 if( kIOLogMapping & gIOKitDebug)
b0d623f7 3296 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
0b4e3aa0 3297 addressMap, address + pageOffset, physAddr + pageOffset,
1c79356b
A
3298 segLen - pageOffset);
3299#endif
3300
2d21ac55 3301
0b4e3aa0 3302 if( pager) {
316670eb 3303 if( reserved && reserved->dp.pagerContig) {
0b4e3aa0 3304 IOPhysicalLength allLen;
55e303ae 3305 addr64_t allPhys;
0b4e3aa0 3306
b0d623f7 3307 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
0b4e3aa0 3308 assert( allPhys );
b0d623f7 3309 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2d21ac55
A
3310 }
3311 else
3312 {
0b4e3aa0 3313
2d21ac55 3314 for( page = 0;
0b4e3aa0 3315 (page < segLen) && (KERN_SUCCESS == err);
2d21ac55
A
3316 page += page_size)
3317 {
3318 err = device_pager_populate_object(pager, pagerOffset,
3319 (ppnum_t)(atop_64(physAddr + page)), page_size);
3320 pagerOffset += page_size;
0b4e3aa0
A
3321 }
3322 }
3323 assert( KERN_SUCCESS == err );
3324 if( err)
3325 break;
3326 }
0c530ab8 3327
2d21ac55
A
3328 // This call to vm_fault causes an early pmap level resolution
3329 // of the mappings created above for kernel mappings, since
 3330 // faulting them in later can't take place from interrupt level.
9bccf70c
A
3331 /* *** ALERT *** */
3332 /* *** Temporary Workaround *** */
3333
2d21ac55
A
3334 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3335 {
91447636
A
3336 vm_fault(addressMap,
3337 (vm_map_offset_t)address,
3338 VM_PROT_READ|VM_PROT_WRITE,
3339 FALSE, THREAD_UNINT, NULL,
3340 (vm_map_offset_t)0);
9bccf70c
A
3341 }
3342
3343 /* *** Temporary Workaround *** */
3344 /* *** ALERT *** */
0c530ab8 3345
1c79356b 3346 sourceOffset += segLen - pageOffset;
0b4e3aa0 3347 address += segLen;
1c79356b
A
3348 bytes -= segLen;
3349 pageOffset = 0;
3350
2d21ac55 3351 }
b0d623f7 3352 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 3353
2d21ac55 3354 if (bytes)
1c79356b 3355 err = kIOReturnBadArgument;
1c79356b 3356
2d21ac55 3357 return (err);
1c79356b
A
3358}
3359
3360IOReturn IOMemoryDescriptor::doUnmap(
3361 vm_map_t addressMap,
2d21ac55
A
3362 IOVirtualAddress __address,
3363 IOByteCount __length )
1c79356b 3364{
2d21ac55
A
3365 IOReturn err;
3366 mach_vm_address_t address;
3367 mach_vm_size_t length;
3368
3369 if (__length)
3370 {
3371 address = __address;
3372 length = __length;
3373 }
3374 else
3375 {
b0d623f7
A
3376 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3377 address = ((IOMemoryMap *) __address)->fAddress;
3378 length = ((IOMemoryMap *) __address)->fLength;
2d21ac55
A
3379 }
3380
7e4a7d39
A
3381 if ((addressMap == kernel_map)
3382 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
2d21ac55 3383 addressMap = IOPageableMapForAddress( address );
1c79356b 3384
b0d623f7 3385#if DEBUG
1c79356b 3386 if( kIOLogMapping & gIOKitDebug)
2d21ac55
A
3387 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3388 addressMap, address, length );
1c79356b
A
3389#endif
3390
2d21ac55 3391 err = mach_vm_deallocate( addressMap, address, length );
1c79356b 3392
2d21ac55 3393 return (err);
1c79356b
A
3394}
3395
91447636 3396IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 3397{
91447636 3398 IOReturn err = kIOReturnSuccess;
b0d623f7 3399 IOMemoryMap * mapping = 0;
e3027f41
A
3400 OSIterator * iter;
3401
3402 LOCK;
3403
91447636
A
3404 if( doRedirect)
3405 _flags |= kIOMemoryRedirected;
3406 else
3407 _flags &= ~kIOMemoryRedirected;
3408
e3027f41
A
3409 do {
3410 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
39236c6e
A
3411
3412 memory_object_t pager;
3413
3414 if( reserved)
3415 pager = (memory_object_t) reserved->dp.devicePager;
3416 else
3417 pager = MACH_PORT_NULL;
3418
b0d623f7 3419 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
39236c6e 3420 {
91447636 3421 mapping->redirect( safeTask, doRedirect );
39236c6e
A
3422 if (!doRedirect && !safeTask && pager && (kernel_map == mapping->fAddressMap))
3423 {
3424 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, mapping->fOffset, mapping->fLength, kIOMapDefaultCache );
3425 }
3426 }
e3027f41 3427
91447636
A
3428 iter->release();
3429 }
e3027f41
A
3430 } while( false );
3431
91447636
A
3432 if (!doRedirect)
3433 {
9bccf70c 3434 WAKEUP;
0b4e3aa0
A
3435 }
3436
e3027f41
A
3437 UNLOCK;
3438
b0d623f7 3439#ifndef __LP64__
e3027f41
A
3440 // temporary binary compatibility
3441 IOSubMemoryDescriptor * subMem;
3442 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
91447636 3443 err = subMem->redirect( safeTask, doRedirect );
e3027f41 3444 else
91447636 3445 err = kIOReturnSuccess;
b0d623f7 3446#endif /* !__LP64__ */
e3027f41
A
3447
3448 return( err );
3449}
3450
b0d623f7 3451IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41
A
3452{
3453 IOReturn err = kIOReturnSuccess;
3454
2d21ac55 3455 if( fSuperMap) {
b0d623f7 3456// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
e3027f41
A
3457 } else {
3458
3459 LOCK;
0c530ab8
A
3460
3461 do
91447636 3462 {
2d21ac55 3463 if (!fAddress)
0c530ab8 3464 break;
2d21ac55 3465 if (!fAddressMap)
0c530ab8
A
3466 break;
3467
2d21ac55
A
3468 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3469 && (0 == (fOptions & kIOMapStatic)))
0c530ab8 3470 {
2d21ac55 3471 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3472 err = kIOReturnSuccess;
3473#if DEBUG
2d21ac55 3474 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 3475#endif
0c530ab8 3476 }
2d21ac55 3477 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
0c530ab8
A
3478 {
3479 IOOptionBits newMode;
2d21ac55
A
3480 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3481 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
0c530ab8
A
3482 }
3483 }
3484 while (false);
0c530ab8 3485 UNLOCK;
e3027f41
A
3486 }
3487
2d21ac55
A
3488 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3489 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3490 && safeTask
2d21ac55
A
3491 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3492 fMemory->redirect(safeTask, doRedirect);
91447636 3493
e3027f41
A
3494 return( err );
3495}
3496
b0d623f7 3497IOReturn IOMemoryMap::unmap( void )
1c79356b
A
3498{
3499 IOReturn err;
3500
3501 LOCK;
3502
2d21ac55
A
3503 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3504 && (0 == (fOptions & kIOMapStatic))) {
1c79356b 3505
39236c6e
A
3506 vm_map_iokit_unmapped_region(fAddressMap, fLength);
3507
2d21ac55 3508 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
1c79356b
A
3509
3510 } else
3511 err = kIOReturnSuccess;
3512
2d21ac55
A
3513 if (fAddressMap)
3514 {
3515 vm_map_deallocate(fAddressMap);
3516 fAddressMap = 0;
3517 }
3518
3519 fAddress = 0;
1c79356b
A
3520
3521 UNLOCK;
3522
3523 return( err );
3524}
3525
b0d623f7 3526void IOMemoryMap::taskDied( void )
1c79356b
A
3527{
3528 LOCK;
b0d623f7
A
3529 if (fUserClientUnmap)
3530 unmap();
2d21ac55
A
3531 if( fAddressMap) {
3532 vm_map_deallocate(fAddressMap);
3533 fAddressMap = 0;
1c79356b 3534 }
2d21ac55
A
3535 fAddressTask = 0;
3536 fAddress = 0;
1c79356b
A
3537 UNLOCK;
3538}
3539
b0d623f7
A
3540IOReturn IOMemoryMap::userClientUnmap( void )
3541{
3542 fUserClientUnmap = true;
3543 return (kIOReturnSuccess);
3544}
3545
9bccf70c
A
3546// Overload the release mechanism. All mappings must be a member
3547// of a memory descriptor's _mappings set. This means that we
3548// always have 2 references on a mapping. When either of these references
3549// is released we need to free ourselves.
b0d623f7 3550void IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 3551{
55e303ae 3552 LOCK;
9bccf70c 3553 super::taggedRelease(tag, 2);
55e303ae 3554 UNLOCK;
9bccf70c
A
3555}
3556
b0d623f7 3557void IOMemoryMap::free()
1c79356b
A
3558{
3559 unmap();
3560
2d21ac55
A
3561 if (fMemory)
3562 {
1c79356b 3563 LOCK;
2d21ac55 3564 fMemory->removeMapping(this);
1c79356b 3565 UNLOCK;
2d21ac55 3566 fMemory->release();
1c79356b
A
3567 }
3568
2d21ac55 3569 if (fOwner && (fOwner != fMemory))
91447636
A
3570 {
3571 LOCK;
2d21ac55 3572 fOwner->removeMapping(this);
91447636
A
3573 UNLOCK;
3574 }
3575
2d21ac55
A
3576 if (fSuperMap)
3577 fSuperMap->release();
1c79356b 3578
2d21ac55
A
3579 if (fRedirUPL) {
3580 upl_commit(fRedirUPL, NULL, 0);
3581 upl_deallocate(fRedirUPL);
91447636
A
3582 }
3583
1c79356b
A
3584 super::free();
3585}
3586
b0d623f7 3587IOByteCount IOMemoryMap::getLength()
1c79356b 3588{
2d21ac55 3589 return( fLength );
1c79356b
A
3590}
3591
b0d623f7 3592IOVirtualAddress IOMemoryMap::getVirtualAddress()
1c79356b 3593{
b0d623f7 3594#ifndef __LP64__
2d21ac55
A
3595 if (fSuperMap)
3596 fSuperMap->getVirtualAddress();
b0d623f7
A
3597 else if (fAddressMap
3598 && vm_map_is_64bit(fAddressMap)
3599 && (sizeof(IOVirtualAddress) < 8))
2d21ac55
A
3600 {
3601 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3602 }
b0d623f7 3603#endif /* !__LP64__ */
2d21ac55
A
3604
3605 return (fAddress);
3606}
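// A minimal sketch (not part of the original source) of the IOMemoryMap
// lifetime implemented in this file: map a descriptor into the kernel task
// with IOMemoryDescriptor::map(), use the address while the map object is
// held, and release() it to tear the mapping down via unmap()/free() above.
// The helper name and the assumption that the mapping is writable are
// hypothetical.
#if 0
static IOReturn TouchMapped(IOMemoryDescriptor * md)
{
    IOMemoryMap * map = md->map();        // kernel mapping, default options
    if (!map) return (kIOReturnVMError);

    void * ptr = (void *) (uintptr_t) map->getVirtualAddress();
    bzero(ptr, map->getLength());         // assumes a read/write mapping

    map->release();                       // dropping the map unmaps it
    return (kIOReturnSuccess);
}
#endif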
3607
b0d623f7
A
3608#ifndef __LP64__
3609mach_vm_address_t IOMemoryMap::getAddress()
2d21ac55
A
3610{
3611 return( fAddress);
3612}
3613
b0d623f7 3614mach_vm_size_t IOMemoryMap::getSize()
2d21ac55
A
3615{
3616 return( fLength );
1c79356b 3617}
b0d623f7 3618#endif /* !__LP64__ */
1c79356b 3619
2d21ac55 3620
b0d623f7 3621task_t IOMemoryMap::getAddressTask()
1c79356b 3622{
2d21ac55
A
3623 if( fSuperMap)
3624 return( fSuperMap->getAddressTask());
1c79356b 3625 else
2d21ac55 3626 return( fAddressTask);
1c79356b
A
3627}
3628
b0d623f7 3629IOOptionBits IOMemoryMap::getMapOptions()
1c79356b 3630{
2d21ac55 3631 return( fOptions);
1c79356b
A
3632}
3633
b0d623f7 3634IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
1c79356b 3635{
2d21ac55 3636 return( fMemory );
1c79356b
A
3637}
3638
b0d623f7
A
3639IOMemoryMap * IOMemoryMap::copyCompatible(
3640 IOMemoryMap * newMapping )
1c79356b 3641{
2d21ac55
A
3642 task_t task = newMapping->getAddressTask();
3643 mach_vm_address_t toAddress = newMapping->fAddress;
3644 IOOptionBits _options = newMapping->fOptions;
3645 mach_vm_size_t _offset = newMapping->fOffset;
3646 mach_vm_size_t _length = newMapping->fLength;
1c79356b 3647
2d21ac55 3648 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
1c79356b 3649 return( 0 );
2d21ac55 3650 if( (fOptions ^ _options) & kIOMapReadOnly)
9bccf70c
A
3651 return( 0 );
3652 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2d21ac55 3653 && ((fOptions ^ _options) & kIOMapCacheMask))
1c79356b
A
3654 return( 0 );
3655
2d21ac55 3656 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
1c79356b
A
3657 return( 0 );
3658
2d21ac55 3659 if( _offset < fOffset)
1c79356b
A
3660 return( 0 );
3661
2d21ac55 3662 _offset -= fOffset;
1c79356b 3663
2d21ac55 3664 if( (_offset + _length) > fLength)
1c79356b
A
3665 return( 0 );
3666
2d21ac55
A
3667 retain();
3668 if( (fLength == _length) && (!_offset))
3669 {
2d21ac55
A
3670 newMapping = this;
3671 }
3672 else
3673 {
3674 newMapping->fSuperMap = this;
6d2010ae 3675 newMapping->fOffset = fOffset + _offset;
2d21ac55 3676 newMapping->fAddress = fAddress + _offset;
1c79356b
A
3677 }
3678
2d21ac55 3679 return( newMapping );
1c79356b
A
3680}
3681
99c3a104
A
3682IOReturn IOMemoryMap::wireRange(
3683 uint32_t options,
3684 mach_vm_size_t offset,
3685 mach_vm_size_t length)
3686{
3687 IOReturn kr;
3688 mach_vm_address_t start = trunc_page_64(fAddress + offset);
3689 mach_vm_address_t end = round_page_64(fAddress + offset + length);
3690
3691 if (kIODirectionOutIn & options)
3692 {
3693 kr = vm_map_wire(fAddressMap, start, end, (kIODirectionOutIn & options), FALSE);
3694 }
3695 else
3696 {
3697 kr = vm_map_unwire(fAddressMap, start, end, FALSE);
3698 }
3699
3700 return (kr);
3701}
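// A hedged sketch of the wire/unwire pairing above (helper name and call sites
// are assumptions): passing direction bits wires the pages of the range,
// passing no direction bits unwires them again.
static IOReturn
examplePinThenUnpin( IOMemoryMap * map, mach_vm_size_t offset, mach_vm_size_t length )
{
    IOReturn kr = map->wireRange( kIODirectionOutIn, offset, length );  // vm_map_wire path
    if (kIOReturnSuccess != kr)
        return kr;

    // ... touch the wired pages ...

    return map->wireRange( 0, offset, length );                         // vm_map_unwire path
}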
3702
3703
0c530ab8 3704IOPhysicalAddress
b0d623f7
A
3705#ifdef __LP64__
3706IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3707#else /* !__LP64__ */
3708IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3709#endif /* !__LP64__ */
1c79356b
A
3710{
3711 IOPhysicalAddress address;
3712
3713 LOCK;
b0d623f7
A
3714#ifdef __LP64__
3715 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3716#else /* !__LP64__ */
2d21ac55 3717 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 3718#endif /* !__LP64__ */
1c79356b
A
3719 UNLOCK;
3720
3721 return( address );
3722}
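// For illustration only (the helper is an assumption): translate an offset
// within the mapping into the physical segment behind it, following the same
// LP64/non-LP64 split as the implementation above. On LP64, kIOMemoryMapperNone
// asks for the untranslated physical address, as makeMapping() does further down.
static IOPhysicalAddress
examplePhysicalForOffset( IOMemoryMap * map, IOByteCount offset, IOPhysicalLength * segLen )
{
#ifdef __LP64__
    return map->getPhysicalSegment( offset, segLen, kIOMemoryMapperNone );
#else /* !__LP64__ */
    return map->getPhysicalSegment( offset, segLen );
#endif /* !__LP64__ */
}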
3723
3724/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3725
3726#undef super
3727#define super OSObject
3728
3729/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3730
3731void IOMemoryDescriptor::initialize( void )
3732{
3733 if( 0 == gIOMemoryLock)
3734 gIOMemoryLock = IORecursiveLockAlloc();
55e303ae 3735
0c530ab8 3736 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
3737}
3738
3739void IOMemoryDescriptor::free( void )
3740{
3741 if( _mappings)
3742 _mappings->release();
3743
3744 super::free();
3745}
3746
3747IOMemoryMap * IOMemoryDescriptor::setMapping(
3748 task_t intoTask,
3749 IOVirtualAddress mapAddress,
55e303ae 3750 IOOptionBits options )
1c79356b 3751{
2d21ac55
A
3752 return (createMappingInTask( intoTask, mapAddress,
3753 options | kIOMapStatic,
3754 0, getLength() ));
1c79356b
A
3755}
3756
3757IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 3758 IOOptionBits options )
1c79356b 3759{
2d21ac55
A
3760 return (createMappingInTask( kernel_task, 0,
3761 options | kIOMapAnywhere,
3762 0, getLength() ));
1c79356b
A
3763}
3764
b0d623f7 3765#ifndef __LP64__
2d21ac55
A
3766IOMemoryMap * IOMemoryDescriptor::map(
3767 task_t intoTask,
3768 IOVirtualAddress atAddress,
1c79356b 3769 IOOptionBits options,
55e303ae
A
3770 IOByteCount offset,
3771 IOByteCount length )
1c79356b 3772{
2d21ac55
A
3773 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3774 {
3775 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3776 return (0);
3777 }
3778
3779 return (createMappingInTask(intoTask, atAddress,
3780 options, offset, length));
3781}
b0d623f7 3782#endif /* !__LP64__ */
2d21ac55
A
3783
3784IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3785 task_t intoTask,
3786 mach_vm_address_t atAddress,
3787 IOOptionBits options,
3788 mach_vm_size_t offset,
3789 mach_vm_size_t length)
3790{
b0d623f7
A
3791 IOMemoryMap * result;
3792 IOMemoryMap * mapping;
2d21ac55
A
3793
3794 if (0 == length)
1c79356b
A
3795 length = getLength();
3796
b0d623f7 3797 mapping = new IOMemoryMap;
2d21ac55
A
3798
3799 if( mapping
3800 && !mapping->init( intoTask, atAddress,
3801 options, offset, length )) {
3802 mapping->release();
3803 mapping = 0;
3804 }
3805
3806 if (mapping)
3807 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3808 else
3809 result = 0;
3810
b0d623f7 3811#if DEBUG
2d21ac55 3812 if (!result)
316670eb
A
3813 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3814 this, atAddress, (uint32_t) options, offset, length);
2d21ac55
A
3815#endif
3816
3817 return (result);
1c79356b
A
3818}
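// A minimal sketch of the public path above ('userTask' and the helper name
// are assumptions): map an entire descriptor into a user task at an address
// chosen by the VM; passing 0 for the length falls through to getLength().
static IOMemoryMap *
exampleMapIntoUserTask( IOMemoryDescriptor * md, task_t userTask )
{
    // The caller owns the returned mapping; releasing it tears the mapping down.
    return md->createMappingInTask( userTask,
                                    0,                                // let the VM pick the address
                                    kIOMapAnywhere | kIOMapReadOnly,
                                    0,                                // offset into the descriptor
                                    0 );                              // 0 => map getLength() bytes
}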
3819
b0d623f7
A
 3820#ifndef __LP64__ // on LP64 only the 64-bit variant below exists
3821IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
91447636
A
3822 IOOptionBits options,
3823 IOByteCount offset)
2d21ac55
A
3824{
3825 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3826}
b0d623f7 3827#endif
2d21ac55 3828
b0d623f7 3829IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2d21ac55
A
3830 IOOptionBits options,
3831 mach_vm_size_t offset)
91447636
A
3832{
3833 IOReturn err = kIOReturnSuccess;
3834 IOMemoryDescriptor * physMem = 0;
3835
3836 LOCK;
3837
2d21ac55 3838 if (fAddress && fAddressMap) do
91447636 3839 {
2d21ac55
A
3840 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3841 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3842 {
2d21ac55 3843 physMem = fMemory;
91447636
A
3844 physMem->retain();
3845 }
3846
2d21ac55 3847 if (!fRedirUPL)
91447636 3848 {
b0d623f7 3849 vm_size_t size = round_page(fLength);
91447636
A
3850 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3851 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2d21ac55 3852 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
91447636
A
3853 NULL, NULL,
3854 &flags))
2d21ac55 3855 fRedirUPL = 0;
91447636
A
3856
3857 if (physMem)
3858 {
2d21ac55 3859 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3860 if (false)
3861 physMem->redirect(0, true);
91447636
A
3862 }
3863 }
3864
3865 if (newBackingMemory)
3866 {
2d21ac55 3867 if (newBackingMemory != fMemory)
91447636 3868 {
2d21ac55
A
3869 fOffset = 0;
3870 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3871 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3872 offset, fLength))
91447636
A
3873 err = kIOReturnError;
3874 }
2d21ac55 3875 if (fRedirUPL)
91447636 3876 {
2d21ac55
A
3877 upl_commit(fRedirUPL, NULL, 0);
3878 upl_deallocate(fRedirUPL);
3879 fRedirUPL = 0;
91447636 3880 }
b0d623f7 3881 if (false && physMem)
91447636
A
3882 physMem->redirect(0, false);
3883 }
3884 }
3885 while (false);
3886
3887 UNLOCK;
3888
3889 if (physMem)
3890 physMem->release();
3891
3892 return (err);
3893}
3894
1c79356b
A
3895IOMemoryMap * IOMemoryDescriptor::makeMapping(
3896 IOMemoryDescriptor * owner,
2d21ac55
A
3897 task_t __intoTask,
3898 IOVirtualAddress __address,
1c79356b 3899 IOOptionBits options,
2d21ac55
A
3900 IOByteCount __offset,
3901 IOByteCount __length )
1c79356b 3902{
b0d623f7 3903#ifndef __LP64__
2d21ac55 3904 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
b0d623f7 3905#endif /* !__LP64__ */
2d21ac55 3906
91447636 3907 IOMemoryDescriptor * mapDesc = 0;
b0d623f7 3908 IOMemoryMap * result = 0;
2d21ac55
A
3909 OSIterator * iter;
3910
b0d623f7 3911 IOMemoryMap * mapping = (IOMemoryMap *) __address;
2d21ac55
A
3912 mach_vm_size_t offset = mapping->fOffset + __offset;
3913 mach_vm_size_t length = mapping->fLength;
3914
3915 mapping->fOffset = offset;
1c79356b
A
3916
3917 LOCK;
3918
91447636
A
3919 do
3920 {
2d21ac55
A
3921 if (kIOMapStatic & options)
3922 {
3923 result = mapping;
3924 addMapping(mapping);
3925 mapping->setMemoryDescriptor(this, 0);
3926 continue;
3927 }
3928
91447636
A
3929 if (kIOMapUnique & options)
3930 {
060df5ea 3931 addr64_t phys;
91447636 3932 IOByteCount physLen;
1c79356b 3933
2d21ac55 3934// if (owner != this) continue;
1c79356b 3935
0c530ab8
A
3936 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3937 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3938 {
b0d623f7 3939 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
91447636
A
3940 if (!phys || (physLen < length))
3941 continue;
3942
b0d623f7
A
3943 mapDesc = IOMemoryDescriptor::withAddressRange(
3944 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
91447636
A
3945 if (!mapDesc)
3946 continue;
3947 offset = 0;
2d21ac55 3948 mapping->fOffset = offset;
91447636
A
3949 }
3950 }
3951 else
3952 {
2d21ac55
A
3953 // look for a compatible existing mapping
3954 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3955 {
b0d623f7
A
3956 IOMemoryMap * lookMapping;
3957 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
2d21ac55
A
3958 {
3959 if ((result = lookMapping->copyCompatible(mapping)))
3960 {
3961 addMapping(result);
3962 result->setMemoryDescriptor(this, offset);
91447636 3963 break;
2d21ac55 3964 }
91447636
A
3965 }
3966 iter->release();
3967 }
2d21ac55 3968 if (result || (options & kIOMapReference))
6d2010ae
A
3969 {
3970 if (result != mapping)
3971 {
3972 mapping->release();
3973 mapping = NULL;
3974 }
91447636 3975 continue;
6d2010ae 3976 }
2d21ac55 3977 }
91447636 3978
2d21ac55
A
3979 if (!mapDesc)
3980 {
3981 mapDesc = this;
91447636
A
3982 mapDesc->retain();
3983 }
2d21ac55
A
3984 IOReturn
3985 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3986 if (kIOReturnSuccess == kr)
3987 {
39236c6e
A
3988 if (0 == (mapping->fOptions & kIOMapStatic)) {
3989 vm_map_iokit_mapped_region(mapping->fAddressMap, length);
3990 }
3991
2d21ac55
A
3992 result = mapping;
3993 mapDesc->addMapping(result);
3994 result->setMemoryDescriptor(mapDesc, offset);
3995 }
3996 else
3997 {
1c79356b 3998 mapping->release();
2d21ac55 3999 mapping = NULL;
1c79356b 4000 }
91447636 4001 }
2d21ac55 4002 while( false );
1c79356b
A
4003
4004 UNLOCK;
4005
91447636
A
4006 if (mapDesc)
4007 mapDesc->release();
4008
2d21ac55 4009 return (result);
1c79356b
A
4010}
4011
4012void IOMemoryDescriptor::addMapping(
4013 IOMemoryMap * mapping )
4014{
2d21ac55
A
4015 if( mapping)
4016 {
1c79356b
A
4017 if( 0 == _mappings)
4018 _mappings = OSSet::withCapacity(1);
9bccf70c
A
4019 if( _mappings )
4020 _mappings->setObject( mapping );
1c79356b
A
4021 }
4022}
4023
4024void IOMemoryDescriptor::removeMapping(
4025 IOMemoryMap * mapping )
4026{
9bccf70c 4027 if( _mappings)
1c79356b 4028 _mappings->removeObject( mapping);
1c79356b
A
4029}
4030
b0d623f7
A
4031#ifndef __LP64__
4032// obsolete initializers
4033// - initWithOptions is the designated initializer
1c79356b 4034bool
b0d623f7 4035IOMemoryDescriptor::initWithAddress(void * address,
55e303ae
A
4036 IOByteCount length,
4037 IODirection direction)
1c79356b
A
4038{
4039 return( false );
4040}
4041
4042bool
b0d623f7 4043IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
55e303ae
A
4044 IOByteCount length,
4045 IODirection direction,
4046 task_t task)
1c79356b
A
4047{
4048 return( false );
4049}
4050
4051bool
b0d623f7 4052IOMemoryDescriptor::initWithPhysicalAddress(
1c79356b 4053 IOPhysicalAddress address,
55e303ae
A
4054 IOByteCount length,
4055 IODirection direction )
1c79356b
A
4056{
4057 return( false );
4058}
4059
4060bool
b0d623f7 4061IOMemoryDescriptor::initWithRanges(
1c79356b
A
4062 IOVirtualRange * ranges,
4063 UInt32 withCount,
55e303ae
A
4064 IODirection direction,
4065 task_t task,
4066 bool asReference)
1c79356b
A
4067{
4068 return( false );
4069}
4070
4071bool
b0d623f7 4072IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1c79356b 4073 UInt32 withCount,
55e303ae
A
4074 IODirection direction,
4075 bool asReference)
1c79356b
A
4076{
4077 return( false );
4078}
4079
b0d623f7
A
4080void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
4081 IOByteCount * lengthOfSegment)
4082{
4083 return( 0 );
4084}
4085#endif /* !__LP64__ */
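// The stubs above simply fail; as a hedged pointer to the supported path, new
// descriptors are built through the factory methods (ultimately initWithOptions),
// e.g. withAddressRange:
static IOMemoryDescriptor *
exampleCreateDescriptor( mach_vm_address_t address, mach_vm_size_t length, task_t task )
{
    return IOMemoryDescriptor::withAddressRange( address, length,
                                                 kIODirectionOutIn, task );
}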
4086
1c79356b
A
4087/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4088
9bccf70c
A
4089bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
4090{
4091 OSSymbol const *keys[2];
4092 OSObject *values[2];
91447636
A
4093 struct SerData {
4094 user_addr_t address;
4095 user_size_t length;
4096 } *vcopy;
9bccf70c
A
4097 unsigned int index, nRanges;
4098 bool result;
4099
91447636
A
4100 IOOptionBits type = _flags & kIOMemoryTypeMask;
4101
9bccf70c
A
4102 if (s == NULL) return false;
4103 if (s->previouslySerialized(this)) return true;
4104
4105 // Pretend we are an array.
4106 if (!s->addXMLStartTag(this, "array")) return false;
4107
4108 nRanges = _rangesCount;
91447636 4109 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
9bccf70c
A
4110 if (vcopy == 0) return false;
4111
4112 keys[0] = OSSymbol::withCString("address");
4113 keys[1] = OSSymbol::withCString("length");
4114
4115 result = false;
4116 values[0] = values[1] = 0;
4117
 4118 // From this point on, any failure can goto bail.
4119
4120 // Copy the volatile data so we don't have to allocate memory
4121 // while the lock is held.
4122 LOCK;
4123 if (nRanges == _rangesCount) {
91447636 4124 Ranges vec = _ranges;
9bccf70c 4125 for (index = 0; index < nRanges; index++) {
91447636
A
4126 user_addr_t addr; IOByteCount len;
4127 getAddrLenForInd(addr, len, type, vec, index);
4128 vcopy[index].address = addr;
4129 vcopy[index].length = len;
9bccf70c
A
4130 }
4131 } else {
4132 // The descriptor changed out from under us. Give up.
4133 UNLOCK;
4134 result = false;
4135 goto bail;
4136 }
4137 UNLOCK;
4138
4139 for (index = 0; index < nRanges; index++)
4140 {
91447636
A
4141 user_addr_t addr = vcopy[index].address;
4142 IOByteCount len = (IOByteCount) vcopy[index].length;
4143 values[0] =
060df5ea 4144 OSNumber::withNumber(addr, sizeof(addr) * 8);
9bccf70c
A
4145 if (values[0] == 0) {
4146 result = false;
4147 goto bail;
4148 }
91447636 4149 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
9bccf70c
A
4150 if (values[1] == 0) {
4151 result = false;
4152 goto bail;
4153 }
4154 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
4155 if (dict == 0) {
4156 result = false;
4157 goto bail;
4158 }
4159 values[0]->release();
4160 values[1]->release();
4161 values[0] = values[1] = 0;
4162
4163 result = dict->serialize(s);
4164 dict->release();
4165 if (!result) {
4166 goto bail;
4167 }
4168 }
4169 result = s->addXMLEndTag("array");
4170
4171 bail:
4172 if (values[0])
4173 values[0]->release();
4174 if (values[1])
4175 values[1]->release();
4176 if (keys[0])
4177 keys[0]->release();
4178 if (keys[1])
4179 keys[1]->release();
4180 if (vcopy)
2d21ac55 4181 IOFree(vcopy, sizeof(SerData) * nRanges);
9bccf70c
A
4182 return result;
4183}
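// A usage sketch for the serializer above ('md' is an assumed descriptor): the
// output is an XML <array> of dictionaries, one {address, length} pair per range.
static void
exampleSerializeRanges( IOMemoryDescriptor * md )
{
    OSSerialize * s = OSSerialize::withCapacity( 4096 );
    if (!s)
        return;
    if (md->serialize( s ))
        IOLog( "%s\n", s->text() );
    s->release();
}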
4184
9bccf70c
A
4185/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
4186
0b4e3aa0 4187OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
b0d623f7
A
4188#ifdef __LP64__
4189OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
4190OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
4191OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
4192OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
4193OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
4194OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
4195OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
4196#else /* !__LP64__ */
55e303ae
A
4197OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
4198OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
91447636
A
4199OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
4200OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
0c530ab8 4201OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
b0d623f7
A
4202OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
4203OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
4204#endif /* !__LP64__ */
1c79356b
A
4205OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
4206OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
4207OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
4208OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
4209OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
4210OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
4211OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
4212OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 4213
55e303ae 4214/* ex-inline function implementation */
0c530ab8
A
4215IOPhysicalAddress
4216IOMemoryDescriptor::getPhysicalAddress()
9bccf70c 4217 { return( getPhysicalSegment( 0, 0 )); }