/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

kern_return_t
memory_object_iopl_request(
    ipc_port_t              port,
    memory_object_offset_t  offset,
    vm_size_t               *upl_size,
    upl_t                   *upl_ptr,
    upl_page_info_array_t   user_page_list,
    unsigned int            *page_list_count,
    int                     *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;   // Pointer to page list or index into it
    uint32_t fIOMDOffset;     // The offset of this iopl in descriptor
    ppnum_t fMappedBase;      // Page number of first page in this iopl
    unsigned int fPageOffset; // Offset within first page of iopl
    unsigned int fFlags;      // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    uint64_t fPreparationID;
    unsigned int fPageCnt;
#if __LP64__
    // align arrays to 8 bytes so following macros work
    unsigned int fPad;
#endif
    upl_page_info_t fPageList[1]; /* variable length */
    ioPLBlock fBlocks[1];         /* variable length */
};

#define getDataP(osd)    ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)   ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)   (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
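
// The ioGMDData record lives inline in the _memoryEntries OSData buffer: a
// fixed header followed by two variable-length arrays, fPageCnt
// upl_page_info_t entries and then one ioPLBlock per IOPL.  computeDataSize()
// sizes that buffer; initWithOptions(), for example, reserves
//
//     unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
//     _memoryEntries = OSData::withCapacity(dataSize);
//
// and getIOPLList()/getNumIOPL() later locate the ioPLBlock array just past
// the page list, which is why the arrays carry 8-byte alignment padding on
// LP64.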


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )


extern "C" {

kern_return_t device_data_action(
    uintptr_t               device_handle,
    ipc_port_t              device_pager,
    vm_prot_t               protection,
    vm_object_offset_t      offset,
    vm_size_t               size)
{
    kern_return_t        kr;
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->dp.memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
    uintptr_t     device_handle)
{
    IOMemoryDescriptorReserved * ref = (IOMemoryDescriptorReserved *) device_handle;

    IODelete( ref, IOMemoryDescriptorReserved, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"
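
// These two routines are the callbacks the device pager invokes for
// descriptor-backed device memory: device_data_action() forwards a paging
// request to the owning descriptor's handleFault(), while device_close()
// disposes of the IOMemoryDescriptorReserved bookkeeping when the pager is
// torn down.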

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
                 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

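// Typical call pattern (an illustrative sketch of how initWithOptions() below
// walks the range vector; the variable names are placeholders):
//
//     user_addr_t      addr;
//     IOPhysicalLength len;
//     for (unsigned ind = 0; ind < _rangesCount; ind++) {
//         getAddrLenForInd(addr, len, type, _ranges, ind);
//         // ... accumulate byte and page counts for the descriptor ...
//     }
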
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                        IOPhysicalAddress address,
                        IOByteCount       length,
                        IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges(IOVirtualRange * ranges,
                               UInt32           withCount,
                               IODirection      direction,
                               task_t           task,
                               bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
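
// Illustrative driver-side usage of the factory above (a sketch only; the
// buffer address, length and task are placeholder names, not values taken
// from this file):
//
//     IOMemoryDescriptor * md =
//         IOMemoryDescriptor::withAddressRange(clientAddr, clientLen,
//                                              kIODirectionOutIn, clientTask);
//     if (md && (kIOReturnSuccess == md->prepare()))
//     {
//         // ... perform DMA or readBytes()/writeBytes() on the wired memory ...
//         md->complete();
//     }
//     if (md) md->release();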
334
1c79356b
A
335
336/*
b0d623f7 337 * withOptions:
1c79356b
A
338 *
339 * Create a new IOMemoryDescriptor. The buffer is made up of several
340 * virtual address ranges, from a given task.
341 *
342 * Passing the ranges as a reference will avoid an extra allocation.
343 */
344IOMemoryDescriptor *
55e303ae
A
345IOMemoryDescriptor::withOptions(void * buffers,
346 UInt32 count,
347 UInt32 offset,
348 task_t task,
349 IOOptionBits opts,
350 IOMapper * mapper)
1c79356b 351{
55e303ae 352 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 353
55e303ae
A
354 if (self
355 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
356 {
357 self->release();
358 return 0;
de355530 359 }
55e303ae
A
360
361 return self;
362}
363
55e303ae
A
364bool IOMemoryDescriptor::initWithOptions(void * buffers,
365 UInt32 count,
366 UInt32 offset,
367 task_t task,
368 IOOptionBits options,
369 IOMapper * mapper)
370{
b0d623f7 371 return( false );
1c79356b
A
372}
373
b0d623f7 374#ifndef __LP64__
1c79356b
A
375IOMemoryDescriptor *
376IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
377 UInt32 withCount,
55e303ae
A
378 IODirection direction,
379 bool asReference)
1c79356b
A
380{
381 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
382 if (that)
383 {
55e303ae 384 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1c79356b
A
385 return that;
386
387 that->release();
388 }
389 return 0;
390}
391
392IOMemoryDescriptor *
393IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
394 IOByteCount offset,
395 IOByteCount length,
55e303ae 396 IODirection direction)
1c79356b 397{
b0d623f7 398 return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
1c79356b 399}
b0d623f7 400#endif /* !__LP64__ */
1c79356b 401
0c530ab8
A
402IOMemoryDescriptor *
403IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
91447636
A
404{
405 IOGeneralMemoryDescriptor *origGenMD =
406 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
407
408 if (origGenMD)
409 return IOGeneralMemoryDescriptor::
410 withPersistentMemoryDescriptor(origGenMD);
411 else
412 return 0;
413}
414
0c530ab8
A
415IOMemoryDescriptor *
416IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
91447636
A
417{
418 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
419
420 if (!sharedMem)
421 return 0;
422
423 if (sharedMem == originalMD->_memEntry) {
424 originalMD->retain(); // Add a new reference to ourselves
425 ipc_port_release_send(sharedMem); // Remove extra send right
426 return originalMD;
427 }
428
429 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
430 typePersMDData initData = { originalMD, sharedMem };
431
432 if (self
433 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
434 self->release();
435 self = 0;
436 }
437 return self;
438}
439
440void *IOGeneralMemoryDescriptor::createNamedEntry()
441{
442 kern_return_t error;
443 ipc_port_t sharedMem;
444
445 IOOptionBits type = _flags & kIOMemoryTypeMask;
446
447 user_addr_t range0Addr;
448 IOByteCount range0Len;
449 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
450 range0Addr = trunc_page_64(range0Addr);
451
452 vm_size_t size = ptoa_32(_pages);
453 vm_address_t kernelPage = (vm_address_t) range0Addr;
454
455 vm_map_t theMap = ((_task == kernel_task)
456 && (kIOMemoryBufferPageable & _flags))
457 ? IOPageableMapForAddress(kernelPage)
458 : get_task_map(_task);
459
460 memory_object_size_t actualSize = size;
2d21ac55 461 vm_prot_t prot = VM_PROT_READ;
2d21ac55 462 if (kIODirectionOut != (kIODirectionOutIn & _flags))
2d21ac55
A
463 prot |= VM_PROT_WRITE;
464
91447636
A
465 if (_memEntry)
466 prot |= MAP_MEM_NAMED_REUSE;
467
468 error = mach_make_memory_entry_64(theMap,
469 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
470
471 if (KERN_SUCCESS == error) {
472 if (actualSize == size) {
473 return sharedMem;
474 } else {
475#if IOASSERT
b0d623f7
A
476 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
477 (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
91447636
A
478#endif
479 ipc_port_release_send( sharedMem );
480 }
481 }
482
483 return MACH_PORT_NULL;
484}
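
// createNamedEntry() wraps the descriptor's first range in a Mach named memory
// entry via mach_make_memory_entry_64().  When an entry already exists,
// MAP_MEM_NAMED_REUSE asks the VM to return that same entry if the backing
// object is unchanged, which is how withPersistentMemoryDescriptor() above can
// detect that it may simply return the original descriptor.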
485
b0d623f7 486#ifndef __LP64__
1c79356b
A
487bool
488IOGeneralMemoryDescriptor::initWithAddress(void * address,
489 IOByteCount withLength,
490 IODirection withDirection)
491{
b0d623f7 492 _singleRange.v.address = (vm_offset_t) address;
1c79356b
A
493 _singleRange.v.length = withLength;
494
495 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
496}
497
498bool
b0d623f7 499IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
1c79356b
A
500 IOByteCount withLength,
501 IODirection withDirection,
502 task_t withTask)
503{
504 _singleRange.v.address = address;
505 _singleRange.v.length = withLength;
506
507 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
508}
509
510bool
511IOGeneralMemoryDescriptor::initWithPhysicalAddress(
512 IOPhysicalAddress address,
513 IOByteCount withLength,
514 IODirection withDirection )
515{
516 _singleRange.p.address = address;
517 _singleRange.p.length = withLength;
518
519 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
520}
521
55e303ae
A
522bool
523IOGeneralMemoryDescriptor::initWithPhysicalRanges(
524 IOPhysicalRange * ranges,
525 UInt32 count,
526 IODirection direction,
527 bool reference)
528{
529 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
530
531 if (reference)
532 mdOpts |= kIOMemoryAsReference;
533
534 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
535}
536
537bool
538IOGeneralMemoryDescriptor::initWithRanges(
539 IOVirtualRange * ranges,
540 UInt32 count,
541 IODirection direction,
542 task_t task,
543 bool reference)
544{
545 IOOptionBits mdOpts = direction;
546
547 if (reference)
548 mdOpts |= kIOMemoryAsReference;
549
550 if (task) {
551 mdOpts |= kIOMemoryTypeVirtual;
91447636
A
552
553 // Auto-prepare if this is a kernel memory descriptor as very few
554 // clients bother to prepare() kernel memory.
2d21ac55 555 // But it was not enforced so what are you going to do?
55e303ae
A
556 if (task == kernel_task)
557 mdOpts |= kIOMemoryAutoPrepare;
558 }
559 else
560 mdOpts |= kIOMemoryTypePhysical;
55e303ae
A
561
562 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
563}
b0d623f7 564#endif /* !__LP64__ */
55e303ae 565
/*
 * initWithOptions:
 *
 *  Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 *  address ranges, from a given task, several physical ranges, a UPL from the
 *  ubc system or a uio (may be 64bit) from the BSD subsystem.
 *
 *  Passing the ranges as a reference will avoid an extra allocation.
 *
 *  An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 *  existing instance -- note this behavior is not commonly supported in other
 *  I/O Kit classes, although it is supported here.
 */
55e303ae 579
1c79356b 580bool
55e303ae
A
581IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
582 UInt32 count,
583 UInt32 offset,
584 task_t task,
585 IOOptionBits options,
586 IOMapper * mapper)
587{
91447636
A
588 IOOptionBits type = options & kIOMemoryTypeMask;
589
6d2010ae
A
590#ifndef __LP64__
591 if (task
592 && (kIOMemoryTypeVirtual == type)
593 && vm_map_is_64bit(get_task_map(task))
594 && ((IOVirtualRange *) buffers)->address)
595 {
596 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
597 return false;
598 }
599#endif /* !__LP64__ */
600
    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
603 if (kIOMemoryTypePersistentMD == type) {
604
605 typePersMDData *initData = (typePersMDData *) buffers;
606 const IOGeneralMemoryDescriptor *orig = initData->fMD;
607 ioGMDData *dataP = getDataP(orig->_memoryEntries);
608
609 // Only accept persistent memory descriptors with valid dataP data.
610 assert(orig->_rangesCount == 1);
611 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
612 return false;
613
614 _memEntry = initData->fMemEntry; // Grab the new named entry
6d2010ae
A
615 options = orig->_flags & ~kIOMemoryAsReference;
616 type = options & kIOMemoryTypeMask;
617 buffers = orig->_ranges.v;
618 count = orig->_rangesCount;
55e303ae 619
91447636
A
620 // Now grab the original task and whatever mapper was previously used
621 task = orig->_task;
622 mapper = dataP->fMapper;
623
624 // We are ready to go through the original initialisation now
625 }
626
627 switch (type) {
628 case kIOMemoryTypeUIO:
55e303ae 629 case kIOMemoryTypeVirtual:
b0d623f7 630#ifndef __LP64__
0c530ab8 631 case kIOMemoryTypeVirtual64:
b0d623f7 632#endif /* !__LP64__ */
55e303ae
A
633 assert(task);
634 if (!task)
635 return false;
2d21ac55 636 break;
55e303ae
A
637
638 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
b0d623f7 639#ifndef __LP64__
0c530ab8 640 case kIOMemoryTypePhysical64:
b0d623f7 641#endif /* !__LP64__ */
55e303ae
A
642 case kIOMemoryTypeUPL:
643 assert(!task);
644 break;
645 default:
55e303ae
A
646 return false; /* bad argument */
647 }
648
649 assert(buffers);
650 assert(count);
1c79356b
A
651
652 /*
653 * We can check the _initialized instance variable before having ever set
654 * it to an initial value because I/O Kit guarantees that all our instance
655 * variables are zeroed on an object's allocation.
656 */
657
55e303ae 658 if (_initialized) {
1c79356b
A
659 /*
660 * An existing memory descriptor is being retargeted to point to
661 * somewhere else. Clean up our present state.
662 */
2d21ac55
A
663 IOOptionBits type = _flags & kIOMemoryTypeMask;
664 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
665 {
666 while (_wireCount)
667 complete();
668 }
b0d623f7 669 if (_ranges.v && !(kIOMemoryAsReference & _flags))
0c530ab8
A
670 {
671 if (kIOMemoryTypeUIO == type)
672 uio_free((uio_t) _ranges.v);
b0d623f7 673#ifndef __LP64__
0c530ab8
A
674 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
675 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 676#endif /* !__LP64__ */
0c530ab8
A
677 else
678 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
679 }
2d21ac55 680
91447636 681 if (_memEntry)
6d2010ae
A
682 {
683 ipc_port_release_send((ipc_port_t) _memEntry);
684 _memEntry = 0;
685 }
2d21ac55
A
686 if (_mappings)
687 _mappings->flushCollection();
1c79356b 688 }
55e303ae
A
689 else {
690 if (!super::init())
691 return false;
692 _initialized = true;
693 }
d7e50217 694
55e303ae 695 // Grab the appropriate mapper
b0d623f7 696 if (kIOMemoryMapperNone & options)
55e303ae 697 mapper = 0; // No Mapper
0c530ab8 698 else if (mapper == kIOMapperSystem) {
55e303ae
A
699 IOMapper::checkForSystemMapper();
700 gIOSystemMapper = mapper = IOMapper::gSystem;
701 }
1c79356b 702
c910b4d9
A
703 // Temp binary compatibility for kIOMemoryThreadSafe
704 if (kIOMemoryReserved6156215 & options)
705 {
706 options &= ~kIOMemoryReserved6156215;
707 options |= kIOMemoryThreadSafe;
708 }
91447636
A
709 // Remove the dynamic internal use flags from the initial setting
710 options &= ~(kIOMemoryPreparedReadOnly);
55e303ae
A
711 _flags = options;
712 _task = task;
713
b0d623f7 714#ifndef __LP64__
55e303ae 715 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
b0d623f7 716#endif /* !__LP64__ */
0c530ab8
A
717
718 __iomd_reservedA = 0;
719 __iomd_reservedB = 0;
0c530ab8 720 _highestPage = 0;
1c79356b 721
2d21ac55
A
722 if (kIOMemoryThreadSafe & options)
723 {
724 if (!_prepareLock)
725 _prepareLock = IOLockAlloc();
726 }
727 else if (_prepareLock)
728 {
729 IOLockFree(_prepareLock);
730 _prepareLock = NULL;
731 }
732
91447636 733 if (kIOMemoryTypeUPL == type) {
1c79356b 734
55e303ae
A
735 ioGMDData *dataP;
736 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
d7e50217 737
55e303ae
A
738 if (!_memoryEntries) {
739 _memoryEntries = OSData::withCapacity(dataSize);
740 if (!_memoryEntries)
741 return false;
742 }
743 else if (!_memoryEntries->initWithCapacity(dataSize))
744 return false;
745
6d2010ae 746 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
55e303ae
A
747 dataP = getDataP(_memoryEntries);
748 dataP->fMapper = mapper;
749 dataP->fPageCnt = 0;
750
0c530ab8 751 // _wireCount++; // UPLs start out life wired
55e303ae
A
752
753 _length = count;
754 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
755
756 ioPLBlock iopl;
55e303ae 757 iopl.fIOPL = (upl_t) buffers;
6d2010ae 758 upl_set_referenced(iopl.fIOPL, true);
b0d623f7
A
759 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);
760
761 if (upl_get_size(iopl.fIOPL) < (count + offset))
762 panic("short external upl");
763
    // Set the flag kIOPLOnDevice conveniently equal to 1
765 iopl.fFlags = pageList->device | kIOPLExternUPL;
766 iopl.fIOMDOffset = 0;
0c530ab8
A
767
768 _highestPage = upl_get_highest_page(iopl.fIOPL);
769
55e303ae 770 if (!pageList->device) {
55e303ae
A
771 // Pre-compute the offset into the UPL's page list
772 pageList = &pageList[atop_32(offset)];
773 offset &= PAGE_MASK;
774 if (mapper) {
775 iopl.fMappedBase = mapper->iovmAlloc(_pages);
776 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
777 }
778 else
779 iopl.fMappedBase = 0;
780 }
781 else
782 iopl.fMappedBase = 0;
783 iopl.fPageInfo = (vm_address_t) pageList;
784 iopl.fPageOffset = offset;
785
786 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
d7e50217 787 }
91447636 788 else {
0c530ab8
A
789 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
790 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
91447636
A
791
792 // Initialize the memory descriptor
793 if (options & kIOMemoryAsReference) {
b0d623f7 794#ifndef __LP64__
91447636 795 _rangesIsAllocated = false;
b0d623f7 796#endif /* !__LP64__ */
91447636
A
797
798 // Hack assignment to get the buffer arg into _ranges.
799 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
800 // work, C++ sigh.
801 // This also initialises the uio & physical ranges.
802 _ranges.v = (IOVirtualRange *) buffers;
803 }
804 else {
b0d623f7 805#ifndef __LP64__
6601e61a 806 _rangesIsAllocated = true;
b0d623f7
A
807#endif /* !__LP64__ */
808 switch (type)
0c530ab8
A
809 {
810 case kIOMemoryTypeUIO:
811 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
812 break;
813
b0d623f7 814#ifndef __LP64__
0c530ab8
A
815 case kIOMemoryTypeVirtual64:
816 case kIOMemoryTypePhysical64:
b0d623f7 817 if (count == 1
6d2010ae
A
818 && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
819 ) {
b0d623f7
A
820 if (kIOMemoryTypeVirtual64 == type)
821 type = kIOMemoryTypeVirtual;
822 else
823 type = kIOMemoryTypePhysical;
824 _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
825 _rangesIsAllocated = false;
826 _ranges.v = &_singleRange.v;
827 _singleRange.v.address = ((IOAddressRange *) buffers)->address;
828 _singleRange.v.length = ((IOAddressRange *) buffers)->length;
829 break;
830 }
0c530ab8
A
831 _ranges.v64 = IONew(IOAddressRange, count);
832 if (!_ranges.v64)
833 return false;
834 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
835 break;
b0d623f7 836#endif /* !__LP64__ */
0c530ab8 837 case kIOMemoryTypeVirtual:
2d21ac55 838 case kIOMemoryTypePhysical:
b0d623f7
A
839 if (count == 1) {
840 _flags |= kIOMemoryAsReference;
841#ifndef __LP64__
842 _rangesIsAllocated = false;
843#endif /* !__LP64__ */
844 _ranges.v = &_singleRange.v;
845 } else {
846 _ranges.v = IONew(IOVirtualRange, count);
847 if (!_ranges.v)
848 return false;
849 }
0c530ab8
A
850 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
851 break;
852 }
91447636
A
853 }
854
855 // Find starting address within the vector of ranges
856 Ranges vec = _ranges;
857 UInt32 length = 0;
858 UInt32 pages = 0;
859 for (unsigned ind = 0; ind < count; ind++) {
860 user_addr_t addr;
b0d623f7 861 IOPhysicalLength len;
91447636
A
862
863 // addr & len are returned by this function
864 getAddrLenForInd(addr, len, type, vec, ind);
865 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
866 len += length;
0c530ab8 867 assert(len >= length); // Check for 32 bit wrap around
91447636 868 length = len;
0c530ab8
A
869
870 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
871 {
872 ppnum_t highPage = atop_64(addr + len - 1);
873 if (highPage > _highestPage)
874 _highestPage = highPage;
875 }
91447636
A
876 }
877 _length = length;
878 _pages = pages;
879 _rangesCount = count;
55e303ae
A
880
881 // Auto-prepare memory at creation time.
    // Implied completion when descriptor is freed
0c530ab8 883 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
91447636 884 _wireCount++; // Physical MDs are, by definition, wired
0c530ab8 885 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
55e303ae 886 ioGMDData *dataP;
91447636 887 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
55e303ae
A
888
889 if (!_memoryEntries) {
890 _memoryEntries = OSData::withCapacity(dataSize);
891 if (!_memoryEntries)
91447636 892 return false;
55e303ae
A
893 }
894 else if (!_memoryEntries->initWithCapacity(dataSize))
895 return false;
896
6d2010ae 897 _memoryEntries->appendBytes(0, computeDataSize(0, 0));
55e303ae
A
898 dataP = getDataP(_memoryEntries);
899 dataP->fMapper = mapper;
900 dataP->fPageCnt = _pages;
901
91447636
A
902 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
903 _memEntry = createNamedEntry();
55e303ae
A
904
905 if ((_flags & kIOMemoryAutoPrepare)
906 && prepare() != kIOReturnSuccess)
907 return false;
908 }
909 }
910
911 return true;
de355530
A
912}
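
// At this point the descriptor is fully described: a kIOMemoryTypeUPL
// descriptor has adopted the caller's UPL as a single ioPLBlock, while the
// range-based types have copied (or referenced) the range vector, computed
// _length, _pages and _highestPage, and either marked themselves wired
// (physical types) or set up the _memoryEntries bookkeeping that prepare()
// fills in later, preparing immediately when kIOMemoryAutoPrepare is set.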
913
1c79356b
A
914/*
915 * free
916 *
917 * Free resources.
918 */
919void IOGeneralMemoryDescriptor::free()
920{
2d21ac55
A
921 IOOptionBits type = _flags & kIOMemoryTypeMask;
922
9bccf70c 923 if( reserved)
2d21ac55
A
924 {
925 LOCK;
316670eb 926 reserved->dp.memory = 0;
2d21ac55
A
927 UNLOCK;
928 }
9bccf70c 929
2d21ac55
A
930 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
931 {
932 while (_wireCount)
933 complete();
934 }
55e303ae
A
935 if (_memoryEntries)
936 _memoryEntries->release();
937
b0d623f7 938 if (_ranges.v && !(kIOMemoryAsReference & _flags))
0c530ab8 939 {
0c530ab8
A
940 if (kIOMemoryTypeUIO == type)
941 uio_free((uio_t) _ranges.v);
b0d623f7 942#ifndef __LP64__
0c530ab8
A
943 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
944 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
b0d623f7 945#endif /* !__LP64__ */
0c530ab8
A
946 else
947 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
4a3eedf9
A
948
949 _ranges.v = NULL;
0c530ab8 950 }
9bccf70c 951
316670eb
A
952 if (reserved)
953 {
954 if (reserved->dp.devicePager)
955 {
956 // memEntry holds a ref on the device pager which owns reserved
957 // (IOMemoryDescriptorReserved) so no reserved access after this point
958 device_pager_deallocate( (memory_object_t) reserved->dp.devicePager );
959 }
960 else
961 IODelete(reserved, IOMemoryDescriptorReserved, 1);
962 reserved = NULL;
963 }
9bccf70c 964
55e303ae 965 if (_memEntry)
1c79356b 966 ipc_port_release_send( (ipc_port_t) _memEntry );
55e303ae 967
2d21ac55
A
968 if (_prepareLock)
969 IOLockFree(_prepareLock);
970
1c79356b
A
971 super::free();
972}
973
b0d623f7
A
974#ifndef __LP64__
975void IOGeneralMemoryDescriptor::unmapFromKernel()
976{
977 panic("IOGMD::unmapFromKernel deprecated");
978}
979
980void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
981{
982 panic("IOGMD::mapIntoKernel deprecated");
983}
984#endif /* !__LP64__ */
1c79356b
A
985
986/*
987 * getDirection:
988 *
989 * Get the direction of the transfer.
990 */
991IODirection IOMemoryDescriptor::getDirection() const
992{
b0d623f7
A
993#ifndef __LP64__
994 if (_direction)
995 return _direction;
996#endif /* !__LP64__ */
997 return (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b
A
998}
999
1000/*
1001 * getLength:
1002 *
1003 * Get the length of the transfer (over all ranges).
1004 */
1005IOByteCount IOMemoryDescriptor::getLength() const
1006{
1007 return _length;
1008}
1009
55e303ae 1010void IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b
A
1011{
1012 _tag = tag;
1013}
1014
1015IOOptionBits IOMemoryDescriptor::getTag( void )
1016{
1017 return( _tag);
1018}
1019
b0d623f7 1020#ifndef __LP64__
// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
0c530ab8
A
1022IOPhysicalAddress
1023IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
0b4e3aa0 1024{
0c530ab8 1025 addr64_t physAddr = 0;
1c79356b 1026
9bccf70c 1027 if( prepare() == kIOReturnSuccess) {
0c530ab8 1028 physAddr = getPhysicalSegment64( offset, length );
9bccf70c
A
1029 complete();
1030 }
0b4e3aa0 1031
0c530ab8 1032 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
0b4e3aa0 1033}
b0d623f7 1034#endif /* !__LP64__ */
0b4e3aa0 1035
55e303ae
A
1036IOByteCount IOMemoryDescriptor::readBytes
1037 (IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 1038{
b0d623f7 1039 addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1040 IOByteCount remaining;
1c79356b 1041
    // Assert that this entire I/O is within the available range
1043 assert(offset < _length);
1044 assert(offset + length <= _length);
1045 if (offset >= _length) {
55e303ae
A
1046 return 0;
1047 }
1c79356b 1048
b0d623f7
A
1049 if (kIOMemoryThreadSafe & _flags)
1050 LOCK;
1051
55e303ae
A
1052 remaining = length = min(length, _length - offset);
1053 while (remaining) { // (process another target segment?)
1054 addr64_t srcAddr64;
1055 IOByteCount srcLen;
1c79356b 1056
b0d623f7 1057 srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
55e303ae
A
1058 if (!srcAddr64)
1059 break;
1c79356b 1060
55e303ae
A
1061 // Clip segment length to remaining
1062 if (srcLen > remaining)
1063 srcLen = remaining;
1c79356b 1064
55e303ae
A
1065 copypv(srcAddr64, dstAddr, srcLen,
1066 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 1067
55e303ae
A
1068 dstAddr += srcLen;
1069 offset += srcLen;
1070 remaining -= srcLen;
1071 }
1c79356b 1072
b0d623f7
A
1073 if (kIOMemoryThreadSafe & _flags)
1074 UNLOCK;
1075
55e303ae 1076 assert(!remaining);
1c79356b 1077
55e303ae
A
1078 return length - remaining;
1079}
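
// Illustrative use of readBytes() and writeBytes() below (a sketch; buf is a
// caller-supplied kernel buffer):
//
//     char buf[64];
//     IOByteCount got = md->readBytes(/* offset */ 0, buf, sizeof(buf));
//
// Both routines walk the physical segments with getPhysicalSegment(...,
// kIOMemoryMapperNone) and copy through copypv(); writeBytes() additionally
// refuses descriptors prepared read-only (kIOMemoryPreparedReadOnly).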
0b4e3aa0 1080
55e303ae
A
1081IOByteCount IOMemoryDescriptor::writeBytes
1082 (IOByteCount offset, const void *bytes, IOByteCount length)
1083{
b0d623f7 1084 addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
55e303ae 1085 IOByteCount remaining;
0b4e3aa0 1086
    // Assert that this entire I/O is within the available range
1088 assert(offset < _length);
1089 assert(offset + length <= _length);
0b4e3aa0 1090
55e303ae 1091 assert( !(kIOMemoryPreparedReadOnly & _flags) );
0b4e3aa0 1092
55e303ae 1093 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
55e303ae
A
1094 return 0;
1095 }
0b4e3aa0 1096
b0d623f7
A
1097 if (kIOMemoryThreadSafe & _flags)
1098 LOCK;
1099
55e303ae
A
1100 remaining = length = min(length, _length - offset);
1101 while (remaining) { // (process another target segment?)
1102 addr64_t dstAddr64;
1103 IOByteCount dstLen;
0b4e3aa0 1104
b0d623f7 1105 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
55e303ae
A
1106 if (!dstAddr64)
1107 break;
0b4e3aa0 1108
55e303ae
A
1109 // Clip segment length to remaining
1110 if (dstLen > remaining)
1111 dstLen = remaining;
0b4e3aa0 1112
55e303ae
A
1113 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1114 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
0b4e3aa0 1115
55e303ae
A
1116 srcAddr += dstLen;
1117 offset += dstLen;
1118 remaining -= dstLen;
1c79356b 1119 }
1c79356b 1120
b0d623f7
A
1121 if (kIOMemoryThreadSafe & _flags)
1122 UNLOCK;
1123
55e303ae
A
1124 assert(!remaining);
1125
1126 return length - remaining;
1c79356b
A
1127}
1128
55e303ae
A
1129// osfmk/device/iokit_rpc.c
1130extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1c79356b 1131
b0d623f7
A
1132#ifndef __LP64__
1133void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1134{
1135 panic("IOGMD::setPosition deprecated");
1136}
1137#endif /* !__LP64__ */
1138
1139static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
1140
1141uint64_t
1142IOGeneralMemoryDescriptor::getPreparationID( void )
1143{
1144 ioGMDData *dataP;
7e4a7d39
A
1145
1146 if (!_wireCount)
b0d623f7 1147 return (kIOPreparationIDUnprepared);
7e4a7d39
A
1148
1149 if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
316670eb
A
1150 {
1151 IOMemoryDescriptor::setPreparationID();
1152 return (IOMemoryDescriptor::getPreparationID());
1153 }
7e4a7d39
A
1154
1155 if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
1156 return (kIOPreparationIDUnprepared);
1157
b0d623f7
A
1158 if (kIOPreparationIDUnprepared == dataP->fPreparationID)
1159 {
b0d623f7 1160 dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
b0d623f7
A
1161 }
1162 return (dataP->fPreparationID);
1163}
1164
316670eb 1165IOMemoryDescriptorReserved * IOMemoryDescriptor::getKernelReserved( void )
b0d623f7 1166{
316670eb
A
1167 if (!reserved)
1168 {
1169 reserved = IONew(IOMemoryDescriptorReserved, 1);
1170 if (reserved)
1171 bzero(reserved, sizeof(IOMemoryDescriptorReserved));
1172 }
1173 return (reserved);
1174}
1175
1176void IOMemoryDescriptor::setPreparationID( void )
1177{
1178 if (getKernelReserved() && (kIOPreparationIDUnprepared == reserved->preparationID))
1179 {
1180#if defined(__ppc__ )
1181 reserved->preparationID = gIOMDPreparationID++;
1182#else
1183 reserved->preparationID = OSIncrementAtomic64(&gIOMDPreparationID);
1184#endif
1185 }
1186}
1187
1188uint64_t IOMemoryDescriptor::getPreparationID( void )
1189{
1190 if (reserved)
1191 return (reserved->preparationID);
1192 else
1193 return (kIOPreparationIDUnsupported);
b0d623f7 1194}
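
// A preparation ID is a 64-bit value drawn from gIOMDPreparationID that
// identifies one prepared (wired) lifetime of a descriptor.  Callers such as
// IODMACommand can compare IDs to notice that a descriptor was completed and
// re-prepared since they last used it; IOGeneralMemoryDescriptor returns
// kIOPreparationIDUnprepared while the memory is not wired.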
de355530 1195
0c530ab8 1196IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
55e303ae 1197{
0c530ab8 1198 if (kIOMDGetCharacteristics == op) {
4452a7af 1199
0c530ab8
A
1200 if (dataSize < sizeof(IOMDDMACharacteristics))
1201 return kIOReturnUnderrun;
4452a7af 1202
0c530ab8
A
1203 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1204 data->fLength = _length;
1205 data->fSGCount = _rangesCount;
1206 data->fPages = _pages;
b0d623f7 1207 data->fDirection = getDirection();
0c530ab8
A
1208 if (!_wireCount)
1209 data->fIsPrepared = false;
1210 else {
1211 data->fIsPrepared = true;
1212 data->fHighestPage = _highestPage;
1213 if (_memoryEntries) {
1214 ioGMDData *gmdData = getDataP(_memoryEntries);
1215 ioPLBlock *ioplList = getIOPLList(gmdData);
1216 UInt count = getNumIOPL(_memoryEntries, gmdData);
1217
1218 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1219 && ioplList[0].fMappedBase);
1220 if (count == 1)
1221 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1222 }
1223 else
1224 data->fIsMapped = false;
1225 }
4452a7af 1226
0c530ab8 1227 return kIOReturnSuccess;
b0d623f7
A
1228
1229#if IOMD_DEBUG_DMAACTIVE
1230 } else if (kIOMDSetDMAActive == op) {
1231 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
7e4a7d39 1232 OSIncrementAtomic(&md->__iomd_reservedA);
b0d623f7
A
1233 } else if (kIOMDSetDMAInactive == op) {
1234 IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
1235 if (md->__iomd_reservedA)
7e4a7d39 1236 OSDecrementAtomic(&md->__iomd_reservedA);
b0d623f7
A
1237 else
1238 panic("kIOMDSetDMAInactive");
1239#endif /* IOMD_DEBUG_DMAACTIVE */
1240
1241 } else if (!(kIOMDWalkSegments & op))
0c530ab8
A
1242 return kIOReturnBadArgument;
1243
1244 // Get the next segment
1245 struct InternalState {
1246 IOMDDMAWalkSegmentArgs fIO;
1247 UInt fOffset2Index;
1248 UInt fIndex;
1249 UInt fNextOffset;
1250 } *isP;
1251
1252 // Find the next segment
1253 if (dataSize < sizeof(*isP))
1254 return kIOReturnUnderrun;
1255
1256 isP = (InternalState *) vData;
1257 UInt offset = isP->fIO.fOffset;
1258 bool mapped = isP->fIO.fMapped;
1259
1260 if (offset >= _length)
1261 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1262
1263 // Validate the previous offset
1264 UInt ind, off2Ind = isP->fOffset2Index;
1265 if ((kIOMDFirstSegment != op)
1266 && offset
1267 && (offset == isP->fNextOffset || off2Ind <= offset))
1268 ind = isP->fIndex;
1269 else
1270 ind = off2Ind = 0; // Start from beginning
4452a7af 1271
0c530ab8
A
1272 UInt length;
1273 UInt64 address;
1274 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
4452a7af 1275
0c530ab8
A
1276 // Physical address based memory descriptor
1277 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
4452a7af 1278
0c530ab8 1279 // Find the range after the one that contains the offset
b0d623f7 1280 mach_vm_size_t len;
0c530ab8
A
1281 for (len = 0; off2Ind <= offset; ind++) {
1282 len = physP[ind].length;
1283 off2Ind += len;
1284 }
4452a7af 1285
0c530ab8
A
1286 // Calculate length within range and starting address
1287 length = off2Ind - offset;
1288 address = physP[ind - 1].address + len - length;
89b3af67 1289
0c530ab8
A
1290 // see how far we can coalesce ranges
1291 while (ind < _rangesCount && address + length == physP[ind].address) {
1292 len = physP[ind].length;
1293 length += len;
1294 off2Ind += len;
1295 ind++;
1296 }
4452a7af 1297
0c530ab8
A
1298 // correct contiguous check overshoot
1299 ind--;
1300 off2Ind -= len;
1301 }
b0d623f7 1302#ifndef __LP64__
0c530ab8 1303 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
4452a7af 1304
0c530ab8
A
1305 // Physical address based memory descriptor
1306 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
4452a7af 1307
0c530ab8
A
1308 // Find the range after the one that contains the offset
1309 mach_vm_size_t len;
1310 for (len = 0; off2Ind <= offset; ind++) {
1311 len = physP[ind].length;
1312 off2Ind += len;
1313 }
89b3af67 1314
0c530ab8
A
1315 // Calculate length within range and starting address
1316 length = off2Ind - offset;
1317 address = physP[ind - 1].address + len - length;
89b3af67 1318
0c530ab8
A
1319 // see how far we can coalesce ranges
1320 while (ind < _rangesCount && address + length == physP[ind].address) {
1321 len = physP[ind].length;
1322 length += len;
1323 off2Ind += len;
1324 ind++;
1325 }
1326
1327 // correct contiguous check overshoot
1328 ind--;
1329 off2Ind -= len;
1330 }
b0d623f7 1331#endif /* !__LP64__ */
0c530ab8
A
1332 else do {
1333 if (!_wireCount)
1334 panic("IOGMD: not wired for the IODMACommand");
4452a7af 1335
0c530ab8 1336 assert(_memoryEntries);
4452a7af 1337
0c530ab8
A
1338 ioGMDData * dataP = getDataP(_memoryEntries);
1339 const ioPLBlock *ioplList = getIOPLList(dataP);
1340 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1341 upl_page_info_t *pageList = getPageList(dataP);
4452a7af 1342
0c530ab8 1343 assert(numIOPLs > 0);
4452a7af 1344
0c530ab8
A
1345 // Scan through iopl info blocks looking for block containing offset
1346 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1347 ind++;
4452a7af 1348
0c530ab8
A
1349 // Go back to actual range as search goes past it
1350 ioPLBlock ioplInfo = ioplList[ind - 1];
1351 off2Ind = ioplInfo.fIOMDOffset;
1352
1353 if (ind < numIOPLs)
1354 length = ioplList[ind].fIOMDOffset;
1355 else
1356 length = _length;
1357 length -= offset; // Remainder within iopl
1358
1359 // Subtract offset till this iopl in total list
1360 offset -= off2Ind;
1361
1362 // If a mapped address is requested and this is a pre-mapped IOPL
1363 // then just need to compute an offset relative to the mapped base.
1364 if (mapped && ioplInfo.fMappedBase) {
1365 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1366 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1367 continue; // Done leave do/while(false) now
1368 }
1369
1370 // The offset is rebased into the current iopl.
1371 // Now add the iopl 1st page offset.
1372 offset += ioplInfo.fPageOffset;
1373
1374 // For external UPLs the fPageInfo field points directly to
1375 // the upl's upl_page_info_t array.
1376 if (ioplInfo.fFlags & kIOPLExternUPL)
1377 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1378 else
1379 pageList = &pageList[ioplInfo.fPageInfo];
1380
1381 // Check for direct device non-paged memory
1382 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1383 address = ptoa_64(pageList->phys_addr) + offset;
1384 continue; // Done leave do/while(false) now
1385 }
4452a7af 1386
    // Now we need to compute the index into the pageList
1388 UInt pageInd = atop_32(offset);
1389 offset &= PAGE_MASK;
1390
1391 // Compute the starting address of this segment
1392 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
b0d623f7
A
1393 if (!pageAddr) {
1394 panic("!pageList phys_addr");
6d2010ae 1395 }
b0d623f7 1396
0c530ab8
A
1397 address = ptoa_64(pageAddr) + offset;
1398
    // length is currently set to the length of the remainder of the iopl.
1400 // We need to check that the remainder of the iopl is contiguous.
1401 // This is indicated by pageList[ind].phys_addr being sequential.
1402 IOByteCount contigLength = PAGE_SIZE - offset;
1403 while (contigLength < length
1404 && ++pageAddr == pageList[++pageInd].phys_addr)
1405 {
1406 contigLength += PAGE_SIZE;
1407 }
1408
1409 if (contigLength < length)
1410 length = contigLength;
1411
1412
1413 assert(address);
1414 assert(length);
1415
1416 } while (false);
1417
1418 // Update return values and state
1419 isP->fIO.fIOVMAddr = address;
1420 isP->fIO.fLength = length;
1421 isP->fIndex = ind;
1422 isP->fOffset2Index = off2Ind;
1423 isP->fNextOffset = isP->fIO.fOffset + length;
1424
1425 return kIOReturnSuccess;
1426}
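
// The InternalState scratch area above lets callers such as IODMACommand walk
// a descriptor incrementally: fIndex and fOffset2Index remember which range or
// IOPL the previous segment came from, so a sequential kIOMDWalkSegments pass
// does not rescan from the start on every call, while a non-sequential offset
// simply resets the cursor to the beginning.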
1427
1428addr64_t
b0d623f7 1429IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 1430{
b0d623f7
A
1431 IOReturn ret;
1432 addr64_t address = 0;
1433 IOByteCount length = 0;
1434 IOMapper * mapper = gIOSystemMapper;
1435 IOOptionBits type = _flags & kIOMemoryTypeMask;
1436
1437 if (lengthOfSegment)
1438 *lengthOfSegment = 0;
1439
1440 if (offset >= _length)
1441 return 0;
4452a7af 1442
b0d623f7
A
1443 // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
1444 // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
1445 // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
1446 // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up
2d21ac55 1447
b0d623f7
A
1448 if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
1449 {
1450 unsigned rangesIndex = 0;
1451 Ranges vec = _ranges;
1452 user_addr_t addr;
1453
1454 // Find starting address within the vector of ranges
1455 for (;;) {
1456 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1457 if (offset < length)
1458 break;
1459 offset -= length; // (make offset relative)
1460 rangesIndex++;
1461 }
1462
1463 // Now that we have the starting range,
1464 // lets find the last contiguous range
1465 addr += offset;
1466 length -= offset;
1467
1468 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1469 user_addr_t newAddr;
1470 IOPhysicalLength newLen;
1471
1472 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1473 if (addr + length != newAddr)
1474 break;
1475 length += newLen;
1476 }
1477 if (addr)
1478 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1479 }
1480 else
0c530ab8
A
1481 {
1482 IOMDDMAWalkSegmentState _state;
1483 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1484
1485 state->fOffset = offset;
1486 state->fLength = _length - offset;
b0d623f7 1487 state->fMapped = (0 == (options & kIOMemoryMapperNone));
0c530ab8
A
1488
1489 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1490
1491 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
b0d623f7 1492 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
0c530ab8
A
1493 ret, this, state->fOffset,
1494 state->fIOVMAddr, state->fLength);
1495 if (kIOReturnSuccess == ret)
1496 {
1497 address = state->fIOVMAddr;
1498 length = state->fLength;
1499 }
b0d623f7
A
1500
1501 // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
1502 // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up
1503
1504 if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
1505 {
1506 if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
1507 {
1508 addr64_t origAddr = address;
1509 IOByteCount origLen = length;
1510
1511 address = mapper->mapAddr(origAddr);
1512 length = page_size - (address & (page_size - 1));
1513 while ((length < origLen)
1514 && ((address + length) == mapper->mapAddr(origAddr + length)))
1515 length += page_size;
1516 if (length > origLen)
1517 length = origLen;
1518 }
1519#ifdef __LP64__
1520 else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
1521 {
1522 panic("getPhysicalSegment not mapped for I/O");
1523 }
1524#endif /* __LP64__ */
1525 }
4452a7af
A
1526 }
1527
b0d623f7
A
1528 if (!address)
1529 length = 0;
1530
4452a7af
A
1531 if (lengthOfSegment)
1532 *lengthOfSegment = length;
1533
0c530ab8
A
1534 return (address);
1535}
1536
b0d623f7
A
1537#ifndef __LP64__
1538addr64_t
1539IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
0c530ab8 1540{
b0d623f7 1541 addr64_t address = 0;
0c530ab8 1542
b0d623f7 1543 if (options & _kIOMemorySourceSegment)
0c530ab8 1544 {
b0d623f7
A
1545 address = getSourceSegment(offset, lengthOfSegment);
1546 }
1547 else if (options & kIOMemoryMapperNone)
1548 {
1549 address = getPhysicalSegment64(offset, lengthOfSegment);
1550 }
1551 else
1552 {
1553 address = getPhysicalSegment(offset, lengthOfSegment);
1554 }
0c530ab8 1555
b0d623f7
A
1556 return (address);
1557}
0c530ab8 1558
b0d623f7
A
1559addr64_t
1560IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1561{
1562 return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
1563}
0c530ab8 1564
b0d623f7
A
1565IOPhysicalAddress
1566IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1567{
1568 addr64_t address = 0;
1569 IOByteCount length = 0;
0c530ab8 1570
b0d623f7
A
1571 address = getPhysicalSegment(offset, lengthOfSegment, 0);
1572
1573 if (lengthOfSegment)
1574 length = *lengthOfSegment;
0c530ab8
A
1575
1576 if ((address + length) > 0x100000000ULL)
1577 {
2d21ac55 1578 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
b0d623f7 1579 address, (long) length, (getMetaClass())->getClassName());
0c530ab8
A
1580 }
1581
0c530ab8 1582 return ((IOPhysicalAddress) address);
55e303ae 1583}
de355530 1584
0c530ab8
A
1585addr64_t
1586IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
55e303ae
A
1587{
1588 IOPhysicalAddress phys32;
1589 IOByteCount length;
1590 addr64_t phys64;
0c530ab8 1591 IOMapper * mapper = 0;
0b4e3aa0 1592
55e303ae
A
1593 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1594 if (!phys32)
1595 return 0;
0b4e3aa0 1596
55e303ae 1597 if (gIOSystemMapper)
0c530ab8
A
1598 mapper = gIOSystemMapper;
1599
1600 if (mapper)
1c79356b 1601 {
55e303ae
A
1602 IOByteCount origLen;
1603
0c530ab8 1604 phys64 = mapper->mapAddr(phys32);
55e303ae
A
1605 origLen = *lengthOfSegment;
1606 length = page_size - (phys64 & (page_size - 1));
1607 while ((length < origLen)
0c530ab8 1608 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
55e303ae
A
1609 length += page_size;
1610 if (length > origLen)
1611 length = origLen;
1612
1613 *lengthOfSegment = length;
0b4e3aa0 1614 }
55e303ae
A
1615 else
1616 phys64 = (addr64_t) phys32;
1c79356b 1617
55e303ae 1618 return phys64;
0b4e3aa0
A
1619}
1620
0c530ab8 1621IOPhysicalAddress
b0d623f7 1622IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 1623{
b0d623f7 1624 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
0b4e3aa0
A
1625}
1626
b0d623f7
A
1627IOPhysicalAddress
1628IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1629{
1630 return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
1631}
1c79356b 1632
b0d623f7
A
1633void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1634 IOByteCount * lengthOfSegment)
1635{
1636 if (_task == kernel_task)
1637 return (void *) getSourceSegment(offset, lengthOfSegment);
1638 else
1639 panic("IOGMD::getVirtualSegment deprecated");
91447636 1640
b0d623f7
A
1641 return 0;
1642}
1643#endif /* !__LP64__ */
91447636 1644
0c530ab8
A
1645IOReturn
1646IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1647{
1648 if (kIOMDGetCharacteristics == op) {
1649 if (dataSize < sizeof(IOMDDMACharacteristics))
1650 return kIOReturnUnderrun;
1651
1652 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1653 data->fLength = getLength();
1654 data->fSGCount = 0;
b0d623f7 1655 data->fDirection = getDirection();
0c530ab8
A
1656 if (IOMapper::gSystem)
1657 data->fIsMapped = true;
1658 data->fIsPrepared = true; // Assume prepared - fails safe
1659 }
1660 else if (kIOMDWalkSegments & op) {
1661 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1662 return kIOReturnUnderrun;
1663
1664 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1665 IOByteCount offset = (IOByteCount) data->fOffset;
1666
1667 IOPhysicalLength length;
1668 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1669 if (data->fMapped && IOMapper::gSystem)
1670 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1671 else
b0d623f7 1672 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
0c530ab8
A
1673 data->fLength = length;
1674 }
1675 else
1676 return kIOReturnBadArgument;
1677
1678 return kIOReturnSuccess;
1679}
1680
b0d623f7
A
1681static IOReturn
1682purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
1683{
1684 IOReturn err = kIOReturnSuccess;
1685
1686 *control = VM_PURGABLE_SET_STATE;
1687 switch (newState)
1688 {
1689 case kIOMemoryPurgeableKeepCurrent:
1690 *control = VM_PURGABLE_GET_STATE;
1691 break;
1692
1693 case kIOMemoryPurgeableNonVolatile:
1694 *state = VM_PURGABLE_NONVOLATILE;
1695 break;
1696 case kIOMemoryPurgeableVolatile:
1697 *state = VM_PURGABLE_VOLATILE;
1698 break;
1699 case kIOMemoryPurgeableEmpty:
1700 *state = VM_PURGABLE_EMPTY;
1701 break;
1702 default:
1703 err = kIOReturnBadArgument;
1704 break;
1705 }
1706 return (err);
1707}
1708
1709static IOReturn
1710purgeableStateBits(int * state)
1711{
1712 IOReturn err = kIOReturnSuccess;
1713
1714 switch (*state)
1715 {
1716 case VM_PURGABLE_NONVOLATILE:
1717 *state = kIOMemoryPurgeableNonVolatile;
1718 break;
1719 case VM_PURGABLE_VOLATILE:
1720 *state = kIOMemoryPurgeableVolatile;
1721 break;
1722 case VM_PURGABLE_EMPTY:
1723 *state = kIOMemoryPurgeableEmpty;
1724 break;
1725 default:
1726 *state = kIOMemoryPurgeableNonVolatile;
1727 err = kIOReturnNotReady;
1728 break;
1729 }
1730 return (err);
1731}
1732
1733IOReturn
1734IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
1735 IOOptionBits * oldState )
1736{
1737 IOReturn err = kIOReturnSuccess;
1738 vm_purgable_t control;
1739 int state;
1740
1741 if (_memEntry)
1742 {
1743 err = super::setPurgeable(newState, oldState);
1744 }
1745 else
1746 {
1747 if (kIOMemoryThreadSafe & _flags)
1748 LOCK;
1749 do
1750 {
1751 // Find the appropriate vm_map for the given task
1752 vm_map_t curMap;
1753 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1754 {
1755 err = kIOReturnNotReady;
1756 break;
1757 }
1758 else
1759 curMap = get_task_map(_task);
1760
1761 // can only do one range
1762 Ranges vec = _ranges;
1763 IOOptionBits type = _flags & kIOMemoryTypeMask;
1764 user_addr_t addr;
1765 IOByteCount len;
1766 getAddrLenForInd(addr, len, type, vec, 0);
1767
1768 err = purgeableControlBits(newState, &control, &state);
1769 if (kIOReturnSuccess != err)
1770 break;
1771 err = mach_vm_purgable_control(curMap, addr, control, &state);
1772 if (oldState)
1773 {
1774 if (kIOReturnSuccess == err)
1775 {
1776 err = purgeableStateBits(&state);
1777 *oldState = state;
1778 }
1779 }
1780 }
1781 while (false);
1782 if (kIOMemoryThreadSafe & _flags)
1783 UNLOCK;
1784 }
1785 return (err);
1786}
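
// Illustrative use of setPurgeable() (a sketch; md is any descriptor backed by
// purgeable memory):
//
//     IOOptionBits oldState;
//     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//     // ... later, reclaim the contents ...
//     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//     // oldState == kIOMemoryPurgeableEmpty means the pages were discarded.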
1787
91447636
A
1788IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1789 IOOptionBits * oldState )
1790{
1791 IOReturn err = kIOReturnSuccess;
1792 vm_purgable_t control;
1793 int state;
1794
b0d623f7
A
1795 if (kIOMemoryThreadSafe & _flags)
1796 LOCK;
1797
91447636
A
1798 do
1799 {
1800 if (!_memEntry)
1801 {
1802 err = kIOReturnNotReady;
1803 break;
1804 }
b0d623f7
A
1805 err = purgeableControlBits(newState, &control, &state);
1806 if (kIOReturnSuccess != err)
1807 break;
91447636 1808 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1809 if (oldState)
1810 {
1811 if (kIOReturnSuccess == err)
1812 {
1813 err = purgeableStateBits(&state);
1814 *oldState = state;
1815 }
1816 }
1817 }
1818 while (false);
1819
1820 if (kIOMemoryThreadSafe & _flags)
1821 UNLOCK;
1822
1823 return (err);
1824}
1825
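/*
 * Illustrative sketch (not from the original source): a client owning a
 * purgeable buffer descriptor ("md", hypothetical) can volunteer and later
 * reclaim its backing store with setPurgeable().  oldState reports the prior
 * state via the kIOMemoryPurgeable* constants produced by purgeableStateBits().
 *
 *     IOOptionBits oldState;
 *     // allow the VM system to discard the pages under memory pressure
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *     // ... later, before touching the buffer again ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     bool contentsLost = (kIOMemoryPurgeableEmpty == oldState);
 */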
1826extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1827extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1828
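// Note on the two helpers below (clarifying comment, not in the original):
// kIOMemorySetEncrypted is handled by SetEncryptOp(), which *clears* the pmap
// no-encrypt attribute so the pages are treated as encryptable (e.g. when a
// hibernation image is written), while kIOMemoryClearEncrypted is handled by
// ClearEncryptOp(), which *sets* the no-encrypt attribute.  The apparent
// inversion of pmap_clear_noencrypt()/pmap_set_noencrypt() is intentional.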
1829static void SetEncryptOp(addr64_t pa, unsigned int count)
1830{
1831 ppnum_t page, end;
1832
1833 page = atop_64(round_page_64(pa));
1834 end = atop_64(trunc_page_64(pa + count));
1835 for (; page < end; page++)
1836 {
1837 pmap_clear_noencrypt(page);
1838 }
1839}
1840
1841static void ClearEncryptOp(addr64_t pa, unsigned int count)
1842{
1843 ppnum_t page, end;
1844
1845 page = atop_64(round_page_64(pa));
1846 end = atop_64(trunc_page_64(pa + count));
1847 for (; page < end; page++)
1848 {
1849 pmap_set_noencrypt(page);
1850 }
1851}
1852
1853IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1854 IOByteCount offset, IOByteCount length )
1855{
1856 IOByteCount remaining;
316670eb 1857 unsigned int res;
1858 void (*func)(addr64_t pa, unsigned int count) = 0;
1859
1860 switch (options)
1861 {
1862 case kIOMemoryIncoherentIOFlush:
1863 func = &dcache_incoherent_io_flush64;
1864 break;
1865 case kIOMemoryIncoherentIOStore:
1866 func = &dcache_incoherent_io_store64;
1867 break;
1868
1869 case kIOMemorySetEncrypted:
1870 func = &SetEncryptOp;
1871 break;
1872 case kIOMemoryClearEncrypted:
1873 func = &ClearEncryptOp;
1874 break;
1875 }
1876
1877 if (!func)
1878 return (kIOReturnUnsupported);
1879
1880 if (kIOMemoryThreadSafe & _flags)
1881 LOCK;
1882
316670eb 1883 res = 0x0UL;
1884 remaining = length = min(length, getLength() - offset);
1885 while (remaining)
1886 // (process another target segment?)
1887 {
1888 addr64_t dstAddr64;
1889 IOByteCount dstLen;
1890
b0d623f7 1891 dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
1892 if (!dstAddr64)
1893 break;
1894
1895 // Clip segment length to remaining
1896 if (dstLen > remaining)
1897 dstLen = remaining;
1898
1899 (*func)(dstAddr64, dstLen);
1900
1901 offset += dstLen;
1902 remaining -= dstLen;
1903 }
1904
1905 if (kIOMemoryThreadSafe & _flags)
1906 UNLOCK;
1907
1908 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1909}
1910
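/*
 * Illustrative sketch (not from the original source): on hardware without
 * coherent DMA, a driver can use performOperation() for cache maintenance on a
 * descriptor's buffer.  "md" is a hypothetical IOMemoryDescriptor.
 *
 *     // device has written into the buffer; flush stale lines before the CPU reads
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 *
 *     // CPU has written data the device is about to read; store dirty lines out
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 */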
316670eb 1911#if defined(__i386__) || defined(__x86_64__)
1912extern vm_offset_t first_avail;
1913#define io_kernel_static_end first_avail
1914#else
1915#error io_kernel_static_end is undefined for this architecture
1916#endif
1917
1918static kern_return_t
1919io_get_kernel_static_upl(
91447636 1920 vm_map_t /* map */,
b0d623f7 1921 uintptr_t offset,
1922 vm_size_t *upl_size,
1923 upl_t *upl,
1924 upl_page_info_array_t page_list,
1925 unsigned int *count,
1926 ppnum_t *highest_page)
1c79356b 1927{
1928 unsigned int pageCount, page;
1929 ppnum_t phys;
0c530ab8 1930 ppnum_t highestPage = 0;
1c79356b 1931
1932 pageCount = atop_32(*upl_size);
1933 if (pageCount > *count)
1934 pageCount = *count;
1c79356b 1935
55e303ae 1936 *upl = NULL;
1c79356b 1937
1938 for (page = 0; page < pageCount; page++)
1939 {
1940 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1941 if (!phys)
1942 break;
1943 page_list[page].phys_addr = phys;
1944 page_list[page].pageout = 0;
1945 page_list[page].absent = 0;
1946 page_list[page].dirty = 0;
1947 page_list[page].precious = 0;
1948 page_list[page].device = 0;
0c530ab8 1949 if (phys > highestPage)
b0d623f7 1950 highestPage = phys;
55e303ae 1951 }
0b4e3aa0 1952
1953 *highest_page = highestPage;
1954
1955 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1956}
0b4e3aa0 1957
1958IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1959{
91447636 1960 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 1961 IOReturn error = kIOReturnCannotWire;
1962 ioGMDData *dataP;
1963 ppnum_t mapBase = 0;
1964 IOMapper *mapper;
1965 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 1966
55e303ae 1967 assert(!_wireCount);
0c530ab8 1968 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 1969
7ddcb079 1970 if (_pages > gIOMaximumMappedIOPageCount)
55e303ae 1971 return kIOReturnNoResources;
0b4e3aa0 1972
1973 dataP = getDataP(_memoryEntries);
1974 mapper = dataP->fMapper;
1975 if (mapper && _pages)
1976 mapBase = mapper->iovmAlloc(_pages);
d7e50217 1977
1978 // Note that appendBytes(NULL) zeros the data up to the
1979 // desired length.
1980 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
 1981 dataP = 0; // May no longer be valid so let's not get tempted.
de355530 1982
55e303ae 1983 if (forDirection == kIODirectionNone)
b0d623f7 1984 forDirection = getDirection();
1985
1986 int uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 1987 switch (kIODirectionOutIn & forDirection)
1988 {
1989 case kIODirectionOut:
1990 // Pages do not need to be marked as dirty on commit
1991 uplFlags = UPL_COPYOUT_FROM;
1992 _flags |= kIOMemoryPreparedReadOnly;
1993 break;
1994
1995 case kIODirectionIn:
1996 default:
1997 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1998 break;
1999 }
2000 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
2001
2002#ifdef UPL_NEED_32BIT_ADDR
2003 if (kIODirectionPrepareToPhys32 & forDirection)
2004 uplFlags |= UPL_NEED_32BIT_ADDR;
2005#endif
2006
91447636 2007 // Find the appropriate vm_map for the given task
55e303ae
A
2008 vm_map_t curMap;
2009 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
2010 curMap = 0;
2011 else
2012 { curMap = get_task_map(_task); }
2013
91447636
A
2014 // Iterate over the vector of virtual ranges
2015 Ranges vec = _ranges;
2016 unsigned int pageIndex = 0;
2017 IOByteCount mdOffset = 0;
0c530ab8 2018 ppnum_t highestPage = 0;
55e303ae
A
2019 for (UInt range = 0; range < _rangesCount; range++) {
2020 ioPLBlock iopl;
91447636 2021 user_addr_t startPage;
55e303ae 2022 IOByteCount numBytes;
0c530ab8 2023 ppnum_t highPage = 0;
55e303ae 2024
2025 // Get the startPage address and length of vec[range]
2026 getAddrLenForInd(startPage, numBytes, type, vec, range);
b0d623f7 2027 iopl.fPageOffset = startPage & PAGE_MASK;
91447636
A
2028 numBytes += iopl.fPageOffset;
2029 startPage = trunc_page_64(startPage);
2030
55e303ae
A
2031 if (mapper)
2032 iopl.fMappedBase = mapBase + pageIndex;
2033 else
2034 iopl.fMappedBase = 0;
55e303ae 2035
91447636 2036 // Iterate over the current range, creating UPLs
55e303ae
A
2037 while (numBytes) {
2038 dataP = getDataP(_memoryEntries);
91447636
A
2039 vm_address_t kernelStart = (vm_address_t) startPage;
2040 vm_map_t theMap;
2041 if (curMap)
2042 theMap = curMap;
2043 else if (!sharedMem) {
2044 assert(_task == kernel_task);
2045 theMap = IOPageableMapForAddress(kernelStart);
2046 }
2047 else
2048 theMap = NULL;
2049
2050 upl_page_info_array_t pageInfo = getPageList(dataP);
2051 int ioplFlags = uplFlags;
2052 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
2053
b0d623f7 2054 vm_size_t ioplSize = round_page(numBytes);
55e303ae
A
2055 unsigned int numPageInfo = atop_32(ioplSize);
2056
91447636 2057 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
55e303ae 2058 error = io_get_kernel_static_upl(theMap,
91447636
A
2059 kernelStart,
2060 &ioplSize,
2061 &iopl.fIOPL,
2062 baseInfo,
0c530ab8
A
2063 &numPageInfo,
2064 &highPage);
91447636
A
2065 }
2066 else if (sharedMem) {
55e303ae 2067 error = memory_object_iopl_request(sharedMem,
91447636
A
2068 ptoa_32(pageIndex),
2069 &ioplSize,
2070 &iopl.fIOPL,
2071 baseInfo,
2072 &numPageInfo,
2073 &ioplFlags);
2074 }
2075 else {
2076 assert(theMap);
2077 error = vm_map_create_upl(theMap,
2078 startPage,
b0d623f7 2079 (upl_size_t*)&ioplSize,
91447636
A
2080 &iopl.fIOPL,
2081 baseInfo,
2082 &numPageInfo,
2083 &ioplFlags);
2084 }
2085
55e303ae
A
2086 assert(ioplSize);
2087 if (error != KERN_SUCCESS)
2088 goto abortExit;
2089
0c530ab8
A
2090 if (iopl.fIOPL)
2091 highPage = upl_get_highest_page(iopl.fIOPL);
2092 if (highPage > highestPage)
2093 highestPage = highPage;
2094
2d21ac55 2095 error = kIOReturnCannotWire;
55e303ae
A
2096
2097 if (baseInfo->device) {
2098 numPageInfo = 1;
2099 iopl.fFlags = kIOPLOnDevice;
2100 // Don't translate device memory at all
2101 if (mapper && mapBase) {
2102 mapper->iovmFree(mapBase, _pages);
2103 mapBase = 0;
2104 iopl.fMappedBase = 0;
2105 }
2106 }
2107 else {
2108 iopl.fFlags = 0;
0c530ab8 2109 if (mapper)
55e303ae
A
2110 mapper->iovmInsert(mapBase, pageIndex,
2111 baseInfo, numPageInfo);
2112 }
2113
2114 iopl.fIOMDOffset = mdOffset;
2115 iopl.fPageInfo = pageIndex;
2116
2117#if 0
 2118 // used to remove the upl for auto prepares here, for some errant code
 2119 // that freed memory before the descriptor pointing at it was released
55e303ae
A
2120 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
2121 {
91447636
A
2122 upl_commit(iopl.fIOPL, 0, 0);
2123 upl_deallocate(iopl.fIOPL);
55e303ae 2124 iopl.fIOPL = 0;
de355530 2125 }
6d2010ae 2126#endif
2127
2128 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
 2129 // Clean up partially created and unsaved iopl
91447636
A
2130 if (iopl.fIOPL) {
2131 upl_abort(iopl.fIOPL, 0);
2132 upl_deallocate(iopl.fIOPL);
2133 }
55e303ae
A
2134 goto abortExit;
2135 }
2136
 2137 // Check for multiple iopls in one virtual range
2138 pageIndex += numPageInfo;
2139 mdOffset -= iopl.fPageOffset;
2140 if (ioplSize < numBytes) {
2141 numBytes -= ioplSize;
2142 startPage += ioplSize;
2143 mdOffset += ioplSize;
2144 iopl.fPageOffset = 0;
2145 if (mapper)
2146 iopl.fMappedBase = mapBase + pageIndex;
2147 }
2148 else {
2149 mdOffset += numBytes;
2150 break;
2151 }
2152 }
2153 }
55e303ae 2154
0c530ab8
A
2155 _highestPage = highestPage;
2156
1c79356b
A
2157 return kIOReturnSuccess;
2158
2159abortExit:
55e303ae
A
2160 {
2161 dataP = getDataP(_memoryEntries);
91447636 2162 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
2163 ioPLBlock *ioplList = getIOPLList(dataP);
2164
2165 for (UInt range = 0; range < done; range++)
2166 {
91447636
A
2167 if (ioplList[range].fIOPL) {
2168 upl_abort(ioplList[range].fIOPL, 0);
2169 upl_deallocate(ioplList[range].fIOPL);
2170 }
55e303ae 2171 }
6d2010ae 2172 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
1c79356b 2173
55e303ae
A
2174 if (mapper && mapBase)
2175 mapper->iovmFree(mapBase, _pages);
1c79356b
A
2176 }
2177
2178 if (error == KERN_FAILURE)
2179 error = kIOReturnCannotWire;
2180
55e303ae
A
2181 return error;
2182}
d7e50217 2183
55e303ae
A
2184/*
2185 * prepare
2186 *
2187 * Prepare the memory for an I/O transfer. This involves paging in
2188 * the memory, if necessary, and wiring it down for the duration of
2189 * the transfer. The complete() method completes the processing of
2190 * the memory after the I/O transfer finishes. This method needn't
 2191 * be called for non-pageable memory.
2192 */
2193IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2194{
2195 IOReturn error = kIOReturnSuccess;
2196 IOOptionBits type = _flags & kIOMemoryTypeMask;
55e303ae 2197
2d21ac55
A
2198 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2199 return kIOReturnSuccess;
2200
2201 if (_prepareLock)
2202 IOLockLock(_prepareLock);
2203
91447636 2204 if (!_wireCount
0c530ab8 2205 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
55e303ae 2206 error = wireVirtual(forDirection);
de355530
A
2207 }
2208
2d21ac55
A
2209 if (kIOReturnSuccess == error)
2210 _wireCount++;
55e303ae 2211
2212 if (1 == _wireCount)
2213 {
2214 if (kIOMemoryClearEncrypt & _flags)
2215 {
2216 performOperation(kIOMemoryClearEncrypted, 0, _length);
2217 }
2218 }
2219
2d21ac55
A
2220 if (_prepareLock)
2221 IOLockUnlock(_prepareLock);
2222
2223 return error;
1c79356b
A
2224}
2225
2226/*
2227 * complete
2228 *
2229 * Complete processing of the memory after an I/O transfer finishes.
2230 * This method should not be called unless a prepare was previously
2231 * issued; the prepare() and complete() must occur in pairs, before
 2232 * and after an I/O transfer involving pageable memory.
2233 */
6d2010ae 2234
55e303ae 2235IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1c79356b 2236{
2d21ac55 2237 IOOptionBits type = _flags & kIOMemoryTypeMask;
1c79356b 2238
2239 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2240 return kIOReturnSuccess;
1c79356b 2241
2d21ac55
A
2242 if (_prepareLock)
2243 IOLockLock(_prepareLock);
91447636 2244
2d21ac55
A
2245 assert(_wireCount);
2246
2247 if (_wireCount)
2248 {
0b4c1975
A
2249 if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
2250 {
2251 performOperation(kIOMemorySetEncrypted, 0, _length);
2252 }
2253
2d21ac55
A
2254 _wireCount--;
2255 if (!_wireCount)
2256 {
2257 IOOptionBits type = _flags & kIOMemoryTypeMask;
2258 ioGMDData * dataP = getDataP(_memoryEntries);
2259 ioPLBlock *ioplList = getIOPLList(dataP);
91447636 2260 UInt count = getNumIOPL(_memoryEntries, dataP);
55e303ae 2261
2262#if IOMD_DEBUG_DMAACTIVE
2263 if (__iomd_reservedA) panic("complete() while dma active");
2264#endif /* IOMD_DEBUG_DMAACTIVE */
2265
2d21ac55
A
2266 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2267 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
55e303ae 2268
2d21ac55
A
2269 // Only complete iopls that we created which are for TypeVirtual
2270 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2271 for (UInt ind = 0; ind < count; ind++)
91447636
A
2272 if (ioplList[ind].fIOPL) {
2273 upl_commit(ioplList[ind].fIOPL, 0, 0);
2274 upl_deallocate(ioplList[ind].fIOPL);
2275 }
2276 } else if (kIOMemoryTypeUPL == type) {
2277 upl_set_referenced(ioplList[0].fIOPL, false);
2d21ac55 2278 }
6d2010ae
A
2279
2280 (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()
b0d623f7
A
2281
2282 dataP->fPreparationID = kIOPreparationIDUnprepared;
2d21ac55 2283 }
1c79356b 2284 }
2d21ac55
A
2285
2286 if (_prepareLock)
2287 IOLockUnlock(_prepareLock);
2288
2289 return kIOReturnSuccess;
2290}
2291
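/*
 * Illustrative sketch (not from the original source): prepare() and complete()
 * bracket an I/O transfer on pageable memory.  "md" and the transfer itself are
 * hypothetical; error handling is abbreviated.
 *
 *     IOReturn ret = md->prepare(kIODirectionOut);   // page in and wire
 *     if (kIOReturnSuccess == ret)
 *     {
 *         // ... program the hardware and wait for the transfer to finish ...
 *         md->complete(kIODirectionOut);             // unwire
 *     }
 */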
2292IOReturn IOGeneralMemoryDescriptor::doMap(
2d21ac55
A
2293 vm_map_t __addressMap,
2294 IOVirtualAddress * __address,
1c79356b 2295 IOOptionBits options,
2d21ac55
A
2296 IOByteCount __offset,
2297 IOByteCount __length )
2298
1c79356b 2299{
b0d623f7 2300#ifndef __LP64__
2d21ac55 2301 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
b0d623f7 2302#endif /* !__LP64__ */
2d21ac55 2303
b0d623f7 2304 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
2305 mach_vm_size_t offset = mapping->fOffset + __offset;
2306 mach_vm_size_t length = mapping->fLength;
2307
b0d623f7 2308 kern_return_t kr = kIOReturnVMError;
0b4e3aa0 2309 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 2310
91447636
A
2311 IOOptionBits type = _flags & kIOMemoryTypeMask;
2312 Ranges vec = _ranges;
2313
2314 user_addr_t range0Addr = 0;
2315 IOByteCount range0Len = 0;
2316
2317 if ((offset >= _length) || ((offset + length) > _length))
2318 return( kIOReturnBadArgument );
2319
91447636
A
2320 if (vec.v)
2321 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2322
1c79356b 2323 // mapping source == dest? (could be much better)
91447636 2324 if( _task
2d21ac55
A
2325 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2326 && (1 == _rangesCount) && (0 == offset)
2327 && range0Addr && (length <= range0Len) )
2328 {
2329 mapping->fAddress = range0Addr;
2330 mapping->fOptions |= kIOMapStatic;
2331
2332 return( kIOReturnSuccess );
1c79356b
A
2333 }
2334
0b4e3aa0 2335 if( 0 == sharedMem) {
1c79356b 2336
91447636 2337 vm_size_t size = ptoa_32(_pages);
1c79356b 2338
0b4e3aa0 2339 if( _task) {
0c530ab8 2340
91447636 2341 memory_object_size_t actualSize = size;
2d21ac55
A
2342 vm_prot_t prot = VM_PROT_READ;
2343 if (!(kIOMapReadOnly & options))
2344 prot |= VM_PROT_WRITE;
2345 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2346 prot |= VM_PROT_WRITE;
2347
2348 if (_rangesCount == 1)
2349 {
2350 kr = mach_make_memory_entry_64(get_task_map(_task),
2351 &actualSize, range0Addr,
2352 prot, &sharedMem,
2353 NULL);
2354 }
2355 if( (_rangesCount != 1)
2356 || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
2357 do
b0d623f7 2358 {
0b4e3aa0 2359#if IOASSERT
060df5ea
A
2360 IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
2361 _rangesCount, (UInt64)actualSize, (UInt64)size);
0b4e3aa0
A
2362#endif
2363 kr = kIOReturnVMError;
060df5ea
A
2364 if (sharedMem)
2365 {
2366 ipc_port_release_send(sharedMem);
2367 sharedMem = MACH_PORT_NULL;
2368 }
b0d623f7 2369
2370 mach_vm_address_t address, segDestAddr;
2371 mach_vm_size_t mapLength;
2372 unsigned rangesIndex;
2373 IOOptionBits type = _flags & kIOMemoryTypeMask;
2374 user_addr_t srcAddr;
2375 IOPhysicalLength segLen = 0;
2376
2377 // Find starting address within the vector of ranges
2378 for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
2379 getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
2380 if (offset < segLen)
2381 break;
2382 offset -= segLen; // (make offset relative)
2383 }
2384
2385 mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
b0d623f7 2386 address = trunc_page_64(mapping->fAddress);
060df5ea 2387
b0d623f7
A
2388 if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
2389 {
060df5ea
A
2390 vm_map_t map = mapping->fAddressMap;
2391 kr = IOMemoryDescriptorMapCopy(&map,
b0d623f7
A
2392 options,
2393 offset, &address, round_page_64(length + pageOffset));
060df5ea
A
2394 if (kr == KERN_SUCCESS)
2395 {
2396 segDestAddr = address;
2397 segLen -= offset;
316670eb 2398 srcAddr += offset;
060df5ea
A
2399 mapLength = length;
2400
2401 while (true)
2402 {
2403 vm_prot_t cur_prot, max_prot;
2404
2405 if (segLen > length) segLen = length;
060df5ea
A
2406 kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
2407 VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
2408 get_task_map(_task), trunc_page_64(srcAddr),
2409 FALSE /* copy */,
2410 &cur_prot,
2411 &max_prot,
2412 VM_INHERIT_NONE);
2413 if (KERN_SUCCESS == kr)
2414 {
2415 if ((!(VM_PROT_READ & cur_prot))
2416 || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
2417 {
2418 kr = KERN_PROTECTION_FAILURE;
2419 }
2420 }
2421 if (KERN_SUCCESS != kr)
2422 break;
2423 segDestAddr += segLen;
2424 mapLength -= segLen;
2425 if (!mapLength)
2426 break;
2427 rangesIndex++;
2428 if (rangesIndex >= _rangesCount)
2429 {
2430 kr = kIOReturnBadArgument;
2431 break;
2432 }
2433 getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
2434 if (srcAddr & PAGE_MASK)
2435 {
2436 kr = kIOReturnBadArgument;
2437 break;
2438 }
2439 if (segLen > mapLength)
2440 segLen = mapLength;
2441 }
2442 if (KERN_SUCCESS != kr)
2443 {
2444 mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
2445 }
2446 }
2447
2448 if (KERN_SUCCESS == kr)
b0d623f7
A
2449 mapping->fAddress = address + pageOffset;
2450 else
2451 mapping->fAddress = NULL;
2452 }
2453 }
060df5ea 2454 while (false);
2455 }
2456 else do
2457 { // _task == 0, must be physical
0b4e3aa0 2458
55e303ae
A
2459 memory_object_t pager;
2460 unsigned int flags = 0;
2461 addr64_t pa;
9bccf70c
A
2462 IOPhysicalLength segLen;
2463
b0d623f7 2464 pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 2465
316670eb
A
2466 if( !getKernelReserved())
2467 continue;
2468 reserved->dp.pagerContig = (1 == _rangesCount);
2469 reserved->dp.memory = this;
9bccf70c 2470
55e303ae
A
 2471 /* What cache mode do we need? */
2472 switch(options & kIOMapCacheMask ) {
9bccf70c
A
2473
2474 case kIOMapDefaultCache:
2475 default:
55e303ae 2476 flags = IODefaultCacheBits(pa);
2d21ac55
A
2477 if (DEVICE_PAGER_CACHE_INHIB & flags)
2478 {
2479 if (DEVICE_PAGER_GUARDED & flags)
2480 mapping->fOptions |= kIOMapInhibitCache;
2481 else
2482 mapping->fOptions |= kIOMapWriteCombineCache;
2483 }
2484 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2485 mapping->fOptions |= kIOMapWriteThruCache;
2486 else
2487 mapping->fOptions |= kIOMapCopybackCache;
55e303ae 2488 break;
2489
2490 case kIOMapInhibitCache:
55e303ae
A
2491 flags = DEVICE_PAGER_CACHE_INHIB |
2492 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2493 break;
9bccf70c
A
2494
2495 case kIOMapWriteThruCache:
55e303ae
A
2496 flags = DEVICE_PAGER_WRITE_THROUGH |
2497 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2498 break;
9bccf70c
A
2499
2500 case kIOMapCopybackCache:
55e303ae
A
2501 flags = DEVICE_PAGER_COHERENT;
2502 break;
2503
2504 case kIOMapWriteCombineCache:
2505 flags = DEVICE_PAGER_CACHE_INHIB |
2506 DEVICE_PAGER_COHERENT;
2507 break;
9bccf70c
A
2508 }
2509
316670eb 2510 flags |= reserved->dp.pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
9bccf70c 2511
b0d623f7 2512 pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
9bccf70c 2513 size, flags);
2514 assert( pager );
2515
2516 if( pager) {
0b4e3aa0
A
2517 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2518 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2519
2520 assert( KERN_SUCCESS == kr );
2d21ac55
A
2521 if( KERN_SUCCESS != kr)
2522 {
9bccf70c 2523 device_pager_deallocate( pager );
0b4e3aa0
A
2524 pager = MACH_PORT_NULL;
2525 sharedMem = MACH_PORT_NULL;
2526 }
2527 }
9bccf70c 2528 if( pager && sharedMem)
316670eb 2529 reserved->dp.devicePager = pager;
1c79356b 2530
1c79356b
A
2531 } while( false );
2532
0b4e3aa0
A
2533 _memEntry = (void *) sharedMem;
2534 }
2535
2d21ac55
A
2536 IOReturn result;
2537 if (0 == sharedMem)
b0d623f7 2538 result = kr;
9bccf70c 2539 else
2d21ac55
A
2540 result = super::doMap( __addressMap, __address,
2541 options, __offset, __length );
0b4e3aa0 2542
2d21ac55 2543 return( result );
1c79356b
A
2544}
2545
2546IOReturn IOGeneralMemoryDescriptor::doUnmap(
2547 vm_map_t addressMap,
2d21ac55
A
2548 IOVirtualAddress __address,
2549 IOByteCount __length )
1c79356b 2550{
2d21ac55 2551 return (super::doUnmap(addressMap, __address, __length));
1c79356b
A
2552}
2553
2554/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2555
2556#undef super
2557#define super OSObject
1c79356b 2558
b0d623f7 2559OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )
1c79356b 2560
b0d623f7
A
2561OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
2562OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
2563OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
2564OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
2565OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
2566OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
2567OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
2568OSMetaClassDefineReservedUnused(IOMemoryMap, 7);
1c79356b 2569
b0d623f7
A
2570/* ex-inline function implementation */
2571IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2572 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
2573
2574/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2575
b0d623f7 2576bool IOMemoryMap::init(
2d21ac55
A
2577 task_t intoTask,
2578 mach_vm_address_t toAddress,
2579 IOOptionBits _options,
2580 mach_vm_size_t _offset,
2581 mach_vm_size_t _length )
1c79356b 2582{
2d21ac55 2583 if (!intoTask)
1c79356b
A
2584 return( false);
2585
2d21ac55
A
2586 if (!super::init())
2587 return(false);
1c79356b 2588
2d21ac55
A
2589 fAddressMap = get_task_map(intoTask);
2590 if (!fAddressMap)
2591 return(false);
2592 vm_map_reference(fAddressMap);
1c79356b 2593
2d21ac55
A
2594 fAddressTask = intoTask;
2595 fOptions = _options;
2596 fLength = _length;
2597 fOffset = _offset;
2598 fAddress = toAddress;
1c79356b 2599
2d21ac55 2600 return (true);
1c79356b
A
2601}
2602
b0d623f7 2603bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
1c79356b 2604{
2d21ac55
A
2605 if (!_memory)
2606 return(false);
1c79356b 2607
2d21ac55 2608 if (!fSuperMap)
91447636 2609 {
2d21ac55 2610 if( (_offset + fLength) > _memory->getLength())
91447636 2611 return( false);
2d21ac55 2612 fOffset = _offset;
91447636 2613 }
1c79356b
A
2614
2615 _memory->retain();
2d21ac55 2616 if (fMemory)
91447636 2617 {
2d21ac55
A
2618 if (fMemory != _memory)
2619 fMemory->removeMapping(this);
2620 fMemory->release();
1c79356b 2621 }
2d21ac55 2622 fMemory = _memory;
91447636 2623
2d21ac55 2624 return( true );
1c79356b
A
2625}
2626
2627struct IOMemoryDescriptorMapAllocRef
2628{
2629 ipc_port_t sharedMem;
060df5ea 2630 vm_map_t map;
2d21ac55
A
2631 mach_vm_address_t mapped;
2632 mach_vm_size_t size;
2633 mach_vm_size_t sourceOffset;
0b4e3aa0
A
2634 IOOptionBits options;
2635};
2636
2637static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2638{
2639 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2640 IOReturn err;
2641
2642 do {
2d21ac55
A
2643 if( ref->sharedMem)
2644 {
0b4e3aa0
A
2645 vm_prot_t prot = VM_PROT_READ
2646 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
55e303ae 2647
2d21ac55
A
2648 // VM system requires write access to change cache mode
2649 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2650 prot |= VM_PROT_WRITE;
2651
55e303ae
A
2652 // set memory entry cache
2653 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2654 switch (ref->options & kIOMapCacheMask)
2655 {
2656 case kIOMapInhibitCache:
2657 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2658 break;
2659
2660 case kIOMapWriteThruCache:
2661 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2662 break;
2663
2664 case kIOMapWriteCombineCache:
2665 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2666 break;
2667
2668 case kIOMapCopybackCache:
2669 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2670 break;
2671
316670eb
A
2672 case kIOMapCopybackInnerCache:
2673 SET_MAP_MEM(MAP_MEM_INNERWBACK, memEntryCacheMode);
2674 break;
2675
55e303ae
A
2676 case kIOMapDefaultCache:
2677 default:
2678 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2679 break;
2680 }
2681
2682 vm_size_t unused = 0;
2683
2684 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2685 memEntryCacheMode, NULL, ref->sharedMem );
2686 if (KERN_SUCCESS != err)
2687 IOLog("MAP_MEM_ONLY failed %d\n", err);
2688
2d21ac55 2689 err = mach_vm_map( map,
2690 &ref->mapped,
2691 ref->size, 0 /* mask */,
2692 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2693 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2694 ref->sharedMem, ref->sourceOffset,
2695 false, // copy
2696 prot, // cur
2697 prot, // max
2698 VM_INHERIT_NONE);
55e303ae 2699
0b4e3aa0
A
2700 if( KERN_SUCCESS != err) {
2701 ref->mapped = 0;
2702 continue;
2703 }
060df5ea 2704 ref->map = map;
2d21ac55
A
2705 }
2706 else
2707 {
060df5ea 2708 err = mach_vm_allocate(map, &ref->mapped, ref->size,
0b4e3aa0
A
2709 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2710 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
0b4e3aa0
A
2711 if( KERN_SUCCESS != err) {
2712 ref->mapped = 0;
2713 continue;
2714 }
060df5ea 2715 ref->map = map;
0b4e3aa0 2716 // we have to make sure that these guys don't get copied if we fork.
060df5ea 2717 err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
0b4e3aa0
A
2718 assert( KERN_SUCCESS == err );
2719 }
2d21ac55
A
2720 }
2721 while( false );
0b4e3aa0
A
2722
2723 return( err );
2724}
2725
2d21ac55 2726kern_return_t
060df5ea 2727IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
2d21ac55
A
2728 mach_vm_size_t offset,
2729 mach_vm_address_t * address, mach_vm_size_t length)
2730{
2731 IOReturn err;
2732 IOMemoryDescriptorMapAllocRef ref;
2733
060df5ea 2734 ref.map = *map;
b0d623f7 2735 ref.sharedMem = entry;
cf7d32b8 2736 ref.sourceOffset = trunc_page_64(offset);
2737 ref.options = options;
2738 ref.size = length;
2d21ac55
A
2739
2740 if (options & kIOMapAnywhere)
2741 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2742 ref.mapped = 0;
2743 else
2744 ref.mapped = *address;
2745
060df5ea 2746 if( ref.sharedMem && (ref.map == kernel_map) && pageable)
2d21ac55
A
2747 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2748 else
060df5ea 2749 err = IOMemoryDescriptorMapAlloc( ref.map, &ref );
2d21ac55
A
2750
2751 *address = ref.mapped;
060df5ea
A
2752 *map = ref.map;
2753
2d21ac55
A
2754 return (err);
2755}
2756
b0d623f7 2757kern_return_t
060df5ea 2758IOMemoryDescriptorMapCopy(vm_map_t * map,
b0d623f7
A
2759 IOOptionBits options,
2760 mach_vm_size_t offset,
2761 mach_vm_address_t * address, mach_vm_size_t length)
2762{
2763 IOReturn err;
2764 IOMemoryDescriptorMapAllocRef ref;
2765
060df5ea 2766 ref.map = *map;
b0d623f7 2767 ref.sharedMem = NULL;
b0d623f7
A
2768 ref.sourceOffset = trunc_page_64(offset);
2769 ref.options = options;
2770 ref.size = length;
2771
2772 if (options & kIOMapAnywhere)
2773 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2774 ref.mapped = 0;
2775 else
2776 ref.mapped = *address;
2777
060df5ea 2778 if (ref.map == kernel_map)
b0d623f7
A
2779 err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
2780 else
060df5ea 2781 err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
b0d623f7
A
2782
2783 *address = ref.mapped;
2784 *map = ref.map;
2785
b0d623f7
A
2786 return (err);
2787}
9bccf70c 2788
1c79356b 2789IOReturn IOMemoryDescriptor::doMap(
2d21ac55
A
2790 vm_map_t __addressMap,
2791 IOVirtualAddress * __address,
1c79356b 2792 IOOptionBits options,
2d21ac55
A
2793 IOByteCount __offset,
2794 IOByteCount __length )
1c79356b 2795{
b0d623f7 2796#ifndef __LP64__
2d21ac55 2797 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
b0d623f7 2798#endif /* !__LP64__ */
1c79356b 2799
b0d623f7 2800 IOMemoryMap * mapping = (IOMemoryMap *) *__address;
2d21ac55
A
2801 mach_vm_size_t offset = mapping->fOffset + __offset;
2802 mach_vm_size_t length = mapping->fLength;
1c79356b 2803
2d21ac55
A
2804 IOReturn err = kIOReturnSuccess;
2805 memory_object_t pager;
2806 mach_vm_size_t pageOffset;
2807 IOPhysicalAddress sourceAddr;
b0d623f7 2808 unsigned int lock_count;
1c79356b 2809
2d21ac55
A
2810 do
2811 {
b0d623f7
A
2812 sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
2813 pageOffset = sourceAddr - trunc_page( sourceAddr );
1c79356b 2814
2d21ac55 2815 if( reserved)
316670eb 2816 pager = (memory_object_t) reserved->dp.devicePager;
2d21ac55
A
2817 else
2818 pager = MACH_PORT_NULL;
0b4e3aa0 2819
2820 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2821 {
2d21ac55
A
2822 upl_t redirUPL2;
2823 vm_size_t size;
2824 int flags;
0b4e3aa0 2825
91447636
A
2826 if (!_memEntry)
2827 {
2828 err = kIOReturnNotReadable;
2829 continue;
2830 }
2831
b0d623f7 2832 size = round_page(mapping->fLength + pageOffset);
91447636
A
2833 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2834 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2835
2836 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2837 NULL, NULL,
2838 &flags))
2839 redirUPL2 = NULL;
2840
b0d623f7
A
2841 for (lock_count = 0;
2842 IORecursiveLockHaveLock(gIOMemoryLock);
2843 lock_count++) {
2844 UNLOCK;
2845 }
2d21ac55 2846 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
b0d623f7
A
2847 for (;
2848 lock_count;
2849 lock_count--) {
2850 LOCK;
2851 }
2852
91447636
A
2853 if (kIOReturnSuccess != err)
2854 {
2855 IOLog("upl_transpose(%x)\n", err);
2856 err = kIOReturnSuccess;
2857 }
2858
2859 if (redirUPL2)
2860 {
2861 upl_commit(redirUPL2, NULL, 0);
2862 upl_deallocate(redirUPL2);
2863 redirUPL2 = 0;
2864 }
2865 {
2866 // swap the memEntries since they now refer to different vm_objects
2867 void * me = _memEntry;
2d21ac55
A
2868 _memEntry = mapping->fMemory->_memEntry;
2869 mapping->fMemory->_memEntry = me;
91447636 2870 }
2d21ac55 2871 if (pager)
316670eb 2872 err = handleFault( pager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
91447636
A
2873 }
2874 else
2875 {
2d21ac55
A
2876 mach_vm_address_t address;
2877
2878 if (!(options & kIOMapAnywhere))
2879 {
2880 address = trunc_page_64(mapping->fAddress);
2881 if( (mapping->fAddress - address) != pageOffset)
2882 {
91447636
A
2883 err = kIOReturnVMError;
2884 continue;
2885 }
2886 }
0b4e3aa0 2887
2888 vm_map_t map = mapping->fAddressMap;
2889 err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
2d21ac55
A
2890 options, (kIOMemoryBufferPageable & _flags),
2891 offset, &address, round_page_64(length + pageOffset));
2892 if( err != KERN_SUCCESS)
2893 continue;
0b4e3aa0 2894
2d21ac55
A
2895 if (!_memEntry || pager)
2896 {
2897 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2898 if (err != KERN_SUCCESS)
2899 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2900 }
0b4e3aa0 2901
b0d623f7 2902#if DEBUG
2d21ac55 2903 if (kIOLogMapping & gIOKitDebug)
316670eb
A
2904 IOLog("mapping(%x) desc %p @ %qx, map %p, address %qx, offset %qx, length %qx\n",
2905 err, this, (uint64_t)sourceAddr, mapping, address, offset, length);
2d21ac55 2906#endif
0b4e3aa0 2907
2d21ac55
A
2908 if (err == KERN_SUCCESS)
2909 mapping->fAddress = address + pageOffset;
2910 else
2911 mapping->fAddress = NULL;
2912 }
2913 }
2914 while( false );
0b4e3aa0 2915
2d21ac55 2916 return (err);
0b4e3aa0
A
2917}
2918
0b4e3aa0
A
2919IOReturn IOMemoryDescriptor::handleFault(
2920 void * _pager,
2921 vm_map_t addressMap,
2d21ac55
A
2922 mach_vm_address_t address,
2923 mach_vm_size_t sourceOffset,
2924 mach_vm_size_t length,
0b4e3aa0
A
2925 IOOptionBits options )
2926{
2927 IOReturn err = kIOReturnSuccess;
2928 memory_object_t pager = (memory_object_t) _pager;
2d21ac55
A
2929 mach_vm_size_t size;
2930 mach_vm_size_t bytes;
2931 mach_vm_size_t page;
2932 mach_vm_size_t pageOffset;
2933 mach_vm_size_t pagerOffset;
0b4e3aa0 2934 IOPhysicalLength segLen;
55e303ae 2935 addr64_t physAddr;
0b4e3aa0 2936
2d21ac55
A
2937 if( !addressMap)
2938 {
2939 if( kIOMemoryRedirected & _flags)
2940 {
b0d623f7 2941#if DEBUG
2d21ac55 2942 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
1c79356b 2943#endif
0b4e3aa0 2944 do {
9bccf70c 2945 SLEEP;
0b4e3aa0
A
2946 } while( kIOMemoryRedirected & _flags );
2947 }
1c79356b 2948
0b4e3aa0 2949 return( kIOReturnSuccess );
1c79356b
A
2950 }
2951
b0d623f7 2952 physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
0b4e3aa0 2953 assert( physAddr );
55e303ae
A
2954 pageOffset = physAddr - trunc_page_64( physAddr );
2955 pagerOffset = sourceOffset;
0b4e3aa0
A
2956
2957 size = length + pageOffset;
2958 physAddr -= pageOffset;
1c79356b
A
2959
2960 segLen += pageOffset;
0b4e3aa0 2961 bytes = size;
2d21ac55
A
2962 do
2963 {
1c79356b
A
2964 // in the middle of the loop only map whole pages
2965 if( segLen >= bytes)
2966 segLen = bytes;
b0d623f7 2967 else if( segLen != trunc_page( segLen))
1c79356b 2968 err = kIOReturnVMError;
55e303ae 2969 if( physAddr != trunc_page_64( physAddr))
1c79356b 2970 err = kIOReturnBadArgument;
2971 if (kIOReturnSuccess != err)
2972 break;
1c79356b 2973
b0d623f7 2974#if DEBUG
1c79356b 2975 if( kIOLogMapping & gIOKitDebug)
b0d623f7 2976 IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
0b4e3aa0 2977 addressMap, address + pageOffset, physAddr + pageOffset,
1c79356b
A
2978 segLen - pageOffset);
2979#endif
2980
2d21ac55 2981
0b4e3aa0 2982 if( pager) {
316670eb 2983 if( reserved && reserved->dp.pagerContig) {
0b4e3aa0 2984 IOPhysicalLength allLen;
55e303ae 2985 addr64_t allPhys;
0b4e3aa0 2986
b0d623f7 2987 allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
0b4e3aa0 2988 assert( allPhys );
b0d623f7 2989 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
2d21ac55
A
2990 }
2991 else
2992 {
0b4e3aa0 2993
2d21ac55 2994 for( page = 0;
0b4e3aa0 2995 (page < segLen) && (KERN_SUCCESS == err);
2d21ac55
A
2996 page += page_size)
2997 {
2998 err = device_pager_populate_object(pager, pagerOffset,
2999 (ppnum_t)(atop_64(physAddr + page)), page_size);
3000 pagerOffset += page_size;
0b4e3aa0
A
3001 }
3002 }
3003 assert( KERN_SUCCESS == err );
3004 if( err)
3005 break;
3006 }
0c530ab8 3007
2d21ac55
A
3008 // This call to vm_fault causes an early pmap level resolution
3009 // of the mappings created above for kernel mappings, since
3010 // faulting in later can't take place from interrupt level.
9bccf70c
A
3011 /* *** ALERT *** */
3012 /* *** Temporary Workaround *** */
3013
2d21ac55
A
3014 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
3015 {
91447636
A
3016 vm_fault(addressMap,
3017 (vm_map_offset_t)address,
3018 VM_PROT_READ|VM_PROT_WRITE,
3019 FALSE, THREAD_UNINT, NULL,
3020 (vm_map_offset_t)0);
9bccf70c
A
3021 }
3022
3023 /* *** Temporary Workaround *** */
3024 /* *** ALERT *** */
0c530ab8 3025
1c79356b 3026 sourceOffset += segLen - pageOffset;
0b4e3aa0 3027 address += segLen;
1c79356b
A
3028 bytes -= segLen;
3029 pageOffset = 0;
3030
2d21ac55 3031 }
b0d623f7 3032 while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));
1c79356b 3033
2d21ac55 3034 if (bytes)
1c79356b 3035 err = kIOReturnBadArgument;
1c79356b 3036
2d21ac55 3037 return (err);
1c79356b
A
3038}
3039
3040IOReturn IOMemoryDescriptor::doUnmap(
3041 vm_map_t addressMap,
2d21ac55
A
3042 IOVirtualAddress __address,
3043 IOByteCount __length )
1c79356b 3044{
2d21ac55
A
3045 IOReturn err;
3046 mach_vm_address_t address;
3047 mach_vm_size_t length;
3048
3049 if (__length)
3050 {
3051 address = __address;
3052 length = __length;
3053 }
3054 else
3055 {
b0d623f7
A
3056 addressMap = ((IOMemoryMap *) __address)->fAddressMap;
3057 address = ((IOMemoryMap *) __address)->fAddress;
3058 length = ((IOMemoryMap *) __address)->fLength;
2d21ac55
A
3059 }
3060
3061 if ((addressMap == kernel_map)
3062 && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
2d21ac55 3063 addressMap = IOPageableMapForAddress( address );
1c79356b 3064
b0d623f7 3065#if DEBUG
1c79356b 3066 if( kIOLogMapping & gIOKitDebug)
2d21ac55
A
3067 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
3068 addressMap, address, length );
1c79356b
A
3069#endif
3070
2d21ac55 3071 err = mach_vm_deallocate( addressMap, address, length );
1c79356b 3072
2d21ac55 3073 return (err);
1c79356b
A
3074}
3075
91447636 3076IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
e3027f41 3077{
91447636 3078 IOReturn err = kIOReturnSuccess;
b0d623f7 3079 IOMemoryMap * mapping = 0;
3080 OSIterator * iter;
3081
3082 LOCK;
3083
91447636
A
3084 if( doRedirect)
3085 _flags |= kIOMemoryRedirected;
3086 else
3087 _flags &= ~kIOMemoryRedirected;
3088
e3027f41
A
3089 do {
3090 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
b0d623f7 3091 while( (mapping = (IOMemoryMap *) iter->getNextObject()))
91447636 3092 mapping->redirect( safeTask, doRedirect );
e3027f41 3093
91447636
A
3094 iter->release();
3095 }
e3027f41
A
3096 } while( false );
3097
91447636
A
3098 if (!doRedirect)
3099 {
9bccf70c 3100 WAKEUP;
0b4e3aa0
A
3101 }
3102
e3027f41
A
3103 UNLOCK;
3104
b0d623f7 3105#ifndef __LP64__
e3027f41
A
3106 // temporary binary compatibility
3107 IOSubMemoryDescriptor * subMem;
3108 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
91447636 3109 err = subMem->redirect( safeTask, doRedirect );
e3027f41 3110 else
91447636 3111 err = kIOReturnSuccess;
b0d623f7 3112#endif /* !__LP64__ */
e3027f41
A
3113
3114 return( err );
3115}
3116
b0d623f7 3117IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
e3027f41
A
3118{
3119 IOReturn err = kIOReturnSuccess;
3120
2d21ac55 3121 if( fSuperMap) {
b0d623f7 3122// err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
e3027f41
A
3123 } else {
3124
3125 LOCK;
3126
3127 do
91447636 3128 {
2d21ac55 3129 if (!fAddress)
0c530ab8 3130 break;
2d21ac55 3131 if (!fAddressMap)
0c530ab8
A
3132 break;
3133
2d21ac55
A
3134 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
3135 && (0 == (fOptions & kIOMapStatic)))
0c530ab8 3136 {
2d21ac55 3137 IOUnmapPages( fAddressMap, fAddress, fLength );
b0d623f7
A
3138 err = kIOReturnSuccess;
3139#if DEBUG
2d21ac55 3140 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
e3027f41 3141#endif
0c530ab8 3142 }
2d21ac55 3143 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
0c530ab8
A
3144 {
3145 IOOptionBits newMode;
2d21ac55
A
3146 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
3147 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
0c530ab8
A
3148 }
3149 }
3150 while (false);
0c530ab8 3151 UNLOCK;
e3027f41
A
3152 }
3153
2d21ac55
A
3154 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3155 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3156 && safeTask
2d21ac55
A
3157 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
3158 fMemory->redirect(safeTask, doRedirect);
91447636 3159
e3027f41
A
3160 return( err );
3161}
3162
b0d623f7 3163IOReturn IOMemoryMap::unmap( void )
1c79356b
A
3164{
3165 IOReturn err;
3166
3167 LOCK;
3168
2d21ac55
A
3169 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
3170 && (0 == (fOptions & kIOMapStatic))) {
1c79356b 3171
2d21ac55 3172 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
1c79356b
A
3173
3174 } else
3175 err = kIOReturnSuccess;
3176
2d21ac55
A
3177 if (fAddressMap)
3178 {
3179 vm_map_deallocate(fAddressMap);
3180 fAddressMap = 0;
3181 }
3182
3183 fAddress = 0;
1c79356b
A
3184
3185 UNLOCK;
3186
3187 return( err );
3188}
3189
b0d623f7 3190void IOMemoryMap::taskDied( void )
1c79356b
A
3191{
3192 LOCK;
3193 if (fUserClientUnmap)
3194 unmap();
2d21ac55
A
3195 if( fAddressMap) {
3196 vm_map_deallocate(fAddressMap);
3197 fAddressMap = 0;
1c79356b 3198 }
2d21ac55
A
3199 fAddressTask = 0;
3200 fAddress = 0;
1c79356b
A
3201 UNLOCK;
3202}
3203
3204IOReturn IOMemoryMap::userClientUnmap( void )
3205{
3206 fUserClientUnmap = true;
3207 return (kIOReturnSuccess);
3208}
3209
3210// Overload the release mechanism. All mappings must be a member
3211// of a memory descriptor's _mappings set. This means that we
3212// always have 2 references on a mapping. When either of these references
3213// is released we need to free ourselves.
b0d623f7 3214void IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 3215{
55e303ae 3216 LOCK;
9bccf70c 3217 super::taggedRelease(tag, 2);
55e303ae 3218 UNLOCK;
9bccf70c
A
3219}
3220
b0d623f7 3221void IOMemoryMap::free()
1c79356b
A
3222{
3223 unmap();
3224
2d21ac55
A
3225 if (fMemory)
3226 {
1c79356b 3227 LOCK;
2d21ac55 3228 fMemory->removeMapping(this);
1c79356b 3229 UNLOCK;
2d21ac55 3230 fMemory->release();
1c79356b
A
3231 }
3232
2d21ac55 3233 if (fOwner && (fOwner != fMemory))
91447636
A
3234 {
3235 LOCK;
2d21ac55 3236 fOwner->removeMapping(this);
91447636
A
3237 UNLOCK;
3238 }
3239
2d21ac55
A
3240 if (fSuperMap)
3241 fSuperMap->release();
1c79356b 3242
2d21ac55
A
3243 if (fRedirUPL) {
3244 upl_commit(fRedirUPL, NULL, 0);
3245 upl_deallocate(fRedirUPL);
91447636
A
3246 }
3247
1c79356b
A
3248 super::free();
3249}
3250
b0d623f7 3251IOByteCount IOMemoryMap::getLength()
1c79356b 3252{
2d21ac55 3253 return( fLength );
1c79356b
A
3254}
3255
b0d623f7 3256IOVirtualAddress IOMemoryMap::getVirtualAddress()
1c79356b 3257{
b0d623f7 3258#ifndef __LP64__
2d21ac55
A
3259 if (fSuperMap)
3260 fSuperMap->getVirtualAddress();
3261 else if (fAddressMap
3262 && vm_map_is_64bit(fAddressMap)
3263 && (sizeof(IOVirtualAddress) < 8))
2d21ac55
A
3264 {
3265 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
3266 }
b0d623f7 3267#endif /* !__LP64__ */
2d21ac55
A
3268
3269 return (fAddress);
3270}
3271
b0d623f7
A
3272#ifndef __LP64__
3273mach_vm_address_t IOMemoryMap::getAddress()
2d21ac55
A
3274{
3275 return( fAddress);
3276}
3277
b0d623f7 3278mach_vm_size_t IOMemoryMap::getSize()
2d21ac55
A
3279{
3280 return( fLength );
1c79356b 3281}
b0d623f7 3282#endif /* !__LP64__ */
1c79356b 3283
2d21ac55 3284
b0d623f7 3285task_t IOMemoryMap::getAddressTask()
1c79356b 3286{
2d21ac55
A
3287 if( fSuperMap)
3288 return( fSuperMap->getAddressTask());
1c79356b 3289 else
2d21ac55 3290 return( fAddressTask);
1c79356b
A
3291}
3292
b0d623f7 3293IOOptionBits IOMemoryMap::getMapOptions()
1c79356b 3294{
2d21ac55 3295 return( fOptions);
1c79356b
A
3296}
3297
b0d623f7 3298IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor()
1c79356b 3299{
2d21ac55 3300 return( fMemory );
1c79356b
A
3301}
3302
3303IOMemoryMap * IOMemoryMap::copyCompatible(
3304 IOMemoryMap * newMapping )
1c79356b 3305{
2d21ac55
A
3306 task_t task = newMapping->getAddressTask();
3307 mach_vm_address_t toAddress = newMapping->fAddress;
3308 IOOptionBits _options = newMapping->fOptions;
3309 mach_vm_size_t _offset = newMapping->fOffset;
3310 mach_vm_size_t _length = newMapping->fLength;
1c79356b 3311
2d21ac55 3312 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
1c79356b 3313 return( 0 );
2d21ac55 3314 if( (fOptions ^ _options) & kIOMapReadOnly)
9bccf70c
A
3315 return( 0 );
3316 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2d21ac55 3317 && ((fOptions ^ _options) & kIOMapCacheMask))
1c79356b
A
3318 return( 0 );
3319
2d21ac55 3320 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
1c79356b
A
3321 return( 0 );
3322
2d21ac55 3323 if( _offset < fOffset)
1c79356b
A
3324 return( 0 );
3325
2d21ac55 3326 _offset -= fOffset;
1c79356b 3327
2d21ac55 3328 if( (_offset + _length) > fLength)
1c79356b
A
3329 return( 0 );
3330
2d21ac55
A
3331 retain();
3332 if( (fLength == _length) && (!_offset))
3333 {
2d21ac55
A
3334 newMapping = this;
3335 }
3336 else
3337 {
3338 newMapping->fSuperMap = this;
6d2010ae 3339 newMapping->fOffset = fOffset + _offset;
2d21ac55 3340 newMapping->fAddress = fAddress + _offset;
1c79356b
A
3341 }
3342
2d21ac55 3343 return( newMapping );
1c79356b
A
3344}
3345
0c530ab8 3346IOPhysicalAddress
3347#ifdef __LP64__
3348IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options)
3349#else /* !__LP64__ */
3350IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3351#endif /* !__LP64__ */
1c79356b
A
3352{
3353 IOPhysicalAddress address;
3354
3355 LOCK;
b0d623f7
A
3356#ifdef __LP64__
3357 address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options );
3358#else /* !__LP64__ */
2d21ac55 3359 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
b0d623f7 3360#endif /* !__LP64__ */
1c79356b
A
3361 UNLOCK;
3362
3363 return( address );
3364}
3365
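/*
 * Illustrative sketch (not from the original source): with a mapping in hand,
 * getPhysicalAddress() (the ex-inline above, equivalent to
 * getPhysicalSegment(0, 0)) yields the physical address backing the start of
 * the mapped range.  "map" is a hypothetical IOMemoryMap.
 *
 *     IOPhysicalAddress phys = map->getPhysicalAddress();
 *     // phys is only meaningful while the underlying memory remains wired
 */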
3366/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3367
3368#undef super
3369#define super OSObject
3370
3371/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3372
3373void IOMemoryDescriptor::initialize( void )
3374{
3375 if( 0 == gIOMemoryLock)
3376 gIOMemoryLock = IORecursiveLockAlloc();
3377
3378 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3379 ptoa_64(gIOMaximumMappedIOPageCount), 64);
0c530ab8 3380 gIOLastPage = IOGetLastPageNumber();
1c79356b
A
3381}
3382
3383void IOMemoryDescriptor::free( void )
3384{
3385 if( _mappings)
3386 _mappings->release();
3387
3388 super::free();
3389}
3390
3391IOMemoryMap * IOMemoryDescriptor::setMapping(
3392 task_t intoTask,
3393 IOVirtualAddress mapAddress,
55e303ae 3394 IOOptionBits options )
1c79356b 3395{
2d21ac55
A
3396 return (createMappingInTask( intoTask, mapAddress,
3397 options | kIOMapStatic,
3398 0, getLength() ));
1c79356b
A
3399}
3400
3401IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 3402 IOOptionBits options )
1c79356b 3403{
2d21ac55
A
3404 return (createMappingInTask( kernel_task, 0,
3405 options | kIOMapAnywhere,
3406 0, getLength() ));
1c79356b
A
3407}
3408
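/*
 * Illustrative sketch (not from the original source): map() produces a kernel
 * mapping of the descriptor.  "md" is a hypothetical IOMemoryDescriptor;
 * error handling is abbreviated.
 *
 *     IOMemoryMap * map = md->map(kIOMapAnywhere);
 *     if (map)
 *     {
 *         void * buf = (void *) map->getVirtualAddress();
 *         // ... access the memory through buf ...
 *         map->release();      // dropping the client reference unmaps
 *     }
 */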
b0d623f7 3409#ifndef __LP64__
2d21ac55
A
3410IOMemoryMap * IOMemoryDescriptor::map(
3411 task_t intoTask,
3412 IOVirtualAddress atAddress,
1c79356b 3413 IOOptionBits options,
55e303ae
A
3414 IOByteCount offset,
3415 IOByteCount length )
1c79356b 3416{
2d21ac55
A
3417 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3418 {
3419 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3420 return (0);
3421 }
3422
3423 return (createMappingInTask(intoTask, atAddress,
3424 options, offset, length));
3425}
b0d623f7 3426#endif /* !__LP64__ */
2d21ac55
A
3427
3428IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3429 task_t intoTask,
3430 mach_vm_address_t atAddress,
3431 IOOptionBits options,
3432 mach_vm_size_t offset,
3433 mach_vm_size_t length)
3434{
3435 IOMemoryMap * result;
3436 IOMemoryMap * mapping;
2d21ac55
A
3437
3438 if (0 == length)
1c79356b
A
3439 length = getLength();
3440
b0d623f7 3441 mapping = new IOMemoryMap;
2d21ac55
A
3442
3443 if( mapping
3444 && !mapping->init( intoTask, atAddress,
3445 options, offset, length )) {
3446 mapping->release();
3447 mapping = 0;
3448 }
3449
3450 if (mapping)
3451 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3452 else
3453 result = 0;
3454
b0d623f7 3455#if DEBUG
2d21ac55 3456 if (!result)
3457 IOLog("createMappingInTask failed desc %p, addr %qx, options %x, offset %qx, length %llx\n",
3458 this, atAddress, (uint32_t) options, offset, length);
2d21ac55
A
3459#endif
3460
3461 return (result);
1c79356b
A
3462}
3463
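/*
 * Illustrative sketch (not from the original source): createMappingInTask()
 * maps a descriptor into an arbitrary task, typically the task that owns a
 * user client.  "md" and "userTask" are hypothetical; offset/length of 0 map
 * the whole descriptor.
 *
 *     IOMemoryMap * userMap = md->createMappingInTask(userTask,
 *                                                     0,
 *                                                     kIOMapAnywhere | kIOMapReadOnly,
 *                                                     0, 0);
 *     if (userMap)
 *     {
 *         mach_vm_address_t userAddr = userMap->getAddress();
 *         // hand userAddr and userMap->getLength() to the client;
 *         // userMap->release() tears the mapping down
 *     }
 */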
3464#ifndef __LP64__ // there is only a 64 bit version for LP64
3465IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
91447636
A
3466 IOOptionBits options,
3467 IOByteCount offset)
2d21ac55
A
3468{
3469 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3470}
b0d623f7 3471#endif
2d21ac55 3472
b0d623f7 3473IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2d21ac55
A
3474 IOOptionBits options,
3475 mach_vm_size_t offset)
91447636
A
3476{
3477 IOReturn err = kIOReturnSuccess;
3478 IOMemoryDescriptor * physMem = 0;
3479
3480 LOCK;
3481
2d21ac55 3482 if (fAddress && fAddressMap) do
91447636 3483 {
2d21ac55
A
3484 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3485 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3486 {
2d21ac55 3487 physMem = fMemory;
91447636
A
3488 physMem->retain();
3489 }
3490
2d21ac55 3491 if (!fRedirUPL)
91447636 3492 {
b0d623f7 3493 vm_size_t size = round_page(fLength);
91447636
A
3494 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3495 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2d21ac55 3496 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
91447636
A
3497 NULL, NULL,
3498 &flags))
2d21ac55 3499 fRedirUPL = 0;
91447636
A
3500
3501 if (physMem)
3502 {
2d21ac55 3503 IOUnmapPages( fAddressMap, fAddress, fLength );
3504 if (false)
3505 physMem->redirect(0, true);
91447636
A
3506 }
3507 }
3508
3509 if (newBackingMemory)
3510 {
2d21ac55 3511 if (newBackingMemory != fMemory)
91447636 3512 {
2d21ac55
A
3513 fOffset = 0;
3514 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3515 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3516 offset, fLength))
91447636
A
3517 err = kIOReturnError;
3518 }
2d21ac55 3519 if (fRedirUPL)
91447636 3520 {
2d21ac55
A
3521 upl_commit(fRedirUPL, NULL, 0);
3522 upl_deallocate(fRedirUPL);
3523 fRedirUPL = 0;
91447636 3524 }
b0d623f7 3525 if (false && physMem)
91447636
A
3526 physMem->redirect(0, false);
3527 }
3528 }
3529 while (false);
3530
3531 UNLOCK;
3532
3533 if (physMem)
3534 physMem->release();
3535
3536 return (err);
3537}
3538
1c79356b
A
3539IOMemoryMap * IOMemoryDescriptor::makeMapping(
3540 IOMemoryDescriptor * owner,
2d21ac55
A
3541 task_t __intoTask,
3542 IOVirtualAddress __address,
1c79356b 3543 IOOptionBits options,
2d21ac55
A
3544 IOByteCount __offset,
3545 IOByteCount __length )
1c79356b 3546{
b0d623f7 3547#ifndef __LP64__
2d21ac55 3548 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
b0d623f7 3549#endif /* !__LP64__ */
2d21ac55 3550
91447636 3551 IOMemoryDescriptor * mapDesc = 0;
b0d623f7 3552 IOMemoryMap * result = 0;
2d21ac55
A
3553 OSIterator * iter;
3554
b0d623f7 3555 IOMemoryMap * mapping = (IOMemoryMap *) __address;
2d21ac55
A
3556 mach_vm_size_t offset = mapping->fOffset + __offset;
3557 mach_vm_size_t length = mapping->fLength;
3558
3559 mapping->fOffset = offset;
1c79356b
A
3560
3561 LOCK;
3562
91447636
A
3563 do
3564 {
2d21ac55
A
3565 if (kIOMapStatic & options)
3566 {
3567 result = mapping;
3568 addMapping(mapping);
3569 mapping->setMemoryDescriptor(this, 0);
3570 continue;
3571 }
3572
91447636
A
3573 if (kIOMapUnique & options)
3574 {
060df5ea 3575 addr64_t phys;
91447636 3576 IOByteCount physLen;
1c79356b 3577
2d21ac55 3578// if (owner != this) continue;
1c79356b 3579
3580 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3581 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
91447636 3582 {
b0d623f7 3583 phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone);
91447636
A
3584 if (!phys || (physLen < length))
3585 continue;
3586
b0d623f7
A
3587 mapDesc = IOMemoryDescriptor::withAddressRange(
3588 phys, length, getDirection() | kIOMemoryMapperNone, NULL);
91447636
A
3589 if (!mapDesc)
3590 continue;
3591 offset = 0;
2d21ac55 3592 mapping->fOffset = offset;
91447636
A
3593 }
3594 }
3595 else
3596 {
2d21ac55
A
3597 // look for a compatible existing mapping
3598 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3599 {
b0d623f7
A
3600 IOMemoryMap * lookMapping;
3601 while ((lookMapping = (IOMemoryMap *) iter->getNextObject()))
2d21ac55
A
3602 {
3603 if ((result = lookMapping->copyCompatible(mapping)))
3604 {
3605 addMapping(result);
3606 result->setMemoryDescriptor(this, offset);
91447636 3607 break;
2d21ac55 3608 }
91447636
A
3609 }
3610 iter->release();
3611 }
2d21ac55 3612 if (result || (options & kIOMapReference))
3613 {
3614 if (result != mapping)
3615 {
3616 mapping->release();
3617 mapping = NULL;
3618 }
91447636 3619 continue;
6d2010ae 3620 }
2d21ac55 3621 }
91447636 3622
2d21ac55
A
3623 if (!mapDesc)
3624 {
3625 mapDesc = this;
91447636
A
3626 mapDesc->retain();
3627 }
2d21ac55
A
3628 IOReturn
3629 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3630 if (kIOReturnSuccess == kr)
3631 {
3632 result = mapping;
3633 mapDesc->addMapping(result);
3634 result->setMemoryDescriptor(mapDesc, offset);
3635 }
3636 else
3637 {
1c79356b 3638 mapping->release();
2d21ac55 3639 mapping = NULL;
1c79356b 3640 }
91447636 3641 }
2d21ac55 3642 while( false );
1c79356b
A
3643
3644 UNLOCK;
3645
91447636
A
3646 if (mapDesc)
3647 mapDesc->release();
3648
2d21ac55 3649 return (result);
1c79356b
A
3650}
3651
3652void IOMemoryDescriptor::addMapping(
3653 IOMemoryMap * mapping )
3654{
2d21ac55
A
3655 if( mapping)
3656 {
1c79356b
A
3657 if( 0 == _mappings)
3658 _mappings = OSSet::withCapacity(1);
9bccf70c
A
3659 if( _mappings )
3660 _mappings->setObject( mapping );
1c79356b
A
3661 }
3662}
3663
3664void IOMemoryDescriptor::removeMapping(
3665 IOMemoryMap * mapping )
3666{
9bccf70c 3667 if( _mappings)
1c79356b 3668 _mappings->removeObject( mapping);
1c79356b
A
3669}
3670
3671#ifndef __LP64__
3672// obsolete initializers
3673// - initWithOptions is the designated initializer
1c79356b 3674bool
b0d623f7 3675IOMemoryDescriptor::initWithAddress(void * address,
55e303ae
A
3676 IOByteCount length,
3677 IODirection direction)
1c79356b
A
3678{
3679 return( false );
3680}
3681
3682bool
b0d623f7 3683IOMemoryDescriptor::initWithAddress(IOVirtualAddress address,
55e303ae
A
3684 IOByteCount length,
3685 IODirection direction,
3686 task_t task)
1c79356b
A
3687{
3688 return( false );
3689}
3690
3691bool
b0d623f7 3692IOMemoryDescriptor::initWithPhysicalAddress(
1c79356b 3693 IOPhysicalAddress address,
55e303ae
A
3694 IOByteCount length,
3695 IODirection direction )
1c79356b
A
3696{
3697 return( false );
3698}
3699
3700bool
b0d623f7 3701IOMemoryDescriptor::initWithRanges(
1c79356b
A
3702 IOVirtualRange * ranges,
3703 UInt32 withCount,
55e303ae
A
3704 IODirection direction,
3705 task_t task,
3706 bool asReference)
1c79356b
A
3707{
3708 return( false );
3709}
3710
3711bool
b0d623f7 3712IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
1c79356b 3713 UInt32 withCount,
55e303ae
A
3714 IODirection direction,
3715 bool asReference)
1c79356b
A
3716{
3717 return( false );
3718}
3719
b0d623f7
A
3720void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3721 IOByteCount * lengthOfSegment)
3722{
3723 return( 0 );
3724}
3725#endif /* !__LP64__ */
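
/*
 * The legacy initializers above are only built for !__LP64__ and always fail;
 * initWithOptions() is the designated initializer.  A hedged sketch of the
 * modern replacement (buffer, length and task values are illustrative only):
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         (mach_vm_address_t) buffer, length, kIODirectionOut, current_task());
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... perform I/O against the wired pages ...
 *         md->complete();
 *     }
 *     if (md) md->release();
 */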

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] = OSNumber::withNumber(addr, sizeof(addr) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}
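
/*
 * serialize() snapshots the range list under the lock, then emits the
 * descriptor as an XML "array" of one { address, length } dictionary per
 * range.  The serialized form looks roughly like (illustrative):
 *
 *     <array>
 *       <dict>
 *         <key>address</key> <integer>...</integer>
 *         <key>length</key>  <integer>...</integer>
 *       </dict>
 *     </array>
 */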

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

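/*
 * The OSMetaClassDefineReserved{Used,Unused} slots above pad the vtable for
 * binary compatibility; under __LP64__ slots 1-7 revert to unused because the
 * legacy overloads that consumed them are not built on 64-bit kernels.
 */
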
/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }
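
/*
 * Note: getPhysicalAddress() is shorthand for getPhysicalSegment(0, 0), so it
 * reports only the physical address of the first segment; callers that need
 * the full scatter/gather list should walk getPhysicalSegment() over the whole
 * length (or, more commonly, use IODMACommand).
 */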