/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
        memory_object_t    pager,
        int                device_handle,
        vm_size_t          size,
        int                flags);

void
device_pager_deallocate(
        memory_object_t);

kern_return_t
device_pager_populate_object(
        memory_object_t    pager,
        vm_object_offset_t offset,
        ppnum_t            phys_addr,
        vm_size_t          size);

kern_return_t
memory_object_iopl_request(
        ipc_port_t             port,
        memory_object_offset_t offset,
        vm_size_t              *upl_size,
        upl_t                  *upl_ptr,
        upl_page_info_array_t  user_page_list,
        unsigned int           *page_list_count,
        int                    *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMaximumMappedIOByteCount	(512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

IOCopyMapper *    gIOCopyMapper = NULL;

static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

ppnum_t           gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)
public:
    IOMemoryDescriptor * fMemory;
    IOMemoryMap *        fSuperMap;
    mach_vm_size_t       fOffset;
    mach_vm_address_t    fAddress;
    mach_vm_size_t       fLength;
    task_t               fAddressTask;
    vm_map_t             fAddressMap;
    IOOptionBits         fOptions;
    upl_t                fRedirUPL;
    ipc_port_t           fRedirEntry;
    IOMemoryDescriptor * fOwner;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual mach_vm_address_t    getAddress();
    virtual mach_vm_size_t       getSize();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits         options,
                                          IOByteCount          offset = 0);

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits         options,
                                          mach_vm_size_t       offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible( _IOMemoryMap * newMapping );

    bool init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      options,
        mach_vm_size_t    offset,
        mach_vm_size_t    length );

    bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

    IOReturn redirect(
        task_t intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t fPageInfo;      // Pointer to page list or index into it
    ppnum_t fMappedBase;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[];
    ioPLBlock fBlocks[];
};

#define getDataP(osd)	((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)	((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)	\
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)	(&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
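
/*
 * Layout sketch (illustrative, not part of the original source): the
 * ioGMDData block stored in _memoryEntries is the fixed header, then
 * fPageCnt upl_page_info_t entries, then the ioPLBlock records appended
 * as UPLs are created.  For a hypothetical 3-page descriptor backed by
 * one UPL:
 *
 *   computeDataSize(3, 1) == sizeof(ioGMDData)
 *                            + 3 * sizeof(upl_page_info_t)
 *                            + 1 * sizeof(ioPLBlock)
 *
 * and getIOPLList() points just past the page list.
 */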


/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )


extern "C" {

kern_return_t device_data_action(
               int                   device_handle,
               ipc_port_t            device_pager,
               vm_prot_t             protection,
               vm_object_offset_t    offset,
               vm_size_t             size)
{
    struct ExpansionData {
        void *                          devicePager;
        unsigned int                    pagerContig:1;
        unsigned int                    unused:31;
        IOMemoryDescriptor *            memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *                          devicePager;
        unsigned int                    pagerContig:1;
        unsigned int                    unused:31;
        IOMemoryDescriptor *            memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
};	// end extern "C"

// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed and NULLs don't have to be
// checked for, as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO == type
        || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
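
/*
 * Call sketch (illustrative): callers pass locals by reference and read
 * them back after the call, as createNamedEntry() below does:
 *
 *   user_addr_t      addr;
 *   IOPhysicalLength len;
 *   getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, 0);
 */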

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
#if TEST_V64
    if (task)
    {
        IOOptionBits options = (IOOptionBits) direction;
        if (task == kernel_task)
            options |= kIOMemoryAutoPrepare;
        return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
    }
#endif
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
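
/*
 * Usage sketch (illustrative; 'buffer' is a hypothetical kernel buffer):
 *
 *   char buffer[1024];
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
 *       buffer, sizeof(buffer), kIODirectionOutIn);
 *   if (md)
 *   {
 *       // ... use md, e.g. map() it or hand it to a DMA engine ...
 *       md->release();
 *   }
 */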

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
#if TEST_P64
    return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
#endif
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
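
/*
 * Usage sketch (illustrative; 'userTask', 'userVA1' and 'userVA2' are
 * assumed, not defined here): describing two discontiguous user ranges:
 *
 *   IOAddressRange ranges[2] = {
 *       { userVA1, PAGE_SIZE },
 *       { userVA2, PAGE_SIZE }
 *   };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRanges(
 *       ranges, 2, kIODirectionIn, userTask);
 */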


/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

// Can't leave abstract, but this should never be used directly.
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
        self->release();
        self = 0;
    }
    return self;
}
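
/*
 * Usage sketch (illustrative; 'md' is an assumed existing descriptor):
 * exposing just its first page as a descriptor of its own:
 *
 *   IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *       md, 0, PAGE_SIZE, md->getDirection());
 */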

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t            prot       = VM_PROT_READ;
#if CONFIG_EMBEDDED
    if (kIODirectionOut != (kIODirectionOutIn & _flags))
#endif
        prot |= VM_PROT_WRITE;

    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
#if IOASSERT
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                  (UInt64)range0Addr, (UInt32)actualSize, size);
#endif
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}

/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}
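
/*
 * Re-use sketch (illustrative of the behavior documented above; 'bufA',
 * 'bufB', 'lenA' and 'lenB' are assumed): one instance may be retargeted
 * without reallocation:
 *
 *   IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
 *   md->initWithAddress(bufA, lenA, kIODirectionOut);  // first target
 *   // ... later ...
 *   md->initWithAddress(bufB, lenB, kIODirectionOut);  // retargeted
 */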

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor.  The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, a UPL
 * from the UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;
        count = 1;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
    case kIOMemoryTypeVirtual64:
        assert(task);
        if (!task)
            return false;

        if (vm_map_is_64bit(get_task_map(task))
            && (kIOMemoryTypeVirtual == type)
            && ((IOVirtualRange *) buffers)->address)
        {
            OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
            return false;
        }
        break;

    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
    case kIOMemoryTypePhysical64:
        mapper = kIOMapperNone;

    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;	/* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && _rangesIsAllocated)
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        if (_memEntry)
            { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
        if (_mappings)
            _mappings->flushCollection();
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;	// No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags   = options;
    _task    = task;

    // DEPRECATED variable initialisation
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

 //     _wireCount++;	// UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
            _rangesIsAllocated = true;
            switch (_flags & kIOMemoryTypeMask)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                _ranges.v = IONew(IOVirtualRange, count);
                if (!_ranges.v)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            UInt32 len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);	// Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed.
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;	// Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->memory = 0;
        UNLOCK;
    }

    if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
    {
        while (_wireCount)
            complete();
    }
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    if (_prepareLock)
        IOLockFree(_prepareLock);

    super::free();
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
                    panic("IOGMD::unmapFromKernel deprecated");
/* DEPRECATED */ }
/* DEPRECATED */
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
                    panic("IOGMD::mapIntoKernel deprecated");
/* DEPRECATED */ }

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}

IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    assert(!remaining);

    return length - remaining;
}
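
/*
 * Usage sketch (illustrative; 'md' is an assumed prepared descriptor):
 * copying the first 16 bytes it describes into a local header:
 *
 *   UInt8 header[16];
 *   IOByteCount got = md->readBytes(0, header, sizeof(header));
 *   // 'got' is the number of bytes actually copied
 */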

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}

// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
                    panic("IOGMD::setPosition deprecated");
/* DEPRECATED */ }

IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = _direction;
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries) {
                ioGMDData *gmdData = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(gmdData);
                UInt count = getNumIOPL(_memoryEntries, gmdData);

                data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
                               && ioplList[0].fMappedBase);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
            else
                data->fIsMapped = false;
        }

        return kIOReturnSuccess;
    }
    else if (!(kIOMDWalkSegments & op))
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if ((kIOMDFirstSegment != op)
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;	// Start from beginning

    UInt length;
    UInt64 address;
    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        UInt len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        ioGMDData * dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;	// Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && ioplInfo.fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = ptoa_64(ioplInfo.fMappedBase) + offset;
            continue;	// Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;	// Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;


        assert(address);
        assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

    if (gIOSystemMapper && (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask)))
        return (super::getPhysicalSegment64(offset, lengthOfSegment));

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = false;

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
                DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                                        ret, this, state->fOffset,
                                        state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }
        if (!address)
            length = 0;
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
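
/*
 * Iteration sketch (illustrative; 'md' is an assumed prepared descriptor):
 * walking every physical segment, e.g. to build a scatter/gather list:
 *
 *   IOByteCount offset = 0, segLen;
 *   addr64_t    segAddr;
 *   while ((segAddr = md->getPhysicalSegment64(offset, &segLen)))
 *   {
 *       // ... program one element with (segAddr, segLen) ...
 *       offset += segLen;
 *   }
 */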

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

//  assert(offset <= _length);

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = true;

        ret = dmaCommandOperation(
                kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
                DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                                        ret, this, state->fOffset,
                                        state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        if (!address)
            length = 0;
    }

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
                    address, length, (getMetaClass())->getClassName());
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64 = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    assert(offset <= _length);

    if ( type == kIOMemoryTypeUPL)
        return super::getSourceSegment( offset, lengthOfSegment );
    else if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;
        Ranges vec = _ranges;
        user_addr_t addr;

        // Find starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // let's find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
        else
            length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}

/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
                    if (_task == kernel_task)
                        return (void *) getSourceSegment(offset, lengthOfSegment);
                    else
                        panic("IOGMD::getVirtualSegment deprecated");

                    return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */



IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = getLength();
        data->fSGCount = 0;
        data->fDirection = _direction;
        if (IOMapper::gSystem)
            data->fIsMapped = true;
        data->fIsPrepared = true;	// Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
        data->fLength = length;
    }
    else
        return kIOReturnBadArgument;

    return kIOReturnSuccess;
}

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }

        control = VM_PURGABLE_SET_STATE;
        switch (newState)
        {
            case kIOMemoryPurgeableKeepCurrent:
                control = VM_PURGABLE_GET_STATE;
                break;

            case kIOMemoryPurgeableNonVolatile:
                state = VM_PURGABLE_NONVOLATILE;
                break;
            case kIOMemoryPurgeableVolatile:
                state = VM_PURGABLE_VOLATILE;
                break;
            case kIOMemoryPurgeableEmpty:
                state = VM_PURGABLE_EMPTY;
                break;
            default:
                err = kIOReturnBadArgument;
                break;
        }

        if (kIOReturnSuccess != err)
            break;

        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                switch (state)
                {
                    case VM_PURGABLE_NONVOLATILE:
                        state = kIOMemoryPurgeableNonVolatile;
                        break;
                    case VM_PURGABLE_VOLATILE:
                        state = kIOMemoryPurgeableVolatile;
                        break;
                    case VM_PURGABLE_EMPTY:
                        state = kIOMemoryPurgeableEmpty;
                        break;
                    default:
                        state = kIOMemoryPurgeableNonVolatile;
                        err = kIOReturnNotReady;
                        break;
                }
                *oldState = state;
            }
        }
    }
    while (false);

    return (err);
}
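
/*
 * Usage sketch (illustrative; 'md' is an assumed descriptor backed by a
 * named entry): marking a cache buffer volatile, then reclaiming it:
 *
 *   IOOptionBits old;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &old);
 *   // ... later, before touching the data again ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &old);
 *   bool wasEmptied = (kIOMemoryPurgeableEmpty == old);
 */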

extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
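
/*
 * Usage sketch (illustrative; 'md' is an assumed descriptor): flushing CPU
 * caches over the whole range before non-coherent DMA:
 *
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */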

#if defined(__ppc__) || defined(__arm__)
extern vm_offset_t static_memory_end;
#define io_kernel_static_end	static_memory_end
#else
extern vm_offset_t first_avail;
#define io_kernel_static_end	first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        vm_address_t            offset,
        vm_size_t               *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
0b4e3aa0 1783
55e303ae
A
1784IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1785{
91447636 1786 IOOptionBits type = _flags & kIOMemoryTypeMask;
2d21ac55 1787 IOReturn error = kIOReturnCannotWire;
55e303ae
A
1788 ioGMDData *dataP;
1789 ppnum_t mapBase = 0;
1790 IOMapper *mapper;
1791 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 1792
55e303ae 1793 assert(!_wireCount);
0c530ab8 1794 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1c79356b 1795
55e303ae
A
1796 if (_pages >= gIOMaximumMappedIOPageCount)
1797 return kIOReturnNoResources;
0b4e3aa0 1798
55e303ae
A
1799 dataP = getDataP(_memoryEntries);
1800 mapper = dataP->fMapper;
1801 if (mapper && _pages)
1802 mapBase = mapper->iovmAlloc(_pages);
d7e50217 1803
55e303ae
A
1804 // Note that appendBytes(NULL) zeros the data up to the
1805 // desired length.
1806 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1807 dataP = 0; // May no longer be valid so lets not get tempted.
de355530 1808
55e303ae
A
1809 if (forDirection == kIODirectionNone)
1810 forDirection = _direction;
1811
1812 int uplFlags; // This Mem Desc's default flags for upl creation
0c530ab8 1813 switch (kIODirectionOutIn & forDirection)
55e303ae
A
1814 {
1815 case kIODirectionOut:
1816 // Pages do not need to be marked as dirty on commit
1817 uplFlags = UPL_COPYOUT_FROM;
1818 _flags |= kIOMemoryPreparedReadOnly;
1819 break;
1820
1821 case kIODirectionIn:
1822 default:
1823 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1824 break;
1825 }
1826 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1827
0c530ab8
A
1828#ifdef UPL_NEED_32BIT_ADDR
1829 if (kIODirectionPrepareToPhys32 & forDirection)
1830 uplFlags |= UPL_NEED_32BIT_ADDR;
1831#endif
1832
91447636 1833 // Find the appropriate vm_map for the given task
55e303ae
A
1834 vm_map_t curMap;
1835 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1836 curMap = 0;
1837 else
1838 { curMap = get_task_map(_task); }
1839
91447636
A
1840 // Iterate over the vector of virtual ranges
1841 Ranges vec = _ranges;
1842 unsigned int pageIndex = 0;
1843 IOByteCount mdOffset = 0;
0c530ab8 1844 ppnum_t highestPage = 0;
55e303ae
A
1845 for (UInt range = 0; range < _rangesCount; range++) {
1846 ioPLBlock iopl;
91447636 1847 user_addr_t startPage;
55e303ae 1848 IOByteCount numBytes;
0c530ab8 1849 ppnum_t highPage = 0;
55e303ae 1850
91447636
A
1851 // Get the startPage address and length of vec[range]
1852 getAddrLenForInd(startPage, numBytes, type, vec, range);
1853 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1854 numBytes += iopl.fPageOffset;
1855 startPage = trunc_page_64(startPage);
1856
55e303ae
A
1857 if (mapper)
1858 iopl.fMappedBase = mapBase + pageIndex;
1859 else
1860 iopl.fMappedBase = 0;
55e303ae 1861
91447636 1862 // Iterate over the current range, creating UPLs
55e303ae
A
1863 while (numBytes) {
1864 dataP = getDataP(_memoryEntries);
91447636
A
1865 vm_address_t kernelStart = (vm_address_t) startPage;
1866 vm_map_t theMap;
1867 if (curMap)
1868 theMap = curMap;
1869 else if (!sharedMem) {
1870 assert(_task == kernel_task);
1871 theMap = IOPageableMapForAddress(kernelStart);
1872 }
1873 else
1874 theMap = NULL;
1875
55e303ae
A
1876 upl_page_info_array_t pageInfo = getPageList(dataP);
1877 int ioplFlags = uplFlags;
1878 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1879
1880 vm_size_t ioplSize = round_page_32(numBytes);
1881 unsigned int numPageInfo = atop_32(ioplSize);
1882
91447636 1883 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
55e303ae 1884 error = io_get_kernel_static_upl(theMap,
91447636
A
1885 kernelStart,
1886 &ioplSize,
1887 &iopl.fIOPL,
1888 baseInfo,
0c530ab8
A
1889 &numPageInfo,
1890 &highPage);
91447636
A
1891 }
1892 else if (sharedMem) {
55e303ae 1893 error = memory_object_iopl_request(sharedMem,
91447636
A
1894 ptoa_32(pageIndex),
1895 &ioplSize,
1896 &iopl.fIOPL,
1897 baseInfo,
1898 &numPageInfo,
1899 &ioplFlags);
1900 }
1901 else {
1902 assert(theMap);
1903 error = vm_map_create_upl(theMap,
1904 startPage,
1905 &ioplSize,
1906 &iopl.fIOPL,
1907 baseInfo,
1908 &numPageInfo,
1909 &ioplFlags);
de355530
A
1910 }
1911
55e303ae
A
1912 assert(ioplSize);
1913 if (error != KERN_SUCCESS)
1914 goto abortExit;
1915
0c530ab8
A
1916 if (iopl.fIOPL)
1917 highPage = upl_get_highest_page(iopl.fIOPL);
1918 if (highPage > highestPage)
1919 highestPage = highPage;
1920
2d21ac55 1921 error = kIOReturnCannotWire;
55e303ae
A
1922
1923 if (baseInfo->device) {
1924 numPageInfo = 1;
1925 iopl.fFlags = kIOPLOnDevice;
1926 // Don't translate device memory at all
1927 if (mapper && mapBase) {
1928 mapper->iovmFree(mapBase, _pages);
1929 mapBase = 0;
1930 iopl.fMappedBase = 0;
1931 }
1932 }
1933 else {
1934 iopl.fFlags = 0;
0c530ab8 1935 if (mapper)
55e303ae
A
1936 mapper->iovmInsert(mapBase, pageIndex,
1937 baseInfo, numPageInfo);
1938 }
1939
1940 iopl.fIOMDOffset = mdOffset;
1941 iopl.fPageInfo = pageIndex;
1942
1943 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1944 {
91447636
A
1945 upl_commit(iopl.fIOPL, 0, 0);
1946 upl_deallocate(iopl.fIOPL);
55e303ae 1947 iopl.fIOPL = 0;
de355530 1948 }
55e303ae
A
1949
1950 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up the partially created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;

    return error;
}

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method need not
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (!_wireCount
        && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
        _wireCount++;

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}

/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() calls must occur in pairs,
 * before and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    assert(_wireCount);

    if (_wireCount)
    {
        _wireCount--;
        if (!_wireCount)
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }
            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
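
/*
 * Illustrative usage sketch (not part of this file): a driver brackets a
 * transfer with prepare() and complete(); 'md' is a hypothetical descriptor
 * the caller already holds.
 *
 *     IOReturn err = md->prepare(kIODirectionOutIn);
 *     if (kIOReturnSuccess == err) {
 *         // ... run the I/O transfer against the wired-down pages ...
 *         md->complete(kIODirectionOutIn);
 *     }
 */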

IOReturn IOGeneralMemoryDescriptor::doMap(
    vm_map_t            __addressMap,
    IOVirtualAddress *  __address,
    IOOptionBits        options,
    IOByteCount         __offset,
    IOByteCount         __length )

{
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");

    _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len = 0;

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task
     && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
     && (1 == _rangesCount) && (0 == offset)
     && range0Addr && (length <= range0Len) )
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            vm_prot_t            prot       = VM_PROT_READ;
            if (!(kIOMapReadOnly & options))
                prot |= VM_PROT_WRITE;
            else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            kr = mach_make_memory_entry_64(get_task_map(_task),
                        &actualSize, range0Addr,
                        prot, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
#if IOASSERT
                IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                      range0Addr, (UInt32) actualSize, size);
#endif
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {     // _task == 0, must be physical

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( offset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    if (DEVICE_PAGER_CACHE_INHIB & flags)
                    {
                        if (DEVICE_PAGER_GUARDED & flags)
                            mapping->fOptions |= kIOMapInhibitCache;
                        else
                            mapping->fOptions |= kIOMapWriteCombineCache;
                    }
                    else if (DEVICE_PAGER_WRITE_THROUGH & flags)
                        mapping->fOptions |= kIOMapWriteThruCache;
                    else
                        mapping->fOptions |= kIOMapCopybackCache;
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                                DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                                DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr)
                {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    IOReturn result;
    if (0 == sharedMem)
        result = kIOReturnVMError;
    else
        result = super::doMap( __addressMap, __address,
                               options, __offset, __length );

    return( result );
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
    vm_map_t            addressMap,
    IOVirtualAddress    __address,
    IOByteCount         __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }


#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool _IOMemoryMap::init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      _options,
        mach_vm_size_t    _offset,
        mach_vm_size_t    _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}

struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t        sharedMem;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    mach_vm_size_t    sourceOffset;
    IOOptionBits      options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem)
        {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // VM system requires write access to change cache mode
            if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = mach_vm_map( map,
                               &ref->mapped,
                               ref->size, 0 /* mask */,
                               (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                               ref->sharedMem, ref->sourceOffset,
                               false, // copy
                               prot, // cur
                               prot, // max
                               VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        }
        else
        {
            err = mach_vm_allocate( map, &ref->mapped, ref->size,
                            ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                            | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            // make sure these mappings don't get copied if we fork
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    }
    while( false );

    return( err );
}

kern_return_t
IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
                              mach_vm_size_t offset,
                              mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = entry;
    ref.sourceOffset = offset;
    ref.options      = options;

    ref.size = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if( ref.sharedMem && (map == kernel_map) && pageable)
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( map, &ref );

    *address = ref.mapped;
    return (err);
}
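
/*
 * Illustrative sketch (hypothetical names; not called this way anywhere in
 * this file): mapping one page of an existing memory entry port 'entry'
 * into the kernel map at any address.
 *
 *     mach_vm_address_t addr = 0;
 *     kern_return_t kr = IOMemoryDescriptorMapMemEntry(kernel_map, entry,
 *                             kIOMapAnywhere, false, 0, &addr, page_size);
 *     // on KERN_SUCCESS the first page of the entry is mapped at addr
 */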


IOReturn IOMemoryDescriptor::doMap(
    vm_map_t            __addressMap,
    IOVirtualAddress *  __address,
    IOOptionBits        options,
    IOByteCount         __offset,
    IOByteCount         __length )
{
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");

    _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;

    do
    {
        sourceAddr = getSourceSegment( offset, NULL );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = mapping->fLength + pageOffset;
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                            NULL, NULL,
                                            &flags))
                redirUPL2 = NULL;

            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->fMemory->_memEntry;
                mapping->fMemory->_memEntry = me;
            }
            if (pager)
                err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

#ifdef DEBUG
            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
                            err, this, sourceAddr, mapping, address, offset, length);
#endif

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}

enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        void *            _pager,
        vm_map_t          addressMap,
        mach_vm_address_t address,
        mach_vm_size_t    sourceOffset,
        mach_vm_size_t    length,
        IOOptionBits      options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    mach_vm_size_t   size;
    mach_vm_size_t   bytes;
    mach_vm_size_t   page;
    mach_vm_size_t   pageOffset;
    mach_vm_size_t   pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
#ifdef DEBUG
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

#ifdef DEBUG
        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                    addressMap, address + pageOffset, physAddr + pageOffset,
                    segLen - pageOffset);
#endif


        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) );
            }
            else
            {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        // This call to vm_fault causes an early pmap-level resolution
        // of the mappings created above for kernel mappings, since
        // faulting them in later cannot take place from interrupt level.
        /* *** ALERT *** */
        /* *** Temporary Workaround *** */

        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /* *** Temporary Workaround *** */
        /* *** ALERT *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    }
    while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}

IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((_IOMemoryMap *) __address)->fAddressMap;
        address    = ((_IOMemoryMap *) __address)->fAddress;
        length     = ((_IOMemoryMap *) __address)->fLength;
    }

    if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
        addressMap = IOPageableMapForAddress( address );

#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
                addressMap, address, length );
#endif

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}

IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn       err = kIOReturnSuccess;
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, doRedirect );

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}

IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( fSuperMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!fAddress)
                break;
            if (!fAddressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
              && (0 == (fOptions & kIOMapStatic)))
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                if(!doRedirect && safeTask
                 && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                     || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
                {
                    IOVirtualAddress iova = (IOVirtualAddress) this;
                    err = mach_vm_deallocate( fAddressMap, fAddress, fLength );
                    err = fMemory->doMap( fAddressMap, &iova,
                                          (fOptions & ~kIOMapAnywhere) | kIOMap64Bit/*| kIOMapReserve*/,
                                          0, 0 );
                } else
                    err = kIOReturnSuccess;
#ifdef DEBUG
                IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
#endif
            }
            else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
            }
        }
        while (false);
        UNLOCK;
    }

    if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
        fMemory->redirect(safeTask, doRedirect);

    return( err );
}

IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
        && (0 == (fOptions & kIOMapStatic))) {

        err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);

    } else
        err = kIOReturnSuccess;

    if (fAddressMap)
    {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }

    fAddress = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( fAddressMap) {
        vm_map_deallocate(fAddressMap);
        fAddressMap = 0;
    }
    fAddressTask = 0;
    fAddress     = 0;
    UNLOCK;
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void _IOMemoryMap::free()
{
    unmap();

    if (fMemory)
    {
        LOCK;
        fMemory->removeMapping(this);
        UNLOCK;
        fMemory->release();
    }

    if (fOwner && (fOwner != fMemory))
    {
        LOCK;
        fOwner->removeMapping(this);
        UNLOCK;
    }

    if (fSuperMap)
        fSuperMap->release();

    if (fRedirUPL) {
        upl_commit(fRedirUPL, NULL, 0);
        upl_deallocate(fRedirUPL);
    }

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( fLength );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    if (fSuperMap)
        fSuperMap->getVirtualAddress();
    else if (fAddressMap && vm_map_is_64bit(fAddressMap))
    {
        OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
    }

    return (fAddress);
}

mach_vm_address_t _IOMemoryMap::getAddress()
{
    return( fAddress);
}

mach_vm_size_t _IOMemoryMap::getSize()
{
    return( fLength );
}


task_t _IOMemoryMap::getAddressTask()
{
    if( fSuperMap)
        return( fSuperMap->getAddressTask());
    else
        return( fAddressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( fOptions);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( fMemory );
}

_IOMemoryMap * _IOMemoryMap::copyCompatible(
                _IOMemoryMap * newMapping )
{
    task_t            task      = newMapping->getAddressTask();
    mach_vm_address_t toAddress = newMapping->fAddress;
    IOOptionBits      _options  = newMapping->fOptions;
    mach_vm_size_t    _offset   = newMapping->fOffset;
    mach_vm_size_t    _length   = newMapping->fLength;

    if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
        return( 0 );
    if( (fOptions ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((fOptions ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
        return( 0 );

    if( _offset < fOffset)
        return( 0 );

    _offset -= fOffset;

    if( (_offset + _length) > fLength)
        return( 0 );

    retain();
    if( (fLength == _length) && (!_offset))
    {
        newMapping->release();
        newMapping = this;
    }
    else
    {
        newMapping->fSuperMap = this;
        newMapping->fOffset   = _offset;
        newMapping->fAddress  = fAddress + _offset;
    }

    return( newMapping );
}

IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

    LOCK;
    address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper *
        mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
    task_t           intoTask,
    IOVirtualAddress mapAddress,
    IOOptionBits     options )
{
    return (createMappingInTask( intoTask, mapAddress,
                                 options | kIOMapStatic,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    IOOptionBits options )
{
    return (createMappingInTask( kernel_task, 0,
                                 options | kIOMapAnywhere,
                                 0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    task_t           intoTask,
    IOVirtualAddress atAddress,
    IOOptionBits     options,
    IOByteCount      offset,
    IOByteCount      length )
{
    if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
    {
        OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
        return (0);
    }

    return (createMappingInTask(intoTask, atAddress,
                                options, offset, length));
}

IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
    task_t            intoTask,
    mach_vm_address_t atAddress,
    IOOptionBits      options,
    mach_vm_size_t    offset,
    mach_vm_size_t    length)
{
    IOMemoryMap  * result;
    _IOMemoryMap * mapping;

    if (0 == length)
        length = getLength();

    mapping = new _IOMemoryMap;

    if( mapping
     && !mapping->init( intoTask, atAddress,
                        options, offset, length )) {
        mapping->release();
        mapping = 0;
    }

    if (mapping)
        result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
    else
        result = 0;

#ifdef DEBUG
    if (!result)
        IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
                    this, atAddress, options, offset, length);
#endif

    return (result);
}
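
/*
 * Illustrative usage sketch ('md' is a hypothetical descriptor; flags are
 * examples only): map the whole descriptor anywhere in the kernel task,
 * then drop the mapping when done.
 *
 *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                             kIOMapAnywhere | kIOMapReadOnly, 0, 0);
 *     if (map) {
 *         mach_vm_address_t addr = map->getAddress();
 *         // ... use addr ...
 *         map->release();
 *     }
 */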

IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
}

IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                mach_vm_size_t       offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (fAddress && fAddressMap) do
    {
        if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = fMemory;
            physMem->retain();
        }

        if (!fRedirUPL)
        {
            vm_size_t size = fLength;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                        | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
                                            NULL, NULL,
                                            &flags))
                fRedirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( fAddressMap, fAddress, fLength );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != fMemory)
            {
                fOffset = 0;
                if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
                                                          offset, fLength))
                    err = kIOReturnError;
            }
            if (fRedirUPL)
            {
                upl_commit(fRedirUPL, NULL, 0);
                upl_deallocate(fRedirUPL);
                fRedirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}

IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               __intoTask,
    IOVirtualAddress     __address,
    IOOptionBits         options,
    IOByteCount          __offset,
    IOByteCount          __length )
{
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");

    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       result  = 0;
    OSIterator *         iter;

    _IOMemoryMap * mapping = (_IOMemoryMap *) __address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    mapping->fOffset = offset;

    LOCK;

    do
    {
        if (kIOMapStatic & options)
        {
            result = mapping;
            addMapping(mapping);
            mapping->setMemoryDescriptor(this, 0);
            continue;
        }

        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

//          if (owner != this)          continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
                if (!mapDesc)
                    continue;
                offset = 0;
                mapping->fOffset = offset;
            }
        }
        else
        {
            // look for a compatible existing mapping
            if( (iter = OSCollectionIterator::withCollection(_mappings)))
            {
                _IOMemoryMap * lookMapping;
                while ((lookMapping = (_IOMemoryMap *) iter->getNextObject()))
                {
                    if ((result = lookMapping->copyCompatible(mapping)))
                    {
                        addMapping(result);
                        result->setMemoryDescriptor(this, offset);
                        break;
                    }
                }
                iter->release();
            }
            if (result || (options & kIOMapReference))
                continue;
        }

        if (!mapDesc)
        {
            mapDesc = this;
            mapDesc->retain();
        }
        IOReturn
        kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
        if (kIOReturnSuccess == kr)
        {
            result = mapping;
            mapDesc->addMapping(result);
            result->setMemoryDescriptor(mapDesc, offset);
        }
        else
        {
            mapping->release();
            mapping = NULL;
        }
    }
    while( false );

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return (result);
}

void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping)
    {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings )
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
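
/*
 * Illustrative sketch (hypothetical 'parentMD'; sizes are examples only):
 * carving a two-page child range out of a parent descriptor, starting one
 * page in.
 *
 *     IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
 *     if (sub && !sub->initSubRange(parentMD, page_size, 2 * page_size,
 *                                   kIODirectionOutIn)) {
 *         sub->release();
 *         sub = 0;
 *     }
 */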

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}


IOReturn
IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn rtn;

    if (kIOMDGetCharacteristics == op) {

        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (kIOReturnSuccess == rtn) {
            IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
            data->fLength = _length;
            data->fSGCount = 0; // XXX gvdl: fSGCount and fPages still need to be computed
            data->fPages = 0;
            data->fPageAlign = 0;
        }

        return rtn;
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data =
            reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
        UInt offset = data->fOffset;
        UInt remain = _length - offset;
        if ((int) remain <= 0)
            return (!remain)? kIOReturnOverrun : kIOReturnInternalError;

        data->fOffset = offset + _start;
        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (data->fLength > remain)
            data->fLength = remain;
        data->fOffset = offset;

        return rtn;
    }
    else
        return kIOReturnBadArgument;
}

addr64_t
IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
{
    addr64_t    address;
    IOByteCount actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment64( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOReturn IOSubMemoryDescriptor::doMap(
    vm_map_t           addressMap,
    IOVirtualAddress * atAddress,
    IOOptionBits       options,
    IOByteCount        sourceOffset,
    IOByteCount        length )
{
    panic("IOSubMemoryDescriptor::doMap");
    return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length));
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                              IOOptionBits * oldState )
{
    IOReturn err;

    LOCK;
    err = _parent->setPurgeable( newState, oldState );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                                  IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    LOCK;
    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::prepare(
    IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
    IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}

IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
    IOMemoryDescriptor * owner,
    task_t               intoTask,
    IOVirtualAddress     address,
    IOOptionBits         options,
    IOByteCount          offset,
    IOByteCount          length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMap64Bit & options))
    {
        panic("IOSubMemoryDescriptor::makeMapping !64bit");
    }

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        owner,
                                        intoTask,
                                        address,
                                        options, _start + offset, length );

    return( mapping );
}

/* ick */

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount length,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       length,
                                 IODirection       direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);
    return result;
}
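
/*
 * For reference, the emitted XML is an array with one dictionary per range;
 * the values below are examples only:
 *
 *     <array>
 *         <dict>
 *             <key>address</key><integer size="64">0x100000</integer>
 *             <key>length</key><integer size="32">0x1000</integer>
 *         </dict>
 *     </array>
 */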

bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s) {
        return (false);
    }
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i=0; i<3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result) {
        return false;
    }

    return s->addXMLEndTag("dict");
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }