]> git.saurik.com Git - apple/xnu.git/blame - iokit/Kernel/IOMemoryDescriptor.cpp
xnu-517.12.7.tar.gz
[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
e5568f75
A
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
1c79356b 11 *
e5568f75
A
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
e5568f75
A
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
1c79356b
A
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
24 *
25 * HISTORY
26 *
27 */
55e303ae
A
28// 45678901234567890123456789012345678901234567890123456789012345678901234567890
29#include <sys/cdefs.h>
1c79356b
A
30
31#include <IOKit/assert.h>
32#include <IOKit/system.h>
33#include <IOKit/IOLib.h>
34#include <IOKit/IOMemoryDescriptor.h>
55e303ae
A
35#include <IOKit/IOMapper.h>
36#include <IOKit/IOKitKeysPrivate.h>
1c79356b
A
37
38#include <IOKit/IOKitDebug.h>
39
40#include <libkern/c++/OSContainers.h>
9bccf70c
A
41#include <libkern/c++/OSDictionary.h>
42#include <libkern/c++/OSArray.h>
43#include <libkern/c++/OSSymbol.h>
44#include <libkern/c++/OSNumber.h>
1c79356b
A
45#include <sys/cdefs.h>
46
47__BEGIN_DECLS
48#include <vm/pmap.h>
55e303ae 49#include <mach/memory_object_types.h>
0b4e3aa0 50#include <device/device_port.h>
55e303ae 51
9bccf70c 52#ifndef i386
55e303ae 53struct phys_entry *pmap_find_physentry(ppnum_t pa);
9bccf70c 54#endif
55e303ae 55extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
1c79356b 56void ipc_port_release_send(ipc_port_t port);
55e303ae
A
57
58/* Copy between a physical page and a virtual address in the given vm_map */
59kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
0b4e3aa0
A
60
61memory_object_t
62device_pager_setup(
63 memory_object_t pager,
64 int device_handle,
65 vm_size_t size,
66 int flags);
9bccf70c
A
67void
68device_pager_deallocate(
69 memory_object_t);
0b4e3aa0
A
70kern_return_t
71device_pager_populate_object(
72 memory_object_t pager,
73 vm_object_offset_t offset,
55e303ae 74 ppnum_t phys_addr,
0b4e3aa0 75 vm_size_t size);
55e303ae
A
76kern_return_t
77memory_object_iopl_request(
78 ipc_port_t port,
79 memory_object_offset_t offset,
80 vm_size_t *upl_size,
81 upl_t *upl_ptr,
82 upl_page_info_array_t user_page_list,
83 unsigned int *page_list_count,
84 int *flags);
0b4e3aa0 85
9bccf70c
A
86/*
87 * Page fault handling based on vm_map (or entries therein)
88 */
89extern kern_return_t vm_fault(
90 vm_map_t map,
91 vm_offset_t vaddr,
92 vm_prot_t fault_type,
93 boolean_t change_wiring,
94 int interruptible,
95 pmap_t caller_pmap,
96 vm_offset_t caller_pmap_addr);
97
55e303ae 98unsigned int IOTranslateCacheBits(struct phys_entry *pp);
1c79356b 99
55e303ae 100vm_map_t IOPageableMapForAddress( vm_address_t address );
1c79356b 101
55e303ae 102typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
1c79356b 103
55e303ae
A
104kern_return_t IOIteratePageableMaps(vm_size_t size,
105 IOIteratePageableMapsCallback callback, void * ref);
106__END_DECLS
1c79356b 107
55e303ae 108#define kIOMaximumMappedIOByteCount (512*1024*1024)
1c79356b 109
55e303ae
A
110static IOMapper * gIOSystemMapper;
111static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
de355530 112
55e303ae 113/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
de355530 114
55e303ae 115OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
de355530 116
55e303ae 117#define super IOMemoryDescriptor
de355530 118
55e303ae 119OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
de355530 120
1c79356b
A
121/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122
9bccf70c
A
123static IORecursiveLock * gIOMemoryLock;
124
125#define LOCK IORecursiveLockLock( gIOMemoryLock)
126#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
127#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
128#define WAKEUP \
129 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
130
131/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
132
55e303ae 133#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
0b4e3aa0
A
134
135
136extern "C" {
137
138kern_return_t device_data_action(
139 int device_handle,
140 ipc_port_t device_pager,
141 vm_prot_t protection,
142 vm_object_offset_t offset,
143 vm_size_t size)
144{
9bccf70c
A
145 struct ExpansionData {
146 void * devicePager;
147 unsigned int pagerContig:1;
148 unsigned int unused:31;
149 IOMemoryDescriptor * memory;
150 };
151 kern_return_t kr;
152 ExpansionData * ref = (ExpansionData *) device_handle;
153 IOMemoryDescriptor * memDesc;
0b4e3aa0 154
9bccf70c
A
155 LOCK;
156 memDesc = ref->memory;
157 if( memDesc)
158 kr = memDesc->handleFault( device_pager, 0, 0,
159 offset, size, kIOMapDefaultCache /*?*/);
160 else
161 kr = KERN_ABORTED;
162 UNLOCK;
0b4e3aa0 163
9bccf70c 164 return( kr );
0b4e3aa0
A
165}
166
167kern_return_t device_close(
168 int device_handle)
169{
9bccf70c
A
170 struct ExpansionData {
171 void * devicePager;
172 unsigned int pagerContig:1;
173 unsigned int unused:31;
174 IOMemoryDescriptor * memory;
175 };
176 ExpansionData * ref = (ExpansionData *) device_handle;
0b4e3aa0 177
9bccf70c 178 IODelete( ref, ExpansionData, 1 );
0b4e3aa0
A
179
180 return( kIOReturnSuccess );
181}
182
183}
184
1c79356b
A
185/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
186
187/*
188 * withAddress:
189 *
190 * Create a new IOMemoryDescriptor. The buffer is a virtual address
191 * relative to the specified task. If no task is supplied, the kernel
192 * task is implied.
193 */
194IOMemoryDescriptor *
195IOMemoryDescriptor::withAddress(void * address,
55e303ae
A
196 IOByteCount length,
197 IODirection direction)
198{
199 return IOMemoryDescriptor::
200 withAddress((vm_address_t) address, length, direction, kernel_task);
201}
202
203IOMemoryDescriptor *
204IOMemoryDescriptor::withAddress(vm_address_t address,
205 IOByteCount length,
206 IODirection direction,
207 task_t task)
1c79356b
A
208{
209 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
210 if (that)
211 {
55e303ae 212 if (that->initWithAddress(address, length, direction, task))
1c79356b
A
213 return that;
214
215 that->release();
216 }
217 return 0;
218}
219
220IOMemoryDescriptor *
55e303ae
A
221IOMemoryDescriptor::withPhysicalAddress(
222 IOPhysicalAddress address,
223 IOByteCount length,
224 IODirection direction )
225{
226 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
227 if (self
228 && !self->initWithPhysicalAddress(address, length, direction)) {
229 self->release();
230 return 0;
231 }
232
233 return self;
234}
235
236IOMemoryDescriptor *
237IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
238 UInt32 withCount,
239 IODirection direction,
240 task_t task,
241 bool asReference)
1c79356b
A
242{
243 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
244 if (that)
245 {
55e303ae 246 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
1c79356b
A
247 return that;
248
249 that->release();
250 }
251 return 0;
252}
253
1c79356b
A
254
255/*
256 * withRanges:
257 *
258 * Create a new IOMemoryDescriptor. The buffer is made up of several
259 * virtual address ranges, from a given task.
260 *
261 * Passing the ranges as a reference will avoid an extra allocation.
262 */
263IOMemoryDescriptor *
55e303ae
A
264IOMemoryDescriptor::withOptions(void * buffers,
265 UInt32 count,
266 UInt32 offset,
267 task_t task,
268 IOOptionBits opts,
269 IOMapper * mapper)
1c79356b 270{
55e303ae 271 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
d7e50217 272
55e303ae
A
273 if (self
274 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
275 {
276 self->release();
277 return 0;
de355530 278 }
55e303ae
A
279
280 return self;
281}
282
283// Can't leave abstract but this should never be used directly,
284bool IOMemoryDescriptor::initWithOptions(void * buffers,
285 UInt32 count,
286 UInt32 offset,
287 task_t task,
288 IOOptionBits options,
289 IOMapper * mapper)
290{
291 // @@@ gvdl: Should I panic?
292 panic("IOMD::initWithOptions called\n");
1c79356b
A
293 return 0;
294}
295
296IOMemoryDescriptor *
297IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
298 UInt32 withCount,
55e303ae
A
299 IODirection direction,
300 bool asReference)
1c79356b
A
301{
302 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
303 if (that)
304 {
55e303ae 305 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
1c79356b
A
306 return that;
307
308 that->release();
309 }
310 return 0;
311}
312
313IOMemoryDescriptor *
314IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
315 IOByteCount offset,
316 IOByteCount length,
55e303ae 317 IODirection direction)
1c79356b 318{
55e303ae 319 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
1c79356b 320
55e303ae
A
321 if (self && !self->initSubRange(of, offset, length, direction)) {
322 self->release();
323 self = 0;
1c79356b 324 }
55e303ae 325 return self;
1c79356b
A
326}
327
328/*
329 * initWithAddress:
330 *
331 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
332 * relative to the specified task. If no task is supplied, the kernel
333 * task is implied.
334 *
335 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
336 * initWithRanges again on an existing instance -- note this behavior
337 * is not commonly supported in other I/O Kit classes, although it is
338 * supported here.
339 */
340bool
341IOGeneralMemoryDescriptor::initWithAddress(void * address,
342 IOByteCount withLength,
343 IODirection withDirection)
344{
345 _singleRange.v.address = (vm_address_t) address;
346 _singleRange.v.length = withLength;
347
348 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
349}
350
351bool
352IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
353 IOByteCount withLength,
354 IODirection withDirection,
355 task_t withTask)
356{
357 _singleRange.v.address = address;
358 _singleRange.v.length = withLength;
359
360 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
361}
362
363bool
364IOGeneralMemoryDescriptor::initWithPhysicalAddress(
365 IOPhysicalAddress address,
366 IOByteCount withLength,
367 IODirection withDirection )
368{
369 _singleRange.p.address = address;
370 _singleRange.p.length = withLength;
371
372 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
373}
374
55e303ae
A
375bool
376IOGeneralMemoryDescriptor::initWithPhysicalRanges(
377 IOPhysicalRange * ranges,
378 UInt32 count,
379 IODirection direction,
380 bool reference)
381{
382 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
383
384 if (reference)
385 mdOpts |= kIOMemoryAsReference;
386
387 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
388}
389
390bool
391IOGeneralMemoryDescriptor::initWithRanges(
392 IOVirtualRange * ranges,
393 UInt32 count,
394 IODirection direction,
395 task_t task,
396 bool reference)
397{
398 IOOptionBits mdOpts = direction;
399
400 if (reference)
401 mdOpts |= kIOMemoryAsReference;
402
403 if (task) {
404 mdOpts |= kIOMemoryTypeVirtual;
405 if (task == kernel_task)
406 mdOpts |= kIOMemoryAutoPrepare;
407 }
408 else
409 mdOpts |= kIOMemoryTypePhysical;
410
411 // @@@ gvdl: Need to remove this
412 // Auto-prepare if this is a kernel memory descriptor as very few
413 // clients bother to prepare() kernel memory.
414 // But it has been enforced so what are you going to do?
415
416 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
417}
418
1c79356b 419/*
55e303ae 420 * initWithOptions:
1c79356b 421 *
55e303ae
A
422 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
423 * from a given task or several physical ranges or finally an UPL from the ubc
424 * system.
1c79356b
A
425 *
426 * Passing the ranges as a reference will avoid an extra allocation.
427 *
55e303ae
A
428 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
429 * existing instance -- note this behavior is not commonly supported in other
430 * I/O Kit classes, although it is supported here.
1c79356b 431 */
55e303ae
A
432
433enum ioPLBlockFlags {
434 kIOPLOnDevice = 0x00000001,
435 kIOPLExternUPL = 0x00000002,
436};
437
438struct ioPLBlock {
439 upl_t fIOPL;
440 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
441 vm_offset_t fPageInfo; // Pointer to page list or index into it
442 ppnum_t fMappedBase; // Page number of first page in this iopl
443 unsigned int fPageOffset; // Offset within first page of iopl
444 unsigned int fFlags; // Flags
445};
446
447struct ioGMDData {
448 IOMapper *fMapper;
449 unsigned int fPageCnt;
450 upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this
451 // should be able to use upl directly
452 ioPLBlock fBlocks[0];
453};
454
455#define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
456#define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
457#define getNumIOPL(d,len) \
458 ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
459#define getPageList(d) (&(d->fPageList[0]))
460#define computeDataSize(p, u) \
461 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
462
1c79356b 463bool
55e303ae
A
464IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
465 UInt32 count,
466 UInt32 offset,
467 task_t task,
468 IOOptionBits options,
469 IOMapper * mapper)
470{
471
472 switch (options & kIOMemoryTypeMask) {
473 case kIOMemoryTypeVirtual:
474 assert(task);
475 if (!task)
476 return false;
477 else
478 break;
479
480 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
481 mapper = kIOMapperNone;
482 case kIOMemoryTypeUPL:
483 assert(!task);
484 break;
485 default:
486panic("IOGMD::iWO(): bad type"); // @@@ gvdl: for testing
487 return false; /* bad argument */
488 }
489
490 assert(buffers);
491 assert(count);
1c79356b
A
492
493 /*
494 * We can check the _initialized instance variable before having ever set
495 * it to an initial value because I/O Kit guarantees that all our instance
496 * variables are zeroed on an object's allocation.
497 */
498
55e303ae 499 if (_initialized) {
1c79356b
A
500 /*
501 * An existing memory descriptor is being retargeted to point to
502 * somewhere else. Clean up our present state.
503 */
504
1c79356b
A
505 while (_wireCount)
506 complete();
507 if (_kernPtrAligned)
508 unmapFromKernel();
509 if (_ranges.v && _rangesIsAllocated)
510 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
511 }
55e303ae
A
512 else {
513 if (!super::init())
514 return false;
515 _initialized = true;
516 }
d7e50217 517
55e303ae
A
518 // Grab the appropriate mapper
519 if (mapper == kIOMapperNone)
520 mapper = 0; // No Mapper
521 else if (!mapper) {
522 IOMapper::checkForSystemMapper();
523 gIOSystemMapper = mapper = IOMapper::gSystem;
524 }
1c79356b 525
55e303ae
A
526 _flags = options;
527 _task = task;
528
529 // DEPRECATED variable initialisation
530 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
1c79356b 531 _position = 0;
1c79356b
A
532 _kernPtrAligned = 0;
533 _cachedPhysicalAddress = 0;
534 _cachedVirtualAddress = 0;
1c79356b 535
55e303ae 536 if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) {
1c79356b 537
55e303ae
A
538 ioGMDData *dataP;
539 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
d7e50217 540
55e303ae
A
541 if (!_memoryEntries) {
542 _memoryEntries = OSData::withCapacity(dataSize);
543 if (!_memoryEntries)
544 return false;
545 }
546 else if (!_memoryEntries->initWithCapacity(dataSize))
547 return false;
548
549 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
550 dataP = getDataP(_memoryEntries);
551 dataP->fMapper = mapper;
552 dataP->fPageCnt = 0;
553
554 _wireCount++; // UPLs start out life wired
555
556 _length = count;
557 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
558
559 ioPLBlock iopl;
560 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
561
562 iopl.fIOPL = (upl_t) buffers;
563 // Set the flag kIOPLOnDevice convieniently equal to 1
564 iopl.fFlags = pageList->device | kIOPLExternUPL;
565 iopl.fIOMDOffset = 0;
566 if (!pageList->device) {
567 // @@@ gvdl: Ask JoeS are the pages contiguious with the list?
568 // or there a chance that we may be inserting 0 phys_addrs?
569 // Pre-compute the offset into the UPL's page list
570 pageList = &pageList[atop_32(offset)];
571 offset &= PAGE_MASK;
572 if (mapper) {
573 iopl.fMappedBase = mapper->iovmAlloc(_pages);
574 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
575 }
576 else
577 iopl.fMappedBase = 0;
578 }
579 else
580 iopl.fMappedBase = 0;
581 iopl.fPageInfo = (vm_address_t) pageList;
582 iopl.fPageOffset = offset;
583
584 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
d7e50217 585 }
55e303ae
A
586 else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
587 IOVirtualRange *ranges = (IOVirtualRange *) buffers;
d7e50217 588
55e303ae
A
589 /*
590 * Initialize the memory descriptor.
591 */
1c79356b 592
55e303ae
A
593 _length = 0;
594 _pages = 0;
595 for (unsigned ind = 0; ind < count; ind++) {
596 IOVirtualRange cur = ranges[ind];
597
598 _length += cur.length;
599 _pages += atop_32(cur.address + cur.length + PAGE_MASK)
600 - atop_32(cur.address);
601 }
602
603 _ranges.v = 0;
604 _rangesIsAllocated = !(options & kIOMemoryAsReference);
605 _rangesCount = count;
606
607 if (options & kIOMemoryAsReference)
608 _ranges.v = ranges;
609 else {
610 _ranges.v = IONew(IOVirtualRange, count);
611 if (!_ranges.v)
612 return false;
613 bcopy(/* from */ ranges, _ranges.v,
614 count * sizeof(IOVirtualRange));
615 }
616
617 // Auto-prepare memory at creation time.
618 // Implied completion when descriptor is free-ed
619 if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
620 _wireCount++; // Physical MDs are start out wired
621 else { /* kIOMemoryTypeVirtual */
622 ioGMDData *dataP;
623 unsigned int dataSize =
624 computeDataSize(_pages, /* upls */ _rangesCount * 2);
625
626 if (!_memoryEntries) {
627 _memoryEntries = OSData::withCapacity(dataSize);
628 if (!_memoryEntries)
629 return false;
630 }
631 else if (!_memoryEntries->initWithCapacity(dataSize))
632 return false;
633
634 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
635 dataP = getDataP(_memoryEntries);
636 dataP->fMapper = mapper;
637 dataP->fPageCnt = _pages;
638
639 if (kIOMemoryPersistent & _flags)
640 {
641 kern_return_t error;
642 ipc_port_t sharedMem;
643
644 vm_size_t size = _pages << PAGE_SHIFT;
645 vm_address_t startPage;
646
647 startPage = trunc_page_32(_ranges.v[0].address);
648
649 vm_map_t theMap = ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
650 ? IOPageableMapForAddress(startPage)
651 : get_task_map(_task);
652
653 vm_size_t actualSize = size;
654 error = mach_make_memory_entry( theMap,
655 &actualSize, startPage,
656 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
657 NULL );
658
659 if (KERN_SUCCESS == error) {
660 if (actualSize == round_page_32(size)) {
661 _memEntry = (void *) sharedMem;
662 } else {
663#if IOASSERT
664 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
665 startPage, (UInt32)actualSize, size);
666#endif
667 ipc_port_release_send( sharedMem );
668 }
669 }
670 }
671
672 if ((_flags & kIOMemoryAutoPrepare)
673 && prepare() != kIOReturnSuccess)
674 return false;
675 }
676 }
677
678 return true;
de355530
A
679}
680
1c79356b
A
681/*
682 * free
683 *
684 * Free resources.
685 */
686void IOGeneralMemoryDescriptor::free()
687{
9bccf70c
A
688 LOCK;
689 if( reserved)
690 reserved->memory = 0;
691 UNLOCK;
692
1c79356b
A
693 while (_wireCount)
694 complete();
55e303ae
A
695 if (_memoryEntries)
696 _memoryEntries->release();
697
1c79356b
A
698 if (_kernPtrAligned)
699 unmapFromKernel();
700 if (_ranges.v && _rangesIsAllocated)
701 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
9bccf70c 702
55e303ae
A
703 if (reserved && reserved->devicePager)
704 device_pager_deallocate( (memory_object_t) reserved->devicePager );
9bccf70c 705
55e303ae
A
706 // memEntry holds a ref on the device pager which owns reserved
707 // (ExpansionData) so no reserved access after this point
708 if (_memEntry)
1c79356b 709 ipc_port_release_send( (ipc_port_t) _memEntry );
55e303ae 710
1c79356b
A
711 super::free();
712}
713
0b4e3aa0
A
714/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
715/* DEPRECATED */ {
55e303ae 716 panic("IOGMD::unmapFromKernel deprecated");
0b4e3aa0
A
717/* DEPRECATED */ }
718/* DEPRECATED */
719/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
720/* DEPRECATED */ {
55e303ae 721 panic("IOGMD::mapIntoKernel deprecated");
0b4e3aa0 722/* DEPRECATED */ }
1c79356b
A
723
724/*
725 * getDirection:
726 *
727 * Get the direction of the transfer.
728 */
729IODirection IOMemoryDescriptor::getDirection() const
730{
731 return _direction;
732}
733
734/*
735 * getLength:
736 *
737 * Get the length of the transfer (over all ranges).
738 */
739IOByteCount IOMemoryDescriptor::getLength() const
740{
741 return _length;
742}
743
55e303ae 744void IOMemoryDescriptor::setTag( IOOptionBits tag )
1c79356b
A
745{
746 _tag = tag;
747}
748
749IOOptionBits IOMemoryDescriptor::getTag( void )
750{
751 return( _tag);
752}
753
55e303ae 754// @@@ gvdl: who is using this API? Seems like a wierd thing to implement.
0b4e3aa0
A
755IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
756 IOByteCount * length )
757{
9bccf70c 758 IOPhysicalAddress physAddr = 0;
1c79356b 759
9bccf70c
A
760 if( prepare() == kIOReturnSuccess) {
761 physAddr = getPhysicalSegment( offset, length );
762 complete();
763 }
0b4e3aa0
A
764
765 return( physAddr );
766}
767
55e303ae
A
768IOByteCount IOMemoryDescriptor::readBytes
769 (IOByteCount offset, void *bytes, IOByteCount length)
1c79356b 770{
55e303ae
A
771 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
772 IOByteCount remaining;
1c79356b 773
55e303ae
A
774 // Assert that this entire I/O is withing the available range
775 assert(offset < _length);
776 assert(offset + length <= _length);
777 if (offset >= _length) {
778IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
779 return 0;
780 }
1c79356b 781
55e303ae
A
782 remaining = length = min(length, _length - offset);
783 while (remaining) { // (process another target segment?)
784 addr64_t srcAddr64;
785 IOByteCount srcLen;
1c79356b 786
55e303ae
A
787 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
788 if (!srcAddr64)
789 break;
1c79356b 790
55e303ae
A
791 // Clip segment length to remaining
792 if (srcLen > remaining)
793 srcLen = remaining;
1c79356b 794
55e303ae
A
795 copypv(srcAddr64, dstAddr, srcLen,
796 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1c79356b 797
55e303ae
A
798 dstAddr += srcLen;
799 offset += srcLen;
800 remaining -= srcLen;
801 }
1c79356b 802
55e303ae 803 assert(!remaining);
1c79356b 804
55e303ae
A
805 return length - remaining;
806}
0b4e3aa0 807
55e303ae
A
808IOByteCount IOMemoryDescriptor::writeBytes
809 (IOByteCount offset, const void *bytes, IOByteCount length)
810{
811 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
812 IOByteCount remaining;
0b4e3aa0 813
55e303ae
A
814 // Assert that this entire I/O is withing the available range
815 assert(offset < _length);
816 assert(offset + length <= _length);
0b4e3aa0 817
55e303ae 818 assert( !(kIOMemoryPreparedReadOnly & _flags) );
0b4e3aa0 819
55e303ae
A
820 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
821IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
822 return 0;
823 }
0b4e3aa0 824
55e303ae
A
825 remaining = length = min(length, _length - offset);
826 while (remaining) { // (process another target segment?)
827 addr64_t dstAddr64;
828 IOByteCount dstLen;
0b4e3aa0 829
55e303ae
A
830 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
831 if (!dstAddr64)
832 break;
0b4e3aa0 833
55e303ae
A
834 // Clip segment length to remaining
835 if (dstLen > remaining)
836 dstLen = remaining;
0b4e3aa0 837
55e303ae
A
838 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
839 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
0b4e3aa0 840
55e303ae
A
841 srcAddr += dstLen;
842 offset += dstLen;
843 remaining -= dstLen;
1c79356b 844 }
1c79356b 845
55e303ae
A
846 assert(!remaining);
847
848 return length - remaining;
1c79356b
A
849}
850
55e303ae
A
851// osfmk/device/iokit_rpc.c
852extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1c79356b 853
55e303ae
A
854/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
855/* DEPRECATED */ {
856 panic("IOGMD::setPosition deprecated");
857/* DEPRECATED */ }
de355530 858
55e303ae
A
859IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
860 (IOByteCount offset, IOByteCount *lengthOfSegment)
861{
862 IOPhysicalAddress address = 0;
863 IOPhysicalLength length = 0;
1c79356b 864
55e303ae
A
865// assert(offset <= _length);
866 if (offset < _length) // (within bounds?)
867 {
868 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
869 unsigned int ind;
1c79356b 870
55e303ae 871 // Physical address based memory descriptor
1c79356b 872
55e303ae
A
873 // Find offset within descriptor and make it relative
874 // to the current _range.
875 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
876 offset -= _ranges.p[ind].length;
877
878 IOPhysicalRange cur = _ranges.p[ind];
879 address = cur.address + offset;
880 length = cur.length - offset;
881
882 // see how far we can coalesce ranges
883 for (++ind; ind < _rangesCount; ind++) {
884 cur = _ranges.p[ind];
885
886 if (address + length != cur.address)
887 break;
888
889 length += cur.length;
890 }
1c79356b 891
55e303ae
A
892 // @@@ gvdl: should assert(address);
893 // but can't as NVidia GeForce creates a bogus physical mem
de355530 894 {
55e303ae
A
895 assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount));
896 }
897 assert(length);
898 }
899 else do {
900 // We need wiring & we are wired.
901 assert(_wireCount);
1c79356b 902
55e303ae
A
903 if (!_wireCount)
904 {
905 panic("IOGMD: not wired for getPhysicalSegment()");
906 continue;
907 }
1c79356b 908
55e303ae 909 assert(_memoryEntries);
1c79356b 910
55e303ae
A
911 ioGMDData * dataP = getDataP(_memoryEntries);
912 const ioPLBlock *ioplList = getIOPLList(dataP);
913 UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength());
914 upl_page_info_t *pageList = getPageList(dataP);
1c79356b 915
55e303ae 916 assert(numIOPLs > 0);
1c79356b 917
55e303ae
A
918 // Scan through iopl info blocks looking for block containing offset
919 for (ind = 1; ind < numIOPLs; ind++) {
920 if (offset < ioplList[ind].fIOMDOffset)
921 break;
922 }
1c79356b 923
55e303ae
A
924 // Go back to actual range as search goes past it
925 ioPLBlock ioplInfo = ioplList[ind - 1];
1c79356b 926
55e303ae
A
927 if (ind < numIOPLs)
928 length = ioplList[ind].fIOMDOffset;
929 else
930 length = _length;
931 length -= offset; // Remainder within iopl
1c79356b 932
55e303ae
A
933 // Subtract offset till this iopl in total list
934 offset -= ioplInfo.fIOMDOffset;
1c79356b 935
55e303ae
A
936 // This is a mapped IOPL so we just need to compute an offset
937 // relative to the mapped base.
938 if (ioplInfo.fMappedBase) {
939 offset += (ioplInfo.fPageOffset & PAGE_MASK);
940 address = ptoa_32(ioplInfo.fMappedBase) + offset;
941 continue;
942 }
1c79356b 943
55e303ae
A
944 // Currently the offset is rebased into the current iopl.
945 // Now add the iopl 1st page offset.
946 offset += ioplInfo.fPageOffset;
0b4e3aa0 947
55e303ae
A
948 // For external UPLs the fPageInfo field points directly to
949 // the upl's upl_page_info_t array.
950 if (ioplInfo.fFlags & kIOPLExternUPL)
951 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
952 else
953 pageList = &pageList[ioplInfo.fPageInfo];
1c79356b 954
55e303ae
A
955 // Check for direct device non-paged memory
956 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
957 address = ptoa_32(pageList->phys_addr) + offset;
958 continue;
d7e50217 959 }
9bccf70c 960
55e303ae
A
961 // Now we need compute the index into the pageList
962 ind = atop_32(offset);
963 offset &= PAGE_MASK;
964
965 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
966 address = ptoa_32(pageAddr) + offset;
967
968 // Check for the remaining data in this upl being longer than the
969 // remainder on the current page. This should be checked for
970 // contiguous pages
971 if (length > PAGE_SIZE - offset) {
972 // See if the next page is contiguous. Stop looking when we hit
973 // the end of this upl, which is indicated by the
974 // contigLength >= length.
975 IOByteCount contigLength = PAGE_SIZE - offset;
976
977 // Look for contiguous segment
978 while (contigLength < length
979 && ++pageAddr == pageList[++ind].phys_addr) {
980 contigLength += PAGE_SIZE;
981 }
982 if (length > contigLength)
983 length = contigLength;
984 }
985
986 assert(address);
987 assert(length);
0b4e3aa0 988
55e303ae 989 } while (0);
0b4e3aa0 990
55e303ae
A
991 if (!address)
992 length = 0;
993 }
de355530 994
55e303ae
A
995 if (lengthOfSegment)
996 *lengthOfSegment = length;
de355530 997
55e303ae
A
998 return address;
999}
de355530 1000
55e303ae
A
1001addr64_t IOMemoryDescriptor::getPhysicalSegment64
1002 (IOByteCount offset, IOByteCount *lengthOfSegment)
1003{
1004 IOPhysicalAddress phys32;
1005 IOByteCount length;
1006 addr64_t phys64;
0b4e3aa0 1007
55e303ae
A
1008 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1009 if (!phys32)
1010 return 0;
0b4e3aa0 1011
55e303ae 1012 if (gIOSystemMapper)
1c79356b 1013 {
55e303ae
A
1014 IOByteCount origLen;
1015
1016 phys64 = gIOSystemMapper->mapAddr(phys32);
1017 origLen = *lengthOfSegment;
1018 length = page_size - (phys64 & (page_size - 1));
1019 while ((length < origLen)
1020 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
1021 length += page_size;
1022 if (length > origLen)
1023 length = origLen;
1024
1025 *lengthOfSegment = length;
0b4e3aa0 1026 }
55e303ae
A
1027 else
1028 phys64 = (addr64_t) phys32;
1c79356b 1029
55e303ae 1030 return phys64;
0b4e3aa0
A
1031}
1032
e5568f75
A
1033// Note this function is NOT a virtual function
1034void * IOGeneralMemoryDescriptor::getBackingID() const
1035{
1036 if (!_memEntry) // Not created as a persistent memory descriptor
1037 return 0;
1038
1039 vm_size_t size = _pages << PAGE_SHIFT;
1040 vm_size_t seenSize = 0;
1041 vm_address_t basePage = trunc_page_32(_ranges.v[0].address);
1042 void *retObjID = 0;
1043
1044 vm_map_t theMap =
1045 ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
1046 ? IOPageableMapForAddress(basePage)
1047 : get_task_map(_task);
1048
1049
1050 for (;;) {
1051 vm_region_object_info_data_64_t objInfo;
1052 vm_address_t actualPage = basePage;
1053 vm_size_t actualSize;
1054 mach_msg_type_number_t objInfoSize;
1055 kern_return_t error;
1056
1057 objInfoSize = VM_REGION_OBJECT_INFO_COUNT_64;
1058 error = vm_region_64(theMap,
1059 &actualPage,
1060 &actualSize,
1061 VM_REGION_OBJECT_INFO_64,
1062 (vm_region_info_t) &objInfo,
1063 &objInfoSize,
1064 0);
1065
1066 if (KERN_SUCCESS != error || actualSize == 0 || actualPage > basePage
1067 || (retObjID && retObjID != (void *) objInfo.object_id))
1068 return 0;
1069
1070 actualPage += actualSize; // Calculate the end address
1071 seenSize += actualPage - basePage; // Size of overlap
1072 basePage = actualPage; // Start here for next loop
1073 if (seenSize >= size)
1074 return (void *) objInfo.object_id;
1075
1076 if (!retObjID)
1077 retObjID = (void *) objInfo.object_id;
1078 }
1079}
1080
1081
55e303ae
A
1082IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment
1083 (IOByteCount offset, IOByteCount *lengthOfSegment)
1c79356b 1084{
0b4e3aa0
A
1085 IOPhysicalAddress address = 0;
1086 IOPhysicalLength length = 0;
1c79356b 1087
0b4e3aa0 1088 assert(offset <= _length);
1c79356b 1089
55e303ae
A
1090 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL)
1091 return super::getSourceSegment( offset, lengthOfSegment );
1092
0b4e3aa0 1093 if ( offset < _length ) // (within bounds?)
1c79356b 1094 {
0b4e3aa0 1095 unsigned rangesIndex = 0;
1c79356b 1096
0b4e3aa0
A
1097 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
1098 {
1099 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
1100 }
1c79356b 1101
0b4e3aa0
A
1102 address = _ranges.v[rangesIndex].address + offset;
1103 length = _ranges.v[rangesIndex].length - offset;
1c79356b 1104
0b4e3aa0
A
1105 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
1106 {
1107 if ( address + length != _ranges.v[rangesIndex].address ) break;
1c79356b 1108
0b4e3aa0
A
1109 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
1110 }
1c79356b 1111
0b4e3aa0
A
1112 assert(address);
1113 if ( address == 0 ) length = 0;
1c79356b 1114 }
0b4e3aa0
A
1115
1116 if ( lengthOfSegment ) *lengthOfSegment = length;
1117
1118 return address;
1119}
1120
1121/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1122/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1123/* DEPRECATED */ IOByteCount * lengthOfSegment)
1124/* DEPRECATED */ {
55e303ae
A
1125 if (_task == kernel_task)
1126 return (void *) getSourceSegment(offset, lengthOfSegment);
1127 else
1128 panic("IOGMD::getVirtualSegment deprecated");
1129
1130 return 0;
0b4e3aa0
A
1131/* DEPRECATED */ }
1132/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1c79356b 1133
55e303ae
A
1134#ifdef __ppc__
1135extern vm_offset_t static_memory_end;
1136#define io_kernel_static_end static_memory_end
1137#else
1138extern vm_offset_t first_avail;
1139#define io_kernel_static_end first_avail
1140#endif
1141
1142static kern_return_t
1143io_get_kernel_static_upl(
1144 vm_map_t map,
1145 vm_address_t offset,
1146 vm_size_t *upl_size,
1147 upl_t *upl,
1148 upl_page_info_array_t page_list,
1149 unsigned int *count,
1150 int *flags,
1151 int force_data_sync)
1c79356b 1152{
55e303ae
A
1153 unsigned int pageCount, page;
1154 ppnum_t phys;
1c79356b 1155
55e303ae
A
1156 pageCount = atop_32(*upl_size);
1157 if (pageCount > *count)
1158 pageCount = *count;
1c79356b 1159
55e303ae 1160 *upl = NULL;
1c79356b 1161
55e303ae
A
1162 for (page = 0; page < pageCount; page++)
1163 {
1164 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1165 if (!phys)
1166 break;
1167 page_list[page].phys_addr = phys;
1168 page_list[page].pageout = 0;
1169 page_list[page].absent = 0;
1170 page_list[page].dirty = 0;
1171 page_list[page].precious = 0;
1172 page_list[page].device = 0;
1173 }
0b4e3aa0 1174
55e303ae
A
1175 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1176}
0b4e3aa0 1177
55e303ae
A
1178IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1179{
1180 IOReturn error = kIOReturnNoMemory;
1181 ioGMDData *dataP;
1182 ppnum_t mapBase = 0;
1183 IOMapper *mapper;
1184 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b 1185
55e303ae 1186 assert(!_wireCount);
1c79356b 1187
55e303ae
A
1188 if (_pages >= gIOMaximumMappedIOPageCount)
1189 return kIOReturnNoResources;
0b4e3aa0 1190
55e303ae
A
1191 dataP = getDataP(_memoryEntries);
1192 mapper = dataP->fMapper;
1193 if (mapper && _pages)
1194 mapBase = mapper->iovmAlloc(_pages);
d7e50217 1195
55e303ae
A
1196 // Note that appendBytes(NULL) zeros the data up to the
1197 // desired length.
1198 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1199 dataP = 0; // May no longer be valid so lets not get tempted.
de355530 1200
55e303ae
A
1201 if (forDirection == kIODirectionNone)
1202 forDirection = _direction;
1203
1204 int uplFlags; // This Mem Desc's default flags for upl creation
1205 switch (forDirection)
1206 {
1207 case kIODirectionOut:
1208 // Pages do not need to be marked as dirty on commit
1209 uplFlags = UPL_COPYOUT_FROM;
1210 _flags |= kIOMemoryPreparedReadOnly;
1211 break;
1212
1213 case kIODirectionIn:
1214 default:
1215 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1216 break;
1217 }
1218 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1219
1220 //
1221 // Check user read/write access to the data buffer.
1222 //
1223 unsigned int pageIndex = 0;
1224 IOByteCount mdOffset = 0;
1225 vm_map_t curMap;
1226 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1227 curMap = 0;
1228 else
1229 { curMap = get_task_map(_task); }
1230
1231 for (UInt range = 0; range < _rangesCount; range++) {
1232 ioPLBlock iopl;
1233 IOVirtualRange curRange = _ranges.v[range];
1234 vm_address_t startPage;
1235 IOByteCount numBytes;
1236
1237 startPage = trunc_page_32(curRange.address);
1238 iopl.fPageOffset = (short) curRange.address & PAGE_MASK;
1239 if (mapper)
1240 iopl.fMappedBase = mapBase + pageIndex;
1241 else
1242 iopl.fMappedBase = 0;
1243 numBytes = iopl.fPageOffset + curRange.length;
1244
1245 while (numBytes) {
1246 dataP = getDataP(_memoryEntries);
1247 vm_map_t theMap =
1248 (curMap)? curMap
1249 : IOPageableMapForAddress(startPage);
1250 upl_page_info_array_t pageInfo = getPageList(dataP);
1251 int ioplFlags = uplFlags;
1252 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1253
1254 vm_size_t ioplSize = round_page_32(numBytes);
1255 unsigned int numPageInfo = atop_32(ioplSize);
1256
1257 if ((theMap == kernel_map) && (startPage < io_kernel_static_end))
1258 {
1259 error = io_get_kernel_static_upl(theMap,
1260 startPage,
1261 &ioplSize,
1262 &iopl.fIOPL,
1263 baseInfo,
1264 &numPageInfo,
1265 &ioplFlags,
1266 false);
1267
1268 } else if (sharedMem && (kIOMemoryPersistent & _flags)) {
1269
1270 error = memory_object_iopl_request(sharedMem,
1271 ptoa_32(pageIndex),
1272 &ioplSize,
1273 &iopl.fIOPL,
1274 baseInfo,
1275 &numPageInfo,
1276 &ioplFlags);
1277
1278 } else {
1279 error = vm_map_get_upl(theMap,
1280 startPage,
1281 &ioplSize,
1282 &iopl.fIOPL,
1283 baseInfo,
1284 &numPageInfo,
1285 &ioplFlags,
1286 false);
de355530
A
1287 }
1288
55e303ae
A
1289 assert(ioplSize);
1290 if (error != KERN_SUCCESS)
1291 goto abortExit;
1292
1293 error = kIOReturnNoMemory;
1294
1295 if (baseInfo->device) {
1296 numPageInfo = 1;
1297 iopl.fFlags = kIOPLOnDevice;
1298 // Don't translate device memory at all
1299 if (mapper && mapBase) {
1300 mapper->iovmFree(mapBase, _pages);
1301 mapBase = 0;
1302 iopl.fMappedBase = 0;
1303 }
1304 }
1305 else {
1306 iopl.fFlags = 0;
1307 if (mapper)
1308 mapper->iovmInsert(mapBase, pageIndex,
1309 baseInfo, numPageInfo);
1310 }
1311
1312 iopl.fIOMDOffset = mdOffset;
1313 iopl.fPageInfo = pageIndex;
1314
1315 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1316 {
1317 kernel_upl_commit(iopl.fIOPL, 0, 0);
1318 iopl.fIOPL = 0;
de355530 1319 }
55e303ae
A
1320
1321 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1322 // Clean up partial created and unsaved iopl
1323 if (iopl.fIOPL)
1324 kernel_upl_abort(iopl.fIOPL, 0);
1325 goto abortExit;
1326 }
1327
1328 // Check for a multiple iopl's in one virtual range
1329 pageIndex += numPageInfo;
1330 mdOffset -= iopl.fPageOffset;
1331 if (ioplSize < numBytes) {
1332 numBytes -= ioplSize;
1333 startPage += ioplSize;
1334 mdOffset += ioplSize;
1335 iopl.fPageOffset = 0;
1336 if (mapper)
1337 iopl.fMappedBase = mapBase + pageIndex;
1338 }
1339 else {
1340 mdOffset += numBytes;
1341 break;
1342 }
1c79356b
A
1343 }
1344 }
55e303ae 1345
1c79356b
A
1346 return kIOReturnSuccess;
1347
1348abortExit:
55e303ae
A
1349 {
1350 dataP = getDataP(_memoryEntries);
1351 UInt done = getNumIOPL(dataP, _memoryEntries->getLength());
1352 ioPLBlock *ioplList = getIOPLList(dataP);
1353
1354 for (UInt range = 0; range < done; range++)
1355 {
1356 if (ioplList[range].fIOPL)
1357 kernel_upl_abort(ioplList[range].fIOPL, 0);
1358 }
1c79356b 1359
55e303ae
A
1360 if (mapper && mapBase)
1361 mapper->iovmFree(mapBase, _pages);
1c79356b
A
1362 }
1363
55e303ae
A
1364 return error;
1365}
d7e50217 1366
55e303ae
A
1367/*
1368 * prepare
1369 *
1370 * Prepare the memory for an I/O transfer. This involves paging in
1371 * the memory, if necessary, and wiring it down for the duration of
1372 * the transfer. The complete() method completes the processing of
1373 * the memory after the I/O transfer finishes. This method needn't
1374 * called for non-pageable memory.
1375 */
1376IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1377{
1378 IOReturn error = kIOReturnSuccess;
1379
1380 if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
1381 error = wireVirtual(forDirection);
1382 if (error)
1383 return error;
de355530
A
1384 }
1385
55e303ae
A
1386 _wireCount++;
1387
1388 return kIOReturnSuccess;
1c79356b
A
1389}
1390
1391/*
1392 * complete
1393 *
1394 * Complete processing of the memory after an I/O transfer finishes.
1395 * This method should not be called unless a prepare was previously
1396 * issued; the prepare() and complete() must occur in pairs, before
1397 * before and after an I/O transfer involving pageable memory.
1398 */
1399
55e303ae 1400IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1c79356b
A
1401{
1402 assert(_wireCount);
1403
55e303ae 1404 if (!_wireCount)
1c79356b
A
1405 return kIOReturnSuccess;
1406
1407 _wireCount--;
55e303ae
A
1408 if (!_wireCount) {
1409 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1410 /* kIOMemoryTypePhysical */
1411 // DO NOTHING
d7e50217 1412 }
55e303ae
A
1413 else {
1414 ioGMDData * dataP = getDataP(_memoryEntries);
1415 ioPLBlock *ioplList = getIOPLList(dataP);
1416 UInt count = getNumIOPL(dataP, _memoryEntries->getLength());
1417
1418 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1419 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1420
1421 // Only complete iopls that we created which are for TypeVirtual
1422 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
1423 for (UInt ind = 0; ind < count; ind++)
1424 if (ioplList[ind].fIOPL)
1425 kernel_upl_commit(ioplList[ind].fIOPL, 0, 0);
1426 }
de355530 1427
55e303ae
A
1428 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1429 }
1c79356b
A
1430 }
1431 return kIOReturnSuccess;
1432}
1433
1434IOReturn IOGeneralMemoryDescriptor::doMap(
1435 vm_map_t addressMap,
1436 IOVirtualAddress * atAddress,
1437 IOOptionBits options,
55e303ae
A
1438 IOByteCount sourceOffset,
1439 IOByteCount length )
1c79356b
A
1440{
1441 kern_return_t kr;
0b4e3aa0 1442 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1c79356b
A
1443
1444 // mapping source == dest? (could be much better)
1445 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1446 && (1 == _rangesCount) && (0 == sourceOffset)
1447 && (length <= _ranges.v[0].length) ) {
1448 *atAddress = _ranges.v[0].address;
1449 return( kIOReturnSuccess );
1450 }
1451
0b4e3aa0 1452 if( 0 == sharedMem) {
1c79356b 1453
55e303ae 1454 vm_size_t size = _pages << PAGE_SHIFT;
1c79356b 1455
0b4e3aa0 1456 if( _task) {
9bccf70c
A
1457#ifndef i386
1458 vm_size_t actualSize = size;
1459 kr = mach_make_memory_entry( get_task_map(_task),
0b4e3aa0
A
1460 &actualSize, _ranges.v[0].address,
1461 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1462 NULL );
1463
55e303ae 1464 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
0b4e3aa0 1465#if IOASSERT
55e303ae 1466 IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
0b4e3aa0
A
1467 _ranges.v[0].address, (UInt32)actualSize, size);
1468#endif
1469 kr = kIOReturnVMError;
1470 ipc_port_release_send( sharedMem );
1c79356b
A
1471 }
1472
0b4e3aa0 1473 if( KERN_SUCCESS != kr)
9bccf70c 1474#endif /* i386 */
0b4e3aa0 1475 sharedMem = MACH_PORT_NULL;
1c79356b 1476
0b4e3aa0
A
1477 } else do {
1478
55e303ae
A
1479 memory_object_t pager;
1480 unsigned int flags = 0;
1481 addr64_t pa;
9bccf70c
A
1482 IOPhysicalLength segLen;
1483
55e303ae 1484 pa = getPhysicalSegment64( sourceOffset, &segLen );
0b4e3aa0
A
1485
1486 if( !reserved) {
1487 reserved = IONew( ExpansionData, 1 );
1488 if( !reserved)
1489 continue;
1490 }
1491 reserved->pagerContig = (1 == _rangesCount);
9bccf70c
A
1492 reserved->memory = this;
1493
55e303ae
A
1494 /*What cache mode do we need*/
1495 switch(options & kIOMapCacheMask ) {
9bccf70c
A
1496
1497 case kIOMapDefaultCache:
1498 default:
55e303ae
A
1499 flags = IODefaultCacheBits(pa);
1500 break;
9bccf70c
A
1501
1502 case kIOMapInhibitCache:
55e303ae
A
1503 flags = DEVICE_PAGER_CACHE_INHIB |
1504 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1505 break;
9bccf70c
A
1506
1507 case kIOMapWriteThruCache:
55e303ae
A
1508 flags = DEVICE_PAGER_WRITE_THROUGH |
1509 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1510 break;
9bccf70c
A
1511
1512 case kIOMapCopybackCache:
55e303ae
A
1513 flags = DEVICE_PAGER_COHERENT;
1514 break;
1515
1516 case kIOMapWriteCombineCache:
1517 flags = DEVICE_PAGER_CACHE_INHIB |
1518 DEVICE_PAGER_COHERENT;
1519 break;
9bccf70c
A
1520 }
1521
1522 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
9bccf70c
A
1523
1524 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1525 size, flags);
0b4e3aa0
A
1526 assert( pager );
1527
1528 if( pager) {
0b4e3aa0
A
1529 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1530 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1531
1532 assert( KERN_SUCCESS == kr );
1533 if( KERN_SUCCESS != kr) {
9bccf70c 1534 device_pager_deallocate( pager );
0b4e3aa0
A
1535 pager = MACH_PORT_NULL;
1536 sharedMem = MACH_PORT_NULL;
1537 }
1538 }
9bccf70c
A
1539 if( pager && sharedMem)
1540 reserved->devicePager = pager;
1541 else {
1542 IODelete( reserved, ExpansionData, 1 );
1543 reserved = 0;
1544 }
1c79356b 1545
1c79356b
A
1546 } while( false );
1547
0b4e3aa0
A
1548 _memEntry = (void *) sharedMem;
1549 }
1550
9bccf70c
A
1551#ifndef i386
1552 if( 0 == sharedMem)
1553 kr = kIOReturnVMError;
1554 else
1555#endif
1556 kr = super::doMap( addressMap, atAddress,
1c79356b 1557 options, sourceOffset, length );
0b4e3aa0 1558
1c79356b
A
1559 return( kr );
1560}
1561
1562IOReturn IOGeneralMemoryDescriptor::doUnmap(
1563 vm_map_t addressMap,
1564 IOVirtualAddress logical,
1565 IOByteCount length )
1566{
1567 // could be much better
55e303ae 1568 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)
1c79356b
A
1569 && (logical == _ranges.v[0].address)
1570 && (length <= _ranges.v[0].length) )
1571 return( kIOReturnSuccess );
1572
1573 return( super::doUnmap( addressMap, logical, length ));
1574}
1575
1576/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1577
1578extern "C" {
1579// osfmk/device/iokit_rpc.c
1580extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1581 vm_size_t length, unsigned int mapFlags);
e3027f41 1582extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
1c79356b
A
1583};
1584
1585/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1586
9bccf70c 1587OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1c79356b 1588
9bccf70c
A
1589/* inline function implementation */
1590IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1591 { return( getPhysicalSegment( 0, 0 )); }
1c79356b
A
1592
1593/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1594
1595class _IOMemoryMap : public IOMemoryMap
1596{
1597 OSDeclareDefaultStructors(_IOMemoryMap)
1598
1599 IOMemoryDescriptor * memory;
1600 IOMemoryMap * superMap;
1601 IOByteCount offset;
1602 IOByteCount length;
1603 IOVirtualAddress logical;
1604 task_t addressTask;
1605 vm_map_t addressMap;
1606 IOOptionBits options;
1607
9bccf70c
A
1608protected:
1609 virtual void taggedRelease(const void *tag = 0) const;
1c79356b
A
1610 virtual void free();
1611
9bccf70c
A
1612public:
1613
1c79356b
A
1614 // IOMemoryMap methods
1615 virtual IOVirtualAddress getVirtualAddress();
1616 virtual IOByteCount getLength();
1617 virtual task_t getAddressTask();
1618 virtual IOMemoryDescriptor * getMemoryDescriptor();
1619 virtual IOOptionBits getMapOptions();
1620
1621 virtual IOReturn unmap();
1622 virtual void taskDied();
1623
1624 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1625 IOByteCount * length);
1626
1627 // for IOMemoryDescriptor use
9bccf70c 1628 _IOMemoryMap * copyCompatible(
1c79356b
A
1629 IOMemoryDescriptor * owner,
1630 task_t intoTask,
1631 IOVirtualAddress toAddress,
1632 IOOptionBits options,
1633 IOByteCount offset,
1634 IOByteCount length );
1635
9bccf70c 1636 bool initCompatible(
1c79356b
A
1637 IOMemoryDescriptor * memory,
1638 IOMemoryMap * superMap,
1639 IOByteCount offset,
1640 IOByteCount length );
1641
9bccf70c 1642 bool initWithDescriptor(
1c79356b
A
1643 IOMemoryDescriptor * memory,
1644 task_t intoTask,
1645 IOVirtualAddress toAddress,
1646 IOOptionBits options,
1647 IOByteCount offset,
1648 IOByteCount length );
e3027f41
A
1649
1650 IOReturn redirect(
1651 task_t intoTask, bool redirect );
1c79356b
A
1652};
1653
1654/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1655
1656#undef super
1657#define super IOMemoryMap
1658
1659OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1660
1661/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1662
9bccf70c 1663bool _IOMemoryMap::initCompatible(
1c79356b
A
1664 IOMemoryDescriptor * _memory,
1665 IOMemoryMap * _superMap,
1666 IOByteCount _offset,
1667 IOByteCount _length )
1668{
1669
1670 if( !super::init())
1671 return( false);
1672
1673 if( (_offset + _length) > _superMap->getLength())
1674 return( false);
1675
1676 _memory->retain();
1677 memory = _memory;
1678 _superMap->retain();
1679 superMap = _superMap;
1680
1681 offset = _offset;
1682 if( _length)
1683 length = _length;
1684 else
1685 length = _memory->getLength();
1686
1687 options = superMap->getMapOptions();
1688 logical = superMap->getVirtualAddress() + offset;
1689
1690 return( true );
1691}
1692
9bccf70c 1693bool _IOMemoryMap::initWithDescriptor(
1c79356b
A
1694 IOMemoryDescriptor * _memory,
1695 task_t intoTask,
1696 IOVirtualAddress toAddress,
1697 IOOptionBits _options,
1698 IOByteCount _offset,
1699 IOByteCount _length )
1700{
1701 bool ok;
1702
1703 if( (!_memory) || (!intoTask) || !super::init())
1704 return( false);
1705
1706 if( (_offset + _length) > _memory->getLength())
1707 return( false);
1708
1709 addressMap = get_task_map(intoTask);
1710 if( !addressMap)
1711 return( false);
9bccf70c 1712 vm_map_reference(addressMap);
1c79356b
A
1713
1714 _memory->retain();
1715 memory = _memory;
1716
1717 offset = _offset;
1718 if( _length)
1719 length = _length;
1720 else
1721 length = _memory->getLength();
1722
1723 addressTask = intoTask;
1724 logical = toAddress;
1725 options = _options;
1726
1727 if( options & kIOMapStatic)
1728 ok = true;
1729 else
1730 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1731 options, offset, length ));
1732 if( !ok) {
1733 logical = 0;
e3027f41
A
1734 memory->release();
1735 memory = 0;
1c79356b
A
1736 vm_map_deallocate(addressMap);
1737 addressMap = 0;
1738 }
1739 return( ok );
1740}
1741
0b4e3aa0
A
1742struct IOMemoryDescriptorMapAllocRef
1743{
1744 ipc_port_t sharedMem;
1745 vm_size_t size;
1746 vm_offset_t mapped;
1747 IOByteCount sourceOffset;
1748 IOOptionBits options;
1749};
1750
1751static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
1752{
1753 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
1754 IOReturn err;
1755
1756 do {
1757 if( ref->sharedMem) {
1758 vm_prot_t prot = VM_PROT_READ
1759 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
55e303ae
A
1760
1761 // set memory entry cache
1762 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
1763 switch (ref->options & kIOMapCacheMask)
1764 {
1765 case kIOMapInhibitCache:
1766 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
1767 break;
1768
1769 case kIOMapWriteThruCache:
1770 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
1771 break;
1772
1773 case kIOMapWriteCombineCache:
1774 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
1775 break;
1776
1777 case kIOMapCopybackCache:
1778 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
1779 break;
1780
1781 case kIOMapDefaultCache:
1782 default:
1783 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
1784 break;
1785 }
1786
1787 vm_size_t unused = 0;
1788
1789 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
1790 memEntryCacheMode, NULL, ref->sharedMem );
1791 if (KERN_SUCCESS != err)
1792 IOLog("MAP_MEM_ONLY failed %d\n", err);
1793
0b4e3aa0
A
1794 err = vm_map( map,
1795 &ref->mapped,
1796 ref->size, 0 /* mask */,
1797 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1798 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1799 ref->sharedMem, ref->sourceOffset,
1800 false, // copy
1801 prot, // cur
1802 prot, // max
1803 VM_INHERIT_NONE);
55e303ae 1804
0b4e3aa0
A
1805 if( KERN_SUCCESS != err) {
1806 ref->mapped = 0;
1807 continue;
1808 }
1809
1810 } else {
1811
1812 err = vm_allocate( map, &ref->mapped, ref->size,
1813 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1814 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1815
1816 if( KERN_SUCCESS != err) {
1817 ref->mapped = 0;
1818 continue;
1819 }
1820
1821 // we have to make sure that these guys don't get copied if we fork.
1822 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
1823 assert( KERN_SUCCESS == err );
1824 }
1825
1826 } while( false );
1827
1828 return( err );
1829}
1830
9bccf70c 1831
1c79356b
A
1832IOReturn IOMemoryDescriptor::doMap(
1833 vm_map_t addressMap,
1834 IOVirtualAddress * atAddress,
1835 IOOptionBits options,
55e303ae
A
1836 IOByteCount sourceOffset,
1837 IOByteCount length )
1c79356b
A
1838{
1839 IOReturn err = kIOReturnSuccess;
0b4e3aa0 1840 memory_object_t pager;
1c79356b
A
1841 vm_address_t logical;
1842 IOByteCount pageOffset;
0b4e3aa0
A
1843 IOPhysicalAddress sourceAddr;
1844 IOMemoryDescriptorMapAllocRef ref;
1c79356b 1845
0b4e3aa0
A
1846 ref.sharedMem = (ipc_port_t) _memEntry;
1847 ref.sourceOffset = sourceOffset;
1848 ref.options = options;
1c79356b 1849
0b4e3aa0 1850 do {
1c79356b 1851
0b4e3aa0
A
1852 if( 0 == length)
1853 length = getLength();
1c79356b 1854
0b4e3aa0
A
1855 sourceAddr = getSourceSegment( sourceOffset, NULL );
1856 assert( sourceAddr );
55e303ae 1857 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
1c79356b 1858
55e303ae 1859 ref.size = round_page_32( length + pageOffset );
0b4e3aa0
A
1860
1861 logical = *atAddress;
1862 if( options & kIOMapAnywhere)
1863 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1864 ref.mapped = 0;
1865 else {
55e303ae 1866 ref.mapped = trunc_page_32( logical );
0b4e3aa0
A
1867 if( (logical - ref.mapped) != pageOffset) {
1868 err = kIOReturnVMError;
1869 continue;
1870 }
1871 }
1872
55e303ae 1873 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
0b4e3aa0
A
1874 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1875 else
1876 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
1877
1878 if( err != KERN_SUCCESS)
1879 continue;
1880
1881 if( reserved)
1882 pager = (memory_object_t) reserved->devicePager;
1883 else
1884 pager = MACH_PORT_NULL;
1885
1886 if( !ref.sharedMem || pager )
1887 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
1888
1889 } while( false );
1890
1891 if( err != KERN_SUCCESS) {
1892 if( ref.mapped)
1893 doUnmap( addressMap, ref.mapped, ref.size );
1894 *atAddress = NULL;
1895 } else
1896 *atAddress = ref.mapped + pageOffset;
1897
1898 return( err );
1899}
1900
1901enum {
1902 kIOMemoryRedirected = 0x00010000
1903};
1904
1905IOReturn IOMemoryDescriptor::handleFault(
1906 void * _pager,
1907 vm_map_t addressMap,
1908 IOVirtualAddress address,
1909 IOByteCount sourceOffset,
1910 IOByteCount length,
1911 IOOptionBits options )
1912{
1913 IOReturn err = kIOReturnSuccess;
1914 memory_object_t pager = (memory_object_t) _pager;
1915 vm_size_t size;
1916 vm_size_t bytes;
1917 vm_size_t page;
1918 IOByteCount pageOffset;
55e303ae 1919 IOByteCount pagerOffset;
0b4e3aa0 1920 IOPhysicalLength segLen;
55e303ae 1921 addr64_t physAddr;
1922
1923 if( !addressMap) {
1924
0b4e3aa0 1925 if( kIOMemoryRedirected & _flags) {
1c79356b 1926#ifdef DEBUG
9bccf70c 1927 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
1c79356b 1928#endif
0b4e3aa0 1929 do {
9bccf70c 1930 SLEEP;
1931 } while( kIOMemoryRedirected & _flags );
1932 }
1c79356b 1933
0b4e3aa0 1934 return( kIOReturnSuccess );
1935 }
1936
55e303ae 1937 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
0b4e3aa0 1938 assert( physAddr );
1939 pageOffset = physAddr - trunc_page_64( physAddr );
1940 pagerOffset = sourceOffset;
1941
1942 size = length + pageOffset;
1943 physAddr -= pageOffset;
1944
1945 segLen += pageOffset;
0b4e3aa0 1946 bytes = size;
1947 do {
1948 // in the middle of the loop only map whole pages
1949 if( segLen >= bytes)
1950 segLen = bytes;
55e303ae 1951 else if( segLen != trunc_page_32( segLen))
1c79356b 1952 err = kIOReturnVMError;
55e303ae 1953 if( physAddr != trunc_page_64( physAddr))
1954 err = kIOReturnBadArgument;
1955
1956#ifdef DEBUG
1957 if( kIOLogMapping & gIOKitDebug)
55e303ae 1958 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
0b4e3aa0 1959 addressMap, address + pageOffset, physAddr + pageOffset,
1960 segLen - pageOffset);
1961#endif
1962
1963
1964
1965
1966
1967#ifdef i386
1968 /* i386 doesn't support faulting on device memory yet */
0b4e3aa0 1969 if( addressMap && (kIOReturnSuccess == err))
55e303ae 1970 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
0b4e3aa0 1971 assert( KERN_SUCCESS == err );
1972 if( err)
1973 break;
9bccf70c 1974#endif
1c79356b 1975
1976 if( pager) {
1977 if( reserved && reserved->pagerContig) {
1978 IOPhysicalLength allLen;
55e303ae 1979 addr64_t allPhys;
0b4e3aa0 1980
55e303ae 1981 allPhys = getPhysicalSegment64( 0, &allLen );
0b4e3aa0 1982 assert( allPhys );
55e303ae 1983 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
1984
1985 } else {
1986
55e303ae 1987 for( page = 0;
1988 (page < segLen) && (KERN_SUCCESS == err);
1989 page += page_size) {
1990 err = device_pager_populate_object(pager, pagerOffset,
1991 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
1992 pagerOffset += page_size;
1993 }
1994 }
1995 assert( KERN_SUCCESS == err );
1996 if( err)
1997 break;
1998 }
1999#ifndef i386
2000 /* *** ALERT *** */
2001 /* *** Temporary Workaround *** */
2002
2003 /* This call to vm_fault causes an early pmap level resolution */
2004 /* of the mappings created above. Need for this is in absolute */
2005 /* violation of the basic tenet that the pmap layer is a cache. */
2006 /* Further, it implies a serious I/O architectural violation on */
2007 /* the part of some user of the mapping. As of this writing, */
2008 /* the call to vm_fault is needed because the NVIDIA driver */
2009 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2010 /* fixed as soon as possible. The NVIDIA driver should not */
2011 /* need to query for this info as it should know from the doMap */
2012 /* call where the physical memory is mapped. When a query is */
2013 /* necessary to find a physical mapping, it should be done */
2014 /* through an iokit call which includes the mapped memory */
2015 /* handle. This is required for machine architecture independence.*/
2016
2017 if(!(kIOMemoryRedirected & _flags)) {
2018 vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
2019 }
2020
2021 /* *** Temporary Workaround *** */
2022 /* *** ALERT *** */
2023#endif
1c79356b 2024 sourceOffset += segLen - pageOffset;
0b4e3aa0 2025 address += segLen;
2026 bytes -= segLen;
2027 pageOffset = 0;
2028
2029 } while( bytes
55e303ae 2030 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2031
2032 if( bytes)
2033 err = kIOReturnBadArgument;
2034
2035 return( err );
2036}
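/*
 * Sketch of the device pager populate step in handleFault() above for the
 * non-contiguous case (hypothetical addresses, illustration only). With a
 * 0x1000 page size, a segment of length 0x3000 at physAddr 0x08300000 is
 * handed to the pager one page at a time:
 *
 *     device_pager_populate_object(pager, pagerOffset,          0x08300, 0x1000);
 *     device_pager_populate_object(pager, pagerOffset + 0x1000, 0x08301, 0x1000);
 *     device_pager_populate_object(pager, pagerOffset + 0x2000, 0x08302, 0x1000);
 *
 * i.e. each call passes the physical page number (physAddr + page) >> PAGE_SHIFT
 * for the next page_size bytes, while the contiguous case
 * (reserved->pagerContig) populates the whole object with a single call.
 */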
2037
2038IOReturn IOMemoryDescriptor::doUnmap(
2039 vm_map_t addressMap,
2040 IOVirtualAddress logical,
2041 IOByteCount length )
2042{
2043 IOReturn err;
2044
2045#ifdef DEBUG
2046 if( kIOLogMapping & gIOKitDebug)
2047 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2048 addressMap, logical, length );
2049#endif
2050
90556fb8 2051 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
0b4e3aa0 2052
55e303ae 2053 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2054 addressMap = IOPageableMapForAddress( logical );
2055
1c79356b 2056 err = vm_deallocate( addressMap, logical, length );
2057
2058 } else
2059 err = kIOReturnSuccess;
2060
2061 return( err );
2062}
2063
2064IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
2065{
2066 IOReturn err;
2067 _IOMemoryMap * mapping = 0;
2068 OSIterator * iter;
2069
2070 LOCK;
2071
2072 do {
2073 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2074 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2075 mapping->redirect( safeTask, redirect );
2076
2077 iter->release();
2078 }
2079 } while( false );
2080
2081 if( redirect)
2082 _flags |= kIOMemoryRedirected;
2083 else {
2084 _flags &= ~kIOMemoryRedirected;
9bccf70c 2085 WAKEUP;
2086 }
2087
2088 UNLOCK;
2089
2090 // temporary binary compatibility
2091 IOSubMemoryDescriptor * subMem;
2092 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2093 err = subMem->redirect( safeTask, redirect );
2094 else
2095 err = kIOReturnSuccess;
2096
2097 return( err );
2098}
2099
2100IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
2101{
2102 return( _parent->redirect( safeTask, redirect ));
2103}
2104
2105IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
2106{
2107 IOReturn err = kIOReturnSuccess;
2108
2109 if( superMap) {
2110// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
2111 } else {
2112
2113 LOCK;
2114 if( logical && addressMap
2115 && (get_task_map( safeTask) != addressMap)
2116 && (0 == (options & kIOMapStatic))) {
9bccf70c 2117
2118 IOUnmapPages( addressMap, logical, length );
2119 if( !redirect) {
2120 err = vm_deallocate( addressMap, logical, length );
2121 err = memory->doMap( addressMap, &logical,
2122 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2123 offset, length );
2124 } else
2125 err = kIOReturnSuccess;
2126#ifdef DEBUG
9bccf70c 2127 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
2128#endif
2129 }
2130 UNLOCK;
2131 }
2132
2133 return( err );
2134}
2135
2136IOReturn _IOMemoryMap::unmap( void )
2137{
2138 IOReturn err;
2139
2140 LOCK;
2141
2142 if( logical && addressMap && (0 == superMap)
2143 && (0 == (options & kIOMapStatic))) {
2144
2145 err = memory->doUnmap( addressMap, logical, length );
2146 vm_map_deallocate(addressMap);
2147 addressMap = 0;
2148
2149 } else
2150 err = kIOReturnSuccess;
2151
2152 logical = 0;
2153
2154 UNLOCK;
2155
2156 return( err );
2157}
2158
2159void _IOMemoryMap::taskDied( void )
2160{
2161 LOCK;
2162 if( addressMap) {
2163 vm_map_deallocate(addressMap);
2164 addressMap = 0;
2165 }
2166 addressTask = 0;
2167 logical = 0;
2168 UNLOCK;
2169}
2170
2171// Overload the release mechanism. Every mapping must be a member of
2172// its memory descriptor's _mappings set, so there are always two
2173// references on a mapping. When either reference is released we need
2174// to free ourselves.
55e303ae 2175void _IOMemoryMap::taggedRelease(const void *tag) const
9bccf70c 2176{
55e303ae 2177 LOCK;
9bccf70c 2178 super::taggedRelease(tag, 2);
55e303ae 2179 UNLOCK;
2180}
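/*
 * Lifetime sketch for the tagged-release scheme above (hypothetical client
 * code, not part of this file). A mapping always carries two references:
 * the client's and the one held by the owning descriptor's _mappings set,
 * so passing 2 to super::taggedRelease() makes the map free itself as soon
 * as either holder lets go; free() then unmaps and removes it from the set.
 *
 *     IOMemoryMap * map = md->map(0);  // retained by the caller and by _mappings
 *     ...
 *     map->release();                  // count would drop below 2 -> free() runs
 */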
2181
2182void _IOMemoryMap::free()
2183{
2184 unmap();
2185
2186 if( memory) {
2187 LOCK;
2188 memory->removeMapping( this);
2189 UNLOCK;
2190 memory->release();
2191 }
2192
2193 if( superMap)
2194 superMap->release();
2195
2196 super::free();
2197}
2198
2199IOByteCount _IOMemoryMap::getLength()
2200{
2201 return( length );
2202}
2203
2204IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2205{
2206 return( logical);
2207}
2208
2209task_t _IOMemoryMap::getAddressTask()
2210{
2211 if( superMap)
2212 return( superMap->getAddressTask());
2213 else
2214 return( addressTask);
2215}
2216
2217IOOptionBits _IOMemoryMap::getMapOptions()
2218{
2219 return( options);
2220}
2221
2222IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2223{
2224 return( memory );
2225}
2226
9bccf70c 2227_IOMemoryMap * _IOMemoryMap::copyCompatible(
2228 IOMemoryDescriptor * owner,
2229 task_t task,
2230 IOVirtualAddress toAddress,
2231 IOOptionBits _options,
2232 IOByteCount _offset,
2233 IOByteCount _length )
2234{
2235 _IOMemoryMap * mapping;
2236
55e303ae 2237 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
1c79356b 2238 return( 0 );
2239 if( (options ^ _options) & kIOMapReadOnly)
2240 return( 0 );
2241 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2242 && ((options ^ _options) & kIOMapCacheMask))
2243 return( 0 );
2244
2245 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2246 return( 0 );
2247
2248 if( _offset < offset)
2249 return( 0 );
2250
2251 _offset -= offset;
2252
2253 if( (_offset + _length) > length)
2254 return( 0 );
2255
2256 if( (length == _length) && (!_offset)) {
2257 retain();
2258 mapping = this;
2259
2260 } else {
2261 mapping = new _IOMemoryMap;
2262 if( mapping
9bccf70c 2263 && !mapping->initCompatible( owner, this, _offset, _length )) {
2264 mapping->release();
2265 mapping = 0;
2266 }
2267 }
2268
2269 return( mapping );
2270}
2271
2272IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2273 IOPhysicalLength * length)
2274{
2275 IOPhysicalAddress address;
2276
2277 LOCK;
2278 address = memory->getPhysicalSegment( offset + _offset, length );
2279 UNLOCK;
2280
2281 return( address );
2282}
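/*
 * Usage sketch (hypothetical driver code, not part of this file): walking
 * a mapping's physical segments through the accessor above, e.g. to build
 * a scatter/gather list for a DMA engine.
 *
 *     IOByteCount       pos = 0;
 *     IOPhysicalLength  segLen;
 *     IOPhysicalAddress phys;
 *     while ((phys = map->getPhysicalSegment(pos, &segLen))) {
 *         // phys .. phys + segLen - 1 is physically contiguous
 *         pos += segLen;
 *     }
 */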
2283
2284/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2285
2286#undef super
2287#define super OSObject
2288
2289/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2290
2291void IOMemoryDescriptor::initialize( void )
2292{
2293 if( 0 == gIOMemoryLock)
2294 gIOMemoryLock = IORecursiveLockAlloc();
2295
2296 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2297 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2298}
2299
2300void IOMemoryDescriptor::free( void )
2301{
2302 if( _mappings)
2303 _mappings->release();
2304
2305 super::free();
2306}
2307
2308IOMemoryMap * IOMemoryDescriptor::setMapping(
2309 task_t intoTask,
2310 IOVirtualAddress mapAddress,
55e303ae 2311 IOOptionBits options )
2312{
2313 _IOMemoryMap * map;
2314
2315 map = new _IOMemoryMap;
2316
2317 LOCK;
2318
2319 if( map
9bccf70c 2320 && !map->initWithDescriptor( this, intoTask, mapAddress,
2321 options | kIOMapStatic, 0, getLength() )) {
2322 map->release();
2323 map = 0;
2324 }
2325
2326 addMapping( map);
2327
2328 UNLOCK;
2329
2330 return( map);
2331}
2332
2333IOMemoryMap * IOMemoryDescriptor::map(
55e303ae 2334 IOOptionBits options )
2335{
2336
2337 return( makeMapping( this, kernel_task, 0,
2338 options | kIOMapAnywhere,
2339 0, getLength() ));
2340}
2341
2342IOMemoryMap * IOMemoryDescriptor::map(
2343 task_t intoTask,
2344 IOVirtualAddress toAddress,
2345 IOOptionBits options,
2346 IOByteCount offset,
2347 IOByteCount length )
2348{
2349 if( 0 == length)
2350 length = getLength();
2351
2352 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2353}
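/*
 * Usage sketch for the mapping entry points above (hypothetical driver
 * code; 'md' and 'userTask' are placeholders, not part of this file). The
 * short form maps the whole descriptor anywhere in the kernel map; the
 * long form targets another task:
 *
 *     IOMemoryMap * kmap = md->map(0 /* options */);
 *     IOMemoryMap * umap = md->map(userTask, 0, kIOMapAnywhere, 0, md->getLength());
 *     if (umap) {
 *         IOVirtualAddress uva = umap->getVirtualAddress();
 *         ...
 *         umap->release();
 *     }
 *     if (kmap)
 *         kmap->release();
 */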
2354
2355IOMemoryMap * IOMemoryDescriptor::makeMapping(
2356 IOMemoryDescriptor * owner,
2357 task_t intoTask,
2358 IOVirtualAddress toAddress,
2359 IOOptionBits options,
2360 IOByteCount offset,
2361 IOByteCount length )
2362{
2363 _IOMemoryMap * mapping = 0;
2364 OSIterator * iter;
2365
2366 LOCK;
2367
2368 do {
2369 // look for an existing mapping
2370 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2371
2372 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2373
9bccf70c 2374 if( (mapping = mapping->copyCompatible(
2375 owner, intoTask, toAddress,
2376 options | kIOMapReference,
2377 offset, length )))
2378 break;
2379 }
2380 iter->release();
2381 if( mapping)
2382 continue;
2383 }
2384
2385
2386 if( mapping || (options & kIOMapReference))
2387 continue;
2388
2389 owner = this;
2390
2391 mapping = new _IOMemoryMap;
2392 if( mapping
9bccf70c 2393 && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
1c79356b 2394 offset, length )) {
9bccf70c 2395#ifdef DEBUG
1c79356b 2396 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
9bccf70c 2397#endif
2398 mapping->release();
2399 mapping = 0;
2400 }
2401
2402 } while( false );
2403
2404 owner->addMapping( mapping);
2405
2406 UNLOCK;
2407
2408 return( mapping);
2409}
2410
2411void IOMemoryDescriptor::addMapping(
2412 IOMemoryMap * mapping )
2413{
2414 if( mapping) {
2415 if( 0 == _mappings)
2416 _mappings = OSSet::withCapacity(1);
2417 if( _mappings )
2418 _mappings->setObject( mapping );
2419 }
2420}
2421
2422void IOMemoryDescriptor::removeMapping(
2423 IOMemoryMap * mapping )
2424{
9bccf70c 2425 if( _mappings)
1c79356b 2426 _mappings->removeObject( mapping);
2427}
2428
2429/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2430
2431#undef super
2432#define super IOMemoryDescriptor
2433
2434OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2435
2436/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2437
2438bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2439 IOByteCount offset, IOByteCount length,
55e303ae 2440 IODirection direction )
1c79356b 2441{
2442 if( !parent)
2443 return( false);
2444
2445 if( (offset + length) > parent->getLength())
2446 return( false);
2447
2448 /*
2449 * We can check the _parent instance variable before having ever set it
2450 * to an initial value because I/O Kit guarantees that all our instance
2451 * variables are zeroed on an object's allocation.
2452 */
2453
2454 if( !_parent) {
2455 if( !super::init())
2456 return( false );
2457 } else {
2458 /*
2459 * An existing memory descriptor is being retargeted to
2460 * point to somewhere else. Clean up our present state.
2461 */
2462
2463 _parent->release();
2464 _parent = 0;
2465 }
2466
2467 parent->retain();
2468 _parent = parent;
2469 _start = offset;
2470 _length = length;
55e303ae 2471 _direction = direction;
2472 _tag = parent->getTag();
2473
2474 return( true );
2475}
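/*
 * Usage sketch (hypothetical, not part of this file): carving a window out
 * of an existing descriptor with the initializer above. 'parentMD' is a
 * placeholder for a descriptor obtained elsewhere.
 *
 *     IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
 *     if (sub && !sub->initSubRange(parentMD, 4096, 8192, parentMD->getDirection())) {
 *         sub->release();
 *         sub = 0;
 *     }
 *     // 'sub' now presents bytes 4096..12287 of parentMD and keeps a
 *     // retain on it until 'sub' itself is released.
 */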
2476
2477void IOSubMemoryDescriptor::free( void )
2478{
2479 if( _parent)
2480 _parent->release();
2481
2482 super::free();
2483}
2484
2485
2486IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2487 IOByteCount * length )
2488{
2489 IOPhysicalAddress address;
2490 IOByteCount actualLength;
2491
2492 assert(offset <= _length);
2493
2494 if( length)
2495 *length = 0;
2496
2497 if( offset >= _length)
2498 return( 0 );
2499
2500 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2501
2502 if( address && length)
2503 *length = min( _length - offset, actualLength );
2504
2505 return( address );
2506}
2507
2508IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2509 IOByteCount * length )
2510{
2511 IOPhysicalAddress address;
2512 IOByteCount actualLength;
2513
2514 assert(offset <= _length);
2515
2516 if( length)
2517 *length = 0;
2518
2519 if( offset >= _length)
2520 return( 0 );
2521
2522 address = _parent->getSourceSegment( offset + _start, &actualLength );
2523
2524 if( address && length)
2525 *length = min( _length - offset, actualLength );
2526
2527 return( address );
2528}
2529
2530void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2531 IOByteCount * lengthOfSegment)
2532{
2533 return( 0 );
2534}
2535
2536IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
55e303ae 2537 void * bytes, IOByteCount length)
2538{
2539 IOByteCount byteCount;
2540
2541 assert(offset <= _length);
2542
2543 if( offset >= _length)
2544 return( 0 );
2545
2546 LOCK;
2547 byteCount = _parent->readBytes( _start + offset, bytes,
55e303ae 2548 min(length, _length - offset) );
2549 UNLOCK;
2550
2551 return( byteCount );
2552}
2553
2554IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
55e303ae 2555 const void* bytes, IOByteCount length)
2556{
2557 IOByteCount byteCount;
2558
2559 assert(offset <= _length);
2560
2561 if( offset >= _length)
2562 return( 0 );
2563
2564 LOCK;
2565 byteCount = _parent->writeBytes( _start + offset, bytes,
55e303ae 2566 min(length, _length - offset) );
2567 UNLOCK;
2568
2569 return( byteCount );
2570}
2571
2572IOReturn IOSubMemoryDescriptor::prepare(
55e303ae 2573 IODirection forDirection)
2574{
2575 IOReturn err;
2576
2577 LOCK;
2578 err = _parent->prepare( forDirection);
2579 UNLOCK;
2580
2581 return( err );
2582}
2583
2584IOReturn IOSubMemoryDescriptor::complete(
55e303ae 2585 IODirection forDirection)
2586{
2587 IOReturn err;
2588
2589 LOCK;
2590 err = _parent->complete( forDirection);
2591 UNLOCK;
2592
2593 return( err );
2594}
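/*
 * Usage sketch for the prepare()/complete() pass-throughs above
 * (hypothetical, not part of this file). I/O against a sub-descriptor is
 * bracketed exactly like I/O against its parent, since both calls simply
 * forward to _parent:
 *
 *     if (kIOReturnSuccess == sub->prepare(kIODirectionNone)) {  // kIODirectionNone: use the descriptor's own direction
 *         // ... perform the transfer ...
 *         sub->complete(kIODirectionNone);
 *     }
 */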
2595
2596IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
2597 IOMemoryDescriptor * owner,
2598 task_t intoTask,
2599 IOVirtualAddress toAddress,
2600 IOOptionBits options,
2601 IOByteCount offset,
2602 IOByteCount length )
2603{
2604 IOMemoryMap * mapping;
2605
2606 mapping = (IOMemoryMap *) _parent->makeMapping(
2607 _parent, intoTask,
2608 toAddress - (_start + offset),
2609 options | kIOMapReference,
2610 _start + offset, length );
2611
2612 if( !mapping)
2613 mapping = (IOMemoryMap *) _parent->makeMapping(
2614 _parent, intoTask,
2615 toAddress,
2616 options, _start + offset, length );
2617
2618 if( !mapping)
2619 mapping = super::makeMapping( owner, intoTask, toAddress, options,
2620 offset, length );
2621
2622 return( mapping );
2623}
2624
2625/* ick */
2626
2627bool
2628IOSubMemoryDescriptor::initWithAddress(void * address,
2629 IOByteCount length,
2630 IODirection direction)
2631{
2632 return( false );
2633}
2634
2635bool
2636IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
2637 IOByteCount length,
2638 IODirection direction,
2639 task_t task)
2640{
2641 return( false );
2642}
2643
2644bool
2645IOSubMemoryDescriptor::initWithPhysicalAddress(
2646 IOPhysicalAddress address,
2647 IOByteCount length,
2648 IODirection direction )
2649{
2650 return( false );
2651}
2652
2653bool
2654IOSubMemoryDescriptor::initWithRanges(
2655 IOVirtualRange * ranges,
2656 UInt32 withCount,
2657 IODirection direction,
2658 task_t task,
2659 bool asReference)
2660{
2661 return( false );
2662}
2663
2664bool
2665IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
2666 UInt32 withCount,
2667 IODirection direction,
2668 bool asReference)
2669{
2670 return( false );
2671}
2672
2673/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2674
2675bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
2676{
2677 OSSymbol const *keys[2];
2678 OSObject *values[2];
2679 IOVirtualRange *vcopy;
2680 unsigned int index, nRanges;
2681 bool result;
2682
2683 if (s == NULL) return false;
2684 if (s->previouslySerialized(this)) return true;
2685
2686 // Pretend we are an array.
2687 if (!s->addXMLStartTag(this, "array")) return false;
2688
2689 nRanges = _rangesCount;
2690 vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
2691 if (vcopy == 0) return false;
2692
2693 keys[0] = OSSymbol::withCString("address");
2694 keys[1] = OSSymbol::withCString("length");
2695
2696 result = false;
2697 values[0] = values[1] = 0;
2698
2699 // From this point on we can go to bail.
2700
2701 // Copy the volatile data so we don't have to allocate memory
2702 // while the lock is held.
2703 LOCK;
2704 if (nRanges == _rangesCount) {
2705 for (index = 0; index < nRanges; index++) {
2706 vcopy[index] = _ranges.v[index];
2707 }
2708 } else {
2709 // The descriptor changed out from under us. Give up.
2710 UNLOCK;
2711 result = false;
2712 goto bail;
2713 }
2714 UNLOCK;
2715
2716 for (index = 0; index < nRanges; index++)
2717 {
2718 	values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
2719 if (values[0] == 0) {
2720 result = false;
2721 goto bail;
2722 }
2723 	values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
2724 if (values[1] == 0) {
2725 result = false;
2726 goto bail;
2727 }
2728 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
2729 if (dict == 0) {
2730 result = false;
2731 goto bail;
2732 }
2733 values[0]->release();
2734 values[1]->release();
2735 values[0] = values[1] = 0;
2736
2737 result = dict->serialize(s);
2738 dict->release();
2739 if (!result) {
2740 goto bail;
2741 }
2742 }
2743 result = s->addXMLEndTag("array");
2744
2745 bail:
2746 if (values[0])
2747 values[0]->release();
2748 if (values[1])
2749 values[1]->release();
2750 if (keys[0])
2751 keys[0]->release();
2752 if (keys[1])
2753 keys[1]->release();
2754 if (vcopy)
2755 IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
2756 return result;
2757}
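/*
 * Rough shape of the output produced by serialize() above (illustrative
 * only; the numbers are hypothetical). The descriptor pretends to be an
 * array, and each range becomes a dictionary holding "address" and
 * "length" OSNumbers:
 *
 *     <array>
 *         <dict>
 *             <key>address</key> <integer ...>0x12345000</integer>
 *             <key>length</key>  <integer ...>0x2000</integer>
 *         </dict>
 *         ...
 *     </array>
 */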
2758
2759bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
2760{
2761 if (!s) {
2762 return (false);
2763 }
2764 if (s->previouslySerialized(this)) return true;
2765
2766 // Pretend we are a dictionary.
2767 // We must duplicate the functionality of OSDictionary here
2768 // because otherwise object references will not work;
2769 // they are based on the value of the object passed to
2770 // previouslySerialized and addXMLStartTag.
2771
2772 if (!s->addXMLStartTag(this, "dict")) return false;
2773
2774 char const *keys[3] = {"offset", "length", "parent"};
2775
2776 OSObject *values[3];
2777 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
2778 if (values[0] == 0)
2779 return false;
2780 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
2781 if (values[1] == 0) {
2782 values[0]->release();
2783 return false;
2784 }
2785 values[2] = _parent;
2786
2787 bool result = true;
2788 for (int i=0; i<3; i++) {
2789 if (!s->addString("<key>") ||
2790 !s->addString(keys[i]) ||
2791 !s->addXMLEndTag("key") ||
2792 !values[i]->serialize(s)) {
2793 result = false;
2794 break;
2795 }
2796 }
2797 values[0]->release();
2798 values[1]->release();
2799 if (!result) {
2800 return false;
2801 }
2802
2803 return s->addXMLEndTag("dict");
2804}
2805
2806/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2807
0b4e3aa0 2808OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
2809OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
2810OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
2811OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
2812OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
2813OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
2814OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
2815OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
2816OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
2817OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
2818OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
2819OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
2820OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
2821OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
2822OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
2823OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
9bccf70c 2824
55e303ae 2825/* ex-inline function implementation */
2826IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
2827 { return( getPhysicalSegment( 0, 0 )); }