1 /*
2 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
32 *
33 * HISTORY
34 *
35 */
36 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
37 #include <sys/cdefs.h>
38
39 #include <IOKit/assert.h>
40 #include <IOKit/system.h>
41 #include <IOKit/IOLib.h>
42 #include <IOKit/IOMemoryDescriptor.h>
43 #include <IOKit/IOMapper.h>
44 #include <IOKit/IOKitKeysPrivate.h>
45
46 #include <IOKit/IOKitDebug.h>
47
48 #include "IOKitKernelInternal.h"
49
50 #include <libkern/c++/OSContainers.h>
51 #include <libkern/c++/OSDictionary.h>
52 #include <libkern/c++/OSArray.h>
53 #include <libkern/c++/OSSymbol.h>
54 #include <libkern/c++/OSNumber.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <vm/vm_shared_memory_server.h>
62 #include <mach/memory_object_types.h>
63 #include <device/device_port.h>
64
65 #ifndef i386
66 #include <mach/vm_prot.h>
67 #include <vm/vm_fault.h>
68 struct phys_entry *pmap_find_physentry(ppnum_t pa);
69 #endif
70
71 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
72 void ipc_port_release_send(ipc_port_t port);
73
74 /* Copy between a physical page and a virtual address in the given vm_map */
75 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
76
77 memory_object_t
78 device_pager_setup(
79 memory_object_t pager,
80 int device_handle,
81 vm_size_t size,
82 int flags);
83 void
84 device_pager_deallocate(
85 memory_object_t);
86 kern_return_t
87 device_pager_populate_object(
88 memory_object_t pager,
89 vm_object_offset_t offset,
90 ppnum_t phys_addr,
91 vm_size_t size);
92 kern_return_t
93 memory_object_iopl_request(
94 ipc_port_t port,
95 memory_object_offset_t offset,
96 vm_size_t *upl_size,
97 upl_t *upl_ptr,
98 upl_page_info_array_t user_page_list,
99 unsigned int *page_list_count,
100 int *flags);
101
102 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
103
104 __END_DECLS
105
106 #define kIOMaximumMappedIOByteCount (512*1024*1024)
107
108 static IOMapper * gIOSystemMapper;
109 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
110
111 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
112
113 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
114
115 #define super IOMemoryDescriptor
116
117 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
118
119 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
120
121 static IORecursiveLock * gIOMemoryLock;
122
123 #define LOCK IORecursiveLockLock( gIOMemoryLock)
124 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
125 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
126 #define WAKEUP \
127 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
128
129 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
130
131 class _IOMemoryMap : public IOMemoryMap
132 {
133 OSDeclareDefaultStructors(_IOMemoryMap)
134 public:
135 IOMemoryDescriptor * memory;
136 IOMemoryMap * superMap;
137 IOByteCount offset;
138 IOByteCount length;
139 IOVirtualAddress logical;
140 task_t addressTask;
141 vm_map_t addressMap;
142 IOOptionBits options;
143 upl_t redirUPL;
144 ipc_port_t redirEntry;
145 IOMemoryDescriptor * owner;
146
147 protected:
148 virtual void taggedRelease(const void *tag = 0) const;
149 virtual void free();
150
151 public:
152
153 // IOMemoryMap methods
154 virtual IOVirtualAddress getVirtualAddress();
155 virtual IOByteCount getLength();
156 virtual task_t getAddressTask();
157 virtual IOMemoryDescriptor * getMemoryDescriptor();
158 virtual IOOptionBits getMapOptions();
159
160 virtual IOReturn unmap();
161 virtual void taskDied();
162
163 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
164 IOOptionBits options,
165 IOByteCount offset = 0);
166
167 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
168 IOByteCount * length);
169
170 // for IOMemoryDescriptor use
171 _IOMemoryMap * copyCompatible(
172 IOMemoryDescriptor * owner,
173 task_t intoTask,
174 IOVirtualAddress toAddress,
175 IOOptionBits options,
176 IOByteCount offset,
177 IOByteCount length );
178
179 bool initCompatible(
180 IOMemoryDescriptor * memory,
181 IOMemoryMap * superMap,
182 IOByteCount offset,
183 IOByteCount length );
184
185 bool initWithDescriptor(
186 IOMemoryDescriptor * memory,
187 task_t intoTask,
188 IOVirtualAddress toAddress,
189 IOOptionBits options,
190 IOByteCount offset,
191 IOByteCount length );
192
193 IOReturn redirect(
194 task_t intoTask, bool redirect );
195 };
196
197 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
198
199 // Some data structures and accessor macros used by the initWithOptions
200 // function.
201
202 enum ioPLBlockFlags {
203 kIOPLOnDevice = 0x00000001,
204 kIOPLExternUPL = 0x00000002,
205 };
206
207 struct typePersMDData
208 {
209 const IOGeneralMemoryDescriptor *fMD;
210 ipc_port_t fMemEntry;
211 };
212
213 struct ioPLBlock {
214 upl_t fIOPL;
215 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
216 vm_offset_t fPageInfo; // Pointer to page list or index into it
217 ppnum_t fMappedBase; // Page number of first page in this iopl
218 unsigned int fPageOffset; // Offset within first page of iopl
219 unsigned int fFlags; // Flags
220 };
221
222 struct ioGMDData {
223 IOMapper *fMapper;
224 unsigned int fPageCnt;
225 upl_page_info_t fPageList[];
226 ioPLBlock fBlocks[];
227 };
228
229 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
230 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
231 #define getNumIOPL(osd, d) \
232 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
233 #define getPageList(d) (&(d->fPageList[0]))
234 #define computeDataSize(p, u) \
235 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
236
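/*
 * Layout sketch for the OSData buffer these macros walk: one ioGMDData header,
 * then fPageCnt upl_page_info_t entries, then however many ioPLBlock records
 * have been appended so far. For a hypothetical descriptor of 3 pages backed
 * by a single UPL:
 *
 *   computeDataSize(3, 1) == sizeof(ioGMDData)
 *                          + 3 * sizeof(upl_page_info_t)
 *                          + 1 * sizeof(ioPLBlock)
 *
 * getIOPLList() points just past the 3-entry page list, which is where
 * getNumIOPL() starts counting the appended ioPLBlocks.
 */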
237
238 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
239
240 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
241
242
243 extern "C" {
244
245 kern_return_t device_data_action(
246 int device_handle,
247 ipc_port_t device_pager,
248 vm_prot_t protection,
249 vm_object_offset_t offset,
250 vm_size_t size)
251 {
252 struct ExpansionData {
253 void * devicePager;
254 unsigned int pagerContig:1;
255 unsigned int unused:31;
256 IOMemoryDescriptor * memory;
257 };
258 kern_return_t kr;
259 ExpansionData * ref = (ExpansionData *) device_handle;
260 IOMemoryDescriptor * memDesc;
261
262 LOCK;
263 memDesc = ref->memory;
264 if( memDesc)
265 {
266 memDesc->retain();
267 kr = memDesc->handleFault( device_pager, 0, 0,
268 offset, size, kIOMapDefaultCache /*?*/);
269 memDesc->release();
270 }
271 else
272 kr = KERN_ABORTED;
273 UNLOCK;
274
275 return( kr );
276 }
277
278 kern_return_t device_close(
279 int device_handle)
280 {
281 struct ExpansionData {
282 void * devicePager;
283 unsigned int pagerContig:1;
284 unsigned int unused:31;
285 IOMemoryDescriptor * memory;
286 };
287 ExpansionData * ref = (ExpansionData *) device_handle;
288
289 IODelete( ref, ExpansionData, 1 );
290
291 return( kIOReturnSuccess );
292 }
293 }; // end extern "C"
294
295 // Note this inline function uses C++ reference arguments to return values.
296 // This means that pointers are not passed and NULLs don't have to be
297 // checked for, as a NULL reference is illegal.
298 static inline void
299 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
300 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
301 {
302 assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
303 || kIOMemoryTypeVirtual == type);
304 if (kIOMemoryTypeUIO == type) {
305 user_size_t us;
306 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
307 }
308 else {
309 IOVirtualRange cur = r.v[ind];
310 addr = cur.address;
311 len = cur.length;
312 }
313 }
314
315 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
316
317 /*
318 * withAddress:
319 *
320 * Create a new IOMemoryDescriptor. The buffer is a virtual address
321 * relative to the specified task. If no task is supplied, the kernel
322 * task is implied.
323 */
324 IOMemoryDescriptor *
325 IOMemoryDescriptor::withAddress(void * address,
326 IOByteCount length,
327 IODirection direction)
328 {
329 return IOMemoryDescriptor::
330 withAddress((vm_address_t) address, length, direction, kernel_task);
331 }
332
333 IOMemoryDescriptor *
334 IOMemoryDescriptor::withAddress(vm_address_t address,
335 IOByteCount length,
336 IODirection direction,
337 task_t task)
338 {
339 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
340 if (that)
341 {
342 if (that->initWithAddress(address, length, direction, task))
343 return that;
344
345 that->release();
346 }
347 return 0;
348 }
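/*
 * A minimal usage sketch for the kernel-virtual case, assuming a hypothetical
 * driver buffer `buf` of `len` bytes being sent out to a device:
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(buf, len, kIODirectionOut);
 *   if (md) {
 *       // ... map, prepare()/complete(), or hand the descriptor to a DMA engine ...
 *       md->release();
 *   }
 */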
349
350 IOMemoryDescriptor *
351 IOMemoryDescriptor::withPhysicalAddress(
352 IOPhysicalAddress address,
353 IOByteCount length,
354 IODirection direction )
355 {
356 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
357 if (self
358 && !self->initWithPhysicalAddress(address, length, direction)) {
359 self->release();
360 return 0;
361 }
362
363 return self;
364 }
365
366 IOMemoryDescriptor *
367 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
368 UInt32 withCount,
369 IODirection direction,
370 task_t task,
371 bool asReference)
372 {
373 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
374 if (that)
375 {
376 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
377 return that;
378
379 that->release();
380 }
381 return 0;
382 }
383
384
385 /*
386 * withRanges:
387 *
388 * Create a new IOMemoryDescriptor. The buffer is made up of several
389 * virtual address ranges, from a given task.
390 *
391 * Passing the ranges as a reference will avoid an extra allocation.
392 */
393 IOMemoryDescriptor *
394 IOMemoryDescriptor::withOptions(void * buffers,
395 UInt32 count,
396 UInt32 offset,
397 task_t task,
398 IOOptionBits opts,
399 IOMapper * mapper)
400 {
401 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
402
403 if (self
404 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
405 {
406 self->release();
407 return 0;
408 }
409
410 return self;
411 }
412
413 // Can't leave this abstract, but it should never be used directly.
414 bool IOMemoryDescriptor::initWithOptions(void * buffers,
415 UInt32 count,
416 UInt32 offset,
417 task_t task,
418 IOOptionBits options,
419 IOMapper * mapper)
420 {
421 // @@@ gvdl: Should I panic?
422 panic("IOMD::initWithOptions called\n");
423 return 0;
424 }
425
426 IOMemoryDescriptor *
427 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
428 UInt32 withCount,
429 IODirection direction,
430 bool asReference)
431 {
432 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
433 if (that)
434 {
435 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
436 return that;
437
438 that->release();
439 }
440 return 0;
441 }
442
443 IOMemoryDescriptor *
444 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
445 IOByteCount offset,
446 IOByteCount length,
447 IODirection direction)
448 {
449 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
450
451 if (self && !self->initSubRange(of, offset, length, direction)) {
452 self->release();
453 self = 0;
454 }
455 return self;
456 }
457
458 IOMemoryDescriptor * IOMemoryDescriptor::
459 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
460 {
461 IOGeneralMemoryDescriptor *origGenMD =
462 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
463
464 if (origGenMD)
465 return IOGeneralMemoryDescriptor::
466 withPersistentMemoryDescriptor(origGenMD);
467 else
468 return 0;
469 }
470
471 IOMemoryDescriptor * IOGeneralMemoryDescriptor::
472 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
473 {
474 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
475
476 if (!sharedMem)
477 return 0;
478
479 if (sharedMem == originalMD->_memEntry) {
480 originalMD->retain(); // Add a new reference to ourselves
481 ipc_port_release_send(sharedMem); // Remove extra send right
482 return originalMD;
483 }
484
485 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
486 typePersMDData initData = { originalMD, sharedMem };
487
488 if (self
489 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
490 self->release();
491 self = 0;
492 }
493 return self;
494 }
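/*
 * A usage sketch, assuming `origMD` was created with kIOMemoryPersistent so a
 * named entry exists for it:
 *
 *   IOMemoryDescriptor * persistMD =
 *       IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD);
 *   // persistMD is either origMD itself with an extra retain, or a new
 *   // descriptor tracking the same backing object; release() it when done.
 */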
495
496 void *IOGeneralMemoryDescriptor::createNamedEntry()
497 {
498 kern_return_t error;
499 ipc_port_t sharedMem;
500
501 IOOptionBits type = _flags & kIOMemoryTypeMask;
502
503 user_addr_t range0Addr;
504 IOByteCount range0Len;
505 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
506 range0Addr = trunc_page_64(range0Addr);
507
508 vm_size_t size = ptoa_32(_pages);
509 vm_address_t kernelPage = (vm_address_t) range0Addr;
510
511 vm_map_t theMap = ((_task == kernel_task)
512 && (kIOMemoryBufferPageable & _flags))
513 ? IOPageableMapForAddress(kernelPage)
514 : get_task_map(_task);
515
516 memory_object_size_t actualSize = size;
517 vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
518 if (_memEntry)
519 prot |= MAP_MEM_NAMED_REUSE;
520
521 error = mach_make_memory_entry_64(theMap,
522 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
523
524 if (KERN_SUCCESS == error) {
525 if (actualSize == size) {
526 return sharedMem;
527 } else {
528 #if IOASSERT
529 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
530 (UInt64)range0Addr, (UInt32)actualSize, size);
531 #endif
532 ipc_port_release_send( sharedMem );
533 }
534 }
535
536 return MACH_PORT_NULL;
537 }
538
539 /*
540 * initWithAddress:
541 *
542 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
543 * relative to the specified task. If no task is supplied, the kernel
544 * task is implied.
545 *
546 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
547 * initWithRanges again on an existing instance -- note this behavior
548 * is not commonly supported in other I/O Kit classes, although it is
549 * supported here.
550 */
551 bool
552 IOGeneralMemoryDescriptor::initWithAddress(void * address,
553 IOByteCount withLength,
554 IODirection withDirection)
555 {
556 _singleRange.v.address = (vm_address_t) address;
557 _singleRange.v.length = withLength;
558
559 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
560 }
561
562 bool
563 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
564 IOByteCount withLength,
565 IODirection withDirection,
566 task_t withTask)
567 {
568 _singleRange.v.address = address;
569 _singleRange.v.length = withLength;
570
571 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
572 }
573
574 bool
575 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
576 IOPhysicalAddress address,
577 IOByteCount withLength,
578 IODirection withDirection )
579 {
580 _singleRange.p.address = address;
581 _singleRange.p.length = withLength;
582
583 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
584 }
585
586 bool
587 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
588 IOPhysicalRange * ranges,
589 UInt32 count,
590 IODirection direction,
591 bool reference)
592 {
593 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
594
595 if (reference)
596 mdOpts |= kIOMemoryAsReference;
597
598 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
599 }
600
601 bool
602 IOGeneralMemoryDescriptor::initWithRanges(
603 IOVirtualRange * ranges,
604 UInt32 count,
605 IODirection direction,
606 task_t task,
607 bool reference)
608 {
609 IOOptionBits mdOpts = direction;
610
611 if (reference)
612 mdOpts |= kIOMemoryAsReference;
613
614 if (task) {
615 mdOpts |= kIOMemoryTypeVirtual;
616
617 // Auto-prepare if this is a kernel memory descriptor, as very few
618 // clients bother to prepare() kernel memory.
619 // But it was never enforced, so what are you going to do?
620 if (task == kernel_task)
621 mdOpts |= kIOMemoryAutoPrepare;
622 }
623 else
624 mdOpts |= kIOMemoryTypePhysical;
625
626 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
627 }
628
629 /*
630 * initWithOptions:
631 *
632 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
633 * address ranges from a given task, several physical ranges, a UPL from the
634 * ubc system, or a uio (possibly 64-bit) from the BSD subsystem.
635 *
636 * Passing the ranges as a reference will avoid an extra allocation.
637 *
638 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
639 * existing instance -- note this behavior is not commonly supported in other
640 * I/O Kit classes, although it is supported here.
641 */
642
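/*
 * A sketch of building the options word for the virtual-range case, mirroring
 * what initWithRanges() does; `buf0`/`len0`/`buf1`/`len1` are assumed caller
 * variables:
 *
 *   IOVirtualRange ranges[2];
 *   ranges[0].address = (IOVirtualAddress) buf0;  ranges[0].length = len0;
 *   ranges[1].address = (IOVirtualAddress) buf1;  ranges[1].length = len1;
 *
 *   IOOptionBits opts = kIODirectionOut | kIOMemoryTypeVirtual;
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withOptions(ranges, 2, 0, kernel_task, opts, 0);
 *
 * Without kIOMemoryAsReference the ranges are copied, so the caller's array
 * need not outlive the descriptor.
 */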
643 bool
644 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
645 UInt32 count,
646 UInt32 offset,
647 task_t task,
648 IOOptionBits options,
649 IOMapper * mapper)
650 {
651 IOOptionBits type = options & kIOMemoryTypeMask;
652
653 // Grab the original MD's configuration data to initialise the
654 // arguments to this function.
655 if (kIOMemoryTypePersistentMD == type) {
656
657 typePersMDData *initData = (typePersMDData *) buffers;
658 const IOGeneralMemoryDescriptor *orig = initData->fMD;
659 ioGMDData *dataP = getDataP(orig->_memoryEntries);
660
661 // Only accept persistent memory descriptors with valid dataP data.
662 assert(orig->_rangesCount == 1);
663 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
664 return false;
665
666 _memEntry = initData->fMemEntry; // Grab the new named entry
667 options = orig->_flags | kIOMemoryAsReference;
668 _singleRange = orig->_singleRange; // Initialise our range
669 buffers = &_singleRange;
670 count = 1;
671
672 // Now grab the original task and whatever mapper was previously used
673 task = orig->_task;
674 mapper = dataP->fMapper;
675
676 // We are ready to go through the original initialisation now
677 }
678
679 switch (type) {
680 case kIOMemoryTypeUIO:
681 case kIOMemoryTypeVirtual:
682 assert(task);
683 if (!task)
684 return false;
685 else
686 break;
687
688 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
689 mapper = kIOMapperNone;
690
691 case kIOMemoryTypeUPL:
692 assert(!task);
693 break;
694 default:
695 return false; /* bad argument */
696 }
697
698 assert(buffers);
699 assert(count);
700
701 /*
702 * We can check the _initialized instance variable before having ever set
703 * it to an initial value because I/O Kit guarantees that all our instance
704 * variables are zeroed on an object's allocation.
705 */
706
707 if (_initialized) {
708 /*
709 * An existing memory descriptor is being retargeted to point to
710 * somewhere else. Clean up our present state.
711 */
712
713 while (_wireCount)
714 complete();
715 if (_kernPtrAligned)
716 unmapFromKernel();
717 if (_ranges.v && _rangesIsAllocated)
718 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
719 if (_memEntry)
720 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
721 }
722 else {
723 if (!super::init())
724 return false;
725 _initialized = true;
726 }
727
728 // Grab the appropriate mapper
729 if (mapper == kIOMapperNone)
730 mapper = 0; // No Mapper
731 else if (!mapper) {
732 IOMapper::checkForSystemMapper();
733 gIOSystemMapper = mapper = IOMapper::gSystem;
734 }
735
736 // Remove the dynamic internal use flags from the initial setting
737 options &= ~(kIOMemoryPreparedReadOnly);
738 _flags = options;
739 _task = task;
740
741 // DEPRECATED variable initialisation
742 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
743 _position = 0;
744 _kernPtrAligned = 0;
745 _cachedPhysicalAddress = 0;
746 _cachedVirtualAddress = 0;
747
748 if (kIOMemoryTypeUPL == type) {
749
750 ioGMDData *dataP;
751 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
752
753 if (!_memoryEntries) {
754 _memoryEntries = OSData::withCapacity(dataSize);
755 if (!_memoryEntries)
756 return false;
757 }
758 else if (!_memoryEntries->initWithCapacity(dataSize))
759 return false;
760
761 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
762 dataP = getDataP(_memoryEntries);
763 dataP->fMapper = mapper;
764 dataP->fPageCnt = 0;
765
766 _wireCount++; // UPLs start out life wired
767
768 _length = count;
769 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
770
771 ioPLBlock iopl;
772 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
773
774 iopl.fIOPL = (upl_t) buffers;
775 // kIOPLOnDevice is conveniently equal to 1, so pageList->device sets it directly
776 iopl.fFlags = pageList->device | kIOPLExternUPL;
777 iopl.fIOMDOffset = 0;
778 if (!pageList->device) {
779 // Pre-compute the offset into the UPL's page list
780 pageList = &pageList[atop_32(offset)];
781 offset &= PAGE_MASK;
782 if (mapper) {
783 iopl.fMappedBase = mapper->iovmAlloc(_pages);
784 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
785 }
786 else
787 iopl.fMappedBase = 0;
788 }
789 else
790 iopl.fMappedBase = 0;
791 iopl.fPageInfo = (vm_address_t) pageList;
792 iopl.fPageOffset = offset;
793
794 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
795 }
796 else {
797 // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical
798
799 // Initialize the memory descriptor
800 if (options & kIOMemoryAsReference) {
801 _rangesIsAllocated = false;
802
803 // Hack assignment to get the buffer arg into _ranges.
804 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
805 // work, C++ sigh.
806 // This also initialises the uio & physical ranges.
807 _ranges.v = (IOVirtualRange *) buffers;
808 }
809 else {
810 assert(kIOMemoryTypeUIO != type);
811
812 _rangesIsAllocated = true;
813 _ranges.v = IONew(IOVirtualRange, count);
814 if (!_ranges.v)
815 return false;
816 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
817 }
818
819 // Find starting address within the vector of ranges
820 Ranges vec = _ranges;
821 UInt32 length = 0;
822 UInt32 pages = 0;
823 for (unsigned ind = 0; ind < count; ind++) {
824 user_addr_t addr;
825 UInt32 len;
826
827 // addr & len are returned by this function
828 getAddrLenForInd(addr, len, type, vec, ind);
829 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
830 len += length;
831 assert(len > length); // Check for 32 bit wrap around
832 length = len;
833 }
834 _length = length;
835 _pages = pages;
836 _rangesCount = count;
837
838 // Auto-prepare memory at creation time.
839 // Implied completion when the descriptor is freed
840 if (kIOMemoryTypePhysical == type)
841 _wireCount++; // Physical MDs are, by definition, wired
842 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
843 ioGMDData *dataP;
844 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
845
846 if (!_memoryEntries) {
847 _memoryEntries = OSData::withCapacity(dataSize);
848 if (!_memoryEntries)
849 return false;
850 }
851 else if (!_memoryEntries->initWithCapacity(dataSize))
852 return false;
853
854 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
855 dataP = getDataP(_memoryEntries);
856 dataP->fMapper = mapper;
857 dataP->fPageCnt = _pages;
858
859 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
860 _memEntry = createNamedEntry();
861
862 if ((_flags & kIOMemoryAutoPrepare)
863 && prepare() != kIOReturnSuccess)
864 return false;
865 }
866 }
867
868 return true;
869 }
870
871 /*
872 * free
873 *
874 * Free resources.
875 */
876 void IOGeneralMemoryDescriptor::free()
877 {
878 LOCK;
879 if( reserved)
880 reserved->memory = 0;
881 UNLOCK;
882
883 while (_wireCount)
884 complete();
885 if (_memoryEntries)
886 _memoryEntries->release();
887
888 if (_kernPtrAligned)
889 unmapFromKernel();
890 if (_ranges.v && _rangesIsAllocated)
891 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
892
893 if (reserved && reserved->devicePager)
894 device_pager_deallocate( (memory_object_t) reserved->devicePager );
895
896 // memEntry holds a ref on the device pager which owns reserved
897 // (ExpansionData) so no reserved access after this point
898 if (_memEntry)
899 ipc_port_release_send( (ipc_port_t) _memEntry );
900
901 super::free();
902 }
903
904 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
905 /* DEPRECATED */ {
906 panic("IOGMD::unmapFromKernel deprecated");
907 /* DEPRECATED */ }
908 /* DEPRECATED */
909 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
910 /* DEPRECATED */ {
911 panic("IOGMD::mapIntoKernel deprecated");
912 /* DEPRECATED */ }
913
914 /*
915 * getDirection:
916 *
917 * Get the direction of the transfer.
918 */
919 IODirection IOMemoryDescriptor::getDirection() const
920 {
921 return _direction;
922 }
923
924 /*
925 * getLength:
926 *
927 * Get the length of the transfer (over all ranges).
928 */
929 IOByteCount IOMemoryDescriptor::getLength() const
930 {
931 return _length;
932 }
933
934 void IOMemoryDescriptor::setTag( IOOptionBits tag )
935 {
936 _tag = tag;
937 }
938
939 IOOptionBits IOMemoryDescriptor::getTag( void )
940 {
941 return( _tag);
942 }
943
944 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
945 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
946 IOByteCount * length )
947 {
948 IOPhysicalAddress physAddr = 0;
949
950 if( prepare() == kIOReturnSuccess) {
951 physAddr = getPhysicalSegment( offset, length );
952 complete();
953 }
954
955 return( physAddr );
956 }
957
958 IOByteCount IOMemoryDescriptor::readBytes
959 (IOByteCount offset, void *bytes, IOByteCount length)
960 {
961 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
962 IOByteCount remaining;
963
964 // Assert that this entire I/O is within the available range
965 assert(offset < _length);
966 assert(offset + length <= _length);
967 if (offset >= _length) {
968 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
969 return 0;
970 }
971
972 remaining = length = min(length, _length - offset);
973 while (remaining) { // (process another target segment?)
974 addr64_t srcAddr64;
975 IOByteCount srcLen;
976
977 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
978 if (!srcAddr64)
979 break;
980
981 // Clip segment length to remaining
982 if (srcLen > remaining)
983 srcLen = remaining;
984
985 copypv(srcAddr64, dstAddr, srcLen,
986 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
987
988 dstAddr += srcLen;
989 offset += srcLen;
990 remaining -= srcLen;
991 }
992
993 assert(!remaining);
994
995 return length - remaining;
996 }
997
998 IOByteCount IOMemoryDescriptor::writeBytes
999 (IOByteCount offset, const void *bytes, IOByteCount length)
1000 {
1001 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1002 IOByteCount remaining;
1003
1004 // Assert that this entire I/O is within the available range
1005 assert(offset < _length);
1006 assert(offset + length <= _length);
1007
1008 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1009
1010 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1011 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1012 return 0;
1013 }
1014
1015 remaining = length = min(length, _length - offset);
1016 while (remaining) { // (process another target segment?)
1017 addr64_t dstAddr64;
1018 IOByteCount dstLen;
1019
1020 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1021 if (!dstAddr64)
1022 break;
1023
1024 // Clip segment length to remaining
1025 if (dstLen > remaining)
1026 dstLen = remaining;
1027
1028 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1029 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1030
1031 srcAddr += dstLen;
1032 offset += dstLen;
1033 remaining -= dstLen;
1034 }
1035
1036 assert(!remaining);
1037
1038 return length - remaining;
1039 }
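/*
 * A short sketch of copying through a descriptor without mapping it, assuming
 * `md` describes at least 64 bytes and its physical segments are available
 * (physical, auto-prepared, or explicitly prepare()d):
 *
 *   char scratch[64];                                  // hypothetical buffer
 *   IOByteCount got = md->readBytes(0, scratch, sizeof(scratch));
 *   IOByteCount put = md->writeBytes(0, scratch, sizeof(scratch));
 *   // got and put report how many bytes were actually copied
 */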
1040
1041 // osfmk/device/iokit_rpc.c
1042 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1043
1044 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1045 /* DEPRECATED */ {
1046 panic("IOGMD::setPosition deprecated");
1047 /* DEPRECATED */ }
1048
1049 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
1050 (IOByteCount offset, IOByteCount *lengthOfSegment)
1051 {
1052 IOPhysicalAddress address = 0;
1053 IOPhysicalLength length = 0;
1054
1055 // assert(offset <= _length);
1056 if (offset < _length) // (within bounds?)
1057 {
1058 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1059 unsigned int ind;
1060
1061 // Physical address based memory descriptor
1062
1063 // Find offset within descriptor and make it relative
1064 // to the current _range.
1065 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
1066 offset -= _ranges.p[ind].length;
1067
1068 IOPhysicalRange cur = _ranges.p[ind];
1069 address = cur.address + offset;
1070 length = cur.length - offset;
1071
1072 // see how far we can coalesce ranges
1073 for (++ind; ind < _rangesCount; ind++) {
1074 cur = _ranges.p[ind];
1075
1076 if (address + length != cur.address)
1077 break;
1078
1079 length += cur.length;
1080 }
1081
1082 // @@@ gvdl: should be assert(address);
1083 // but can't as NVidia GeForce creates a bogus physical mem
1084 assert(address
1085 || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
1086 assert(length);
1087 }
1088 else do {
1089 // We need wiring & we are wired.
1090 assert(_wireCount);
1091
1092 if (!_wireCount)
1093 {
1094 panic("IOGMD: not wired for getPhysicalSegment()");
1095 continue;
1096 }
1097
1098 assert(_memoryEntries);
1099
1100 ioGMDData * dataP = getDataP(_memoryEntries);
1101 const ioPLBlock *ioplList = getIOPLList(dataP);
1102 UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
1103 upl_page_info_t *pageList = getPageList(dataP);
1104
1105 assert(numIOPLs > 0);
1106
1107 // Scan through iopl info blocks looking for block containing offset
1108 for (ind = 1; ind < numIOPLs; ind++) {
1109 if (offset < ioplList[ind].fIOMDOffset)
1110 break;
1111 }
1112
1113 // Go back to the actual range, as the search goes one past it
1114 ioPLBlock ioplInfo = ioplList[ind - 1];
1115
1116 if (ind < numIOPLs)
1117 length = ioplList[ind].fIOMDOffset;
1118 else
1119 length = _length;
1120 length -= offset; // Remainder within iopl
1121
1122 // Make the offset relative to this iopl's start within the descriptor
1123 offset -= ioplInfo.fIOMDOffset;
1124
1125 // This is a mapped IOPL so we just need to compute an offset
1126 // relative to the mapped base.
1127 if (ioplInfo.fMappedBase) {
1128 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1129 address = ptoa_32(ioplInfo.fMappedBase) + offset;
1130 continue;
1131 }
1132
1133 // Currently the offset is rebased into the current iopl.
1134 // Now add the iopl's first-page offset.
1135 offset += ioplInfo.fPageOffset;
1136
1137 // For external UPLs the fPageInfo field points directly to
1138 // the upl's upl_page_info_t array.
1139 if (ioplInfo.fFlags & kIOPLExternUPL)
1140 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1141 else
1142 pageList = &pageList[ioplInfo.fPageInfo];
1143
1144 // Check for direct device non-paged memory
1145 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1146 address = ptoa_32(pageList->phys_addr) + offset;
1147 continue;
1148 }
1149
1150 // Now we need to compute the index into the pageList
1151 ind = atop_32(offset);
1152 offset &= PAGE_MASK;
1153
1154 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
1155 address = ptoa_32(pageAddr) + offset;
1156
1157 // Check whether the remaining data in this upl extends past the
1158 // remainder of the current page; if so, look for physically
1159 // contiguous pages
1160 if (length > PAGE_SIZE - offset) {
1161 // See if the next page is contiguous. Stop looking when we hit
1162 // the end of this upl, which is indicated by
1163 // contigLength >= length.
1164 IOByteCount contigLength = PAGE_SIZE - offset;
1165
1166 // Look for contiguous segment
1167 while (contigLength < length
1168 && ++pageAddr == pageList[++ind].phys_addr) {
1169 contigLength += PAGE_SIZE;
1170 }
1171 if (length > contigLength)
1172 length = contigLength;
1173 }
1174
1175 assert(address);
1176 assert(length);
1177
1178 } while (0);
1179
1180 if (!address)
1181 length = 0;
1182 }
1183
1184 if (lengthOfSegment)
1185 *lengthOfSegment = length;
1186
1187 return address;
1188 }
1189
1190 addr64_t IOMemoryDescriptor::getPhysicalSegment64
1191 (IOByteCount offset, IOByteCount *lengthOfSegment)
1192 {
1193 IOPhysicalAddress phys32;
1194 IOByteCount length;
1195 addr64_t phys64;
1196
1197 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1198 if (!phys32)
1199 return 0;
1200
1201 if (gIOSystemMapper)
1202 {
1203 IOByteCount origLen;
1204
1205 phys64 = gIOSystemMapper->mapAddr(phys32);
1206 origLen = *lengthOfSegment;
1207 length = page_size - (phys64 & (page_size - 1));
1208 while ((length < origLen)
1209 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
1210 length += page_size;
1211 if (length > origLen)
1212 length = origLen;
1213
1214 *lengthOfSegment = length;
1215 }
1216 else
1217 phys64 = (addr64_t) phys32;
1218
1219 return phys64;
1220 }
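/*
 * A sketch of walking physical segments to build a scatter/gather program,
 * assuming `md` has been prepare()d so its pages are wired:
 *
 *   IOByteCount offset = 0;
 *   while (offset < md->getLength()) {
 *       IOByteCount segLen;
 *       addr64_t    segPhys = md->getPhysicalSegment64(offset, &segLen);
 *       if (!segPhys)
 *           break;
 *       // program one DMA element covering (segPhys, segLen) here
 *       offset += segLen;
 *   }
 */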
1221
1222 IOPhysicalAddress IOGeneralMemoryDescriptor::
1223 getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1224 {
1225 IOPhysicalAddress address = 0;
1226 IOPhysicalLength length = 0;
1227 IOOptionBits type = _flags & kIOMemoryTypeMask;
1228
1229 assert(offset <= _length);
1230
1231 if ( type == kIOMemoryTypeUPL)
1232 return super::getSourceSegment( offset, lengthOfSegment );
1233 else if ( offset < _length ) // (within bounds?)
1234 {
1235 unsigned rangesIndex = 0;
1236 Ranges vec = _ranges;
1237 user_addr_t addr;
1238
1239 // Find starting address within the vector of ranges
1240 for (;;) {
1241 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1242 if (offset < length)
1243 break;
1244 offset -= length; // (make offset relative)
1245 rangesIndex++;
1246 }
1247
1248 // Now that we have the starting range,
1249 // lets find the last contiguous range
1250 addr += offset;
1251 length -= offset;
1252
1253 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1254 user_addr_t newAddr;
1255 IOPhysicalLength newLen;
1256
1257 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1258 if (addr + length != newAddr)
1259 break;
1260 length += newLen;
1261 }
1262 if (addr)
1263 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1264 else
1265 length = 0;
1266 }
1267
1268 if ( lengthOfSegment ) *lengthOfSegment = length;
1269
1270 return address;
1271 }
1272
1273 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1274 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1275 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1276 /* DEPRECATED */ {
1277 if (_task == kernel_task)
1278 return (void *) getSourceSegment(offset, lengthOfSegment);
1279 else
1280 panic("IOGMD::getVirtualSegment deprecated");
1281
1282 return 0;
1283 /* DEPRECATED */ }
1284 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1285
1286
1287
1288 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1289 IOOptionBits * oldState )
1290 {
1291 IOReturn err = kIOReturnSuccess;
1292 vm_purgable_t control;
1293 int state;
1294
1295 do
1296 {
1297 if (!_memEntry)
1298 {
1299 err = kIOReturnNotReady;
1300 break;
1301 }
1302
1303 control = VM_PURGABLE_SET_STATE;
1304 switch (newState)
1305 {
1306 case kIOMemoryPurgeableKeepCurrent:
1307 control = VM_PURGABLE_GET_STATE;
1308 break;
1309
1310 case kIOMemoryPurgeableNonVolatile:
1311 state = VM_PURGABLE_NONVOLATILE;
1312 break;
1313 case kIOMemoryPurgeableVolatile:
1314 state = VM_PURGABLE_VOLATILE;
1315 break;
1316 case kIOMemoryPurgeableEmpty:
1317 state = VM_PURGABLE_EMPTY;
1318 break;
1319 default:
1320 err = kIOReturnBadArgument;
1321 break;
1322 }
1323
1324 if (kIOReturnSuccess != err)
1325 break;
1326
1327 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1328
1329 if (oldState)
1330 {
1331 if (kIOReturnSuccess == err)
1332 {
1333 switch (state)
1334 {
1335 case VM_PURGABLE_NONVOLATILE:
1336 state = kIOMemoryPurgeableNonVolatile;
1337 break;
1338 case VM_PURGABLE_VOLATILE:
1339 state = kIOMemoryPurgeableVolatile;
1340 break;
1341 case VM_PURGABLE_EMPTY:
1342 state = kIOMemoryPurgeableEmpty;
1343 break;
1344 default:
1345 state = kIOMemoryPurgeableNonVolatile;
1346 err = kIOReturnNotReady;
1347 break;
1348 }
1349 *oldState = state;
1350 }
1351 }
1352 }
1353 while (false);
1354
1355 return (err);
1356 }
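/*
 * A common purgeable idiom, assuming `md` has a named entry (_memEntry), which
 * setPurgeable() requires:
 *
 *   IOOptionBits state;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &state);      // allow reclaim
 *   // ... later, before touching the contents again ...
 *   md->setPurgeable(kIOMemoryPurgeableKeepCurrent, &state);   // query only
 *   if (kIOMemoryPurgeableEmpty == state) {
 *       // the pages were reclaimed while volatile; regenerate the data
 *   }
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &state);
 */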
1357
1358 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1359 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1360
1361 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1362 IOByteCount offset, IOByteCount length )
1363 {
1364 IOByteCount remaining;
1365 void (*func)(addr64_t pa, unsigned int count) = 0;
1366
1367 switch (options)
1368 {
1369 case kIOMemoryIncoherentIOFlush:
1370 func = &dcache_incoherent_io_flush64;
1371 break;
1372 case kIOMemoryIncoherentIOStore:
1373 func = &dcache_incoherent_io_store64;
1374 break;
1375 }
1376
1377 if (!func)
1378 return (kIOReturnUnsupported);
1379
1380 remaining = length = min(length, getLength() - offset);
1381 while (remaining)
1382 // (process another target segment?)
1383 {
1384 addr64_t dstAddr64;
1385 IOByteCount dstLen;
1386
1387 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1388 if (!dstAddr64)
1389 break;
1390
1391 // Clip segment length to remaining
1392 if (dstLen > remaining)
1393 dstLen = remaining;
1394
1395 (*func)(dstAddr64, dstLen);
1396
1397 offset += dstLen;
1398 remaining -= dstLen;
1399 }
1400
1401 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1402 }
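/*
 * A sketch of keeping caches coherent around DMA on hardware without bus
 * snooping, assuming `md` describes the DMA buffer:
 *
 *   md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *   // ... device reads the buffer the CPU just wrote ...
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 *   // ... CPU reads data the device wrote ...
 */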
1403
1404 #ifdef __ppc__
1405 extern vm_offset_t static_memory_end;
1406 #define io_kernel_static_end static_memory_end
1407 #else
1408 extern vm_offset_t first_avail;
1409 #define io_kernel_static_end first_avail
1410 #endif
1411
1412 static kern_return_t
1413 io_get_kernel_static_upl(
1414 vm_map_t /* map */,
1415 vm_address_t offset,
1416 vm_size_t *upl_size,
1417 upl_t *upl,
1418 upl_page_info_array_t page_list,
1419 unsigned int *count)
1420 {
1421 unsigned int pageCount, page;
1422 ppnum_t phys;
1423
1424 pageCount = atop_32(*upl_size);
1425 if (pageCount > *count)
1426 pageCount = *count;
1427
1428 *upl = NULL;
1429
1430 for (page = 0; page < pageCount; page++)
1431 {
1432 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1433 if (!phys)
1434 break;
1435 page_list[page].phys_addr = phys;
1436 page_list[page].pageout = 0;
1437 page_list[page].absent = 0;
1438 page_list[page].dirty = 0;
1439 page_list[page].precious = 0;
1440 page_list[page].device = 0;
1441 }
1442
1443 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1444 }
1445
1446 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1447 {
1448 IOOptionBits type = _flags & kIOMemoryTypeMask;
1449 IOReturn error = kIOReturnNoMemory;
1450 ioGMDData *dataP;
1451 ppnum_t mapBase = 0;
1452 IOMapper *mapper;
1453 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1454
1455 assert(!_wireCount);
1456 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
1457
1458 if (_pages >= gIOMaximumMappedIOPageCount)
1459 return kIOReturnNoResources;
1460
1461 dataP = getDataP(_memoryEntries);
1462 mapper = dataP->fMapper;
1463 if (mapper && _pages)
1464 mapBase = mapper->iovmAlloc(_pages);
1465
1466 // Note that appendBytes(NULL) zeros the data up to the
1467 // desired length.
1468 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1469 dataP = 0; // May no longer be valid, so let's not get tempted.
1470
1471 if (forDirection == kIODirectionNone)
1472 forDirection = _direction;
1473
1474 int uplFlags; // This Mem Desc's default flags for upl creation
1475 switch (forDirection)
1476 {
1477 case kIODirectionOut:
1478 // Pages do not need to be marked as dirty on commit
1479 uplFlags = UPL_COPYOUT_FROM;
1480 _flags |= kIOMemoryPreparedReadOnly;
1481 break;
1482
1483 case kIODirectionIn:
1484 default:
1485 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1486 break;
1487 }
1488 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1489
1490 // Find the appropriate vm_map for the given task
1491 vm_map_t curMap;
1492 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1493 curMap = 0;
1494 else
1495 { curMap = get_task_map(_task); }
1496
1497 // Iterate over the vector of virtual ranges
1498 Ranges vec = _ranges;
1499 unsigned int pageIndex = 0;
1500 IOByteCount mdOffset = 0;
1501 for (UInt range = 0; range < _rangesCount; range++) {
1502 ioPLBlock iopl;
1503 user_addr_t startPage;
1504 IOByteCount numBytes;
1505
1506 // Get the startPage address and length of vec[range]
1507 getAddrLenForInd(startPage, numBytes, type, vec, range);
1508 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1509 numBytes += iopl.fPageOffset;
1510 startPage = trunc_page_64(startPage);
1511
1512 if (mapper)
1513 iopl.fMappedBase = mapBase + pageIndex;
1514 else
1515 iopl.fMappedBase = 0;
1516
1517 // Iterate over the current range, creating UPLs
1518 while (numBytes) {
1519 dataP = getDataP(_memoryEntries);
1520 vm_address_t kernelStart = (vm_address_t) startPage;
1521 vm_map_t theMap;
1522 if (curMap)
1523 theMap = curMap;
1524 else if (!sharedMem) {
1525 assert(_task == kernel_task);
1526 theMap = IOPageableMapForAddress(kernelStart);
1527 }
1528 else
1529 theMap = NULL;
1530
1531 upl_page_info_array_t pageInfo = getPageList(dataP);
1532 int ioplFlags = uplFlags;
1533 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1534
1535 vm_size_t ioplSize = round_page_32(numBytes);
1536 unsigned int numPageInfo = atop_32(ioplSize);
1537
1538 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1539 error = io_get_kernel_static_upl(theMap,
1540 kernelStart,
1541 &ioplSize,
1542 &iopl.fIOPL,
1543 baseInfo,
1544 &numPageInfo);
1545 }
1546 else if (sharedMem) {
1547 error = memory_object_iopl_request(sharedMem,
1548 ptoa_32(pageIndex),
1549 &ioplSize,
1550 &iopl.fIOPL,
1551 baseInfo,
1552 &numPageInfo,
1553 &ioplFlags);
1554 }
1555 else {
1556 assert(theMap);
1557 error = vm_map_create_upl(theMap,
1558 startPage,
1559 &ioplSize,
1560 &iopl.fIOPL,
1561 baseInfo,
1562 &numPageInfo,
1563 &ioplFlags);
1564 }
1565
1566 assert(ioplSize);
1567 if (error != KERN_SUCCESS)
1568 goto abortExit;
1569
1570 error = kIOReturnNoMemory;
1571
1572 if (baseInfo->device) {
1573 numPageInfo = 1;
1574 iopl.fFlags = kIOPLOnDevice;
1575 // Don't translate device memory at all
1576 if (mapper && mapBase) {
1577 mapper->iovmFree(mapBase, _pages);
1578 mapBase = 0;
1579 iopl.fMappedBase = 0;
1580 }
1581 }
1582 else {
1583 iopl.fFlags = 0;
1584 if (mapper)
1585 mapper->iovmInsert(mapBase, pageIndex,
1586 baseInfo, numPageInfo);
1587 }
1588
1589 iopl.fIOMDOffset = mdOffset;
1590 iopl.fPageInfo = pageIndex;
1591
1592 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1593 {
1594 upl_commit(iopl.fIOPL, 0, 0);
1595 upl_deallocate(iopl.fIOPL);
1596 iopl.fIOPL = 0;
1597 }
1598
1599 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1600 // Clean up partially created and unsaved iopl
1601 if (iopl.fIOPL) {
1602 upl_abort(iopl.fIOPL, 0);
1603 upl_deallocate(iopl.fIOPL);
1604 }
1605 goto abortExit;
1606 }
1607
1608 // Check for multiple iopls in one virtual range
1609 pageIndex += numPageInfo;
1610 mdOffset -= iopl.fPageOffset;
1611 if (ioplSize < numBytes) {
1612 numBytes -= ioplSize;
1613 startPage += ioplSize;
1614 mdOffset += ioplSize;
1615 iopl.fPageOffset = 0;
1616 if (mapper)
1617 iopl.fMappedBase = mapBase + pageIndex;
1618 }
1619 else {
1620 mdOffset += numBytes;
1621 break;
1622 }
1623 }
1624 }
1625
1626 return kIOReturnSuccess;
1627
1628 abortExit:
1629 {
1630 dataP = getDataP(_memoryEntries);
1631 UInt done = getNumIOPL(_memoryEntries, dataP);
1632 ioPLBlock *ioplList = getIOPLList(dataP);
1633
1634 for (UInt range = 0; range < done; range++)
1635 {
1636 if (ioplList[range].fIOPL) {
1637 upl_abort(ioplList[range].fIOPL, 0);
1638 upl_deallocate(ioplList[range].fIOPL);
1639 }
1640 }
1641 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1642
1643 if (mapper && mapBase)
1644 mapper->iovmFree(mapBase, _pages);
1645 }
1646
1647 return error;
1648 }
1649
1650 /*
1651 * prepare
1652 *
1653 * Prepare the memory for an I/O transfer. This involves paging in
1654 * the memory, if necessary, and wiring it down for the duration of
1655 * the transfer. The complete() method completes the processing of
1656 * the memory after the I/O transfer finishes. This method needn't be
1657 * called for non-pageable memory.
1658 */
1659 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1660 {
1661 IOReturn error = kIOReturnSuccess;
1662 IOOptionBits type = _flags & kIOMemoryTypeMask;
1663
1664 if (!_wireCount
1665 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
1666 error = wireVirtual(forDirection);
1667 if (error)
1668 return error;
1669 }
1670
1671 _wireCount++;
1672
1673 return kIOReturnSuccess;
1674 }
1675
1676 /*
1677 * complete
1678 *
1679 * Complete processing of the memory after an I/O transfer finishes.
1680 * This method should not be called unless a prepare was previously
1681 * issued; prepare() and complete() must occur in pairs, before
1682 * and after an I/O transfer involving pageable memory.
1683 */
1684
1685 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1686 {
1687 assert(_wireCount);
1688
1689 if (!_wireCount)
1690 return kIOReturnSuccess;
1691
1692 _wireCount--;
1693 if (!_wireCount) {
1694 IOOptionBits type = _flags & kIOMemoryTypeMask;
1695
1696 if (kIOMemoryTypePhysical == type) {
1697 /* kIOMemoryTypePhysical */
1698 // DO NOTHING
1699 }
1700 else {
1701 ioGMDData * dataP = getDataP(_memoryEntries);
1702 ioPLBlock *ioplList = getIOPLList(dataP);
1703 UInt count = getNumIOPL(_memoryEntries, dataP);
1704
1705 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1706 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1707
1708 // Only complete iopls that we created which are for TypeVirtual
1709 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
1710 for (UInt ind = 0; ind < count; ind++)
1711 if (ioplList[ind].fIOPL) {
1712 upl_commit(ioplList[ind].fIOPL, 0, 0);
1713 upl_deallocate(ioplList[ind].fIOPL);
1714 }
1715 }
1716
1717 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1718 }
1719 }
1720 return kIOReturnSuccess;
1721 }
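/*
 * The pairing described above, as a minimal sketch for a descriptor over
 * pageable memory handed in by a client:
 *
 *   if (kIOReturnSuccess == md->prepare()) {
 *       // pages are now wired; safe to extract physical segments and run DMA
 *       md->complete();
 *   }
 */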
1722
1723 IOReturn IOGeneralMemoryDescriptor::doMap(
1724 vm_map_t addressMap,
1725 IOVirtualAddress * atAddress,
1726 IOOptionBits options,
1727 IOByteCount sourceOffset,
1728 IOByteCount length )
1729 {
1730 kern_return_t kr;
1731 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1732
1733 IOOptionBits type = _flags & kIOMemoryTypeMask;
1734 Ranges vec = _ranges;
1735
1736 user_addr_t range0Addr = 0;
1737 IOByteCount range0Len = 0;
1738
1739 if (vec.v)
1740 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
1741
1742 // mapping source == dest? (could be much better)
1743 if( _task
1744 && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1745 && (1 == _rangesCount) && (0 == sourceOffset)
1746 && range0Addr && (length <= range0Len) ) {
1747 if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
1748 return kIOReturnOverrun; // Doesn't fit in 32bit return field
1749 else {
1750 *atAddress = range0Addr;
1751 return( kIOReturnSuccess );
1752 }
1753 }
1754
1755 if( 0 == sharedMem) {
1756
1757 vm_size_t size = ptoa_32(_pages);
1758
1759 if( _task) {
1760 #ifndef i386
1761 memory_object_size_t actualSize = size;
1762 kr = mach_make_memory_entry_64(get_task_map(_task),
1763 &actualSize, range0Addr,
1764 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1765 NULL );
1766
1767 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
1768 #if IOASSERT
1769 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
1770 range0Addr, (UInt32) actualSize, size);
1771 #endif
1772 kr = kIOReturnVMError;
1773 ipc_port_release_send( sharedMem );
1774 }
1775
1776 if( KERN_SUCCESS != kr)
1777 #endif /* !i386 */
1778 sharedMem = MACH_PORT_NULL;
1779
1780 } else do {
1781
1782 memory_object_t pager;
1783 unsigned int flags = 0;
1784 addr64_t pa;
1785 IOPhysicalLength segLen;
1786
1787 pa = getPhysicalSegment64( sourceOffset, &segLen );
1788
1789 if( !reserved) {
1790 reserved = IONew( ExpansionData, 1 );
1791 if( !reserved)
1792 continue;
1793 }
1794 reserved->pagerContig = (1 == _rangesCount);
1795 reserved->memory = this;
1796
1797 /* What cache mode do we need? */
1798 switch(options & kIOMapCacheMask ) {
1799
1800 case kIOMapDefaultCache:
1801 default:
1802 flags = IODefaultCacheBits(pa);
1803 break;
1804
1805 case kIOMapInhibitCache:
1806 flags = DEVICE_PAGER_CACHE_INHIB |
1807 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1808 break;
1809
1810 case kIOMapWriteThruCache:
1811 flags = DEVICE_PAGER_WRITE_THROUGH |
1812 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1813 break;
1814
1815 case kIOMapCopybackCache:
1816 flags = DEVICE_PAGER_COHERENT;
1817 break;
1818
1819 case kIOMapWriteCombineCache:
1820 flags = DEVICE_PAGER_CACHE_INHIB |
1821 DEVICE_PAGER_COHERENT;
1822 break;
1823 }
1824
1825 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1826
1827 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1828 size, flags);
1829 assert( pager );
1830
1831 if( pager) {
1832 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1833 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1834
1835 assert( KERN_SUCCESS == kr );
1836 if( KERN_SUCCESS != kr) {
1837 device_pager_deallocate( pager );
1838 pager = MACH_PORT_NULL;
1839 sharedMem = MACH_PORT_NULL;
1840 }
1841 }
1842 if( pager && sharedMem)
1843 reserved->devicePager = pager;
1844 else {
1845 IODelete( reserved, ExpansionData, 1 );
1846 reserved = 0;
1847 }
1848
1849 } while( false );
1850
1851 _memEntry = (void *) sharedMem;
1852 }
1853
1854
1855 #ifndef i386
1856 if( 0 == sharedMem)
1857 kr = kIOReturnVMError;
1858 else
1859 #endif
1860 kr = super::doMap( addressMap, atAddress,
1861 options, sourceOffset, length );
1862
1863 return( kr );
1864 }
1865
1866 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1867 vm_map_t addressMap,
1868 IOVirtualAddress logical,
1869 IOByteCount length )
1870 {
1871 // could be much better
1872 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
1873
1874 IOOptionBits type = _flags & kIOMemoryTypeMask;
1875 user_addr_t range0Addr;
1876 IOByteCount range0Len;
1877
1878 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
1879 if (logical == range0Addr && length <= range0Len)
1880 return( kIOReturnSuccess );
1881 }
1882
1883 return( super::doUnmap( addressMap, logical, length ));
1884 }
1885
1886 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1887
1888 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1889
1890 /* inline function implementation */
1891 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1892 { return( getPhysicalSegment( 0, 0 )); }
1893
1894
1895 #undef super
1896 #define super IOMemoryMap
1897
1898 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1899
1900 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1901
1902 bool _IOMemoryMap::initCompatible(
1903 IOMemoryDescriptor * _memory,
1904 IOMemoryMap * _superMap,
1905 IOByteCount _offset,
1906 IOByteCount _length )
1907 {
1908
1909 if( !super::init())
1910 return( false);
1911
1912 if( (_offset + _length) > _superMap->getLength())
1913 return( false);
1914
1915 _memory->retain();
1916 memory = _memory;
1917 _superMap->retain();
1918 superMap = _superMap;
1919
1920 offset = _offset;
1921 if( _length)
1922 length = _length;
1923 else
1924 length = _memory->getLength();
1925
1926 options = superMap->getMapOptions();
1927 logical = superMap->getVirtualAddress() + offset;
1928
1929 return( true );
1930 }
1931
1932 bool _IOMemoryMap::initWithDescriptor(
1933 IOMemoryDescriptor * _memory,
1934 task_t intoTask,
1935 IOVirtualAddress toAddress,
1936 IOOptionBits _options,
1937 IOByteCount _offset,
1938 IOByteCount _length )
1939 {
1940 bool ok;
1941 bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
1942
1943 if ((!_memory) || (!intoTask))
1944 return( false);
1945
1946 if( (_offset + _length) > _memory->getLength())
1947 return( false);
1948
1949 if (!redir)
1950 {
1951 if (!super::init())
1952 return(false);
1953 addressMap = get_task_map(intoTask);
1954 if( !addressMap)
1955 return( false);
1956 vm_map_reference(addressMap);
1957 addressTask = intoTask;
1958 logical = toAddress;
1959 options = _options;
1960 }
1961
1962 _memory->retain();
1963
1964 offset = _offset;
1965 if( _length)
1966 length = _length;
1967 else
1968 length = _memory->getLength();
1969
1970 if( options & kIOMapStatic)
1971 ok = true;
1972 else
1973 ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
1974 _options, offset, length ));
1975 if (ok || redir)
1976 {
1977 if (memory)
1978 memory->release();
1979 memory = _memory;
1980 logical = toAddress;
1981 }
1982 else
1983 {
1984 _memory->release();
1985 if (!redir)
1986 {
1987 logical = 0;
1988 memory = 0;
1989 vm_map_deallocate(addressMap);
1990 addressMap = 0;
1991 }
1992 }
1993
1994 return( ok );
1995 }
1996
1997 /* LP64todo - these need to expand */
1998 struct IOMemoryDescriptorMapAllocRef
1999 {
2000 ipc_port_t sharedMem;
2001 vm_size_t size;
2002 vm_offset_t mapped;
2003 IOByteCount sourceOffset;
2004 IOOptionBits options;
2005 };
2006
2007 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2008 {
2009 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2010 IOReturn err;
2011
2012 do {
2013 if( ref->sharedMem) {
2014 vm_prot_t prot = VM_PROT_READ
2015 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2016
2017 // set memory entry cache
2018 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2019 switch (ref->options & kIOMapCacheMask)
2020 {
2021 case kIOMapInhibitCache:
2022 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2023 break;
2024
2025 case kIOMapWriteThruCache:
2026 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2027 break;
2028
2029 case kIOMapWriteCombineCache:
2030 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2031 break;
2032
2033 case kIOMapCopybackCache:
2034 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2035 break;
2036
2037 case kIOMapDefaultCache:
2038 default:
2039 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2040 break;
2041 }
2042
2043 vm_size_t unused = 0;
2044
2045 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2046 memEntryCacheMode, NULL, ref->sharedMem );
2047 if (KERN_SUCCESS != err)
2048 IOLog("MAP_MEM_ONLY failed %d\n", err);
2049
2050 err = vm_map( map,
2051 &ref->mapped,
2052 ref->size, 0 /* mask */,
2053 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2054 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2055 ref->sharedMem, ref->sourceOffset,
2056 false, // copy
2057 prot, // cur
2058 prot, // max
2059 VM_INHERIT_NONE);
2060
2061 if( KERN_SUCCESS != err) {
2062 ref->mapped = 0;
2063 continue;
2064 }
2065
2066 } else {
2067
2068 err = vm_allocate( map, &ref->mapped, ref->size,
2069 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2070 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2071
2072 if( KERN_SUCCESS != err) {
2073 ref->mapped = 0;
2074 continue;
2075 }
2076
2077 // make sure these allocations don't get copied into the child if the task forks.
2078 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2079 assert( KERN_SUCCESS == err );
2080 }
2081
2082 } while( false );
2083
2084 return( err );
2085 }
2086
2087
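// doMap() establishes the actual VM mapping for a memory descriptor. It rounds
// the request out to whole pages, then either rewires an existing mapping in
// place (kIOMapReference | kIOMapUnique, transposing UPLs and swapping memory
// entries) or allocates a new virtual range with IOMemoryDescriptorMapAlloc
// (via IOIteratePageableMaps for pageable buffer memory in the kernel map).
// Unless the memory is backed purely by a shared memory entry with no device
// pager, handleFault() is then called to enter the physical pages.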
2088 IOReturn IOMemoryDescriptor::doMap(
2089 vm_map_t addressMap,
2090 IOVirtualAddress * atAddress,
2091 IOOptionBits options,
2092 IOByteCount sourceOffset,
2093 IOByteCount length )
2094 {
2095 IOReturn err = kIOReturnSuccess;
2096 memory_object_t pager;
2097 vm_address_t logical;
2098 IOByteCount pageOffset;
2099 IOPhysicalAddress sourceAddr;
2100 IOMemoryDescriptorMapAllocRef ref;
2101
2102 ref.sharedMem = (ipc_port_t) _memEntry;
2103 ref.sourceOffset = sourceOffset;
2104 ref.options = options;
2105
2106 do {
2107
2108 if( 0 == length)
2109 length = getLength();
2110
2111 sourceAddr = getSourceSegment( sourceOffset, NULL );
2112 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2113
2114 ref.size = round_page_32( length + pageOffset );
2115
2116 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2117 {
2118 upl_t redirUPL2;
2119 vm_size_t size;
2120 int flags;
2121
2122 _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
2123 ref.mapped = mapping->getVirtualAddress();
2124
2125 if (!_memEntry)
2126 {
2127 err = kIOReturnNotReadable;
2128 continue;
2129 }
2130
2131 size = length;
2132 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2133 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2134
2135 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2136 NULL, NULL,
2137 &flags))
2138 redirUPL2 = NULL;
2139
2140 err = upl_transpose(redirUPL2, mapping->redirUPL);
2141 if (kIOReturnSuccess != err)
2142 {
2143 IOLog("upl_transpose(%x)\n", err);
2144 err = kIOReturnSuccess;
2145 }
2146
2147 if (redirUPL2)
2148 {
2149 upl_commit(redirUPL2, NULL, 0);
2150 upl_deallocate(redirUPL2);
2151 redirUPL2 = 0;
2152 }
2153 {
2154 // swap the memEntries since they now refer to different vm_objects
2155 void * me = _memEntry;
2156 _memEntry = mapping->memory->_memEntry;
2157 mapping->memory->_memEntry = me;
2158 }
2159 }
2160 else
2161 {
2162
2163 logical = *atAddress;
2164 if( options & kIOMapAnywhere)
2165 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2166 ref.mapped = 0;
2167 else {
2168 ref.mapped = trunc_page_32( logical );
2169 if( (logical - ref.mapped) != pageOffset) {
2170 err = kIOReturnVMError;
2171 continue;
2172 }
2173 }
2174
2175 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2176 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2177 else
2178 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
2179 }
2180
2181 if( err != KERN_SUCCESS)
2182 continue;
2183
2184 if( reserved)
2185 pager = (memory_object_t) reserved->devicePager;
2186 else
2187 pager = MACH_PORT_NULL;
2188
2189 if( !ref.sharedMem || pager )
2190 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
2191
2192 } while( false );
2193
2194 if( err != KERN_SUCCESS) {
2195 if( ref.mapped)
2196 doUnmap( addressMap, ref.mapped, ref.size );
2197 *atAddress = NULL;
2198 } else
2199 *atAddress = ref.mapped + pageOffset;
2200
2201 return( err );
2202 }
2203
2204 enum {
2205 kIOMemoryRedirected = 0x00010000
2206 };
2207
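// handleFault() enters the descriptor's physical pages into a mapping. With no
// addressMap it only blocks while the memory is redirected (kIOMemoryRedirected)
// and returns. Otherwise it walks the physical segments starting at sourceOffset,
// maps each one (directly with IOMapPages on i386) and, for device-pager backed
// memory, populates the pager either once for a contiguous range or page by
// page; a temporary vm_fault() works around drivers that query the pmap before
// first touch.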
2208 IOReturn IOMemoryDescriptor::handleFault(
2209 void * _pager,
2210 vm_map_t addressMap,
2211 IOVirtualAddress address,
2212 IOByteCount sourceOffset,
2213 IOByteCount length,
2214 IOOptionBits options )
2215 {
2216 IOReturn err = kIOReturnSuccess;
2217 memory_object_t pager = (memory_object_t) _pager;
2218 vm_size_t size;
2219 vm_size_t bytes;
2220 vm_size_t page;
2221 IOByteCount pageOffset;
2222 IOByteCount pagerOffset;
2223 IOPhysicalLength segLen;
2224 addr64_t physAddr;
2225
2226 if( !addressMap) {
2227
2228 if( kIOMemoryRedirected & _flags) {
2229 #ifdef DEBUG
2230 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
2231 #endif
2232 do {
2233 SLEEP;
2234 } while( kIOMemoryRedirected & _flags );
2235 }
2236
2237 return( kIOReturnSuccess );
2238 }
2239
2240 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2241 assert( physAddr );
2242 pageOffset = physAddr - trunc_page_64( physAddr );
2243 pagerOffset = sourceOffset;
2244
2245 size = length + pageOffset;
2246 physAddr -= pageOffset;
2247
2248 segLen += pageOffset;
2249 bytes = size;
2250 do {
2251 // in the middle of the loop only map whole pages
2252 if( segLen >= bytes)
2253 segLen = bytes;
2254 else if( segLen != trunc_page_32( segLen))
2255 err = kIOReturnVMError;
2256 if( physAddr != trunc_page_64( physAddr))
2257 err = kIOReturnBadArgument;
2258
2259 #ifdef DEBUG
2260 if( kIOLogMapping & gIOKitDebug)
2261 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
2262 addressMap, address + pageOffset, physAddr + pageOffset,
2263 segLen - pageOffset);
2264 #endif
2265
2266
2267
2268
2269
2270 #ifdef i386
2271 /* i386 doesn't support faulting on device memory yet */
2272 if( addressMap && (kIOReturnSuccess == err))
2273 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
2274 assert( KERN_SUCCESS == err );
2275 if( err)
2276 break;
2277 #endif
2278
2279 if( pager) {
2280 if( reserved && reserved->pagerContig) {
2281 IOPhysicalLength allLen;
2282 addr64_t allPhys;
2283
2284 allPhys = getPhysicalSegment64( 0, &allLen );
2285 assert( allPhys );
2286 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
2287
2288 } else {
2289
2290 for( page = 0;
2291 (page < segLen) && (KERN_SUCCESS == err);
2292 page += page_size) {
2293 err = device_pager_populate_object(pager, pagerOffset,
2294 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
2295 pagerOffset += page_size;
2296 }
2297 }
2298 assert( KERN_SUCCESS == err );
2299 if( err)
2300 break;
2301 }
2302 #ifndef i386
2303 /* *** ALERT *** */
2304 /* *** Temporary Workaround *** */
2305
2306 /* This call to vm_fault causes an early pmap level resolution */
2307 /* of the mappings created above. The need for this is in absolute */
2308 /* violation of the basic tenet that the pmap layer is a cache. */
2309 /* Further, it implies a serious I/O architectural violation on */
2310 /* the part of some user of the mapping. As of this writing, */
2311 /* the call to vm_fault is needed because the NVIDIA driver */
2312 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2313 /* fixed as soon as possible. The NVIDIA driver should not */
2314 /* need to query for this info as it should know from the doMap */
2315 /* call where the physical memory is mapped. When a query is */
2316 /* necessary to find a physical mapping, it should be done */
2317 /* through an IOKit call which includes the mapped memory */
2318 /* handle. This is required for machine architecture independence.*/
2319
2320 if(!(kIOMemoryRedirected & _flags)) {
2321 vm_fault(addressMap,
2322 (vm_map_offset_t)address,
2323 VM_PROT_READ|VM_PROT_WRITE,
2324 FALSE, THREAD_UNINT, NULL,
2325 (vm_map_offset_t)0);
2326 }
2327
2328 /* *** Temporary Workaround *** */
2329 /* *** ALERT *** */
2330 #endif
2331 sourceOffset += segLen - pageOffset;
2332 address += segLen;
2333 bytes -= segLen;
2334 pageOffset = 0;
2335
2336 } while( bytes
2337 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2338
2339 if( bytes)
2340 err = kIOReturnBadArgument;
2341
2342 return( err );
2343 }
2344
2345 IOReturn IOMemoryDescriptor::doUnmap(
2346 vm_map_t addressMap,
2347 IOVirtualAddress logical,
2348 IOByteCount length )
2349 {
2350 IOReturn err;
2351
2352 #ifdef DEBUG
2353 if( kIOLogMapping & gIOKitDebug)
2354 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2355 addressMap, logical, length );
2356 #endif
2357
2358 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2359
2360 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2361 addressMap = IOPageableMapForAddress( logical );
2362
2363 err = vm_deallocate( addressMap, logical, length );
2364
2365 } else
2366 err = kIOReturnSuccess;
2367
2368 return( err );
2369 }
2370
2371 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2372 {
2373 IOReturn err = kIOReturnSuccess;
2374 _IOMemoryMap * mapping = 0;
2375 OSIterator * iter;
2376
2377 LOCK;
2378
2379 if( doRedirect)
2380 _flags |= kIOMemoryRedirected;
2381 else
2382 _flags &= ~kIOMemoryRedirected;
2383
2384 do {
2385 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2386 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2387 mapping->redirect( safeTask, doRedirect );
2388
2389 iter->release();
2390 }
2391 } while( false );
2392
2393 if (!doRedirect)
2394 {
2395 WAKEUP;
2396 }
2397
2398 UNLOCK;
2399
2400 // temporary binary compatibility
2401 IOSubMemoryDescriptor * subMem;
2402 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2403 err = subMem->redirect( safeTask, doRedirect );
2404 else
2405 err = kIOReturnSuccess;
2406
2407 return( err );
2408 }
2409
2410 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2411 {
2412 return( _parent->redirect( safeTask, doRedirect ));
2413 }
2414
2415 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2416 {
2417 IOReturn err = kIOReturnSuccess;
2418
2419 if( superMap) {
2420 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2421 } else {
2422
2423 LOCK;
2424 if( logical && addressMap
2425 && (!safeTask || (get_task_map(safeTask) != addressMap))
2426 && (0 == (options & kIOMapStatic)))
2427 {
2428 IOUnmapPages( addressMap, logical, length );
2429 if(!doRedirect && safeTask
2430 && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
2431 {
2432 err = vm_deallocate( addressMap, logical, length );
2433 err = memory->doMap( addressMap, &logical,
2434 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2435 offset, length );
2436 } else
2437 err = kIOReturnSuccess;
2438 #ifdef DEBUG
2439 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
2440 #endif
2441 }
2442 UNLOCK;
2443 }
2444
2445 if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2446 && safeTask
2447 && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
2448 memory->redirect(safeTask, doRedirect);
2449
2450 return( err );
2451 }
2452
2453 IOReturn _IOMemoryMap::unmap( void )
2454 {
2455 IOReturn err;
2456
2457 LOCK;
2458
2459 if( logical && addressMap && (0 == superMap)
2460 && (0 == (options & kIOMapStatic))) {
2461
2462 err = memory->doUnmap( addressMap, logical, length );
2463 vm_map_deallocate(addressMap);
2464 addressMap = 0;
2465
2466 } else
2467 err = kIOReturnSuccess;
2468
2469 logical = 0;
2470
2471 UNLOCK;
2472
2473 return( err );
2474 }
2475
2476 void _IOMemoryMap::taskDied( void )
2477 {
2478 LOCK;
2479 if( addressMap) {
2480 vm_map_deallocate(addressMap);
2481 addressMap = 0;
2482 }
2483 addressTask = 0;
2484 logical = 0;
2485 UNLOCK;
2486 }
2487
2488 // Overload the release mechanism. All mappings must be members
2489 // of a memory descriptor's _mappings set. This means that we
2490 // always have 2 references on a mapping. When either of these references
2491 // is released we need to free ourselves.
2492 void _IOMemoryMap::taggedRelease(const void *tag) const
2493 {
2494 LOCK;
2495 super::taggedRelease(tag, 2);
2496 UNLOCK;
2497 }
2498
2499 void _IOMemoryMap::free()
2500 {
2501 unmap();
2502
2503 if( memory) {
2504 LOCK;
2505 memory->removeMapping( this);
2506 UNLOCK;
2507 memory->release();
2508 }
2509
2510 if (owner && (owner != memory))
2511 {
2512 LOCK;
2513 owner->removeMapping(this);
2514 UNLOCK;
2515 }
2516
2517 if( superMap)
2518 superMap->release();
2519
2520 if (redirUPL) {
2521 upl_commit(redirUPL, NULL, 0);
2522 upl_deallocate(redirUPL);
2523 }
2524
2525 super::free();
2526 }
2527
2528 IOByteCount _IOMemoryMap::getLength()
2529 {
2530 return( length );
2531 }
2532
2533 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2534 {
2535 return( logical);
2536 }
2537
2538 task_t _IOMemoryMap::getAddressTask()
2539 {
2540 if( superMap)
2541 return( superMap->getAddressTask());
2542 else
2543 return( addressTask);
2544 }
2545
2546 IOOptionBits _IOMemoryMap::getMapOptions()
2547 {
2548 return( options);
2549 }
2550
2551 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2552 {
2553 return( memory );
2554 }
2555
2556 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2557 IOMemoryDescriptor * owner,
2558 task_t task,
2559 IOVirtualAddress toAddress,
2560 IOOptionBits _options,
2561 IOByteCount _offset,
2562 IOByteCount _length )
2563 {
2564 _IOMemoryMap * mapping;
2565
2566 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
2567 return( 0 );
2568 if( options & kIOMapUnique)
2569 return( 0 );
2570 if( (options ^ _options) & kIOMapReadOnly)
2571 return( 0 );
2572 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2573 && ((options ^ _options) & kIOMapCacheMask))
2574 return( 0 );
2575
2576 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2577 return( 0 );
2578
2579 if( _offset < offset)
2580 return( 0 );
2581
2582 _offset -= offset;
2583
2584 if( (_offset + _length) > length)
2585 return( 0 );
2586
2587 if( (length == _length) && (!_offset)) {
2588 retain();
2589 mapping = this;
2590
2591 } else {
2592 mapping = new _IOMemoryMap;
2593 if( mapping
2594 && !mapping->initCompatible( owner, this, _offset, _length )) {
2595 mapping->release();
2596 mapping = 0;
2597 }
2598 }
2599
2600 return( mapping );
2601 }
2602
2603 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2604 IOPhysicalLength * _length)
2605 {
2606 IOPhysicalAddress address;
2607
2608 LOCK;
2609 address = memory->getPhysicalSegment( offset + _offset, _length );
2610 UNLOCK;
2611
2612 return( address );
2613 }
2614
2615 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2616
2617 #undef super
2618 #define super OSObject
2619
2620 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2621
2622 void IOMemoryDescriptor::initialize( void )
2623 {
2624 if( 0 == gIOMemoryLock)
2625 gIOMemoryLock = IORecursiveLockAlloc();
2626
2627 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2628 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2629 }
2630
2631 void IOMemoryDescriptor::free( void )
2632 {
2633 if( _mappings)
2634 _mappings->release();
2635
2636 super::free();
2637 }
2638
2639 IOMemoryMap * IOMemoryDescriptor::setMapping(
2640 task_t intoTask,
2641 IOVirtualAddress mapAddress,
2642 IOOptionBits options )
2643 {
2644 _IOMemoryMap * newMap;
2645
2646 newMap = new _IOMemoryMap;
2647
2648 LOCK;
2649
2650 if( newMap
2651 && !newMap->initWithDescriptor( this, intoTask, mapAddress,
2652 options | kIOMapStatic, 0, getLength() )) {
2653 newMap->release();
2654 newMap = 0;
2655 }
2656
2657 addMapping( newMap);
2658
2659 UNLOCK;
2660
2661 return( newMap);
2662 }
2663
2664 IOMemoryMap * IOMemoryDescriptor::map(
2665 IOOptionBits options )
2666 {
2667
2668 return( makeMapping( this, kernel_task, 0,
2669 options | kIOMapAnywhere,
2670 0, getLength() ));
2671 }
2672
2673 IOMemoryMap * IOMemoryDescriptor::map(
2674 task_t intoTask,
2675 IOVirtualAddress toAddress,
2676 IOOptionBits options,
2677 IOByteCount offset,
2678 IOByteCount length )
2679 {
2680 if( 0 == length)
2681 length = getLength();
2682
2683 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2684 }
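
// A minimal usage sketch, assuming 'md' is an already-created IOMemoryDescriptor;
// the options-only form maps into kernel_task at an address chosen by the VM:
//
//     IOMemoryMap * map = md->map(0);
//     if (map) {
//         IOVirtualAddress va  = map->getVirtualAddress();
//         IOByteCount      len = map->getLength();
//         /* ... access [va, va + len) ... */
//         map->release();     // drops the mapping when no longer needed
//     }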
2685
2686 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2687 IOOptionBits options,
2688 IOByteCount offset)
2689 {
2690 IOReturn err = kIOReturnSuccess;
2691 IOMemoryDescriptor * physMem = 0;
2692
2693 LOCK;
2694
2695 if (logical && addressMap) do
2696 {
2697 if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2698 {
2699 physMem = memory;
2700 physMem->retain();
2701 }
2702
2703 if (!redirUPL)
2704 {
2705 vm_size_t size = length;
2706 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2707 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2708 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
2709 NULL, NULL,
2710 &flags))
2711 redirUPL = 0;
2712
2713 if (physMem)
2714 {
2715 IOUnmapPages( addressMap, logical, length );
2716 physMem->redirect(0, true);
2717 }
2718 }
2719
2720 if (newBackingMemory)
2721 {
2722 if (newBackingMemory != memory)
2723 {
2724 if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
2725 options | kIOMapUnique | kIOMapReference,
2726 offset, length))
2727 err = kIOReturnError;
2728 }
2729 if (redirUPL)
2730 {
2731 upl_commit(redirUPL, NULL, 0);
2732 upl_deallocate(redirUPL);
2733 redirUPL = 0;
2734 }
2735 if (physMem)
2736 physMem->redirect(0, false);
2737 }
2738 }
2739 while (false);
2740
2741 UNLOCK;
2742
2743 if (physMem)
2744 physMem->release();
2745
2746 return (err);
2747 }
2748
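// makeMapping() is the common factory behind map() and setMapping(). For
// kIOMapUnique requests it wraps a physically contiguous range in a temporary
// physical descriptor (or retains this one) and, with kIOMapReference, rebinds
// the _IOMemoryMap passed in through toAddress. Otherwise it first scans the
// existing _mappings set for a compatible mapping to share before creating a
// new _IOMemoryMap; whatever it returns has been added to the owner's set.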
2749 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2750 IOMemoryDescriptor * owner,
2751 task_t intoTask,
2752 IOVirtualAddress toAddress,
2753 IOOptionBits options,
2754 IOByteCount offset,
2755 IOByteCount length )
2756 {
2757 IOMemoryDescriptor * mapDesc = 0;
2758 _IOMemoryMap * mapping = 0;
2759 OSIterator * iter;
2760
2761 LOCK;
2762
2763 do
2764 {
2765 if (kIOMapUnique & options)
2766 {
2767 IOPhysicalAddress phys;
2768 IOByteCount physLen;
2769
2770 if (owner != this)
2771 continue;
2772
2773 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2774 {
2775 phys = getPhysicalSegment(offset, &physLen);
2776 if (!phys || (physLen < length))
2777 continue;
2778
2779 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
2780 phys, length, _direction);
2781 if (!mapDesc)
2782 continue;
2783 offset = 0;
2784 }
2785 else
2786 {
2787 mapDesc = this;
2788 mapDesc->retain();
2789 }
2790
2791 if (kIOMapReference & options)
2792 {
2793 mapping = (_IOMemoryMap *) toAddress;
2794 mapping->retain();
2795
2796 #if 1
2797 uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
2798 pageOffset1 -= trunc_page_32( pageOffset1 );
2799
2800 uint32_t pageOffset2 = mapping->getVirtualAddress();
2801 pageOffset2 -= trunc_page_32( pageOffset2 );
2802
2803 if (pageOffset1 != pageOffset2)
2804 IOLog("::redirect can't map offset %x to addr %x\n",
2805 pageOffset1, mapping->getVirtualAddress());
2806 #endif
2807
2808
2809 if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2810 offset, length ))
2811 {
2812 #ifdef DEBUG
2813 IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
2814 #endif
2815 }
2816
2817 if (mapping->owner)
2818 mapping->owner->removeMapping(mapping);
2819 continue;
2820 }
2821 }
2822 else
2823 {
2824 // look for an existing mapping
2825 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2826
2827 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2828
2829 if( (mapping = mapping->copyCompatible(
2830 owner, intoTask, toAddress,
2831 options | kIOMapReference,
2832 offset, length )))
2833 break;
2834 }
2835 iter->release();
2836 }
2837
2838
2839 if (mapping)
2840 mapping->retain();
2841
2842 if( mapping || (options & kIOMapReference))
2843 continue;
2844
2845 mapDesc = owner;
2846 mapDesc->retain();
2847 }
2848 owner = this;
2849
2850 mapping = new _IOMemoryMap;
2851 if( mapping
2852 && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2853 offset, length )) {
2854 #ifdef DEBUG
2855 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2856 #endif
2857 mapping->release();
2858 mapping = 0;
2859 }
2860
2861 if (mapping)
2862 mapping->retain();
2863
2864 } while( false );
2865
2866 if (mapping)
2867 {
2868 mapping->owner = owner;
2869 owner->addMapping( mapping);
2870 mapping->release();
2871 }
2872
2873 UNLOCK;
2874
2875 if (mapDesc)
2876 mapDesc->release();
2877
2878 return( mapping);
2879 }
2880
2881 void IOMemoryDescriptor::addMapping(
2882 IOMemoryMap * mapping )
2883 {
2884 if( mapping) {
2885 if( 0 == _mappings)
2886 _mappings = OSSet::withCapacity(1);
2887 if( _mappings )
2888 _mappings->setObject( mapping );
2889 }
2890 }
2891
2892 void IOMemoryDescriptor::removeMapping(
2893 IOMemoryMap * mapping )
2894 {
2895 if( _mappings)
2896 _mappings->removeObject( mapping);
2897 }
2898
2899 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2900
2901 #undef super
2902 #define super IOMemoryDescriptor
2903
2904 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2905
2906 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2907
2908 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2909 IOByteCount offset, IOByteCount length,
2910 IODirection direction )
2911 {
2912 if( !parent)
2913 return( false);
2914
2915 if( (offset + length) > parent->getLength())
2916 return( false);
2917
2918 /*
2919 * We can check the _parent instance variable before having ever set it
2920 * to an initial value because I/O Kit guarantees that all our instance
2921 * variables are zeroed on an object's allocation.
2922 */
2923
2924 if( !_parent) {
2925 if( !super::init())
2926 return( false );
2927 } else {
2928 /*
2929 * An existing memory descriptor is being retargeted to
2930 * point to somewhere else. Clean up our present state.
2931 */
2932
2933 _parent->release();
2934 _parent = 0;
2935 }
2936
2937 parent->retain();
2938 _parent = parent;
2939 _start = offset;
2940 _length = length;
2941 _direction = direction;
2942 _tag = parent->getTag();
2943
2944 return( true );
2945 }
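
// A minimal sketch of building a sub-range view, assuming 'parent' is a valid
// IOMemoryDescriptor covering at least offset + length bytes:
//
//     IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
//     if (sub && !sub->initSubRange(parent, offset, length, parent->getDirection())) {
//         sub->release();
//         sub = 0;
//     }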
2946
2947 void IOSubMemoryDescriptor::free( void )
2948 {
2949 if( _parent)
2950 _parent->release();
2951
2952 super::free();
2953 }
2954
2955
2956 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2957 IOByteCount * length )
2958 {
2959 IOPhysicalAddress address;
2960 IOByteCount actualLength;
2961
2962 assert(offset <= _length);
2963
2964 if( length)
2965 *length = 0;
2966
2967 if( offset >= _length)
2968 return( 0 );
2969
2970 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2971
2972 if( address && length)
2973 *length = min( _length - offset, actualLength );
2974
2975 return( address );
2976 }
2977
2978
2979 IOReturn IOSubMemoryDescriptor::doMap(
2980 vm_map_t addressMap,
2981 IOVirtualAddress * atAddress,
2982 IOOptionBits options,
2983 IOByteCount sourceOffset,
2984 IOByteCount length )
2985 {
2986 if( sourceOffset >= _length)
2987 return( kIOReturnOverrun );
2988 return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
2989 }
2990
2991 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2992 IOByteCount * length )
2993 {
2994 IOPhysicalAddress address;
2995 IOByteCount actualLength;
2996
2997 assert(offset <= _length);
2998
2999 if( length)
3000 *length = 0;
3001
3002 if( offset >= _length)
3003 return( 0 );
3004
3005 address = _parent->getSourceSegment( offset + _start, &actualLength );
3006
3007 if( address && length)
3008 *length = min( _length - offset, actualLength );
3009
3010 return( address );
3011 }
3012
3013 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3014 IOByteCount * lengthOfSegment)
3015 {
3016 return( 0 );
3017 }
3018
3019 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3020 void * bytes, IOByteCount length)
3021 {
3022 IOByteCount byteCount;
3023
3024 assert(offset <= _length);
3025
3026 if( offset >= _length)
3027 return( 0 );
3028
3029 LOCK;
3030 byteCount = _parent->readBytes( _start + offset, bytes,
3031 min(length, _length - offset) );
3032 UNLOCK;
3033
3034 return( byteCount );
3035 }
3036
3037 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3038 const void* bytes, IOByteCount length)
3039 {
3040 IOByteCount byteCount;
3041
3042 assert(offset <= _length);
3043
3044 if( offset >= _length)
3045 return( 0 );
3046
3047 LOCK;
3048 byteCount = _parent->writeBytes( _start + offset, bytes,
3049 min(length, _length - offset) );
3050 UNLOCK;
3051
3052 return( byteCount );
3053 }
3054
3055 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3056 IOOptionBits * oldState )
3057 {
3058 IOReturn err;
3059
3060 LOCK;
3061 err = _parent->setPurgeable( newState, oldState );
3062 UNLOCK;
3063
3064 return( err );
3065 }
3066
3067 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3068 IOByteCount offset, IOByteCount length )
3069 {
3070 IOReturn err;
3071
3072 assert(offset <= _length);
3073
3074 if( offset >= _length)
3075 return( kIOReturnOverrun );
3076
3077 LOCK;
3078 err = _parent->performOperation( options, _start + offset,
3079 min(length, _length - offset) );
3080 UNLOCK;
3081
3082 return( err );
3083 }
3084
3085 IOReturn IOSubMemoryDescriptor::prepare(
3086 IODirection forDirection)
3087 {
3088 IOReturn err;
3089
3090 LOCK;
3091 err = _parent->prepare( forDirection);
3092 UNLOCK;
3093
3094 return( err );
3095 }
3096
3097 IOReturn IOSubMemoryDescriptor::complete(
3098 IODirection forDirection)
3099 {
3100 IOReturn err;
3101
3102 LOCK;
3103 err = _parent->complete( forDirection);
3104 UNLOCK;
3105
3106 return( err );
3107 }
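
// A minimal sketch of the usual wiring bracket, assuming 'md' is any
// IOMemoryDescriptor (a sub-descriptor simply forwards to its parent):
//
//     if (kIOReturnSuccess == md->prepare()) {
//         /* ... memory is wired and safe for I/O ... */
//         md->complete();
//     }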
3108
3109 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3110 IOMemoryDescriptor * owner,
3111 task_t intoTask,
3112 IOVirtualAddress toAddress,
3113 IOOptionBits options,
3114 IOByteCount offset,
3115 IOByteCount length )
3116 {
3117 IOMemoryMap * mapping = 0;
3118
3119 if (!(kIOMapUnique & options))
3120 mapping = (IOMemoryMap *) _parent->makeMapping(
3121 _parent, intoTask,
3122 toAddress - (_start + offset),
3123 options | kIOMapReference,
3124 _start + offset, length );
3125
3126 if( !mapping)
3127 mapping = (IOMemoryMap *) _parent->makeMapping(
3128 _parent, intoTask,
3129 toAddress,
3130 options, _start + offset, length );
3131
3132 if( !mapping)
3133 mapping = super::makeMapping( owner, intoTask, toAddress, options,
3134 offset, length );
3135
3136 return( mapping );
3137 }
3138
3139 /* ick */
3140
3141 bool
3142 IOSubMemoryDescriptor::initWithAddress(void * address,
3143 IOByteCount length,
3144 IODirection direction)
3145 {
3146 return( false );
3147 }
3148
3149 bool
3150 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3151 IOByteCount length,
3152 IODirection direction,
3153 task_t task)
3154 {
3155 return( false );
3156 }
3157
3158 bool
3159 IOSubMemoryDescriptor::initWithPhysicalAddress(
3160 IOPhysicalAddress address,
3161 IOByteCount length,
3162 IODirection direction )
3163 {
3164 return( false );
3165 }
3166
3167 bool
3168 IOSubMemoryDescriptor::initWithRanges(
3169 IOVirtualRange * ranges,
3170 UInt32 withCount,
3171 IODirection direction,
3172 task_t task,
3173 bool asReference)
3174 {
3175 return( false );
3176 }
3177
3178 bool
3179 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3180 UInt32 withCount,
3181 IODirection direction,
3182 bool asReference)
3183 {
3184 return( false );
3185 }
3186
3187 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3188
3189 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3190 {
3191 OSSymbol const *keys[2];
3192 OSObject *values[2];
3193 struct SerData {
3194 user_addr_t address;
3195 user_size_t length;
3196 } *vcopy;
3197 unsigned int index, nRanges;
3198 bool result;
3199
3200 IOOptionBits type = _flags & kIOMemoryTypeMask;
3201
3202 if (s == NULL) return false;
3203 if (s->previouslySerialized(this)) return true;
3204
3205 // Pretend we are an array.
3206 if (!s->addXMLStartTag(this, "array")) return false;
3207
3208 nRanges = _rangesCount;
3209 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3210 if (vcopy == 0) return false;
3211
3212 keys[0] = OSSymbol::withCString("address");
3213 keys[1] = OSSymbol::withCString("length");
3214
3215 result = false;
3216 values[0] = values[1] = 0;
3217
3218 // From this point on, errors jump to the bail label.
3219
3220 // Copy the volatile data so we don't have to allocate memory
3221 // while the lock is held.
3222 LOCK;
3223 if (nRanges == _rangesCount) {
3224 Ranges vec = _ranges;
3225 for (index = 0; index < nRanges; index++) {
3226 user_addr_t addr; IOByteCount len;
3227 getAddrLenForInd(addr, len, type, vec, index);
3228 vcopy[index].address = addr;
3229 vcopy[index].length = len;
3230 }
3231 } else {
3232 // The descriptor changed out from under us. Give up.
3233 UNLOCK;
3234 result = false;
3235 goto bail;
3236 }
3237 UNLOCK;
3238
3239 for (index = 0; index < nRanges; index++)
3240 {
3241 user_addr_t addr = vcopy[index].address;
3242 IOByteCount len = (IOByteCount) vcopy[index].length;
3243 values[0] =
3244 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3245 if (values[0] == 0) {
3246 result = false;
3247 goto bail;
3248 }
3249 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3250 if (values[1] == 0) {
3251 result = false;
3252 goto bail;
3253 }
3254 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3255 if (dict == 0) {
3256 result = false;
3257 goto bail;
3258 }
3259 values[0]->release();
3260 values[1]->release();
3261 values[0] = values[1] = 0;
3262
3263 result = dict->serialize(s);
3264 dict->release();
3265 if (!result) {
3266 goto bail;
3267 }
3268 }
3269 result = s->addXMLEndTag("array");
3270
3271 bail:
3272 if (values[0])
3273 values[0]->release();
3274 if (values[1])
3275 values[1]->release();
3276 if (keys[0])
3277 keys[0]->release();
3278 if (keys[1])
3279 keys[1]->release();
3280 if (vcopy)
3281 IOFree(vcopy, sizeof(SerData) * nRanges);
3282 return result;
3283 }
3284
3285 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3286 {
3287 if (!s) {
3288 return (false);
3289 }
3290 if (s->previouslySerialized(this)) return true;
3291
3292 // Pretend we are a dictionary.
3293 // We must duplicate the functionality of OSDictionary here
3294 // because otherwise object references will not work;
3295 // they are based on the value of the object passed to
3296 // previouslySerialized and addXMLStartTag.
3297
3298 if (!s->addXMLStartTag(this, "dict")) return false;
3299
3300 char const *keys[3] = {"offset", "length", "parent"};
3301
3302 OSObject *values[3];
3303 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3304 if (values[0] == 0)
3305 return false;
3306 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3307 if (values[1] == 0) {
3308 values[0]->release();
3309 return false;
3310 }
3311 values[2] = _parent;
3312
3313 bool result = true;
3314 for (int i=0; i<3; i++) {
3315 if (!s->addString("<key>") ||
3316 !s->addString(keys[i]) ||
3317 !s->addXMLEndTag("key") ||
3318 !values[i]->serialize(s)) {
3319 result = false;
3320 break;
3321 }
3322 }
3323 values[0]->release();
3324 values[1]->release();
3325 if (!result) {
3326 return false;
3327 }
3328
3329 return s->addXMLEndTag("dict");
3330 }
3331
3332 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3333
3334 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3335 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3336 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3337 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3338 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3339 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3340 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3341 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3342 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3343 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3344 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3345 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3346 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3347 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3348 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3349 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3350
3351 /* ex-inline function implementation */
3352 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
3353 { return( getPhysicalSegment( 0, 0 )); }