[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp (xnu-792.17.14)
1 /*
2 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
36
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
43
44 #include <IOKit/IOKitDebug.h>
45
46 #include "IOKitKernelInternal.h"
47
48 #include <libkern/c++/OSContainers.h>
49 #include <libkern/c++/OSDictionary.h>
50 #include <libkern/c++/OSArray.h>
51 #include <libkern/c++/OSSymbol.h>
52 #include <libkern/c++/OSNumber.h>
53
54 #include <sys/uio.h>
55
56 __BEGIN_DECLS
57 #include <vm/pmap.h>
58 #include <vm/vm_pageout.h>
59 #include <vm/vm_shared_memory_server.h>
60 #include <mach/memory_object_types.h>
61 #include <device/device_port.h>
62
63 #ifndef i386
64 #include <mach/vm_prot.h>
65 #include <vm/vm_fault.h>
66 struct phys_entry *pmap_find_physentry(ppnum_t pa);
67 #endif
68
69 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
70 void ipc_port_release_send(ipc_port_t port);
71
72 /* Copy between a physical page and a virtual address in the given vm_map */
73 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
74
75 memory_object_t
76 device_pager_setup(
77 memory_object_t pager,
78 int device_handle,
79 vm_size_t size,
80 int flags);
81 void
82 device_pager_deallocate(
83 memory_object_t);
84 kern_return_t
85 device_pager_populate_object(
86 memory_object_t pager,
87 vm_object_offset_t offset,
88 ppnum_t phys_addr,
89 vm_size_t size);
90 kern_return_t
91 memory_object_iopl_request(
92 ipc_port_t port,
93 memory_object_offset_t offset,
94 vm_size_t *upl_size,
95 upl_t *upl_ptr,
96 upl_page_info_array_t user_page_list,
97 unsigned int *page_list_count,
98 int *flags);
99
100 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
101
102 __END_DECLS
103
104 #define kIOMaximumMappedIOByteCount (512*1024*1024)
105
106 static IOMapper * gIOSystemMapper;
107 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
108
109 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
110
111 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
112
113 #define super IOMemoryDescriptor
114
115 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
116
117 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
118
119 static IORecursiveLock * gIOMemoryLock;
120
121 #define LOCK IORecursiveLockLock( gIOMemoryLock)
122 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
123 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
124 #define WAKEUP \
125 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
126
127 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
128
129 class _IOMemoryMap : public IOMemoryMap
130 {
131 OSDeclareDefaultStructors(_IOMemoryMap)
132 public:
133 IOMemoryDescriptor * memory;
134 IOMemoryMap * superMap;
135 IOByteCount offset;
136 IOByteCount length;
137 IOVirtualAddress logical;
138 task_t addressTask;
139 vm_map_t addressMap;
140 IOOptionBits options;
141 upl_t redirUPL;
142 ipc_port_t redirEntry;
143 IOMemoryDescriptor * owner;
144
145 protected:
146 virtual void taggedRelease(const void *tag = 0) const;
147 virtual void free();
148
149 public:
150
151 // IOMemoryMap methods
152 virtual IOVirtualAddress getVirtualAddress();
153 virtual IOByteCount getLength();
154 virtual task_t getAddressTask();
155 virtual IOMemoryDescriptor * getMemoryDescriptor();
156 virtual IOOptionBits getMapOptions();
157
158 virtual IOReturn unmap();
159 virtual void taskDied();
160
161 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
162 IOOptionBits options,
163 IOByteCount offset = 0);
164
165 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
166 IOByteCount * length);
167
168 // for IOMemoryDescriptor use
169 _IOMemoryMap * copyCompatible(
170 IOMemoryDescriptor * owner,
171 task_t intoTask,
172 IOVirtualAddress toAddress,
173 IOOptionBits options,
174 IOByteCount offset,
175 IOByteCount length );
176
177 bool initCompatible(
178 IOMemoryDescriptor * memory,
179 IOMemoryMap * superMap,
180 IOByteCount offset,
181 IOByteCount length );
182
183 bool initWithDescriptor(
184 IOMemoryDescriptor * memory,
185 task_t intoTask,
186 IOVirtualAddress toAddress,
187 IOOptionBits options,
188 IOByteCount offset,
189 IOByteCount length );
190
191 IOReturn redirect(
192 task_t intoTask, bool redirect );
193 };
194
195 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
196
197 // Some data structures and accessor macros used by the initWithOptions
198 // function.
199
200 enum ioPLBlockFlags {
201 kIOPLOnDevice = 0x00000001,
202 kIOPLExternUPL = 0x00000002,
203 };
204
205 struct typePersMDData
206 {
207 const IOGeneralMemoryDescriptor *fMD;
208 ipc_port_t fMemEntry;
209 };
210
211 struct ioPLBlock {
212 upl_t fIOPL;
213 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
214 vm_offset_t fPageInfo; // Pointer to page list or index into it
215 ppnum_t fMappedBase; // Page number of first page in this iopl
216 unsigned int fPageOffset; // Offset within first page of iopl
217 unsigned int fFlags; // Flags
218 };
219
220 struct ioGMDData {
221 IOMapper *fMapper;
222 unsigned int fPageCnt;
223 upl_page_info_t fPageList[];
224 ioPLBlock fBlocks[];
225 };
226
227 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
228 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
229 #define getNumIOPL(osd, d) \
230 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
231 #define getPageList(d) (&(d->fPageList[0]))
232 #define computeDataSize(p, u) \
233 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
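/*
 * Illustrative note (added for clarity, not in the original source): the
 * OSData blob built for a virtual/UIO descriptor is laid out as
 *
 *     ioGMDData header | fPageCnt upl_page_info_t entries | ioPLBlock entries
 *
 * which is what getIOPLList(), getNumIOPL() and computeDataSize() assume;
 * e.g. computeDataSize(3, 2) reserves room for the header, three page-info
 * entries and two iopl blocks.
 */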
234
235
236 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
237
238 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
239
240
241 extern "C" {
242
243 kern_return_t device_data_action(
244 int device_handle,
245 ipc_port_t device_pager,
246 vm_prot_t protection,
247 vm_object_offset_t offset,
248 vm_size_t size)
249 {
250 struct ExpansionData {
251 void * devicePager;
252 unsigned int pagerContig:1;
253 unsigned int unused:31;
254 IOMemoryDescriptor * memory;
255 };
256 kern_return_t kr;
257 ExpansionData * ref = (ExpansionData *) device_handle;
258 IOMemoryDescriptor * memDesc;
259
260 LOCK;
261 memDesc = ref->memory;
262 if( memDesc)
263 {
264 memDesc->retain();
265 kr = memDesc->handleFault( device_pager, 0, 0,
266 offset, size, kIOMapDefaultCache /*?*/);
267 memDesc->release();
268 }
269 else
270 kr = KERN_ABORTED;
271 UNLOCK;
272
273 return( kr );
274 }
275
276 kern_return_t device_close(
277 int device_handle)
278 {
279 struct ExpansionData {
280 void * devicePager;
281 unsigned int pagerContig:1;
282 unsigned int unused:31;
283 IOMemoryDescriptor * memory;
284 };
285 ExpansionData * ref = (ExpansionData *) device_handle;
286
287 IODelete( ref, ExpansionData, 1 );
288
289 return( kIOReturnSuccess );
290 }
291 }; // end extern "C"
292
293 // Note this inline function uses C++ reference arguments to return values.
294 // This means that pointers are not passed, and NULLs don't have to be
295 // checked for, since a NULL reference is illegal.
296 static inline void
297 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
298 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
299 {
300 assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
301 || kIOMemoryTypeVirtual == type);
302 if (kIOMemoryTypeUIO == type) {
303 user_size_t us;
304 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
305 }
306 else {
307 IOVirtualRange cur = r.v[ind];
308 addr = cur.address;
309 len = cur.length;
310 }
311 }
312
313 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
314
315 /*
316 * withAddress:
317 *
318 * Create a new IOMemoryDescriptor. The buffer is a virtual address
319 * relative to the specified task. If no task is supplied, the kernel
320 * task is implied.
321 */
322 IOMemoryDescriptor *
323 IOMemoryDescriptor::withAddress(void * address,
324 IOByteCount length,
325 IODirection direction)
326 {
327 return IOMemoryDescriptor::
328 withAddress((vm_address_t) address, length, direction, kernel_task);
329 }
330
331 IOMemoryDescriptor *
332 IOMemoryDescriptor::withAddress(vm_address_t address,
333 IOByteCount length,
334 IODirection direction,
335 task_t task)
336 {
337 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
338 if (that)
339 {
340 if (that->initWithAddress(address, length, direction, task))
341 return that;
342
343 that->release();
344 }
345 return 0;
346 }
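/*
 * Illustrative sketch (added; not part of the original file): typical
 * driver-side use of withAddress() on a kernel buffer. The buffer and
 * length are hypothetical.
 *
 *     void * buf = IOMalloc(4096);
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
 *     if (md) {
 *         md->prepare();              // wire the pages for the transfer
 *         // ... program the device / DMA engine ...
 *         md->complete();
 *         md->release();
 *     }
 *     IOFree(buf, 4096);
 */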
347
348 IOMemoryDescriptor *
349 IOMemoryDescriptor::withPhysicalAddress(
350 IOPhysicalAddress address,
351 IOByteCount length,
352 IODirection direction )
353 {
354 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
355 if (self
356 && !self->initWithPhysicalAddress(address, length, direction)) {
357 self->release();
358 return 0;
359 }
360
361 return self;
362 }
363
364 IOMemoryDescriptor *
365 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
366 UInt32 withCount,
367 IODirection direction,
368 task_t task,
369 bool asReference)
370 {
371 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
372 if (that)
373 {
374 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
375 return that;
376
377 that->release();
378 }
379 return 0;
380 }
381
382
383 /*
384 * withRanges:
385 *
386 * Create a new IOMemoryDescriptor. The buffer is made up of several
387 * virtual address ranges, from a given task.
388 *
389 * Passing the ranges as a reference will avoid an extra allocation.
390 */
391 IOMemoryDescriptor *
392 IOMemoryDescriptor::withOptions(void * buffers,
393 UInt32 count,
394 UInt32 offset,
395 task_t task,
396 IOOptionBits opts,
397 IOMapper * mapper)
398 {
399 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
400
401 if (self
402 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
403 {
404 self->release();
405 return 0;
406 }
407
408 return self;
409 }
410
411 // Can't leave this abstract, but it should never be used directly.
412 bool IOMemoryDescriptor::initWithOptions(void * buffers,
413 UInt32 count,
414 UInt32 offset,
415 task_t task,
416 IOOptionBits options,
417 IOMapper * mapper)
418 {
419 // @@@ gvdl: Should I panic?
420 panic("IOMD::initWithOptions called\n");
421 return 0;
422 }
423
424 IOMemoryDescriptor *
425 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
426 UInt32 withCount,
427 IODirection direction,
428 bool asReference)
429 {
430 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
431 if (that)
432 {
433 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
434 return that;
435
436 that->release();
437 }
438 return 0;
439 }
440
441 IOMemoryDescriptor *
442 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
443 IOByteCount offset,
444 IOByteCount length,
445 IODirection direction)
446 {
447 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
448
449 if (self && !self->initSubRange(of, offset, length, direction)) {
450 self->release();
451 self = 0;
452 }
453 return self;
454 }
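/*
 * Illustrative sketch (hypothetical values): describing bytes 512..1023 of
 * an existing descriptor without copying any data.
 *
 *     IOMemoryDescriptor * sub =
 *         IOMemoryDescriptor::withSubRange(parentMD, 512, 512, kIODirectionOut);
 *     // 'sub' resolves its segments through parentMD at the given offset.
 */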
455
456 IOMemoryDescriptor * IOMemoryDescriptor::
457 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
458 {
459 IOGeneralMemoryDescriptor *origGenMD =
460 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
461
462 if (origGenMD)
463 return IOGeneralMemoryDescriptor::
464 withPersistentMemoryDescriptor(origGenMD);
465 else
466 return 0;
467 }
468
469 IOMemoryDescriptor * IOGeneralMemoryDescriptor::
470 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
471 {
472 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
473
474 if (!sharedMem)
475 return 0;
476
477 if (sharedMem == originalMD->_memEntry) {
478 originalMD->retain(); // Add a new reference to ourselves
479 ipc_port_release_send(sharedMem); // Remove extra send right
480 return originalMD;
481 }
482
483 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
484 typePersMDData initData = { originalMD, sharedMem };
485
486 if (self
487 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
488 self->release();
489 self = 0;
490 }
491 return self;
492 }
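/*
 * Illustrative sketch (added, hypothetical names): withPersistentMemoryDescriptor()
 * is used on a descriptor created with kIOMemoryPersistent to obtain a
 * descriptor that keeps referring to the same backing pages via the shared
 * named entry.
 *
 *     IOMemoryDescriptor * persist =
 *         IOMemoryDescriptor::withPersistentMemoryDescriptor(origMD);
 *     // Either origMD itself (with an extra retain) or a new descriptor
 *     // sharing origMD's named entry; release it when done.
 */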
493
494 void *IOGeneralMemoryDescriptor::createNamedEntry()
495 {
496 kern_return_t error;
497 ipc_port_t sharedMem;
498
499 IOOptionBits type = _flags & kIOMemoryTypeMask;
500
501 user_addr_t range0Addr;
502 IOByteCount range0Len;
503 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
504 range0Addr = trunc_page_64(range0Addr);
505
506 vm_size_t size = ptoa_32(_pages);
507 vm_address_t kernelPage = (vm_address_t) range0Addr;
508
509 vm_map_t theMap = ((_task == kernel_task)
510 && (kIOMemoryBufferPageable & _flags))
511 ? IOPageableMapForAddress(kernelPage)
512 : get_task_map(_task);
513
514 memory_object_size_t actualSize = size;
515 vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
516 if (_memEntry)
517 prot |= MAP_MEM_NAMED_REUSE;
518
519 error = mach_make_memory_entry_64(theMap,
520 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
521
522 if (KERN_SUCCESS == error) {
523 if (actualSize == size) {
524 return sharedMem;
525 } else {
526 #if IOASSERT
527 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
528 (UInt64)range0Addr, (UInt32)actualSize, size);
529 #endif
530 ipc_port_release_send( sharedMem );
531 }
532 }
533
534 return MACH_PORT_NULL;
535 }
536
537 /*
538 * initWithAddress:
539 *
540 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
541 * relative to the specified task. If no task is supplied, the kernel
542 * task is implied.
543 *
544 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
545 * initWithRanges again on an existing instance -- note this behavior
546 * is not commonly supported in other I/O Kit classes, although it is
547 * supported here.
548 */
549 bool
550 IOGeneralMemoryDescriptor::initWithAddress(void * address,
551 IOByteCount withLength,
552 IODirection withDirection)
553 {
554 _singleRange.v.address = (vm_address_t) address;
555 _singleRange.v.length = withLength;
556
557 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
558 }
559
560 bool
561 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
562 IOByteCount withLength,
563 IODirection withDirection,
564 task_t withTask)
565 {
566 _singleRange.v.address = address;
567 _singleRange.v.length = withLength;
568
569 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
570 }
571
572 bool
573 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
574 IOPhysicalAddress address,
575 IOByteCount withLength,
576 IODirection withDirection )
577 {
578 _singleRange.p.address = address;
579 _singleRange.p.length = withLength;
580
581 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
582 }
583
584 bool
585 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
586 IOPhysicalRange * ranges,
587 UInt32 count,
588 IODirection direction,
589 bool reference)
590 {
591 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
592
593 if (reference)
594 mdOpts |= kIOMemoryAsReference;
595
596 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
597 }
598
599 bool
600 IOGeneralMemoryDescriptor::initWithRanges(
601 IOVirtualRange * ranges,
602 UInt32 count,
603 IODirection direction,
604 task_t task,
605 bool reference)
606 {
607 IOOptionBits mdOpts = direction;
608
609 if (reference)
610 mdOpts |= kIOMemoryAsReference;
611
612 if (task) {
613 mdOpts |= kIOMemoryTypeVirtual;
614
615 // Auto-prepare if this is a kernel memory descriptor as very few
616 // clients bother to prepare() kernel memory.
617 // But it was not enforced so what are you going to do?
618 if (task == kernel_task)
619 mdOpts |= kIOMemoryAutoPrepare;
620 }
621 else
622 mdOpts |= kIOMemoryTypePhysical;
623
624 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
625 }
626
627 /*
628 * initWithOptions:
629 *
630 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
631 * address ranges from a given task, several physical ranges, a UPL from the
632 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
633 *
634 * Passing the ranges as a reference will avoid an extra allocation.
635 *
636 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
637 * existing instance -- note this behavior is not commonly supported in other
638 * I/O Kit classes, although it is supported here.
639 */
640
641 bool
642 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
643 UInt32 count,
644 UInt32 offset,
645 task_t task,
646 IOOptionBits options,
647 IOMapper * mapper)
648 {
649 IOOptionBits type = options & kIOMemoryTypeMask;
650
651 // Grab the original MD's configuration data to initialise the
652 // arguments to this function.
653 if (kIOMemoryTypePersistentMD == type) {
654
655 typePersMDData *initData = (typePersMDData *) buffers;
656 const IOGeneralMemoryDescriptor *orig = initData->fMD;
657 ioGMDData *dataP = getDataP(orig->_memoryEntries);
658
659 // Only accept persistent memory descriptors with valid dataP data.
660 assert(orig->_rangesCount == 1);
661 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
662 return false;
663
664 _memEntry = initData->fMemEntry; // Grab the new named entry
665 options = orig->_flags | kIOMemoryAsReference;
666 _singleRange = orig->_singleRange; // Initialise our range
667 buffers = &_singleRange;
668 count = 1;
669
670 // Now grab the original task and whatever mapper was previously used
671 task = orig->_task;
672 mapper = dataP->fMapper;
673
674 // We are ready to go through the original initialisation now
675 }
676
677 switch (type) {
678 case kIOMemoryTypeUIO:
679 case kIOMemoryTypeVirtual:
680 assert(task);
681 if (!task)
682 return false;
683 else
684 break;
685
686 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
687 mapper = kIOMapperNone;
688
689 case kIOMemoryTypeUPL:
690 assert(!task);
691 break;
692 default:
693 return false; /* bad argument */
694 }
695
696 assert(buffers);
697 assert(count);
698
699 /*
700 * We can check the _initialized instance variable before having ever set
701 * it to an initial value because I/O Kit guarantees that all our instance
702 * variables are zeroed on an object's allocation.
703 */
704
705 if (_initialized) {
706 /*
707 * An existing memory descriptor is being retargeted to point to
708 * somewhere else. Clean up our present state.
709 */
710
711 while (_wireCount)
712 complete();
713 if (_kernPtrAligned)
714 unmapFromKernel();
715 if (_ranges.v && _rangesIsAllocated)
716 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
717 if (_memEntry)
718 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
719 }
720 else {
721 if (!super::init())
722 return false;
723 _initialized = true;
724 }
725
726 // Grab the appropriate mapper
727 if (mapper == kIOMapperNone)
728 mapper = 0; // No Mapper
729 else if (!mapper) {
730 IOMapper::checkForSystemMapper();
731 gIOSystemMapper = mapper = IOMapper::gSystem;
732 }
733
734 // Remove the dynamic internal use flags from the initial setting
735 options &= ~(kIOMemoryPreparedReadOnly);
736 _flags = options;
737 _task = task;
738
739 // DEPRECATED variable initialisation
740 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
741 _position = 0;
742 _kernPtrAligned = 0;
743 _cachedPhysicalAddress = 0;
744 _cachedVirtualAddress = 0;
745
746 if (kIOMemoryTypeUPL == type) {
747
748 ioGMDData *dataP;
749 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
750
751 if (!_memoryEntries) {
752 _memoryEntries = OSData::withCapacity(dataSize);
753 if (!_memoryEntries)
754 return false;
755 }
756 else if (!_memoryEntries->initWithCapacity(dataSize))
757 return false;
758
759 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
760 dataP = getDataP(_memoryEntries);
761 dataP->fMapper = mapper;
762 dataP->fPageCnt = 0;
763
764 _wireCount++; // UPLs start out life wired
765
766 _length = count;
767 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
768
769 ioPLBlock iopl;
770 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
771
772 iopl.fIOPL = (upl_t) buffers;
773 // Set the flag kIOPLOnDevice conveniently equal to 1
774 iopl.fFlags = pageList->device | kIOPLExternUPL;
775 iopl.fIOMDOffset = 0;
776 if (!pageList->device) {
777 // Pre-compute the offset into the UPL's page list
778 pageList = &pageList[atop_32(offset)];
779 offset &= PAGE_MASK;
780 if (mapper) {
781 iopl.fMappedBase = mapper->iovmAlloc(_pages);
782 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
783 }
784 else
785 iopl.fMappedBase = 0;
786 }
787 else
788 iopl.fMappedBase = 0;
789 iopl.fPageInfo = (vm_address_t) pageList;
790 iopl.fPageOffset = offset;
791
792 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
793 }
794 else {
795 // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical
796
797 // Initialize the memory descriptor
798 if (options & kIOMemoryAsReference) {
799 _rangesIsAllocated = false;
800
801 // Hack assignment to get the buffer arg into _ranges.
802 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
803 // work, C++ sigh.
804 // This also initialises the uio & physical ranges.
805 _ranges.v = (IOVirtualRange *) buffers;
806 }
807 else {
808 assert(kIOMemoryTypeUIO != type);
809
810 _rangesIsAllocated = true;
811 _ranges.v = IONew(IOVirtualRange, count);
812 if (!_ranges.v)
813 return false;
814 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
815 }
816
817 // Find starting address within the vector of ranges
818 Ranges vec = _ranges;
819 UInt32 length = 0;
820 UInt32 pages = 0;
821 for (unsigned ind = 0; ind < count; ind++) {
822 user_addr_t addr;
823 UInt32 len;
824
825 // addr & len are returned by this function
826 getAddrLenForInd(addr, len, type, vec, ind);
827 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
828 len += length;
829 assert(len > length); // Check for 32 bit wrap around
830 length = len;
831 }
832 _length = length;
833 _pages = pages;
834 _rangesCount = count;
835
836 // Auto-prepare memory at creation time.
837 // Implied completion when descriptor is freed
838 if (kIOMemoryTypePhysical == type)
839 _wireCount++; // Physical MDs are, by definition, wired
840 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
841 ioGMDData *dataP;
842 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
843
844 if (!_memoryEntries) {
845 _memoryEntries = OSData::withCapacity(dataSize);
846 if (!_memoryEntries)
847 return false;
848 }
849 else if (!_memoryEntries->initWithCapacity(dataSize))
850 return false;
851
852 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
853 dataP = getDataP(_memoryEntries);
854 dataP->fMapper = mapper;
855 dataP->fPageCnt = _pages;
856
857 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
858 _memEntry = createNamedEntry();
859
860 if ((_flags & kIOMemoryAutoPrepare)
861 && prepare() != kIOReturnSuccess)
862 return false;
863 }
864 }
865
866 return true;
867 }
868
869 /*
870 * free
871 *
872 * Free resources.
873 */
874 void IOGeneralMemoryDescriptor::free()
875 {
876 LOCK;
877 if( reserved)
878 reserved->memory = 0;
879 UNLOCK;
880
881 while (_wireCount)
882 complete();
883 if (_memoryEntries)
884 _memoryEntries->release();
885
886 if (_kernPtrAligned)
887 unmapFromKernel();
888 if (_ranges.v && _rangesIsAllocated)
889 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
890
891 if (reserved && reserved->devicePager)
892 device_pager_deallocate( (memory_object_t) reserved->devicePager );
893
894 // memEntry holds a ref on the device pager which owns reserved
895 // (ExpansionData) so no reserved access after this point
896 if (_memEntry)
897 ipc_port_release_send( (ipc_port_t) _memEntry );
898
899 super::free();
900 }
901
902 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
903 /* DEPRECATED */ {
904 panic("IOGMD::unmapFromKernel deprecated");
905 /* DEPRECATED */ }
906 /* DEPRECATED */
907 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
908 /* DEPRECATED */ {
909 panic("IOGMD::mapIntoKernel deprecated");
910 /* DEPRECATED */ }
911
912 /*
913 * getDirection:
914 *
915 * Get the direction of the transfer.
916 */
917 IODirection IOMemoryDescriptor::getDirection() const
918 {
919 return _direction;
920 }
921
922 /*
923 * getLength:
924 *
925 * Get the length of the transfer (over all ranges).
926 */
927 IOByteCount IOMemoryDescriptor::getLength() const
928 {
929 return _length;
930 }
931
932 void IOMemoryDescriptor::setTag( IOOptionBits tag )
933 {
934 _tag = tag;
935 }
936
937 IOOptionBits IOMemoryDescriptor::getTag( void )
938 {
939 return( _tag);
940 }
941
942 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
943 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
944 IOByteCount * length )
945 {
946 IOPhysicalAddress physAddr = 0;
947
948 if( prepare() == kIOReturnSuccess) {
949 physAddr = getPhysicalSegment( offset, length );
950 complete();
951 }
952
953 return( physAddr );
954 }
955
956 IOByteCount IOMemoryDescriptor::readBytes
957 (IOByteCount offset, void *bytes, IOByteCount length)
958 {
959 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
960 IOByteCount remaining;
961
962 // Assert that this entire I/O is within the available range
963 assert(offset < _length);
964 assert(offset + length <= _length);
965 if (offset >= _length) {
966 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
967 return 0;
968 }
969
970 remaining = length = min(length, _length - offset);
971 while (remaining) { // (process another target segment?)
972 addr64_t srcAddr64;
973 IOByteCount srcLen;
974
975 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
976 if (!srcAddr64)
977 break;
978
979 // Clip segment length to remaining
980 if (srcLen > remaining)
981 srcLen = remaining;
982
983 copypv(srcAddr64, dstAddr, srcLen,
984 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
985
986 dstAddr += srcLen;
987 offset += srcLen;
988 remaining -= srcLen;
989 }
990
991 assert(!remaining);
992
993 return length - remaining;
994 }
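
/*
 * Illustrative sketch (not in the original file): readBytes() copies from the
 * descriptor's backing pages into a caller-supplied kernel buffer, so the
 * descriptor need not be mapped first. Names are hypothetical.
 *
 *     UInt8 header[64];
 *     md->prepare();
 *     IOByteCount got = md->readBytes(0, header, sizeof(header));
 *     md->complete();
 *     // 'got' is the byte count actually copied, clipped to getLength().
 */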
995
996 IOByteCount IOMemoryDescriptor::writeBytes
997 (IOByteCount offset, const void *bytes, IOByteCount length)
998 {
999 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1000 IOByteCount remaining;
1001
1002 // Assert that this entire I/O is within the available range
1003 assert(offset < _length);
1004 assert(offset + length <= _length);
1005
1006 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1007
1008 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1009 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1010 return 0;
1011 }
1012
1013 remaining = length = min(length, _length - offset);
1014 while (remaining) { // (process another target segment?)
1015 addr64_t dstAddr64;
1016 IOByteCount dstLen;
1017
1018 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1019 if (!dstAddr64)
1020 break;
1021
1022 // Clip segment length to remaining
1023 if (dstLen > remaining)
1024 dstLen = remaining;
1025
1026 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1027 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1028
1029 srcAddr += dstLen;
1030 offset += dstLen;
1031 remaining -= dstLen;
1032 }
1033
1034 assert(!remaining);
1035
1036 return length - remaining;
1037 }
1038
1039 // osfmk/device/iokit_rpc.c
1040 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1041
1042 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1043 /* DEPRECATED */ {
1044 panic("IOGMD::setPosition deprecated");
1045 /* DEPRECATED */ }
1046
1047 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
1048 (IOByteCount offset, IOByteCount *lengthOfSegment)
1049 {
1050 IOPhysicalAddress address = 0;
1051 IOPhysicalLength length = 0;
1052
1053 // assert(offset <= _length);
1054 if (offset < _length) // (within bounds?)
1055 {
1056 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1057 unsigned int ind;
1058
1059 // Physical address based memory descriptor
1060
1061 // Find offset within descriptor and make it relative
1062 // to the current _range.
1063 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
1064 offset -= _ranges.p[ind].length;
1065
1066 IOPhysicalRange cur = _ranges.p[ind];
1067 address = cur.address + offset;
1068 length = cur.length - offset;
1069
1070 // see how far we can coalesce ranges
1071 for (++ind; ind < _rangesCount; ind++) {
1072 cur = _ranges.p[ind];
1073
1074 if (address + length != cur.address)
1075 break;
1076
1077 length += cur.length;
1078 }
1079
1080 // @@@ gvdl: should be assert(address);
1081 // but can't as NVidia GeForce creates a bogus physical mem
1082 assert(address
1083 || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
1084 assert(length);
1085 }
1086 else do {
1087 // We need wiring & we are wired.
1088 assert(_wireCount);
1089
1090 if (!_wireCount)
1091 {
1092 panic("IOGMD: not wired for getPhysicalSegment()");
1093 continue;
1094 }
1095
1096 assert(_memoryEntries);
1097
1098 ioGMDData * dataP = getDataP(_memoryEntries);
1099 const ioPLBlock *ioplList = getIOPLList(dataP);
1100 UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
1101 upl_page_info_t *pageList = getPageList(dataP);
1102
1103 assert(numIOPLs > 0);
1104
1105 // Scan through iopl info blocks looking for block containing offset
1106 for (ind = 1; ind < numIOPLs; ind++) {
1107 if (offset < ioplList[ind].fIOMDOffset)
1108 break;
1109 }
1110
1111 // Go back to actual range as search goes past it
1112 ioPLBlock ioplInfo = ioplList[ind - 1];
1113
1114 if (ind < numIOPLs)
1115 length = ioplList[ind].fIOMDOffset;
1116 else
1117 length = _length;
1118 length -= offset; // Remainder within iopl
1119
1120 // Make offset relative to the start of this iopl within the descriptor
1121 offset -= ioplInfo.fIOMDOffset;
1122
1123 // This is a mapped IOPL so we just need to compute an offset
1124 // relative to the mapped base.
1125 if (ioplInfo.fMappedBase) {
1126 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1127 address = ptoa_32(ioplInfo.fMappedBase) + offset;
1128 continue;
1129 }
1130
1131 // Currently the offset is rebased into the current iopl.
1132 // Now add the iopl 1st page offset.
1133 offset += ioplInfo.fPageOffset;
1134
1135 // For external UPLs the fPageInfo field points directly to
1136 // the upl's upl_page_info_t array.
1137 if (ioplInfo.fFlags & kIOPLExternUPL)
1138 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1139 else
1140 pageList = &pageList[ioplInfo.fPageInfo];
1141
1142 // Check for direct device non-paged memory
1143 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1144 address = ptoa_32(pageList->phys_addr) + offset;
1145 continue;
1146 }
1147
1148 // Now we need to compute the index into the pageList
1149 ind = atop_32(offset);
1150 offset &= PAGE_MASK;
1151
1152 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
1153 address = ptoa_32(pageAddr) + offset;
1154
1155 // Check for the remaining data in this upl being longer than the
1156 // remainder on the current page. This should be checked for
1157 // contiguous pages
1158 if (length > PAGE_SIZE - offset) {
1159 // See if the next page is contiguous. Stop looking when we hit
1160 // the end of this upl, which is indicated by the
1161 // contigLength >= length.
1162 IOByteCount contigLength = PAGE_SIZE - offset;
1163
1164 // Look for contiguous segment
1165 while (contigLength < length
1166 && ++pageAddr == pageList[++ind].phys_addr) {
1167 contigLength += PAGE_SIZE;
1168 }
1169 if (length > contigLength)
1170 length = contigLength;
1171 }
1172
1173 assert(address);
1174 assert(length);
1175
1176 } while (0);
1177
1178 if (!address)
1179 length = 0;
1180 }
1181
1182 if (lengthOfSegment)
1183 *lengthOfSegment = length;
1184
1185 return address;
1186 }
1187
1188 addr64_t IOMemoryDescriptor::getPhysicalSegment64
1189 (IOByteCount offset, IOByteCount *lengthOfSegment)
1190 {
1191 IOPhysicalAddress phys32;
1192 IOByteCount length;
1193 addr64_t phys64;
1194
1195 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1196 if (!phys32)
1197 return 0;
1198
1199 if (gIOSystemMapper)
1200 {
1201 IOByteCount origLen;
1202
1203 phys64 = gIOSystemMapper->mapAddr(phys32);
1204 origLen = *lengthOfSegment;
1205 length = page_size - (phys64 & (page_size - 1));
1206 while ((length < origLen)
1207 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
1208 length += page_size;
1209 if (length > origLen)
1210 length = origLen;
1211
1212 *lengthOfSegment = length;
1213 }
1214 else
1215 phys64 = (addr64_t) phys32;
1216
1217 return phys64;
1218 }
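
/*
 * Illustrative sketch (added): walking a prepared descriptor's physical
 * segments, e.g. to build a DMA scatter/gather list.
 *
 *     IOByteCount offset = 0;
 *     while (offset < md->getLength()) {
 *         IOByteCount segLen;
 *         addr64_t    segAddr = md->getPhysicalSegment64(offset, &segLen);
 *         if (!segAddr)
 *             break;
 *         // ... program one scatter/gather element with (segAddr, segLen) ...
 *         offset += segLen;
 *     }
 */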
1219
1220 IOPhysicalAddress IOGeneralMemoryDescriptor::
1221 getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1222 {
1223 IOPhysicalAddress address = 0;
1224 IOPhysicalLength length = 0;
1225 IOOptionBits type = _flags & kIOMemoryTypeMask;
1226
1227 assert(offset <= _length);
1228
1229 if ( type == kIOMemoryTypeUPL)
1230 return super::getSourceSegment( offset, lengthOfSegment );
1231 else if ( offset < _length ) // (within bounds?)
1232 {
1233 unsigned rangesIndex = 0;
1234 Ranges vec = _ranges;
1235 user_addr_t addr;
1236
1237 // Find starting address within the vector of ranges
1238 for (;;) {
1239 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1240 if (offset < length)
1241 break;
1242 offset -= length; // (make offset relative)
1243 rangesIndex++;
1244 }
1245
1246 // Now that we have the starting range,
1247 // let's find the last contiguous range
1248 addr += offset;
1249 length -= offset;
1250
1251 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1252 user_addr_t newAddr;
1253 IOPhysicalLength newLen;
1254
1255 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1256 if (addr + length != newAddr)
1257 break;
1258 length += newLen;
1259 }
1260 if (addr)
1261 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1262 else
1263 length = 0;
1264 }
1265
1266 if ( lengthOfSegment ) *lengthOfSegment = length;
1267
1268 return address;
1269 }
1270
1271 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1272 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1273 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1274 /* DEPRECATED */ {
1275 if (_task == kernel_task)
1276 return (void *) getSourceSegment(offset, lengthOfSegment);
1277 else
1278 panic("IOGMD::getVirtualSegment deprecated");
1279
1280 return 0;
1281 /* DEPRECATED */ }
1282 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1283
1284
1285
1286 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1287 IOOptionBits * oldState )
1288 {
1289 IOReturn err = kIOReturnSuccess;
1290 vm_purgable_t control;
1291 int state;
1292
1293 do
1294 {
1295 if (!_memEntry)
1296 {
1297 err = kIOReturnNotReady;
1298 break;
1299 }
1300
1301 control = VM_PURGABLE_SET_STATE;
1302 switch (newState)
1303 {
1304 case kIOMemoryPurgeableKeepCurrent:
1305 control = VM_PURGABLE_GET_STATE;
1306 break;
1307
1308 case kIOMemoryPurgeableNonVolatile:
1309 state = VM_PURGABLE_NONVOLATILE;
1310 break;
1311 case kIOMemoryPurgeableVolatile:
1312 state = VM_PURGABLE_VOLATILE;
1313 break;
1314 case kIOMemoryPurgeableEmpty:
1315 state = VM_PURGABLE_EMPTY;
1316 break;
1317 default:
1318 err = kIOReturnBadArgument;
1319 break;
1320 }
1321
1322 if (kIOReturnSuccess != err)
1323 break;
1324
1325 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1326
1327 if (oldState)
1328 {
1329 if (kIOReturnSuccess == err)
1330 {
1331 switch (state)
1332 {
1333 case VM_PURGABLE_NONVOLATILE:
1334 state = kIOMemoryPurgeableNonVolatile;
1335 break;
1336 case VM_PURGABLE_VOLATILE:
1337 state = kIOMemoryPurgeableVolatile;
1338 break;
1339 case VM_PURGABLE_EMPTY:
1340 state = kIOMemoryPurgeableEmpty;
1341 break;
1342 default:
1343 state = kIOMemoryPurgeableNonVolatile;
1344 err = kIOReturnNotReady;
1345 break;
1346 }
1347 *oldState = state;
1348 }
1349 }
1350 }
1351 while (false);
1352
1353 return (err);
1354 }
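
/*
 * Illustrative sketch (hypothetical usage, not part of the original file):
 * the usual purgeable-memory pattern built on setPurgeable().
 *
 *     IOOptionBits oldState;
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);    // reclaimable
 *     // ... the VM may discard the contents while volatile ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState); // pin again
 *     if (kIOMemoryPurgeableEmpty == oldState) {
 *         // contents were reclaimed; the cached data must be regenerated
 *     }
 */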
1355
1356 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1357 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1358
1359 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1360 IOByteCount offset, IOByteCount length )
1361 {
1362 IOByteCount remaining;
1363 void (*func)(addr64_t pa, unsigned int count) = 0;
1364
1365 switch (options)
1366 {
1367 case kIOMemoryIncoherentIOFlush:
1368 func = &dcache_incoherent_io_flush64;
1369 break;
1370 case kIOMemoryIncoherentIOStore:
1371 func = &dcache_incoherent_io_store64;
1372 break;
1373 }
1374
1375 if (!func)
1376 return (kIOReturnUnsupported);
1377
1378 remaining = length = min(length, getLength() - offset);
1379 while (remaining)
1380 // (process another target segment?)
1381 {
1382 addr64_t dstAddr64;
1383 IOByteCount dstLen;
1384
1385 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1386 if (!dstAddr64)
1387 break;
1388
1389 // Clip segment length to remaining
1390 if (dstLen > remaining)
1391 dstLen = remaining;
1392
1393 (*func)(dstAddr64, dstLen);
1394
1395 offset += dstLen;
1396 remaining -= dstLen;
1397 }
1398
1399 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1400 }
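
/*
 * Illustrative sketch (added): flushing the processor data cache for a
 * descriptor before a cache-incoherent device reads the buffer.
 *
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 */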
1401
1402 #ifdef __ppc__
1403 extern vm_offset_t static_memory_end;
1404 #define io_kernel_static_end static_memory_end
1405 #else
1406 extern vm_offset_t first_avail;
1407 #define io_kernel_static_end first_avail
1408 #endif
1409
1410 static kern_return_t
1411 io_get_kernel_static_upl(
1412 vm_map_t /* map */,
1413 vm_address_t offset,
1414 vm_size_t *upl_size,
1415 upl_t *upl,
1416 upl_page_info_array_t page_list,
1417 unsigned int *count)
1418 {
1419 unsigned int pageCount, page;
1420 ppnum_t phys;
1421
1422 pageCount = atop_32(*upl_size);
1423 if (pageCount > *count)
1424 pageCount = *count;
1425
1426 *upl = NULL;
1427
1428 for (page = 0; page < pageCount; page++)
1429 {
1430 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1431 if (!phys)
1432 break;
1433 page_list[page].phys_addr = phys;
1434 page_list[page].pageout = 0;
1435 page_list[page].absent = 0;
1436 page_list[page].dirty = 0;
1437 page_list[page].precious = 0;
1438 page_list[page].device = 0;
1439 }
1440
1441 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1442 }
1443
1444 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1445 {
1446 IOOptionBits type = _flags & kIOMemoryTypeMask;
1447 IOReturn error = kIOReturnNoMemory;
1448 ioGMDData *dataP;
1449 ppnum_t mapBase = 0;
1450 IOMapper *mapper;
1451 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1452
1453 assert(!_wireCount);
1454 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
1455
1456 if (_pages >= gIOMaximumMappedIOPageCount)
1457 return kIOReturnNoResources;
1458
1459 dataP = getDataP(_memoryEntries);
1460 mapper = dataP->fMapper;
1461 if (mapper && _pages)
1462 mapBase = mapper->iovmAlloc(_pages);
1463
1464 // Note that appendBytes(NULL) zeros the data up to the
1465 // desired length.
1466 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1467 dataP = 0; // May no longer be valid so let's not get tempted.
1468
1469 if (forDirection == kIODirectionNone)
1470 forDirection = _direction;
1471
1472 int uplFlags; // This Mem Desc's default flags for upl creation
1473 switch (forDirection)
1474 {
1475 case kIODirectionOut:
1476 // Pages do not need to be marked as dirty on commit
1477 uplFlags = UPL_COPYOUT_FROM;
1478 _flags |= kIOMemoryPreparedReadOnly;
1479 break;
1480
1481 case kIODirectionIn:
1482 default:
1483 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1484 break;
1485 }
1486 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1487
1488 // Find the appropriate vm_map for the given task
1489 vm_map_t curMap;
1490 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1491 curMap = 0;
1492 else
1493 { curMap = get_task_map(_task); }
1494
1495 // Iterate over the vector of virtual ranges
1496 Ranges vec = _ranges;
1497 unsigned int pageIndex = 0;
1498 IOByteCount mdOffset = 0;
1499 for (UInt range = 0; range < _rangesCount; range++) {
1500 ioPLBlock iopl;
1501 user_addr_t startPage;
1502 IOByteCount numBytes;
1503
1504 // Get the startPage address and length of vec[range]
1505 getAddrLenForInd(startPage, numBytes, type, vec, range);
1506 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1507 numBytes += iopl.fPageOffset;
1508 startPage = trunc_page_64(startPage);
1509
1510 if (mapper)
1511 iopl.fMappedBase = mapBase + pageIndex;
1512 else
1513 iopl.fMappedBase = 0;
1514
1515 // Iterate over the current range, creating UPLs
1516 while (numBytes) {
1517 dataP = getDataP(_memoryEntries);
1518 vm_address_t kernelStart = (vm_address_t) startPage;
1519 vm_map_t theMap;
1520 if (curMap)
1521 theMap = curMap;
1522 else if (!sharedMem) {
1523 assert(_task == kernel_task);
1524 theMap = IOPageableMapForAddress(kernelStart);
1525 }
1526 else
1527 theMap = NULL;
1528
1529 upl_page_info_array_t pageInfo = getPageList(dataP);
1530 int ioplFlags = uplFlags;
1531 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1532
1533 vm_size_t ioplSize = round_page_32(numBytes);
1534 unsigned int numPageInfo = atop_32(ioplSize);
1535
1536 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1537 error = io_get_kernel_static_upl(theMap,
1538 kernelStart,
1539 &ioplSize,
1540 &iopl.fIOPL,
1541 baseInfo,
1542 &numPageInfo);
1543 }
1544 else if (sharedMem) {
1545 error = memory_object_iopl_request(sharedMem,
1546 ptoa_32(pageIndex),
1547 &ioplSize,
1548 &iopl.fIOPL,
1549 baseInfo,
1550 &numPageInfo,
1551 &ioplFlags);
1552 }
1553 else {
1554 assert(theMap);
1555 error = vm_map_create_upl(theMap,
1556 startPage,
1557 &ioplSize,
1558 &iopl.fIOPL,
1559 baseInfo,
1560 &numPageInfo,
1561 &ioplFlags);
1562 }
1563
1564 assert(ioplSize);
1565 if (error != KERN_SUCCESS)
1566 goto abortExit;
1567
1568 error = kIOReturnNoMemory;
1569
1570 if (baseInfo->device) {
1571 numPageInfo = 1;
1572 iopl.fFlags = kIOPLOnDevice;
1573 // Don't translate device memory at all
1574 if (mapper && mapBase) {
1575 mapper->iovmFree(mapBase, _pages);
1576 mapBase = 0;
1577 iopl.fMappedBase = 0;
1578 }
1579 }
1580 else {
1581 iopl.fFlags = 0;
1582 if (mapper)
1583 mapper->iovmInsert(mapBase, pageIndex,
1584 baseInfo, numPageInfo);
1585 }
1586
1587 iopl.fIOMDOffset = mdOffset;
1588 iopl.fPageInfo = pageIndex;
1589
1590 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1591 {
1592 upl_commit(iopl.fIOPL, 0, 0);
1593 upl_deallocate(iopl.fIOPL);
1594 iopl.fIOPL = 0;
1595 }
1596
1597 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1598 // Clean up partially created and unsaved iopl
1599 if (iopl.fIOPL) {
1600 upl_abort(iopl.fIOPL, 0);
1601 upl_deallocate(iopl.fIOPL);
1602 }
1603 goto abortExit;
1604 }
1605
1606 // Check for multiple iopls in one virtual range
1607 pageIndex += numPageInfo;
1608 mdOffset -= iopl.fPageOffset;
1609 if (ioplSize < numBytes) {
1610 numBytes -= ioplSize;
1611 startPage += ioplSize;
1612 mdOffset += ioplSize;
1613 iopl.fPageOffset = 0;
1614 if (mapper)
1615 iopl.fMappedBase = mapBase + pageIndex;
1616 }
1617 else {
1618 mdOffset += numBytes;
1619 break;
1620 }
1621 }
1622 }
1623
1624 return kIOReturnSuccess;
1625
1626 abortExit:
1627 {
1628 dataP = getDataP(_memoryEntries);
1629 UInt done = getNumIOPL(_memoryEntries, dataP);
1630 ioPLBlock *ioplList = getIOPLList(dataP);
1631
1632 for (UInt range = 0; range < done; range++)
1633 {
1634 if (ioplList[range].fIOPL) {
1635 upl_abort(ioplList[range].fIOPL, 0);
1636 upl_deallocate(ioplList[range].fIOPL);
1637 }
1638 }
1639 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1640
1641 if (mapper && mapBase)
1642 mapper->iovmFree(mapBase, _pages);
1643 }
1644
1645 return error;
1646 }
1647
1648 /*
1649 * prepare
1650 *
1651 * Prepare the memory for an I/O transfer. This involves paging in
1652 * the memory, if necessary, and wiring it down for the duration of
1653 * the transfer. The complete() method completes the processing of
1654 * the memory after the I/O transfer finishes. This method needn't be
1655 * called for non-pageable memory.
1656 */
1657 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1658 {
1659 IOReturn error = kIOReturnSuccess;
1660 IOOptionBits type = _flags & kIOMemoryTypeMask;
1661
1662 if (!_wireCount
1663 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
1664 error = wireVirtual(forDirection);
1665 if (error)
1666 return error;
1667 }
1668
1669 _wireCount++;
1670
1671 return kIOReturnSuccess;
1672 }
1673
1674 /*
1675 * complete
1676 *
1677 * Complete processing of the memory after an I/O transfer finishes.
1678 * This method should not be called unless a prepare was previously
1679 * issued; the prepare() and complete() must occur in pairs, before
1680 * and after an I/O transfer involving pageable memory.
1681 */
1682
1683 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1684 {
1685 assert(_wireCount);
1686
1687 if (!_wireCount)
1688 return kIOReturnSuccess;
1689
1690 _wireCount--;
1691 if (!_wireCount) {
1692 IOOptionBits type = _flags & kIOMemoryTypeMask;
1693
1694 if (kIOMemoryTypePhysical == type) {
1695 /* kIOMemoryTypePhysical */
1696 // DO NOTHING
1697 }
1698 else {
1699 ioGMDData * dataP = getDataP(_memoryEntries);
1700 ioPLBlock *ioplList = getIOPLList(dataP);
1701 UInt count = getNumIOPL(_memoryEntries, dataP);
1702
1703 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1704 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1705
1706 // Only complete iopls that we created which are for TypeVirtual
1707 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
1708 for (UInt ind = 0; ind < count; ind++)
1709 if (ioplList[ind].fIOPL) {
1710 upl_commit(ioplList[ind].fIOPL, 0, 0);
1711 upl_deallocate(ioplList[ind].fIOPL);
1712 }
1713 }
1714
1715 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1716 }
1717 }
1718 return kIOReturnSuccess;
1719 }
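
/*
 * Illustrative note (added): prepare() and complete() nest via _wireCount, so
 * each prepare() must be balanced by a complete(); only the final complete()
 * actually commits the iopls and unwires the pages.
 *
 *     md->prepare();      // _wireCount 0 -> 1, pages wired
 *     md->prepare();      // _wireCount 1 -> 2, no additional work
 *     md->complete();     // _wireCount 2 -> 1
 *     md->complete();     // _wireCount 1 -> 0, iopls committed, pages unwired
 */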
1720
1721 IOReturn IOGeneralMemoryDescriptor::doMap(
1722 vm_map_t addressMap,
1723 IOVirtualAddress * atAddress,
1724 IOOptionBits options,
1725 IOByteCount sourceOffset,
1726 IOByteCount length )
1727 {
1728 kern_return_t kr;
1729 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1730
1731 IOOptionBits type = _flags & kIOMemoryTypeMask;
1732 Ranges vec = _ranges;
1733
1734 user_addr_t range0Addr = 0;
1735 IOByteCount range0Len = 0;
1736
1737 if (vec.v)
1738 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
1739
1740 // mapping source == dest? (could be much better)
1741 if( _task
1742 && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1743 && (1 == _rangesCount) && (0 == sourceOffset)
1744 && range0Addr && (length <= range0Len) ) {
1745 if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
1746 return kIOReturnOverrun; // Doesn't fit in 32bit return field
1747 else {
1748 *atAddress = range0Addr;
1749 return( kIOReturnSuccess );
1750 }
1751 }
1752
1753 if( 0 == sharedMem) {
1754
1755 vm_size_t size = ptoa_32(_pages);
1756
1757 if( _task) {
1758 #ifndef i386
1759 memory_object_size_t actualSize = size;
1760 kr = mach_make_memory_entry_64(get_task_map(_task),
1761 &actualSize, range0Addr,
1762 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1763 NULL );
1764
1765 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
1766 #if IOASSERT
1767 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
1768 range0Addr, (UInt32) actualSize, size);
1769 #endif
1770 kr = kIOReturnVMError;
1771 ipc_port_release_send( sharedMem );
1772 }
1773
1774 if( KERN_SUCCESS != kr)
1775 #endif /* !i386 */
1776 sharedMem = MACH_PORT_NULL;
1777
1778 } else do {
1779
1780 memory_object_t pager;
1781 unsigned int flags = 0;
1782 addr64_t pa;
1783 IOPhysicalLength segLen;
1784
1785 pa = getPhysicalSegment64( sourceOffset, &segLen );
1786
1787 if( !reserved) {
1788 reserved = IONew( ExpansionData, 1 );
1789 if( !reserved)
1790 continue;
1791 }
1792 reserved->pagerContig = (1 == _rangesCount);
1793 reserved->memory = this;
1794
1795 /* What cache mode do we need? */
1796 switch(options & kIOMapCacheMask ) {
1797
1798 case kIOMapDefaultCache:
1799 default:
1800 flags = IODefaultCacheBits(pa);
1801 break;
1802
1803 case kIOMapInhibitCache:
1804 flags = DEVICE_PAGER_CACHE_INHIB |
1805 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1806 break;
1807
1808 case kIOMapWriteThruCache:
1809 flags = DEVICE_PAGER_WRITE_THROUGH |
1810 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1811 break;
1812
1813 case kIOMapCopybackCache:
1814 flags = DEVICE_PAGER_COHERENT;
1815 break;
1816
1817 case kIOMapWriteCombineCache:
1818 flags = DEVICE_PAGER_CACHE_INHIB |
1819 DEVICE_PAGER_COHERENT;
1820 break;
1821 }
1822
1823 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1824
1825 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1826 size, flags);
1827 assert( pager );
1828
1829 if( pager) {
1830 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1831 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1832
1833 assert( KERN_SUCCESS == kr );
1834 if( KERN_SUCCESS != kr) {
1835 device_pager_deallocate( pager );
1836 pager = MACH_PORT_NULL;
1837 sharedMem = MACH_PORT_NULL;
1838 }
1839 }
1840 if( pager && sharedMem)
1841 reserved->devicePager = pager;
1842 else {
1843 IODelete( reserved, ExpansionData, 1 );
1844 reserved = 0;
1845 }
1846
1847 } while( false );
1848
1849 _memEntry = (void *) sharedMem;
1850 }
1851
1852
1853 #ifndef i386
1854 if( 0 == sharedMem)
1855 kr = kIOReturnVMError;
1856 else
1857 #endif
1858 kr = super::doMap( addressMap, atAddress,
1859 options, sourceOffset, length );
1860
1861 return( kr );
1862 }
1863
1864 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1865 vm_map_t addressMap,
1866 IOVirtualAddress logical,
1867 IOByteCount length )
1868 {
1869 // could be much better
1870 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
1871
1872 IOOptionBits type = _flags & kIOMemoryTypeMask;
1873 user_addr_t range0Addr;
1874 IOByteCount range0Len;
1875
1876 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
1877 if (logical == range0Addr && length <= range0Len)
1878 return( kIOReturnSuccess );
1879 }
1880
1881 return( super::doUnmap( addressMap, logical, length ));
1882 }
1883
1884 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1885
1886 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1887
1888 /* inline function implementation */
1889 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1890 { return( getPhysicalSegment( 0, 0 )); }
1891
1892
1893 #undef super
1894 #define super IOMemoryMap
1895
1896 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1897
1898 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1899
1900 bool _IOMemoryMap::initCompatible(
1901 IOMemoryDescriptor * _memory,
1902 IOMemoryMap * _superMap,
1903 IOByteCount _offset,
1904 IOByteCount _length )
1905 {
1906
1907 if( !super::init())
1908 return( false);
1909
1910 if( (_offset + _length) > _superMap->getLength())
1911 return( false);
1912
1913 _memory->retain();
1914 memory = _memory;
1915 _superMap->retain();
1916 superMap = _superMap;
1917
1918 offset = _offset;
1919 if( _length)
1920 length = _length;
1921 else
1922 length = _memory->getLength();
1923
1924 options = superMap->getMapOptions();
1925 logical = superMap->getVirtualAddress() + offset;
1926
1927 return( true );
1928 }
1929
1930 bool _IOMemoryMap::initWithDescriptor(
1931 IOMemoryDescriptor * _memory,
1932 task_t intoTask,
1933 IOVirtualAddress toAddress,
1934 IOOptionBits _options,
1935 IOByteCount _offset,
1936 IOByteCount _length )
1937 {
1938 bool ok;
1939 bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
1940
1941 if ((!_memory) || (!intoTask))
1942 return( false);
1943
1944 if( (_offset + _length) > _memory->getLength())
1945 return( false);
1946
1947 if (!redir)
1948 {
1949 if (!super::init())
1950 return(false);
1951 addressMap = get_task_map(intoTask);
1952 if( !addressMap)
1953 return( false);
1954 vm_map_reference(addressMap);
1955 addressTask = intoTask;
1956 logical = toAddress;
1957 options = _options;
1958 }
1959
1960 _memory->retain();
1961
1962 offset = _offset;
1963 if( _length)
1964 length = _length;
1965 else
1966 length = _memory->getLength();
1967
1968 if( options & kIOMapStatic)
1969 ok = true;
1970 else
1971 ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
1972 _options, offset, length ));
1973 if (ok || redir)
1974 {
1975 if (memory)
1976 memory->release();
1977 memory = _memory;
1978 logical = toAddress;
1979 }
1980 else
1981 {
1982 _memory->release();
1983 if (!redir)
1984 {
1985 logical = 0;
1986 memory = 0;
1987 vm_map_deallocate(addressMap);
1988 addressMap = 0;
1989 }
1990 }
1991
1992 return( ok );
1993 }
1994
1995 /* LP64todo - these need to expand */
1996 struct IOMemoryDescriptorMapAllocRef
1997 {
1998 ipc_port_t sharedMem;
1999 vm_size_t size;
2000 vm_offset_t mapped;
2001 IOByteCount sourceOffset;
2002 IOOptionBits options;
2003 };
2004
2005 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2006 {
2007 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2008 IOReturn err;
2009
2010 do {
2011 if( ref->sharedMem) {
2012 vm_prot_t prot = VM_PROT_READ
2013 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2014
2015 // set memory entry cache
2016 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2017 switch (ref->options & kIOMapCacheMask)
2018 {
2019 case kIOMapInhibitCache:
2020 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2021 break;
2022
2023 case kIOMapWriteThruCache:
2024 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2025 break;
2026
2027 case kIOMapWriteCombineCache:
2028 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2029 break;
2030
2031 case kIOMapCopybackCache:
2032 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2033 break;
2034
2035 case kIOMapDefaultCache:
2036 default:
2037 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2038 break;
2039 }
2040
2041 vm_size_t unused = 0;
2042
2043 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2044 memEntryCacheMode, NULL, ref->sharedMem );
2045 if (KERN_SUCCESS != err)
2046 IOLog("MAP_MEM_ONLY failed %d\n", err);
2047
2048 err = vm_map( map,
2049 &ref->mapped,
2050 ref->size, 0 /* mask */,
2051 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2052 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2053 ref->sharedMem, ref->sourceOffset,
2054 false, // copy
2055 prot, // cur
2056 prot, // max
2057 VM_INHERIT_NONE);
2058
2059 if( KERN_SUCCESS != err) {
2060 ref->mapped = 0;
2061 continue;
2062 }
2063
2064 } else {
2065
2066 err = vm_allocate( map, &ref->mapped, ref->size,
2067 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2068 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2069
2070 if( KERN_SUCCESS != err) {
2071 ref->mapped = 0;
2072 continue;
2073 }
2074
2075 // make sure these pages are not copied into the child if the task forks.
2076 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2077 assert( KERN_SUCCESS == err );
2078 }
2079
2080 } while( false );
2081
2082 return( err );
2083 }
2084
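/*
 * Illustrative sketch (not part of the original source): the cache-mode
 * switch above is reached when a caller passes one of the kIOMapCacheMask
 * options through map()/doMap().  A driver mapping a write-combined frame
 * buffer for a user task might do something like the following; "fbDesc"
 * and "userTask" are assumed names for an existing IOMemoryDescriptor and
 * task_t.
 *
 *     IOMemoryMap * fbMap = fbDesc->map( userTask, 0,
 *                               kIOMapAnywhere | kIOMapWriteCombineCache );
 *     if (fbMap) {
 *         IOVirtualAddress va = fbMap->getVirtualAddress();
 *         // ... hand va to the client ...
 *         fbMap->release();
 *     }
 */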
2085
2086 IOReturn IOMemoryDescriptor::doMap(
2087 vm_map_t addressMap,
2088 IOVirtualAddress * atAddress,
2089 IOOptionBits options,
2090 IOByteCount sourceOffset,
2091 IOByteCount length )
2092 {
2093 IOReturn err = kIOReturnSuccess;
2094 memory_object_t pager;
2095 vm_address_t logical;
2096 IOByteCount pageOffset;
2097 IOPhysicalAddress sourceAddr;
2098 IOMemoryDescriptorMapAllocRef ref;
2099
2100 ref.sharedMem = (ipc_port_t) _memEntry;
2101 ref.sourceOffset = sourceOffset;
2102 ref.options = options;
2103
2104 do {
2105
2106 if( 0 == length)
2107 length = getLength();
2108
2109 sourceAddr = getSourceSegment( sourceOffset, NULL );
2110 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2111
2112 ref.size = round_page_32( length + pageOffset );
2113
2114 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2115 {
2116 upl_t redirUPL2;
2117 vm_size_t size;
2118 int flags;
2119
2120 _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
2121 ref.mapped = mapping->getVirtualAddress();
2122
2123 if (!_memEntry)
2124 {
2125 err = kIOReturnNotReadable;
2126 continue;
2127 }
2128
2129 size = length;
2130 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2131 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2132
2133 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2134 NULL, NULL,
2135 &flags))
2136 redirUPL2 = NULL;
2137
2138 err = upl_transpose(redirUPL2, mapping->redirUPL);
2139 if (kIOReturnSuccess != err)
2140 {
2141 IOLog("upl_transpose(%x)\n", err);
2142 err = kIOReturnSuccess;
2143 }
2144
2145 if (redirUPL2)
2146 {
2147 upl_commit(redirUPL2, NULL, 0);
2148 upl_deallocate(redirUPL2);
2149 redirUPL2 = 0;
2150 }
2151 {
2152 // swap the memEntries since they now refer to different vm_objects
2153 void * me = _memEntry;
2154 _memEntry = mapping->memory->_memEntry;
2155 mapping->memory->_memEntry = me;
2156 }
2157 }
2158 else
2159 {
2160
2161 logical = *atAddress;
2162 if( options & kIOMapAnywhere)
2163 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2164 ref.mapped = 0;
2165 else {
2166 ref.mapped = trunc_page_32( logical );
2167 if( (logical - ref.mapped) != pageOffset) {
2168 err = kIOReturnVMError;
2169 continue;
2170 }
2171 }
2172
2173 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2174 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2175 else
2176 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
2177 }
2178
2179 if( err != KERN_SUCCESS)
2180 continue;
2181
2182 if( reserved)
2183 pager = (memory_object_t) reserved->devicePager;
2184 else
2185 pager = MACH_PORT_NULL;
2186
2187 if( !ref.sharedMem || pager )
2188 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
2189
2190 } while( false );
2191
2192 if( err != KERN_SUCCESS) {
2193 if( ref.mapped)
2194 doUnmap( addressMap, ref.mapped, ref.size );
2195 *atAddress = NULL;
2196 } else
2197 *atAddress = ref.mapped + pageOffset;
2198
2199 return( err );
2200 }
2201
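/*
 * Worked example of the page arithmetic in doMap() above (illustrative
 * numbers, assuming a 4K page size): for sourceAddr == 0x12345680,
 * trunc_page_32() gives 0x12345000, so pageOffset == 0x680.  A requested
 * length of 0x2000 then reserves ref.size == round_page_32(0x2000 + 0x680)
 * == 0x3000, and the address handed back to the caller is
 * ref.mapped + pageOffset, preserving the original offset within the page.
 */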
2202 enum {
2203 kIOMemoryRedirected = 0x00010000
2204 };
2205
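/*
 * handleFault() establishes the physical mappings for [sourceOffset,
 * sourceOffset + length) of this descriptor at "address", segment by
 * segment, populating the device pager when one is present.  With a NULL
 * addressMap it only blocks while the descriptor is redirected and then
 * returns.
 */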
2206 IOReturn IOMemoryDescriptor::handleFault(
2207 void * _pager,
2208 vm_map_t addressMap,
2209 IOVirtualAddress address,
2210 IOByteCount sourceOffset,
2211 IOByteCount length,
2212 IOOptionBits options )
2213 {
2214 IOReturn err = kIOReturnSuccess;
2215 memory_object_t pager = (memory_object_t) _pager;
2216 vm_size_t size;
2217 vm_size_t bytes;
2218 vm_size_t page;
2219 IOByteCount pageOffset;
2220 IOByteCount pagerOffset;
2221 IOPhysicalLength segLen;
2222 addr64_t physAddr;
2223
2224 if( !addressMap) {
2225
2226 if( kIOMemoryRedirected & _flags) {
2227 #ifdef DEBUG
2228 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
2229 #endif
2230 do {
2231 SLEEP;
2232 } while( kIOMemoryRedirected & _flags );
2233 }
2234
2235 return( kIOReturnSuccess );
2236 }
2237
2238 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2239 assert( physAddr );
2240 pageOffset = physAddr - trunc_page_64( physAddr );
2241 pagerOffset = sourceOffset;
2242
2243 size = length + pageOffset;
2244 physAddr -= pageOffset;
2245
2246 segLen += pageOffset;
2247 bytes = size;
2248 do {
2249 // in the middle of the loop only map whole pages
2250 if( segLen >= bytes)
2251 segLen = bytes;
2252 else if( segLen != trunc_page_32( segLen))
2253 err = kIOReturnVMError;
2254 if( physAddr != trunc_page_64( physAddr))
2255 err = kIOReturnBadArgument;
2256 if (kIOReturnSuccess != err)
2257 break;
2258
2259 #ifdef DEBUG
2260 if( kIOLogMapping & gIOKitDebug)
2261 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
2262 addressMap, address + pageOffset, physAddr + pageOffset,
2263 segLen - pageOffset);
2264 #endif
2265
2266
2267
2268
2269
2270 #ifdef i386
2271 /* i386 doesn't support faulting on device memory yet */
2272 if( addressMap && (kIOReturnSuccess == err))
2273 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
2274 assert( KERN_SUCCESS == err );
2275 if( err)
2276 break;
2277 #endif
2278
2279 if( pager) {
2280 if( reserved && reserved->pagerContig) {
2281 IOPhysicalLength allLen;
2282 addr64_t allPhys;
2283
2284 allPhys = getPhysicalSegment64( 0, &allLen );
2285 assert( allPhys );
2286 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
2287
2288 } else {
2289
2290 for( page = 0;
2291 (page < segLen) && (KERN_SUCCESS == err);
2292 page += page_size) {
2293 err = device_pager_populate_object(pager, pagerOffset,
2294 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
2295 pagerOffset += page_size;
2296 }
2297 }
2298 assert( KERN_SUCCESS == err );
2299 if( err)
2300 break;
2301 }
2302 #ifndef i386
2303 /* *** ALERT *** */
2304 /* *** Temporary Workaround *** */
2305
2306 /* This call to vm_fault causes an early pmap level resolution */
2307 /* of the mappings created above. The need for this is in absolute */
2308 /* violation of the basic tenet that the pmap layer is a cache. */
2309 /* Further, it implies a serious I/O architectural violation on */
2310 /* the part of some user of the mapping. As of this writing, */
2311 /* the call to vm_fault is needed because the NVIDIA driver */
2312 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2313 /* fixed as soon as possible. The NVIDIA driver should not */
2314 /* need to query for this info as it should know from the doMap */
2315 /* call where the physical memory is mapped. When a query is */
2316 /* necessary to find a physical mapping, it should be done */
2317 /* through an iokit call which includes the mapped memory */
2318 /* handle. This is required for machine architecture independence.*/
2319
2320 if(!(kIOMemoryRedirected & _flags)) {
2321 vm_fault(addressMap,
2322 (vm_map_offset_t)address,
2323 VM_PROT_READ|VM_PROT_WRITE,
2324 FALSE, THREAD_UNINT, NULL,
2325 (vm_map_offset_t)0);
2326 }
2327
2328 /* *** Temporary Workaround *** */
2329 /* *** ALERT *** */
2330 #endif
2331 sourceOffset += segLen - pageOffset;
2332 address += segLen;
2333 bytes -= segLen;
2334 pageOffset = 0;
2335
2336 } while( bytes
2337 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2338
2339 if( bytes)
2340 err = kIOReturnBadArgument;
2341
2342 return( err );
2343 }
2344
2345 IOReturn IOMemoryDescriptor::doUnmap(
2346 vm_map_t addressMap,
2347 IOVirtualAddress logical,
2348 IOByteCount length )
2349 {
2350 IOReturn err;
2351
2352 #ifdef DEBUG
2353 if( kIOLogMapping & gIOKitDebug)
2354 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2355 addressMap, logical, length );
2356 #endif
2357
2358 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2359
2360 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2361 addressMap = IOPageableMapForAddress( logical );
2362
2363 err = vm_deallocate( addressMap, logical, length );
2364
2365 } else
2366 err = kIOReturnSuccess;
2367
2368 return( err );
2369 }
2370
2371 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2372 {
2373 IOReturn err = kIOReturnSuccess;
2374 _IOMemoryMap * mapping = 0;
2375 OSIterator * iter;
2376
2377 LOCK;
2378
2379 if( doRedirect)
2380 _flags |= kIOMemoryRedirected;
2381 else
2382 _flags &= ~kIOMemoryRedirected;
2383
2384 do {
2385 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2386 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2387 mapping->redirect( safeTask, doRedirect );
2388
2389 iter->release();
2390 }
2391 } while( false );
2392
2393 if (!doRedirect)
2394 {
2395 WAKEUP;
2396 }
2397
2398 UNLOCK;
2399
2400 // temporary binary compatibility
2401 IOSubMemoryDescriptor * subMem;
2402 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2403 err = subMem->redirect( safeTask, doRedirect );
2404 else
2405 err = kIOReturnSuccess;
2406
2407 return( err );
2408 }
2409
2410 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2411 {
2412 return( _parent->redirect( safeTask, doRedirect ));
2413 }
2414
2415 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2416 {
2417 IOReturn err = kIOReturnSuccess;
2418
2419 if( superMap) {
2420 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2421 } else {
2422
2423 LOCK;
2424 if( logical && addressMap
2425 && (!safeTask || (get_task_map(safeTask) != addressMap))
2426 && (0 == (options & kIOMapStatic)))
2427 {
2428 IOUnmapPages( addressMap, logical, length );
2429 if(!doRedirect && safeTask
2430 && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
2431 {
2432 err = vm_deallocate( addressMap, logical, length );
2433 err = memory->doMap( addressMap, &logical,
2434 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2435 offset, length );
2436 } else
2437 err = kIOReturnSuccess;
2438 #ifdef DEBUG
2439 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
2440 #endif
2441 }
2442 UNLOCK;
2443 }
2444
2445 if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2446 && safeTask
2447 && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
2448 memory->redirect(safeTask, doRedirect);
2449
2450 return( err );
2451 }
2452
2453 IOReturn _IOMemoryMap::unmap( void )
2454 {
2455 IOReturn err;
2456
2457 LOCK;
2458
2459 if( logical && addressMap && (0 == superMap)
2460 && (0 == (options & kIOMapStatic))) {
2461
2462 err = memory->doUnmap( addressMap, logical, length );
2463 vm_map_deallocate(addressMap);
2464 addressMap = 0;
2465
2466 } else
2467 err = kIOReturnSuccess;
2468
2469 logical = 0;
2470
2471 UNLOCK;
2472
2473 return( err );
2474 }
2475
2476 void _IOMemoryMap::taskDied( void )
2477 {
2478 LOCK;
2479 if( addressMap) {
2480 vm_map_deallocate(addressMap);
2481 addressMap = 0;
2482 }
2483 addressTask = 0;
2484 logical = 0;
2485 UNLOCK;
2486 }
2487
2488 // Overload the release mechanism. All mappings must be a member
2489 // of a memory descriptor's _mappings set. This means that we
2490 // always have 2 references on a mapping. When either of these references
2491 // is released we need to free ourselves.
2492 void _IOMemoryMap::taggedRelease(const void *tag) const
2493 {
2494 LOCK;
2495 super::taggedRelease(tag, 2);
2496 UNLOCK;
2497 }
2498
2499 void _IOMemoryMap::free()
2500 {
2501 unmap();
2502
2503 if( memory) {
2504 LOCK;
2505 memory->removeMapping( this);
2506 UNLOCK;
2507 memory->release();
2508 }
2509
2510 if (owner && (owner != memory))
2511 {
2512 LOCK;
2513 owner->removeMapping(this);
2514 UNLOCK;
2515 }
2516
2517 if( superMap)
2518 superMap->release();
2519
2520 if (redirUPL) {
2521 upl_commit(redirUPL, NULL, 0);
2522 upl_deallocate(redirUPL);
2523 }
2524
2525 super::free();
2526 }
2527
2528 IOByteCount _IOMemoryMap::getLength()
2529 {
2530 return( length );
2531 }
2532
2533 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2534 {
2535 return( logical);
2536 }
2537
2538 task_t _IOMemoryMap::getAddressTask()
2539 {
2540 if( superMap)
2541 return( superMap->getAddressTask());
2542 else
2543 return( addressTask);
2544 }
2545
2546 IOOptionBits _IOMemoryMap::getMapOptions()
2547 {
2548 return( options);
2549 }
2550
2551 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2552 {
2553 return( memory );
2554 }
2555
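/*
 * copyCompatible() returns an existing mapping that can satisfy the new
 * request: this mapping itself (retained) when the task, options, offset and
 * length all match, a new sub-mapping built with initCompatible() when the
 * request is a compatible subset, or 0 when no reuse is possible.
 */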
2556 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2557 IOMemoryDescriptor * owner,
2558 task_t task,
2559 IOVirtualAddress toAddress,
2560 IOOptionBits _options,
2561 IOByteCount _offset,
2562 IOByteCount _length )
2563 {
2564 _IOMemoryMap * mapping;
2565
2566 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
2567 return( 0 );
2568 if( options & kIOMapUnique)
2569 return( 0 );
2570 if( (options ^ _options) & kIOMapReadOnly)
2571 return( 0 );
2572 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2573 && ((options ^ _options) & kIOMapCacheMask))
2574 return( 0 );
2575
2576 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2577 return( 0 );
2578
2579 if( _offset < offset)
2580 return( 0 );
2581
2582 _offset -= offset;
2583
2584 if( (_offset + _length) > length)
2585 return( 0 );
2586
2587 if( (length == _length) && (!_offset)) {
2588 retain();
2589 mapping = this;
2590
2591 } else {
2592 mapping = new _IOMemoryMap;
2593 if( mapping
2594 && !mapping->initCompatible( owner, this, _offset, _length )) {
2595 mapping->release();
2596 mapping = 0;
2597 }
2598 }
2599
2600 return( mapping );
2601 }
2602
2603 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2604 IOPhysicalLength * _length)
2605 {
2606 IOPhysicalAddress address;
2607
2608 LOCK;
2609 address = memory->getPhysicalSegment( offset + _offset, _length );
2610 UNLOCK;
2611
2612 return( address );
2613 }
2614
2615 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2616
2617 #undef super
2618 #define super OSObject
2619
2620 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2621
2622 void IOMemoryDescriptor::initialize( void )
2623 {
2624 if( 0 == gIOMemoryLock)
2625 gIOMemoryLock = IORecursiveLockAlloc();
2626
2627 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2628 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2629 }
2630
2631 void IOMemoryDescriptor::free( void )
2632 {
2633 if( _mappings)
2634 _mappings->release();
2635
2636 super::free();
2637 }
2638
2639 IOMemoryMap * IOMemoryDescriptor::setMapping(
2640 task_t intoTask,
2641 IOVirtualAddress mapAddress,
2642 IOOptionBits options )
2643 {
2644 _IOMemoryMap * newMap;
2645
2646 newMap = new _IOMemoryMap;
2647
2648 LOCK;
2649
2650 if( newMap
2651 && !newMap->initWithDescriptor( this, intoTask, mapAddress,
2652 options | kIOMapStatic, 0, getLength() )) {
2653 newMap->release();
2654 newMap = 0;
2655 }
2656
2657 addMapping( newMap);
2658
2659 UNLOCK;
2660
2661 return( newMap);
2662 }
2663
2664 IOMemoryMap * IOMemoryDescriptor::map(
2665 IOOptionBits options )
2666 {
2667
2668 return( makeMapping( this, kernel_task, 0,
2669 options | kIOMapAnywhere,
2670 0, getLength() ));
2671 }
2672
2673 IOMemoryMap * IOMemoryDescriptor::map(
2674 task_t intoTask,
2675 IOVirtualAddress toAddress,
2676 IOOptionBits options,
2677 IOByteCount offset,
2678 IOByteCount length )
2679 {
2680 if( 0 == length)
2681 length = getLength();
2682
2683 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2684 }
2685
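/*
 * Typical use of the simple overload above (a sketch; "md" stands for an
 * IOMemoryDescriptor the caller already holds a reference on):
 *
 *     IOMemoryMap * map = md->map();       // kernel_task, kIOMapAnywhere
 *     if (map) {
 *         IOVirtualAddress va  = map->getVirtualAddress();
 *         IOByteCount      len = map->getLength();
 *         // ... access [va, va + len) ...
 *         map->release();   // the mapping is torn down with the last client reference
 *     }
 */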
2686 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2687 IOOptionBits options,
2688 IOByteCount offset)
2689 {
2690 IOReturn err = kIOReturnSuccess;
2691 IOMemoryDescriptor * physMem = 0;
2692
2693 LOCK;
2694
2695 if (logical && addressMap) do
2696 {
2697 if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2698 {
2699 physMem = memory;
2700 physMem->retain();
2701 }
2702
2703 if (!redirUPL)
2704 {
2705 vm_size_t size = length;
2706 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2707 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2708 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
2709 NULL, NULL,
2710 &flags))
2711 redirUPL = 0;
2712
2713 if (physMem)
2714 {
2715 IOUnmapPages( addressMap, logical, length );
2716 physMem->redirect(0, true);
2717 }
2718 }
2719
2720 if (newBackingMemory)
2721 {
2722 if (newBackingMemory != memory)
2723 {
2724 if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
2725 options | kIOMapUnique | kIOMapReference,
2726 offset, length))
2727 err = kIOReturnError;
2728 }
2729 if (redirUPL)
2730 {
2731 upl_commit(redirUPL, NULL, 0);
2732 upl_deallocate(redirUPL);
2733 redirUPL = 0;
2734 }
2735 if (physMem)
2736 physMem->redirect(0, false);
2737 }
2738 }
2739 while (false);
2740
2741 UNLOCK;
2742
2743 if (physMem)
2744 physMem->release();
2745
2746 return (err);
2747 }
2748
2749 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2750 IOMemoryDescriptor * owner,
2751 task_t intoTask,
2752 IOVirtualAddress toAddress,
2753 IOOptionBits options,
2754 IOByteCount offset,
2755 IOByteCount length )
2756 {
2757 IOMemoryDescriptor * mapDesc = 0;
2758 _IOMemoryMap * mapping = 0;
2759 OSIterator * iter;
2760
2761 LOCK;
2762
2763 do
2764 {
2765 if (kIOMapUnique & options)
2766 {
2767 IOPhysicalAddress phys;
2768 IOByteCount physLen;
2769
2770 if (owner != this)
2771 continue;
2772
2773 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2774 {
2775 phys = getPhysicalSegment(offset, &physLen);
2776 if (!phys || (physLen < length))
2777 continue;
2778
2779 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
2780 phys, length, _direction);
2781 if (!mapDesc)
2782 continue;
2783 offset = 0;
2784 }
2785 else
2786 {
2787 mapDesc = this;
2788 mapDesc->retain();
2789 }
2790
2791 if (kIOMapReference & options)
2792 {
2793 mapping = (_IOMemoryMap *) toAddress;
2794 mapping->retain();
2795
2796 #if 1
2797 uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
2798 pageOffset1 -= trunc_page_32( pageOffset1 );
2799
2800 uint32_t pageOffset2 = mapping->getVirtualAddress();
2801 pageOffset2 -= trunc_page_32( pageOffset2 );
2802
2803 if (pageOffset1 != pageOffset2)
2804 IOLog("::redirect can't map offset %x to addr %x\n",
2805 pageOffset1, mapping->getVirtualAddress());
2806 #endif
2807
2808
2809 if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2810 offset, length ))
2811 {
2812 #ifdef DEBUG
2813 IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
2814 #endif
2815 }
2816
2817 if (mapping->owner)
2818 mapping->owner->removeMapping(mapping);
2819 continue;
2820 }
2821 }
2822 else
2823 {
2824 // look for an existing mapping
2825 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2826
2827 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2828
2829 if( (mapping = mapping->copyCompatible(
2830 owner, intoTask, toAddress,
2831 options | kIOMapReference,
2832 offset, length )))
2833 break;
2834 }
2835 iter->release();
2836 }
2837
2838
2839 if (mapping)
2840 mapping->retain();
2841
2842 if( mapping || (options & kIOMapReference))
2843 continue;
2844
2845 mapDesc = owner;
2846 mapDesc->retain();
2847 }
2848 owner = this;
2849
2850 mapping = new _IOMemoryMap;
2851 if( mapping
2852 && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2853 offset, length )) {
2854 #ifdef DEBUG
2855 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2856 #endif
2857 mapping->release();
2858 mapping = 0;
2859 }
2860
2861 if (mapping)
2862 mapping->retain();
2863
2864 } while( false );
2865
2866 if (mapping)
2867 {
2868 mapping->owner = owner;
2869 owner->addMapping( mapping);
2870 mapping->release();
2871 }
2872
2873 UNLOCK;
2874
2875 if (mapDesc)
2876 mapDesc->release();
2877
2878 return( mapping);
2879 }
2880
2881 void IOMemoryDescriptor::addMapping(
2882 IOMemoryMap * mapping )
2883 {
2884 if( mapping) {
2885 if( 0 == _mappings)
2886 _mappings = OSSet::withCapacity(1);
2887 if( _mappings )
2888 _mappings->setObject( mapping );
2889 }
2890 }
2891
2892 void IOMemoryDescriptor::removeMapping(
2893 IOMemoryMap * mapping )
2894 {
2895 if( _mappings)
2896 _mappings->removeObject( mapping);
2897 }
2898
2899 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2900
2901 #undef super
2902 #define super IOMemoryDescriptor
2903
2904 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2905
2906 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2907
2908 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2909 IOByteCount offset, IOByteCount length,
2910 IODirection direction )
2911 {
2912 if( !parent)
2913 return( false);
2914
2915 if( (offset + length) > parent->getLength())
2916 return( false);
2917
2918 /*
2919 * We can check the _parent instance variable before having ever set it
2920 * to an initial value because I/O Kit guarantees that all our instance
2921 * variables are zeroed on an object's allocation.
2922 */
2923
2924 if( !_parent) {
2925 if( !super::init())
2926 return( false );
2927 } else {
2928 /*
2929 * An existing memory descriptor is being retargeted to
2930 * point to somewhere else. Clean up our present state.
2931 */
2932
2933 _parent->release();
2934 _parent = 0;
2935 }
2936
2937 parent->retain();
2938 _parent = parent;
2939 _start = offset;
2940 _length = length;
2941 _direction = direction;
2942 _tag = parent->getTag();
2943
2944 return( true );
2945 }
2946
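/*
 * A subrange descriptor is normally obtained through the factory method
 * rather than by calling initSubRange() directly (a sketch; "parentMD" is an
 * assumed existing descriptor of at least 0x1800 bytes):
 *
 *     IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *                                    parentMD, 0x1000, 0x800, kIODirectionOutIn );
 *     // offset 0 of "sub" now corresponds to offset 0x1000 of parentMD
 *     if (sub) sub->release();
 */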
2947 void IOSubMemoryDescriptor::free( void )
2948 {
2949 if( _parent)
2950 _parent->release();
2951
2952 super::free();
2953 }
2954
2955
2956 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2957 IOByteCount * length )
2958 {
2959 IOPhysicalAddress address;
2960 IOByteCount actualLength;
2961
2962 assert(offset <= _length);
2963
2964 if( length)
2965 *length = 0;
2966
2967 if( offset >= _length)
2968 return( 0 );
2969
2970 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2971
2972 if( address && length)
2973 *length = min( _length - offset, actualLength );
2974
2975 return( address );
2976 }
2977
2978
2979 IOReturn IOSubMemoryDescriptor::doMap(
2980 vm_map_t addressMap,
2981 IOVirtualAddress * atAddress,
2982 IOOptionBits options,
2983 IOByteCount sourceOffset,
2984 IOByteCount length )
2985 {
2986 if( sourceOffset >= _length)
2987 return( kIOReturnOverrun );
2988 return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
2989 }
2990
2991 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2992 IOByteCount * length )
2993 {
2994 IOPhysicalAddress address;
2995 IOByteCount actualLength;
2996
2997 assert(offset <= _length);
2998
2999 if( length)
3000 *length = 0;
3001
3002 if( offset >= _length)
3003 return( 0 );
3004
3005 address = _parent->getSourceSegment( offset + _start, &actualLength );
3006
3007 if( address && length)
3008 *length = min( _length - offset, actualLength );
3009
3010 return( address );
3011 }
3012
3013 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3014 IOByteCount * lengthOfSegment)
3015 {
3016 return( 0 );
3017 }
3018
3019 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3020 void * bytes, IOByteCount length)
3021 {
3022 IOByteCount byteCount;
3023
3024 assert(offset <= _length);
3025
3026 if( offset >= _length)
3027 return( 0 );
3028
3029 LOCK;
3030 byteCount = _parent->readBytes( _start + offset, bytes,
3031 min(length, _length - offset) );
3032 UNLOCK;
3033
3034 return( byteCount );
3035 }
3036
3037 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3038 const void* bytes, IOByteCount length)
3039 {
3040 IOByteCount byteCount;
3041
3042 assert(offset <= _length);
3043
3044 if( offset >= _length)
3045 return( 0 );
3046
3047 LOCK;
3048 byteCount = _parent->writeBytes( _start + offset, bytes,
3049 min(length, _length - offset) );
3050 UNLOCK;
3051
3052 return( byteCount );
3053 }
3054
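/*
 * Worked example of the clipping in readBytes()/writeBytes() above: for a
 * sub-descriptor with _start == 0x100 and _length == 0x200, a call to
 * readBytes(0x180, buf, 0x100) reads min(0x100, 0x200 - 0x180) == 0x80 bytes
 * from parent offset 0x100 + 0x180 == 0x280 and returns 0x80.
 */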
3055 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3056 IOOptionBits * oldState )
3057 {
3058 IOReturn err;
3059
3060 LOCK;
3061 err = _parent->setPurgeable( newState, oldState );
3062 UNLOCK;
3063
3064 return( err );
3065 }
3066
3067 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3068 IOByteCount offset, IOByteCount length )
3069 {
3070 IOReturn err;
3071
3072 assert(offset <= _length);
3073
3074 if( offset >= _length)
3075 return( kIOReturnOverrun );
3076
3077 LOCK;
3078 err = _parent->performOperation( options, _start + offset,
3079 min(length, _length - offset) );
3080 UNLOCK;
3081
3082 return( err );
3083 }
3084
3085 IOReturn IOSubMemoryDescriptor::prepare(
3086 IODirection forDirection)
3087 {
3088 IOReturn err;
3089
3090 LOCK;
3091 err = _parent->prepare( forDirection);
3092 UNLOCK;
3093
3094 return( err );
3095 }
3096
3097 IOReturn IOSubMemoryDescriptor::complete(
3098 IODirection forDirection)
3099 {
3100 IOReturn err;
3101
3102 LOCK;
3103 err = _parent->complete( forDirection);
3104 UNLOCK;
3105
3106 return( err );
3107 }
3108
3109 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3110 IOMemoryDescriptor * owner,
3111 task_t intoTask,
3112 IOVirtualAddress toAddress,
3113 IOOptionBits options,
3114 IOByteCount offset,
3115 IOByteCount length )
3116 {
3117 IOMemoryMap * mapping = 0;
3118
3119 if (!(kIOMapUnique & options))
3120 mapping = (IOMemoryMap *) _parent->makeMapping(
3121 _parent, intoTask,
3122 toAddress - (_start + offset),
3123 options | kIOMapReference,
3124 _start + offset, length );
3125
3126 if( !mapping)
3127 mapping = (IOMemoryMap *) _parent->makeMapping(
3128 _parent, intoTask,
3129 toAddress,
3130 options, _start + offset, length );
3131
3132 if( !mapping)
3133 mapping = super::makeMapping( owner, intoTask, toAddress, options,
3134 offset, length );
3135
3136 return( mapping );
3137 }
3138
3139 /* ick - disable the generic initializers; an IOSubMemoryDescriptor can only be built via initSubRange */
3140
3141 bool
3142 IOSubMemoryDescriptor::initWithAddress(void * address,
3143 IOByteCount length,
3144 IODirection direction)
3145 {
3146 return( false );
3147 }
3148
3149 bool
3150 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3151 IOByteCount length,
3152 IODirection direction,
3153 task_t task)
3154 {
3155 return( false );
3156 }
3157
3158 bool
3159 IOSubMemoryDescriptor::initWithPhysicalAddress(
3160 IOPhysicalAddress address,
3161 IOByteCount length,
3162 IODirection direction )
3163 {
3164 return( false );
3165 }
3166
3167 bool
3168 IOSubMemoryDescriptor::initWithRanges(
3169 IOVirtualRange * ranges,
3170 UInt32 withCount,
3171 IODirection direction,
3172 task_t task,
3173 bool asReference)
3174 {
3175 return( false );
3176 }
3177
3178 bool
3179 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3180 UInt32 withCount,
3181 IODirection direction,
3182 bool asReference)
3183 {
3184 return( false );
3185 }
3186
3187 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3188
3189 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3190 {
3191 OSSymbol const *keys[2];
3192 OSObject *values[2];
3193 struct SerData {
3194 user_addr_t address;
3195 user_size_t length;
3196 } *vcopy;
3197 unsigned int index, nRanges;
3198 bool result;
3199
3200 IOOptionBits type = _flags & kIOMemoryTypeMask;
3201
3202 if (s == NULL) return false;
3203 if (s->previouslySerialized(this)) return true;
3204
3205 // Pretend we are an array.
3206 if (!s->addXMLStartTag(this, "array")) return false;
3207
3208 nRanges = _rangesCount;
3209 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3210 if (vcopy == 0) return false;
3211
3212 keys[0] = OSSymbol::withCString("address");
3213 keys[1] = OSSymbol::withCString("length");
3214
3215 result = false;
3216 values[0] = values[1] = 0;
3217
3218 // From this point on, error paths exit through the bail: label below.
3219
3220 // Copy the volatile data so we don't have to allocate memory
3221 // while the lock is held.
3222 LOCK;
3223 if (nRanges == _rangesCount) {
3224 Ranges vec = _ranges;
3225 for (index = 0; index < nRanges; index++) {
3226 user_addr_t addr; IOByteCount len;
3227 getAddrLenForInd(addr, len, type, vec, index);
3228 vcopy[index].address = addr;
3229 vcopy[index].length = len;
3230 }
3231 } else {
3232 // The descriptor changed out from under us. Give up.
3233 UNLOCK;
3234 result = false;
3235 goto bail;
3236 }
3237 UNLOCK;
3238
3239 for (index = 0; index < nRanges; index++)
3240 {
3241 user_addr_t addr = vcopy[index].address;
3242 IOByteCount len = (IOByteCount) vcopy[index].length;
3243 values[0] =
3244 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3245 if (values[0] == 0) {
3246 result = false;
3247 goto bail;
3248 }
3249 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3250 if (values[1] == 0) {
3251 result = false;
3252 goto bail;
3253 }
3254 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3255 if (dict == 0) {
3256 result = false;
3257 goto bail;
3258 }
3259 values[0]->release();
3260 values[1]->release();
3261 values[0] = values[1] = 0;
3262
3263 result = dict->serialize(s);
3264 dict->release();
3265 if (!result) {
3266 goto bail;
3267 }
3268 }
3269 result = s->addXMLEndTag("array");
3270
3271 bail:
3272 if (values[0])
3273 values[0]->release();
3274 if (values[1])
3275 values[1]->release();
3276 if (keys[0])
3277 keys[0]->release();
3278 if (keys[1])
3279 keys[1]->release();
3280 if (vcopy)
3281 IOFree(vcopy, sizeof(SerData) * nRanges); // size must match the IOMalloc above
3282 return result;
3283 }
3284
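/*
 * The serialized form produced above is an array of address/length
 * dictionaries, roughly (values illustrative):
 *
 *     <array>
 *       <dict>
 *         <key>address</key> <integer size="32">0x1f000</integer>
 *         <key>length</key>  <integer size="32">0x2000</integer>
 *       </dict>
 *     </array>
 */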
3285 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3286 {
3287 if (!s) {
3288 return (false);
3289 }
3290 if (s->previouslySerialized(this)) return true;
3291
3292 // Pretend we are a dictionary.
3293 // We must duplicate the functionality of OSDictionary here
3294 // because otherwise object references will not work;
3295 // they are based on the value of the object passed to
3296 // previouslySerialized and addXMLStartTag.
3297
3298 if (!s->addXMLStartTag(this, "dict")) return false;
3299
3300 char const *keys[3] = {"offset", "length", "parent"};
3301
3302 OSObject *values[3];
3303 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3304 if (values[0] == 0)
3305 return false;
3306 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3307 if (values[1] == 0) {
3308 values[0]->release();
3309 return false;
3310 }
3311 values[2] = _parent;
3312
3313 bool result = true;
3314 for (int i=0; i<3; i++) {
3315 if (!s->addString("<key>") ||
3316 !s->addString(keys[i]) ||
3317 !s->addXMLEndTag("key") ||
3318 !values[i]->serialize(s)) {
3319 result = false;
3320 break;
3321 }
3322 }
3323 values[0]->release();
3324 values[1]->release();
3325 if (!result) {
3326 return false;
3327 }
3328
3329 return s->addXMLEndTag("dict");
3330 }
3331
3332 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3333
3334 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3335 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3336 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3337 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3338 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3339 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3340 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3341 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3342 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3343 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3344 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3345 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3346 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3347 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3348 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3349 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3350
3351 /* ex-inline function implementation */
3352 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
3353 { return( getPhysicalSegment( 0, 0 )); }