1 /*
2 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
24 *
25 * HISTORY
26 *
27 */
28 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IOKitKeysPrivate.h>
37
38 #include <IOKit/IOKitDebug.h>
39
40 #include "IOKitKernelInternal.h"
41
42 #include <libkern/c++/OSContainers.h>
43 #include <libkern/c++/OSDictionary.h>
44 #include <libkern/c++/OSArray.h>
45 #include <libkern/c++/OSSymbol.h>
46 #include <libkern/c++/OSNumber.h>
47
48 #include <sys/uio.h>
49
50 __BEGIN_DECLS
51 #include <vm/pmap.h>
52 #include <vm/vm_pageout.h>
53 #include <vm/vm_shared_memory_server.h>
54 #include <mach/memory_object_types.h>
55 #include <device/device_port.h>
56
57 #ifndef i386
58 #include <mach/vm_prot.h>
59 #include <vm/vm_fault.h>
60 struct phys_entry *pmap_find_physentry(ppnum_t pa);
61 #endif
62
63 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
64 void ipc_port_release_send(ipc_port_t port);
65
66 /* Copy between a physical page and a virtual address in the given vm_map */
67 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
68
69 memory_object_t
70 device_pager_setup(
71 memory_object_t pager,
72 int device_handle,
73 vm_size_t size,
74 int flags);
75 void
76 device_pager_deallocate(
77 memory_object_t);
78 kern_return_t
79 device_pager_populate_object(
80 memory_object_t pager,
81 vm_object_offset_t offset,
82 ppnum_t phys_addr,
83 vm_size_t size);
84 kern_return_t
85 memory_object_iopl_request(
86 ipc_port_t port,
87 memory_object_offset_t offset,
88 vm_size_t *upl_size,
89 upl_t *upl_ptr,
90 upl_page_info_array_t user_page_list,
91 unsigned int *page_list_count,
92 int *flags);
93
94 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
95
96 __END_DECLS
97
98 #define kIOMaximumMappedIOByteCount (512*1024*1024)
99
100 static IOMapper * gIOSystemMapper;
101 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
102
103 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
104
105 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
106
107 #define super IOMemoryDescriptor
108
109 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
110
111 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
112
113 static IORecursiveLock * gIOMemoryLock;
114
115 #define LOCK IORecursiveLockLock( gIOMemoryLock)
116 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
117 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
118 #define WAKEUP \
119 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
120
121 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
122
123 class _IOMemoryMap : public IOMemoryMap
124 {
125 OSDeclareDefaultStructors(_IOMemoryMap)
126 public:
127 IOMemoryDescriptor * memory;
128 IOMemoryMap * superMap;
129 IOByteCount offset;
130 IOByteCount length;
131 IOVirtualAddress logical;
132 task_t addressTask;
133 vm_map_t addressMap;
134 IOOptionBits options;
135 upl_t redirUPL;
136 ipc_port_t redirEntry;
137 IOMemoryDescriptor * owner;
138
139 protected:
140 virtual void taggedRelease(const void *tag = 0) const;
141 virtual void free();
142
143 public:
144
145 // IOMemoryMap methods
146 virtual IOVirtualAddress getVirtualAddress();
147 virtual IOByteCount getLength();
148 virtual task_t getAddressTask();
149 virtual IOMemoryDescriptor * getMemoryDescriptor();
150 virtual IOOptionBits getMapOptions();
151
152 virtual IOReturn unmap();
153 virtual void taskDied();
154
155 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
156 IOOptionBits options,
157 IOByteCount offset = 0);
158
159 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
160 IOByteCount * length);
161
162 // for IOMemoryDescriptor use
163 _IOMemoryMap * copyCompatible(
164 IOMemoryDescriptor * owner,
165 task_t intoTask,
166 IOVirtualAddress toAddress,
167 IOOptionBits options,
168 IOByteCount offset,
169 IOByteCount length );
170
171 bool initCompatible(
172 IOMemoryDescriptor * memory,
173 IOMemoryMap * superMap,
174 IOByteCount offset,
175 IOByteCount length );
176
177 bool initWithDescriptor(
178 IOMemoryDescriptor * memory,
179 task_t intoTask,
180 IOVirtualAddress toAddress,
181 IOOptionBits options,
182 IOByteCount offset,
183 IOByteCount length );
184
185 IOReturn redirect(
186 task_t intoTask, bool redirect );
187 };
188
189 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
190
191 // Some data structures and accessor macros used by the initWithOptions
192 // function.
193
194 enum ioPLBlockFlags {
195 kIOPLOnDevice = 0x00000001,
196 kIOPLExternUPL = 0x00000002,
197 };
198
199 struct typePersMDData
200 {
201 const IOGeneralMemoryDescriptor *fMD;
202 ipc_port_t fMemEntry;
203 };
204
205 struct ioPLBlock {
206 upl_t fIOPL;
207 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
208 vm_offset_t fPageInfo; // Pointer to page list or index into it
209 ppnum_t fMappedBase; // Page number of first page in this iopl
210 unsigned int fPageOffset; // Offset within first page of iopl
211 unsigned int fFlags; // Flags
212 };
213
214 struct ioGMDData {
215 IOMapper *fMapper;
216 unsigned int fPageCnt;
217 upl_page_info_t fPageList[];
218 ioPLBlock fBlocks[];
219 };
220
221 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
222 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
223 #define getNumIOPL(osd, d) \
224 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
225 #define getPageList(d) (&(d->fPageList[0]))
226 #define computeDataSize(p, u) \
227 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
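
/*
 * Added explanatory note (not original source): the accessors above assume
 * that _memoryEntries is an OSData buffer laid out as a single ioGMDData
 * header, followed by fPageCnt upl_page_info_t entries, followed by the
 * ioPLBlock records.  A hedged sketch of the sizing/creation pattern (the
 * names "pages" and "ranges" are placeholders, error handling omitted):
 *
 *     unsigned int dataSize = computeDataSize(pages, ranges * 2);
 *     OSData * entries = OSData::withCapacity(dataSize);
 *     entries->appendBytes(0, sizeof(ioGMDData));   // reserve the header
 *     ioGMDData * dataP = getDataP(entries);        // then fill it in
 *
 * initWithOptions() below builds _memoryEntries in exactly this way.
 */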
228
229
230 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
231
232 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
233
234
235 extern "C" {
236
237 kern_return_t device_data_action(
238 int device_handle,
239 ipc_port_t device_pager,
240 vm_prot_t protection,
241 vm_object_offset_t offset,
242 vm_size_t size)
243 {
244 struct ExpansionData {
245 void * devicePager;
246 unsigned int pagerContig:1;
247 unsigned int unused:31;
248 IOMemoryDescriptor * memory;
249 };
250 kern_return_t kr;
251 ExpansionData * ref = (ExpansionData *) device_handle;
252 IOMemoryDescriptor * memDesc;
253
254 LOCK;
255 memDesc = ref->memory;
256 if( memDesc)
257 {
258 memDesc->retain();
259 kr = memDesc->handleFault( device_pager, 0, 0,
260 offset, size, kIOMapDefaultCache /*?*/);
261 memDesc->release();
262 }
263 else
264 kr = KERN_ABORTED;
265 UNLOCK;
266
267 return( kr );
268 }
269
270 kern_return_t device_close(
271 int device_handle)
272 {
273 struct ExpansionData {
274 void * devicePager;
275 unsigned int pagerContig:1;
276 unsigned int unused:31;
277 IOMemoryDescriptor * memory;
278 };
279 ExpansionData * ref = (ExpansionData *) device_handle;
280
281 IODelete( ref, ExpansionData, 1 );
282
283 return( kIOReturnSuccess );
284 }
285 }; // end extern "C"
286
287 // Note this inline function uses C++ reference arguments to return values.
288 // This means that pointers are not passed and NULLs don't have to be
289 // checked for, as a NULL reference is illegal.
290 static inline void
291 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
292 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
293 {
294 assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
295 || kIOMemoryTypeVirtual == type);
296 if (kIOMemoryTypeUIO == type) {
297 user_size_t us;
298 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
299 }
300 else {
301 IOVirtualRange cur = r.v[ind];
302 addr = cur.address;
303 len = cur.length;
304 }
305 }
306
307 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
308
309 /*
310 * withAddress:
311 *
312 * Create a new IOMemoryDescriptor. The buffer is a virtual address
313 * relative to the specified task. If no task is supplied, the kernel
314 * task is implied.
315 */
316 IOMemoryDescriptor *
317 IOMemoryDescriptor::withAddress(void * address,
318 IOByteCount length,
319 IODirection direction)
320 {
321 return IOMemoryDescriptor::
322 withAddress((vm_address_t) address, length, direction, kernel_task);
323 }
324
325 IOMemoryDescriptor *
326 IOMemoryDescriptor::withAddress(vm_address_t address,
327 IOByteCount length,
328 IODirection direction,
329 task_t task)
330 {
331 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
332 if (that)
333 {
334 if (that->initWithAddress(address, length, direction, task))
335 return that;
336
337 that->release();
338 }
339 return 0;
340 }
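
/*
 * Illustrative sketch (added comment, not original code): a typical kernel
 * client wraps an existing buffer with withAddress() and brackets the I/O
 * with prepare()/complete().  "buf" and "len" are placeholders.
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, len, kIODirectionOut);
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... hand physical segments to the hardware ...
 *         md->complete();
 *     }
 *     if (md)
 *         md->release();
 */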
341
342 IOMemoryDescriptor *
343 IOMemoryDescriptor::withPhysicalAddress(
344 IOPhysicalAddress address,
345 IOByteCount length,
346 IODirection direction )
347 {
348 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
349 if (self
350 && !self->initWithPhysicalAddress(address, length, direction)) {
351 self->release();
352 return 0;
353 }
354
355 return self;
356 }
357
358 IOMemoryDescriptor *
359 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
360 UInt32 withCount,
361 IODirection direction,
362 task_t task,
363 bool asReference)
364 {
365 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
366 if (that)
367 {
368 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
369 return that;
370
371 that->release();
372 }
373 return 0;
374 }
375
376
377 /*
378 * withRanges:
379 *
380 * Create a new IOMemoryDescriptor. The buffer is made up of several
381 * virtual address ranges, from a given task.
382 *
383 * Passing the ranges as a reference will avoid an extra allocation.
384 */
385 IOMemoryDescriptor *
386 IOMemoryDescriptor::withOptions(void * buffers,
387 UInt32 count,
388 UInt32 offset,
389 task_t task,
390 IOOptionBits opts,
391 IOMapper * mapper)
392 {
393 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
394
395 if (self
396 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
397 {
398 self->release();
399 return 0;
400 }
401
402 return self;
403 }
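
/*
 * Illustrative sketch (added comment, not original code): withOptions() is
 * the general constructor; the interpretation of "buffers" is selected by
 * the kIOMemoryType* bits and the direction rides in the same option word,
 * as initWithRanges() below demonstrates.  "addr0", "len0", "addr1", "len1"
 * and "clientTask" are placeholders.
 *
 *     IOVirtualRange ranges[2] = { { addr0, len0 }, { addr1, len1 } };
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *         ranges, 2, 0, clientTask,
 *         kIOMemoryTypeVirtual | kIODirectionIn, /* mapper */ 0);
 */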
404
405 // Can't leave this abstract, but it should never be used directly.
406 bool IOMemoryDescriptor::initWithOptions(void * buffers,
407 UInt32 count,
408 UInt32 offset,
409 task_t task,
410 IOOptionBits options,
411 IOMapper * mapper)
412 {
413 // @@@ gvdl: Should I panic?
414 panic("IOMD::initWithOptions called\n");
415 return 0;
416 }
417
418 IOMemoryDescriptor *
419 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
420 UInt32 withCount,
421 IODirection direction,
422 bool asReference)
423 {
424 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
425 if (that)
426 {
427 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
428 return that;
429
430 that->release();
431 }
432 return 0;
433 }
434
435 IOMemoryDescriptor *
436 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
437 IOByteCount offset,
438 IOByteCount length,
439 IODirection direction)
440 {
441 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
442
443 if (self && !self->initSubRange(of, offset, length, direction)) {
444 self->release();
445 self = 0;
446 }
447 return self;
448 }
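
/*
 * Illustrative sketch (added comment, not original code): a sub-range
 * descriptor exposes a window onto a parent descriptor without copying it.
 * "parentMD" is a placeholder for an existing IOMemoryDescriptor.
 *
 *     IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *         parentMD, PAGE_SIZE, 2 * PAGE_SIZE, kIODirectionIn);
 *     if (sub) {
 *         // ... use sub like any other descriptor ...
 *         sub->release();
 *     }
 */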
449
450 IOMemoryDescriptor * IOMemoryDescriptor::
451 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
452 {
453 IOGeneralMemoryDescriptor *origGenMD =
454 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
455
456 if (origGenMD)
457 return IOGeneralMemoryDescriptor::
458 withPersistentMemoryDescriptor(origGenMD);
459 else
460 return 0;
461 }
462
463 IOMemoryDescriptor * IOGeneralMemoryDescriptor::
464 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
465 {
466 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
467
468 if (!sharedMem)
469 return 0;
470
471 if (sharedMem == originalMD->_memEntry) {
472 originalMD->retain(); // Add a new reference to ourselves
473 ipc_port_release_send(sharedMem); // Remove extra send right
474 return originalMD;
475 }
476
477 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
478 typePersMDData initData = { originalMD, sharedMem };
479
480 if (self
481 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
482 self->release();
483 self = 0;
484 }
485 return self;
486 }
487
488 void *IOGeneralMemoryDescriptor::createNamedEntry()
489 {
490 kern_return_t error;
491 ipc_port_t sharedMem;
492
493 IOOptionBits type = _flags & kIOMemoryTypeMask;
494
495 user_addr_t range0Addr;
496 IOByteCount range0Len;
497 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
498 range0Addr = trunc_page_64(range0Addr);
499
500 vm_size_t size = ptoa_32(_pages);
501 vm_address_t kernelPage = (vm_address_t) range0Addr;
502
503 vm_map_t theMap = ((_task == kernel_task)
504 && (kIOMemoryBufferPageable & _flags))
505 ? IOPageableMapForAddress(kernelPage)
506 : get_task_map(_task);
507
508 memory_object_size_t actualSize = size;
509 vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
510 if (_memEntry)
511 prot |= MAP_MEM_NAMED_REUSE;
512
513 error = mach_make_memory_entry_64(theMap,
514 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
515
516 if (KERN_SUCCESS == error) {
517 if (actualSize == size) {
518 return sharedMem;
519 } else {
520 #if IOASSERT
521 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
522 (UInt64)range0Addr, (UInt32)actualSize, size);
523 #endif
524 ipc_port_release_send( sharedMem );
525 }
526 }
527
528 return MACH_PORT_NULL;
529 }
530
531 /*
532 * initWithAddress:
533 *
534 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
535 * relative to the specified task. If no task is supplied, the kernel
536 * task is implied.
537 *
538 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
539 * initWithRanges again on an existing instance -- note this behavior
540 * is not commonly supported in other I/O Kit classes, although it is
541 * supported here.
542 */
543 bool
544 IOGeneralMemoryDescriptor::initWithAddress(void * address,
545 IOByteCount withLength,
546 IODirection withDirection)
547 {
548 _singleRange.v.address = (vm_address_t) address;
549 _singleRange.v.length = withLength;
550
551 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
552 }
553
554 bool
555 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
556 IOByteCount withLength,
557 IODirection withDirection,
558 task_t withTask)
559 {
560 _singleRange.v.address = address;
561 _singleRange.v.length = withLength;
562
563 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
564 }
565
566 bool
567 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
568 IOPhysicalAddress address,
569 IOByteCount withLength,
570 IODirection withDirection )
571 {
572 _singleRange.p.address = address;
573 _singleRange.p.length = withLength;
574
575 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
576 }
577
578 bool
579 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
580 IOPhysicalRange * ranges,
581 UInt32 count,
582 IODirection direction,
583 bool reference)
584 {
585 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
586
587 if (reference)
588 mdOpts |= kIOMemoryAsReference;
589
590 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
591 }
592
593 bool
594 IOGeneralMemoryDescriptor::initWithRanges(
595 IOVirtualRange * ranges,
596 UInt32 count,
597 IODirection direction,
598 task_t task,
599 bool reference)
600 {
601 IOOptionBits mdOpts = direction;
602
603 if (reference)
604 mdOpts |= kIOMemoryAsReference;
605
606 if (task) {
607 mdOpts |= kIOMemoryTypeVirtual;
608
609 // Auto-prepare if this is a kernel memory descriptor as very few
610 // clients bother to prepare() kernel memory.
611 // But it was not enforced so what are you going to do?
612 if (task == kernel_task)
613 mdOpts |= kIOMemoryAutoPrepare;
614 }
615 else
616 mdOpts |= kIOMemoryTypePhysical;
617
618 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
619 }
620
621 /*
622 * initWithOptions:
623 *
624 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
625 * address ranges from a given task, several physical ranges, a UPL from the
626 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
627 *
628 * Passing the ranges as a reference will avoid an extra allocation.
629 *
630 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
631 * existing instance -- note this behavior is not commonly supported in other
632 * I/O Kit classes, although it is supported here.
633 */
634
635 bool
636 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
637 UInt32 count,
638 UInt32 offset,
639 task_t task,
640 IOOptionBits options,
641 IOMapper * mapper)
642 {
643 IOOptionBits type = options & kIOMemoryTypeMask;
644
645 // Grab the original MD's configuration data to initialise the
646 // arguments to this function.
647 if (kIOMemoryTypePersistentMD == type) {
648
649 typePersMDData *initData = (typePersMDData *) buffers;
650 const IOGeneralMemoryDescriptor *orig = initData->fMD;
651 ioGMDData *dataP = getDataP(orig->_memoryEntries);
652
653 // Only accept persistent memory descriptors with valid dataP data.
654 assert(orig->_rangesCount == 1);
655 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
656 return false;
657
658 _memEntry = initData->fMemEntry; // Grab the new named entry
659 options = orig->_flags | kIOMemoryAsReference;
660 _singleRange = orig->_singleRange; // Initialise our range
661 buffers = &_singleRange;
662 count = 1;
663
664 // Now grab the original task and whatever mapper was previously used
665 task = orig->_task;
666 mapper = dataP->fMapper;
667
668 // We are ready to go through the original initialisation now
669 }
670
671 switch (type) {
672 case kIOMemoryTypeUIO:
673 case kIOMemoryTypeVirtual:
674 assert(task);
675 if (!task)
676 return false;
677 else
678 break;
679
680 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
681 mapper = kIOMapperNone;
682
683 case kIOMemoryTypeUPL:
684 assert(!task);
685 break;
686 default:
687 return false; /* bad argument */
688 }
689
690 assert(buffers);
691 assert(count);
692
693 /*
694 * We can check the _initialized instance variable before having ever set
695 * it to an initial value because I/O Kit guarantees that all our instance
696 * variables are zeroed on an object's allocation.
697 */
698
699 if (_initialized) {
700 /*
701 * An existing memory descriptor is being retargeted to point to
702 * somewhere else. Clean up our present state.
703 */
704
705 while (_wireCount)
706 complete();
707 if (_kernPtrAligned)
708 unmapFromKernel();
709 if (_ranges.v && _rangesIsAllocated)
710 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
711 if (_memEntry)
712 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
713 }
714 else {
715 if (!super::init())
716 return false;
717 _initialized = true;
718 }
719
720 // Grab the appropriate mapper
721 if (mapper == kIOMapperNone)
722 mapper = 0; // No Mapper
723 else if (!mapper) {
724 IOMapper::checkForSystemMapper();
725 gIOSystemMapper = mapper = IOMapper::gSystem;
726 }
727
728 // Remove the dynamic internal use flags from the initial setting
729 options &= ~(kIOMemoryPreparedReadOnly);
730 _flags = options;
731 _task = task;
732
733 // DEPRECATED variable initialisation
734 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
735 _position = 0;
736 _kernPtrAligned = 0;
737 _cachedPhysicalAddress = 0;
738 _cachedVirtualAddress = 0;
739
740 if (kIOMemoryTypeUPL == type) {
741
742 ioGMDData *dataP;
743 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
744
745 if (!_memoryEntries) {
746 _memoryEntries = OSData::withCapacity(dataSize);
747 if (!_memoryEntries)
748 return false;
749 }
750 else if (!_memoryEntries->initWithCapacity(dataSize))
751 return false;
752
753 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
754 dataP = getDataP(_memoryEntries);
755 dataP->fMapper = mapper;
756 dataP->fPageCnt = 0;
757
758 _wireCount++; // UPLs start out life wired
759
760 _length = count;
761 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
762
763 ioPLBlock iopl;
764 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
765
766 iopl.fIOPL = (upl_t) buffers;
767 // The flag kIOPLOnDevice is conveniently equal to 1
768 iopl.fFlags = pageList->device | kIOPLExternUPL;
769 iopl.fIOMDOffset = 0;
770 if (!pageList->device) {
771 // Pre-compute the offset into the UPL's page list
772 pageList = &pageList[atop_32(offset)];
773 offset &= PAGE_MASK;
774 if (mapper) {
775 iopl.fMappedBase = mapper->iovmAlloc(_pages);
776 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
777 }
778 else
779 iopl.fMappedBase = 0;
780 }
781 else
782 iopl.fMappedBase = 0;
783 iopl.fPageInfo = (vm_address_t) pageList;
784 iopl.fPageOffset = offset;
785
786 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
787 }
788 else {
789 // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical
790
791 // Initialize the memory descriptor
792 if (options & kIOMemoryAsReference) {
793 _rangesIsAllocated = false;
794
795 // Hack assignment to get the buffer arg into _ranges.
796 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
797 // work, C++ sigh.
798 // This also initialises the uio & physical ranges.
799 _ranges.v = (IOVirtualRange *) buffers;
800 }
801 else {
802 assert(kIOMemoryTypeUIO != type);
803
804 _rangesIsAllocated = true;
805 _ranges.v = IONew(IOVirtualRange, count);
806 if (!_ranges.v)
807 return false;
808 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
809 }
810
811 // Find starting address within the vector of ranges
812 Ranges vec = _ranges;
813 UInt32 length = 0;
814 UInt32 pages = 0;
815 for (unsigned ind = 0; ind < count; ind++) {
816 user_addr_t addr;
817 UInt32 len;
818
819 // addr & len are returned by this function
820 getAddrLenForInd(addr, len, type, vec, ind);
821 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
822 len += length;
823 assert(len > length); // Check for 32 bit wrap around
824 length = len;
825 }
826 _length = length;
827 _pages = pages;
828 _rangesCount = count;
829
830 // Auto-prepare memory at creation time.
831 // Implied completion when descriptor is freed
832 if (kIOMemoryTypePhysical == type)
833 _wireCount++; // Physical MDs are, by definition, wired
834 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
835 ioGMDData *dataP;
836 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
837
838 if (!_memoryEntries) {
839 _memoryEntries = OSData::withCapacity(dataSize);
840 if (!_memoryEntries)
841 return false;
842 }
843 else if (!_memoryEntries->initWithCapacity(dataSize))
844 return false;
845
846 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
847 dataP = getDataP(_memoryEntries);
848 dataP->fMapper = mapper;
849 dataP->fPageCnt = _pages;
850
851 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
852 _memEntry = createNamedEntry();
853
854 if ((_flags & kIOMemoryAutoPrepare)
855 && prepare() != kIOReturnSuccess)
856 return false;
857 }
858 }
859
860 return true;
861 }
862
863 /*
864 * free
865 *
866 * Free resources.
867 */
868 void IOGeneralMemoryDescriptor::free()
869 {
870 LOCK;
871 if( reserved)
872 reserved->memory = 0;
873 UNLOCK;
874
875 while (_wireCount)
876 complete();
877 if (_memoryEntries)
878 _memoryEntries->release();
879
880 if (_kernPtrAligned)
881 unmapFromKernel();
882 if (_ranges.v && _rangesIsAllocated)
883 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
884
885 if (reserved && reserved->devicePager)
886 device_pager_deallocate( (memory_object_t) reserved->devicePager );
887
888 // memEntry holds a ref on the device pager which owns reserved
889 // (ExpansionData) so no reserved access after this point
890 if (_memEntry)
891 ipc_port_release_send( (ipc_port_t) _memEntry );
892
893 super::free();
894 }
895
896 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
897 /* DEPRECATED */ {
898 panic("IOGMD::unmapFromKernel deprecated");
899 /* DEPRECATED */ }
900 /* DEPRECATED */
901 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
902 /* DEPRECATED */ {
903 panic("IOGMD::mapIntoKernel deprecated");
904 /* DEPRECATED */ }
905
906 /*
907 * getDirection:
908 *
909 * Get the direction of the transfer.
910 */
911 IODirection IOMemoryDescriptor::getDirection() const
912 {
913 return _direction;
914 }
915
916 /*
917 * getLength:
918 *
919 * Get the length of the transfer (over all ranges).
920 */
921 IOByteCount IOMemoryDescriptor::getLength() const
922 {
923 return _length;
924 }
925
926 void IOMemoryDescriptor::setTag( IOOptionBits tag )
927 {
928 _tag = tag;
929 }
930
931 IOOptionBits IOMemoryDescriptor::getTag( void )
932 {
933 return( _tag);
934 }
935
936 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
937 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
938 IOByteCount * length )
939 {
940 IOPhysicalAddress physAddr = 0;
941
942 if( prepare() == kIOReturnSuccess) {
943 physAddr = getPhysicalSegment( offset, length );
944 complete();
945 }
946
947 return( physAddr );
948 }
949
950 IOByteCount IOMemoryDescriptor::readBytes
951 (IOByteCount offset, void *bytes, IOByteCount length)
952 {
953 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
954 IOByteCount remaining;
955
956 // Assert that this entire I/O is within the available range
957 assert(offset < _length);
958 assert(offset + length <= _length);
959 if (offset >= _length) {
960 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
961 return 0;
962 }
963
964 remaining = length = min(length, _length - offset);
965 while (remaining) { // (process another target segment?)
966 addr64_t srcAddr64;
967 IOByteCount srcLen;
968
969 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
970 if (!srcAddr64)
971 break;
972
973 // Clip segment length to remaining
974 if (srcLen > remaining)
975 srcLen = remaining;
976
977 copypv(srcAddr64, dstAddr, srcLen,
978 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
979
980 dstAddr += srcLen;
981 offset += srcLen;
982 remaining -= srcLen;
983 }
984
985 assert(!remaining);
986
987 return length - remaining;
988 }
989
990 IOByteCount IOMemoryDescriptor::writeBytes
991 (IOByteCount offset, const void *bytes, IOByteCount length)
992 {
993 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
994 IOByteCount remaining;
995
996 // Assert that this entire I/O is within the available range
997 assert(offset < _length);
998 assert(offset + length <= _length);
999
1000 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1001
1002 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1003 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1004 return 0;
1005 }
1006
1007 remaining = length = min(length, _length - offset);
1008 while (remaining) { // (process another target segment?)
1009 addr64_t dstAddr64;
1010 IOByteCount dstLen;
1011
1012 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1013 if (!dstAddr64)
1014 break;
1015
1016 // Clip segment length to remaining
1017 if (dstLen > remaining)
1018 dstLen = remaining;
1019
1020 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1021 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1022
1023 srcAddr += dstLen;
1024 offset += dstLen;
1025 remaining -= dstLen;
1026 }
1027
1028 assert(!remaining);
1029
1030 return length - remaining;
1031 }
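
/*
 * Illustrative sketch (added comment, not original code): readBytes() and
 * writeBytes() copy through the physical pages with copypv(), so for
 * pageable memory the descriptor generally needs to have been prepare()d
 * first (getPhysicalSegment64() asserts the wire count for non-physical
 * descriptors).  "md" is a placeholder for a prepared descriptor.
 *
 *     UInt8 header[64];
 *     IOByteCount got = md->readBytes(0, header, sizeof(header));
 *     if (got == sizeof(header)) {
 *         // patch the first four bytes and push them back
 *         md->writeBytes(0, header, 4);
 *     }
 */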
1032
1033 // osfmk/device/iokit_rpc.c
1034 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1035
1036 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1037 /* DEPRECATED */ {
1038 panic("IOGMD::setPosition deprecated");
1039 /* DEPRECATED */ }
1040
1041 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
1042 (IOByteCount offset, IOByteCount *lengthOfSegment)
1043 {
1044 IOPhysicalAddress address = 0;
1045 IOPhysicalLength length = 0;
1046
1047 // assert(offset <= _length);
1048 if (offset < _length) // (within bounds?)
1049 {
1050 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1051 unsigned int ind;
1052
1053 // Physical address based memory descriptor
1054
1055 // Find offset within descriptor and make it relative
1056 // to the current _range.
1057 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
1058 offset -= _ranges.p[ind].length;
1059
1060 IOPhysicalRange cur = _ranges.p[ind];
1061 address = cur.address + offset;
1062 length = cur.length - offset;
1063
1064 // see how far we can coalesce ranges
1065 for (++ind; ind < _rangesCount; ind++) {
1066 cur = _ranges.p[ind];
1067
1068 if (address + length != cur.address)
1069 break;
1070
1071 length += cur.length;
1072 }
1073
1074 // @@@ gvdl: should be assert(address);
1075 // but can't as NVidia GeForce creates a bogus physical mem
1076 assert(address
1077 || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
1078 assert(length);
1079 }
1080 else do {
1081 // We need wiring & we are wired.
1082 assert(_wireCount);
1083
1084 if (!_wireCount)
1085 {
1086 panic("IOGMD: not wired for getPhysicalSegment()");
1087 continue;
1088 }
1089
1090 assert(_memoryEntries);
1091
1092 ioGMDData * dataP = getDataP(_memoryEntries);
1093 const ioPLBlock *ioplList = getIOPLList(dataP);
1094 UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
1095 upl_page_info_t *pageList = getPageList(dataP);
1096
1097 assert(numIOPLs > 0);
1098
1099 // Scan through iopl info blocks looking for block containing offset
1100 for (ind = 1; ind < numIOPLs; ind++) {
1101 if (offset < ioplList[ind].fIOMDOffset)
1102 break;
1103 }
1104
1105 // Go back to actual range as search goes past it
1106 ioPLBlock ioplInfo = ioplList[ind - 1];
1107
1108 if (ind < numIOPLs)
1109 length = ioplList[ind].fIOMDOffset;
1110 else
1111 length = _length;
1112 length -= offset; // Remainder within iopl
1113
1114 // Subtract the offset of this iopl within the total list
1115 offset -= ioplInfo.fIOMDOffset;
1116
1117 // This is a mapped IOPL so we just need to compute an offset
1118 // relative to the mapped base.
1119 if (ioplInfo.fMappedBase) {
1120 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1121 address = ptoa_32(ioplInfo.fMappedBase) + offset;
1122 continue;
1123 }
1124
1125 // Currently the offset is rebased into the current iopl.
1126 // Now add the iopl 1st page offset.
1127 offset += ioplInfo.fPageOffset;
1128
1129 // For external UPLs the fPageInfo field points directly to
1130 // the upl's upl_page_info_t array.
1131 if (ioplInfo.fFlags & kIOPLExternUPL)
1132 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1133 else
1134 pageList = &pageList[ioplInfo.fPageInfo];
1135
1136 // Check for direct device non-paged memory
1137 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1138 address = ptoa_32(pageList->phys_addr) + offset;
1139 continue;
1140 }
1141
1142 // Now we need to compute the index into the pageList
1143 ind = atop_32(offset);
1144 offset &= PAGE_MASK;
1145
1146 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
1147 address = ptoa_32(pageAddr) + offset;
1148
1149 // Check whether the remaining data in this upl is longer than the
1150 // remainder of the current page.  If so, check for
1151 // contiguous pages.
1152 if (length > PAGE_SIZE - offset) {
1153 // See if the next page is contiguous. Stop looking when we hit
1154 // the end of this upl, which is indicated by the
1155 // contigLength >= length.
1156 IOByteCount contigLength = PAGE_SIZE - offset;
1157
1158 // Look for contiguous segment
1159 while (contigLength < length
1160 && ++pageAddr == pageList[++ind].phys_addr) {
1161 contigLength += PAGE_SIZE;
1162 }
1163 if (length > contigLength)
1164 length = contigLength;
1165 }
1166
1167 assert(address);
1168 assert(length);
1169
1170 } while (0);
1171
1172 if (!address)
1173 length = 0;
1174 }
1175
1176 if (lengthOfSegment)
1177 *lengthOfSegment = length;
1178
1179 return address;
1180 }
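
/*
 * Illustrative sketch (added comment, not original code): the usual way to
 * build a scatter/gather list is to walk the descriptor with
 * getPhysicalSegment() until it returns 0.  "md" is a placeholder for a
 * prepared descriptor.
 *
 *     IOByteCount       offset = 0;
 *     IOByteCount       segLen;
 *     IOPhysicalAddress segAddr;
 *     while ((segAddr = md->getPhysicalSegment(offset, &segLen))) {
 *         // program one DMA element covering [segAddr, segAddr + segLen)
 *         offset += segLen;
 *     }
 */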
1181
1182 addr64_t IOMemoryDescriptor::getPhysicalSegment64
1183 (IOByteCount offset, IOByteCount *lengthOfSegment)
1184 {
1185 IOPhysicalAddress phys32;
1186 IOByteCount length;
1187 addr64_t phys64;
1188
1189 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1190 if (!phys32)
1191 return 0;
1192
1193 if (gIOSystemMapper)
1194 {
1195 IOByteCount origLen;
1196
1197 phys64 = gIOSystemMapper->mapAddr(phys32);
1198 origLen = *lengthOfSegment;
1199 length = page_size - (phys64 & (page_size - 1));
1200 while ((length < origLen)
1201 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
1202 length += page_size;
1203 if (length > origLen)
1204 length = origLen;
1205
1206 *lengthOfSegment = length;
1207 }
1208 else
1209 phys64 = (addr64_t) phys32;
1210
1211 return phys64;
1212 }
1213
1214 IOPhysicalAddress IOGeneralMemoryDescriptor::
1215 getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1216 {
1217 IOPhysicalAddress address = 0;
1218 IOPhysicalLength length = 0;
1219 IOOptionBits type = _flags & kIOMemoryTypeMask;
1220
1221 assert(offset <= _length);
1222
1223 if ( type == kIOMemoryTypeUPL)
1224 return super::getSourceSegment( offset, lengthOfSegment );
1225 else if ( offset < _length ) // (within bounds?)
1226 {
1227 unsigned rangesIndex = 0;
1228 Ranges vec = _ranges;
1229 user_addr_t addr;
1230
1231 // Find starting address within the vector of ranges
1232 for (;;) {
1233 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1234 if (offset < length)
1235 break;
1236 offset -= length; // (make offset relative)
1237 rangesIndex++;
1238 }
1239
1240 // Now that we have the starting range,
1241 // lets find the last contiguous range
1242 addr += offset;
1243 length -= offset;
1244
1245 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1246 user_addr_t newAddr;
1247 IOPhysicalLength newLen;
1248
1249 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1250 if (addr + length != newAddr)
1251 break;
1252 length += newLen;
1253 }
1254 if (addr)
1255 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1256 else
1257 length = 0;
1258 }
1259
1260 if ( lengthOfSegment ) *lengthOfSegment = length;
1261
1262 return address;
1263 }
1264
1265 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1266 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1267 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1268 /* DEPRECATED */ {
1269 if (_task == kernel_task)
1270 return (void *) getSourceSegment(offset, lengthOfSegment);
1271 else
1272 panic("IOGMD::getVirtualSegment deprecated");
1273
1274 return 0;
1275 /* DEPRECATED */ }
1276 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1277
1278
1279
1280 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1281 IOOptionBits * oldState )
1282 {
1283 IOReturn err = kIOReturnSuccess;
1284 vm_purgable_t control;
1285 int state;
1286
1287 do
1288 {
1289 if (!_memEntry)
1290 {
1291 err = kIOReturnNotReady;
1292 break;
1293 }
1294
1295 control = VM_PURGABLE_SET_STATE;
1296 switch (newState)
1297 {
1298 case kIOMemoryPurgeableKeepCurrent:
1299 control = VM_PURGABLE_GET_STATE;
1300 break;
1301
1302 case kIOMemoryPurgeableNonVolatile:
1303 state = VM_PURGABLE_NONVOLATILE;
1304 break;
1305 case kIOMemoryPurgeableVolatile:
1306 state = VM_PURGABLE_VOLATILE;
1307 break;
1308 case kIOMemoryPurgeableEmpty:
1309 state = VM_PURGABLE_EMPTY;
1310 break;
1311 default:
1312 err = kIOReturnBadArgument;
1313 break;
1314 }
1315
1316 if (kIOReturnSuccess != err)
1317 break;
1318
1319 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1320
1321 if (oldState)
1322 {
1323 if (kIOReturnSuccess == err)
1324 {
1325 switch (state)
1326 {
1327 case VM_PURGABLE_NONVOLATILE:
1328 state = kIOMemoryPurgeableNonVolatile;
1329 break;
1330 case VM_PURGABLE_VOLATILE:
1331 state = kIOMemoryPurgeableVolatile;
1332 break;
1333 case VM_PURGABLE_EMPTY:
1334 state = kIOMemoryPurgeableEmpty;
1335 break;
1336 default:
1337 state = kIOMemoryPurgeableNonVolatile;
1338 err = kIOReturnNotReady;
1339 break;
1340 }
1341 *oldState = state;
1342 }
1343 }
1344 }
1345 while (false);
1346
1347 return (err);
1348 }
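
/*
 * Illustrative sketch (added comment, not original code): setPurgeable()
 * lets the owner of a purgeable buffer volunteer its pages to the VM system
 * while idle, then reclaim them (and learn whether they survived) before
 * reuse.  "md" is a placeholder for a descriptor backed by a purgeable
 * memory entry.
 *
 *     IOOptionBits oldState;
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);     // idle
 *     // ... later, before touching the buffer again ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState) {
 *         // the contents were discarded; regenerate them
 *     }
 */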
1349
1350 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1351 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1352
1353 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1354 IOByteCount offset, IOByteCount length )
1355 {
1356 IOByteCount remaining;
1357 void (*func)(addr64_t pa, unsigned int count) = 0;
1358
1359 switch (options)
1360 {
1361 case kIOMemoryIncoherentIOFlush:
1362 func = &dcache_incoherent_io_flush64;
1363 break;
1364 case kIOMemoryIncoherentIOStore:
1365 func = &dcache_incoherent_io_store64;
1366 break;
1367 }
1368
1369 if (!func)
1370 return (kIOReturnUnsupported);
1371
1372 remaining = length = min(length, getLength() - offset);
1373 while (remaining)
1374 // (process another target segment?)
1375 {
1376 addr64_t dstAddr64;
1377 IOByteCount dstLen;
1378
1379 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1380 if (!dstAddr64)
1381 break;
1382
1383 // Clip segment length to remaining
1384 if (dstLen > remaining)
1385 dstLen = remaining;
1386
1387 (*func)(dstAddr64, dstLen);
1388
1389 offset += dstLen;
1390 remaining -= dstLen;
1391 }
1392
1393 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1394 }
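
/*
 * Illustrative sketch (added comment, not original code): on hardware that
 * is not fully I/O-coherent a driver might use performOperation() to flush
 * or write back the cache lines covering a descriptor around DMA.  "md" and
 * "len" are placeholders; the pairing below is one plausible usage, not a
 * prescription.
 *
 *     // before the CPU reads data the device has just DMA'd in:
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, len);
 *     // after the CPU prepares data the device will read:
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, len);
 */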
1395
1396 #ifdef __ppc__
1397 extern vm_offset_t static_memory_end;
1398 #define io_kernel_static_end static_memory_end
1399 #else
1400 extern vm_offset_t first_avail;
1401 #define io_kernel_static_end first_avail
1402 #endif
1403
1404 static kern_return_t
1405 io_get_kernel_static_upl(
1406 vm_map_t /* map */,
1407 vm_address_t offset,
1408 vm_size_t *upl_size,
1409 upl_t *upl,
1410 upl_page_info_array_t page_list,
1411 unsigned int *count)
1412 {
1413 unsigned int pageCount, page;
1414 ppnum_t phys;
1415
1416 pageCount = atop_32(*upl_size);
1417 if (pageCount > *count)
1418 pageCount = *count;
1419
1420 *upl = NULL;
1421
1422 for (page = 0; page < pageCount; page++)
1423 {
1424 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1425 if (!phys)
1426 break;
1427 page_list[page].phys_addr = phys;
1428 page_list[page].pageout = 0;
1429 page_list[page].absent = 0;
1430 page_list[page].dirty = 0;
1431 page_list[page].precious = 0;
1432 page_list[page].device = 0;
1433 }
1434
1435 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1436 }
1437
1438 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1439 {
1440 IOOptionBits type = _flags & kIOMemoryTypeMask;
1441 IOReturn error = kIOReturnNoMemory;
1442 ioGMDData *dataP;
1443 ppnum_t mapBase = 0;
1444 IOMapper *mapper;
1445 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1446
1447 assert(!_wireCount);
1448 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
1449
1450 if (_pages >= gIOMaximumMappedIOPageCount)
1451 return kIOReturnNoResources;
1452
1453 dataP = getDataP(_memoryEntries);
1454 mapper = dataP->fMapper;
1455 if (mapper && _pages)
1456 mapBase = mapper->iovmAlloc(_pages);
1457
1458 // Note that appendBytes(NULL) zeros the data up to the
1459 // desired length.
1460 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1461 dataP = 0; // May no longer be valid so let's not get tempted.
1462
1463 if (forDirection == kIODirectionNone)
1464 forDirection = _direction;
1465
1466 int uplFlags; // This Mem Desc's default flags for upl creation
1467 switch (forDirection)
1468 {
1469 case kIODirectionOut:
1470 // Pages do not need to be marked as dirty on commit
1471 uplFlags = UPL_COPYOUT_FROM;
1472 _flags |= kIOMemoryPreparedReadOnly;
1473 break;
1474
1475 case kIODirectionIn:
1476 default:
1477 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1478 break;
1479 }
1480 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1481
1482 // Find the appropriate vm_map for the given task
1483 vm_map_t curMap;
1484 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1485 curMap = 0;
1486 else
1487 { curMap = get_task_map(_task); }
1488
1489 // Iterate over the vector of virtual ranges
1490 Ranges vec = _ranges;
1491 unsigned int pageIndex = 0;
1492 IOByteCount mdOffset = 0;
1493 for (UInt range = 0; range < _rangesCount; range++) {
1494 ioPLBlock iopl;
1495 user_addr_t startPage;
1496 IOByteCount numBytes;
1497
1498 // Get the startPage address and length of vec[range]
1499 getAddrLenForInd(startPage, numBytes, type, vec, range);
1500 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1501 numBytes += iopl.fPageOffset;
1502 startPage = trunc_page_64(startPage);
1503
1504 if (mapper)
1505 iopl.fMappedBase = mapBase + pageIndex;
1506 else
1507 iopl.fMappedBase = 0;
1508
1509 // Iterate over the current range, creating UPLs
1510 while (numBytes) {
1511 dataP = getDataP(_memoryEntries);
1512 vm_address_t kernelStart = (vm_address_t) startPage;
1513 vm_map_t theMap;
1514 if (curMap)
1515 theMap = curMap;
1516 else if (!sharedMem) {
1517 assert(_task == kernel_task);
1518 theMap = IOPageableMapForAddress(kernelStart);
1519 }
1520 else
1521 theMap = NULL;
1522
1523 upl_page_info_array_t pageInfo = getPageList(dataP);
1524 int ioplFlags = uplFlags;
1525 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1526
1527 vm_size_t ioplSize = round_page_32(numBytes);
1528 unsigned int numPageInfo = atop_32(ioplSize);
1529
1530 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1531 error = io_get_kernel_static_upl(theMap,
1532 kernelStart,
1533 &ioplSize,
1534 &iopl.fIOPL,
1535 baseInfo,
1536 &numPageInfo);
1537 }
1538 else if (sharedMem) {
1539 error = memory_object_iopl_request(sharedMem,
1540 ptoa_32(pageIndex),
1541 &ioplSize,
1542 &iopl.fIOPL,
1543 baseInfo,
1544 &numPageInfo,
1545 &ioplFlags);
1546 }
1547 else {
1548 assert(theMap);
1549 error = vm_map_create_upl(theMap,
1550 startPage,
1551 &ioplSize,
1552 &iopl.fIOPL,
1553 baseInfo,
1554 &numPageInfo,
1555 &ioplFlags);
1556 }
1557
1558 assert(ioplSize);
1559 if (error != KERN_SUCCESS)
1560 goto abortExit;
1561
1562 error = kIOReturnNoMemory;
1563
1564 if (baseInfo->device) {
1565 numPageInfo = 1;
1566 iopl.fFlags = kIOPLOnDevice;
1567 // Don't translate device memory at all
1568 if (mapper && mapBase) {
1569 mapper->iovmFree(mapBase, _pages);
1570 mapBase = 0;
1571 iopl.fMappedBase = 0;
1572 }
1573 }
1574 else {
1575 iopl.fFlags = 0;
1576 if (mapper)
1577 mapper->iovmInsert(mapBase, pageIndex,
1578 baseInfo, numPageInfo);
1579 }
1580
1581 iopl.fIOMDOffset = mdOffset;
1582 iopl.fPageInfo = pageIndex;
1583
1584 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1585 {
1586 upl_commit(iopl.fIOPL, 0, 0);
1587 upl_deallocate(iopl.fIOPL);
1588 iopl.fIOPL = 0;
1589 }
1590
1591 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1592 // Clean up partially created and unsaved iopl
1593 if (iopl.fIOPL) {
1594 upl_abort(iopl.fIOPL, 0);
1595 upl_deallocate(iopl.fIOPL);
1596 }
1597 goto abortExit;
1598 }
1599
1600 // Check for multiple iopls in one virtual range
1601 pageIndex += numPageInfo;
1602 mdOffset -= iopl.fPageOffset;
1603 if (ioplSize < numBytes) {
1604 numBytes -= ioplSize;
1605 startPage += ioplSize;
1606 mdOffset += ioplSize;
1607 iopl.fPageOffset = 0;
1608 if (mapper)
1609 iopl.fMappedBase = mapBase + pageIndex;
1610 }
1611 else {
1612 mdOffset += numBytes;
1613 break;
1614 }
1615 }
1616 }
1617
1618 return kIOReturnSuccess;
1619
1620 abortExit:
1621 {
1622 dataP = getDataP(_memoryEntries);
1623 UInt done = getNumIOPL(_memoryEntries, dataP);
1624 ioPLBlock *ioplList = getIOPLList(dataP);
1625
1626 for (UInt range = 0; range < done; range++)
1627 {
1628 if (ioplList[range].fIOPL) {
1629 upl_abort(ioplList[range].fIOPL, 0);
1630 upl_deallocate(ioplList[range].fIOPL);
1631 }
1632 }
1633 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1634
1635 if (mapper && mapBase)
1636 mapper->iovmFree(mapBase, _pages);
1637 }
1638
1639 return error;
1640 }
1641
1642 /*
1643 * prepare
1644 *
1645 * Prepare the memory for an I/O transfer. This involves paging in
1646 * the memory, if necessary, and wiring it down for the duration of
1647 * the transfer. The complete() method completes the processing of
1648 * the memory after the I/O transfer finishes. This method needn't be
1649 * called for non-pageable memory.
1650 */
1651 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1652 {
1653 IOReturn error = kIOReturnSuccess;
1654 IOOptionBits type = _flags & kIOMemoryTypeMask;
1655
1656 if (!_wireCount
1657 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
1658 error = wireVirtual(forDirection);
1659 if (error)
1660 return error;
1661 }
1662
1663 _wireCount++;
1664
1665 return kIOReturnSuccess;
1666 }
1667
1668 /*
1669 * complete
1670 *
1671 * Complete processing of the memory after an I/O transfer finishes.
1672 * This method should not be called unless a prepare was previously
1673 * issued; the prepare() and complete() must occur in pairs,
1674 * before and after an I/O transfer involving pageable memory.
1675 */
1676
1677 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1678 {
1679 assert(_wireCount);
1680
1681 if (!_wireCount)
1682 return kIOReturnSuccess;
1683
1684 _wireCount--;
1685 if (!_wireCount) {
1686 IOOptionBits type = _flags & kIOMemoryTypeMask;
1687
1688 if (kIOMemoryTypePhysical == type) {
1689 /* kIOMemoryTypePhysical */
1690 // DO NOTHING
1691 }
1692 else {
1693 ioGMDData * dataP = getDataP(_memoryEntries);
1694 ioPLBlock *ioplList = getIOPLList(dataP);
1695 UInt count = getNumIOPL(_memoryEntries, dataP);
1696
1697 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1698 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1699
1700 // Only complete iopls that we created which are for TypeVirtual
1701 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
1702 for (UInt ind = 0; ind < count; ind++)
1703 if (ioplList[ind].fIOPL) {
1704 upl_commit(ioplList[ind].fIOPL, 0, 0);
1705 upl_deallocate(ioplList[ind].fIOPL);
1706 }
1707 }
1708
1709 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1710 }
1711 }
1712 return kIOReturnSuccess;
1713 }
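
/*
 * Illustrative sketch (added comment, not original code): prepare() and
 * complete() nest by reference count (_wireCount), so every successful
 * prepare() must be balanced by exactly one complete().  "md" is a
 * placeholder.
 *
 *     if (kIOReturnSuccess == md->prepare()) {
 *         // memory is wired; safe to hand physical segments to hardware
 *         md->complete();    // unwire when the transfer is done
 *     }
 */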
1714
1715 IOReturn IOGeneralMemoryDescriptor::doMap(
1716 vm_map_t addressMap,
1717 IOVirtualAddress * atAddress,
1718 IOOptionBits options,
1719 IOByteCount sourceOffset,
1720 IOByteCount length )
1721 {
1722 kern_return_t kr;
1723 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1724
1725 IOOptionBits type = _flags & kIOMemoryTypeMask;
1726 Ranges vec = _ranges;
1727
1728 user_addr_t range0Addr = 0;
1729 IOByteCount range0Len = 0;
1730
1731 if (vec.v)
1732 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
1733
1734 // mapping source == dest? (could be much better)
1735 if( _task
1736 && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1737 && (1 == _rangesCount) && (0 == sourceOffset)
1738 && range0Addr && (length <= range0Len) ) {
1739 if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
1740 return kIOReturnOverrun; // Doesn't fit in 32bit return field
1741 else {
1742 *atAddress = range0Addr;
1743 return( kIOReturnSuccess );
1744 }
1745 }
1746
1747 if( 0 == sharedMem) {
1748
1749 vm_size_t size = ptoa_32(_pages);
1750
1751 if( _task) {
1752 #ifndef i386
1753 memory_object_size_t actualSize = size;
1754 kr = mach_make_memory_entry_64(get_task_map(_task),
1755 &actualSize, range0Addr,
1756 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1757 NULL );
1758
1759 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
1760 #if IOASSERT
1761 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
1762 range0Addr, (UInt32) actualSize, size);
1763 #endif
1764 kr = kIOReturnVMError;
1765 ipc_port_release_send( sharedMem );
1766 }
1767
1768 if( KERN_SUCCESS != kr)
1769 #endif /* !i386 */
1770 sharedMem = MACH_PORT_NULL;
1771
1772 } else do {
1773
1774 memory_object_t pager;
1775 unsigned int flags = 0;
1776 addr64_t pa;
1777 IOPhysicalLength segLen;
1778
1779 pa = getPhysicalSegment64( sourceOffset, &segLen );
1780
1781 if( !reserved) {
1782 reserved = IONew( ExpansionData, 1 );
1783 if( !reserved)
1784 continue;
1785 }
1786 reserved->pagerContig = (1 == _rangesCount);
1787 reserved->memory = this;
1788
1789 /* What cache mode do we need? */
1790 switch(options & kIOMapCacheMask ) {
1791
1792 case kIOMapDefaultCache:
1793 default:
1794 flags = IODefaultCacheBits(pa);
1795 break;
1796
1797 case kIOMapInhibitCache:
1798 flags = DEVICE_PAGER_CACHE_INHIB |
1799 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1800 break;
1801
1802 case kIOMapWriteThruCache:
1803 flags = DEVICE_PAGER_WRITE_THROUGH |
1804 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1805 break;
1806
1807 case kIOMapCopybackCache:
1808 flags = DEVICE_PAGER_COHERENT;
1809 break;
1810
1811 case kIOMapWriteCombineCache:
1812 flags = DEVICE_PAGER_CACHE_INHIB |
1813 DEVICE_PAGER_COHERENT;
1814 break;
1815 }
1816
1817 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1818
1819 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1820 size, flags);
1821 assert( pager );
1822
1823 if( pager) {
1824 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1825 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1826
1827 assert( KERN_SUCCESS == kr );
1828 if( KERN_SUCCESS != kr) {
1829 device_pager_deallocate( pager );
1830 pager = MACH_PORT_NULL;
1831 sharedMem = MACH_PORT_NULL;
1832 }
1833 }
1834 if( pager && sharedMem)
1835 reserved->devicePager = pager;
1836 else {
1837 IODelete( reserved, ExpansionData, 1 );
1838 reserved = 0;
1839 }
1840
1841 } while( false );
1842
1843 _memEntry = (void *) sharedMem;
1844 }
1845
1846
1847 #ifndef i386
1848 if( 0 == sharedMem)
1849 kr = kIOReturnVMError;
1850 else
1851 #endif
1852 kr = super::doMap( addressMap, atAddress,
1853 options, sourceOffset, length );
1854
1855 return( kr );
1856 }
1857
1858 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1859 vm_map_t addressMap,
1860 IOVirtualAddress logical,
1861 IOByteCount length )
1862 {
1863 // could be much better
1864 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
1865
1866 IOOptionBits type = _flags & kIOMemoryTypeMask;
1867 user_addr_t range0Addr;
1868 IOByteCount range0Len;
1869
1870 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
1871 if (logical == range0Addr && length <= range0Len)
1872 return( kIOReturnSuccess );
1873 }
1874
1875 return( super::doUnmap( addressMap, logical, length ));
1876 }
1877
1878 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1879
1880 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1881
1882 /* inline function implementation */
1883 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1884 { return( getPhysicalSegment( 0, 0 )); }
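
/*
 * Illustrative sketch (added comment, not original code): a kernel client
 * typically obtains an IOMemoryMap from IOMemoryDescriptor::map() (declared
 * in the header, recommended above in place of the deprecated
 * getVirtualSegment()) and keeps it alive while the mapping is needed.
 * "md" is a placeholder.
 *
 *     IOMemoryMap * mapping = md->map();
 *     if (mapping) {
 *         IOVirtualAddress va = mapping->getVirtualAddress();
 *         // ... access the memory through va ...
 *         mapping->release();    // releasing the map tears it down
 *     }
 */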
1885
1886
1887 #undef super
1888 #define super IOMemoryMap
1889
1890 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1891
1892 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1893
1894 bool _IOMemoryMap::initCompatible(
1895 IOMemoryDescriptor * _memory,
1896 IOMemoryMap * _superMap,
1897 IOByteCount _offset,
1898 IOByteCount _length )
1899 {
1900
1901 if( !super::init())
1902 return( false);
1903
1904 if( (_offset + _length) > _superMap->getLength())
1905 return( false);
1906
1907 _memory->retain();
1908 memory = _memory;
1909 _superMap->retain();
1910 superMap = _superMap;
1911
1912 offset = _offset;
1913 if( _length)
1914 length = _length;
1915 else
1916 length = _memory->getLength();
1917
1918 options = superMap->getMapOptions();
1919 logical = superMap->getVirtualAddress() + offset;
1920
1921 return( true );
1922 }
1923
1924 bool _IOMemoryMap::initWithDescriptor(
1925 IOMemoryDescriptor * _memory,
1926 task_t intoTask,
1927 IOVirtualAddress toAddress,
1928 IOOptionBits _options,
1929 IOByteCount _offset,
1930 IOByteCount _length )
1931 {
1932 bool ok;
1933 bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
1934
1935 if ((!_memory) || (!intoTask))
1936 return( false);
1937
1938 if( (_offset + _length) > _memory->getLength())
1939 return( false);
1940
1941 if (!redir)
1942 {
1943 if (!super::init())
1944 return(false);
1945 addressMap = get_task_map(intoTask);
1946 if( !addressMap)
1947 return( false);
1948 vm_map_reference(addressMap);
1949 addressTask = intoTask;
1950 logical = toAddress;
1951 options = _options;
1952 }
1953
1954 _memory->retain();
1955
1956 offset = _offset;
1957 if( _length)
1958 length = _length;
1959 else
1960 length = _memory->getLength();
1961
1962 if( options & kIOMapStatic)
1963 ok = true;
1964 else
1965 ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
1966 _options, offset, length ));
1967 if (ok || redir)
1968 {
1969 if (memory)
1970 memory->release();
1971 memory = _memory;
1972 logical = toAddress;
1973 }
1974 else
1975 {
1976 _memory->release();
1977 if (!redir)
1978 {
1979 logical = 0;
1980 memory = 0;
1981 vm_map_deallocate(addressMap);
1982 addressMap = 0;
1983 }
1984 }
1985
1986 return( ok );
1987 }
1988
1989 /* LP64todo - these need to expand */
1990 struct IOMemoryDescriptorMapAllocRef
1991 {
1992 ipc_port_t sharedMem;
1993 vm_size_t size;
1994 vm_offset_t mapped;
1995 IOByteCount sourceOffset;
1996 IOOptionBits options;
1997 };
1998
1999 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2000 {
2001 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2002 IOReturn err;
2003
2004 do {
2005 if( ref->sharedMem) {
2006 vm_prot_t prot = VM_PROT_READ
2007 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2008
2009 // set memory entry cache
2010 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2011 switch (ref->options & kIOMapCacheMask)
2012 {
2013 case kIOMapInhibitCache:
2014 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2015 break;
2016
2017 case kIOMapWriteThruCache:
2018 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2019 break;
2020
2021 case kIOMapWriteCombineCache:
2022 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2023 break;
2024
2025 case kIOMapCopybackCache:
2026 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2027 break;
2028
2029 case kIOMapDefaultCache:
2030 default:
2031 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2032 break;
2033 }
2034
2035 vm_size_t unused = 0;
2036
2037 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2038 memEntryCacheMode, NULL, ref->sharedMem );
2039 if (KERN_SUCCESS != err)
2040 IOLog("MAP_MEM_ONLY failed %d\n", err);
2041
2042 err = vm_map( map,
2043 &ref->mapped,
2044 ref->size, 0 /* mask */,
2045 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2046 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2047 ref->sharedMem, ref->sourceOffset,
2048 false, // copy
2049 prot, // cur
2050 prot, // max
2051 VM_INHERIT_NONE);
2052
2053 if( KERN_SUCCESS != err) {
2054 ref->mapped = 0;
2055 continue;
2056 }
2057
2058 } else {
2059
2060 err = vm_allocate( map, &ref->mapped, ref->size,
2061 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2062 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2063
2064 if( KERN_SUCCESS != err) {
2065 ref->mapped = 0;
2066 continue;
2067 }
2068
2069 // we have to make sure that these guys don't get copied if we fork.
2070 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2071 assert( KERN_SUCCESS == err );
2072 }
2073
2074 } while( false );
2075
2076 return( err );
2077 }
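/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The kIOMapCacheMask switch above is what ultimately honours a caller's
 * cache-mode request.  Assuming a driver already holds an
 * IOMemoryDescriptor * md for, say, a frame buffer (md and userTask are
 * hypothetical names), a write-combined user mapping would be requested
 * through the public API defined later in this file:
 *
 *   IOMemoryMap * map = md->map( userTask, 0,
 *                                kIOMapAnywhere | kIOMapWriteCombineCache,
 *                                0, 0 );
 *
 * The option bits travel through doMap() into ref.options and select
 * MAP_MEM_WCOMB in the switch above before vm_map() is called.
 */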
2078
2079
2080 IOReturn IOMemoryDescriptor::doMap(
2081 vm_map_t addressMap,
2082 IOVirtualAddress * atAddress,
2083 IOOptionBits options,
2084 IOByteCount sourceOffset,
2085 IOByteCount length )
2086 {
2087 IOReturn err = kIOReturnSuccess;
2088 memory_object_t pager;
2089 vm_address_t logical;
2090 IOByteCount pageOffset;
2091 IOPhysicalAddress sourceAddr;
2092 IOMemoryDescriptorMapAllocRef ref;
2093
2094 ref.sharedMem = (ipc_port_t) _memEntry;
2095 ref.sourceOffset = sourceOffset;
2096 ref.options = options;
2097
2098 do {
2099
2100 if( 0 == length)
2101 length = getLength();
2102
2103 sourceAddr = getSourceSegment( sourceOffset, NULL );
2104 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2105
2106 ref.size = round_page_32( length + pageOffset );
2107
2108 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2109 {
2110 upl_t redirUPL2;
2111 vm_size_t size;
2112 int flags;
2113
2114 _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
2115 ref.mapped = mapping->getVirtualAddress();
2116
2117 if (!_memEntry)
2118 {
2119 err = kIOReturnNotReadable;
2120 continue;
2121 }
2122
2123 size = length;
2124 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2125 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2126
2127 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2128 NULL, NULL,
2129 &flags))
2130 redirUPL2 = NULL;
2131
2132 err = upl_transpose(redirUPL2, mapping->redirUPL);
2133 if (kIOReturnSuccess != err)
2134 {
2135 IOLog("upl_transpose(%x)\n", err);
2136 err = kIOReturnSuccess;
2137 }
2138
2139 if (redirUPL2)
2140 {
2141 upl_commit(redirUPL2, NULL, 0);
2142 upl_deallocate(redirUPL2);
2143 redirUPL2 = 0;
2144 }
2145 {
2146 // swap the memEntries since they now refer to different vm_objects
2147 void * me = _memEntry;
2148 _memEntry = mapping->memory->_memEntry;
2149 mapping->memory->_memEntry = me;
2150 }
2151 }
2152 else
2153 {
2154
2155 logical = *atAddress;
2156 if( options & kIOMapAnywhere)
2157 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2158 ref.mapped = 0;
2159 else {
2160 ref.mapped = trunc_page_32( logical );
2161 if( (logical - ref.mapped) != pageOffset) {
2162 err = kIOReturnVMError;
2163 continue;
2164 }
2165 }
2166
2167 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2168 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2169 else
2170 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
2171 }
2172
2173 if( err != KERN_SUCCESS)
2174 continue;
2175
2176 if( reserved)
2177 pager = (memory_object_t) reserved->devicePager;
2178 else
2179 pager = MACH_PORT_NULL;
2180
2181 if( !ref.sharedMem || pager )
2182 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
2183
2184 } while( false );
2185
2186 if( err != KERN_SUCCESS) {
2187 if( ref.mapped)
2188 doUnmap( addressMap, ref.mapped, ref.size );
2189 *atAddress = NULL;
2190 } else
2191 *atAddress = ref.mapped + pageOffset;
2192
2193 return( err );
2194 }
2195
2196 enum {
2197 kIOMemoryRedirected = 0x00010000
2198 };
2199
2200 IOReturn IOMemoryDescriptor::handleFault(
2201 void * _pager,
2202 vm_map_t addressMap,
2203 IOVirtualAddress address,
2204 IOByteCount sourceOffset,
2205 IOByteCount length,
2206 IOOptionBits options )
2207 {
2208 IOReturn err = kIOReturnSuccess;
2209 memory_object_t pager = (memory_object_t) _pager;
2210 vm_size_t size;
2211 vm_size_t bytes;
2212 vm_size_t page;
2213 IOByteCount pageOffset;
2214 IOByteCount pagerOffset;
2215 IOPhysicalLength segLen;
2216 addr64_t physAddr;
2217
2218 if( !addressMap) {
2219
2220 if( kIOMemoryRedirected & _flags) {
2221 #ifdef DEBUG
2222 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
2223 #endif
2224 do {
2225 SLEEP;
2226 } while( kIOMemoryRedirected & _flags );
2227 }
2228
2229 return( kIOReturnSuccess );
2230 }
2231
2232 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2233 assert( physAddr );
2234 pageOffset = physAddr - trunc_page_64( physAddr );
2235 pagerOffset = sourceOffset;
2236
2237 size = length + pageOffset;
2238 physAddr -= pageOffset;
2239
2240 segLen += pageOffset;
2241 bytes = size;
2242 do {
2243 // in the middle of the loop only map whole pages
2244 if( segLen >= bytes)
2245 segLen = bytes;
2246 else if( segLen != trunc_page_32( segLen))
2247 err = kIOReturnVMError;
2248 if( physAddr != trunc_page_64( physAddr))
2249 err = kIOReturnBadArgument;
2250 if (kIOReturnSuccess != err)
2251 break;
2252
2253 #ifdef DEBUG
2254 if( kIOLogMapping & gIOKitDebug)
2255 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
2256 addressMap, address + pageOffset, physAddr + pageOffset,
2257 segLen - pageOffset);
2258 #endif
2259
2260
2261
2262
2263
2264 #ifdef i386
2265 /* i386 doesn't support faulting on device memory yet */
2266 if( addressMap && (kIOReturnSuccess == err))
2267 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
2268 assert( KERN_SUCCESS == err );
2269 if( err)
2270 break;
2271 #endif
2272
2273 if( pager) {
2274 if( reserved && reserved->pagerContig) {
2275 IOPhysicalLength allLen;
2276 addr64_t allPhys;
2277
2278 allPhys = getPhysicalSegment64( 0, &allLen );
2279 assert( allPhys );
2280 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
2281
2282 } else {
2283
2284 for( page = 0;
2285 (page < segLen) && (KERN_SUCCESS == err);
2286 page += page_size) {
2287 err = device_pager_populate_object(pager, pagerOffset,
2288 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
2289 pagerOffset += page_size;
2290 }
2291 }
2292 assert( KERN_SUCCESS == err );
2293 if( err)
2294 break;
2295 }
2296 #ifndef i386
2297 /* *** ALERT *** */
2298 /* *** Temporary Workaround *** */
2299
2300 /* This call to vm_fault causes an early pmap level resolution */
2301 /* of the mappings created above. The need for this is in absolute */
2302 /* violation of the basic tenet that the pmap layer is a cache. */
2303 /* Further, it implies a serious I/O architectural violation on */
2304 /* the part of some user of the mapping. As of this writing, */
2305 /* the call to vm_fault is needed because the NVIDIA driver */
2306 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2307 /* fixed as soon as possible. The NVIDIA driver should not */
2308 /* need to query for this info as it should know from the doMap */
2309 /* call where the physical memory is mapped. When a query is */
2310 /* necessary to find a physical mapping, it should be done */
2311 /* through an iokit call which includes the mapped memory */
2312 /* handle. This is required for machine architecture independence.*/
2313
2314 if(!(kIOMemoryRedirected & _flags)) {
2315 vm_fault(addressMap,
2316 (vm_map_offset_t)address,
2317 VM_PROT_READ|VM_PROT_WRITE,
2318 FALSE, THREAD_UNINT, NULL,
2319 (vm_map_offset_t)0);
2320 }
2321
2322 /* *** Temporary Workaround *** */
2323 /* *** ALERT *** */
2324 #endif
2325 sourceOffset += segLen - pageOffset;
2326 address += segLen;
2327 bytes -= segLen;
2328 pageOffset = 0;
2329
2330 } while( bytes
2331 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2332
2333 if( bytes)
2334 err = kIOReturnBadArgument;
2335
2336 return( err );
2337 }
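/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The "Temporary Workaround" above exists only because a driver resolves
 * physical addresses by querying the pmap directly.  The architecture
 * independent way to learn where memory sits is to ask the descriptor and
 * mapping objects the driver already owns (md and map are hypothetical
 * names here):
 *
 *   IOByteCount       segLen;
 *   IOPhysicalAddress phys = md->getPhysicalSegment( offset, &segLen );
 *   IOVirtualAddress  virt = map->getVirtualAddress() + offset;
 *
 * rather than calling pmap_extract() on an address obtained elsewhere.
 */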
2338
2339 IOReturn IOMemoryDescriptor::doUnmap(
2340 vm_map_t addressMap,
2341 IOVirtualAddress logical,
2342 IOByteCount length )
2343 {
2344 IOReturn err;
2345
2346 #ifdef DEBUG
2347 if( kIOLogMapping & gIOKitDebug)
2348 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2349 addressMap, logical, length );
2350 #endif
2351
2352 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2353
2354 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2355 addressMap = IOPageableMapForAddress( logical );
2356
2357 err = vm_deallocate( addressMap, logical, length );
2358
2359 } else
2360 err = kIOReturnSuccess;
2361
2362 return( err );
2363 }
2364
2365 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2366 {
2367 IOReturn err = kIOReturnSuccess;
2368 _IOMemoryMap * mapping = 0;
2369 OSIterator * iter;
2370
2371 LOCK;
2372
2373 if( doRedirect)
2374 _flags |= kIOMemoryRedirected;
2375 else
2376 _flags &= ~kIOMemoryRedirected;
2377
2378 do {
2379 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2380 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2381 mapping->redirect( safeTask, doRedirect );
2382
2383 iter->release();
2384 }
2385 } while( false );
2386
2387 if (!doRedirect)
2388 {
2389 WAKEUP;
2390 }
2391
2392 UNLOCK;
2393
2394 // temporary binary compatibility
2395 IOSubMemoryDescriptor * subMem;
2396 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2397 err = subMem->redirect( safeTask, doRedirect );
2398 else
2399 err = kIOReturnSuccess;
2400
2401 return( err );
2402 }
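/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * redirect() is a gate around the period the backing pages are unusable:
 * redirect(task, true) sets kIOMemoryRedirected and unmaps client mappings,
 * so handleFault() above SLEEPs instead of re-establishing them;
 * redirect(task, false) clears the flag and WAKEUPs the blocked faults.
 * The code that owns the descriptor (names hypothetical) would bracket the
 * unsafe window:
 *
 *   md->redirect( safeTask, true );    // block access while pages move
 *   // ... reconfigure or power-manage the underlying memory ...
 *   md->redirect( safeTask, false );   // restore mappings, wake waiters
 */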
2403
2404 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2405 {
2406 return( _parent->redirect( safeTask, doRedirect ));
2407 }
2408
2409 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2410 {
2411 IOReturn err = kIOReturnSuccess;
2412
2413 if( superMap) {
2414 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2415 } else {
2416
2417 LOCK;
2418 if( logical && addressMap
2419 && (!safeTask || (get_task_map(safeTask) != addressMap))
2420 && (0 == (options & kIOMapStatic)))
2421 {
2422 IOUnmapPages( addressMap, logical, length );
2423 if(!doRedirect && safeTask
2424 && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
2425 {
2426 err = vm_deallocate( addressMap, logical, length );
2427 err = memory->doMap( addressMap, &logical,
2428 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2429 offset, length );
2430 } else
2431 err = kIOReturnSuccess;
2432 #ifdef DEBUG
2433 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
2434 #endif
2435 }
2436 UNLOCK;
2437 }
2438
2439 if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2440 && safeTask
2441 && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
2442 memory->redirect(safeTask, doRedirect);
2443
2444 return( err );
2445 }
2446
2447 IOReturn _IOMemoryMap::unmap( void )
2448 {
2449 IOReturn err;
2450
2451 LOCK;
2452
2453 if( logical && addressMap && (0 == superMap)
2454 && (0 == (options & kIOMapStatic))) {
2455
2456 err = memory->doUnmap( addressMap, logical, length );
2457 vm_map_deallocate(addressMap);
2458 addressMap = 0;
2459
2460 } else
2461 err = kIOReturnSuccess;
2462
2463 logical = 0;
2464
2465 UNLOCK;
2466
2467 return( err );
2468 }
2469
2470 void _IOMemoryMap::taskDied( void )
2471 {
2472 LOCK;
2473 if( addressMap) {
2474 vm_map_deallocate(addressMap);
2475 addressMap = 0;
2476 }
2477 addressTask = 0;
2478 logical = 0;
2479 UNLOCK;
2480 }
2481
2482 // Overload the release mechanism. Every mapping must be a member
2483 // of its memory descriptor's _mappings set, so there are always
2484 // two references on a mapping. When either of these references
2485 // is released we need to free ourselves.
2486 void _IOMemoryMap::taggedRelease(const void *tag) const
2487 {
2488 LOCK;
2489 super::taggedRelease(tag, 2);
2490 UNLOCK;
2491 }
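/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * In practice a client therefore balances only the reference it was given;
 * the reference held by the descriptor's _mappings set is dropped from
 * free() via removeMapping().  Assuming md is an existing descriptor:
 *
 *   IOMemoryMap * map = md->map( kIOMapAnywhere );
 *   // ... use map->getVirtualAddress() ...
 *   map->release();    // last client reference: free() unmaps and
 *                      // detaches the mapping from md
 */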
2492
2493 void _IOMemoryMap::free()
2494 {
2495 unmap();
2496
2497 if( memory) {
2498 LOCK;
2499 memory->removeMapping( this);
2500 UNLOCK;
2501 memory->release();
2502 }
2503
2504 if (owner && (owner != memory))
2505 {
2506 LOCK;
2507 owner->removeMapping(this);
2508 UNLOCK;
2509 }
2510
2511 if( superMap)
2512 superMap->release();
2513
2514 if (redirUPL) {
2515 upl_commit(redirUPL, NULL, 0);
2516 upl_deallocate(redirUPL);
2517 }
2518
2519 super::free();
2520 }
2521
2522 IOByteCount _IOMemoryMap::getLength()
2523 {
2524 return( length );
2525 }
2526
2527 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2528 {
2529 return( logical);
2530 }
2531
2532 task_t _IOMemoryMap::getAddressTask()
2533 {
2534 if( superMap)
2535 return( superMap->getAddressTask());
2536 else
2537 return( addressTask);
2538 }
2539
2540 IOOptionBits _IOMemoryMap::getMapOptions()
2541 {
2542 return( options);
2543 }
2544
2545 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2546 {
2547 return( memory );
2548 }
2549
2550 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2551 IOMemoryDescriptor * owner,
2552 task_t task,
2553 IOVirtualAddress toAddress,
2554 IOOptionBits _options,
2555 IOByteCount _offset,
2556 IOByteCount _length )
2557 {
2558 _IOMemoryMap * mapping;
2559
2560 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
2561 return( 0 );
2562 if( options & kIOMapUnique)
2563 return( 0 );
2564 if( (options ^ _options) & kIOMapReadOnly)
2565 return( 0 );
2566 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2567 && ((options ^ _options) & kIOMapCacheMask))
2568 return( 0 );
2569
2570 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2571 return( 0 );
2572
2573 if( _offset < offset)
2574 return( 0 );
2575
2576 _offset -= offset;
2577
2578 if( (_offset + _length) > length)
2579 return( 0 );
2580
2581 if( (length == _length) && (!_offset)) {
2582 retain();
2583 mapping = this;
2584
2585 } else {
2586 mapping = new _IOMemoryMap;
2587 if( mapping
2588 && !mapping->initCompatible( owner, this, _offset, _length )) {
2589 mapping->release();
2590 mapping = 0;
2591 }
2592 }
2593
2594 return( mapping );
2595 }
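/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * copyCompatible() is how makeMapping() shares an existing mapping instead
 * of creating a second one.  Two identical requests against the same
 * descriptor (md hypothetical) typically return the same object, retained
 * once per call:
 *
 *   IOMemoryMap * a = md->map( 0 );
 *   IOMemoryMap * b = md->map( 0 );   // copyCompatible() matches 'a'
 *   // a == b here; each returned reference must still be released
 */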
2596
2597 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2598 IOPhysicalLength * _length)
2599 {
2600 IOPhysicalAddress address;
2601
2602 LOCK;
2603 address = memory->getPhysicalSegment( offset + _offset, _length );
2604 UNLOCK;
2605
2606 return( address );
2607 }
2608
2609 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2610
2611 #undef super
2612 #define super OSObject
2613
2614 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2615
2616 void IOMemoryDescriptor::initialize( void )
2617 {
2618 if( 0 == gIOMemoryLock)
2619 gIOMemoryLock = IORecursiveLockAlloc();
2620
2621 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2622 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2623 }
2624
2625 void IOMemoryDescriptor::free( void )
2626 {
2627 if( _mappings)
2628 _mappings->release();
2629
2630 super::free();
2631 }
2632
2633 IOMemoryMap * IOMemoryDescriptor::setMapping(
2634 task_t intoTask,
2635 IOVirtualAddress mapAddress,
2636 IOOptionBits options )
2637 {
2638 _IOMemoryMap * newMap;
2639
2640 newMap = new _IOMemoryMap;
2641
2642 LOCK;
2643
2644 if( newMap
2645 && !newMap->initWithDescriptor( this, intoTask, mapAddress,
2646 options | kIOMapStatic, 0, getLength() )) {
2647 newMap->release();
2648 newMap = 0;
2649 }
2650
2651 addMapping( newMap);
2652
2653 UNLOCK;
2654
2655 return( newMap);
2656 }
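/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * setMapping() records an address that is already mapped -- kIOMapStatic is
 * forced above, so no new VM entry is created and a later unmap() leaves the
 * region alone.  A driver publishing a pre-existing kernel mapping (names
 * hypothetical) might use:
 *
 *   IOMemoryMap * m = md->setMapping( kernel_task, existingKernelVA, 0 );
 */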
2657
2658 IOMemoryMap * IOMemoryDescriptor::map(
2659 IOOptionBits options )
2660 {
2661
2662 return( makeMapping( this, kernel_task, 0,
2663 options | kIOMapAnywhere,
2664 0, getLength() ));
2665 }
2666
2667 IOMemoryMap * IOMemoryDescriptor::map(
2668 task_t intoTask,
2669 IOVirtualAddress toAddress,
2670 IOOptionBits options,
2671 IOByteCount offset,
2672 IOByteCount length )
2673 {
2674 if( 0 == length)
2675 length = getLength();
2676
2677 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2678 }
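/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The two map() entry points above are the normal way drivers obtain
 * mappings of a descriptor.  Assuming md and clientTask are existing
 * objects (hypothetical names):
 *
 *   // whole descriptor, anywhere in the kernel map
 *   IOMemoryMap * kmap = md->map( 0 );
 *
 *   // first page only, read-only, anywhere in the client task's map
 *   IOMemoryMap * umap = md->map( clientTask, 0,
 *                                 kIOMapAnywhere | kIOMapReadOnly,
 *                                 0, page_size );
 *   if (umap) {
 *       IOVirtualAddress uva = umap->getVirtualAddress();
 *       // ... hand uva to the client; umap->release() when finished ...
 *   }
 */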
2679
2680 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2681 IOOptionBits options,
2682 IOByteCount offset)
2683 {
2684 IOReturn err = kIOReturnSuccess;
2685 IOMemoryDescriptor * physMem = 0;
2686
2687 LOCK;
2688
2689 if (logical && addressMap) do
2690 {
2691 if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2692 {
2693 physMem = memory;
2694 physMem->retain();
2695 }
2696
2697 if (!redirUPL)
2698 {
2699 vm_size_t size = length;
2700 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2701 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2702 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
2703 NULL, NULL,
2704 &flags))
2705 redirUPL = 0;
2706
2707 if (physMem)
2708 {
2709 IOUnmapPages( addressMap, logical, length );
2710 physMem->redirect(0, true);
2711 }
2712 }
2713
2714 if (newBackingMemory)
2715 {
2716 if (newBackingMemory != memory)
2717 {
2718 if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
2719 options | kIOMapUnique | kIOMapReference,
2720 offset, length))
2721 err = kIOReturnError;
2722 }
2723 if (redirUPL)
2724 {
2725 upl_commit(redirUPL, NULL, 0);
2726 upl_deallocate(redirUPL);
2727 redirUPL = 0;
2728 }
2729 if (physMem)
2730 physMem->redirect(0, false);
2731 }
2732 }
2733 while (false);
2734
2735 UNLOCK;
2736
2737 if (physMem)
2738 physMem->release();
2739
2740 return (err);
2741 }
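/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * This overload retargets an existing mapping at different backing memory
 * without changing the client's virtual address: makeMapping() is called
 * with kIOMapUnique | kIOMapReference, which re-enters doMap() above to
 * transpose the UPLs and swap the _memEntry fields.  An internal caller
 * holding the mapping (names hypothetical) would use roughly:
 *
 *   err = map->redirect( newBackingMD, map->getMapOptions(), 0 );
 */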
2742
2743 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2744 IOMemoryDescriptor * owner,
2745 task_t intoTask,
2746 IOVirtualAddress toAddress,
2747 IOOptionBits options,
2748 IOByteCount offset,
2749 IOByteCount length )
2750 {
2751 IOMemoryDescriptor * mapDesc = 0;
2752 _IOMemoryMap * mapping = 0;
2753 OSIterator * iter;
2754
2755 LOCK;
2756
2757 do
2758 {
2759 if (kIOMapUnique & options)
2760 {
2761 IOPhysicalAddress phys;
2762 IOByteCount physLen;
2763
2764 if (owner != this)
2765 continue;
2766
2767 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2768 {
2769 phys = getPhysicalSegment(offset, &physLen);
2770 if (!phys || (physLen < length))
2771 continue;
2772
2773 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
2774 phys, length, _direction);
2775 if (!mapDesc)
2776 continue;
2777 offset = 0;
2778 }
2779 else
2780 {
2781 mapDesc = this;
2782 mapDesc->retain();
2783 }
2784
2785 if (kIOMapReference & options)
2786 {
2787 mapping = (_IOMemoryMap *) toAddress;
2788 mapping->retain();
2789
2790 #if 1
2791 uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
2792 pageOffset1 -= trunc_page_32( pageOffset1 );
2793
2794 uint32_t pageOffset2 = mapping->getVirtualAddress();
2795 pageOffset2 -= trunc_page_32( pageOffset2 );
2796
2797 if (pageOffset1 != pageOffset2)
2798 IOLog("::redirect can't map offset %x to addr %x\n",
2799 pageOffset1, mapping->getVirtualAddress());
2800 #endif
2801
2802
2803 if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2804 offset, length ))
2805 {
2806 #ifdef DEBUG
2807 IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
2808 #endif
2809 }
2810
2811 if (mapping->owner)
2812 mapping->owner->removeMapping(mapping);
2813 continue;
2814 }
2815 }
2816 else
2817 {
2818 // look for an existing mapping
2819 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2820
2821 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2822
2823 if( (mapping = mapping->copyCompatible(
2824 owner, intoTask, toAddress,
2825 options | kIOMapReference,
2826 offset, length )))
2827 break;
2828 }
2829 iter->release();
2830 }
2831
2832
2833 if (mapping)
2834 mapping->retain();
2835
2836 if( mapping || (options & kIOMapReference))
2837 continue;
2838
2839 mapDesc = owner;
2840 mapDesc->retain();
2841 }
2842 owner = this;
2843
2844 mapping = new _IOMemoryMap;
2845 if( mapping
2846 && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2847 offset, length )) {
2848 #ifdef DEBUG
2849 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2850 #endif
2851 mapping->release();
2852 mapping = 0;
2853 }
2854
2855 if (mapping)
2856 mapping->retain();
2857
2858 } while( false );
2859
2860 if (mapping)
2861 {
2862 mapping->owner = owner;
2863 owner->addMapping( mapping);
2864 mapping->release();
2865 }
2866
2867 UNLOCK;
2868
2869 if (mapDesc)
2870 mapDesc->release();
2871
2872 return( mapping);
2873 }
2874
2875 void IOMemoryDescriptor::addMapping(
2876 IOMemoryMap * mapping )
2877 {
2878 if( mapping) {
2879 if( 0 == _mappings)
2880 _mappings = OSSet::withCapacity(1);
2881 if( _mappings )
2882 _mappings->setObject( mapping );
2883 }
2884 }
2885
2886 void IOMemoryDescriptor::removeMapping(
2887 IOMemoryMap * mapping )
2888 {
2889 if( _mappings)
2890 _mappings->removeObject( mapping);
2891 }
2892
2893 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2894
2895 #undef super
2896 #define super IOMemoryDescriptor
2897
2898 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2899
2900 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2901
2902 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2903 IOByteCount offset, IOByteCount length,
2904 IODirection direction )
2905 {
2906 if( !parent)
2907 return( false);
2908
2909 if( (offset + length) > parent->getLength())
2910 return( false);
2911
2912 /*
2913 * We can check the _parent instance variable before having ever set it
2914 * to an initial value because I/O Kit guarantees that all our instance
2915 * variables are zeroed on an object's allocation.
2916 */
2917
2918 if( !_parent) {
2919 if( !super::init())
2920 return( false );
2921 } else {
2922 /*
2923 * An existing memory descriptor is being retargeted to
2924 * point to somewhere else. Clean up our present state.
2925 */
2926
2927 _parent->release();
2928 _parent = 0;
2929 }
2930
2931 parent->retain();
2932 _parent = parent;
2933 _start = offset;
2934 _length = length;
2935 _direction = direction;
2936 _tag = parent->getTag();
2937
2938 return( true );
2939 }
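/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * A sub-range descriptor is only a window onto its parent, which stays
 * retained for the child's lifetime.  Constructed directly (parentMD,
 * byteOffset and byteLength are hypothetical names) it looks like:
 *
 *   IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
 *   if (sub && !sub->initSubRange( parentMD, byteOffset, byteLength,
 *                                  parentMD->getDirection() )) {
 *       sub->release();
 *       sub = 0;
 *   }
 *
 * Drivers normally reach this through the IOMemoryDescriptor::withSubRange()
 * factory, which performs the same init and release-on-failure dance.
 */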
2940
2941 void IOSubMemoryDescriptor::free( void )
2942 {
2943 if( _parent)
2944 _parent->release();
2945
2946 super::free();
2947 }
2948
2949
2950 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2951 IOByteCount * length )
2952 {
2953 IOPhysicalAddress address;
2954 IOByteCount actualLength;
2955
2956 assert(offset <= _length);
2957
2958 if( length)
2959 *length = 0;
2960
2961 if( offset >= _length)
2962 return( 0 );
2963
2964 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2965
2966 if( address && length)
2967 *length = min( _length - offset, actualLength );
2968
2969 return( address );
2970 }
2971
2972
2973 IOReturn IOSubMemoryDescriptor::doMap(
2974 vm_map_t addressMap,
2975 IOVirtualAddress * atAddress,
2976 IOOptionBits options,
2977 IOByteCount sourceOffset,
2978 IOByteCount length )
2979 {
2980 if( sourceOffset >= _length)
2981 return( kIOReturnOverrun );
2982 return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
2983 }
2984
2985 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2986 IOByteCount * length )
2987 {
2988 IOPhysicalAddress address;
2989 IOByteCount actualLength;
2990
2991 assert(offset <= _length);
2992
2993 if( length)
2994 *length = 0;
2995
2996 if( offset >= _length)
2997 return( 0 );
2998
2999 address = _parent->getSourceSegment( offset + _start, &actualLength );
3000
3001 if( address && length)
3002 *length = min( _length - offset, actualLength );
3003
3004 return( address );
3005 }
3006
3007 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3008 IOByteCount * lengthOfSegment)
3009 {
3010 return( 0 );
3011 }
3012
3013 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3014 void * bytes, IOByteCount length)
3015 {
3016 IOByteCount byteCount;
3017
3018 assert(offset <= _length);
3019
3020 if( offset >= _length)
3021 return( 0 );
3022
3023 LOCK;
3024 byteCount = _parent->readBytes( _start + offset, bytes,
3025 min(length, _length - offset) );
3026 UNLOCK;
3027
3028 return( byteCount );
3029 }
3030
3031 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3032 const void* bytes, IOByteCount length)
3033 {
3034 IOByteCount byteCount;
3035
3036 assert(offset <= _length);
3037
3038 if( offset >= _length)
3039 return( 0 );
3040
3041 LOCK;
3042 byteCount = _parent->writeBytes( _start + offset, bytes,
3043 min(length, _length - offset) );
3044 UNLOCK;
3045
3046 return( byteCount );
3047 }
3048
3049 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3050 IOOptionBits * oldState )
3051 {
3052 IOReturn err;
3053
3054 LOCK;
3055 err = _parent->setPurgeable( newState, oldState );
3056 UNLOCK;
3057
3058 return( err );
3059 }
3060
3061 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3062 IOByteCount offset, IOByteCount length )
3063 {
3064 IOReturn err;
3065
3066 assert(offset <= _length);
3067
3068 if( offset >= _length)
3069 return( kIOReturnOverrun );
3070
3071 LOCK;
3072 err = _parent->performOperation( options, _start + offset,
3073 min(length, _length - offset) );
3074 UNLOCK;
3075
3076 return( err );
3077 }
3078
3079 IOReturn IOSubMemoryDescriptor::prepare(
3080 IODirection forDirection)
3081 {
3082 IOReturn err;
3083
3084 LOCK;
3085 err = _parent->prepare( forDirection);
3086 UNLOCK;
3087
3088 return( err );
3089 }
3090
3091 IOReturn IOSubMemoryDescriptor::complete(
3092 IODirection forDirection)
3093 {
3094 IOReturn err;
3095
3096 LOCK;
3097 err = _parent->complete( forDirection);
3098 UNLOCK;
3099
3100 return( err );
3101 }
3102
3103 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3104 IOMemoryDescriptor * owner,
3105 task_t intoTask,
3106 IOVirtualAddress toAddress,
3107 IOOptionBits options,
3108 IOByteCount offset,
3109 IOByteCount length )
3110 {
3111 IOMemoryMap * mapping = 0;
3112
3113 if (!(kIOMapUnique & options))
3114 mapping = (IOMemoryMap *) _parent->makeMapping(
3115 _parent, intoTask,
3116 toAddress - (_start + offset),
3117 options | kIOMapReference,
3118 _start + offset, length );
3119
3120 if( !mapping)
3121 mapping = (IOMemoryMap *) _parent->makeMapping(
3122 _parent, intoTask,
3123 toAddress,
3124 options, _start + offset, length );
3125
3126 if( !mapping)
3127 mapping = super::makeMapping( owner, intoTask, toAddress, options,
3128 offset, length );
3129
3130 return( mapping );
3131 }
3132
3133 /* ick */
3134
3135 bool
3136 IOSubMemoryDescriptor::initWithAddress(void * address,
3137 IOByteCount length,
3138 IODirection direction)
3139 {
3140 return( false );
3141 }
3142
3143 bool
3144 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3145 IOByteCount length,
3146 IODirection direction,
3147 task_t task)
3148 {
3149 return( false );
3150 }
3151
3152 bool
3153 IOSubMemoryDescriptor::initWithPhysicalAddress(
3154 IOPhysicalAddress address,
3155 IOByteCount length,
3156 IODirection direction )
3157 {
3158 return( false );
3159 }
3160
3161 bool
3162 IOSubMemoryDescriptor::initWithRanges(
3163 IOVirtualRange * ranges,
3164 UInt32 withCount,
3165 IODirection direction,
3166 task_t task,
3167 bool asReference)
3168 {
3169 return( false );
3170 }
3171
3172 bool
3173 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3174 UInt32 withCount,
3175 IODirection direction,
3176 bool asReference)
3177 {
3178 return( false );
3179 }
3180
3181 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3182
3183 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3184 {
3185 OSSymbol const *keys[2];
3186 OSObject *values[2];
3187 struct SerData {
3188 user_addr_t address;
3189 user_size_t length;
3190 } *vcopy;
3191 unsigned int index, nRanges;
3192 bool result;
3193
3194 IOOptionBits type = _flags & kIOMemoryTypeMask;
3195
3196 if (s == NULL) return false;
3197 if (s->previouslySerialized(this)) return true;
3198
3199 // Pretend we are an array.
3200 if (!s->addXMLStartTag(this, "array")) return false;
3201
3202 nRanges = _rangesCount;
3203 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3204 if (vcopy == 0) return false;
3205
3206 keys[0] = OSSymbol::withCString("address");
3207 keys[1] = OSSymbol::withCString("length");
3208
3209 result = false;
3210 values[0] = values[1] = 0;
3211
3212 // From this point on it is safe to jump to the bail: label on error.
3213
3214 // Copy the volatile data so we don't have to allocate memory
3215 // while the lock is held.
3216 LOCK;
3217 if (nRanges == _rangesCount) {
3218 Ranges vec = _ranges;
3219 for (index = 0; index < nRanges; index++) {
3220 user_addr_t addr; IOByteCount len;
3221 getAddrLenForInd(addr, len, type, vec, index);
3222 vcopy[index].address = addr;
3223 vcopy[index].length = len;
3224 }
3225 } else {
3226 // The descriptor changed out from under us. Give up.
3227 UNLOCK;
3228 result = false;
3229 goto bail;
3230 }
3231 UNLOCK;
3232
3233 for (index = 0; index < nRanges; index++)
3234 {
3235 user_addr_t addr = vcopy[index].address;
3236 IOByteCount len = (IOByteCount) vcopy[index].length;
3237 values[0] =
3238 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3239 if (values[0] == 0) {
3240 result = false;
3241 goto bail;
3242 }
3243 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3244 if (values[1] == 0) {
3245 result = false;
3246 goto bail;
3247 }
3248 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3249 if (dict == 0) {
3250 result = false;
3251 goto bail;
3252 }
3253 values[0]->release();
3254 values[1]->release();
3255 values[0] = values[1] = 0;
3256
3257 result = dict->serialize(s);
3258 dict->release();
3259 if (!result) {
3260 goto bail;
3261 }
3262 }
3263 result = s->addXMLEndTag("array");
3264
3265 bail:
3266 if (values[0])
3267 values[0]->release();
3268 if (values[1])
3269 values[1]->release();
3270 if (keys[0])
3271 keys[0]->release();
3272 if (keys[1])
3273 keys[1]->release();
3274 if (vcopy)
3275 IOFree(vcopy, sizeof(SerData) * nRanges); // must match the IOMalloc size above
3276 return result;
3277 }
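/*
 * Editor's note -- illustrative sketch, not part of the original source.
 * The serializer above emits one dictionary per range inside an array, so
 * the XML produced for a two-range descriptor has roughly this shape
 * (values elided):
 *
 *   <array>
 *     <dict>
 *       <key>address</key> <integer>...</integer>
 *       <key>length</key>  <integer>...</integer>
 *     </dict>
 *     <dict>
 *       <key>address</key> <integer>...</integer>
 *       <key>length</key>  <integer>...</integer>
 *     </dict>
 *   </array>
 */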
3278
3279 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3280 {
3281 if (!s) {
3282 return (false);
3283 }
3284 if (s->previouslySerialized(this)) return true;
3285
3286 // Pretend we are a dictionary.
3287 // We must duplicate the functionality of OSDictionary here
3288 // because otherwise object references will not work;
3289 // they are based on the value of the object passed to
3290 // previouslySerialized and addXMLStartTag.
3291
3292 if (!s->addXMLStartTag(this, "dict")) return false;
3293
3294 char const *keys[3] = {"offset", "length", "parent"};
3295
3296 OSObject *values[3];
3297 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3298 if (values[0] == 0)
3299 return false;
3300 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3301 if (values[1] == 0) {
3302 values[0]->release();
3303 return false;
3304 }
3305 values[2] = _parent;
3306
3307 bool result = true;
3308 for (int i=0; i<3; i++) {
3309 if (!s->addString("<key>") ||
3310 !s->addString(keys[i]) ||
3311 !s->addXMLEndTag("key") ||
3312 !values[i]->serialize(s)) {
3313 result = false;
3314 break;
3315 }
3316 }
3317 values[0]->release();
3318 values[1]->release();
3319 if (!result) {
3320 return false;
3321 }
3322
3323 return s->addXMLEndTag("dict");
3324 }
3325
3326 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3327
3328 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3329 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3330 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3331 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3332 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3333 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3334 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3335 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3336 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3337 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3338 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3339 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3340 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3341 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3342 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3343 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3344
3345 /* ex-inline function implementation */
3346 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
3347 { return( getPhysicalSegment( 0, 0 )); }