1 /*
2 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
25 *
26 * HISTORY
27 *
28 */
29 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
30 #include <sys/cdefs.h>
31
32 #include <IOKit/assert.h>
33 #include <IOKit/system.h>
34 #include <IOKit/IOLib.h>
35 #include <IOKit/IOMemoryDescriptor.h>
36 #include <IOKit/IOMapper.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38
39 #include <IOKit/IOKitDebug.h>
40
41 #include "IOKitKernelInternal.h"
42
43 #include <libkern/c++/OSContainers.h>
44 #include <libkern/c++/OSDictionary.h>
45 #include <libkern/c++/OSArray.h>
46 #include <libkern/c++/OSSymbol.h>
47 #include <libkern/c++/OSNumber.h>
48
49 #include <sys/uio.h>
50
51 __BEGIN_DECLS
52 #include <vm/pmap.h>
53 #include <vm/vm_pageout.h>
54 #include <vm/vm_shared_memory_server.h>
55 #include <mach/memory_object_types.h>
56 #include <device/device_port.h>
57
58 #ifndef i386
59 #include <mach/vm_prot.h>
60 #include <vm/vm_fault.h>
61 struct phys_entry *pmap_find_physentry(ppnum_t pa);
62 #endif
63
64 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
65 void ipc_port_release_send(ipc_port_t port);
66
67 /* Copy between a physical page and a virtual address in the given vm_map */
68 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
69
70 memory_object_t
71 device_pager_setup(
72 memory_object_t pager,
73 int device_handle,
74 vm_size_t size,
75 int flags);
76 void
77 device_pager_deallocate(
78 memory_object_t);
79 kern_return_t
80 device_pager_populate_object(
81 memory_object_t pager,
82 vm_object_offset_t offset,
83 ppnum_t phys_addr,
84 vm_size_t size);
85 kern_return_t
86 memory_object_iopl_request(
87 ipc_port_t port,
88 memory_object_offset_t offset,
89 vm_size_t *upl_size,
90 upl_t *upl_ptr,
91 upl_page_info_array_t user_page_list,
92 unsigned int *page_list_count,
93 int *flags);
94
95 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
96
97 __END_DECLS
98
99 #define kIOMaximumMappedIOByteCount (512*1024*1024)
100
101 static IOMapper * gIOSystemMapper;
102 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
103
104 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105
106 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
107
108 #define super IOMemoryDescriptor
109
110 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
111
112 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
113
114 static IORecursiveLock * gIOMemoryLock;
115
116 #define LOCK IORecursiveLockLock( gIOMemoryLock)
117 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
118 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
119 #define WAKEUP \
120 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
121
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123
124 class _IOMemoryMap : public IOMemoryMap
125 {
126 OSDeclareDefaultStructors(_IOMemoryMap)
127 public:
128 IOMemoryDescriptor * memory;
129 IOMemoryMap * superMap;
130 IOByteCount offset;
131 IOByteCount length;
132 IOVirtualAddress logical;
133 task_t addressTask;
134 vm_map_t addressMap;
135 IOOptionBits options;
136 upl_t redirUPL;
137 ipc_port_t redirEntry;
138 IOMemoryDescriptor * owner;
139
140 protected:
141 virtual void taggedRelease(const void *tag = 0) const;
142 virtual void free();
143
144 public:
145
146 // IOMemoryMap methods
147 virtual IOVirtualAddress getVirtualAddress();
148 virtual IOByteCount getLength();
149 virtual task_t getAddressTask();
150 virtual IOMemoryDescriptor * getMemoryDescriptor();
151 virtual IOOptionBits getMapOptions();
152
153 virtual IOReturn unmap();
154 virtual void taskDied();
155
156 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
157 IOOptionBits options,
158 IOByteCount offset = 0);
159
160 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
161 IOByteCount * length);
162
163 // for IOMemoryDescriptor use
164 _IOMemoryMap * copyCompatible(
165 IOMemoryDescriptor * owner,
166 task_t intoTask,
167 IOVirtualAddress toAddress,
168 IOOptionBits options,
169 IOByteCount offset,
170 IOByteCount length );
171
172 bool initCompatible(
173 IOMemoryDescriptor * memory,
174 IOMemoryMap * superMap,
175 IOByteCount offset,
176 IOByteCount length );
177
178 bool initWithDescriptor(
179 IOMemoryDescriptor * memory,
180 task_t intoTask,
181 IOVirtualAddress toAddress,
182 IOOptionBits options,
183 IOByteCount offset,
184 IOByteCount length );
185
186 IOReturn redirect(
187 task_t intoTask, bool redirect );
188 };
189
190 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
191
192 // Some data structures and accessor macros used by the initWithOptions
193 // function
194
195 enum ioPLBlockFlags {
196 kIOPLOnDevice = 0x00000001,
197 kIOPLExternUPL = 0x00000002,
198 };
199
200 struct typePersMDData
201 {
202 const IOGeneralMemoryDescriptor *fMD;
203 ipc_port_t fMemEntry;
204 };
205
206 struct ioPLBlock {
207 upl_t fIOPL;
208 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
209 vm_offset_t fPageInfo; // Pointer to page list or index into it
210 ppnum_t fMappedBase; // Page number of first page in this iopl
211 unsigned int fPageOffset; // Offset within first page of iopl
212 unsigned int fFlags; // Flags
213 };
214
215 struct ioGMDData {
216 IOMapper *fMapper;
217 unsigned int fPageCnt;
218 upl_page_info_t fPageList[];
219 ioPLBlock fBlocks[];
220 };
221
222 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
223 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
224 #define getNumIOPL(osd, d) \
225 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
226 #define getPageList(d) (&(d->fPageList[0]))
227 #define computeDataSize(p, u) \
228 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
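// Rough layout of the OSData buffer the macros above index into (a sketch
// inferred from computeDataSize(); the element counts are whatever the
// descriptor was built with, not fixed values):
//
//     [ ioGMDData header | fPageList[0 .. fPageCnt-1] | ioPLBlock[0 .. n-1] ]
//
// getDataP() returns the header, getPageList() the upl_page_info_t array,
// getIOPLList() the ioPLBlock array that starts immediately after the page
// list, and getNumIOPL() derives the block count from the OSData's length.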
229
230
231 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
232
233 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
234
235
236 extern "C" {
237
238 kern_return_t device_data_action(
239 int device_handle,
240 ipc_port_t device_pager,
241 vm_prot_t protection,
242 vm_object_offset_t offset,
243 vm_size_t size)
244 {
245 struct ExpansionData {
246 void * devicePager;
247 unsigned int pagerContig:1;
248 unsigned int unused:31;
249 IOMemoryDescriptor * memory;
250 };
251 kern_return_t kr;
252 ExpansionData * ref = (ExpansionData *) device_handle;
253 IOMemoryDescriptor * memDesc;
254
255 LOCK;
256 memDesc = ref->memory;
257 if( memDesc)
258 {
259 memDesc->retain();
260 kr = memDesc->handleFault( device_pager, 0, 0,
261 offset, size, kIOMapDefaultCache /*?*/);
262 memDesc->release();
263 }
264 else
265 kr = KERN_ABORTED;
266 UNLOCK;
267
268 return( kr );
269 }
270
271 kern_return_t device_close(
272 int device_handle)
273 {
274 struct ExpansionData {
275 void * devicePager;
276 unsigned int pagerContig:1;
277 unsigned int unused:31;
278 IOMemoryDescriptor * memory;
279 };
280 ExpansionData * ref = (ExpansionData *) device_handle;
281
282 IODelete( ref, ExpansionData, 1 );
283
284 return( kIOReturnSuccess );
285 }
286 }; // end extern "C"
287
288 // Note this inline function uses C++ reference arguments to return values
289 // This means that pointers are not passed and NULL checks are unnecessary,
290 // since a NULL reference is illegal.
291 static inline void
292 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
293 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
294 {
295 assert(kIOMemoryTypePhysical == type || kIOMemoryTypeUIO == type
296 || kIOMemoryTypeVirtual == type);
297 if (kIOMemoryTypeUIO == type) {
298 user_size_t us;
299 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
300 }
301 else {
302 IOVirtualRange cur = r.v[ind];
303 addr = cur.address;
304 len = cur.length;
305 }
306 }
307
308 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
309
310 /*
311 * withAddress:
312 *
313 * Create a new IOMemoryDescriptor. The buffer is a virtual address
314 * relative to the specified task. If no task is supplied, the kernel
315 * task is implied.
316 */
317 IOMemoryDescriptor *
318 IOMemoryDescriptor::withAddress(void * address,
319 IOByteCount length,
320 IODirection direction)
321 {
322 return IOMemoryDescriptor::
323 withAddress((vm_address_t) address, length, direction, kernel_task);
324 }
325
326 IOMemoryDescriptor *
327 IOMemoryDescriptor::withAddress(vm_address_t address,
328 IOByteCount length,
329 IODirection direction,
330 task_t task)
331 {
332 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
333 if (that)
334 {
335 if (that->initWithAddress(address, length, direction, task))
336 return that;
337
338 that->release();
339 }
340 return 0;
341 }
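// Illustrative caller-side sketch (hypothetical names such as "buffer" and
// "bufferLen" are assumptions, not part of this file): a kernel client
// typically wraps a virtual buffer and brackets the I/O with prepare() and
// complete().
//
//     IOMemoryDescriptor * md =
//         IOMemoryDescriptor::withAddress(buffer, bufferLen, kIODirectionOut);
//     if (md && (kIOReturnSuccess == md->prepare())) {
//         // ... hand the physical segments to the hardware ...
//         md->complete();
//     }
//     if (md)
//         md->release();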
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withPhysicalAddress(
345 IOPhysicalAddress address,
346 IOByteCount length,
347 IODirection direction )
348 {
349 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
350 if (self
351 && !self->initWithPhysicalAddress(address, length, direction)) {
352 self->release();
353 return 0;
354 }
355
356 return self;
357 }
358
359 IOMemoryDescriptor *
360 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
361 UInt32 withCount,
362 IODirection direction,
363 task_t task,
364 bool asReference)
365 {
366 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
367 if (that)
368 {
369 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
370 return that;
371
372 that->release();
373 }
374 return 0;
375 }
376
377
378 /*
379 * withRanges:
380 *
381 * Create a new IOMemoryDescriptor. The buffer is made up of several
382 * virtual address ranges, from a given task.
383 *
384 * Passing the ranges as a reference will avoid an extra allocation.
385 */
386 IOMemoryDescriptor *
387 IOMemoryDescriptor::withOptions(void * buffers,
388 UInt32 count,
389 UInt32 offset,
390 task_t task,
391 IOOptionBits opts,
392 IOMapper * mapper)
393 {
394 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
395
396 if (self
397 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
398 {
399 self->release();
400 return 0;
401 }
402
403 return self;
404 }
405
406 // Can't be left abstract, but this should never be used directly.
407 bool IOMemoryDescriptor::initWithOptions(void * buffers,
408 UInt32 count,
409 UInt32 offset,
410 task_t task,
411 IOOptionBits options,
412 IOMapper * mapper)
413 {
414 // @@@ gvdl: Should I panic?
415 panic("IOMD::initWithOptions called\n");
416 return 0;
417 }
418
419 IOMemoryDescriptor *
420 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
421 UInt32 withCount,
422 IODirection direction,
423 bool asReference)
424 {
425 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
426 if (that)
427 {
428 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
429 return that;
430
431 that->release();
432 }
433 return 0;
434 }
435
436 IOMemoryDescriptor *
437 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
438 IOByteCount offset,
439 IOByteCount length,
440 IODirection direction)
441 {
442 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
443
444 if (self && !self->initSubRange(of, offset, length, direction)) {
445 self->release();
446 self = 0;
447 }
448 return self;
449 }
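// Illustrative sketch of carving a window out of an existing descriptor
// (the parent descriptor "parentMD" and the offsets are assumptions): the
// sub-descriptor references the parent's memory, no data is copied.
//
//     IOMemoryDescriptor * sub =
//         IOMemoryDescriptor::withSubRange(parentMD, 4096, 8192, kIODirectionIn);
//     if (sub) {
//         // ... use it like any other descriptor ...
//         sub->release();
//     }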
450
451 IOMemoryDescriptor * IOMemoryDescriptor::
452 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
453 {
454 IOGeneralMemoryDescriptor *origGenMD =
455 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
456
457 if (origGenMD)
458 return IOGeneralMemoryDescriptor::
459 withPersistentMemoryDescriptor(origGenMD);
460 else
461 return 0;
462 }
463
464 IOMemoryDescriptor * IOGeneralMemoryDescriptor::
465 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
466 {
467 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
468
469 if (!sharedMem)
470 return 0;
471
472 if (sharedMem == originalMD->_memEntry) {
473 originalMD->retain(); // Add a new reference to ourselves
474 ipc_port_release_send(sharedMem); // Remove extra send right
475 return originalMD;
476 }
477
478 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
479 typePersMDData initData = { originalMD, sharedMem };
480
481 if (self
482 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
483 self->release();
484 self = 0;
485 }
486 return self;
487 }
488
489 void *IOGeneralMemoryDescriptor::createNamedEntry()
490 {
491 kern_return_t error;
492 ipc_port_t sharedMem;
493
494 IOOptionBits type = _flags & kIOMemoryTypeMask;
495
496 user_addr_t range0Addr;
497 IOByteCount range0Len;
498 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
499 range0Addr = trunc_page_64(range0Addr);
500
501 vm_size_t size = ptoa_32(_pages);
502 vm_address_t kernelPage = (vm_address_t) range0Addr;
503
504 vm_map_t theMap = ((_task == kernel_task)
505 && (kIOMemoryBufferPageable & _flags))
506 ? IOPageableMapForAddress(kernelPage)
507 : get_task_map(_task);
508
509 memory_object_size_t actualSize = size;
510 vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
511 if (_memEntry)
512 prot |= MAP_MEM_NAMED_REUSE;
513
514 error = mach_make_memory_entry_64(theMap,
515 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
516
517 if (KERN_SUCCESS == error) {
518 if (actualSize == size) {
519 return sharedMem;
520 } else {
521 #if IOASSERT
522 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
523 (UInt64)range0Addr, (UInt32)actualSize, size);
524 #endif
525 ipc_port_release_send( sharedMem );
526 }
527 }
528
529 return MACH_PORT_NULL;
530 }
531
532 /*
533 * initWithAddress:
534 *
535 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
536 * relative to the specified task. If no task is supplied, the kernel
537 * task is implied.
538 *
539 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
540 * initWithRanges again on an existing instance -- note this behavior
541 * is not commonly supported in other I/O Kit classes, although it is
542 * supported here.
543 */
544 bool
545 IOGeneralMemoryDescriptor::initWithAddress(void * address,
546 IOByteCount withLength,
547 IODirection withDirection)
548 {
549 _singleRange.v.address = (vm_address_t) address;
550 _singleRange.v.length = withLength;
551
552 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
553 }
554
555 bool
556 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
557 IOByteCount withLength,
558 IODirection withDirection,
559 task_t withTask)
560 {
561 _singleRange.v.address = address;
562 _singleRange.v.length = withLength;
563
564 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
565 }
566
567 bool
568 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
569 IOPhysicalAddress address,
570 IOByteCount withLength,
571 IODirection withDirection )
572 {
573 _singleRange.p.address = address;
574 _singleRange.p.length = withLength;
575
576 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
577 }
578
579 bool
580 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
581 IOPhysicalRange * ranges,
582 UInt32 count,
583 IODirection direction,
584 bool reference)
585 {
586 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
587
588 if (reference)
589 mdOpts |= kIOMemoryAsReference;
590
591 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
592 }
593
594 bool
595 IOGeneralMemoryDescriptor::initWithRanges(
596 IOVirtualRange * ranges,
597 UInt32 count,
598 IODirection direction,
599 task_t task,
600 bool reference)
601 {
602 IOOptionBits mdOpts = direction;
603
604 if (reference)
605 mdOpts |= kIOMemoryAsReference;
606
607 if (task) {
608 mdOpts |= kIOMemoryTypeVirtual;
609
610 // Auto-prepare if this is a kernel memory descriptor as very few
611 // clients bother to prepare() kernel memory.
612 // But it was not enforced so what are you going to do?
613 if (task == kernel_task)
614 mdOpts |= kIOMemoryAutoPrepare;
615 }
616 else
617 mdOpts |= kIOMemoryTypePhysical;
618
619 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
620 }
621
622 /*
623 * initWithOptions:
624 *
625 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
626 * from a given task, several physical ranges, an UPL from the ubc
627 * system or a uio (may be 64bit) from the BSD subsystem.
628 *
629 * Passing the ranges as a reference will avoid an extra allocation.
630 *
631 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
632 * existing instance -- note this behavior is not commonly supported in other
633 * I/O Kit classes, although it is supported here.
634 */
635
636 bool
637 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
638 UInt32 count,
639 UInt32 offset,
640 task_t task,
641 IOOptionBits options,
642 IOMapper * mapper)
643 {
644 IOOptionBits type = options & kIOMemoryTypeMask;
645
646 // Grab the original MD's configuration data to initialise the
647 // arguments to this function.
648 if (kIOMemoryTypePersistentMD == type) {
649
650 typePersMDData *initData = (typePersMDData *) buffers;
651 const IOGeneralMemoryDescriptor *orig = initData->fMD;
652 ioGMDData *dataP = getDataP(orig->_memoryEntries);
653
654 // Only accept persistent memory descriptors with valid dataP data.
655 assert(orig->_rangesCount == 1);
656 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
657 return false;
658
659 _memEntry = initData->fMemEntry; // Grab the new named entry
660 options = orig->_flags | kIOMemoryAsReference;
661 _singleRange = orig->_singleRange; // Initialise our range
662 buffers = &_singleRange;
663 count = 1;
664
665 // Now grab the original task and whatever mapper was previously used
666 task = orig->_task;
667 mapper = dataP->fMapper;
668
669 // We are ready to go through the original initialisation now
670 }
671
672 switch (type) {
673 case kIOMemoryTypeUIO:
674 case kIOMemoryTypeVirtual:
675 assert(task);
676 if (!task)
677 return false;
678 else
679 break;
680
681 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
682 mapper = kIOMapperNone;
683
684 case kIOMemoryTypeUPL:
685 assert(!task);
686 break;
687 default:
688 return false; /* bad argument */
689 }
690
691 assert(buffers);
692 assert(count);
693
694 /*
695 * We can check the _initialized instance variable before having ever set
696 * it to an initial value because I/O Kit guarantees that all our instance
697 * variables are zeroed on an object's allocation.
698 */
699
700 if (_initialized) {
701 /*
702 * An existing memory descriptor is being retargeted to point to
703 * somewhere else. Clean up our present state.
704 */
705
706 while (_wireCount)
707 complete();
708 if (_kernPtrAligned)
709 unmapFromKernel();
710 if (_ranges.v && _rangesIsAllocated)
711 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
712 if (_memEntry)
713 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
714 }
715 else {
716 if (!super::init())
717 return false;
718 _initialized = true;
719 }
720
721 // Grab the appropriate mapper
722 if (mapper == kIOMapperNone)
723 mapper = 0; // No Mapper
724 else if (!mapper) {
725 IOMapper::checkForSystemMapper();
726 gIOSystemMapper = mapper = IOMapper::gSystem;
727 }
728
729 // Remove the dynamic internal use flags from the initial setting
730 options &= ~(kIOMemoryPreparedReadOnly);
731 _flags = options;
732 _task = task;
733
734 // DEPRECATED variable initialisation
735 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
736 _position = 0;
737 _kernPtrAligned = 0;
738 _cachedPhysicalAddress = 0;
739 _cachedVirtualAddress = 0;
740
741 if (kIOMemoryTypeUPL == type) {
742
743 ioGMDData *dataP;
744 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
745
746 if (!_memoryEntries) {
747 _memoryEntries = OSData::withCapacity(dataSize);
748 if (!_memoryEntries)
749 return false;
750 }
751 else if (!_memoryEntries->initWithCapacity(dataSize))
752 return false;
753
754 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
755 dataP = getDataP(_memoryEntries);
756 dataP->fMapper = mapper;
757 dataP->fPageCnt = 0;
758
759 _wireCount++; // UPLs start out life wired
760
761 _length = count;
762 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
763
764 ioPLBlock iopl;
765 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
766
767 iopl.fIOPL = (upl_t) buffers;
768 // The flag kIOPLOnDevice is conveniently equal to 1
769 iopl.fFlags = pageList->device | kIOPLExternUPL;
770 iopl.fIOMDOffset = 0;
771 if (!pageList->device) {
772 // Pre-compute the offset into the UPL's page list
773 pageList = &pageList[atop_32(offset)];
774 offset &= PAGE_MASK;
775 if (mapper) {
776 iopl.fMappedBase = mapper->iovmAlloc(_pages);
777 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
778 }
779 else
780 iopl.fMappedBase = 0;
781 }
782 else
783 iopl.fMappedBase = 0;
784 iopl.fPageInfo = (vm_address_t) pageList;
785 iopl.fPageOffset = offset;
786
787 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
788 }
789 else {
790 // kIOMemoryTypeVirtual | kIOMemoryTypeUIO | kIOMemoryTypePhysical
791
792 // Initialize the memory descriptor
793 if (options & kIOMemoryAsReference) {
794 _rangesIsAllocated = false;
795
796 // Hack assignment to get the buffer arg into _ranges.
797 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
798 // work, C++ sigh.
799 // This also initialises the uio & physical ranges.
800 _ranges.v = (IOVirtualRange *) buffers;
801 }
802 else {
803 assert(kIOMemoryTypeUIO != type);
804
805 _rangesIsAllocated = true;
806 _ranges.v = IONew(IOVirtualRange, count);
807 if (!_ranges.v)
808 return false;
809 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
810 }
811
812 // Find starting address within the vector of ranges
813 Ranges vec = _ranges;
814 UInt32 length = 0;
815 UInt32 pages = 0;
816 for (unsigned ind = 0; ind < count; ind++) {
817 user_addr_t addr;
818 UInt32 len;
819
820 // addr & len are returned by this function
821 getAddrLenForInd(addr, len, type, vec, ind);
822 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
823 len += length;
824 assert(len > length); // Check for 32 bit wrap around
825 length = len;
826 }
827 _length = length;
828 _pages = pages;
829 _rangesCount = count;
830
831 // Auto-prepare memory at creation time.
832 // Implied completion when descriptor is free-ed
833 if (kIOMemoryTypePhysical == type)
834 _wireCount++; // Physical MDs are, by definition, wired
835 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeUIO */
836 ioGMDData *dataP;
837 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
838
839 if (!_memoryEntries) {
840 _memoryEntries = OSData::withCapacity(dataSize);
841 if (!_memoryEntries)
842 return false;
843 }
844 else if (!_memoryEntries->initWithCapacity(dataSize))
845 return false;
846
847 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
848 dataP = getDataP(_memoryEntries);
849 dataP->fMapper = mapper;
850 dataP->fPageCnt = _pages;
851
852 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
853 _memEntry = createNamedEntry();
854
855 if ((_flags & kIOMemoryAutoPrepare)
856 && prepare() != kIOReturnSuccess)
857 return false;
858 }
859 }
860
861 return true;
862 }
863
864 /*
865 * free
866 *
867 * Free resources.
868 */
869 void IOGeneralMemoryDescriptor::free()
870 {
871 LOCK;
872 if( reserved)
873 reserved->memory = 0;
874 UNLOCK;
875
876 while (_wireCount)
877 complete();
878 if (_memoryEntries)
879 _memoryEntries->release();
880
881 if (_kernPtrAligned)
882 unmapFromKernel();
883 if (_ranges.v && _rangesIsAllocated)
884 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
885
886 if (reserved && reserved->devicePager)
887 device_pager_deallocate( (memory_object_t) reserved->devicePager );
888
889 // memEntry holds a ref on the device pager which owns reserved
890 // (ExpansionData) so no reserved access after this point
891 if (_memEntry)
892 ipc_port_release_send( (ipc_port_t) _memEntry );
893
894 super::free();
895 }
896
897 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
898 /* DEPRECATED */ {
899 panic("IOGMD::unmapFromKernel deprecated");
900 /* DEPRECATED */ }
901 /* DEPRECATED */
902 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
903 /* DEPRECATED */ {
904 panic("IOGMD::mapIntoKernel deprecated");
905 /* DEPRECATED */ }
906
907 /*
908 * getDirection:
909 *
910 * Get the direction of the transfer.
911 */
912 IODirection IOMemoryDescriptor::getDirection() const
913 {
914 return _direction;
915 }
916
917 /*
918 * getLength:
919 *
920 * Get the length of the transfer (over all ranges).
921 */
922 IOByteCount IOMemoryDescriptor::getLength() const
923 {
924 return _length;
925 }
926
927 void IOMemoryDescriptor::setTag( IOOptionBits tag )
928 {
929 _tag = tag;
930 }
931
932 IOOptionBits IOMemoryDescriptor::getTag( void )
933 {
934 return( _tag);
935 }
936
937 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
938 IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
939 IOByteCount * length )
940 {
941 IOPhysicalAddress physAddr = 0;
942
943 if( prepare() == kIOReturnSuccess) {
944 physAddr = getPhysicalSegment( offset, length );
945 complete();
946 }
947
948 return( physAddr );
949 }
950
951 IOByteCount IOMemoryDescriptor::readBytes
952 (IOByteCount offset, void *bytes, IOByteCount length)
953 {
954 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
955 IOByteCount remaining;
956
957 // Assert that this entire I/O is within the available range
958 assert(offset < _length);
959 assert(offset + length <= _length);
960 if (offset >= _length) {
961 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
962 return 0;
963 }
964
965 remaining = length = min(length, _length - offset);
966 while (remaining) { // (process another target segment?)
967 addr64_t srcAddr64;
968 IOByteCount srcLen;
969
970 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
971 if (!srcAddr64)
972 break;
973
974 // Clip segment length to remaining
975 if (srcLen > remaining)
976 srcLen = remaining;
977
978 copypv(srcAddr64, dstAddr, srcLen,
979 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
980
981 dstAddr += srcLen;
982 offset += srcLen;
983 remaining -= srcLen;
984 }
985
986 assert(!remaining);
987
988 return length - remaining;
989 }
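// Illustrative sketch of the caller's view of readBytes() ("md" and
// "localCopy" are hypothetical): the method copies from the descriptor's
// backing pages into a kernel buffer, one physical segment at a time, and
// clips the request to getLength().
//
//     UInt8 localCopy[64];
//     IOByteCount copied = md->readBytes(0, localCopy, sizeof(localCopy));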
990
991 IOByteCount IOMemoryDescriptor::writeBytes
992 (IOByteCount offset, const void *bytes, IOByteCount length)
993 {
994 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
995 IOByteCount remaining;
996
997 // Assert that this entire I/O is within the available range
998 assert(offset < _length);
999 assert(offset + length <= _length);
1000
1001 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1002
1003 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1004 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1005 return 0;
1006 }
1007
1008 remaining = length = min(length, _length - offset);
1009 while (remaining) { // (process another target segment?)
1010 addr64_t dstAddr64;
1011 IOByteCount dstLen;
1012
1013 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1014 if (!dstAddr64)
1015 break;
1016
1017 // Clip segment length to remaining
1018 if (dstLen > remaining)
1019 dstLen = remaining;
1020
1021 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1022 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1023
1024 srcAddr += dstLen;
1025 offset += dstLen;
1026 remaining -= dstLen;
1027 }
1028
1029 assert(!remaining);
1030
1031 return length - remaining;
1032 }
1033
1034 // osfmk/device/iokit_rpc.c
1035 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1036
1037 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1038 /* DEPRECATED */ {
1039 panic("IOGMD::setPosition deprecated");
1040 /* DEPRECATED */ }
1041
1042 IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
1043 (IOByteCount offset, IOByteCount *lengthOfSegment)
1044 {
1045 IOPhysicalAddress address = 0;
1046 IOPhysicalLength length = 0;
1047
1048 // assert(offset <= _length);
1049 if (offset < _length) // (within bounds?)
1050 {
1051 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1052 unsigned int ind;
1053
1054 // Physical address based memory descriptor
1055
1056 // Find offset within descriptor and make it relative
1057 // to the current _range.
1058 for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
1059 offset -= _ranges.p[ind].length;
1060
1061 IOPhysicalRange cur = _ranges.p[ind];
1062 address = cur.address + offset;
1063 length = cur.length - offset;
1064
1065 // see how far we can coalesce ranges
1066 for (++ind; ind < _rangesCount; ind++) {
1067 cur = _ranges.p[ind];
1068
1069 if (address + length != cur.address)
1070 break;
1071
1072 length += cur.length;
1073 }
1074
1075 // @@@ gvdl: should be assert(address);
1076 // but can't as NVidia GeForce creates a bogus physical mem
1077 assert(address
1078 || /* nvidia */ (!_ranges.p[0].address && 1 == _rangesCount));
1079 assert(length);
1080 }
1081 else do {
1082 // We need wiring & we are wired.
1083 assert(_wireCount);
1084
1085 if (!_wireCount)
1086 {
1087 panic("IOGMD: not wired for getPhysicalSegment()");
1088 continue;
1089 }
1090
1091 assert(_memoryEntries);
1092
1093 ioGMDData * dataP = getDataP(_memoryEntries);
1094 const ioPLBlock *ioplList = getIOPLList(dataP);
1095 UInt ind, numIOPLs = getNumIOPL(_memoryEntries, dataP);
1096 upl_page_info_t *pageList = getPageList(dataP);
1097
1098 assert(numIOPLs > 0);
1099
1100 // Scan through iopl info blocks looking for block containing offset
1101 for (ind = 1; ind < numIOPLs; ind++) {
1102 if (offset < ioplList[ind].fIOMDOffset)
1103 break;
1104 }
1105
1106 // Go back to actual range as search goes past it
1107 ioPLBlock ioplInfo = ioplList[ind - 1];
1108
1109 if (ind < numIOPLs)
1110 length = ioplList[ind].fIOMDOffset;
1111 else
1112 length = _length;
1113 length -= offset; // Remainder within iopl
1114
1115 // Subtract this iopl's offset within the descriptor to rebase the offset
1116 offset -= ioplInfo.fIOMDOffset;
1117
1118 // This is a mapped IOPL so we just need to compute an offset
1119 // relative to the mapped base.
1120 if (ioplInfo.fMappedBase) {
1121 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1122 address = ptoa_32(ioplInfo.fMappedBase) + offset;
1123 continue;
1124 }
1125
1126 // Currently the offset is rebased into the current iopl.
1127 // Now add the iopl 1st page offset.
1128 offset += ioplInfo.fPageOffset;
1129
1130 // For external UPLs the fPageInfo field points directly to
1131 // the upl's upl_page_info_t array.
1132 if (ioplInfo.fFlags & kIOPLExternUPL)
1133 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1134 else
1135 pageList = &pageList[ioplInfo.fPageInfo];
1136
1137 // Check for direct device non-paged memory
1138 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1139 address = ptoa_32(pageList->phys_addr) + offset;
1140 continue;
1141 }
1142
1143 // Now we need to compute the index into the pageList
1144 ind = atop_32(offset);
1145 offset &= PAGE_MASK;
1146
1147 IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
1148 address = ptoa_32(pageAddr) + offset;
1149
1150 // Check for the remaining data in this upl being longer than the
1151 // remainder on the current page. This should be checked for
1152 // contiguous pages
1153 if (length > PAGE_SIZE - offset) {
1154 // See if the next page is contiguous. Stop looking when we hit
1155 // the end of this upl, which is indicated by the
1156 // contigLength >= length.
1157 IOByteCount contigLength = PAGE_SIZE - offset;
1158
1159 // Look for contiguous segment
1160 while (contigLength < length
1161 && ++pageAddr == pageList[++ind].phys_addr) {
1162 contigLength += PAGE_SIZE;
1163 }
1164 if (length > contigLength)
1165 length = contigLength;
1166 }
1167
1168 assert(address);
1169 assert(length);
1170
1171 } while (0);
1172
1173 if (!address)
1174 length = 0;
1175 }
1176
1177 if (lengthOfSegment)
1178 *lengthOfSegment = length;
1179
1180 return address;
1181 }
1182
1183 addr64_t IOMemoryDescriptor::getPhysicalSegment64
1184 (IOByteCount offset, IOByteCount *lengthOfSegment)
1185 {
1186 IOPhysicalAddress phys32;
1187 IOByteCount length;
1188 addr64_t phys64;
1189
1190 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1191 if (!phys32)
1192 return 0;
1193
1194 if (gIOSystemMapper)
1195 {
1196 IOByteCount origLen;
1197
1198 phys64 = gIOSystemMapper->mapAddr(phys32);
1199 origLen = *lengthOfSegment;
1200 length = page_size - (phys64 & (page_size - 1));
1201 while ((length < origLen)
1202 && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
1203 length += page_size;
1204 if (length > origLen)
1205 length = origLen;
1206
1207 *lengthOfSegment = length;
1208 }
1209 else
1210 phys64 = (addr64_t) phys32;
1211
1212 return phys64;
1213 }
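// Illustrative sketch of walking a prepared descriptor one physical segment
// at a time with getPhysicalSegment64() ("md" and "programSG" are
// hypothetical driver-side names):
//
//     IOByteCount offset = 0;
//     while (offset < md->getLength()) {
//         IOByteCount segLen;
//         addr64_t    segAddr = md->getPhysicalSegment64(offset, &segLen);
//         if (!segAddr)
//             break;
//         programSG(segAddr, segLen);   // driver-specific scatter/gather setup
//         offset += segLen;
//     }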
1214
1215 IOPhysicalAddress IOGeneralMemoryDescriptor::
1216 getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1217 {
1218 IOPhysicalAddress address = 0;
1219 IOPhysicalLength length = 0;
1220 IOOptionBits type = _flags & kIOMemoryTypeMask;
1221
1222 assert(offset <= _length);
1223
1224 if ( type == kIOMemoryTypeUPL)
1225 return super::getSourceSegment( offset, lengthOfSegment );
1226 else if ( offset < _length ) // (within bounds?)
1227 {
1228 unsigned rangesIndex = 0;
1229 Ranges vec = _ranges;
1230 user_addr_t addr;
1231
1232 // Find starting address within the vector of ranges
1233 for (;;) {
1234 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1235 if (offset < length)
1236 break;
1237 offset -= length; // (make offset relative)
1238 rangesIndex++;
1239 }
1240
1241 // Now that we have the starting range,
1242 // lets find the last contiguous range
1243 addr += offset;
1244 length -= offset;
1245
1246 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1247 user_addr_t newAddr;
1248 IOPhysicalLength newLen;
1249
1250 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1251 if (addr + length != newAddr)
1252 break;
1253 length += newLen;
1254 }
1255 if (addr)
1256 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1257 else
1258 length = 0;
1259 }
1260
1261 if ( lengthOfSegment ) *lengthOfSegment = length;
1262
1263 return address;
1264 }
1265
1266 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1267 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1268 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1269 /* DEPRECATED */ {
1270 if (_task == kernel_task)
1271 return (void *) getSourceSegment(offset, lengthOfSegment);
1272 else
1273 panic("IOGMD::getVirtualSegment deprecated");
1274
1275 return 0;
1276 /* DEPRECATED */ }
1277 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1278
1279
1280
1281 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1282 IOOptionBits * oldState )
1283 {
1284 IOReturn err = kIOReturnSuccess;
1285 vm_purgable_t control;
1286 int state;
1287
1288 do
1289 {
1290 if (!_memEntry)
1291 {
1292 err = kIOReturnNotReady;
1293 break;
1294 }
1295
1296 control = VM_PURGABLE_SET_STATE;
1297 switch (newState)
1298 {
1299 case kIOMemoryPurgeableKeepCurrent:
1300 control = VM_PURGABLE_GET_STATE;
1301 break;
1302
1303 case kIOMemoryPurgeableNonVolatile:
1304 state = VM_PURGABLE_NONVOLATILE;
1305 break;
1306 case kIOMemoryPurgeableVolatile:
1307 state = VM_PURGABLE_VOLATILE;
1308 break;
1309 case kIOMemoryPurgeableEmpty:
1310 state = VM_PURGABLE_EMPTY;
1311 break;
1312 default:
1313 err = kIOReturnBadArgument;
1314 break;
1315 }
1316
1317 if (kIOReturnSuccess != err)
1318 break;
1319
1320 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1321
1322 if (oldState)
1323 {
1324 if (kIOReturnSuccess == err)
1325 {
1326 switch (state)
1327 {
1328 case VM_PURGABLE_NONVOLATILE:
1329 state = kIOMemoryPurgeableNonVolatile;
1330 break;
1331 case VM_PURGABLE_VOLATILE:
1332 state = kIOMemoryPurgeableVolatile;
1333 break;
1334 case VM_PURGABLE_EMPTY:
1335 state = kIOMemoryPurgeableEmpty;
1336 break;
1337 default:
1338 state = kIOMemoryPurgeableNonVolatile;
1339 err = kIOReturnNotReady;
1340 break;
1341 }
1342 *oldState = state;
1343 }
1344 }
1345 }
1346 while (false);
1347
1348 return (err);
1349 }
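// Illustrative sketch of toggling purgeability ("md" is hypothetical); as
// implemented above this only works once the descriptor has a named entry,
// otherwise kIOReturnNotReady is returned.
//
//     IOOptionBits oldState;
//     IOReturn rc = md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//     // ... later, before reusing the contents ...
//     if (kIOReturnSuccess == rc)
//         rc = md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);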
1350
1351 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1352 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1353
1354 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1355 IOByteCount offset, IOByteCount length )
1356 {
1357 IOByteCount remaining;
1358 void (*func)(addr64_t pa, unsigned int count) = 0;
1359
1360 switch (options)
1361 {
1362 case kIOMemoryIncoherentIOFlush:
1363 func = &dcache_incoherent_io_flush64;
1364 break;
1365 case kIOMemoryIncoherentIOStore:
1366 func = &dcache_incoherent_io_store64;
1367 break;
1368 }
1369
1370 if (!func)
1371 return (kIOReturnUnsupported);
1372
1373 remaining = length = min(length, getLength() - offset);
1374 while (remaining)
1375 // (process another target segment?)
1376 {
1377 addr64_t dstAddr64;
1378 IOByteCount dstLen;
1379
1380 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1381 if (!dstAddr64)
1382 break;
1383
1384 // Clip segment length to remaining
1385 if (dstLen > remaining)
1386 dstLen = remaining;
1387
1388 (*func)(dstAddr64, dstLen);
1389
1390 offset += dstLen;
1391 remaining -= dstLen;
1392 }
1393
1394 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1395 }
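// Illustrative sketch of a cache maintenance call built on the segment loop
// above ("md" is hypothetical; flushing before handing a range to a
// non-coherent DMA engine is one plausible use):
//
//     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());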
1396
1397 #ifdef __ppc__
1398 extern vm_offset_t static_memory_end;
1399 #define io_kernel_static_end static_memory_end
1400 #else
1401 extern vm_offset_t first_avail;
1402 #define io_kernel_static_end first_avail
1403 #endif
1404
1405 static kern_return_t
1406 io_get_kernel_static_upl(
1407 vm_map_t /* map */,
1408 vm_address_t offset,
1409 vm_size_t *upl_size,
1410 upl_t *upl,
1411 upl_page_info_array_t page_list,
1412 unsigned int *count)
1413 {
1414 unsigned int pageCount, page;
1415 ppnum_t phys;
1416
1417 pageCount = atop_32(*upl_size);
1418 if (pageCount > *count)
1419 pageCount = *count;
1420
1421 *upl = NULL;
1422
1423 for (page = 0; page < pageCount; page++)
1424 {
1425 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1426 if (!phys)
1427 break;
1428 page_list[page].phys_addr = phys;
1429 page_list[page].pageout = 0;
1430 page_list[page].absent = 0;
1431 page_list[page].dirty = 0;
1432 page_list[page].precious = 0;
1433 page_list[page].device = 0;
1434 }
1435
1436 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1437 }
1438
1439 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1440 {
1441 IOOptionBits type = _flags & kIOMemoryTypeMask;
1442 IOReturn error = kIOReturnNoMemory;
1443 ioGMDData *dataP;
1444 ppnum_t mapBase = 0;
1445 IOMapper *mapper;
1446 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1447
1448 assert(!_wireCount);
1449 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type);
1450
1451 if (_pages >= gIOMaximumMappedIOPageCount)
1452 return kIOReturnNoResources;
1453
1454 dataP = getDataP(_memoryEntries);
1455 mapper = dataP->fMapper;
1456 if (mapper && _pages)
1457 mapBase = mapper->iovmAlloc(_pages);
1458
1459 // Note that appendBytes(NULL) zeros the data up to the
1460 // desired length.
1461 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1462 dataP = 0; // May no longer be valid so let's not get tempted.
1463
1464 if (forDirection == kIODirectionNone)
1465 forDirection = _direction;
1466
1467 int uplFlags; // This Mem Desc's default flags for upl creation
1468 switch (forDirection)
1469 {
1470 case kIODirectionOut:
1471 // Pages do not need to be marked as dirty on commit
1472 uplFlags = UPL_COPYOUT_FROM;
1473 _flags |= kIOMemoryPreparedReadOnly;
1474 break;
1475
1476 case kIODirectionIn:
1477 default:
1478 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1479 break;
1480 }
1481 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1482
1483 // Find the appropriate vm_map for the given task
1484 vm_map_t curMap;
1485 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1486 curMap = 0;
1487 else
1488 { curMap = get_task_map(_task); }
1489
1490 // Iterate over the vector of virtual ranges
1491 Ranges vec = _ranges;
1492 unsigned int pageIndex = 0;
1493 IOByteCount mdOffset = 0;
1494 for (UInt range = 0; range < _rangesCount; range++) {
1495 ioPLBlock iopl;
1496 user_addr_t startPage;
1497 IOByteCount numBytes;
1498
1499 // Get the startPage address and length of vec[range]
1500 getAddrLenForInd(startPage, numBytes, type, vec, range);
1501 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1502 numBytes += iopl.fPageOffset;
1503 startPage = trunc_page_64(startPage);
1504
1505 if (mapper)
1506 iopl.fMappedBase = mapBase + pageIndex;
1507 else
1508 iopl.fMappedBase = 0;
1509
1510 // Iterate over the current range, creating UPLs
1511 while (numBytes) {
1512 dataP = getDataP(_memoryEntries);
1513 vm_address_t kernelStart = (vm_address_t) startPage;
1514 vm_map_t theMap;
1515 if (curMap)
1516 theMap = curMap;
1517 else if (!sharedMem) {
1518 assert(_task == kernel_task);
1519 theMap = IOPageableMapForAddress(kernelStart);
1520 }
1521 else
1522 theMap = NULL;
1523
1524 upl_page_info_array_t pageInfo = getPageList(dataP);
1525 int ioplFlags = uplFlags;
1526 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1527
1528 vm_size_t ioplSize = round_page_32(numBytes);
1529 unsigned int numPageInfo = atop_32(ioplSize);
1530
1531 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1532 error = io_get_kernel_static_upl(theMap,
1533 kernelStart,
1534 &ioplSize,
1535 &iopl.fIOPL,
1536 baseInfo,
1537 &numPageInfo);
1538 }
1539 else if (sharedMem) {
1540 error = memory_object_iopl_request(sharedMem,
1541 ptoa_32(pageIndex),
1542 &ioplSize,
1543 &iopl.fIOPL,
1544 baseInfo,
1545 &numPageInfo,
1546 &ioplFlags);
1547 }
1548 else {
1549 assert(theMap);
1550 error = vm_map_create_upl(theMap,
1551 startPage,
1552 &ioplSize,
1553 &iopl.fIOPL,
1554 baseInfo,
1555 &numPageInfo,
1556 &ioplFlags);
1557 }
1558
1559 assert(ioplSize);
1560 if (error != KERN_SUCCESS)
1561 goto abortExit;
1562
1563 error = kIOReturnNoMemory;
1564
1565 if (baseInfo->device) {
1566 numPageInfo = 1;
1567 iopl.fFlags = kIOPLOnDevice;
1568 // Don't translate device memory at all
1569 if (mapper && mapBase) {
1570 mapper->iovmFree(mapBase, _pages);
1571 mapBase = 0;
1572 iopl.fMappedBase = 0;
1573 }
1574 }
1575 else {
1576 iopl.fFlags = 0;
1577 if (mapper)
1578 mapper->iovmInsert(mapBase, pageIndex,
1579 baseInfo, numPageInfo);
1580 }
1581
1582 iopl.fIOMDOffset = mdOffset;
1583 iopl.fPageInfo = pageIndex;
1584
1585 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1586 {
1587 upl_commit(iopl.fIOPL, 0, 0);
1588 upl_deallocate(iopl.fIOPL);
1589 iopl.fIOPL = 0;
1590 }
1591
1592 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1593 // Clean up the partially created and unsaved iopl
1594 if (iopl.fIOPL) {
1595 upl_abort(iopl.fIOPL, 0);
1596 upl_deallocate(iopl.fIOPL);
1597 }
1598 goto abortExit;
1599 }
1600
1601 // Check for multiple iopls in one virtual range
1602 pageIndex += numPageInfo;
1603 mdOffset -= iopl.fPageOffset;
1604 if (ioplSize < numBytes) {
1605 numBytes -= ioplSize;
1606 startPage += ioplSize;
1607 mdOffset += ioplSize;
1608 iopl.fPageOffset = 0;
1609 if (mapper)
1610 iopl.fMappedBase = mapBase + pageIndex;
1611 }
1612 else {
1613 mdOffset += numBytes;
1614 break;
1615 }
1616 }
1617 }
1618
1619 return kIOReturnSuccess;
1620
1621 abortExit:
1622 {
1623 dataP = getDataP(_memoryEntries);
1624 UInt done = getNumIOPL(_memoryEntries, dataP);
1625 ioPLBlock *ioplList = getIOPLList(dataP);
1626
1627 for (UInt range = 0; range < done; range++)
1628 {
1629 if (ioplList[range].fIOPL) {
1630 upl_abort(ioplList[range].fIOPL, 0);
1631 upl_deallocate(ioplList[range].fIOPL);
1632 }
1633 }
1634 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1635
1636 if (mapper && mapBase)
1637 mapper->iovmFree(mapBase, _pages);
1638 }
1639
1640 return error;
1641 }
1642
1643 /*
1644 * prepare
1645 *
1646 * Prepare the memory for an I/O transfer. This involves paging in
1647 * the memory, if necessary, and wiring it down for the duration of
1648 * the transfer. The complete() method completes the processing of
1649 * the memory after the I/O transfer finishes. This method needn't
1650 * be called for non-pageable memory.
1651 */
1652 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1653 {
1654 IOReturn error = kIOReturnSuccess;
1655 IOOptionBits type = _flags & kIOMemoryTypeMask;
1656
1657 if (!_wireCount
1658 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) ) {
1659 error = wireVirtual(forDirection);
1660 if (error)
1661 return error;
1662 }
1663
1664 _wireCount++;
1665
1666 return kIOReturnSuccess;
1667 }
1668
1669 /*
1670 * complete
1671 *
1672 * Complete processing of the memory after an I/O transfer finishes.
1673 * This method should not be called unless a prepare was previously
1674 * issued; the prepare() and complete() must occur in pairs, before
1675 * and after an I/O transfer involving pageable memory.
1676 */
1677
1678 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
1679 {
1680 assert(_wireCount);
1681
1682 if (!_wireCount)
1683 return kIOReturnSuccess;
1684
1685 _wireCount--;
1686 if (!_wireCount) {
1687 IOOptionBits type = _flags & kIOMemoryTypeMask;
1688
1689 if (kIOMemoryTypePhysical == type) {
1690 /* kIOMemoryTypePhysical */
1691 // DO NOTHING
1692 }
1693 else {
1694 ioGMDData * dataP = getDataP(_memoryEntries);
1695 ioPLBlock *ioplList = getIOPLList(dataP);
1696 UInt count = getNumIOPL(_memoryEntries, dataP);
1697
1698 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
1699 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
1700
1701 // Only complete iopls that we created which are for TypeVirtual
1702 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeUIO == type) {
1703 for (UInt ind = 0; ind < count; ind++)
1704 if (ioplList[ind].fIOPL) {
1705 upl_commit(ioplList[ind].fIOPL, 0, 0);
1706 upl_deallocate(ioplList[ind].fIOPL);
1707 }
1708 }
1709
1710 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1711 }
1712 }
1713 return kIOReturnSuccess;
1714 }
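// Illustrative sketch of the pairing described by the two comments above
// ("md" is hypothetical): every successful prepare() must be balanced by a
// complete(); the _wireCount counting in both methods allows nesting.
//
//     if (kIOReturnSuccess == md->prepare()) {
//         // ... perform the I/O transfer ...
//         md->complete();
//     }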
1715
1716 IOReturn IOGeneralMemoryDescriptor::doMap(
1717 vm_map_t addressMap,
1718 IOVirtualAddress * atAddress,
1719 IOOptionBits options,
1720 IOByteCount sourceOffset,
1721 IOByteCount length )
1722 {
1723 kern_return_t kr;
1724 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1725
1726 IOOptionBits type = _flags & kIOMemoryTypeMask;
1727 Ranges vec = _ranges;
1728
1729 user_addr_t range0Addr = 0;
1730 IOByteCount range0Len = 0;
1731
1732 if (vec.v)
1733 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
1734
1735 // mapping source == dest? (could be much better)
1736 if( _task
1737 && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1738 && (1 == _rangesCount) && (0 == sourceOffset)
1739 && range0Addr && (length <= range0Len) ) {
1740 if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
1741 return kIOReturnOverrun; // Doesn't fit in 32bit return field
1742 else {
1743 *atAddress = range0Addr;
1744 return( kIOReturnSuccess );
1745 }
1746 }
1747
1748 if( 0 == sharedMem) {
1749
1750 vm_size_t size = ptoa_32(_pages);
1751
1752 if( _task) {
1753 #ifndef i386
1754 memory_object_size_t actualSize = size;
1755 kr = mach_make_memory_entry_64(get_task_map(_task),
1756 &actualSize, range0Addr,
1757 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1758 NULL );
1759
1760 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
1761 #if IOASSERT
1762 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
1763 range0Addr, (UInt32) actualSize, size);
1764 #endif
1765 kr = kIOReturnVMError;
1766 ipc_port_release_send( sharedMem );
1767 }
1768
1769 if( KERN_SUCCESS != kr)
1770 #endif /* !i386 */
1771 sharedMem = MACH_PORT_NULL;
1772
1773 } else do {
1774
1775 memory_object_t pager;
1776 unsigned int flags = 0;
1777 addr64_t pa;
1778 IOPhysicalLength segLen;
1779
1780 pa = getPhysicalSegment64( sourceOffset, &segLen );
1781
1782 if( !reserved) {
1783 reserved = IONew( ExpansionData, 1 );
1784 if( !reserved)
1785 continue;
1786 }
1787 reserved->pagerContig = (1 == _rangesCount);
1788 reserved->memory = this;
1789
1790 /* What cache mode do we need? */
1791 switch(options & kIOMapCacheMask ) {
1792
1793 case kIOMapDefaultCache:
1794 default:
1795 flags = IODefaultCacheBits(pa);
1796 break;
1797
1798 case kIOMapInhibitCache:
1799 flags = DEVICE_PAGER_CACHE_INHIB |
1800 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1801 break;
1802
1803 case kIOMapWriteThruCache:
1804 flags = DEVICE_PAGER_WRITE_THROUGH |
1805 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1806 break;
1807
1808 case kIOMapCopybackCache:
1809 flags = DEVICE_PAGER_COHERENT;
1810 break;
1811
1812 case kIOMapWriteCombineCache:
1813 flags = DEVICE_PAGER_CACHE_INHIB |
1814 DEVICE_PAGER_COHERENT;
1815 break;
1816 }
1817
1818 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1819
1820 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1821 size, flags);
1822 assert( pager );
1823
1824 if( pager) {
1825 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1826 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1827
1828 assert( KERN_SUCCESS == kr );
1829 if( KERN_SUCCESS != kr) {
1830 device_pager_deallocate( pager );
1831 pager = MACH_PORT_NULL;
1832 sharedMem = MACH_PORT_NULL;
1833 }
1834 }
1835 if( pager && sharedMem)
1836 reserved->devicePager = pager;
1837 else {
1838 IODelete( reserved, ExpansionData, 1 );
1839 reserved = 0;
1840 }
1841
1842 } while( false );
1843
1844 _memEntry = (void *) sharedMem;
1845 }
1846
1847
1848 #ifndef i386
1849 if( 0 == sharedMem)
1850 kr = kIOReturnVMError;
1851 else
1852 #endif
1853 kr = super::doMap( addressMap, atAddress,
1854 options, sourceOffset, length );
1855
1856 return( kr );
1857 }
1858
1859 IOReturn IOGeneralMemoryDescriptor::doUnmap(
1860 vm_map_t addressMap,
1861 IOVirtualAddress logical,
1862 IOByteCount length )
1863 {
1864 // could be much better
1865 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
1866
1867 IOOptionBits type = _flags & kIOMemoryTypeMask;
1868 user_addr_t range0Addr;
1869 IOByteCount range0Len;
1870
1871 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
1872 if (logical == range0Addr && length <= range0Len)
1873 return( kIOReturnSuccess );
1874 }
1875
1876 return( super::doUnmap( addressMap, logical, length ));
1877 }
1878
1879 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1880
1881 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1882
1883 /* inline function implementation */
1884 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1885 { return( getPhysicalSegment( 0, 0 )); }
1886
1887
1888 #undef super
1889 #define super IOMemoryMap
1890
1891 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1892
1893 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1894
1895 bool _IOMemoryMap::initCompatible(
1896 IOMemoryDescriptor * _memory,
1897 IOMemoryMap * _superMap,
1898 IOByteCount _offset,
1899 IOByteCount _length )
1900 {
1901
1902 if( !super::init())
1903 return( false);
1904
1905 if( (_offset + _length) > _superMap->getLength())
1906 return( false);
1907
1908 _memory->retain();
1909 memory = _memory;
1910 _superMap->retain();
1911 superMap = _superMap;
1912
1913 offset = _offset;
1914 if( _length)
1915 length = _length;
1916 else
1917 length = _memory->getLength();
1918
1919 options = superMap->getMapOptions();
1920 logical = superMap->getVirtualAddress() + offset;
1921
1922 return( true );
1923 }
1924
1925 bool _IOMemoryMap::initWithDescriptor(
1926 IOMemoryDescriptor * _memory,
1927 task_t intoTask,
1928 IOVirtualAddress toAddress,
1929 IOOptionBits _options,
1930 IOByteCount _offset,
1931 IOByteCount _length )
1932 {
1933 bool ok;
1934 bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
1935
1936 if ((!_memory) || (!intoTask))
1937 return( false);
1938
1939 if( (_offset + _length) > _memory->getLength())
1940 return( false);
1941
1942 if (!redir)
1943 {
1944 if (!super::init())
1945 return(false);
1946 addressMap = get_task_map(intoTask);
1947 if( !addressMap)
1948 return( false);
1949 vm_map_reference(addressMap);
1950 addressTask = intoTask;
1951 logical = toAddress;
1952 options = _options;
1953 }
1954
1955 _memory->retain();
1956
1957 offset = _offset;
1958 if( _length)
1959 length = _length;
1960 else
1961 length = _memory->getLength();
1962
1963 if( options & kIOMapStatic)
1964 ok = true;
1965 else
1966 ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
1967 _options, offset, length ));
1968 if (ok || redir)
1969 {
1970 if (memory)
1971 memory->release();
1972 memory = _memory;
1973 logical = toAddress;
1974 }
1975 else
1976 {
1977 _memory->release();
1978 if (!redir)
1979 {
1980 logical = 0;
1981 memory = 0;
1982 vm_map_deallocate(addressMap);
1983 addressMap = 0;
1984 }
1985 }
1986
1987 return( ok );
1988 }
1989
1990 /* LP64todo - these need to expand */
1991 struct IOMemoryDescriptorMapAllocRef
1992 {
1993 ipc_port_t sharedMem;
1994 vm_size_t size;
1995 vm_offset_t mapped;
1996 IOByteCount sourceOffset;
1997 IOOptionBits options;
1998 };
1999
2000 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2001 {
2002 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2003 IOReturn err;
2004
2005 do {
2006 if( ref->sharedMem) {
2007 vm_prot_t prot = VM_PROT_READ
2008 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2009
2010 // set memory entry cache
2011 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2012 switch (ref->options & kIOMapCacheMask)
2013 {
2014 case kIOMapInhibitCache:
2015 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2016 break;
2017
2018 case kIOMapWriteThruCache:
2019 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2020 break;
2021
2022 case kIOMapWriteCombineCache:
2023 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2024 break;
2025
2026 case kIOMapCopybackCache:
2027 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2028 break;
2029
2030 case kIOMapDefaultCache:
2031 default:
2032 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2033 break;
2034 }
2035
2036 vm_size_t unused = 0;
2037
2038 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2039 memEntryCacheMode, NULL, ref->sharedMem );
2040 if (KERN_SUCCESS != err)
2041 IOLog("MAP_MEM_ONLY failed %d\n", err);
2042
2043 err = vm_map( map,
2044 &ref->mapped,
2045 ref->size, 0 /* mask */,
2046 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2047 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2048 ref->sharedMem, ref->sourceOffset,
2049 false, // copy
2050 prot, // cur
2051 prot, // max
2052 VM_INHERIT_NONE);
2053
2054 if( KERN_SUCCESS != err) {
2055 ref->mapped = 0;
2056 continue;
2057 }
2058
2059 } else {
2060
2061 err = vm_allocate( map, &ref->mapped, ref->size,
2062 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2063 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2064
2065 if( KERN_SUCCESS != err) {
2066 ref->mapped = 0;
2067 continue;
2068 }
2069
2070 // we have to make sure that these guys don't get copied if we fork.
2071 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2072 assert( KERN_SUCCESS == err );
2073 }
2074
2075 } while( false );
2076
2077 return( err );
2078 }
2079
2080
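// doMap() establishes the VM mapping that backs an _IOMemoryMap.
// For kIOMapReference|kIOMapUnique the target _IOMemoryMap is passed in *atAddress and is
// retargeted onto this descriptor: access is blocked with a fresh UPL, upl_transpose()
// exchanges its pages with the mapping's redirect UPL, and the memory entries are swapped.
// Otherwise the range is allocated/mapped via IOMemoryDescriptorMapAlloc() (through the
// pageable maps for pageable kernel buffers) and handleFault() is called to enter the
// physical pages unless a shared memory entry with no device pager covers the range.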
2081 IOReturn IOMemoryDescriptor::doMap(
2082 vm_map_t addressMap,
2083 IOVirtualAddress * atAddress,
2084 IOOptionBits options,
2085 IOByteCount sourceOffset,
2086 IOByteCount length )
2087 {
2088 IOReturn err = kIOReturnSuccess;
2089 memory_object_t pager;
2090 vm_address_t logical;
2091 IOByteCount pageOffset;
2092 IOPhysicalAddress sourceAddr;
2093 IOMemoryDescriptorMapAllocRef ref;
2094
2095 ref.sharedMem = (ipc_port_t) _memEntry;
2096 ref.sourceOffset = sourceOffset;
2097 ref.options = options;
2098
2099 do {
2100
2101 if( 0 == length)
2102 length = getLength();
2103
2104 sourceAddr = getSourceSegment( sourceOffset, NULL );
2105 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2106
2107 ref.size = round_page_32( length + pageOffset );
2108
2109 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2110 {
2111 upl_t redirUPL2;
2112 vm_size_t size;
2113 int flags;
2114
2115 _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
2116 ref.mapped = mapping->getVirtualAddress();
2117
2118 if (!_memEntry)
2119 {
2120 err = kIOReturnNotReadable;
2121 continue;
2122 }
2123
2124 size = length;
2125 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2126 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2127
2128 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2129 NULL, NULL,
2130 &flags))
2131 redirUPL2 = NULL;
2132
2133 err = upl_transpose(redirUPL2, mapping->redirUPL);
2134 if (kIOReturnSuccess != err)
2135 {
2136 IOLog("upl_transpose(%x)\n", err);
2137 err = kIOReturnSuccess;
2138 }
2139
2140 if (redirUPL2)
2141 {
2142 upl_commit(redirUPL2, NULL, 0);
2143 upl_deallocate(redirUPL2);
2144 redirUPL2 = 0;
2145 }
2146 {
2147 // swap the memEntries since they now refer to different vm_objects
2148 void * me = _memEntry;
2149 _memEntry = mapping->memory->_memEntry;
2150 mapping->memory->_memEntry = me;
2151 }
2152 }
2153 else
2154 {
2155
2156 logical = *atAddress;
2157 if( options & kIOMapAnywhere)
2158 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2159 ref.mapped = 0;
2160 else {
2161 ref.mapped = trunc_page_32( logical );
2162 if( (logical - ref.mapped) != pageOffset) {
2163 err = kIOReturnVMError;
2164 continue;
2165 }
2166 }
2167
2168 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2169 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2170 else
2171 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
2172 }
2173
2174 if( err != KERN_SUCCESS)
2175 continue;
2176
2177 if( reserved)
2178 pager = (memory_object_t) reserved->devicePager;
2179 else
2180 pager = MACH_PORT_NULL;
2181
2182 if( !ref.sharedMem || pager )
2183 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
2184
2185 } while( false );
2186
2187 if( err != KERN_SUCCESS) {
2188 if( ref.mapped)
2189 doUnmap( addressMap, ref.mapped, ref.size );
2190 *atAddress = NULL;
2191 } else
2192 *atAddress = ref.mapped + pageOffset;
2193
2194 return( err );
2195 }
2196
2197 enum {
2198 kIOMemoryRedirected = 0x00010000
2199 };
2200
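// handleFault() enters the physical pages for a mapped range.
// With no addressMap it only blocks the caller while the descriptor is redirected.
// Otherwise it walks the descriptor's physical segments: on i386 each segment is entered
// with IOMapPages(); when a device pager is present the pager is populated (the whole
// object at once for contiguous pagers, otherwise a page at a time); on other
// architectures the range is then pre-faulted via vm_fault() - see the temporary
// workaround comment in the loop body.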
2201 IOReturn IOMemoryDescriptor::handleFault(
2202 void * _pager,
2203 vm_map_t addressMap,
2204 IOVirtualAddress address,
2205 IOByteCount sourceOffset,
2206 IOByteCount length,
2207 IOOptionBits options )
2208 {
2209 IOReturn err = kIOReturnSuccess;
2210 memory_object_t pager = (memory_object_t) _pager;
2211 vm_size_t size;
2212 vm_size_t bytes;
2213 vm_size_t page;
2214 IOByteCount pageOffset;
2215 IOByteCount pagerOffset;
2216 IOPhysicalLength segLen;
2217 addr64_t physAddr;
2218
2219 if( !addressMap) {
2220
2221 if( kIOMemoryRedirected & _flags) {
2222 #ifdef DEBUG
2223 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
2224 #endif
2225 do {
2226 SLEEP;
2227 } while( kIOMemoryRedirected & _flags );
2228 }
2229
2230 return( kIOReturnSuccess );
2231 }
2232
2233 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2234 assert( physAddr );
2235 pageOffset = physAddr - trunc_page_64( physAddr );
2236 pagerOffset = sourceOffset;
2237
2238 size = length + pageOffset;
2239 physAddr -= pageOffset;
2240
2241 segLen += pageOffset;
2242 bytes = size;
2243 do {
2244 // in the middle of the loop only map whole pages
2245 if( segLen >= bytes)
2246 segLen = bytes;
2247 else if( segLen != trunc_page_32( segLen))
2248 err = kIOReturnVMError;
2249 if( physAddr != trunc_page_64( physAddr))
2250 err = kIOReturnBadArgument;
2251
2252 #ifdef DEBUG
2253 if( kIOLogMapping & gIOKitDebug)
2254 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
2255 addressMap, address + pageOffset, physAddr + pageOffset,
2256 segLen - pageOffset);
2257 #endif
2258
2259
2260
2261
2262
2263 #ifdef i386
2264 /* i386 doesn't support faulting on device memory yet */
2265 if( addressMap && (kIOReturnSuccess == err))
2266 err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
2267 assert( KERN_SUCCESS == err );
2268 if( err)
2269 break;
2270 #endif
2271
2272 if( pager) {
2273 if( reserved && reserved->pagerContig) {
2274 IOPhysicalLength allLen;
2275 addr64_t allPhys;
2276
2277 allPhys = getPhysicalSegment64( 0, &allLen );
2278 assert( allPhys );
2279 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
2280
2281 } else {
2282
2283 for( page = 0;
2284 (page < segLen) && (KERN_SUCCESS == err);
2285 page += page_size) {
2286 err = device_pager_populate_object(pager, pagerOffset,
2287 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
2288 pagerOffset += page_size;
2289 }
2290 }
2291 assert( KERN_SUCCESS == err );
2292 if( err)
2293 break;
2294 }
2295 #ifndef i386
2296 /* *** ALERT *** */
2297 /* *** Temporary Workaround *** */
2298
2299 /* This call to vm_fault causes an early pmap level resolution */
2300 /* of the mappings created above. Need for this is in absolute */
2301 /* violation of the basic tenet that the pmap layer is a cache. */
2302 /* Further, it implies a serious I/O architectural violation on */
2303 /* the part of some user of the mapping. As of this writing, */
2304 /* the call to vm_fault is needed because the NVIDIA driver */
2305 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2306 /* fixed as soon as possible. The NVIDIA driver should not */
2307 /* need to query for this info as it should know from the doMap */
2308 /* call where the physical memory is mapped. When a query is */
2309 /* necessary to find a physical mapping, it should be done */
2310 /* through an iokit call which includes the mapped memory */
2311 /* handle. This is required for machine architecture independence.*/
2312
2313 if(!(kIOMemoryRedirected & _flags)) {
2314 vm_fault(addressMap,
2315 (vm_map_offset_t)address,
2316 VM_PROT_READ|VM_PROT_WRITE,
2317 FALSE, THREAD_UNINT, NULL,
2318 (vm_map_offset_t)0);
2319 }
2320
2321 /* *** Temporary Workaround *** */
2322 /* *** ALERT *** */
2323 #endif
2324 sourceOffset += segLen - pageOffset;
2325 address += segLen;
2326 bytes -= segLen;
2327 pageOffset = 0;
2328
2329 } while( bytes
2330 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2331
2332 if( bytes)
2333 err = kIOReturnBadArgument;
2334
2335 return( err );
2336 }
2337
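// doUnmap() tears the mapping down with vm_deallocate(), first redirecting pageable
// kernel buffer mappings to the pageable submap that actually contains the address.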
2338 IOReturn IOMemoryDescriptor::doUnmap(
2339 vm_map_t addressMap,
2340 IOVirtualAddress logical,
2341 IOByteCount length )
2342 {
2343 IOReturn err;
2344
2345 #ifdef DEBUG
2346 if( kIOLogMapping & gIOKitDebug)
2347 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2348 addressMap, logical, length );
2349 #endif
2350
2351 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2352
2353 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2354 addressMap = IOPageableMapForAddress( logical );
2355
2356 err = vm_deallocate( addressMap, logical, length );
2357
2358 } else
2359 err = kIOReturnSuccess;
2360
2361 return( err );
2362 }
2363
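// redirect() sets or clears kIOMemoryRedirected on the descriptor and forwards the
// request to every existing mapping; clearing it wakes any threads parked in
// handleFault() waiting for redirection to end.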
2364 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2365 {
2366 IOReturn err = kIOReturnSuccess;
2367 _IOMemoryMap * mapping = 0;
2368 OSIterator * iter;
2369
2370 LOCK;
2371
2372 if( doRedirect)
2373 _flags |= kIOMemoryRedirected;
2374 else
2375 _flags &= ~kIOMemoryRedirected;
2376
2377 do {
2378 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2379 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2380 mapping->redirect( safeTask, doRedirect );
2381
2382 iter->release();
2383 }
2384 } while( false );
2385
2386 if (!doRedirect)
2387 {
2388 WAKEUP;
2389 }
2390
2391 UNLOCK;
2392
2393 // temporary binary compatibility
2394 IOSubMemoryDescriptor * subMem;
2395 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2396 err = subMem->redirect( safeTask, doRedirect );
2397 else
2398 err = kIOReturnSuccess;
2399
2400 return( err );
2401 }
2402
2403 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2404 {
2405 return( _parent->redirect( safeTask, doRedirect ));
2406 }
2407
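// Per-mapping redirect: for a non-static mapping whose address map is not safeTask's,
// the pages are unmapped; when redirection ends on a physical-type descriptor the
// mapping is rebuilt in place with doMap(), and the redirect is also pushed down to the
// descriptor itself if its redirected state differs.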
2408 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2409 {
2410 IOReturn err = kIOReturnSuccess;
2411
2412 if( superMap) {
2413 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2414 } else {
2415
2416 LOCK;
2417 if( logical && addressMap
2418 && (!safeTask || (get_task_map(safeTask) != addressMap))
2419 && (0 == (options & kIOMapStatic)))
2420 {
2421 IOUnmapPages( addressMap, logical, length );
2422 if(!doRedirect && safeTask
2423 && ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical))
2424 {
2425 err = vm_deallocate( addressMap, logical, length );
2426 err = memory->doMap( addressMap, &logical,
2427 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2428 offset, length );
2429 } else
2430 err = kIOReturnSuccess;
2431 #ifdef DEBUG
2432 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
2433 #endif
2434 }
2435 UNLOCK;
2436 }
2437
2438 if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2439 && safeTask
2440 && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
2441 memory->redirect(safeTask, doRedirect);
2442
2443 return( err );
2444 }
2445
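// unmap() releases the VM mapping (unless this is a static mapping or a view onto a
// super-mapping) and drops the reference on the task's vm_map.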
2446 IOReturn _IOMemoryMap::unmap( void )
2447 {
2448 IOReturn err;
2449
2450 LOCK;
2451
2452 if( logical && addressMap && (0 == superMap)
2453 && (0 == (options & kIOMapStatic))) {
2454
2455 err = memory->doUnmap( addressMap, logical, length );
2456 vm_map_deallocate(addressMap);
2457 addressMap = 0;
2458
2459 } else
2460 err = kIOReturnSuccess;
2461
2462 logical = 0;
2463
2464 UNLOCK;
2465
2466 return( err );
2467 }
2468
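// taskDied() is called when the mapping's task terminates: the vm_map reference is
// dropped and the task/address state cleared, with no explicit unmap since the address
// space is being torn down with the task.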
2469 void _IOMemoryMap::taskDied( void )
2470 {
2471 LOCK;
2472 if( addressMap) {
2473 vm_map_deallocate(addressMap);
2474 addressMap = 0;
2475 }
2476 addressTask = 0;
2477 logical = 0;
2478 UNLOCK;
2479 }
2480
2481 // Overload the release mechanism. Every mapping must be a member
2482 // of its memory descriptor's _mappings set, so there are always
2483 // 2 references on a mapping. When either of these references
2484 // is released we need to free ourselves.
2485 void _IOMemoryMap::taggedRelease(const void *tag) const
2486 {
2487 LOCK;
2488 super::taggedRelease(tag, 2);
2489 UNLOCK;
2490 }
2491
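// free() unmaps the range, detaches the mapping from its descriptor's (and owner's)
// _mappings set, releases any super-mapping, and commits/releases the redirect UPL.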
2492 void _IOMemoryMap::free()
2493 {
2494 unmap();
2495
2496 if( memory) {
2497 LOCK;
2498 memory->removeMapping( this);
2499 UNLOCK;
2500 memory->release();
2501 }
2502
2503 if (owner && (owner != memory))
2504 {
2505 LOCK;
2506 owner->removeMapping(this);
2507 UNLOCK;
2508 }
2509
2510 if( superMap)
2511 superMap->release();
2512
2513 if (redirUPL) {
2514 upl_commit(redirUPL, NULL, 0);
2515 upl_deallocate(redirUPL);
2516 }
2517
2518 super::free();
2519 }
2520
2521 IOByteCount _IOMemoryMap::getLength()
2522 {
2523 return( length );
2524 }
2525
2526 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2527 {
2528 return( logical);
2529 }
2530
2531 task_t _IOMemoryMap::getAddressTask()
2532 {
2533 if( superMap)
2534 return( superMap->getAddressTask());
2535 else
2536 return( addressTask);
2537 }
2538
2539 IOOptionBits _IOMemoryMap::getMapOptions()
2540 {
2541 return( options);
2542 }
2543
2544 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2545 {
2546 return( memory );
2547 }
2548
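// copyCompatible() tries to satisfy a new mapping request with this existing mapping:
// the task, read-only/cache options and address must match and the requested range must
// lie within this one. It returns this mapping retained (or a new sub-range view of it),
// or 0 if the caller has to build a fresh mapping.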
2549 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2550 IOMemoryDescriptor * owner,
2551 task_t task,
2552 IOVirtualAddress toAddress,
2553 IOOptionBits _options,
2554 IOByteCount _offset,
2555 IOByteCount _length )
2556 {
2557 _IOMemoryMap * mapping;
2558
2559 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
2560 return( 0 );
2561 if( options & kIOMapUnique)
2562 return( 0 );
2563 if( (options ^ _options) & kIOMapReadOnly)
2564 return( 0 );
2565 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2566 && ((options ^ _options) & kIOMapCacheMask))
2567 return( 0 );
2568
2569 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2570 return( 0 );
2571
2572 if( _offset < offset)
2573 return( 0 );
2574
2575 _offset -= offset;
2576
2577 if( (_offset + _length) > length)
2578 return( 0 );
2579
2580 if( (length == _length) && (!_offset)) {
2581 retain();
2582 mapping = this;
2583
2584 } else {
2585 mapping = new _IOMemoryMap;
2586 if( mapping
2587 && !mapping->initCompatible( owner, this, _offset, _length )) {
2588 mapping->release();
2589 mapping = 0;
2590 }
2591 }
2592
2593 return( mapping );
2594 }
2595
2596 IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
2597 IOPhysicalLength * _length)
2598 {
2599 IOPhysicalAddress address;
2600
2601 LOCK;
2602 address = memory->getPhysicalSegment( offset + _offset, _length );
2603 UNLOCK;
2604
2605 return( address );
2606 }
2607
2608 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2609
2610 #undef super
2611 #define super OSObject
2612
2613 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2614
2615 void IOMemoryDescriptor::initialize( void )
2616 {
2617 if( 0 == gIOMemoryLock)
2618 gIOMemoryLock = IORecursiveLockAlloc();
2619
2620 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2621 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2622 }
2623
2624 void IOMemoryDescriptor::free( void )
2625 {
2626 if( _mappings)
2627 _mappings->release();
2628
2629 super::free();
2630 }
2631
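// setMapping() records an already-existing mapping of the descriptor at mapAddress in
// intoTask: the mapping is created with kIOMapStatic so no new VM state is set up.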
2632 IOMemoryMap * IOMemoryDescriptor::setMapping(
2633 task_t intoTask,
2634 IOVirtualAddress mapAddress,
2635 IOOptionBits options )
2636 {
2637 _IOMemoryMap * newMap;
2638
2639 newMap = new _IOMemoryMap;
2640
2641 LOCK;
2642
2643 if( newMap
2644 && !newMap->initWithDescriptor( this, intoTask, mapAddress,
2645 options | kIOMapStatic, 0, getLength() )) {
2646 newMap->release();
2647 newMap = 0;
2648 }
2649
2650 addMapping( newMap);
2651
2652 UNLOCK;
2653
2654 return( newMap);
2655 }
2656
2657 IOMemoryMap * IOMemoryDescriptor::map(
2658 IOOptionBits options )
2659 {
2660
2661 return( makeMapping( this, kernel_task, 0,
2662 options | kIOMapAnywhere,
2663 0, getLength() ));
2664 }
2665
2666 IOMemoryMap * IOMemoryDescriptor::map(
2667 task_t intoTask,
2668 IOVirtualAddress toAddress,
2669 IOOptionBits options,
2670 IOByteCount offset,
2671 IOByteCount length )
2672 {
2673 if( 0 == length)
2674 length = getLength();
2675
2676 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2677 }
2678
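// Retargets this mapping onto newBackingMemory: access is first blocked with a redirect
// UPL (and physical-type backing unmapped and redirected), then the new descriptor is
// mapped over the same range with kIOMapUnique|kIOMapReference and the UPL is released.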
2679 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
2680 IOOptionBits options,
2681 IOByteCount offset)
2682 {
2683 IOReturn err = kIOReturnSuccess;
2684 IOMemoryDescriptor * physMem = 0;
2685
2686 LOCK;
2687
2688 if (logical && addressMap) do
2689 {
2690 if ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2691 {
2692 physMem = memory;
2693 physMem->retain();
2694 }
2695
2696 if (!redirUPL)
2697 {
2698 vm_size_t size = length;
2699 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2700 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2701 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
2702 NULL, NULL,
2703 &flags))
2704 redirUPL = 0;
2705
2706 if (physMem)
2707 {
2708 IOUnmapPages( addressMap, logical, length );
2709 physMem->redirect(0, true);
2710 }
2711 }
2712
2713 if (newBackingMemory)
2714 {
2715 if (newBackingMemory != memory)
2716 {
2717 if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
2718 options | kIOMapUnique | kIOMapReference,
2719 offset, length))
2720 err = kIOReturnError;
2721 }
2722 if (redirUPL)
2723 {
2724 upl_commit(redirUPL, NULL, 0);
2725 upl_deallocate(redirUPL);
2726 redirUPL = 0;
2727 }
2728 if (physMem)
2729 physMem->redirect(0, false);
2730 }
2731 }
2732 while (false);
2733
2734 UNLOCK;
2735
2736 if (physMem)
2737 physMem->release();
2738
2739 return (err);
2740 }
2741
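// makeMapping() is the central factory for _IOMemoryMap objects.
// kIOMapUnique requests may first wrap a physical sub-range in a new descriptor;
// kIOMapReference requests redirect the _IOMemoryMap passed in toAddress onto that
// descriptor. Other requests are satisfied from a compatible existing mapping where
// possible; otherwise a new _IOMemoryMap is initialized, registered with its owner and
// returned.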
2742 IOMemoryMap * IOMemoryDescriptor::makeMapping(
2743 IOMemoryDescriptor * owner,
2744 task_t intoTask,
2745 IOVirtualAddress toAddress,
2746 IOOptionBits options,
2747 IOByteCount offset,
2748 IOByteCount length )
2749 {
2750 IOMemoryDescriptor * mapDesc = 0;
2751 _IOMemoryMap * mapping = 0;
2752 OSIterator * iter;
2753
2754 LOCK;
2755
2756 do
2757 {
2758 if (kIOMapUnique & options)
2759 {
2760 IOPhysicalAddress phys;
2761 IOByteCount physLen;
2762
2763 if (owner != this)
2764 continue;
2765
2766 if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2767 {
2768 phys = getPhysicalSegment(offset, &physLen);
2769 if (!phys || (physLen < length))
2770 continue;
2771
2772 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
2773 phys, length, _direction);
2774 if (!mapDesc)
2775 continue;
2776 offset = 0;
2777 }
2778 else
2779 {
2780 mapDesc = this;
2781 mapDesc->retain();
2782 }
2783
2784 if (kIOMapReference & options)
2785 {
2786 mapping = (_IOMemoryMap *) toAddress;
2787 mapping->retain();
2788
2789 #if 1
2790 uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
2791 pageOffset1 -= trunc_page_32( pageOffset1 );
2792
2793 uint32_t pageOffset2 = mapping->getVirtualAddress();
2794 pageOffset2 -= trunc_page_32( pageOffset2 );
2795
2796 if (pageOffset1 != pageOffset2)
2797 IOLog("::redirect can't map offset %x to addr %x\n",
2798 pageOffset1, mapping->getVirtualAddress());
2799 #endif
2800
2801
2802 if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2803 offset, length ))
2804 {
2805 #ifdef DEBUG
2806 IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
2807 #endif
2808 }
2809
2810 if (mapping->owner)
2811 mapping->owner->removeMapping(mapping);
2812 continue;
2813 }
2814 }
2815 else
2816 {
2817 // look for an existing mapping
2818 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2819
2820 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2821
2822 if( (mapping = mapping->copyCompatible(
2823 owner, intoTask, toAddress,
2824 options | kIOMapReference,
2825 offset, length )))
2826 break;
2827 }
2828 iter->release();
2829 }
2830
2831
2832 if (mapping)
2833 mapping->retain();
2834
2835 if( mapping || (options & kIOMapReference))
2836 continue;
2837
2838 mapDesc = owner;
2839 mapDesc->retain();
2840 }
2841 owner = this;
2842
2843 mapping = new _IOMemoryMap;
2844 if( mapping
2845 && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
2846 offset, length )) {
2847 #ifdef DEBUG
2848 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
2849 #endif
2850 mapping->release();
2851 mapping = 0;
2852 }
2853
2854 if (mapping)
2855 mapping->retain();
2856
2857 } while( false );
2858
2859 if (mapping)
2860 {
2861 mapping->owner = owner;
2862 owner->addMapping( mapping);
2863 mapping->release();
2864 }
2865
2866 UNLOCK;
2867
2868 if (mapDesc)
2869 mapDesc->release();
2870
2871 return( mapping);
2872 }
2873
2874 void IOMemoryDescriptor::addMapping(
2875 IOMemoryMap * mapping )
2876 {
2877 if( mapping) {
2878 if( 0 == _mappings)
2879 _mappings = OSSet::withCapacity(1);
2880 if( _mappings )
2881 _mappings->setObject( mapping );
2882 }
2883 }
2884
2885 void IOMemoryDescriptor::removeMapping(
2886 IOMemoryMap * mapping )
2887 {
2888 if( _mappings)
2889 _mappings->removeObject( mapping);
2890 }
2891
2892 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2893
2894 #undef super
2895 #define super IOMemoryDescriptor
2896
2897 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2898
2899 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2900
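// initSubRange() points this sub-descriptor at a range of 'parent', releasing any parent
// it previously referenced; the range must lie within the parent's length.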
2901 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2902 IOByteCount offset, IOByteCount length,
2903 IODirection direction )
2904 {
2905 if( !parent)
2906 return( false);
2907
2908 if( (offset + length) > parent->getLength())
2909 return( false);
2910
2911 /*
2912 * We can check the _parent instance variable before having ever set it
2913 * to an initial value because I/O Kit guarantees that all our instance
2914 * variables are zeroed on an object's allocation.
2915 */
2916
2917 if( !_parent) {
2918 if( !super::init())
2919 return( false );
2920 } else {
2921 /*
2922 * An existing memory descriptor is being retargeted to
2923 * point to somewhere else. Clean up our present state.
2924 */
2925
2926 _parent->release();
2927 _parent = 0;
2928 }
2929
2930 parent->retain();
2931 _parent = parent;
2932 _start = offset;
2933 _length = length;
2934 _direction = direction;
2935 _tag = parent->getTag();
2936
2937 return( true );
2938 }
2939
2940 void IOSubMemoryDescriptor::free( void )
2941 {
2942 if( _parent)
2943 _parent->release();
2944
2945 super::free();
2946 }
2947
2948
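// Physical segments are obtained from the parent at (_start + offset) and clipped so the
// returned length never extends past the end of the sub-range.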
2949 IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2950 IOByteCount * length )
2951 {
2952 IOPhysicalAddress address;
2953 IOByteCount actualLength;
2954
2955 assert(offset <= _length);
2956
2957 if( length)
2958 *length = 0;
2959
2960 if( offset >= _length)
2961 return( 0 );
2962
2963 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2964
2965 if( address && length)
2966 *length = min( _length - offset, actualLength );
2967
2968 return( address );
2969 }
2970
2971
2972 IOReturn IOSubMemoryDescriptor::doMap(
2973 vm_map_t addressMap,
2974 IOVirtualAddress * atAddress,
2975 IOOptionBits options,
2976 IOByteCount sourceOffset,
2977 IOByteCount length )
2978 {
2979 if( sourceOffset >= _length)
2980 return( kIOReturnOverrun );
2981 return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
2982 }
2983
2984 IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2985 IOByteCount * length )
2986 {
2987 IOPhysicalAddress address;
2988 IOByteCount actualLength;
2989
2990 assert(offset <= _length);
2991
2992 if( length)
2993 *length = 0;
2994
2995 if( offset >= _length)
2996 return( 0 );
2997
2998 address = _parent->getSourceSegment( offset + _start, &actualLength );
2999
3000 if( address && length)
3001 *length = min( _length - offset, actualLength );
3002
3003 return( address );
3004 }
3005
3006 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3007 IOByteCount * lengthOfSegment)
3008 {
3009 return( 0 );
3010 }
3011
3012 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3013 void * bytes, IOByteCount length)
3014 {
3015 IOByteCount byteCount;
3016
3017 assert(offset <= _length);
3018
3019 if( offset >= _length)
3020 return( 0 );
3021
3022 LOCK;
3023 byteCount = _parent->readBytes( _start + offset, bytes,
3024 min(length, _length - offset) );
3025 UNLOCK;
3026
3027 return( byteCount );
3028 }
3029
3030 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3031 const void* bytes, IOByteCount length)
3032 {
3033 IOByteCount byteCount;
3034
3035 assert(offset <= _length);
3036
3037 if( offset >= _length)
3038 return( 0 );
3039
3040 LOCK;
3041 byteCount = _parent->writeBytes( _start + offset, bytes,
3042 min(length, _length - offset) );
3043 UNLOCK;
3044
3045 return( byteCount );
3046 }
3047
3048 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3049 IOOptionBits * oldState )
3050 {
3051 IOReturn err;
3052
3053 LOCK;
3054 err = _parent->setPurgeable( newState, oldState );
3055 UNLOCK;
3056
3057 return( err );
3058 }
3059
3060 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3061 IOByteCount offset, IOByteCount length )
3062 {
3063 IOReturn err;
3064
3065 assert(offset <= _length);
3066
3067 if( offset >= _length)
3068 return( kIOReturnOverrun );
3069
3070 LOCK;
3071 err = _parent->performOperation( options, _start + offset,
3072 min(length, _length - offset) );
3073 UNLOCK;
3074
3075 return( err );
3076 }
3077
3078 IOReturn IOSubMemoryDescriptor::prepare(
3079 IODirection forDirection)
3080 {
3081 IOReturn err;
3082
3083 LOCK;
3084 err = _parent->prepare( forDirection);
3085 UNLOCK;
3086
3087 return( err );
3088 }
3089
3090 IOReturn IOSubMemoryDescriptor::complete(
3091 IODirection forDirection)
3092 {
3093 IOReturn err;
3094
3095 LOCK;
3096 err = _parent->complete( forDirection);
3097 UNLOCK;
3098
3099 return( err );
3100 }
3101
3102 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3103 IOMemoryDescriptor * owner,
3104 task_t intoTask,
3105 IOVirtualAddress toAddress,
3106 IOOptionBits options,
3107 IOByteCount offset,
3108 IOByteCount length )
3109 {
3110 IOMemoryMap * mapping = 0;
3111
3112 if (!(kIOMapUnique & options))
3113 mapping = (IOMemoryMap *) _parent->makeMapping(
3114 _parent, intoTask,
3115 toAddress - (_start + offset),
3116 options | kIOMapReference,
3117 _start + offset, length );
3118
3119 if( !mapping)
3120 mapping = (IOMemoryMap *) _parent->makeMapping(
3121 _parent, intoTask,
3122 toAddress,
3123 options, _start + offset, length );
3124
3125 if( !mapping)
3126 mapping = super::makeMapping( owner, intoTask, toAddress, options,
3127 offset, length );
3128
3129 return( mapping );
3130 }
3131
3132 /* ick - the plain IOMemoryDescriptor initializers make no sense for a sub-range view, so they all fail */
3133
3134 bool
3135 IOSubMemoryDescriptor::initWithAddress(void * address,
3136 IOByteCount length,
3137 IODirection direction)
3138 {
3139 return( false );
3140 }
3141
3142 bool
3143 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3144 IOByteCount length,
3145 IODirection direction,
3146 task_t task)
3147 {
3148 return( false );
3149 }
3150
3151 bool
3152 IOSubMemoryDescriptor::initWithPhysicalAddress(
3153 IOPhysicalAddress address,
3154 IOByteCount length,
3155 IODirection direction )
3156 {
3157 return( false );
3158 }
3159
3160 bool
3161 IOSubMemoryDescriptor::initWithRanges(
3162 IOVirtualRange * ranges,
3163 UInt32 withCount,
3164 IODirection direction,
3165 task_t task,
3166 bool asReference)
3167 {
3168 return( false );
3169 }
3170
3171 bool
3172 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3173 UInt32 withCount,
3174 IODirection direction,
3175 bool asReference)
3176 {
3177 return( false );
3178 }
3179
3180 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3181
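// serialize() emits the descriptor's ranges as an XML array of {address, length}
// dictionaries. The range data is copied into a local buffer while the lock is held so
// that the OSNumber/OSDictionary allocations can be done unlocked.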
3182 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3183 {
3184 OSSymbol const *keys[2];
3185 OSObject *values[2];
3186 struct SerData {
3187 user_addr_t address;
3188 user_size_t length;
3189 } *vcopy;
3190 unsigned int index, nRanges;
3191 bool result;
3192
3193 IOOptionBits type = _flags & kIOMemoryTypeMask;
3194
3195 if (s == NULL) return false;
3196 if (s->previouslySerialized(this)) return true;
3197
3198 // Pretend we are an array.
3199 if (!s->addXMLStartTag(this, "array")) return false;
3200
3201 nRanges = _rangesCount;
3202 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3203 if (vcopy == 0) return false;
3204
3205 keys[0] = OSSymbol::withCString("address");
3206 keys[1] = OSSymbol::withCString("length");
3207
3208 result = false;
3209 values[0] = values[1] = 0;
3210
3211 // From this point on, bail out via 'goto bail' on any error.
3212
3213 // Copy the volatile data so we don't have to allocate memory
3214 // while the lock is held.
3215 LOCK;
3216 if (nRanges == _rangesCount) {
3217 Ranges vec = _ranges;
3218 for (index = 0; index < nRanges; index++) {
3219 user_addr_t addr; IOByteCount len;
3220 getAddrLenForInd(addr, len, type, vec, index);
3221 vcopy[index].address = addr;
3222 vcopy[index].length = len;
3223 }
3224 } else {
3225 // The descriptor changed out from under us. Give up.
3226 UNLOCK;
3227 result = false;
3228 goto bail;
3229 }
3230 UNLOCK;
3231
3232 for (index = 0; index < nRanges; index++)
3233 {
3234 user_addr_t addr = vcopy[index].address;
3235 IOByteCount len = (IOByteCount) vcopy[index].length;
3236 values[0] =
3237 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3238 if (values[0] == 0) {
3239 result = false;
3240 goto bail;
3241 }
3242 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3243 if (values[1] == 0) {
3244 result = false;
3245 goto bail;
3246 }
3247 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3248 if (dict == 0) {
3249 result = false;
3250 goto bail;
3251 }
3252 values[0]->release();
3253 values[1]->release();
3254 values[0] = values[1] = 0;
3255
3256 result = dict->serialize(s);
3257 dict->release();
3258 if (!result) {
3259 goto bail;
3260 }
3261 }
3262 result = s->addXMLEndTag("array");
3263
3264 bail:
3265 if (values[0])
3266 values[0]->release();
3267 if (values[1])
3268 values[1]->release();
3269 if (keys[0])
3270 keys[0]->release();
3271 if (keys[1])
3272 keys[1]->release();
3273 if (vcopy)
3274 IOFree(vcopy, sizeof(SerData) * nRanges);
3275 return result;
3276 }
3277
3278 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3279 {
3280 if (!s) {
3281 return (false);
3282 }
3283 if (s->previouslySerialized(this)) return true;
3284
3285 // Pretend we are a dictionary.
3286 // We must duplicate the functionality of OSDictionary here
3287 // because otherwise object references will not work;
3288 // they are based on the value of the object passed to
3289 // previouslySerialized and addXMLStartTag.
3290
3291 if (!s->addXMLStartTag(this, "dict")) return false;
3292
3293 char const *keys[3] = {"offset", "length", "parent"};
3294
3295 OSObject *values[3];
3296 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3297 if (values[0] == 0)
3298 return false;
3299 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3300 if (values[1] == 0) {
3301 values[0]->release();
3302 return false;
3303 }
3304 values[2] = _parent;
3305
3306 bool result = true;
3307 for (int i=0; i<3; i++) {
3308 if (!s->addString("<key>") ||
3309 !s->addString(keys[i]) ||
3310 !s->addXMLEndTag("key") ||
3311 !values[i]->serialize(s)) {
3312 result = false;
3313 break;
3314 }
3315 }
3316 values[0]->release();
3317 values[1]->release();
3318 if (!result) {
3319 return false;
3320 }
3321
3322 return s->addXMLEndTag("dict");
3323 }
3324
3325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3326
3327 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3328 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3329 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3330 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3331 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3332 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
3333 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3334 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3335 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3336 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3337 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3338 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3339 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3340 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3341 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3342 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3343
3344 /* ex-inline function implementation */
3345 IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
3346 { return( getPhysicalSegment( 0, 0 )); }