[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp (blob 4c6d57e086105d9933157376bc49611487537f2b)
1 /*
2 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
36
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
43
44 #include <IOKit/IOKitDebug.h>
45
46 #include "IOKitKernelInternal.h"
47 #include "IOCopyMapper.h"
48
49 #include <libkern/c++/OSContainers.h>
50 #include <libkern/c++/OSDictionary.h>
51 #include <libkern/c++/OSArray.h>
52 #include <libkern/c++/OSSymbol.h>
53 #include <libkern/c++/OSNumber.h>
54
55 #include <sys/uio.h>
56
57 __BEGIN_DECLS
58 #include <vm/pmap.h>
59 #include <vm/vm_pageout.h>
60 #include <vm/vm_shared_memory_server.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <vm/vm_fault.h>
66
67 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
68 void ipc_port_release_send(ipc_port_t port);
69
70 /* Copy between a physical page and a virtual address in the given vm_map */
71 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
72
73 memory_object_t
74 device_pager_setup(
75 memory_object_t pager,
76 int device_handle,
77 vm_size_t size,
78 int flags);
79 void
80 device_pager_deallocate(
81 memory_object_t);
82 kern_return_t
83 device_pager_populate_object(
84 memory_object_t pager,
85 vm_object_offset_t offset,
86 ppnum_t phys_addr,
87 vm_size_t size);
88 kern_return_t
89 memory_object_iopl_request(
90 ipc_port_t port,
91 memory_object_offset_t offset,
92 vm_size_t *upl_size,
93 upl_t *upl_ptr,
94 upl_page_info_array_t user_page_list,
95 unsigned int *page_list_count,
96 int *flags);
97
98 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
99
100 __END_DECLS
101
102 #define kIOMaximumMappedIOByteCount (512*1024*1024)
103
104 static IOMapper * gIOSystemMapper = NULL;
105
106 IOCopyMapper * gIOCopyMapper = NULL;
107
108 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
109
110 ppnum_t gIOLastPage;
111
112 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
113
114 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
115
116 #define super IOMemoryDescriptor
117
118 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
119
120 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
121
122 static IORecursiveLock * gIOMemoryLock;
123
124 #define LOCK IORecursiveLockLock( gIOMemoryLock)
125 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
126 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
127 #define WAKEUP \
128 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
129
130 #if 0
131 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
132 #else
133 #define DEBG(fmt, args...) {}
134 #endif
135
136 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
137
138 class _IOMemoryMap : public IOMemoryMap
139 {
140 OSDeclareDefaultStructors(_IOMemoryMap)
141 public:
142 IOMemoryDescriptor * memory;
143 IOMemoryMap * superMap;
144 IOByteCount offset;
145 IOByteCount length;
146 IOVirtualAddress logical;
147 task_t addressTask;
148 vm_map_t addressMap;
149 IOOptionBits options;
150 upl_t redirUPL;
151 ipc_port_t redirEntry;
152 IOMemoryDescriptor * owner;
153
154 protected:
155 virtual void taggedRelease(const void *tag = 0) const;
156 virtual void free();
157
158 public:
159
160 // IOMemoryMap methods
161 virtual IOVirtualAddress getVirtualAddress();
162 virtual IOByteCount getLength();
163 virtual task_t getAddressTask();
164 virtual IOMemoryDescriptor * getMemoryDescriptor();
165 virtual IOOptionBits getMapOptions();
166
167 virtual IOReturn unmap();
168 virtual void taskDied();
169
170 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
171 IOOptionBits options,
172 IOByteCount offset = 0);
173
174 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
175 IOByteCount * length);
176
177 // for IOMemoryDescriptor use
178 _IOMemoryMap * copyCompatible(
179 IOMemoryDescriptor * owner,
180 task_t intoTask,
181 IOVirtualAddress toAddress,
182 IOOptionBits options,
183 IOByteCount offset,
184 IOByteCount length );
185
186 bool initCompatible(
187 IOMemoryDescriptor * memory,
188 IOMemoryMap * superMap,
189 IOByteCount offset,
190 IOByteCount length );
191
192 bool initWithDescriptor(
193 IOMemoryDescriptor * memory,
194 task_t intoTask,
195 IOVirtualAddress toAddress,
196 IOOptionBits options,
197 IOByteCount offset,
198 IOByteCount length );
199
200 IOReturn redirect(
201 task_t intoTask, bool redirect );
202 };
203
204 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
205
206 // Some data structures and accessor macros used by the initWithOptions
207 // function.
208
209 enum ioPLBlockFlags {
210 kIOPLOnDevice = 0x00000001,
211 kIOPLExternUPL = 0x00000002,
212 };
213
214 struct typePersMDData
215 {
216 const IOGeneralMemoryDescriptor *fMD;
217 ipc_port_t fMemEntry;
218 };
219
220 struct ioPLBlock {
221 upl_t fIOPL;
222 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
223 vm_offset_t fPageInfo; // Pointer to page list or index into it
224 ppnum_t fMappedBase; // Page number of first page in this iopl
225 unsigned int fPageOffset; // Offset within first page of iopl
226 unsigned int fFlags; // Flags
227 };
228
229 struct ioGMDData {
230 IOMapper *fMapper;
231 unsigned int fPageCnt;
232 upl_page_info_t fPageList[];
233 ioPLBlock fBlocks[];
234 };
235
236 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
237 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
238 #define getNumIOPL(osd, d) \
239 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
240 #define getPageList(d) (&(d->fPageList[0]))
241 #define computeDataSize(p, u) \
242 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
243
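/*
 * Editorial note (not part of the original source): the OSData buffer accessed
 * through these macros holds one ioGMDData header, then fPageCnt
 * upl_page_info_t entries, then the ioPLBlock records appended as UPLs are
 * created. For example, a descriptor spanning 3 pages with room for 2 iopls
 * would reserve
 *
 *     computeDataSize(3, 2) == sizeof(ioGMDData)
 *                              + 3 * sizeof(upl_page_info_t)
 *                              + 2 * sizeof(ioPLBlock)
 *
 * bytes, and getIOPLList() then points just past the page list within that
 * same buffer.
 */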
244
245 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
246
247 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
248
249
250 extern "C" {
251
252 kern_return_t device_data_action(
253 int device_handle,
254 ipc_port_t device_pager,
255 vm_prot_t protection,
256 vm_object_offset_t offset,
257 vm_size_t size)
258 {
259 struct ExpansionData {
260 void * devicePager;
261 unsigned int pagerContig:1;
262 unsigned int unused:31;
263 IOMemoryDescriptor * memory;
264 };
265 kern_return_t kr;
266 ExpansionData * ref = (ExpansionData *) device_handle;
267 IOMemoryDescriptor * memDesc;
268
269 LOCK;
270 memDesc = ref->memory;
271 if( memDesc)
272 {
273 memDesc->retain();
274 kr = memDesc->handleFault( device_pager, 0, 0,
275 offset, size, kIOMapDefaultCache /*?*/);
276 memDesc->release();
277 }
278 else
279 kr = KERN_ABORTED;
280 UNLOCK;
281
282 return( kr );
283 }
284
285 kern_return_t device_close(
286 int device_handle)
287 {
288 struct ExpansionData {
289 void * devicePager;
290 unsigned int pagerContig:1;
291 unsigned int unused:31;
292 IOMemoryDescriptor * memory;
293 };
294 ExpansionData * ref = (ExpansionData *) device_handle;
295
296 IODelete( ref, ExpansionData, 1 );
297
298 return( kIOReturnSuccess );
299 }
300 }; // end extern "C"
301
302 // Note this inline function uses C++ reference arguments to return values
303 // This means that pointers are not passed and NULLs don't have to be
304 // checked for, as a NULL reference is illegal.
305 static inline void
306 getAddrLenForInd(addr64_t &addr, IOPhysicalLength &len, // Output variables
307 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
308 {
309 assert(kIOMemoryTypeUIO == type
310 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
311 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
312 if (kIOMemoryTypeUIO == type) {
313 user_size_t us;
314 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
315 }
316 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
317 IOAddressRange cur = r.v64[ind];
318 addr = cur.address;
319 len = cur.length;
320 }
321 else {
322 IOVirtualRange cur = r.v[ind];
323 addr = cur.address;
324 len = cur.length;
325 }
326 }
327
328 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
329
330 /*
331 * withAddress:
332 *
333 * Create a new IOMemoryDescriptor. The buffer is a virtual address
334 * relative to the specified task. If no task is supplied, the kernel
335 * task is implied.
336 */
337 IOMemoryDescriptor *
338 IOMemoryDescriptor::withAddress(void * address,
339 IOByteCount length,
340 IODirection direction)
341 {
342 return IOMemoryDescriptor::
343 withAddress((vm_address_t) address, length, direction, kernel_task);
344 }
345
346 IOMemoryDescriptor *
347 IOMemoryDescriptor::withAddress(vm_address_t address,
348 IOByteCount length,
349 IODirection direction,
350 task_t task)
351 {
352 #if TEST_V64
353 if (task)
354 {
355 IOOptionBits options = (IOOptionBits) direction;
356 if (task == kernel_task)
357 options |= kIOMemoryAutoPrepare;
358 return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
359 }
360 #endif
361 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
362 if (that)
363 {
364 if (that->initWithAddress(address, length, direction, task))
365 return that;
366
367 that->release();
368 }
369 return 0;
370 }
371
372 IOMemoryDescriptor *
373 IOMemoryDescriptor::withPhysicalAddress(
374 IOPhysicalAddress address,
375 IOByteCount length,
376 IODirection direction )
377 {
378 #if TEST_P64
379 return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
380 #endif
381 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
382 if (self
383 && !self->initWithPhysicalAddress(address, length, direction)) {
384 self->release();
385 return 0;
386 }
387
388 return self;
389 }
390
391 IOMemoryDescriptor *
392 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
393 UInt32 withCount,
394 IODirection direction,
395 task_t task,
396 bool asReference)
397 {
398 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
399 if (that)
400 {
401 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
402 return that;
403
404 that->release();
405 }
406 return 0;
407 }
408
409 IOMemoryDescriptor *
410 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
411 mach_vm_size_t length,
412 IOOptionBits options,
413 task_t task)
414 {
415 IOAddressRange range = { address, length };
416 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
417 }
418
419 IOMemoryDescriptor *
420 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
421 UInt32 rangeCount,
422 IOOptionBits options,
423 task_t task)
424 {
425 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
426 if (that)
427 {
428 if (task)
429 options |= kIOMemoryTypeVirtual64;
430 else
431 options |= kIOMemoryTypePhysical64;
432
433 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
434 return that;
435
436 that->release();
437 }
438
439 return 0;
440 }
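/*
 * Illustrative sketch (editorial, not in the original source): a typical
 * driver-side use of the withAddressRange() factory above. The buffer name
 * and length are hypothetical and error handling is abbreviated.
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         (mach_vm_address_t) myKernelBuffer,   // hypothetical buffer address
 *         myBufferLength,                       // hypothetical byte count
 *         kIODirectionOut,                      // direction passed via options
 *         kernel_task);                         // task owning the range
 *     if (md) {
 *         // ... prepare(), use, complete() as needed ...
 *         md->release();
 *     }
 */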
441
442
443 /*
444  * withOptions:
445 *
446 * Create a new IOMemoryDescriptor. The buffer is made up of several
447 * virtual address ranges, from a given task.
448 *
449 * Passing the ranges as a reference will avoid an extra allocation.
450 */
451 IOMemoryDescriptor *
452 IOMemoryDescriptor::withOptions(void * buffers,
453 UInt32 count,
454 UInt32 offset,
455 task_t task,
456 IOOptionBits opts,
457 IOMapper * mapper)
458 {
459 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
460
461 if (self
462 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
463 {
464 self->release();
465 return 0;
466 }
467
468 return self;
469 }
470
471 // Can't be left abstract, but this should never be used directly.
472 bool IOMemoryDescriptor::initWithOptions(void * buffers,
473 UInt32 count,
474 UInt32 offset,
475 task_t task,
476 IOOptionBits options,
477 IOMapper * mapper)
478 {
479 // @@@ gvdl: Should I panic?
480 panic("IOMD::initWithOptions called\n");
481 return 0;
482 }
483
484 IOMemoryDescriptor *
485 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
486 UInt32 withCount,
487 IODirection direction,
488 bool asReference)
489 {
490 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
491 if (that)
492 {
493 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
494 return that;
495
496 that->release();
497 }
498 return 0;
499 }
500
501 IOMemoryDescriptor *
502 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
503 IOByteCount offset,
504 IOByteCount length,
505 IODirection direction)
506 {
507 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
508
509 if (self && !self->initSubRange(of, offset, length, direction)) {
510 self->release();
511 self = 0;
512 }
513 return self;
514 }
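/*
 * Illustrative sketch (editorial): carving a smaller descriptor out of an
 * existing one with withSubRange() above. The parent descriptor and offsets
 * are hypothetical.
 *
 *     IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *         parentMD,         // hypothetical existing descriptor
 *         PAGE_SIZE,        // offset into the parent
 *         PAGE_SIZE,        // length of the sub-range
 *         kIODirectionIn);
 *     if (sub) {
 *         // ... use sub like any other descriptor ...
 *         sub->release();
 *     }
 */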
515
516 IOMemoryDescriptor *
517 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
518 {
519 IOGeneralMemoryDescriptor *origGenMD =
520 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
521
522 if (origGenMD)
523 return IOGeneralMemoryDescriptor::
524 withPersistentMemoryDescriptor(origGenMD);
525 else
526 return 0;
527 }
528
529 IOMemoryDescriptor *
530 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
531 {
532 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
533
534 if (!sharedMem)
535 return 0;
536
537 if (sharedMem == originalMD->_memEntry) {
538 originalMD->retain(); // Add a new reference to ourselves
539 ipc_port_release_send(sharedMem); // Remove extra send right
540 return originalMD;
541 }
542
543 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
544 typePersMDData initData = { originalMD, sharedMem };
545
546 if (self
547 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
548 self->release();
549 self = 0;
550 }
551 return self;
552 }
553
554 void *IOGeneralMemoryDescriptor::createNamedEntry()
555 {
556 kern_return_t error;
557 ipc_port_t sharedMem;
558
559 IOOptionBits type = _flags & kIOMemoryTypeMask;
560
561 user_addr_t range0Addr;
562 IOByteCount range0Len;
563 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
564 range0Addr = trunc_page_64(range0Addr);
565
566 vm_size_t size = ptoa_32(_pages);
567 vm_address_t kernelPage = (vm_address_t) range0Addr;
568
569 vm_map_t theMap = ((_task == kernel_task)
570 && (kIOMemoryBufferPageable & _flags))
571 ? IOPageableMapForAddress(kernelPage)
572 : get_task_map(_task);
573
574 memory_object_size_t actualSize = size;
575 vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
576 if (_memEntry)
577 prot |= MAP_MEM_NAMED_REUSE;
578
579 error = mach_make_memory_entry_64(theMap,
580 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
581
582 if (KERN_SUCCESS == error) {
583 if (actualSize == size) {
584 return sharedMem;
585 } else {
586 #if IOASSERT
587 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
588 (UInt64)range0Addr, (UInt32)actualSize, size);
589 #endif
590 ipc_port_release_send( sharedMem );
591 }
592 }
593
594 return MACH_PORT_NULL;
595 }
596
597 /*
598 * initWithAddress:
599 *
600 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
601 * relative to the specified task. If no task is supplied, the kernel
602 * task is implied.
603 *
604 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
605 * initWithRanges again on an existing instance -- note this behavior
606 * is not commonly supported in other I/O Kit classes, although it is
607 * supported here.
608 */
609 bool
610 IOGeneralMemoryDescriptor::initWithAddress(void * address,
611 IOByteCount withLength,
612 IODirection withDirection)
613 {
614 _singleRange.v.address = (vm_address_t) address;
615 _singleRange.v.length = withLength;
616
617 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
618 }
619
620 bool
621 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
622 IOByteCount withLength,
623 IODirection withDirection,
624 task_t withTask)
625 {
626 _singleRange.v.address = address;
627 _singleRange.v.length = withLength;
628
629 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
630 }
631
632 bool
633 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
634 IOPhysicalAddress address,
635 IOByteCount withLength,
636 IODirection withDirection )
637 {
638 _singleRange.p.address = address;
639 _singleRange.p.length = withLength;
640
641 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
642 }
643
644 bool
645 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
646 IOPhysicalRange * ranges,
647 UInt32 count,
648 IODirection direction,
649 bool reference)
650 {
651 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
652
653 if (reference)
654 mdOpts |= kIOMemoryAsReference;
655
656 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
657 }
658
659 bool
660 IOGeneralMemoryDescriptor::initWithRanges(
661 IOVirtualRange * ranges,
662 UInt32 count,
663 IODirection direction,
664 task_t task,
665 bool reference)
666 {
667 IOOptionBits mdOpts = direction;
668
669 if (reference)
670 mdOpts |= kIOMemoryAsReference;
671
672 if (task) {
673 mdOpts |= kIOMemoryTypeVirtual;
674
675 // Auto-prepare if this is a kernel memory descriptor as very few
676 // clients bother to prepare() kernel memory.
677 // But it was not enforced so what are you going to do?
678 if (task == kernel_task)
679 mdOpts |= kIOMemoryAutoPrepare;
680 }
681 else
682 mdOpts |= kIOMemoryTypePhysical;
683
684 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
685 }
686
687 /*
688 * initWithOptions:
689 *
690  * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
691  * address ranges from a given task, several physical ranges, a UPL from the
692  * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
693 *
694 * Passing the ranges as a reference will avoid an extra allocation.
695 *
696 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
697 * existing instance -- note this behavior is not commonly supported in other
698 * I/O Kit classes, although it is supported here.
699 */
700
701 bool
702 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
703 UInt32 count,
704 UInt32 offset,
705 task_t task,
706 IOOptionBits options,
707 IOMapper * mapper)
708 {
709 IOOptionBits type = options & kIOMemoryTypeMask;
710
711 // Grab the original MD's configuration data to initialise the
712 // arguments to this function.
713 if (kIOMemoryTypePersistentMD == type) {
714
715 typePersMDData *initData = (typePersMDData *) buffers;
716 const IOGeneralMemoryDescriptor *orig = initData->fMD;
717 ioGMDData *dataP = getDataP(orig->_memoryEntries);
718
719 // Only accept persistent memory descriptors with valid dataP data.
720 assert(orig->_rangesCount == 1);
721 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
722 return false;
723
724 _memEntry = initData->fMemEntry; // Grab the new named entry
725 options = orig->_flags | kIOMemoryAsReference;
726 _singleRange = orig->_singleRange; // Initialise our range
727 buffers = &_singleRange;
728 count = 1;
729
730 // Now grab the original task and whatever mapper was previously used
731 task = orig->_task;
732 mapper = dataP->fMapper;
733
734 // We are ready to go through the original initialisation now
735 }
736
737 switch (type) {
738 case kIOMemoryTypeUIO:
739 case kIOMemoryTypeVirtual:
740 case kIOMemoryTypeVirtual64:
741 assert(task);
742 if (!task)
743 return false;
744 else
745 break;
746
747 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
748 case kIOMemoryTypePhysical64:
749 mapper = kIOMapperNone;
750
751 case kIOMemoryTypeUPL:
752 assert(!task);
753 break;
754 default:
755 return false; /* bad argument */
756 }
757
758 assert(buffers);
759 assert(count);
760
761 /*
762 * We can check the _initialized instance variable before having ever set
763 * it to an initial value because I/O Kit guarantees that all our instance
764 * variables are zeroed on an object's allocation.
765 */
766
767 if (_initialized) {
768 /*
769 * An existing memory descriptor is being retargeted to point to
770 * somewhere else. Clean up our present state.
771 */
772
773 while (_wireCount)
774 complete();
775 if (_ranges.v && _rangesIsAllocated)
776 {
777 if (kIOMemoryTypeUIO == type)
778 uio_free((uio_t) _ranges.v);
779 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
780 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
781 else
782 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
783 }
784 if (_memEntry)
785 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
786 }
787 else {
788 if (!super::init())
789 return false;
790 _initialized = true;
791 }
792
793 // Grab the appropriate mapper
794 if (mapper == kIOMapperNone)
795 mapper = 0; // No Mapper
796 else if (mapper == kIOMapperSystem) {
797 IOMapper::checkForSystemMapper();
798 gIOSystemMapper = mapper = IOMapper::gSystem;
799 }
800
801 // Remove the dynamic internal use flags from the initial setting
802 options &= ~(kIOMemoryPreparedReadOnly);
803 _flags = options;
804 _task = task;
805
806 // DEPRECATED variable initialisation
807 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
808
809 __iomd_reservedA = 0;
810 __iomd_reservedB = 0;
811 __iomd_reservedC = 0;
812
813 _highestPage = 0;
814
815 if (kIOMemoryTypeUPL == type) {
816
817 ioGMDData *dataP;
818 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
819
820 if (!_memoryEntries) {
821 _memoryEntries = OSData::withCapacity(dataSize);
822 if (!_memoryEntries)
823 return false;
824 }
825 else if (!_memoryEntries->initWithCapacity(dataSize))
826 return false;
827
828 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
829 dataP = getDataP(_memoryEntries);
830 dataP->fMapper = mapper;
831 dataP->fPageCnt = 0;
832
833 // _wireCount++; // UPLs start out life wired
834
835 _length = count;
836 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
837
838 ioPLBlock iopl;
839 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
840
841 iopl.fIOPL = (upl_t) buffers;
842 // Set the flag kIOPLOnDevice, conveniently equal to 1
843 iopl.fFlags = pageList->device | kIOPLExternUPL;
844 iopl.fIOMDOffset = 0;
845
846 _highestPage = upl_get_highest_page(iopl.fIOPL);
847
848 if (!pageList->device) {
849 // Pre-compute the offset into the UPL's page list
850 pageList = &pageList[atop_32(offset)];
851 offset &= PAGE_MASK;
852 if (mapper) {
853 iopl.fMappedBase = mapper->iovmAlloc(_pages);
854 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
855 }
856 else
857 iopl.fMappedBase = 0;
858 }
859 else
860 iopl.fMappedBase = 0;
861 iopl.fPageInfo = (vm_address_t) pageList;
862 iopl.fPageOffset = offset;
863
864 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
865 }
866 else {
867 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
868 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
869
870 // Initialize the memory descriptor
871 if (options & kIOMemoryAsReference) {
872 _rangesIsAllocated = false;
873
874 // Hack assignment to get the buffer arg into _ranges.
875 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
876 // work, C++ sigh.
877 // This also initialises the uio & physical ranges.
878 _ranges.v = (IOVirtualRange *) buffers;
879 }
880 else {
881 _rangesIsAllocated = true;
882 switch (_flags & kIOMemoryTypeMask)
883 {
884 case kIOMemoryTypeUIO:
885 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
886 break;
887
888 case kIOMemoryTypeVirtual64:
889 case kIOMemoryTypePhysical64:
890 _ranges.v64 = IONew(IOAddressRange, count);
891 if (!_ranges.v64)
892 return false;
893 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
894 break;
895 case kIOMemoryTypeVirtual:
896 _ranges.v = IONew(IOVirtualRange, count);
897 if (!_ranges.v)
898 return false;
899 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
900 break;
901 }
902 }
903
904 // Find starting address within the vector of ranges
905 Ranges vec = _ranges;
906 UInt32 length = 0;
907 UInt32 pages = 0;
908 for (unsigned ind = 0; ind < count; ind++) {
909 user_addr_t addr;
910 UInt32 len;
911
912 // addr & len are returned by this function
913 getAddrLenForInd(addr, len, type, vec, ind);
914 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
915 len += length;
916 assert(len >= length); // Check for 32 bit wrap around
917 length = len;
918
919 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
920 {
921 ppnum_t highPage = atop_64(addr + len - 1);
922 if (highPage > _highestPage)
923 _highestPage = highPage;
924 }
925 }
926 _length = length;
927 _pages = pages;
928 _rangesCount = count;
929
930 // Auto-prepare memory at creation time.
931 // Implied completion when the descriptor is freed
932 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
933 _wireCount++; // Physical MDs are, by definition, wired
934 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
935 ioGMDData *dataP;
936 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
937
938 if (!_memoryEntries) {
939 _memoryEntries = OSData::withCapacity(dataSize);
940 if (!_memoryEntries)
941 return false;
942 }
943 else if (!_memoryEntries->initWithCapacity(dataSize))
944 return false;
945
946 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
947 dataP = getDataP(_memoryEntries);
948 dataP->fMapper = mapper;
949 dataP->fPageCnt = _pages;
950
951 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
952 _memEntry = createNamedEntry();
953
954 if ((_flags & kIOMemoryAutoPrepare)
955 && prepare() != kIOReturnSuccess)
956 return false;
957 }
958 }
959
960 return true;
961 }
962
963 /*
964 * free
965 *
966 * Free resources.
967 */
968 void IOGeneralMemoryDescriptor::free()
969 {
970 LOCK;
971 if( reserved)
972 reserved->memory = 0;
973 UNLOCK;
974
975 while (_wireCount)
976 complete();
977 if (_memoryEntries)
978 _memoryEntries->release();
979
980 if (_ranges.v && _rangesIsAllocated)
981 {
982 IOOptionBits type = _flags & kIOMemoryTypeMask;
983 if (kIOMemoryTypeUIO == type)
984 uio_free((uio_t) _ranges.v);
985 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
986 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
987 else
988 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
989 }
990
991 if (reserved && reserved->devicePager)
992 device_pager_deallocate( (memory_object_t) reserved->devicePager );
993
994 // memEntry holds a ref on the device pager which owns reserved
995 // (ExpansionData), so reserved must not be accessed after this point
996 if (_memEntry)
997 ipc_port_release_send( (ipc_port_t) _memEntry );
998
999 super::free();
1000 }
1001
1002 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
1003 /* DEPRECATED */ {
1004 panic("IOGMD::unmapFromKernel deprecated");
1005 /* DEPRECATED */ }
1006 /* DEPRECATED */
1007 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1008 /* DEPRECATED */ {
1009 panic("IOGMD::mapIntoKernel deprecated");
1010 /* DEPRECATED */ }
1011
1012 /*
1013 * getDirection:
1014 *
1015 * Get the direction of the transfer.
1016 */
1017 IODirection IOMemoryDescriptor::getDirection() const
1018 {
1019 return _direction;
1020 }
1021
1022 /*
1023 * getLength:
1024 *
1025 * Get the length of the transfer (over all ranges).
1026 */
1027 IOByteCount IOMemoryDescriptor::getLength() const
1028 {
1029 return _length;
1030 }
1031
1032 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1033 {
1034 _tag = tag;
1035 }
1036
1037 IOOptionBits IOMemoryDescriptor::getTag( void )
1038 {
1039 return( _tag);
1040 }
1041
1042 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1043 IOPhysicalAddress
1044 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1045 {
1046 addr64_t physAddr = 0;
1047
1048 if( prepare() == kIOReturnSuccess) {
1049 physAddr = getPhysicalSegment64( offset, length );
1050 complete();
1051 }
1052
1053 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1054 }
1055
1056 IOByteCount IOMemoryDescriptor::readBytes
1057 (IOByteCount offset, void *bytes, IOByteCount length)
1058 {
1059 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
1060 IOByteCount remaining;
1061
1062 // Assert that this entire I/O is within the available range
1063 assert(offset < _length);
1064 assert(offset + length <= _length);
1065 if (offset >= _length) {
1066 IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1067 return 0;
1068 }
1069
1070 remaining = length = min(length, _length - offset);
1071 while (remaining) { // (process another target segment?)
1072 addr64_t srcAddr64;
1073 IOByteCount srcLen;
1074
1075 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
1076 if (!srcAddr64)
1077 break;
1078
1079 // Clip segment length to remaining
1080 if (srcLen > remaining)
1081 srcLen = remaining;
1082
1083 copypv(srcAddr64, dstAddr, srcLen,
1084 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1085
1086 dstAddr += srcLen;
1087 offset += srcLen;
1088 remaining -= srcLen;
1089 }
1090
1091 assert(!remaining);
1092
1093 return length - remaining;
1094 }
1095
1096 IOByteCount IOMemoryDescriptor::writeBytes
1097 (IOByteCount offset, const void *bytes, IOByteCount length)
1098 {
1099 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1100 IOByteCount remaining;
1101
1102 // Assert that this entire I/O is within the available range
1103 assert(offset < _length);
1104 assert(offset + length <= _length);
1105
1106 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1107
1108 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1109 IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl
1110 return 0;
1111 }
1112
1113 remaining = length = min(length, _length - offset);
1114 while (remaining) { // (process another target segment?)
1115 addr64_t dstAddr64;
1116 IOByteCount dstLen;
1117
1118 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1119 if (!dstAddr64)
1120 break;
1121
1122 // Clip segment length to remaining
1123 if (dstLen > remaining)
1124 dstLen = remaining;
1125
1126 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1127 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1128
1129 srcAddr += dstLen;
1130 offset += dstLen;
1131 remaining -= dstLen;
1132 }
1133
1134 assert(!remaining);
1135
1136 return length - remaining;
1137 }
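/*
 * Illustrative sketch (editorial): copying data through a descriptor with the
 * readBytes()/writeBytes() methods above. The descriptor 'md' and staging
 * buffer are hypothetical; both calls return the number of bytes copied.
 *
 *     UInt8       staging[64];
 *     IOByteCount copied;
 *
 *     copied = md->readBytes(0, staging, sizeof(staging));   // descriptor -> staging
 *     // ... examine or modify staging ...
 *     copied = md->writeBytes(0, staging, sizeof(staging));  // staging -> descriptor
 */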
1138
1139 // osfmk/device/iokit_rpc.c
1140 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1141
1142 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1143 /* DEPRECATED */ {
1144 panic("IOGMD::setPosition deprecated");
1145 /* DEPRECATED */ }
1146
1147 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1148 {
1149 if (kIOMDGetCharacteristics == op) {
1150
1151 if (dataSize < sizeof(IOMDDMACharacteristics))
1152 return kIOReturnUnderrun;
1153
1154 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1155 data->fLength = _length;
1156 data->fSGCount = _rangesCount;
1157 data->fPages = _pages;
1158 data->fDirection = _direction;
1159 if (!_wireCount)
1160 data->fIsPrepared = false;
1161 else {
1162 data->fIsPrepared = true;
1163 data->fHighestPage = _highestPage;
1164 if (_memoryEntries) {
1165 ioGMDData *gmdData = getDataP(_memoryEntries);
1166 ioPLBlock *ioplList = getIOPLList(gmdData);
1167 UInt count = getNumIOPL(_memoryEntries, gmdData);
1168
1169 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1170 && ioplList[0].fMappedBase);
1171 if (count == 1)
1172 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1173 }
1174 else
1175 data->fIsMapped = false;
1176 }
1177
1178 return kIOReturnSuccess;
1179 }
1180 else if (!(kIOMDWalkSegments & op))
1181 return kIOReturnBadArgument;
1182
1183 // Get the next segment
1184 struct InternalState {
1185 IOMDDMAWalkSegmentArgs fIO;
1186 UInt fOffset2Index;
1187 UInt fIndex;
1188 UInt fNextOffset;
1189 } *isP;
1190
1191 // Find the next segment
1192 if (dataSize < sizeof(*isP))
1193 return kIOReturnUnderrun;
1194
1195 isP = (InternalState *) vData;
1196 UInt offset = isP->fIO.fOffset;
1197 bool mapped = isP->fIO.fMapped;
1198
1199 if (offset >= _length)
1200 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1201
1202 // Validate the previous offset
1203 UInt ind, off2Ind = isP->fOffset2Index;
1204 if ((kIOMDFirstSegment != op)
1205 && offset
1206 && (offset == isP->fNextOffset || off2Ind <= offset))
1207 ind = isP->fIndex;
1208 else
1209 ind = off2Ind = 0; // Start from beginning
1210
1211 UInt length;
1212 UInt64 address;
1213 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1214
1215 // Physical address based memory descriptor
1216 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1217
1218 // Find the range after the one that contains the offset
1219 UInt len;
1220 for (len = 0; off2Ind <= offset; ind++) {
1221 len = physP[ind].length;
1222 off2Ind += len;
1223 }
1224
1225 // Calculate length within range and starting address
1226 length = off2Ind - offset;
1227 address = physP[ind - 1].address + len - length;
1228
1229 // see how far we can coalesce ranges
1230 while (ind < _rangesCount && address + length == physP[ind].address) {
1231 len = physP[ind].length;
1232 length += len;
1233 off2Ind += len;
1234 ind++;
1235 }
1236
1237 // correct contiguous check overshoot
1238 ind--;
1239 off2Ind -= len;
1240 }
1241 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1242
1243 // Physical address based memory descriptor
1244 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1245
1246 // Find the range after the one that contains the offset
1247 mach_vm_size_t len;
1248 for (len = 0; off2Ind <= offset; ind++) {
1249 len = physP[ind].length;
1250 off2Ind += len;
1251 }
1252
1253 // Calculate length within range and starting address
1254 length = off2Ind - offset;
1255 address = physP[ind - 1].address + len - length;
1256
1257 // see how far we can coalesce ranges
1258 while (ind < _rangesCount && address + length == physP[ind].address) {
1259 len = physP[ind].length;
1260 length += len;
1261 off2Ind += len;
1262 ind++;
1263 }
1264
1265 // correct contiguous check overshoot
1266 ind--;
1267 off2Ind -= len;
1268 }
1269 else do {
1270 if (!_wireCount)
1271 panic("IOGMD: not wired for the IODMACommand");
1272
1273 assert(_memoryEntries);
1274
1275 ioGMDData * dataP = getDataP(_memoryEntries);
1276 const ioPLBlock *ioplList = getIOPLList(dataP);
1277 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1278 upl_page_info_t *pageList = getPageList(dataP);
1279
1280 assert(numIOPLs > 0);
1281
1282 // Scan through iopl info blocks looking for block containing offset
1283 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1284 ind++;
1285
1286 // Go back to actual range as search goes past it
1287 ioPLBlock ioplInfo = ioplList[ind - 1];
1288 off2Ind = ioplInfo.fIOMDOffset;
1289
1290 if (ind < numIOPLs)
1291 length = ioplList[ind].fIOMDOffset;
1292 else
1293 length = _length;
1294 length -= offset; // Remainder within iopl
1295
1296 // Subtract this iopl's offset within the total list
1297 offset -= off2Ind;
1298
1299 // If a mapped address is requested and this is a pre-mapped IOPL
1300 // then we just need to compute an offset relative to the mapped base.
1301 if (mapped && ioplInfo.fMappedBase) {
1302 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1303 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1304 continue; // Done; leave the do/while(false) now
1305 }
1306
1307 // The offset is rebased into the current iopl.
1308 // Now add the iopl 1st page offset.
1309 offset += ioplInfo.fPageOffset;
1310
1311 // For external UPLs the fPageInfo field points directly to
1312 // the upl's upl_page_info_t array.
1313 if (ioplInfo.fFlags & kIOPLExternUPL)
1314 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1315 else
1316 pageList = &pageList[ioplInfo.fPageInfo];
1317
1318 // Check for direct device non-paged memory
1319 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1320 address = ptoa_64(pageList->phys_addr) + offset;
1321 continue; // Done; leave the do/while(false) now
1322 }
1323
1324 // Now we need to compute the index into the pageList
1325 UInt pageInd = atop_32(offset);
1326 offset &= PAGE_MASK;
1327
1328 // Compute the starting address of this segment
1329 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1330 address = ptoa_64(pageAddr) + offset;
1331
1332 // length is currently set to the length of the remainder of the iopl.
1333 // We need to check that the remainder of the iopl is contiguous.
1334 // This is indicated by pageList[pageInd].phys_addr being sequential.
1335 IOByteCount contigLength = PAGE_SIZE - offset;
1336 while (contigLength < length
1337 && ++pageAddr == pageList[++pageInd].phys_addr)
1338 {
1339 contigLength += PAGE_SIZE;
1340 }
1341
1342 if (contigLength < length)
1343 length = contigLength;
1344
1345
1346 assert(address);
1347 assert(length);
1348
1349 } while (false);
1350
1351 // Update return values and state
1352 isP->fIO.fIOVMAddr = address;
1353 isP->fIO.fLength = length;
1354 isP->fIndex = ind;
1355 isP->fOffset2Index = off2Ind;
1356 isP->fNextOffset = isP->fIO.fOffset + length;
1357
1358 return kIOReturnSuccess;
1359 }
1360
1361 addr64_t
1362 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1363 {
1364 IOReturn ret;
1365 IOByteCount length = 0;
1366 addr64_t address = 0;
1367
1368 if (offset < _length) // (within bounds?)
1369 {
1370 IOMDDMAWalkSegmentState _state;
1371 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1372
1373 state->fOffset = offset;
1374 state->fLength = _length - offset;
1375 state->fMapped = false;
1376
1377 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1378
1379 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1380 DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1381 ret, this, state->fOffset,
1382 state->fIOVMAddr, state->fLength);
1383 if (kIOReturnSuccess == ret)
1384 {
1385 address = state->fIOVMAddr;
1386 length = state->fLength;
1387 }
1388 if (!address)
1389 length = 0;
1390 }
1391
1392 if (lengthOfSegment)
1393 *lengthOfSegment = length;
1394
1395 return (address);
1396 }
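/*
 * Illustrative sketch (editorial): walking every physical segment of a
 * prepared descriptor with getPhysicalSegment64() above. Offsets at or past
 * the end return a zero address, which ends the loop. 'md' is hypothetical.
 *
 *     IOByteCount offset = 0;
 *     IOByteCount segLen;
 *     addr64_t    segAddr;
 *
 *     while ((segAddr = md->getPhysicalSegment64(offset, &segLen))) {
 *         // [segAddr, segAddr + segLen) is physically contiguous
 *         offset += segLen;
 *     }
 */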
1397
1398 IOPhysicalAddress
1399 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1400 {
1401 IOReturn ret;
1402 IOByteCount length = 0;
1403 addr64_t address = 0;
1404
1405 // assert(offset <= _length);
1406
1407 if (offset < _length) // (within bounds?)
1408 {
1409 IOMDDMAWalkSegmentState _state;
1410 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1411
1412 state->fOffset = offset;
1413 state->fLength = _length - offset;
1414 state->fMapped = true;
1415
1416 ret = dmaCommandOperation(
1417 kIOMDFirstSegment, _state, sizeof(_state));
1418
1419 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1420 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1421 ret, this, state->fOffset,
1422 state->fIOVMAddr, state->fLength);
1423 if (kIOReturnSuccess == ret)
1424 {
1425 address = state->fIOVMAddr;
1426 length = state->fLength;
1427 }
1428
1429 if (!address)
1430 length = 0;
1431 }
1432
1433 if ((address + length) > 0x100000000ULL)
1434 {
1435 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%x, class %s",
1436 address, length, (getMetaClass())->getClassName());
1437 }
1438
1439 if (lengthOfSegment)
1440 *lengthOfSegment = length;
1441
1442 return ((IOPhysicalAddress) address);
1443 }
1444
1445 addr64_t
1446 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1447 {
1448 IOPhysicalAddress phys32;
1449 IOByteCount length;
1450 addr64_t phys64;
1451 IOMapper * mapper = 0;
1452
1453 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1454 if (!phys32)
1455 return 0;
1456
1457 if (gIOSystemMapper)
1458 mapper = gIOSystemMapper;
1459
1460 if (mapper)
1461 {
1462 IOByteCount origLen;
1463
1464 phys64 = mapper->mapAddr(phys32);
1465 origLen = *lengthOfSegment;
1466 length = page_size - (phys64 & (page_size - 1));
1467 while ((length < origLen)
1468 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1469 length += page_size;
1470 if (length > origLen)
1471 length = origLen;
1472
1473 *lengthOfSegment = length;
1474 }
1475 else
1476 phys64 = (addr64_t) phys32;
1477
1478 return phys64;
1479 }
1480
1481 IOPhysicalAddress
1482 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1483 {
1484 IOPhysicalAddress address = 0;
1485 IOPhysicalLength length = 0;
1486 IOOptionBits type = _flags & kIOMemoryTypeMask;
1487
1488 assert(offset <= _length);
1489
1490 if ( type == kIOMemoryTypeUPL)
1491 return super::getSourceSegment( offset, lengthOfSegment );
1492 else if ( offset < _length ) // (within bounds?)
1493 {
1494 unsigned rangesIndex = 0;
1495 Ranges vec = _ranges;
1496 user_addr_t addr;
1497
1498 // Find starting address within the vector of ranges
1499 for (;;) {
1500 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1501 if (offset < length)
1502 break;
1503 offset -= length; // (make offset relative)
1504 rangesIndex++;
1505 }
1506
1507 // Now that we have the starting range,
1508 // lets find the last contiguous range
1509 addr += offset;
1510 length -= offset;
1511
1512 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1513 user_addr_t newAddr;
1514 IOPhysicalLength newLen;
1515
1516 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1517 if (addr + length != newAddr)
1518 break;
1519 length += newLen;
1520 }
1521 if (addr)
1522 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1523 else
1524 length = 0;
1525 }
1526
1527 if ( lengthOfSegment ) *lengthOfSegment = length;
1528
1529 return address;
1530 }
1531
1532 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1533 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1534 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1535 /* DEPRECATED */ {
1536 if (_task == kernel_task)
1537 return (void *) getSourceSegment(offset, lengthOfSegment);
1538 else
1539 panic("IOGMD::getVirtualSegment deprecated");
1540
1541 return 0;
1542 /* DEPRECATED */ }
1543 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1544
1545
1546
1547 IOReturn
1548 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1549 {
1550 if (kIOMDGetCharacteristics == op) {
1551 if (dataSize < sizeof(IOMDDMACharacteristics))
1552 return kIOReturnUnderrun;
1553
1554 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1555 data->fLength = getLength();
1556 data->fSGCount = 0;
1557 data->fDirection = _direction;
1558 if (IOMapper::gSystem)
1559 data->fIsMapped = true;
1560 data->fIsPrepared = true; // Assume prepared - fails safe
1561 }
1562 else if (kIOMDWalkSegments & op) {
1563 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1564 return kIOReturnUnderrun;
1565
1566 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1567 IOByteCount offset = (IOByteCount) data->fOffset;
1568
1569 IOPhysicalLength length;
1570 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1571 if (data->fMapped && IOMapper::gSystem)
1572 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1573 else
1574 data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
1575 data->fLength = length;
1576 }
1577 else
1578 return kIOReturnBadArgument;
1579
1580 return kIOReturnSuccess;
1581 }
1582
1583 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1584 IOOptionBits * oldState )
1585 {
1586 IOReturn err = kIOReturnSuccess;
1587 vm_purgable_t control;
1588 int state;
1589
1590 do
1591 {
1592 if (!_memEntry)
1593 {
1594 err = kIOReturnNotReady;
1595 break;
1596 }
1597
1598 control = VM_PURGABLE_SET_STATE;
1599 switch (newState)
1600 {
1601 case kIOMemoryPurgeableKeepCurrent:
1602 control = VM_PURGABLE_GET_STATE;
1603 break;
1604
1605 case kIOMemoryPurgeableNonVolatile:
1606 state = VM_PURGABLE_NONVOLATILE;
1607 break;
1608 case kIOMemoryPurgeableVolatile:
1609 state = VM_PURGABLE_VOLATILE;
1610 break;
1611 case kIOMemoryPurgeableEmpty:
1612 state = VM_PURGABLE_EMPTY;
1613 break;
1614 default:
1615 err = kIOReturnBadArgument;
1616 break;
1617 }
1618
1619 if (kIOReturnSuccess != err)
1620 break;
1621
1622 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1623
1624 if (oldState)
1625 {
1626 if (kIOReturnSuccess == err)
1627 {
1628 switch (state)
1629 {
1630 case VM_PURGABLE_NONVOLATILE:
1631 state = kIOMemoryPurgeableNonVolatile;
1632 break;
1633 case VM_PURGABLE_VOLATILE:
1634 state = kIOMemoryPurgeableVolatile;
1635 break;
1636 case VM_PURGABLE_EMPTY:
1637 state = kIOMemoryPurgeableEmpty;
1638 break;
1639 default:
1640 state = kIOMemoryPurgeableNonVolatile;
1641 err = kIOReturnNotReady;
1642 break;
1643 }
1644 *oldState = state;
1645 }
1646 }
1647 }
1648 while (false);
1649
1650 return (err);
1651 }
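/*
 * Illustrative sketch (editorial): using the purgeable states handled in
 * setPurgeable() above. The descriptor 'md' is hypothetical; the memory must
 * have a valid memory entry (_memEntry) or kIOReturnNotReady is returned.
 *
 *     IOOptionBits oldState;
 *
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *     // ... the VM system may now reclaim the pages under pressure ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState) {
 *         // the contents were discarded while volatile; regenerate them
 *     }
 */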
1652
1653 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1654 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1655
1656 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1657 IOByteCount offset, IOByteCount length )
1658 {
1659 IOByteCount remaining;
1660 void (*func)(addr64_t pa, unsigned int count) = 0;
1661
1662 switch (options)
1663 {
1664 case kIOMemoryIncoherentIOFlush:
1665 func = &dcache_incoherent_io_flush64;
1666 break;
1667 case kIOMemoryIncoherentIOStore:
1668 func = &dcache_incoherent_io_store64;
1669 break;
1670 }
1671
1672 if (!func)
1673 return (kIOReturnUnsupported);
1674
1675 remaining = length = min(length, getLength() - offset);
1676 while (remaining)
1677 // (process another target segment?)
1678 {
1679 addr64_t dstAddr64;
1680 IOByteCount dstLen;
1681
1682 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1683 if (!dstAddr64)
1684 break;
1685
1686 // Clip segment length to remaining
1687 if (dstLen > remaining)
1688 dstLen = remaining;
1689
1690 (*func)(dstAddr64, dstLen);
1691
1692 offset += dstLen;
1693 remaining -= dstLen;
1694 }
1695
1696 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1697 }
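/*
 * Illustrative sketch (editorial): cache maintenance over a descriptor via
 * performOperation() above, for devices that are not cache coherent. The
 * descriptor 'md' is hypothetical.
 *
 *     // push dirty cache lines to memory before a device reads the buffer
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *
 *     // flush the cached copy after a device has written the buffer
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */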
1698
1699 #ifdef __ppc__
1700 extern vm_offset_t static_memory_end;
1701 #define io_kernel_static_end static_memory_end
1702 #else
1703 extern vm_offset_t first_avail;
1704 #define io_kernel_static_end first_avail
1705 #endif
1706
1707 static kern_return_t
1708 io_get_kernel_static_upl(
1709 vm_map_t /* map */,
1710 vm_address_t offset,
1711 vm_size_t *upl_size,
1712 upl_t *upl,
1713 upl_page_info_array_t page_list,
1714 unsigned int *count,
1715 ppnum_t *highest_page)
1716 {
1717 unsigned int pageCount, page;
1718 ppnum_t phys;
1719 ppnum_t highestPage = 0;
1720
1721 pageCount = atop_32(*upl_size);
1722 if (pageCount > *count)
1723 pageCount = *count;
1724
1725 *upl = NULL;
1726
1727 for (page = 0; page < pageCount; page++)
1728 {
1729 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1730 if (!phys)
1731 break;
1732 page_list[page].phys_addr = phys;
1733 page_list[page].pageout = 0;
1734 page_list[page].absent = 0;
1735 page_list[page].dirty = 0;
1736 page_list[page].precious = 0;
1737 page_list[page].device = 0;
1738 if (phys > highestPage)
1739 highestPage = page;
1740 }
1741
1742 *highest_page = highestPage;
1743
1744 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1745 }
1746
1747 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1748 {
1749 IOOptionBits type = _flags & kIOMemoryTypeMask;
1750 IOReturn error = kIOReturnNoMemory;
1751 ioGMDData *dataP;
1752 ppnum_t mapBase = 0;
1753 IOMapper *mapper;
1754 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1755
1756 assert(!_wireCount);
1757 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1758
1759 if (_pages >= gIOMaximumMappedIOPageCount)
1760 return kIOReturnNoResources;
1761
1762 dataP = getDataP(_memoryEntries);
1763 mapper = dataP->fMapper;
1764 if (mapper && _pages)
1765 mapBase = mapper->iovmAlloc(_pages);
1766
1767 // Note that appendBytes(NULL) zeros the data up to the
1768 // desired length.
1769 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1770 dataP = 0; // May no longer be valid, so let's not get tempted.
1771
1772 if (forDirection == kIODirectionNone)
1773 forDirection = _direction;
1774
1775 int uplFlags; // This Mem Desc's default flags for upl creation
1776 switch (kIODirectionOutIn & forDirection)
1777 {
1778 case kIODirectionOut:
1779 // Pages do not need to be marked as dirty on commit
1780 uplFlags = UPL_COPYOUT_FROM;
1781 _flags |= kIOMemoryPreparedReadOnly;
1782 break;
1783
1784 case kIODirectionIn:
1785 default:
1786 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1787 break;
1788 }
1789 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1790
1791 #ifdef UPL_NEED_32BIT_ADDR
1792 if (kIODirectionPrepareToPhys32 & forDirection)
1793 uplFlags |= UPL_NEED_32BIT_ADDR;
1794 #endif
1795
1796 // Find the appropriate vm_map for the given task
1797 vm_map_t curMap;
1798 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1799 curMap = 0;
1800 else
1801 { curMap = get_task_map(_task); }
1802
1803 // Iterate over the vector of virtual ranges
1804 Ranges vec = _ranges;
1805 unsigned int pageIndex = 0;
1806 IOByteCount mdOffset = 0;
1807 ppnum_t highestPage = 0;
1808 for (UInt range = 0; range < _rangesCount; range++) {
1809 ioPLBlock iopl;
1810 user_addr_t startPage;
1811 IOByteCount numBytes;
1812 ppnum_t highPage = 0;
1813
1814 // Get the startPage address and length of vec[range]
1815 getAddrLenForInd(startPage, numBytes, type, vec, range);
1816 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1817 numBytes += iopl.fPageOffset;
1818 startPage = trunc_page_64(startPage);
1819
1820 if (mapper)
1821 iopl.fMappedBase = mapBase + pageIndex;
1822 else
1823 iopl.fMappedBase = 0;
1824
1825 // Iterate over the current range, creating UPLs
1826 while (numBytes) {
1827 dataP = getDataP(_memoryEntries);
1828 vm_address_t kernelStart = (vm_address_t) startPage;
1829 vm_map_t theMap;
1830 if (curMap)
1831 theMap = curMap;
1832 else if (!sharedMem) {
1833 assert(_task == kernel_task);
1834 theMap = IOPageableMapForAddress(kernelStart);
1835 }
1836 else
1837 theMap = NULL;
1838
1839 upl_page_info_array_t pageInfo = getPageList(dataP);
1840 int ioplFlags = uplFlags;
1841 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1842
1843 vm_size_t ioplSize = round_page_32(numBytes);
1844 unsigned int numPageInfo = atop_32(ioplSize);
1845
1846 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1847 error = io_get_kernel_static_upl(theMap,
1848 kernelStart,
1849 &ioplSize,
1850 &iopl.fIOPL,
1851 baseInfo,
1852 &numPageInfo,
1853 &highPage);
1854 }
1855 else if (sharedMem) {
1856 error = memory_object_iopl_request(sharedMem,
1857 ptoa_32(pageIndex),
1858 &ioplSize,
1859 &iopl.fIOPL,
1860 baseInfo,
1861 &numPageInfo,
1862 &ioplFlags);
1863 }
1864 else {
1865 assert(theMap);
1866 error = vm_map_create_upl(theMap,
1867 startPage,
1868 &ioplSize,
1869 &iopl.fIOPL,
1870 baseInfo,
1871 &numPageInfo,
1872 &ioplFlags);
1873 }
1874
1875 assert(ioplSize);
1876 if (error != KERN_SUCCESS)
1877 goto abortExit;
1878
1879 if (iopl.fIOPL)
1880 highPage = upl_get_highest_page(iopl.fIOPL);
1881 if (highPage > highestPage)
1882 highestPage = highPage;
1883
1884 error = kIOReturnNoMemory;
1885
1886 if (baseInfo->device) {
1887 numPageInfo = 1;
1888 iopl.fFlags = kIOPLOnDevice;
1889 // Don't translate device memory at all
1890 if (mapper && mapBase) {
1891 mapper->iovmFree(mapBase, _pages);
1892 mapBase = 0;
1893 iopl.fMappedBase = 0;
1894 }
1895 }
1896 else {
1897 iopl.fFlags = 0;
1898 if (mapper)
1899 mapper->iovmInsert(mapBase, pageIndex,
1900 baseInfo, numPageInfo);
1901 }
1902
1903 iopl.fIOMDOffset = mdOffset;
1904 iopl.fPageInfo = pageIndex;
1905
1906 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1907 {
1908 upl_commit(iopl.fIOPL, 0, 0);
1909 upl_deallocate(iopl.fIOPL);
1910 iopl.fIOPL = 0;
1911 }
1912
1913 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1914 // Clean up a partially created and unsaved iopl
1915 if (iopl.fIOPL) {
1916 upl_abort(iopl.fIOPL, 0);
1917 upl_deallocate(iopl.fIOPL);
1918 }
1919 goto abortExit;
1920 }
1921
1922 // Check for multiple iopls in one virtual range
1923 pageIndex += numPageInfo;
1924 mdOffset -= iopl.fPageOffset;
1925 if (ioplSize < numBytes) {
1926 numBytes -= ioplSize;
1927 startPage += ioplSize;
1928 mdOffset += ioplSize;
1929 iopl.fPageOffset = 0;
1930 if (mapper)
1931 iopl.fMappedBase = mapBase + pageIndex;
1932 }
1933 else {
1934 mdOffset += numBytes;
1935 break;
1936 }
1937 }
1938 }
1939
1940 _highestPage = highestPage;
1941
1942 return kIOReturnSuccess;
1943
1944 abortExit:
1945 {
1946 dataP = getDataP(_memoryEntries);
1947 UInt done = getNumIOPL(_memoryEntries, dataP);
1948 ioPLBlock *ioplList = getIOPLList(dataP);
1949
1950 for (UInt range = 0; range < done; range++)
1951 {
1952 if (ioplList[range].fIOPL) {
1953 upl_abort(ioplList[range].fIOPL, 0);
1954 upl_deallocate(ioplList[range].fIOPL);
1955 }
1956 }
1957 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1958
1959 if (mapper && mapBase)
1960 mapper->iovmFree(mapBase, _pages);
1961 }
1962
1963 return error;
1964 }
1965
1966 /*
1967 * prepare
1968 *
1969 * Prepare the memory for an I/O transfer. This involves paging in
1970 * the memory, if necessary, and wiring it down for the duration of
1971 * the transfer. The complete() method completes the processing of
1972 * the memory after the I/O transfer finishes. This method needn't
1973  * be called for non-pageable memory.
1974 */
1975 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
1976 {
1977 IOReturn error = kIOReturnSuccess;
1978 IOOptionBits type = _flags & kIOMemoryTypeMask;
1979
1980 if (!_wireCount
1981 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
1982 error = wireVirtual(forDirection);
1983 if (error)
1984 return error;
1985 }
1986
1987 _wireCount++;
1988
1989 return kIOReturnSuccess;
1990 }
1991
1992 /*
1993 * complete
1994 *
1995 * Complete processing of the memory after an I/O transfer finishes.
1996 * This method should not be called unless a prepare was previously
1997 * issued; the prepare() and complete() must occur in pairs, before
1998  * and after an I/O transfer involving pageable memory.
1999 */
2000
2001 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2002 {
2003 assert(_wireCount);
2004
2005 if (!_wireCount)
2006 return kIOReturnSuccess;
2007
2008 _wireCount--;
2009 if (!_wireCount) {
2010 IOOptionBits type = _flags & kIOMemoryTypeMask;
2011
2012 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
2013 /* kIOMemoryTypePhysical */
2014 // DO NOTHING
2015 }
2016 else {
2017 ioGMDData * dataP = getDataP(_memoryEntries);
2018 ioPLBlock *ioplList = getIOPLList(dataP);
2019 UInt count = getNumIOPL(_memoryEntries, dataP);
2020
2021 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2022 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2023
2024 // Only complete iopls that we created which are for TypeVirtual
2025 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2026 for (UInt ind = 0; ind < count; ind++)
2027 if (ioplList[ind].fIOPL) {
2028 upl_commit(ioplList[ind].fIOPL, 0, 0);
2029 upl_deallocate(ioplList[ind].fIOPL);
2030 }
2031 }
2032
2033 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2034 }
2035 }
2036 return kIOReturnSuccess;
2037 }
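
/*
 * A minimal usage sketch of the prepare()/complete() pairing described
 * above (illustrative only; performTransfer() and its error handling are
 * hypothetical and not part of this file):
 *
 *     IOReturn performTransfer(IOMemoryDescriptor * md)
 *     {
 *         IOReturn err = md->prepare(kIODirectionOutIn);  // page in & wire
 *         if (kIOReturnSuccess != err)
 *             return err;
 *         // ... run the I/O against the wired pages ...
 *         md->complete(kIODirectionOutIn);                // unwire afterwards
 *         return kIOReturnSuccess;
 *     }
 */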
2038
2039 IOReturn IOGeneralMemoryDescriptor::doMap(
2040 vm_map_t addressMap,
2041 IOVirtualAddress * atAddress,
2042 IOOptionBits options,
2043 IOByteCount sourceOffset,
2044 IOByteCount length )
2045 {
2046 kern_return_t kr;
2047 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2048
2049 IOOptionBits type = _flags & kIOMemoryTypeMask;
2050 Ranges vec = _ranges;
2051
2052 user_addr_t range0Addr = 0;
2053 IOByteCount range0Len = 0;
2054
2055 if (vec.v)
2056 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2057
2058 // mapping source == dest? (could be much better)
2059 if( _task
2060 && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2061 && (1 == _rangesCount) && (0 == sourceOffset)
2062 && range0Addr && (length <= range0Len) ) {
2063 if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
2064 return kIOReturnOverrun; // Doesn't fit in 32bit return field
2065 else {
2066 *atAddress = range0Addr;
2067 return( kIOReturnSuccess );
2068 }
2069 }
2070
2071 if( 0 == sharedMem) {
2072
2073 vm_size_t size = ptoa_32(_pages);
2074
2075 if( _task) {
2076
2077 memory_object_size_t actualSize = size;
2078 kr = mach_make_memory_entry_64(get_task_map(_task),
2079 &actualSize, range0Addr,
2080 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
2081 NULL );
2082
2083 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
2084 #if IOASSERT
2085 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2086 range0Addr, (UInt32) actualSize, size);
2087 #endif
2088 kr = kIOReturnVMError;
2089 ipc_port_release_send( sharedMem );
2090 }
2091
2092 if( KERN_SUCCESS != kr)
2093 sharedMem = MACH_PORT_NULL;
2094
2095 } else do { // _task == 0, must be physical
2096
2097 memory_object_t pager;
2098 unsigned int flags = 0;
2099 addr64_t pa;
2100 IOPhysicalLength segLen;
2101
2102 pa = getPhysicalSegment64( sourceOffset, &segLen );
2103
2104 if( !reserved) {
2105 reserved = IONew( ExpansionData, 1 );
2106 if( !reserved)
2107 continue;
2108 }
2109 reserved->pagerContig = (1 == _rangesCount);
2110 reserved->memory = this;
2111
2112 /* What cache mode do we need? */
2113 switch(options & kIOMapCacheMask ) {
2114
2115 case kIOMapDefaultCache:
2116 default:
2117 flags = IODefaultCacheBits(pa);
2118 break;
2119
2120 case kIOMapInhibitCache:
2121 flags = DEVICE_PAGER_CACHE_INHIB |
2122 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2123 break;
2124
2125 case kIOMapWriteThruCache:
2126 flags = DEVICE_PAGER_WRITE_THROUGH |
2127 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2128 break;
2129
2130 case kIOMapCopybackCache:
2131 flags = DEVICE_PAGER_COHERENT;
2132 break;
2133
2134 case kIOMapWriteCombineCache:
2135 flags = DEVICE_PAGER_CACHE_INHIB |
2136 DEVICE_PAGER_COHERENT;
2137 break;
2138 }
2139
2140 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2141
2142 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
2143 size, flags);
2144 assert( pager );
2145
2146 if( pager) {
2147 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2148 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2149
2150 assert( KERN_SUCCESS == kr );
2151 if( KERN_SUCCESS != kr) {
2152 device_pager_deallocate( pager );
2153 pager = MACH_PORT_NULL;
2154 sharedMem = MACH_PORT_NULL;
2155 }
2156 }
2157 if( pager && sharedMem)
2158 reserved->devicePager = pager;
2159 else {
2160 IODelete( reserved, ExpansionData, 1 );
2161 reserved = 0;
2162 }
2163
2164 } while( false );
2165
2166 _memEntry = (void *) sharedMem;
2167 }
2168
2169
2170 if( 0 == sharedMem)
2171 kr = kIOReturnVMError;
2172 else
2173 kr = super::doMap( addressMap, atAddress,
2174 options, sourceOffset, length );
2175
2176 return( kr );
2177 }
2178
2179 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2180 vm_map_t addressMap,
2181 IOVirtualAddress logical,
2182 IOByteCount length )
2183 {
2184 // could be much better
2185 if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {
2186
2187 IOOptionBits type = _flags & kIOMemoryTypeMask;
2188 user_addr_t range0Addr;
2189 IOByteCount range0Len;
2190
2191 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
2192 if (logical == range0Addr && length <= range0Len)
2193 return( kIOReturnSuccess );
2194 }
2195
2196 return( super::doUnmap( addressMap, logical, length ));
2197 }
2198
2199 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2200
2201 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
2202
2203 /* inline function implementation */
2204 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2205 { return( getPhysicalSegment( 0, 0 )); }
2206
2207
2208 #undef super
2209 #define super IOMemoryMap
2210
2211 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
2212
2213 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2214
2215 bool _IOMemoryMap::initCompatible(
2216 IOMemoryDescriptor * _memory,
2217 IOMemoryMap * _superMap,
2218 IOByteCount _offset,
2219 IOByteCount _length )
2220 {
2221
2222 if( !super::init())
2223 return( false);
2224
2225 if( (_offset + _length) > _superMap->getLength())
2226 return( false);
2227
2228 _memory->retain();
2229 memory = _memory;
2230 _superMap->retain();
2231 superMap = _superMap;
2232
2233 offset = _offset;
2234 if( _length)
2235 length = _length;
2236 else
2237 length = _memory->getLength();
2238
2239 options = superMap->getMapOptions();
2240 logical = superMap->getVirtualAddress() + offset;
2241
2242 return( true );
2243 }
2244
2245 bool _IOMemoryMap::initWithDescriptor(
2246 IOMemoryDescriptor * _memory,
2247 task_t intoTask,
2248 IOVirtualAddress toAddress,
2249 IOOptionBits _options,
2250 IOByteCount _offset,
2251 IOByteCount _length )
2252 {
2253 bool ok;
2254 bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));
2255
2256 if ((!_memory) || (!intoTask))
2257 return( false);
2258
2259 if( (_offset + _length) > _memory->getLength())
2260 return( false);
2261
2262 if (!redir)
2263 {
2264 if (!super::init())
2265 return(false);
2266 addressMap = get_task_map(intoTask);
2267 if( !addressMap)
2268 return( false);
2269 vm_map_reference(addressMap);
2270 addressTask = intoTask;
2271 logical = toAddress;
2272 options = _options;
2273 }
2274
2275 _memory->retain();
2276
2277 offset = _offset;
2278 if( _length)
2279 length = _length;
2280 else
2281 length = _memory->getLength();
2282
2283 if( options & kIOMapStatic)
2284 ok = true;
2285 else
2286 ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
2287 _options, offset, length ));
2288 if (ok || redir)
2289 {
2290 if (memory)
2291 memory->release();
2292 memory = _memory;
2293 logical = toAddress;
2294 }
2295 else
2296 {
2297 _memory->release();
2298 if (!redir)
2299 {
2300 logical = 0;
2301 memory = 0;
2302 vm_map_deallocate(addressMap);
2303 addressMap = 0;
2304 }
2305 }
2306
2307 return( ok );
2308 }
2309
2310 /* LP64todo - these need to expand */
2311 struct IOMemoryDescriptorMapAllocRef
2312 {
2313 ipc_port_t sharedMem;
2314 vm_size_t size;
2315 vm_offset_t mapped;
2316 IOByteCount sourceOffset;
2317 IOOptionBits options;
2318 };
2319
2320 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2321 {
2322 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2323 IOReturn err;
2324
2325 do {
2326 if( ref->sharedMem) {
2327 vm_prot_t prot = VM_PROT_READ
2328 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2329
2330 // set memory entry cache
2331 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2332 switch (ref->options & kIOMapCacheMask)
2333 {
2334 case kIOMapInhibitCache:
2335 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2336 break;
2337
2338 case kIOMapWriteThruCache:
2339 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2340 break;
2341
2342 case kIOMapWriteCombineCache:
2343 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2344 break;
2345
2346 case kIOMapCopybackCache:
2347 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2348 break;
2349
2350 case kIOMapDefaultCache:
2351 default:
2352 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2353 break;
2354 }
2355
2356 vm_size_t unused = 0;
2357
2358 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2359 memEntryCacheMode, NULL, ref->sharedMem );
2360 if (KERN_SUCCESS != err)
2361 IOLog("MAP_MEM_ONLY failed %d\n", err);
2362
2363 err = vm_map( map,
2364 &ref->mapped,
2365 ref->size, 0 /* mask */,
2366 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2367 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2368 ref->sharedMem, ref->sourceOffset,
2369 false, // copy
2370 prot, // cur
2371 prot, // max
2372 VM_INHERIT_NONE);
2373
2374 if( KERN_SUCCESS != err) {
2375 ref->mapped = 0;
2376 continue;
2377 }
2378
2379 } else {
2380
2381 err = vm_allocate( map, &ref->mapped, ref->size,
2382 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2383 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2384
2385 if( KERN_SUCCESS != err) {
2386 ref->mapped = 0;
2387 continue;
2388 }
2389
2390 // we have to make sure that these pages don't get copied if we fork.
2391 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2392 assert( KERN_SUCCESS == err );
2393 }
2394
2395 } while( false );
2396
2397 return( err );
2398 }
2399
2400
2401 IOReturn IOMemoryDescriptor::doMap(
2402 vm_map_t addressMap,
2403 IOVirtualAddress * atAddress,
2404 IOOptionBits options,
2405 IOByteCount sourceOffset,
2406 IOByteCount length )
2407 {
2408 IOReturn err = kIOReturnSuccess;
2409 memory_object_t pager;
2410 vm_address_t logical;
2411 IOByteCount pageOffset;
2412 IOPhysicalAddress sourceAddr;
2413 IOMemoryDescriptorMapAllocRef ref;
2414
2415 ref.sharedMem = (ipc_port_t) _memEntry;
2416 ref.sourceOffset = sourceOffset;
2417 ref.options = options;
2418
2419 do {
2420
2421 if( 0 == length)
2422 length = getLength();
2423
2424 sourceAddr = getSourceSegment( sourceOffset, NULL );
2425 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2426
2427 ref.size = round_page_32( length + pageOffset );
2428
2429 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2430 {
2431 upl_t redirUPL2;
2432 vm_size_t size;
2433 int flags;
2434
2435 _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
2436 ref.mapped = mapping->getVirtualAddress();
2437
2438 if (!_memEntry)
2439 {
2440 err = kIOReturnNotReadable;
2441 continue;
2442 }
2443
2444 size = length;
2445 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2446 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2447
2448 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2449 NULL, NULL,
2450 &flags))
2451 redirUPL2 = NULL;
2452
2453 err = upl_transpose(redirUPL2, mapping->redirUPL);
2454 if (kIOReturnSuccess != err)
2455 {
2456 IOLog("upl_transpose(%x)\n", err);
2457 err = kIOReturnSuccess;
2458 }
2459
2460 if (redirUPL2)
2461 {
2462 upl_commit(redirUPL2, NULL, 0);
2463 upl_deallocate(redirUPL2);
2464 redirUPL2 = 0;
2465 }
2466 {
2467 // swap the memEntries since they now refer to different vm_objects
2468 void * me = _memEntry;
2469 _memEntry = mapping->memory->_memEntry;
2470 mapping->memory->_memEntry = me;
2471 }
2472 }
2473 else
2474 {
2475
2476 logical = *atAddress;
2477 if( options & kIOMapAnywhere)
2478 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2479 ref.mapped = 0;
2480 else {
2481 ref.mapped = trunc_page_32( logical );
2482 if( (logical - ref.mapped) != pageOffset) {
2483 err = kIOReturnVMError;
2484 continue;
2485 }
2486 }
2487
2488 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2489 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2490 else
2491 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
2492 }
2493
2494 if( err != KERN_SUCCESS)
2495 continue;
2496
2497 if( reserved)
2498 pager = (memory_object_t) reserved->devicePager;
2499 else
2500 pager = MACH_PORT_NULL;
2501
2502 if( !ref.sharedMem || pager )
2503 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
2504
2505 } while( false );
2506
2507 if( err != KERN_SUCCESS) {
2508 if( ref.mapped)
2509 doUnmap( addressMap, ref.mapped, ref.size );
2510 *atAddress = NULL;
2511 } else
2512 *atAddress = ref.mapped + pageOffset;
2513
2514 return( err );
2515 }
2516
2517 enum {
2518 kIOMemoryRedirected = 0x00010000
2519 };
2520
2521 IOReturn IOMemoryDescriptor::handleFault(
2522 void * _pager,
2523 vm_map_t addressMap,
2524 IOVirtualAddress address,
2525 IOByteCount sourceOffset,
2526 IOByteCount length,
2527 IOOptionBits options )
2528 {
2529 IOReturn err = kIOReturnSuccess;
2530 memory_object_t pager = (memory_object_t) _pager;
2531 vm_size_t size;
2532 vm_size_t bytes;
2533 vm_size_t page;
2534 IOByteCount pageOffset;
2535 IOByteCount pagerOffset;
2536 IOPhysicalLength segLen;
2537 addr64_t physAddr;
2538
2539 if( !addressMap) {
2540
2541 if( kIOMemoryRedirected & _flags) {
2542 #ifdef DEBUG
2543 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
2544 #endif
2545 do {
2546 SLEEP;
2547 } while( kIOMemoryRedirected & _flags );
2548 }
2549
2550 return( kIOReturnSuccess );
2551 }
2552
2553 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2554 assert( physAddr );
2555 pageOffset = physAddr - trunc_page_64( physAddr );
2556 pagerOffset = sourceOffset;
2557
2558 size = length + pageOffset;
2559 physAddr -= pageOffset;
2560
2561 segLen += pageOffset;
2562 bytes = size;
2563 do {
2564 // in the middle of the loop only map whole pages
2565 if( segLen >= bytes)
2566 segLen = bytes;
2567 else if( segLen != trunc_page_32( segLen))
2568 err = kIOReturnVMError;
2569 if( physAddr != trunc_page_64( physAddr))
2570 err = kIOReturnBadArgument;
2571 if (kIOReturnSuccess != err)
2572 break;
2573
2574 #ifdef DEBUG
2575 if( kIOLogMapping & gIOKitDebug)
2576 IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
2577 addressMap, address + pageOffset, physAddr + pageOffset,
2578 segLen - pageOffset);
2579 #endif
2580
2581 if( pager) {
2582 if( reserved && reserved->pagerContig) {
2583 IOPhysicalLength allLen;
2584 addr64_t allPhys;
2585
2586 allPhys = getPhysicalSegment64( 0, &allLen );
2587 assert( allPhys );
2588 err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );
2589
2590 } else {
2591
2592 for( page = 0;
2593 (page < segLen) && (KERN_SUCCESS == err);
2594 page += page_size) {
2595 err = device_pager_populate_object(pager, pagerOffset,
2596 (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
2597 pagerOffset += page_size;
2598 }
2599 }
2600 assert( KERN_SUCCESS == err );
2601 if( err)
2602 break;
2603 }
2604
2605 /* *** ALERT *** */
2606 /* *** Temporary Workaround *** */
2607
2608 /* This call to vm_fault causes an early pmap level resolution */
2609 /* of the mappings created above. Need for this is in absolute */
2610 /* violation of the basic tenet that the pmap layer is a cache. */
2611 /* Further, it implies a serious I/O architectural violation on */
2612 /* the part of some user of the mapping. As of this writing, */
2613 /* the call to vm_fault is needed because the NVIDIA driver */
2614 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
2615 /* fixed as soon as possible. The NVIDIA driver should not */
2616 /* need to query for this info as it should know from the doMap */
2617 /* call where the physical memory is mapped. When a query is */
2618 /* necessary to find a physical mapping, it should be done */
2619 /* through an iokit call which includes the mapped memory */
2620 /* handle. This is required for machine architecture independence.*/
2621
2622 if(!(kIOMemoryRedirected & _flags)) {
2623 vm_fault(addressMap,
2624 (vm_map_offset_t)address,
2625 VM_PROT_READ|VM_PROT_WRITE,
2626 FALSE, THREAD_UNINT, NULL,
2627 (vm_map_offset_t)0);
2628 }
2629
2630 /* *** Temporary Workaround *** */
2631 /* *** ALERT *** */
2632
2633 sourceOffset += segLen - pageOffset;
2634 address += segLen;
2635 bytes -= segLen;
2636 pageOffset = 0;
2637
2638 } while( bytes
2639 && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2640
2641 if( bytes)
2642 err = kIOReturnBadArgument;
2643
2644 return( err );
2645 }
2646
2647 IOReturn IOMemoryDescriptor::doUnmap(
2648 vm_map_t addressMap,
2649 IOVirtualAddress logical,
2650 IOByteCount length )
2651 {
2652 IOReturn err;
2653
2654 #ifdef DEBUG
2655 if( kIOLogMapping & gIOKitDebug)
2656 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
2657 addressMap, logical, length );
2658 #endif
2659
2660 if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {
2661
2662 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2663 addressMap = IOPageableMapForAddress( logical );
2664
2665 err = vm_deallocate( addressMap, logical, length );
2666
2667 } else
2668 err = kIOReturnSuccess;
2669
2670 return( err );
2671 }
2672
2673 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2674 {
2675 IOReturn err = kIOReturnSuccess;
2676 _IOMemoryMap * mapping = 0;
2677 OSIterator * iter;
2678
2679 LOCK;
2680
2681 if( doRedirect)
2682 _flags |= kIOMemoryRedirected;
2683 else
2684 _flags &= ~kIOMemoryRedirected;
2685
2686 do {
2687 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2688 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2689 mapping->redirect( safeTask, doRedirect );
2690
2691 iter->release();
2692 }
2693 } while( false );
2694
2695 if (!doRedirect)
2696 {
2697 WAKEUP;
2698 }
2699
2700 UNLOCK;
2701
2702 // temporary binary compatibility
2703 IOSubMemoryDescriptor * subMem;
2704 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2705 err = subMem->redirect( safeTask, doRedirect );
2706 else
2707 err = kIOReturnSuccess;
2708
2709 return( err );
2710 }
2711
2712 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2713 {
2714 return( _parent->redirect( safeTask, doRedirect ));
2715 }
2716
2717 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2718 {
2719 IOReturn err = kIOReturnSuccess;
2720
2721 if( superMap) {
2722 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2723 } else {
2724
2725 LOCK;
2726
2727 do
2728 {
2729 if (!logical)
2730 break;
2731 if (!addressMap)
2732 break;
2733
2734 if ((!safeTask || (get_task_map(safeTask) != addressMap))
2735 && (0 == (options & kIOMapStatic)))
2736 {
2737 IOUnmapPages( addressMap, logical, length );
2738 if(!doRedirect && safeTask
2739 && (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2740 || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
2741 {
2742 err = vm_deallocate( addressMap, logical, length );
2743 err = memory->doMap( addressMap, &logical,
2744 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
2745 offset, length );
2746 } else
2747 err = kIOReturnSuccess;
2748 #ifdef DEBUG
2749 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
2750 #endif
2751 }
2752 else if (kIOMapWriteCombineCache == (options & kIOMapCacheMask))
2753 {
2754 IOOptionBits newMode;
2755 newMode = (options & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
2756 IOProtectCacheMode(addressMap, logical, length, newMode);
2757 }
2758 }
2759 while (false);
2760
2761 UNLOCK;
2762 }
2763
2764 if ((((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2765 || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
2766 && safeTask
2767 && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
2768 memory->redirect(safeTask, doRedirect);
2769
2770 return( err );
2771 }
2772
2773 IOReturn _IOMemoryMap::unmap( void )
2774 {
2775 IOReturn err;
2776
2777 LOCK;
2778
2779 if( logical && addressMap && (0 == superMap)
2780 && (0 == (options & kIOMapStatic))) {
2781
2782 err = memory->doUnmap( addressMap, logical, length );
2783 vm_map_deallocate(addressMap);
2784 addressMap = 0;
2785
2786 } else
2787 err = kIOReturnSuccess;
2788
2789 logical = 0;
2790
2791 UNLOCK;
2792
2793 return( err );
2794 }
2795
2796 void _IOMemoryMap::taskDied( void )
2797 {
2798 LOCK;
2799 if( addressMap) {
2800 vm_map_deallocate(addressMap);
2801 addressMap = 0;
2802 }
2803 addressTask = 0;
2804 logical = 0;
2805 UNLOCK;
2806 }
2807
2808 // Overload the release mechanism. All mappings must be a member
2809 // of a memory descriptor's _mappings set. This means that we
2810 // always have 2 references on a mapping. When either of these references
2811 // is released we need to free ourselves.
2812 void _IOMemoryMap::taggedRelease(const void *tag) const
2813 {
2814 LOCK;
2815 super::taggedRelease(tag, 2);
2816 UNLOCK;
2817 }
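
/*
 * Illustrative note (an assumption drawn from makeMapping()/addMapping()
 * later in this file): the two references mentioned above are the one
 * handed back to the caller of makeMapping()/map() and the one held by
 * the owning descriptor's _mappings set via addMapping().
 */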
2818
2819 void _IOMemoryMap::free()
2820 {
2821 unmap();
2822
2823 if( memory) {
2824 LOCK;
2825 memory->removeMapping( this);
2826 UNLOCK;
2827 memory->release();
2828 }
2829
2830 if (owner && (owner != memory))
2831 {
2832 LOCK;
2833 owner->removeMapping(this);
2834 UNLOCK;
2835 }
2836
2837 if( superMap)
2838 superMap->release();
2839
2840 if (redirUPL) {
2841 upl_commit(redirUPL, NULL, 0);
2842 upl_deallocate(redirUPL);
2843 }
2844
2845 super::free();
2846 }
2847
2848 IOByteCount _IOMemoryMap::getLength()
2849 {
2850 return( length );
2851 }
2852
2853 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2854 {
2855 return( logical);
2856 }
2857
2858 task_t _IOMemoryMap::getAddressTask()
2859 {
2860 if( superMap)
2861 return( superMap->getAddressTask());
2862 else
2863 return( addressTask);
2864 }
2865
2866 IOOptionBits _IOMemoryMap::getMapOptions()
2867 {
2868 return( options);
2869 }
2870
2871 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2872 {
2873 return( memory );
2874 }
2875
2876 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2877 IOMemoryDescriptor * owner,
2878 task_t task,
2879 IOVirtualAddress toAddress,
2880 IOOptionBits _options,
2881 IOByteCount _offset,
2882 IOByteCount _length )
2883 {
2884 _IOMemoryMap * mapping;
2885
2886 if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
2887 return( 0 );
2888 if( options & kIOMapUnique)
2889 return( 0 );
2890 if( (options ^ _options) & kIOMapReadOnly)
2891 return( 0 );
2892 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2893 && ((options ^ _options) & kIOMapCacheMask))
2894 return( 0 );
2895
2896 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
2897 return( 0 );
2898
2899 if( _offset < offset)
2900 return( 0 );
2901
2902 _offset -= offset;
2903
2904 if( (_offset + _length) > length)
2905 return( 0 );
2906
2907 if( (length == _length) && (!_offset)) {
2908 retain();
2909 mapping = this;
2910
2911 } else {
2912 mapping = new _IOMemoryMap;
2913 if( mapping
2914 && !mapping->initCompatible( owner, this, _offset, _length )) {
2915 mapping->release();
2916 mapping = 0;
2917 }
2918 }
2919
2920 return( mapping );
2921 }
2922
2923 IOPhysicalAddress
2924 _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
2925 {
2926 IOPhysicalAddress address;
2927
2928 LOCK;
2929 address = memory->getPhysicalSegment( offset + _offset, _length );
2930 UNLOCK;
2931
2932 return( address );
2933 }
2934
2935 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2936
2937 #undef super
2938 #define super OSObject
2939
2940 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2941
2942 void IOMemoryDescriptor::initialize( void )
2943 {
2944 if( 0 == gIOMemoryLock)
2945 gIOMemoryLock = IORecursiveLockAlloc();
2946
2947 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
2948 ptoa_64(gIOMaximumMappedIOPageCount), 64);
2949 if (!gIOCopyMapper)
2950 {
2951 IOMapper *
2952 mapper = new IOCopyMapper;
2953 if (mapper)
2954 {
2955 if (mapper->init() && mapper->start(NULL))
2956 gIOCopyMapper = (IOCopyMapper *) mapper;
2957 else
2958 mapper->release();
2959 }
2960 }
2961
2962 gIOLastPage = IOGetLastPageNumber();
2963 }
2964
2965 void IOMemoryDescriptor::free( void )
2966 {
2967 if( _mappings)
2968 _mappings->release();
2969
2970 super::free();
2971 }
2972
2973 IOMemoryMap * IOMemoryDescriptor::setMapping(
2974 task_t intoTask,
2975 IOVirtualAddress mapAddress,
2976 IOOptionBits options )
2977 {
2978 _IOMemoryMap * newMap;
2979
2980 newMap = new _IOMemoryMap;
2981
2982 LOCK;
2983
2984 if( newMap
2985 && !newMap->initWithDescriptor( this, intoTask, mapAddress,
2986 options | kIOMapStatic, 0, getLength() )) {
2987 newMap->release();
2988 newMap = 0;
2989 }
2990
2991 addMapping( newMap);
2992
2993 UNLOCK;
2994
2995 return( newMap);
2996 }
2997
2998 IOMemoryMap * IOMemoryDescriptor::map(
2999 IOOptionBits options )
3000 {
3001
3002 return( makeMapping( this, kernel_task, 0,
3003 options | kIOMapAnywhere,
3004 0, getLength() ));
3005 }
3006
3007 IOMemoryMap * IOMemoryDescriptor::map(
3008 task_t intoTask,
3009 IOVirtualAddress toAddress,
3010 IOOptionBits options,
3011 IOByteCount offset,
3012 IOByteCount length )
3013 {
3014 if( 0 == length)
3015 length = getLength();
3016
3017 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
3018 }
3019
3020 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3021 IOOptionBits options,
3022 IOByteCount offset)
3023 {
3024 IOReturn err = kIOReturnSuccess;
3025 IOMemoryDescriptor * physMem = 0;
3026
3027 LOCK;
3028
3029 if (logical && addressMap) do
3030 {
3031 if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3032 || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3033 {
3034 physMem = memory;
3035 physMem->retain();
3036 }
3037
3038 if (!redirUPL)
3039 {
3040 vm_size_t size = length;
3041 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3042 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3043 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
3044 NULL, NULL,
3045 &flags))
3046 redirUPL = 0;
3047
3048 if (physMem)
3049 {
3050 IOUnmapPages( addressMap, logical, length );
3051 physMem->redirect(0, true);
3052 }
3053 }
3054
3055 if (newBackingMemory)
3056 {
3057 if (newBackingMemory != memory)
3058 {
3059 if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
3060 options | kIOMapUnique | kIOMapReference,
3061 offset, length))
3062 err = kIOReturnError;
3063 }
3064 if (redirUPL)
3065 {
3066 upl_commit(redirUPL, NULL, 0);
3067 upl_deallocate(redirUPL);
3068 redirUPL = 0;
3069 }
3070 if (physMem)
3071 physMem->redirect(0, false);
3072 }
3073 }
3074 while (false);
3075
3076 UNLOCK;
3077
3078 if (physMem)
3079 physMem->release();
3080
3081 return (err);
3082 }
3083
3084 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3085 IOMemoryDescriptor * owner,
3086 task_t intoTask,
3087 IOVirtualAddress toAddress,
3088 IOOptionBits options,
3089 IOByteCount offset,
3090 IOByteCount length )
3091 {
3092 IOMemoryDescriptor * mapDesc = 0;
3093 _IOMemoryMap * mapping = 0;
3094 OSIterator * iter;
3095
3096 LOCK;
3097
3098 do
3099 {
3100 if (kIOMapUnique & options)
3101 {
3102 IOPhysicalAddress phys;
3103 IOByteCount physLen;
3104
3105 if (owner != this)
3106 continue;
3107
3108 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3109 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3110 {
3111 phys = getPhysicalSegment(offset, &physLen);
3112 if (!phys || (physLen < length))
3113 continue;
3114
3115 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
3116 phys, length, _direction);
3117 if (!mapDesc)
3118 continue;
3119 offset = 0;
3120 }
3121 else
3122 {
3123 mapDesc = this;
3124 mapDesc->retain();
3125 }
3126
3127 if (kIOMapReference & options)
3128 {
3129 mapping = (_IOMemoryMap *) toAddress;
3130 mapping->retain();
3131
3132 #if 1
3133 uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
3134 pageOffset1 -= trunc_page_32( pageOffset1 );
3135
3136 uint32_t pageOffset2 = mapping->getVirtualAddress();
3137 pageOffset2 -= trunc_page_32( pageOffset2 );
3138
3139 if (pageOffset1 != pageOffset2)
3140 IOLog("::redirect can't map offset %x to addr %x\n",
3141 pageOffset1, mapping->getVirtualAddress());
3142 #endif
3143
3144
3145 if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
3146 offset, length ))
3147 {
3148 #ifdef DEBUG
3149 IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
3150 #endif
3151 }
3152
3153 if (mapping->owner)
3154 mapping->owner->removeMapping(mapping);
3155 continue;
3156 }
3157 }
3158 else
3159 {
3160 // look for an existing mapping
3161 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
3162
3163 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
3164
3165 if( (mapping = mapping->copyCompatible(
3166 owner, intoTask, toAddress,
3167 options | kIOMapReference,
3168 offset, length )))
3169 break;
3170 }
3171 iter->release();
3172 }
3173
3174
3175 if (mapping)
3176 mapping->retain();
3177
3178 if( mapping || (options & kIOMapReference))
3179 continue;
3180
3181 mapDesc = owner;
3182 mapDesc->retain();
3183 }
3184 owner = this;
3185
3186 mapping = new _IOMemoryMap;
3187 if( mapping
3188 && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
3189 offset, length )) {
3190 #ifdef DEBUG
3191 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
3192 #endif
3193 mapping->release();
3194 mapping = 0;
3195 }
3196
3197 if (mapping)
3198 mapping->retain();
3199
3200 } while( false );
3201
3202 if (mapping)
3203 {
3204 mapping->owner = owner;
3205 owner->addMapping( mapping);
3206 mapping->release();
3207 }
3208
3209 UNLOCK;
3210
3211 if (mapDesc)
3212 mapDesc->release();
3213
3214 return( mapping);
3215 }
3216
3217 void IOMemoryDescriptor::addMapping(
3218 IOMemoryMap * mapping )
3219 {
3220 if( mapping) {
3221 if( 0 == _mappings)
3222 _mappings = OSSet::withCapacity(1);
3223 if( _mappings )
3224 _mappings->setObject( mapping );
3225 }
3226 }
3227
3228 void IOMemoryDescriptor::removeMapping(
3229 IOMemoryMap * mapping )
3230 {
3231 if( _mappings)
3232 _mappings->removeObject( mapping);
3233 }
3234
3235 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3236
3237 #undef super
3238 #define super IOMemoryDescriptor
3239
3240 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
3241
3242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3243
3244 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
3245 IOByteCount offset, IOByteCount length,
3246 IODirection direction )
3247 {
3248 if( !parent)
3249 return( false);
3250
3251 if( (offset + length) > parent->getLength())
3252 return( false);
3253
3254 /*
3255 * We can check the _parent instance variable before having ever set it
3256 * to an initial value because I/O Kit guarantees that all our instance
3257 * variables are zeroed on an object's allocation.
3258 */
3259
3260 if( !_parent) {
3261 if( !super::init())
3262 return( false );
3263 } else {
3264 /*
3265 * An existing memory descriptor is being retargeted to
3266 * point to somewhere else. Clean up our present state.
3267 */
3268
3269 _parent->release();
3270 _parent = 0;
3271 }
3272
3273 parent->retain();
3274 _parent = parent;
3275 _start = offset;
3276 _length = length;
3277 _direction = direction;
3278 _tag = parent->getTag();
3279
3280 return( true );
3281 }
3282
3283 void IOSubMemoryDescriptor::free( void )
3284 {
3285 if( _parent)
3286 _parent->release();
3287
3288 super::free();
3289 }
3290
3291
3292 IOReturn
3293 IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3294 {
3295 IOReturn rtn;
3296
3297 if (kIOMDGetCharacteristics == op) {
3298
3299 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3300 if (kIOReturnSuccess == rtn) {
3301 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3302 data->fLength = _length;
3303 data->fSGCount = 0; // XXX gvdl: need to compute the SG count and pages
3304 data->fPages = 0;
3305 data->fPageAlign = 0;
3306 }
3307
3308 return rtn;
3309 }
3310 else if (kIOMDWalkSegments & op) {
3311 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
3312 return kIOReturnUnderrun;
3313
3314 IOMDDMAWalkSegmentArgs *data =
3315 reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
3316 UInt offset = data->fOffset;
3317 UInt remain = _length - offset;
3318 if ((int) remain <= 0)
3319 return (!remain)? kIOReturnOverrun : kIOReturnInternalError;
3320
3321 data->fOffset = offset + _start;
3322 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3323 if (data->fLength > remain)
3324 data->fLength = remain;
3325 data->fOffset = offset;
3326
3327 return rtn;
3328 }
3329 else
3330 return kIOReturnBadArgument;
3331 }
3332
3333 addr64_t
3334 IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
3335 {
3336 addr64_t address;
3337 IOByteCount actualLength;
3338
3339 assert(offset <= _length);
3340
3341 if( length)
3342 *length = 0;
3343
3344 if( offset >= _length)
3345 return( 0 );
3346
3347 address = _parent->getPhysicalSegment64( offset + _start, &actualLength );
3348
3349 if( address && length)
3350 *length = min( _length - offset, actualLength );
3351
3352 return( address );
3353 }
3354
3355 IOPhysicalAddress
3356 IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
3357 {
3358 IOPhysicalAddress address;
3359 IOByteCount actualLength;
3360
3361 assert(offset <= _length);
3362
3363 if( length)
3364 *length = 0;
3365
3366 if( offset >= _length)
3367 return( 0 );
3368
3369 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
3370
3371 if( address && length)
3372 *length = min( _length - offset, actualLength );
3373
3374 return( address );
3375 }
3376
3377
3378 IOReturn IOSubMemoryDescriptor::doMap(
3379 vm_map_t addressMap,
3380 IOVirtualAddress * atAddress,
3381 IOOptionBits options,
3382 IOByteCount sourceOffset,
3383 IOByteCount length )
3384 {
3385 if( sourceOffset >= _length)
3386 return( kIOReturnOverrun );
3387 return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
3388 }
3389
3390 IOPhysicalAddress
3391 IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
3392 {
3393 IOPhysicalAddress address;
3394 IOByteCount actualLength;
3395
3396 assert(offset <= _length);
3397
3398 if( length)
3399 *length = 0;
3400
3401 if( offset >= _length)
3402 return( 0 );
3403
3404 address = _parent->getSourceSegment( offset + _start, &actualLength );
3405
3406 if( address && length)
3407 *length = min( _length - offset, actualLength );
3408
3409 return( address );
3410 }
3411
3412 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3413 IOByteCount * lengthOfSegment)
3414 {
3415 return( 0 );
3416 }
3417
3418 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3419 void * bytes, IOByteCount length)
3420 {
3421 IOByteCount byteCount;
3422
3423 assert(offset <= _length);
3424
3425 if( offset >= _length)
3426 return( 0 );
3427
3428 LOCK;
3429 byteCount = _parent->readBytes( _start + offset, bytes,
3430 min(length, _length - offset) );
3431 UNLOCK;
3432
3433 return( byteCount );
3434 }
3435
3436 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3437 const void* bytes, IOByteCount length)
3438 {
3439 IOByteCount byteCount;
3440
3441 assert(offset <= _length);
3442
3443 if( offset >= _length)
3444 return( 0 );
3445
3446 LOCK;
3447 byteCount = _parent->writeBytes( _start + offset, bytes,
3448 min(length, _length - offset) );
3449 UNLOCK;
3450
3451 return( byteCount );
3452 }
3453
3454 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3455 IOOptionBits * oldState )
3456 {
3457 IOReturn err;
3458
3459 LOCK;
3460 err = _parent->setPurgeable( newState, oldState );
3461 UNLOCK;
3462
3463 return( err );
3464 }
3465
3466 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3467 IOByteCount offset, IOByteCount length )
3468 {
3469 IOReturn err;
3470
3471 assert(offset <= _length);
3472
3473 if( offset >= _length)
3474 return( kIOReturnOverrun );
3475
3476 LOCK;
3477 err = _parent->performOperation( options, _start + offset,
3478 min(length, _length - offset) );
3479 UNLOCK;
3480
3481 return( err );
3482 }
3483
3484 IOReturn IOSubMemoryDescriptor::prepare(
3485 IODirection forDirection)
3486 {
3487 IOReturn err;
3488
3489 LOCK;
3490 err = _parent->prepare( forDirection);
3491 UNLOCK;
3492
3493 return( err );
3494 }
3495
3496 IOReturn IOSubMemoryDescriptor::complete(
3497 IODirection forDirection)
3498 {
3499 IOReturn err;
3500
3501 LOCK;
3502 err = _parent->complete( forDirection);
3503 UNLOCK;
3504
3505 return( err );
3506 }
3507
3508 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3509 IOMemoryDescriptor * owner,
3510 task_t intoTask,
3511 IOVirtualAddress toAddress,
3512 IOOptionBits options,
3513 IOByteCount offset,
3514 IOByteCount length )
3515 {
3516 IOMemoryMap * mapping = 0;
3517
3518 if (!(kIOMapUnique & options))
3519 mapping = (IOMemoryMap *) _parent->makeMapping(
3520 _parent, intoTask,
3521 toAddress - (_start + offset),
3522 options | kIOMapReference,
3523 _start + offset, length );
3524
3525 if( !mapping)
3526 mapping = (IOMemoryMap *) _parent->makeMapping(
3527 _parent, intoTask,
3528 toAddress,
3529 options, _start + offset, length );
3530
3531 if( !mapping)
3532 mapping = super::makeMapping( owner, intoTask, toAddress, options,
3533 offset, length );
3534
3535 return( mapping );
3536 }
3537
3538 /* ick */
3539
3540 bool
3541 IOSubMemoryDescriptor::initWithAddress(void * address,
3542 IOByteCount length,
3543 IODirection direction)
3544 {
3545 return( false );
3546 }
3547
3548 bool
3549 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3550 IOByteCount length,
3551 IODirection direction,
3552 task_t task)
3553 {
3554 return( false );
3555 }
3556
3557 bool
3558 IOSubMemoryDescriptor::initWithPhysicalAddress(
3559 IOPhysicalAddress address,
3560 IOByteCount length,
3561 IODirection direction )
3562 {
3563 return( false );
3564 }
3565
3566 bool
3567 IOSubMemoryDescriptor::initWithRanges(
3568 IOVirtualRange * ranges,
3569 UInt32 withCount,
3570 IODirection direction,
3571 task_t task,
3572 bool asReference)
3573 {
3574 return( false );
3575 }
3576
3577 bool
3578 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3579 UInt32 withCount,
3580 IODirection direction,
3581 bool asReference)
3582 {
3583 return( false );
3584 }
3585
3586 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3587
3588 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3589 {
3590 OSSymbol const *keys[2];
3591 OSObject *values[2];
3592 struct SerData {
3593 user_addr_t address;
3594 user_size_t length;
3595 } *vcopy;
3596 unsigned int index, nRanges;
3597 bool result;
3598
3599 IOOptionBits type = _flags & kIOMemoryTypeMask;
3600
3601 if (s == NULL) return false;
3602 if (s->previouslySerialized(this)) return true;
3603
3604 // Pretend we are an array.
3605 if (!s->addXMLStartTag(this, "array")) return false;
3606
3607 nRanges = _rangesCount;
3608 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3609 if (vcopy == 0) return false;
3610
3611 keys[0] = OSSymbol::withCString("address");
3612 keys[1] = OSSymbol::withCString("length");
3613
3614 result = false;
3615 values[0] = values[1] = 0;
3616
3617 // From this point on, any error exits through the bail: label below.
3618
3619 // Copy the volatile data so we don't have to allocate memory
3620 // while the lock is held.
3621 LOCK;
3622 if (nRanges == _rangesCount) {
3623 Ranges vec = _ranges;
3624 for (index = 0; index < nRanges; index++) {
3625 user_addr_t addr; IOByteCount len;
3626 getAddrLenForInd(addr, len, type, vec, index);
3627 vcopy[index].address = addr;
3628 vcopy[index].length = len;
3629 }
3630 } else {
3631 // The descriptor changed out from under us. Give up.
3632 UNLOCK;
3633 result = false;
3634 goto bail;
3635 }
3636 UNLOCK;
3637
3638 for (index = 0; index < nRanges; index++)
3639 {
3640 user_addr_t addr = vcopy[index].address;
3641 IOByteCount len = (IOByteCount) vcopy[index].length;
3642 values[0] =
3643 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3644 if (values[0] == 0) {
3645 result = false;
3646 goto bail;
3647 }
3648 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3649 if (values[1] == 0) {
3650 result = false;
3651 goto bail;
3652 }
3653 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3654 if (dict == 0) {
3655 result = false;
3656 goto bail;
3657 }
3658 values[0]->release();
3659 values[1]->release();
3660 values[0] = values[1] = 0;
3661
3662 result = dict->serialize(s);
3663 dict->release();
3664 if (!result) {
3665 goto bail;
3666 }
3667 }
3668 result = s->addXMLEndTag("array");
3669
3670 bail:
3671 if (values[0])
3672 values[0]->release();
3673 if (values[1])
3674 values[1]->release();
3675 if (keys[0])
3676 keys[0]->release();
3677 if (keys[1])
3678 keys[1]->release();
3679 if (vcopy)
3680 IOFree(vcopy, sizeof(SerData) * nRanges); // match the sizeof(SerData) allocation above
3681 return result;
3682 }
3683
3684 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3685 {
3686 if (!s) {
3687 return (false);
3688 }
3689 if (s->previouslySerialized(this)) return true;
3690
3691 // Pretend we are a dictionary.
3692 // We must duplicate the functionality of OSDictionary here
3693 // because otherwise object references will not work;
3694 // they are based on the value of the object passed to
3695 // previouslySerialized and addXMLStartTag.
3696
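// Roughly (illustrative only), the hand-rolled tags below emit a fragment
// shaped like:
//   <dict> <key>offset</key><integer>...</integer>
//          <key>length</key><integer>...</integer>
//          <key>parent</key>{ _parent's own serialization } </dict>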
3697 if (!s->addXMLStartTag(this, "dict")) return false;
3698
3699 char const *keys[3] = {"offset", "length", "parent"};
3700
3701 OSObject *values[3];
3702 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3703 if (values[0] == 0)
3704 return false;
3705 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3706 if (values[1] == 0) {
3707 values[0]->release();
3708 return false;
3709 }
3710 values[2] = _parent;
3711
3712 bool result = true;
3713 for (int i=0; i<3; i++) {
3714 if (!s->addString("<key>") ||
3715 !s->addString(keys[i]) ||
3716 !s->addXMLEndTag("key") ||
3717 !values[i]->serialize(s)) {
3718 result = false;
3719 break;
3720 }
3721 }
3722 values[0]->release();
3723 values[1]->release();
3724 if (!result) {
3725 return false;
3726 }
3727
3728 return s->addXMLEndTag("dict");
3729 }
3730
3731 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3732
3733 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3734 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3735 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3736 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3737 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3738 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3739 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3740 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3741 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3742 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3743 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3744 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3745 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3746 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3747 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3748 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3749
3750 /* ex-inline function implementation */
3751 IOPhysicalAddress
3752 IOMemoryDescriptor::getPhysicalAddress()
3753 { return( getPhysicalSegment( 0, 0 )); }
3754
3755
3756