1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
36
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
43
44 #include <IOKit/IOKitDebug.h>
45 #include <libkern/OSDebug.h>
46
47 #include "IOKitKernelInternal.h"
48 #include "IOCopyMapper.h"
49
50 #include <libkern/c++/OSContainers.h>
51 #include <libkern/c++/OSDictionary.h>
52 #include <libkern/c++/OSArray.h>
53 #include <libkern/c++/OSSymbol.h>
54 #include <libkern/c++/OSNumber.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <vm/vm_fault.h>
67 #include <vm/vm_protos.h>
68
69 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
70 void ipc_port_release_send(ipc_port_t port);
71
72 /* Copy between a physical page and a virtual address in the given vm_map */
73 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
74
75 memory_object_t
76 device_pager_setup(
77 memory_object_t pager,
78 int device_handle,
79 vm_size_t size,
80 int flags);
81 void
82 device_pager_deallocate(
83 memory_object_t);
84 kern_return_t
85 device_pager_populate_object(
86 memory_object_t pager,
87 vm_object_offset_t offset,
88 ppnum_t phys_addr,
89 vm_size_t size);
90 kern_return_t
91 memory_object_iopl_request(
92 ipc_port_t port,
93 memory_object_offset_t offset,
94 vm_size_t *upl_size,
95 upl_t *upl_ptr,
96 upl_page_info_array_t user_page_list,
97 unsigned int *page_list_count,
98 int *flags);
99
100 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
101
102 __END_DECLS
103
104 #define kIOMaximumMappedIOByteCount (512*1024*1024)
105
106 static IOMapper * gIOSystemMapper = NULL;
107
108 IOCopyMapper * gIOCopyMapper = NULL;
109
110 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
111
112 ppnum_t gIOLastPage;
113
114 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
115
116 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
117
118 #define super IOMemoryDescriptor
119
120 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
121
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123
124 static IORecursiveLock * gIOMemoryLock;
125
126 #define LOCK IORecursiveLockLock( gIOMemoryLock)
127 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
128 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
129 #define WAKEUP \
130 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
131
132 #if 0
133 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
134 #else
135 #define DEBG(fmt, args...) {}
136 #endif
137
138 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
139
140 class _IOMemoryMap : public IOMemoryMap
141 {
142 OSDeclareDefaultStructors(_IOMemoryMap)
143 public:
144 IOMemoryDescriptor * fMemory;
145 IOMemoryMap * fSuperMap;
146 mach_vm_size_t fOffset;
147 mach_vm_address_t fAddress;
148 mach_vm_size_t fLength;
149 task_t fAddressTask;
150 vm_map_t fAddressMap;
151 IOOptionBits fOptions;
152 upl_t fRedirUPL;
153 ipc_port_t fRedirEntry;
154 IOMemoryDescriptor * fOwner;
155
156 protected:
157 virtual void taggedRelease(const void *tag = 0) const;
158 virtual void free();
159
160 public:
161
162 // IOMemoryMap methods
163 virtual IOVirtualAddress getVirtualAddress();
164 virtual IOByteCount getLength();
165 virtual task_t getAddressTask();
166 virtual mach_vm_address_t getAddress();
167 virtual mach_vm_size_t getSize();
168 virtual IOMemoryDescriptor * getMemoryDescriptor();
169 virtual IOOptionBits getMapOptions();
170
171 virtual IOReturn unmap();
172 virtual void taskDied();
173
174 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
175 IOOptionBits options,
176 IOByteCount offset = 0);
177
178 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
179 IOOptionBits options,
180 mach_vm_size_t offset = 0);
181
182 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
183 IOByteCount * length);
184
185 // for IOMemoryDescriptor use
186 _IOMemoryMap * copyCompatible( _IOMemoryMap * newMapping );
187
188 bool init(
189 task_t intoTask,
190 mach_vm_address_t toAddress,
191 IOOptionBits options,
192 mach_vm_size_t offset,
193 mach_vm_size_t length );
194
195 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
196
197 IOReturn redirect(
198 task_t intoTask, bool redirect );
199 };
200
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
202
203 // Some data structures and accessor macros used by the initWithOptions
204 // function.
205
206 enum ioPLBlockFlags {
207 kIOPLOnDevice = 0x00000001,
208 kIOPLExternUPL = 0x00000002,
209 };
210
211 struct typePersMDData
212 {
213 const IOGeneralMemoryDescriptor *fMD;
214 ipc_port_t fMemEntry;
215 };
216
217 struct ioPLBlock {
218 upl_t fIOPL;
219 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
220 vm_offset_t fPageInfo; // Pointer to page list or index into it
221 ppnum_t fMappedBase; // Page number of first page in this iopl
222 unsigned int fPageOffset; // Offset within first page of iopl
223 unsigned int fFlags; // Flags
224 };
225
226 struct ioGMDData {
227 IOMapper *fMapper;
228 unsigned int fPageCnt;
229 upl_page_info_t fPageList[];
230 ioPLBlock fBlocks[];
231 };
232
233 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
234 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
235 #define getNumIOPL(osd, d) \
236 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
237 #define getPageList(d) (&(d->fPageList[0]))
238 #define computeDataSize(p, u) \
239 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
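
/*
 * Illustrative layout sketch (added commentary, not part of the original
 * source): _memoryEntries is an OSData whose buffer holds one ioGMDData
 * header, then fPageCnt upl_page_info_t entries, then a variable number of
 * ioPLBlock records appended as UPLs are created.  The pattern used later in
 * initWithOptions()/wireVirtual() looks roughly like:
 *
 *     unsigned int dataSize = computeDataSize(_pages, count * 2);   // worst case
 *     OSData * entries = OSData::withCapacity(dataSize);
 *     entries->appendBytes(0, sizeof(ioGMDData));     // header only, zero-filled
 *     ioGMDData * dataP = getDataP(entries);
 *     dataP->fMapper  = mapper;
 *     dataP->fPageCnt = _pages;
 *     // page info is appended next, then each ioPLBlock via appendBytes(&iopl, ...);
 *     // getIOPLList(dataP) points just past the page list, and getNumIOPL()
 *     // derives the block count from the OSData length.
 */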
240
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
245
246
247 extern "C" {
248
249 kern_return_t device_data_action(
250 int device_handle,
251 ipc_port_t device_pager,
252 vm_prot_t protection,
253 vm_object_offset_t offset,
254 vm_size_t size)
255 {
256 struct ExpansionData {
257 void * devicePager;
258 unsigned int pagerContig:1;
259 unsigned int unused:31;
260 IOMemoryDescriptor * memory;
261 };
262 kern_return_t kr;
263 ExpansionData * ref = (ExpansionData *) device_handle;
264 IOMemoryDescriptor * memDesc;
265
266 LOCK;
267 memDesc = ref->memory;
268 if( memDesc)
269 {
270 memDesc->retain();
271 kr = memDesc->handleFault( device_pager, 0, 0,
272 offset, size, kIOMapDefaultCache /*?*/);
273 memDesc->release();
274 }
275 else
276 kr = KERN_ABORTED;
277 UNLOCK;
278
279 return( kr );
280 }
281
282 kern_return_t device_close(
283 int device_handle)
284 {
285 struct ExpansionData {
286 void * devicePager;
287 unsigned int pagerContig:1;
288 unsigned int unused:31;
289 IOMemoryDescriptor * memory;
290 };
291 ExpansionData * ref = (ExpansionData *) device_handle;
292
293 IODelete( ref, ExpansionData, 1 );
294
295 return( kIOReturnSuccess );
296 }
297 }; // end extern "C"
298
299 // Note this inline function uses C++ reference arguments to return values.
300 // This means that pointers are not passed and NULLs don't have to be
301 // checked for, as a NULL reference is illegal.
302 static inline void
303 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
304 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
305 {
306 assert(kIOMemoryTypeUIO == type
307 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
308 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
309 if (kIOMemoryTypeUIO == type) {
310 user_size_t us;
311 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
312 }
313 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
314 IOAddressRange cur = r.v64[ind];
315 addr = cur.address;
316 len = cur.length;
317 }
318 else {
319 IOVirtualRange cur = r.v[ind];
320 addr = cur.address;
321 len = cur.length;
322 }
323 }
324
325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
326
327 /*
328 * withAddress:
329 *
330 * Create a new IOMemoryDescriptor. The buffer is a virtual address
331 * relative to the specified task. If no task is supplied, the kernel
332 * task is implied.
333 */
334 IOMemoryDescriptor *
335 IOMemoryDescriptor::withAddress(void * address,
336 IOByteCount length,
337 IODirection direction)
338 {
339 return IOMemoryDescriptor::
340 withAddress((vm_address_t) address, length, direction, kernel_task);
341 }
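
/*
 * Illustrative usage (hypothetical driver code, not in the original source):
 * wrap a kernel buffer so it can be handed to the I/O system.  kIODirectionOut
 * means the buffer is the source of an outbound transfer.
 *
 *     void * buf = IOMalloc(4096);
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
 *     if (md) {
 *         // kernel_task is implied, and kernel descriptors are auto-prepared
 *         // (see initWithRanges below).
 *         md->release();
 *     }
 *     IOFree(buf, 4096);
 */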
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withAddress(vm_address_t address,
345 IOByteCount length,
346 IODirection direction,
347 task_t task)
348 {
349 #if TEST_V64
350 if (task)
351 {
352 IOOptionBits options = (IOOptionBits) direction;
353 if (task == kernel_task)
354 options |= kIOMemoryAutoPrepare;
355 return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
356 }
357 #endif
358 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
359 if (that)
360 {
361 if (that->initWithAddress(address, length, direction, task))
362 return that;
363
364 that->release();
365 }
366 return 0;
367 }
368
369 IOMemoryDescriptor *
370 IOMemoryDescriptor::withPhysicalAddress(
371 IOPhysicalAddress address,
372 IOByteCount length,
373 IODirection direction )
374 {
375 #if TEST_P64
376 return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
377 #endif
378 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
379 if (self
380 && !self->initWithPhysicalAddress(address, length, direction)) {
381 self->release();
382 return 0;
383 }
384
385 return self;
386 }
387
388 IOMemoryDescriptor *
389 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
390 UInt32 withCount,
391 IODirection direction,
392 task_t task,
393 bool asReference)
394 {
395 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
396 if (that)
397 {
398 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
399 return that;
400
401 that->release();
402 }
403 return 0;
404 }
405
406 IOMemoryDescriptor *
407 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
408 mach_vm_size_t length,
409 IOOptionBits options,
410 task_t task)
411 {
412 IOAddressRange range = { address, length };
413 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
414 }
415
416 IOMemoryDescriptor *
417 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
418 UInt32 rangeCount,
419 IOOptionBits options,
420 task_t task)
421 {
422 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
423 if (that)
424 {
425 if (task)
426 options |= kIOMemoryTypeVirtual64;
427 else
428 options |= kIOMemoryTypePhysical64;
429
430 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
431 return that;
432
433 that->release();
434 }
435
436 return 0;
437 }
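
/*
 * Illustrative usage (hypothetical user-client code, not in the original
 * source): describe a 64-bit-clean user-space buffer for a client task.
 * userBuffer, userLength and clientTask are assumed to come from the caller.
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *         userBuffer,          // mach_vm_address_t supplied by the client
 *         userLength,          // mach_vm_size_t
 *         kIODirectionOutIn,
 *         clientTask);         // task_t of the calling process
 *     if (md && (kIOReturnSuccess == md->prepare())) {
 *         // ... DMA, readBytes()/writeBytes(), etc. ...
 *         md->complete();
 *     }
 *     if (md) md->release();
 */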
438
439
440 /*
441 * withRanges:
442 *
443 * Create a new IOMemoryDescriptor. The buffer is made up of several
444 * virtual address ranges, from a given task.
445 *
446 * Passing the ranges as a reference will avoid an extra allocation.
447 */
448 IOMemoryDescriptor *
449 IOMemoryDescriptor::withOptions(void * buffers,
450 UInt32 count,
451 UInt32 offset,
452 task_t task,
453 IOOptionBits opts,
454 IOMapper * mapper)
455 {
456 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
457
458 if (self
459 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
460 {
461 self->release();
462 return 0;
463 }
464
465 return self;
466 }
467
468 // Can't leave this abstract, but it should never be used directly.
469 bool IOMemoryDescriptor::initWithOptions(void * buffers,
470 UInt32 count,
471 UInt32 offset,
472 task_t task,
473 IOOptionBits options,
474 IOMapper * mapper)
475 {
476 // @@@ gvdl: Should I panic?
477 panic("IOMD::initWithOptions called\n");
478 return 0;
479 }
480
481 IOMemoryDescriptor *
482 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
483 UInt32 withCount,
484 IODirection direction,
485 bool asReference)
486 {
487 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
488 if (that)
489 {
490 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
491 return that;
492
493 that->release();
494 }
495 return 0;
496 }
497
498 IOMemoryDescriptor *
499 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
500 IOByteCount offset,
501 IOByteCount length,
502 IODirection direction)
503 {
504 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
505
506 if (self && !self->initSubRange(of, offset, length, direction)) {
507 self->release();
508 self = 0;
509 }
510 return self;
511 }
512
513 IOMemoryDescriptor *
514 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
515 {
516 IOGeneralMemoryDescriptor *origGenMD =
517 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
518
519 if (origGenMD)
520 return IOGeneralMemoryDescriptor::
521 withPersistentMemoryDescriptor(origGenMD);
522 else
523 return 0;
524 }
525
526 IOMemoryDescriptor *
527 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
528 {
529 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
530
531 if (!sharedMem)
532 return 0;
533
534 if (sharedMem == originalMD->_memEntry) {
535 originalMD->retain(); // Add a new reference to ourselves
536 ipc_port_release_send(sharedMem); // Remove extra send right
537 return originalMD;
538 }
539
540 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
541 typePersMDData initData = { originalMD, sharedMem };
542
543 if (self
544 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
545 self->release();
546 self = 0;
547 }
548 return self;
549 }
550
551 void *IOGeneralMemoryDescriptor::createNamedEntry()
552 {
553 kern_return_t error;
554 ipc_port_t sharedMem;
555
556 IOOptionBits type = _flags & kIOMemoryTypeMask;
557
558 user_addr_t range0Addr;
559 IOByteCount range0Len;
560 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
561 range0Addr = trunc_page_64(range0Addr);
562
563 vm_size_t size = ptoa_32(_pages);
564 vm_address_t kernelPage = (vm_address_t) range0Addr;
565
566 vm_map_t theMap = ((_task == kernel_task)
567 && (kIOMemoryBufferPageable & _flags))
568 ? IOPageableMapForAddress(kernelPage)
569 : get_task_map(_task);
570
571 memory_object_size_t actualSize = size;
572 vm_prot_t prot = VM_PROT_READ;
573 #if CONFIG_EMBEDDED
574 if (kIODirectionOut != (kIODirectionOutIn & _flags))
575 #endif
576 prot |= VM_PROT_WRITE;
577
578 if (_memEntry)
579 prot |= MAP_MEM_NAMED_REUSE;
580
581 error = mach_make_memory_entry_64(theMap,
582 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
583
584 if (KERN_SUCCESS == error) {
585 if (actualSize == size) {
586 return sharedMem;
587 } else {
588 #if IOASSERT
589 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
590 (UInt64)range0Addr, (UInt32)actualSize, size);
591 #endif
592 ipc_port_release_send( sharedMem );
593 }
594 }
595
596 return MACH_PORT_NULL;
597 }
598
599 /*
600 * initWithAddress:
601 *
602 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
603 * relative to the specified task. If no task is supplied, the kernel
604 * task is implied.
605 *
606 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
607 * initWithRanges again on an existing instance -- note this behavior
608 * is not commonly supported in other I/O Kit classes, although it is
609 * supported here.
610 */
611 bool
612 IOGeneralMemoryDescriptor::initWithAddress(void * address,
613 IOByteCount withLength,
614 IODirection withDirection)
615 {
616 _singleRange.v.address = (vm_address_t) address;
617 _singleRange.v.length = withLength;
618
619 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
620 }
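
/*
 * Illustrative sketch (hypothetical, not in the original source): as the
 * comment above notes, an existing instance may be retargeted by calling
 * initWithAddress() again, which is unusual for I/O Kit classes.
 *
 *     IOGeneralMemoryDescriptor * gmd = new IOGeneralMemoryDescriptor;
 *     gmd->initWithAddress(bufA, lenA, kIODirectionIn);    // first target
 *     // ... use, then retarget; initWithOptions() cleans up the prior state
 *     gmd->initWithAddress(bufB, lenB, kIODirectionOut);
 *     gmd->release();
 */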
621
622 bool
623 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
624 IOByteCount withLength,
625 IODirection withDirection,
626 task_t withTask)
627 {
628 _singleRange.v.address = address;
629 _singleRange.v.length = withLength;
630
631 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
632 }
633
634 bool
635 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
636 IOPhysicalAddress address,
637 IOByteCount withLength,
638 IODirection withDirection )
639 {
640 _singleRange.p.address = address;
641 _singleRange.p.length = withLength;
642
643 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
644 }
645
646 bool
647 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
648 IOPhysicalRange * ranges,
649 UInt32 count,
650 IODirection direction,
651 bool reference)
652 {
653 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
654
655 if (reference)
656 mdOpts |= kIOMemoryAsReference;
657
658 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
659 }
660
661 bool
662 IOGeneralMemoryDescriptor::initWithRanges(
663 IOVirtualRange * ranges,
664 UInt32 count,
665 IODirection direction,
666 task_t task,
667 bool reference)
668 {
669 IOOptionBits mdOpts = direction;
670
671 if (reference)
672 mdOpts |= kIOMemoryAsReference;
673
674 if (task) {
675 mdOpts |= kIOMemoryTypeVirtual;
676
677 // Auto-prepare if this is a kernel memory descriptor as very few
678 // clients bother to prepare() kernel memory.
679 // But it was not enforced so what are you going to do?
680 if (task == kernel_task)
681 mdOpts |= kIOMemoryAutoPrepare;
682 }
683 else
684 mdOpts |= kIOMemoryTypePhysical;
685
686 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
687 }
688
689 /*
690 * initWithOptions:
691 *
692 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
693 * address ranges from a given task, several physical ranges, a UPL from the
694 * UBC system, or a uio (which may be 64-bit) from the BSD subsystem.
695 *
696 * Passing the ranges as a reference will avoid an extra allocation.
697 *
698 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
699 * existing instance -- note this behavior is not commonly supported in other
700 * I/O Kit classes, although it is supported here.
701 */
702
703 bool
704 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
705 UInt32 count,
706 UInt32 offset,
707 task_t task,
708 IOOptionBits options,
709 IOMapper * mapper)
710 {
711 IOOptionBits type = options & kIOMemoryTypeMask;
712
713 // Grab the original MD's configuration data to initialise the
714 // arguments to this function.
715 if (kIOMemoryTypePersistentMD == type) {
716
717 typePersMDData *initData = (typePersMDData *) buffers;
718 const IOGeneralMemoryDescriptor *orig = initData->fMD;
719 ioGMDData *dataP = getDataP(orig->_memoryEntries);
720
721 // Only accept persistent memory descriptors with valid dataP data.
722 assert(orig->_rangesCount == 1);
723 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
724 return false;
725
726 _memEntry = initData->fMemEntry; // Grab the new named entry
727 options = orig->_flags | kIOMemoryAsReference;
728 _singleRange = orig->_singleRange; // Initialise our range
729 buffers = &_singleRange;
730 count = 1;
731
732 // Now grab the original task and whatever mapper was previously used
733 task = orig->_task;
734 mapper = dataP->fMapper;
735
736 // We are ready to go through the original initialisation now
737 }
738
739 switch (type) {
740 case kIOMemoryTypeUIO:
741 case kIOMemoryTypeVirtual:
742 case kIOMemoryTypeVirtual64:
743 assert(task);
744 if (!task)
745 return false;
746
747 if (vm_map_is_64bit(get_task_map(task))
748 && (kIOMemoryTypeVirtual == type)
749 && ((IOVirtualRange *) buffers)->address)
750 {
751 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
752 return false;
753 }
754 break;
755
756 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
757 case kIOMemoryTypePhysical64:
758 mapper = kIOMapperNone;
759 /* fall through */
760 case kIOMemoryTypeUPL:
761 assert(!task);
762 break;
763 default:
764 return false; /* bad argument */
765 }
766
767 assert(buffers);
768 assert(count);
769
770 /*
771 * We can check the _initialized instance variable before having ever set
772 * it to an initial value because I/O Kit guarantees that all our instance
773 * variables are zeroed on an object's allocation.
774 */
775
776 if (_initialized) {
777 /*
778 * An existing memory descriptor is being retargeted to point to
779 * somewhere else. Clean up our present state.
780 */
781 IOOptionBits type = _flags & kIOMemoryTypeMask;
782 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
783 {
784 while (_wireCount)
785 complete();
786 }
787 if (_ranges.v && _rangesIsAllocated)
788 {
789 if (kIOMemoryTypeUIO == type)
790 uio_free((uio_t) _ranges.v);
791 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
792 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
793 else
794 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
795 }
796
797 if (_memEntry)
798 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
799 if (_mappings)
800 _mappings->flushCollection();
801 }
802 else {
803 if (!super::init())
804 return false;
805 _initialized = true;
806 }
807
808 // Grab the appropriate mapper
809 if (mapper == kIOMapperNone)
810 mapper = 0; // No Mapper
811 else if (mapper == kIOMapperSystem) {
812 IOMapper::checkForSystemMapper();
813 gIOSystemMapper = mapper = IOMapper::gSystem;
814 }
815
816 // Remove the dynamic internal use flags from the initial setting
817 options &= ~(kIOMemoryPreparedReadOnly);
818 _flags = options;
819 _task = task;
820
821 // DEPRECATED variable initialisation
822 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
823
824 __iomd_reservedA = 0;
825 __iomd_reservedB = 0;
826 _highestPage = 0;
827
828 if (kIOMemoryThreadSafe & options)
829 {
830 if (!_prepareLock)
831 _prepareLock = IOLockAlloc();
832 }
833 else if (_prepareLock)
834 {
835 IOLockFree(_prepareLock);
836 _prepareLock = NULL;
837 }
838
839 if (kIOMemoryTypeUPL == type) {
840
841 ioGMDData *dataP;
842 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
843
844 if (!_memoryEntries) {
845 _memoryEntries = OSData::withCapacity(dataSize);
846 if (!_memoryEntries)
847 return false;
848 }
849 else if (!_memoryEntries->initWithCapacity(dataSize))
850 return false;
851
852 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
853 dataP = getDataP(_memoryEntries);
854 dataP->fMapper = mapper;
855 dataP->fPageCnt = 0;
856
857 // _wireCount++; // UPLs start out life wired
858
859 _length = count;
860 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
861
862 ioPLBlock iopl;
863 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
864
865 iopl.fIOPL = (upl_t) buffers;
866 // Set the flag kIOPLOnDevice conveniently equal to 1
867 iopl.fFlags = pageList->device | kIOPLExternUPL;
868 iopl.fIOMDOffset = 0;
869
870 _highestPage = upl_get_highest_page(iopl.fIOPL);
871
872 if (!pageList->device) {
873 // Pre-compute the offset into the UPL's page list
874 pageList = &pageList[atop_32(offset)];
875 offset &= PAGE_MASK;
876 if (mapper) {
877 iopl.fMappedBase = mapper->iovmAlloc(_pages);
878 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
879 }
880 else
881 iopl.fMappedBase = 0;
882 }
883 else
884 iopl.fMappedBase = 0;
885 iopl.fPageInfo = (vm_address_t) pageList;
886 iopl.fPageOffset = offset;
887
888 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
889 }
890 else {
891 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
892 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
893
894 // Initialize the memory descriptor
895 if (options & kIOMemoryAsReference) {
896 _rangesIsAllocated = false;
897
898 // Hack assignment to get the buffer arg into _ranges.
899 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
900 // work, C++ sigh.
901 // This also initialises the uio & physical ranges.
902 _ranges.v = (IOVirtualRange *) buffers;
903 }
904 else {
905 _rangesIsAllocated = true;
906 switch (_flags & kIOMemoryTypeMask)
907 {
908 case kIOMemoryTypeUIO:
909 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
910 break;
911
912 case kIOMemoryTypeVirtual64:
913 case kIOMemoryTypePhysical64:
914 _ranges.v64 = IONew(IOAddressRange, count);
915 if (!_ranges.v64)
916 return false;
917 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
918 break;
919 case kIOMemoryTypeVirtual:
920 case kIOMemoryTypePhysical:
921 _ranges.v = IONew(IOVirtualRange, count);
922 if (!_ranges.v)
923 return false;
924 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
925 break;
926 }
927 }
928
929 // Compute the total length and page count across the vector of ranges
930 Ranges vec = _ranges;
931 UInt32 length = 0;
932 UInt32 pages = 0;
933 for (unsigned ind = 0; ind < count; ind++) {
934 user_addr_t addr;
935 UInt32 len;
936
937 // addr & len are returned by this function
938 getAddrLenForInd(addr, len, type, vec, ind);
939 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
940 len += length;
941 assert(len >= length); // Check for 32 bit wrap around
942 length = len;
943
944 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
945 {
946 ppnum_t highPage = atop_64(addr + len - 1);
947 if (highPage > _highestPage)
948 _highestPage = highPage;
949 }
950 }
951 _length = length;
952 _pages = pages;
953 _rangesCount = count;
954
955 // Auto-prepare memory at creation time.
956 // Implied completion when descriptor is free-ed
957 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
958 _wireCount++; // Physical MDs are, by definition, wired
959 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
960 ioGMDData *dataP;
961 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
962
963 if (!_memoryEntries) {
964 _memoryEntries = OSData::withCapacity(dataSize);
965 if (!_memoryEntries)
966 return false;
967 }
968 else if (!_memoryEntries->initWithCapacity(dataSize))
969 return false;
970
971 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
972 dataP = getDataP(_memoryEntries);
973 dataP->fMapper = mapper;
974 dataP->fPageCnt = _pages;
975
976 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
977 _memEntry = createNamedEntry();
978
979 if ((_flags & kIOMemoryAutoPrepare)
980 && prepare() != kIOReturnSuccess)
981 return false;
982 }
983 }
984
985 return true;
986 }
987
988 /*
989 * free
990 *
991 * Free resources.
992 */
993 void IOGeneralMemoryDescriptor::free()
994 {
995 IOOptionBits type = _flags & kIOMemoryTypeMask;
996
997 if( reserved)
998 {
999 LOCK;
1000 reserved->memory = 0;
1001 UNLOCK;
1002 }
1003
1004 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1005 {
1006 while (_wireCount)
1007 complete();
1008 }
1009 if (_memoryEntries)
1010 _memoryEntries->release();
1011
1012 if (_ranges.v && _rangesIsAllocated)
1013 {
1014 if (kIOMemoryTypeUIO == type)
1015 uio_free((uio_t) _ranges.v);
1016 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1017 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1018 else
1019 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1020
1021 _ranges.v = NULL;
1022 }
1023
1024 if (reserved && reserved->devicePager)
1025 device_pager_deallocate( (memory_object_t) reserved->devicePager );
1026
1027 // memEntry holds a ref on the device pager which owns reserved
1028 // (ExpansionData) so no reserved access after this point
1029 if (_memEntry)
1030 ipc_port_release_send( (ipc_port_t) _memEntry );
1031
1032 if (_prepareLock)
1033 IOLockFree(_prepareLock);
1034
1035 super::free();
1036 }
1037
1038 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
1039 /* DEPRECATED */ {
1040 panic("IOGMD::unmapFromKernel deprecated");
1041 /* DEPRECATED */ }
1042 /* DEPRECATED */
1043 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1044 /* DEPRECATED */ {
1045 panic("IOGMD::mapIntoKernel deprecated");
1046 /* DEPRECATED */ }
1047
1048 /*
1049 * getDirection:
1050 *
1051 * Get the direction of the transfer.
1052 */
1053 IODirection IOMemoryDescriptor::getDirection() const
1054 {
1055 return _direction;
1056 }
1057
1058 /*
1059 * getLength:
1060 *
1061 * Get the length of the transfer (over all ranges).
1062 */
1063 IOByteCount IOMemoryDescriptor::getLength() const
1064 {
1065 return _length;
1066 }
1067
1068 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1069 {
1070 _tag = tag;
1071 }
1072
1073 IOOptionBits IOMemoryDescriptor::getTag( void )
1074 {
1075 return( _tag);
1076 }
1077
1078 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1079 IOPhysicalAddress
1080 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1081 {
1082 addr64_t physAddr = 0;
1083
1084 if( prepare() == kIOReturnSuccess) {
1085 physAddr = getPhysicalSegment64( offset, length );
1086 complete();
1087 }
1088
1089 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1090 }
1091
1092 IOByteCount IOMemoryDescriptor::readBytes
1093 (IOByteCount offset, void *bytes, IOByteCount length)
1094 {
1095 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
1096 IOByteCount remaining;
1097
1098 // Assert that this entire I/O is within the available range
1099 assert(offset < _length);
1100 assert(offset + length <= _length);
1101 if (offset >= _length) {
1102 return 0;
1103 }
1104
1105 remaining = length = min(length, _length - offset);
1106 while (remaining) { // (process another target segment?)
1107 addr64_t srcAddr64;
1108 IOByteCount srcLen;
1109
1110 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
1111 if (!srcAddr64)
1112 break;
1113
1114 // Clip segment length to remaining
1115 if (srcLen > remaining)
1116 srcLen = remaining;
1117
1118 copypv(srcAddr64, dstAddr, srcLen,
1119 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1120
1121 dstAddr += srcLen;
1122 offset += srcLen;
1123 remaining -= srcLen;
1124 }
1125
1126 assert(!remaining);
1127
1128 return length - remaining;
1129 }
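
/*
 * Illustrative usage (hypothetical, not in the original source): copy the
 * first bytes described by a prepared descriptor into a local kernel buffer.
 * The return value is the number of bytes actually copied.
 *
 *     UInt8 header[64];
 *     IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *     // copied < sizeof(header) only if the descriptor is shorter than the
 *     // request or a physical segment could not be resolved.
 */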
1130
1131 IOByteCount IOMemoryDescriptor::writeBytes
1132 (IOByteCount offset, const void *bytes, IOByteCount length)
1133 {
1134 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1135 IOByteCount remaining;
1136
1137 // Assert that this entire I/O is within the available range
1138 assert(offset < _length);
1139 assert(offset + length <= _length);
1140
1141 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1142
1143 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1144 return 0;
1145 }
1146
1147 remaining = length = min(length, _length - offset);
1148 while (remaining) { // (process another target segment?)
1149 addr64_t dstAddr64;
1150 IOByteCount dstLen;
1151
1152 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1153 if (!dstAddr64)
1154 break;
1155
1156 // Clip segment length to remaining
1157 if (dstLen > remaining)
1158 dstLen = remaining;
1159
1160 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1161 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1162
1163 srcAddr += dstLen;
1164 offset += dstLen;
1165 remaining -= dstLen;
1166 }
1167
1168 assert(!remaining);
1169
1170 return length - remaining;
1171 }
1172
1173 // osfmk/device/iokit_rpc.c
1174 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1175
1176 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1177 /* DEPRECATED */ {
1178 panic("IOGMD::setPosition deprecated");
1179 /* DEPRECATED */ }
1180
1181 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1182 {
1183 if (kIOMDGetCharacteristics == op) {
1184
1185 if (dataSize < sizeof(IOMDDMACharacteristics))
1186 return kIOReturnUnderrun;
1187
1188 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1189 data->fLength = _length;
1190 data->fSGCount = _rangesCount;
1191 data->fPages = _pages;
1192 data->fDirection = _direction;
1193 if (!_wireCount)
1194 data->fIsPrepared = false;
1195 else {
1196 data->fIsPrepared = true;
1197 data->fHighestPage = _highestPage;
1198 if (_memoryEntries) {
1199 ioGMDData *gmdData = getDataP(_memoryEntries);
1200 ioPLBlock *ioplList = getIOPLList(gmdData);
1201 UInt count = getNumIOPL(_memoryEntries, gmdData);
1202
1203 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1204 && ioplList[0].fMappedBase);
1205 if (count == 1)
1206 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1207 }
1208 else
1209 data->fIsMapped = false;
1210 }
1211
1212 return kIOReturnSuccess;
1213 }
1214 else if (!(kIOMDWalkSegments & op))
1215 return kIOReturnBadArgument;
1216
1217 // Get the next segment
1218 struct InternalState {
1219 IOMDDMAWalkSegmentArgs fIO;
1220 UInt fOffset2Index;
1221 UInt fIndex;
1222 UInt fNextOffset;
1223 } *isP;
1224
1225 // Find the next segment
1226 if (dataSize < sizeof(*isP))
1227 return kIOReturnUnderrun;
1228
1229 isP = (InternalState *) vData;
1230 UInt offset = isP->fIO.fOffset;
1231 bool mapped = isP->fIO.fMapped;
1232
1233 if (offset >= _length)
1234 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1235
1236 // Validate the previous offset
1237 UInt ind, off2Ind = isP->fOffset2Index;
1238 if ((kIOMDFirstSegment != op)
1239 && offset
1240 && (offset == isP->fNextOffset || off2Ind <= offset))
1241 ind = isP->fIndex;
1242 else
1243 ind = off2Ind = 0; // Start from beginning
1244
1245 UInt length;
1246 UInt64 address;
1247 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1248
1249 // Physical address based memory descriptor
1250 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1251
1252 // Find the range after the one that contains the offset
1253 UInt len;
1254 for (len = 0; off2Ind <= offset; ind++) {
1255 len = physP[ind].length;
1256 off2Ind += len;
1257 }
1258
1259 // Calculate length within range and starting address
1260 length = off2Ind - offset;
1261 address = physP[ind - 1].address + len - length;
1262
1263 // see how far we can coalesce ranges
1264 while (ind < _rangesCount && address + length == physP[ind].address) {
1265 len = physP[ind].length;
1266 length += len;
1267 off2Ind += len;
1268 ind++;
1269 }
1270
1271 // correct contiguous check overshoot
1272 ind--;
1273 off2Ind -= len;
1274 }
1275 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1276
1277 // Physical address based memory descriptor
1278 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1279
1280 // Find the range after the one that contains the offset
1281 mach_vm_size_t len;
1282 for (len = 0; off2Ind <= offset; ind++) {
1283 len = physP[ind].length;
1284 off2Ind += len;
1285 }
1286
1287 // Calculate length within range and starting address
1288 length = off2Ind - offset;
1289 address = physP[ind - 1].address + len - length;
1290
1291 // see how far we can coalesce ranges
1292 while (ind < _rangesCount && address + length == physP[ind].address) {
1293 len = physP[ind].length;
1294 length += len;
1295 off2Ind += len;
1296 ind++;
1297 }
1298
1299 // correct contiguous check overshoot
1300 ind--;
1301 off2Ind -= len;
1302 }
1303 else do {
1304 if (!_wireCount)
1305 panic("IOGMD: not wired for the IODMACommand");
1306
1307 assert(_memoryEntries);
1308
1309 ioGMDData * dataP = getDataP(_memoryEntries);
1310 const ioPLBlock *ioplList = getIOPLList(dataP);
1311 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1312 upl_page_info_t *pageList = getPageList(dataP);
1313
1314 assert(numIOPLs > 0);
1315
1316 // Scan through iopl info blocks looking for block containing offset
1317 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1318 ind++;
1319
1320 // Go back to actual range as search goes past it
1321 ioPLBlock ioplInfo = ioplList[ind - 1];
1322 off2Ind = ioplInfo.fIOMDOffset;
1323
1324 if (ind < numIOPLs)
1325 length = ioplList[ind].fIOMDOffset;
1326 else
1327 length = _length;
1328 length -= offset; // Remainder within iopl
1329
1330 // Subtract offset till this iopl in total list
1331 offset -= off2Ind;
1332
1333 // If a mapped address is requested and this is a pre-mapped IOPL
1334 // then we just need to compute an offset relative to the mapped base.
1335 if (mapped && ioplInfo.fMappedBase) {
1336 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1337 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1338 continue; // Done; leave the do/while(false) now
1339 }
1340
1341 // The offset is rebased into the current iopl.
1342 // Now add the iopl 1st page offset.
1343 offset += ioplInfo.fPageOffset;
1344
1345 // For external UPLs the fPageInfo field points directly to
1346 // the upl's upl_page_info_t array.
1347 if (ioplInfo.fFlags & kIOPLExternUPL)
1348 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1349 else
1350 pageList = &pageList[ioplInfo.fPageInfo];
1351
1352 // Check for direct device non-paged memory
1353 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1354 address = ptoa_64(pageList->phys_addr) + offset;
1355 continue; // Done; leave the do/while(false) now
1356 }
1357
1358 // Now we need to compute the index into the pageList
1359 UInt pageInd = atop_32(offset);
1360 offset &= PAGE_MASK;
1361
1362 // Compute the starting address of this segment
1363 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1364 address = ptoa_64(pageAddr) + offset;
1365
1366 // length is currently set to the length of the remainder of the iopl.
1367 // We need to check that the remainder of the iopl is contiguous.
1368 // This is indicated by pageList[ind].phys_addr being sequential.
1369 IOByteCount contigLength = PAGE_SIZE - offset;
1370 while (contigLength < length
1371 && ++pageAddr == pageList[++pageInd].phys_addr)
1372 {
1373 contigLength += PAGE_SIZE;
1374 }
1375
1376 if (contigLength < length)
1377 length = contigLength;
1378
1379
1380 assert(address);
1381 assert(length);
1382
1383 } while (false);
1384
1385 // Update return values and state
1386 isP->fIO.fIOVMAddr = address;
1387 isP->fIO.fLength = length;
1388 isP->fIndex = ind;
1389 isP->fOffset2Index = off2Ind;
1390 isP->fNextOffset = isP->fIO.fOffset + length;
1391
1392 return kIOReturnSuccess;
1393 }
1394
1395 addr64_t
1396 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1397 {
1398 IOReturn ret;
1399 IOByteCount length = 0;
1400 addr64_t address = 0;
1401
1402 if (gIOSystemMapper && (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask)))
1403 return (super::getPhysicalSegment64(offset, lengthOfSegment));
1404
1405 if (offset < _length) // (within bounds?)
1406 {
1407 IOMDDMAWalkSegmentState _state;
1408 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1409
1410 state->fOffset = offset;
1411 state->fLength = _length - offset;
1412 state->fMapped = false;
1413
1414 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1415
1416 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1417 DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1418 ret, this, state->fOffset,
1419 state->fIOVMAddr, state->fLength);
1420 if (kIOReturnSuccess == ret)
1421 {
1422 address = state->fIOVMAddr;
1423 length = state->fLength;
1424 }
1425 if (!address)
1426 length = 0;
1427 }
1428
1429 if (lengthOfSegment)
1430 *lengthOfSegment = length;
1431
1432 return (address);
1433 }
1434
1435 IOPhysicalAddress
1436 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1437 {
1438 IOReturn ret;
1439 IOByteCount length = 0;
1440 addr64_t address = 0;
1441
1442 // assert(offset <= _length);
1443
1444 if (offset < _length) // (within bounds?)
1445 {
1446 IOMDDMAWalkSegmentState _state;
1447 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1448
1449 state->fOffset = offset;
1450 state->fLength = _length - offset;
1451 state->fMapped = true;
1452
1453 ret = dmaCommandOperation(
1454 kIOMDFirstSegment, _state, sizeof(_state));
1455
1456 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1457 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1458 ret, this, state->fOffset,
1459 state->fIOVMAddr, state->fLength);
1460 if (kIOReturnSuccess == ret)
1461 {
1462 address = state->fIOVMAddr;
1463 length = state->fLength;
1464 }
1465
1466 if (!address)
1467 length = 0;
1468 }
1469
1470 if ((address + length) > 0x100000000ULL)
1471 {
1472 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1473 address, length, (getMetaClass())->getClassName());
1474 }
1475
1476 if (lengthOfSegment)
1477 *lengthOfSegment = length;
1478
1479 return ((IOPhysicalAddress) address);
1480 }
1481
1482 addr64_t
1483 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1484 {
1485 IOPhysicalAddress phys32;
1486 IOByteCount length;
1487 addr64_t phys64;
1488 IOMapper * mapper = 0;
1489
1490 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1491 if (!phys32)
1492 return 0;
1493
1494 if (gIOSystemMapper)
1495 mapper = gIOSystemMapper;
1496
1497 if (mapper)
1498 {
1499 IOByteCount origLen;
1500
1501 phys64 = mapper->mapAddr(phys32);
1502 origLen = *lengthOfSegment;
1503 length = page_size - (phys64 & (page_size - 1));
1504 while ((length < origLen)
1505 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1506 length += page_size;
1507 if (length > origLen)
1508 length = origLen;
1509
1510 *lengthOfSegment = length;
1511 }
1512 else
1513 phys64 = (addr64_t) phys32;
1514
1515 return phys64;
1516 }
1517
1518 IOPhysicalAddress
1519 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1520 {
1521 IOPhysicalAddress address = 0;
1522 IOPhysicalLength length = 0;
1523 IOOptionBits type = _flags & kIOMemoryTypeMask;
1524
1525 assert(offset <= _length);
1526
1527 if ( type == kIOMemoryTypeUPL)
1528 return super::getSourceSegment( offset, lengthOfSegment );
1529 else if ( offset < _length ) // (within bounds?)
1530 {
1531 unsigned rangesIndex = 0;
1532 Ranges vec = _ranges;
1533 user_addr_t addr;
1534
1535 // Find starting address within the vector of ranges
1536 for (;;) {
1537 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1538 if (offset < length)
1539 break;
1540 offset -= length; // (make offset relative)
1541 rangesIndex++;
1542 }
1543
1544 // Now that we have the starting range,
1545 // lets find the last contiguous range
1546 addr += offset;
1547 length -= offset;
1548
1549 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1550 user_addr_t newAddr;
1551 IOPhysicalLength newLen;
1552
1553 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1554 if (addr + length != newAddr)
1555 break;
1556 length += newLen;
1557 }
1558 if (addr)
1559 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1560 else
1561 length = 0;
1562 }
1563
1564 if ( lengthOfSegment ) *lengthOfSegment = length;
1565
1566 return address;
1567 }
1568
1569 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1570 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1571 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1572 /* DEPRECATED */ {
1573 if (_task == kernel_task)
1574 return (void *) getSourceSegment(offset, lengthOfSegment);
1575 else
1576 panic("IOGMD::getVirtualSegment deprecated");
1577
1578 return 0;
1579 /* DEPRECATED */ }
1580 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1581
1582
1583
1584 IOReturn
1585 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1586 {
1587 if (kIOMDGetCharacteristics == op) {
1588 if (dataSize < sizeof(IOMDDMACharacteristics))
1589 return kIOReturnUnderrun;
1590
1591 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1592 data->fLength = getLength();
1593 data->fSGCount = 0;
1594 data->fDirection = _direction;
1595 if (IOMapper::gSystem)
1596 data->fIsMapped = true;
1597 data->fIsPrepared = true; // Assume prepared - fails safe
1598 }
1599 else if (kIOMDWalkSegments & op) {
1600 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1601 return kIOReturnUnderrun;
1602
1603 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1604 IOByteCount offset = (IOByteCount) data->fOffset;
1605
1606 IOPhysicalLength length;
1607 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1608 if (data->fMapped && IOMapper::gSystem)
1609 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1610 else
1611 data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
1612 data->fLength = length;
1613 }
1614 else
1615 return kIOReturnBadArgument;
1616
1617 return kIOReturnSuccess;
1618 }
1619
1620 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1621 IOOptionBits * oldState )
1622 {
1623 IOReturn err = kIOReturnSuccess;
1624 vm_purgable_t control;
1625 int state;
1626
1627 do
1628 {
1629 if (!_memEntry)
1630 {
1631 err = kIOReturnNotReady;
1632 break;
1633 }
1634
1635 control = VM_PURGABLE_SET_STATE;
1636 switch (newState)
1637 {
1638 case kIOMemoryPurgeableKeepCurrent:
1639 control = VM_PURGABLE_GET_STATE;
1640 break;
1641
1642 case kIOMemoryPurgeableNonVolatile:
1643 state = VM_PURGABLE_NONVOLATILE;
1644 break;
1645 case kIOMemoryPurgeableVolatile:
1646 state = VM_PURGABLE_VOLATILE;
1647 break;
1648 case kIOMemoryPurgeableEmpty:
1649 state = VM_PURGABLE_EMPTY;
1650 break;
1651 default:
1652 err = kIOReturnBadArgument;
1653 break;
1654 }
1655
1656 if (kIOReturnSuccess != err)
1657 break;
1658
1659 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1660
1661 if (oldState)
1662 {
1663 if (kIOReturnSuccess == err)
1664 {
1665 switch (state)
1666 {
1667 case VM_PURGABLE_NONVOLATILE:
1668 state = kIOMemoryPurgeableNonVolatile;
1669 break;
1670 case VM_PURGABLE_VOLATILE:
1671 state = kIOMemoryPurgeableVolatile;
1672 break;
1673 case VM_PURGABLE_EMPTY:
1674 state = kIOMemoryPurgeableEmpty;
1675 break;
1676 default:
1677 state = kIOMemoryPurgeableNonVolatile;
1678 err = kIOReturnNotReady;
1679 break;
1680 }
1681 *oldState = state;
1682 }
1683 }
1684 }
1685 while (false);
1686
1687 return (err);
1688 }
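
/*
 * Illustrative usage (hypothetical, not in the original source; requires a
 * descriptor that has a named memory entry, otherwise kIOReturnNotReady):
 * mark a cache buffer volatile while idle, then reclaim it for use.
 *
 *     IOOptionBits oldState;
 *     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *     // ... later ...
 *     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     if (kIOMemoryPurgeableEmpty == oldState) {
 *         // the VM discarded the contents while volatile; regenerate them
 *     }
 */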
1689
1690 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1691 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1692
1693 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1694 IOByteCount offset, IOByteCount length )
1695 {
1696 IOByteCount remaining;
1697 void (*func)(addr64_t pa, unsigned int count) = 0;
1698
1699 switch (options)
1700 {
1701 case kIOMemoryIncoherentIOFlush:
1702 func = &dcache_incoherent_io_flush64;
1703 break;
1704 case kIOMemoryIncoherentIOStore:
1705 func = &dcache_incoherent_io_store64;
1706 break;
1707 }
1708
1709 if (!func)
1710 return (kIOReturnUnsupported);
1711
1712 remaining = length = min(length, getLength() - offset);
1713 while (remaining)
1714 // (process another target segment?)
1715 {
1716 addr64_t dstAddr64;
1717 IOByteCount dstLen;
1718
1719 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1720 if (!dstAddr64)
1721 break;
1722
1723 // Clip segment length to remaining
1724 if (dstLen > remaining)
1725 dstLen = remaining;
1726
1727 (*func)(dstAddr64, dstLen);
1728
1729 offset += dstLen;
1730 remaining -= dstLen;
1731 }
1732
1733 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1734 }
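
/*
 * Illustrative usage (hypothetical, not in the original source): write back
 * possibly-incoherent cache lines over the whole descriptor before a device
 * reads the buffer.
 *
 *     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 */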
1735
1736 #if defined(__ppc__) || defined(__arm__)
1737 extern vm_offset_t static_memory_end;
1738 #define io_kernel_static_end static_memory_end
1739 #else
1740 extern vm_offset_t first_avail;
1741 #define io_kernel_static_end first_avail
1742 #endif
1743
1744 static kern_return_t
1745 io_get_kernel_static_upl(
1746 vm_map_t /* map */,
1747 vm_address_t offset,
1748 vm_size_t *upl_size,
1749 upl_t *upl,
1750 upl_page_info_array_t page_list,
1751 unsigned int *count,
1752 ppnum_t *highest_page)
1753 {
1754 unsigned int pageCount, page;
1755 ppnum_t phys;
1756 ppnum_t highestPage = 0;
1757
1758 pageCount = atop_32(*upl_size);
1759 if (pageCount > *count)
1760 pageCount = *count;
1761
1762 *upl = NULL;
1763
1764 for (page = 0; page < pageCount; page++)
1765 {
1766 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1767 if (!phys)
1768 break;
1769 page_list[page].phys_addr = phys;
1770 page_list[page].pageout = 0;
1771 page_list[page].absent = 0;
1772 page_list[page].dirty = 0;
1773 page_list[page].precious = 0;
1774 page_list[page].device = 0;
1775 if (phys > highestPage)
1776 highestPage = phys; // track the physical page number, not the loop index
1777 }
1778
1779 *highest_page = highestPage;
1780
1781 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1782 }
1783
1784 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1785 {
1786 IOOptionBits type = _flags & kIOMemoryTypeMask;
1787 IOReturn error = kIOReturnCannotWire;
1788 ioGMDData *dataP;
1789 ppnum_t mapBase = 0;
1790 IOMapper *mapper;
1791 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1792
1793 assert(!_wireCount);
1794 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1795
1796 if (_pages >= gIOMaximumMappedIOPageCount)
1797 return kIOReturnNoResources;
1798
1799 dataP = getDataP(_memoryEntries);
1800 mapper = dataP->fMapper;
1801 if (mapper && _pages)
1802 mapBase = mapper->iovmAlloc(_pages);
1803
1804 // Note that appendBytes(NULL) zeros the data up to the
1805 // desired length.
1806 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1807 dataP = 0; // May no longer be valid so let's not get tempted.
1808
1809 if (forDirection == kIODirectionNone)
1810 forDirection = _direction;
1811
1812 int uplFlags; // This Mem Desc's default flags for upl creation
1813 switch (kIODirectionOutIn & forDirection)
1814 {
1815 case kIODirectionOut:
1816 // Pages do not need to be marked as dirty on commit
1817 uplFlags = UPL_COPYOUT_FROM;
1818 _flags |= kIOMemoryPreparedReadOnly;
1819 break;
1820
1821 case kIODirectionIn:
1822 default:
1823 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1824 break;
1825 }
1826 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1827
1828 #ifdef UPL_NEED_32BIT_ADDR
1829 if (kIODirectionPrepareToPhys32 & forDirection)
1830 uplFlags |= UPL_NEED_32BIT_ADDR;
1831 #endif
1832
1833 // Find the appropriate vm_map for the given task
1834 vm_map_t curMap;
1835 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1836 curMap = 0;
1837 else
1838 { curMap = get_task_map(_task); }
1839
1840 // Iterate over the vector of virtual ranges
1841 Ranges vec = _ranges;
1842 unsigned int pageIndex = 0;
1843 IOByteCount mdOffset = 0;
1844 ppnum_t highestPage = 0;
1845 for (UInt range = 0; range < _rangesCount; range++) {
1846 ioPLBlock iopl;
1847 user_addr_t startPage;
1848 IOByteCount numBytes;
1849 ppnum_t highPage = 0;
1850
1851 // Get the startPage address and length of vec[range]
1852 getAddrLenForInd(startPage, numBytes, type, vec, range);
1853 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1854 numBytes += iopl.fPageOffset;
1855 startPage = trunc_page_64(startPage);
1856
1857 if (mapper)
1858 iopl.fMappedBase = mapBase + pageIndex;
1859 else
1860 iopl.fMappedBase = 0;
1861
1862 // Iterate over the current range, creating UPLs
1863 while (numBytes) {
1864 dataP = getDataP(_memoryEntries);
1865 vm_address_t kernelStart = (vm_address_t) startPage;
1866 vm_map_t theMap;
1867 if (curMap)
1868 theMap = curMap;
1869 else if (!sharedMem) {
1870 assert(_task == kernel_task);
1871 theMap = IOPageableMapForAddress(kernelStart);
1872 }
1873 else
1874 theMap = NULL;
1875
1876 upl_page_info_array_t pageInfo = getPageList(dataP);
1877 int ioplFlags = uplFlags;
1878 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1879
1880 vm_size_t ioplSize = round_page_32(numBytes);
1881 unsigned int numPageInfo = atop_32(ioplSize);
1882
1883 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1884 error = io_get_kernel_static_upl(theMap,
1885 kernelStart,
1886 &ioplSize,
1887 &iopl.fIOPL,
1888 baseInfo,
1889 &numPageInfo,
1890 &highPage);
1891 }
1892 else if (sharedMem) {
1893 error = memory_object_iopl_request(sharedMem,
1894 ptoa_32(pageIndex),
1895 &ioplSize,
1896 &iopl.fIOPL,
1897 baseInfo,
1898 &numPageInfo,
1899 &ioplFlags);
1900 }
1901 else {
1902 assert(theMap);
1903 error = vm_map_create_upl(theMap,
1904 startPage,
1905 &ioplSize,
1906 &iopl.fIOPL,
1907 baseInfo,
1908 &numPageInfo,
1909 &ioplFlags);
1910 }
1911
1912 assert(ioplSize);
1913 if (error != KERN_SUCCESS)
1914 goto abortExit;
1915
1916 if (iopl.fIOPL)
1917 highPage = upl_get_highest_page(iopl.fIOPL);
1918 if (highPage > highestPage)
1919 highestPage = highPage;
1920
1921 error = kIOReturnCannotWire;
1922
1923 if (baseInfo->device) {
1924 numPageInfo = 1;
1925 iopl.fFlags = kIOPLOnDevice;
1926 // Don't translate device memory at all
1927 if (mapper && mapBase) {
1928 mapper->iovmFree(mapBase, _pages);
1929 mapBase = 0;
1930 iopl.fMappedBase = 0;
1931 }
1932 }
1933 else {
1934 iopl.fFlags = 0;
1935 if (mapper)
1936 mapper->iovmInsert(mapBase, pageIndex,
1937 baseInfo, numPageInfo);
1938 }
1939
1940 iopl.fIOMDOffset = mdOffset;
1941 iopl.fPageInfo = pageIndex;
1942
1943 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1944 {
1945 upl_commit(iopl.fIOPL, 0, 0);
1946 upl_deallocate(iopl.fIOPL);
1947 iopl.fIOPL = 0;
1948 }
1949
1950 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1951 // Clean up partially created and unsaved iopl
1952 if (iopl.fIOPL) {
1953 upl_abort(iopl.fIOPL, 0);
1954 upl_deallocate(iopl.fIOPL);
1955 }
1956 goto abortExit;
1957 }
1958
1959 // Check for multiple iopls in one virtual range
1960 pageIndex += numPageInfo;
1961 mdOffset -= iopl.fPageOffset;
1962 if (ioplSize < numBytes) {
1963 numBytes -= ioplSize;
1964 startPage += ioplSize;
1965 mdOffset += ioplSize;
1966 iopl.fPageOffset = 0;
1967 if (mapper)
1968 iopl.fMappedBase = mapBase + pageIndex;
1969 }
1970 else {
1971 mdOffset += numBytes;
1972 break;
1973 }
1974 }
1975 }
1976
1977 _highestPage = highestPage;
1978
1979 return kIOReturnSuccess;
1980
1981 abortExit:
1982 {
1983 dataP = getDataP(_memoryEntries);
1984 UInt done = getNumIOPL(_memoryEntries, dataP);
1985 ioPLBlock *ioplList = getIOPLList(dataP);
1986
1987 for (UInt range = 0; range < done; range++)
1988 {
1989 if (ioplList[range].fIOPL) {
1990 upl_abort(ioplList[range].fIOPL, 0);
1991 upl_deallocate(ioplList[range].fIOPL);
1992 }
1993 }
1994 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1995
1996 if (mapper && mapBase)
1997 mapper->iovmFree(mapBase, _pages);
1998 }
1999
2000 if (error == KERN_FAILURE)
2001 error = kIOReturnCannotWire;
2002
2003 return error;
2004 }
2005
2006 /*
2007 * prepare
2008 *
2009 * Prepare the memory for an I/O transfer. This involves paging in
2010 * the memory, if necessary, and wiring it down for the duration of
2011 * the transfer. The complete() method completes the processing of
2012 * the memory after the I/O transfer finishes. This method need not be
2013 * called for non-pageable memory.
2014 */
2015 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2016 {
2017 IOReturn error = kIOReturnSuccess;
2018 IOOptionBits type = _flags & kIOMemoryTypeMask;
2019
2020 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2021 return kIOReturnSuccess;
2022
2023 if (_prepareLock)
2024 IOLockLock(_prepareLock);
2025
2026 if (!_wireCount
2027 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2028 error = wireVirtual(forDirection);
2029 }
2030
2031 if (kIOReturnSuccess == error)
2032 _wireCount++;
2033
2034 if (_prepareLock)
2035 IOLockUnlock(_prepareLock);
2036
2037 return error;
2038 }
2039
2040 /*
2041 * complete
2042 *
2043 * Complete processing of the memory after an I/O transfer finishes.
2044 * This method should not be called unless a prepare was previously
2045 * issued; the prepare() and complete() calls must occur in pairs,
2046 * before and after an I/O transfer involving pageable memory.
2047 */
2048
2049 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2050 {
2051 IOOptionBits type = _flags & kIOMemoryTypeMask;
2052
2053 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2054 return kIOReturnSuccess;
2055
2056 if (_prepareLock)
2057 IOLockLock(_prepareLock);
2058
2059 assert(_wireCount);
2060
2061 if (_wireCount)
2062 {
2063 _wireCount--;
2064 if (!_wireCount)
2065 {
2066 IOOptionBits type = _flags & kIOMemoryTypeMask;
2067 ioGMDData * dataP = getDataP(_memoryEntries);
2068 ioPLBlock *ioplList = getIOPLList(dataP);
2069 UInt count = getNumIOPL(_memoryEntries, dataP);
2070
2071 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2072 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2073
2074 // Only complete iopls that we created, i.e. those for TypeVirtual
2075 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2076 for (UInt ind = 0; ind < count; ind++)
2077 if (ioplList[ind].fIOPL) {
2078 upl_commit(ioplList[ind].fIOPL, 0, 0);
2079 upl_deallocate(ioplList[ind].fIOPL);
2080 }
2081 }
2082 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2083 }
2084 }
2085
2086 if (_prepareLock)
2087 IOLockUnlock(_prepareLock);
2088
2089 return kIOReturnSuccess;
2090 }
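/*
 * Illustrative sketch, not part of the xnu sources: a typical driver
 * brackets a DMA transfer on pageable memory with prepare() and
 * complete(), as described in the comments above. The task, buffer and
 * length parameters are hypothetical placeholders; the block is
 * compiled out.
 */
#if 0
static IOReturn
ExampleWiredTransfer(task_t task, mach_vm_address_t buffer, mach_vm_size_t length)
{
    IOReturn ret = kIOReturnNoMemory;
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
                                  buffer, length, kIODirectionOut, task);
    if (md)
    {
        ret = md->prepare();            // page in and wire down the buffer
        if (kIOReturnSuccess == ret)
        {
            // ... program the hardware and perform the transfer here ...
            md->complete();             // unwire once the I/O has finished
        }
        md->release();
    }
    return (ret);
}
#endif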
2091
2092 IOReturn IOGeneralMemoryDescriptor::doMap(
2093 vm_map_t __addressMap,
2094 IOVirtualAddress * __address,
2095 IOOptionBits options,
2096 IOByteCount __offset,
2097 IOByteCount __length )
2098
2099 {
2100 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2101
2102 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2103 mach_vm_size_t offset = mapping->fOffset + __offset;
2104 mach_vm_size_t length = mapping->fLength;
2105
2106 kern_return_t kr;
2107 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2108
2109 IOOptionBits type = _flags & kIOMemoryTypeMask;
2110 Ranges vec = _ranges;
2111
2112 user_addr_t range0Addr = 0;
2113 IOByteCount range0Len = 0;
2114
2115 if (vec.v)
2116 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2117
2118 // mapping source == dest? (could be much better)
2119 if( _task
2120 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2121 && (1 == _rangesCount) && (0 == offset)
2122 && range0Addr && (length <= range0Len) )
2123 {
2124 mapping->fAddress = range0Addr;
2125 mapping->fOptions |= kIOMapStatic;
2126
2127 return( kIOReturnSuccess );
2128 }
2129
2130 if( 0 == sharedMem) {
2131
2132 vm_size_t size = ptoa_32(_pages);
2133
2134 if( _task) {
2135
2136 memory_object_size_t actualSize = size;
2137 vm_prot_t prot = VM_PROT_READ;
2138 if (!(kIOMapReadOnly & options))
2139 prot |= VM_PROT_WRITE;
2140 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2141 prot |= VM_PROT_WRITE;
2142
2143 kr = mach_make_memory_entry_64(get_task_map(_task),
2144 &actualSize, range0Addr,
2145 prot, &sharedMem,
2146 NULL );
2147
2148 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
2149 #if IOASSERT
2150 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2151 range0Addr, (UInt32) actualSize, size);
2152 #endif
2153 kr = kIOReturnVMError;
2154 ipc_port_release_send( sharedMem );
2155 }
2156
2157 if( KERN_SUCCESS != kr)
2158 sharedMem = MACH_PORT_NULL;
2159
2160 } else do { // _task == 0, must be physical
2161
2162 memory_object_t pager;
2163 unsigned int flags = 0;
2164 addr64_t pa;
2165 IOPhysicalLength segLen;
2166
2167 pa = getPhysicalSegment64( offset, &segLen );
2168
2169 if( !reserved) {
2170 reserved = IONew( ExpansionData, 1 );
2171 if( !reserved)
2172 continue;
2173 }
2174 reserved->pagerContig = (1 == _rangesCount);
2175 reserved->memory = this;
2176
2177 /* What cache mode do we need? */
2178 switch(options & kIOMapCacheMask ) {
2179
2180 case kIOMapDefaultCache:
2181 default:
2182 flags = IODefaultCacheBits(pa);
2183 if (DEVICE_PAGER_CACHE_INHIB & flags)
2184 {
2185 if (DEVICE_PAGER_GUARDED & flags)
2186 mapping->fOptions |= kIOMapInhibitCache;
2187 else
2188 mapping->fOptions |= kIOMapWriteCombineCache;
2189 }
2190 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2191 mapping->fOptions |= kIOMapWriteThruCache;
2192 else
2193 mapping->fOptions |= kIOMapCopybackCache;
2194 break;
2195
2196 case kIOMapInhibitCache:
2197 flags = DEVICE_PAGER_CACHE_INHIB |
2198 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2199 break;
2200
2201 case kIOMapWriteThruCache:
2202 flags = DEVICE_PAGER_WRITE_THROUGH |
2203 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2204 break;
2205
2206 case kIOMapCopybackCache:
2207 flags = DEVICE_PAGER_COHERENT;
2208 break;
2209
2210 case kIOMapWriteCombineCache:
2211 flags = DEVICE_PAGER_CACHE_INHIB |
2212 DEVICE_PAGER_COHERENT;
2213 break;
2214 }
2215
2216 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2217
2218 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
2219 size, flags);
2220 assert( pager );
2221
2222 if( pager) {
2223 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2224 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2225
2226 assert( KERN_SUCCESS == kr );
2227 if( KERN_SUCCESS != kr)
2228 {
2229 device_pager_deallocate( pager );
2230 pager = MACH_PORT_NULL;
2231 sharedMem = MACH_PORT_NULL;
2232 }
2233 }
2234 if( pager && sharedMem)
2235 reserved->devicePager = pager;
2236 else {
2237 IODelete( reserved, ExpansionData, 1 );
2238 reserved = 0;
2239 }
2240
2241 } while( false );
2242
2243 _memEntry = (void *) sharedMem;
2244 }
2245
2246 IOReturn result;
2247 if (0 == sharedMem)
2248 result = kIOReturnVMError;
2249 else
2250 result = super::doMap( __addressMap, __address,
2251 options, __offset, __length );
2252
2253 return( result );
2254 }
2255
2256 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2257 vm_map_t addressMap,
2258 IOVirtualAddress __address,
2259 IOByteCount __length )
2260 {
2261 return (super::doUnmap(addressMap, __address, __length));
2262 }
2263
2264 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2265
2266 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
2267
2268 /* inline function implementation */
2269 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2270 { return( getPhysicalSegment( 0, 0 )); }
2271
2272
2273 #undef super
2274 #define super IOMemoryMap
2275
2276 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
2277
2278 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2279
2280 bool _IOMemoryMap::init(
2281 task_t intoTask,
2282 mach_vm_address_t toAddress,
2283 IOOptionBits _options,
2284 mach_vm_size_t _offset,
2285 mach_vm_size_t _length )
2286 {
2287 if (!intoTask)
2288 return( false);
2289
2290 if (!super::init())
2291 return(false);
2292
2293 fAddressMap = get_task_map(intoTask);
2294 if (!fAddressMap)
2295 return(false);
2296 vm_map_reference(fAddressMap);
2297
2298 fAddressTask = intoTask;
2299 fOptions = _options;
2300 fLength = _length;
2301 fOffset = _offset;
2302 fAddress = toAddress;
2303
2304 return (true);
2305 }
2306
2307 bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2308 {
2309 if (!_memory)
2310 return(false);
2311
2312 if (!fSuperMap)
2313 {
2314 if( (_offset + fLength) > _memory->getLength())
2315 return( false);
2316 fOffset = _offset;
2317 }
2318
2319 _memory->retain();
2320 if (fMemory)
2321 {
2322 if (fMemory != _memory)
2323 fMemory->removeMapping(this);
2324 fMemory->release();
2325 }
2326 fMemory = _memory;
2327
2328 return( true );
2329 }
2330
2331 struct IOMemoryDescriptorMapAllocRef
2332 {
2333 ipc_port_t sharedMem;
2334 mach_vm_address_t mapped;
2335 mach_vm_size_t size;
2336 mach_vm_size_t sourceOffset;
2337 IOOptionBits options;
2338 };
2339
2340 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2341 {
2342 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2343 IOReturn err;
2344
2345 do {
2346 if( ref->sharedMem)
2347 {
2348 vm_prot_t prot = VM_PROT_READ
2349 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2350
2351 // VM system requires write access to change cache mode
2352 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2353 prot |= VM_PROT_WRITE;
2354
2355 // set memory entry cache
2356 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2357 switch (ref->options & kIOMapCacheMask)
2358 {
2359 case kIOMapInhibitCache:
2360 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2361 break;
2362
2363 case kIOMapWriteThruCache:
2364 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2365 break;
2366
2367 case kIOMapWriteCombineCache:
2368 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2369 break;
2370
2371 case kIOMapCopybackCache:
2372 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2373 break;
2374
2375 case kIOMapDefaultCache:
2376 default:
2377 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2378 break;
2379 }
2380
2381 vm_size_t unused = 0;
2382
2383 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2384 memEntryCacheMode, NULL, ref->sharedMem );
2385 if (KERN_SUCCESS != err)
2386 IOLog("MAP_MEM_ONLY failed %d\n", err);
2387
2388 err = mach_vm_map( map,
2389 &ref->mapped,
2390 ref->size, 0 /* mask */,
2391 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2392 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2393 ref->sharedMem, ref->sourceOffset,
2394 false, // copy
2395 prot, // cur
2396 prot, // max
2397 VM_INHERIT_NONE);
2398
2399 if( KERN_SUCCESS != err) {
2400 ref->mapped = 0;
2401 continue;
2402 }
2403
2404 }
2405 else
2406 {
2407 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2408 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2409 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2410 if( KERN_SUCCESS != err) {
2411 ref->mapped = 0;
2412 continue;
2413 }
2414 // we have to make sure that these guys don't get copied if we fork.
2415 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2416 assert( KERN_SUCCESS == err );
2417 }
2418 }
2419 while( false );
2420
2421 return( err );
2422 }
2423
2424 kern_return_t
2425 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2426 mach_vm_size_t offset,
2427 mach_vm_address_t * address, mach_vm_size_t length)
2428 {
2429 IOReturn err;
2430 IOMemoryDescriptorMapAllocRef ref;
2431
2432 ref.sharedMem = entry;
2433 ref.sourceOffset = trunc_page_64(offset);
2434 ref.options = options;
2435
2436 ref.size = length;
2437
2438 if (options & kIOMapAnywhere)
2439 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2440 ref.mapped = 0;
2441 else
2442 ref.mapped = *address;
2443
2444 if( ref.sharedMem && (map == kernel_map) && pageable)
2445 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2446 else
2447 err = IOMemoryDescriptorMapAlloc( map, &ref );
2448
2449 *address = ref.mapped;
2450 return (err);
2451 }
2452
2453
2454 IOReturn IOMemoryDescriptor::doMap(
2455 vm_map_t __addressMap,
2456 IOVirtualAddress * __address,
2457 IOOptionBits options,
2458 IOByteCount __offset,
2459 IOByteCount __length )
2460 {
2461 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2462
2463 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2464 mach_vm_size_t offset = mapping->fOffset + __offset;
2465 mach_vm_size_t length = mapping->fLength;
2466
2467 IOReturn err = kIOReturnSuccess;
2468 memory_object_t pager;
2469 mach_vm_size_t pageOffset;
2470 IOPhysicalAddress sourceAddr;
2471
2472 do
2473 {
2474 sourceAddr = getSourceSegment( offset, NULL );
2475 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2476
2477 if( reserved)
2478 pager = (memory_object_t) reserved->devicePager;
2479 else
2480 pager = MACH_PORT_NULL;
2481
2482 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2483 {
2484 upl_t redirUPL2;
2485 vm_size_t size;
2486 int flags;
2487
2488 if (!_memEntry)
2489 {
2490 err = kIOReturnNotReadable;
2491 continue;
2492 }
2493
2494 size = mapping->fLength + pageOffset;
2495 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2496 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2497
2498 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2499 NULL, NULL,
2500 &flags))
2501 redirUPL2 = NULL;
2502
2503 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2504 if (kIOReturnSuccess != err)
2505 {
2506 IOLog("upl_transpose(%x)\n", err);
2507 err = kIOReturnSuccess;
2508 }
2509
2510 if (redirUPL2)
2511 {
2512 upl_commit(redirUPL2, NULL, 0);
2513 upl_deallocate(redirUPL2);
2514 redirUPL2 = 0;
2515 }
2516 {
2517 // swap the memEntries since they now refer to different vm_objects
2518 void * me = _memEntry;
2519 _memEntry = mapping->fMemory->_memEntry;
2520 mapping->fMemory->_memEntry = me;
2521 }
2522 if (pager)
2523 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2524 }
2525 else
2526 {
2527 mach_vm_address_t address;
2528
2529 if (!(options & kIOMapAnywhere))
2530 {
2531 address = trunc_page_64(mapping->fAddress);
2532 if( (mapping->fAddress - address) != pageOffset)
2533 {
2534 err = kIOReturnVMError;
2535 continue;
2536 }
2537 }
2538
2539 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2540 options, (kIOMemoryBufferPageable & _flags),
2541 offset, &address, round_page_64(length + pageOffset));
2542 if( err != KERN_SUCCESS)
2543 continue;
2544
2545 if (!_memEntry || pager)
2546 {
2547 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2548 if (err != KERN_SUCCESS)
2549 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2550 }
2551
2552 #ifdef DEBUG
2553 if (kIOLogMapping & gIOKitDebug)
2554 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2555 err, this, sourceAddr, mapping, address, offset, length);
2556 #endif
2557
2558 if (err == KERN_SUCCESS)
2559 mapping->fAddress = address + pageOffset;
2560 else
2561 mapping->fAddress = NULL;
2562 }
2563 }
2564 while( false );
2565
2566 return (err);
2567 }
2568
2569 enum {
2570 kIOMemoryRedirected = 0x00010000
2571 };
2572
2573 IOReturn IOMemoryDescriptor::handleFault(
2574 void * _pager,
2575 vm_map_t addressMap,
2576 mach_vm_address_t address,
2577 mach_vm_size_t sourceOffset,
2578 mach_vm_size_t length,
2579 IOOptionBits options )
2580 {
2581 IOReturn err = kIOReturnSuccess;
2582 memory_object_t pager = (memory_object_t) _pager;
2583 mach_vm_size_t size;
2584 mach_vm_size_t bytes;
2585 mach_vm_size_t page;
2586 mach_vm_size_t pageOffset;
2587 mach_vm_size_t pagerOffset;
2588 IOPhysicalLength segLen;
2589 addr64_t physAddr;
2590
2591 if( !addressMap)
2592 {
2593 if( kIOMemoryRedirected & _flags)
2594 {
2595 #ifdef DEBUG
2596 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2597 #endif
2598 do {
2599 SLEEP;
2600 } while( kIOMemoryRedirected & _flags );
2601 }
2602
2603 return( kIOReturnSuccess );
2604 }
2605
2606 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2607 assert( physAddr );
2608 pageOffset = physAddr - trunc_page_64( physAddr );
2609 pagerOffset = sourceOffset;
2610
2611 size = length + pageOffset;
2612 physAddr -= pageOffset;
2613
2614 segLen += pageOffset;
2615 bytes = size;
2616 do
2617 {
2618 // in the middle of the loop only map whole pages
2619 if( segLen >= bytes)
2620 segLen = bytes;
2621 else if( segLen != trunc_page_32( segLen))
2622 err = kIOReturnVMError;
2623 if( physAddr != trunc_page_64( physAddr))
2624 err = kIOReturnBadArgument;
2625 if (kIOReturnSuccess != err)
2626 break;
2627
2628 #ifdef DEBUG
2629 if( kIOLogMapping & gIOKitDebug)
2630 IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2631 addressMap, address + pageOffset, physAddr + pageOffset,
2632 segLen - pageOffset);
2633 #endif
2634
2635
2636 if( pager) {
2637 if( reserved && reserved->pagerContig) {
2638 IOPhysicalLength allLen;
2639 addr64_t allPhys;
2640
2641 allPhys = getPhysicalSegment64( 0, &allLen );
2642 assert( allPhys );
2643 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) );
2644 }
2645 else
2646 {
2647
2648 for( page = 0;
2649 (page < segLen) && (KERN_SUCCESS == err);
2650 page += page_size)
2651 {
2652 err = device_pager_populate_object(pager, pagerOffset,
2653 (ppnum_t)(atop_64(physAddr + page)), page_size);
2654 pagerOffset += page_size;
2655 }
2656 }
2657 assert( KERN_SUCCESS == err );
2658 if( err)
2659 break;
2660 }
2661
2662 // This call to vm_fault causes an early pmap-level resolution of the
2663 // kernel mappings created above, since faulting them in later cannot
2664 // take place from interrupt level.
2665 /* *** ALERT *** */
2666 /* *** Temporary Workaround *** */
2667
2668 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2669 {
2670 vm_fault(addressMap,
2671 (vm_map_offset_t)address,
2672 VM_PROT_READ|VM_PROT_WRITE,
2673 FALSE, THREAD_UNINT, NULL,
2674 (vm_map_offset_t)0);
2675 }
2676
2677 /* *** Temporary Workaround *** */
2678 /* *** ALERT *** */
2679
2680 sourceOffset += segLen - pageOffset;
2681 address += segLen;
2682 bytes -= segLen;
2683 pageOffset = 0;
2684
2685 }
2686 while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2687
2688 if (bytes)
2689 err = kIOReturnBadArgument;
2690
2691 return (err);
2692 }
2693
2694 IOReturn IOMemoryDescriptor::doUnmap(
2695 vm_map_t addressMap,
2696 IOVirtualAddress __address,
2697 IOByteCount __length )
2698 {
2699 IOReturn err;
2700 mach_vm_address_t address;
2701 mach_vm_size_t length;
2702
2703 if (__length)
2704 {
2705 address = __address;
2706 length = __length;
2707 }
2708 else
2709 {
2710 addressMap = ((_IOMemoryMap *) __address)->fAddressMap;
2711 address = ((_IOMemoryMap *) __address)->fAddress;
2712 length = ((_IOMemoryMap *) __address)->fLength;
2713 }
2714
2715 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2716 addressMap = IOPageableMapForAddress( address );
2717
2718 #ifdef DEBUG
2719 if( kIOLogMapping & gIOKitDebug)
2720 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
2721 addressMap, address, length );
2722 #endif
2723
2724 err = mach_vm_deallocate( addressMap, address, length );
2725
2726 return (err);
2727 }
2728
2729 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2730 {
2731 IOReturn err = kIOReturnSuccess;
2732 _IOMemoryMap * mapping = 0;
2733 OSIterator * iter;
2734
2735 LOCK;
2736
2737 if( doRedirect)
2738 _flags |= kIOMemoryRedirected;
2739 else
2740 _flags &= ~kIOMemoryRedirected;
2741
2742 do {
2743 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2744 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2745 mapping->redirect( safeTask, doRedirect );
2746
2747 iter->release();
2748 }
2749 } while( false );
2750
2751 if (!doRedirect)
2752 {
2753 WAKEUP;
2754 }
2755
2756 UNLOCK;
2757
2758 // temporary binary compatibility
2759 IOSubMemoryDescriptor * subMem;
2760 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2761 err = subMem->redirect( safeTask, doRedirect );
2762 else
2763 err = kIOReturnSuccess;
2764
2765 return( err );
2766 }
2767
2768 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2769 {
2770 return( _parent->redirect( safeTask, doRedirect ));
2771 }
2772
2773 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2774 {
2775 IOReturn err = kIOReturnSuccess;
2776
2777 if( fSuperMap) {
2778 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2779 } else {
2780
2781 LOCK;
2782
2783 do
2784 {
2785 if (!fAddress)
2786 break;
2787 if (!fAddressMap)
2788 break;
2789
2790 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
2791 && (0 == (fOptions & kIOMapStatic)))
2792 {
2793 IOUnmapPages( fAddressMap, fAddress, fLength );
2794 if(!doRedirect && safeTask
2795 && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2796 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
2797 {
2798 IOVirtualAddress iova = (IOVirtualAddress) this;
2799 err = mach_vm_deallocate( fAddressMap, fAddress, fLength );
2800 err = fMemory->doMap( fAddressMap, &iova,
2801 (fOptions & ~kIOMapAnywhere) | kIOMap64Bit/*| kIOMapReserve*/,
2802 0, 0 );
2803 } else
2804 err = kIOReturnSuccess;
2805 #ifdef DEBUG
2806 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
2807 #endif
2808 }
2809 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
2810 {
2811 IOOptionBits newMode;
2812 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
2813 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
2814 }
2815 }
2816 while (false);
2817 UNLOCK;
2818 }
2819
2820 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2821 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
2822 && safeTask
2823 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
2824 fMemory->redirect(safeTask, doRedirect);
2825
2826 return( err );
2827 }
2828
2829 IOReturn _IOMemoryMap::unmap( void )
2830 {
2831 IOReturn err;
2832
2833 LOCK;
2834
2835 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
2836 && (0 == (fOptions & kIOMapStatic))) {
2837
2838 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
2839
2840 } else
2841 err = kIOReturnSuccess;
2842
2843 if (fAddressMap)
2844 {
2845 vm_map_deallocate(fAddressMap);
2846 fAddressMap = 0;
2847 }
2848
2849 fAddress = 0;
2850
2851 UNLOCK;
2852
2853 return( err );
2854 }
2855
2856 void _IOMemoryMap::taskDied( void )
2857 {
2858 LOCK;
2859 if( fAddressMap) {
2860 vm_map_deallocate(fAddressMap);
2861 fAddressMap = 0;
2862 }
2863 fAddressTask = 0;
2864 fAddress = 0;
2865 UNLOCK;
2866 }
2867
2868 // Overload the release mechanism. All mappings must be a member
2869 // of a memory descriptor's _mappings set. This means that we
2870 // always have 2 references on a mapping. When either of these
2871 // references is released we need to free ourselves.
2872 void _IOMemoryMap::taggedRelease(const void *tag) const
2873 {
2874 LOCK;
2875 super::taggedRelease(tag, 2);
2876 UNLOCK;
2877 }
2878
2879 void _IOMemoryMap::free()
2880 {
2881 unmap();
2882
2883 if (fMemory)
2884 {
2885 LOCK;
2886 fMemory->removeMapping(this);
2887 UNLOCK;
2888 fMemory->release();
2889 }
2890
2891 if (fOwner && (fOwner != fMemory))
2892 {
2893 LOCK;
2894 fOwner->removeMapping(this);
2895 UNLOCK;
2896 }
2897
2898 if (fSuperMap)
2899 fSuperMap->release();
2900
2901 if (fRedirUPL) {
2902 upl_commit(fRedirUPL, NULL, 0);
2903 upl_deallocate(fRedirUPL);
2904 }
2905
2906 super::free();
2907 }
2908
2909 IOByteCount _IOMemoryMap::getLength()
2910 {
2911 return( fLength );
2912 }
2913
2914 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2915 {
2916 if (fSuperMap)
2917 fSuperMap->getVirtualAddress();
2918 else if (fAddressMap && vm_map_is_64bit(fAddressMap))
2919 {
2920 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
2921 }
2922
2923 return (fAddress);
2924 }
2925
2926 mach_vm_address_t _IOMemoryMap::getAddress()
2927 {
2928 return( fAddress);
2929 }
2930
2931 mach_vm_size_t _IOMemoryMap::getSize()
2932 {
2933 return( fLength );
2934 }
2935
2936
2937 task_t _IOMemoryMap::getAddressTask()
2938 {
2939 if( fSuperMap)
2940 return( fSuperMap->getAddressTask());
2941 else
2942 return( fAddressTask);
2943 }
2944
2945 IOOptionBits _IOMemoryMap::getMapOptions()
2946 {
2947 return( fOptions);
2948 }
2949
2950 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2951 {
2952 return( fMemory );
2953 }
2954
2955 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2956 _IOMemoryMap * newMapping )
2957 {
2958 task_t task = newMapping->getAddressTask();
2959 mach_vm_address_t toAddress = newMapping->fAddress;
2960 IOOptionBits _options = newMapping->fOptions;
2961 mach_vm_size_t _offset = newMapping->fOffset;
2962 mach_vm_size_t _length = newMapping->fLength;
2963
2964 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
2965 return( 0 );
2966 if( (fOptions ^ _options) & kIOMapReadOnly)
2967 return( 0 );
2968 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2969 && ((fOptions ^ _options) & kIOMapCacheMask))
2970 return( 0 );
2971
2972 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
2973 return( 0 );
2974
2975 if( _offset < fOffset)
2976 return( 0 );
2977
2978 _offset -= fOffset;
2979
2980 if( (_offset + _length) > fLength)
2981 return( 0 );
2982
2983 retain();
2984 if( (fLength == _length) && (!_offset))
2985 {
2986 newMapping->release();
2987 newMapping = this;
2988 }
2989 else
2990 {
2991 newMapping->fSuperMap = this;
2992 newMapping->fOffset = _offset;
2993 newMapping->fAddress = fAddress + _offset;
2994 }
2995
2996 return( newMapping );
2997 }
2998
2999 IOPhysicalAddress
3000 _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3001 {
3002 IOPhysicalAddress address;
3003
3004 LOCK;
3005 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3006 UNLOCK;
3007
3008 return( address );
3009 }
3010
3011 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3012
3013 #undef super
3014 #define super OSObject
3015
3016 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3017
3018 void IOMemoryDescriptor::initialize( void )
3019 {
3020 if( 0 == gIOMemoryLock)
3021 gIOMemoryLock = IORecursiveLockAlloc();
3022
3023 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3024 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3025 if (!gIOCopyMapper)
3026 {
3027 IOMapper *
3028 mapper = new IOCopyMapper;
3029 if (mapper)
3030 {
3031 if (mapper->init() && mapper->start(NULL))
3032 gIOCopyMapper = (IOCopyMapper *) mapper;
3033 else
3034 mapper->release();
3035 }
3036 }
3037
3038 gIOLastPage = IOGetLastPageNumber();
3039 }
3040
3041 void IOMemoryDescriptor::free( void )
3042 {
3043 if( _mappings)
3044 _mappings->release();
3045
3046 super::free();
3047 }
3048
3049 IOMemoryMap * IOMemoryDescriptor::setMapping(
3050 task_t intoTask,
3051 IOVirtualAddress mapAddress,
3052 IOOptionBits options )
3053 {
3054 return (createMappingInTask( intoTask, mapAddress,
3055 options | kIOMapStatic,
3056 0, getLength() ));
3057 }
3058
3059 IOMemoryMap * IOMemoryDescriptor::map(
3060 IOOptionBits options )
3061 {
3062 return (createMappingInTask( kernel_task, 0,
3063 options | kIOMapAnywhere,
3064 0, getLength() ));
3065 }
3066
3067 IOMemoryMap * IOMemoryDescriptor::map(
3068 task_t intoTask,
3069 IOVirtualAddress atAddress,
3070 IOOptionBits options,
3071 IOByteCount offset,
3072 IOByteCount length )
3073 {
3074 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3075 {
3076 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3077 return (0);
3078 }
3079
3080 return (createMappingInTask(intoTask, atAddress,
3081 options, offset, length));
3082 }
3083
3084 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3085 task_t intoTask,
3086 mach_vm_address_t atAddress,
3087 IOOptionBits options,
3088 mach_vm_size_t offset,
3089 mach_vm_size_t length)
3090 {
3091 IOMemoryMap * result;
3092 _IOMemoryMap * mapping;
3093
3094 if (0 == length)
3095 length = getLength();
3096
3097 mapping = new _IOMemoryMap;
3098
3099 if( mapping
3100 && !mapping->init( intoTask, atAddress,
3101 options, offset, length )) {
3102 mapping->release();
3103 mapping = 0;
3104 }
3105
3106 if (mapping)
3107 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3108 else
3109 result = 0;
3110
3111 #ifdef DEBUG
3112 if (!result)
3113 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3114 this, atAddress, options, offset, length);
3115 #endif
3116
3117 return (result);
3118 }
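/*
 * Illustrative sketch, not part of the xnu sources: createMappingInTask()
 * is the 64-bit safe entry point that setMapping() and map() above funnel
 * into. The descriptor pointer below is a hypothetical placeholder; the
 * block is compiled out.
 */
#if 0
static IOMemoryMap *
ExampleMapIntoKernel(IOMemoryDescriptor * md)
{
    // Let the VM pick the address; pass zero offset/length to map the
    // whole descriptor. The mapping is torn down by releasing the map.
    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
                                                kIOMapAnywhere, 0, 0);
    if (map)
    {
        // getAddress() is preferred over getVirtualAddress() on 64-bit maps
        mach_vm_address_t va = map->getAddress();
        (void) va;
    }
    return (map);
}
#endif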
3119
3120 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3121 IOOptionBits options,
3122 IOByteCount offset)
3123 {
3124 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3125 }
3126
3127 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3128 IOOptionBits options,
3129 mach_vm_size_t offset)
3130 {
3131 IOReturn err = kIOReturnSuccess;
3132 IOMemoryDescriptor * physMem = 0;
3133
3134 LOCK;
3135
3136 if (fAddress && fAddressMap) do
3137 {
3138 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3139 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3140 {
3141 physMem = fMemory;
3142 physMem->retain();
3143 }
3144
3145 if (!fRedirUPL)
3146 {
3147 vm_size_t size = fLength;
3148 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3149 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3150 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3151 NULL, NULL,
3152 &flags))
3153 fRedirUPL = 0;
3154
3155 if (physMem)
3156 {
3157 IOUnmapPages( fAddressMap, fAddress, fLength );
3158 physMem->redirect(0, true);
3159 }
3160 }
3161
3162 if (newBackingMemory)
3163 {
3164 if (newBackingMemory != fMemory)
3165 {
3166 fOffset = 0;
3167 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3168 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3169 offset, fLength))
3170 err = kIOReturnError;
3171 }
3172 if (fRedirUPL)
3173 {
3174 upl_commit(fRedirUPL, NULL, 0);
3175 upl_deallocate(fRedirUPL);
3176 fRedirUPL = 0;
3177 }
3178 if (physMem)
3179 physMem->redirect(0, false);
3180 }
3181 }
3182 while (false);
3183
3184 UNLOCK;
3185
3186 if (physMem)
3187 physMem->release();
3188
3189 return (err);
3190 }
3191
3192 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3193 IOMemoryDescriptor * owner,
3194 task_t __intoTask,
3195 IOVirtualAddress __address,
3196 IOOptionBits options,
3197 IOByteCount __offset,
3198 IOByteCount __length )
3199 {
3200 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3201
3202 IOMemoryDescriptor * mapDesc = 0;
3203 _IOMemoryMap * result = 0;
3204 OSIterator * iter;
3205
3206 _IOMemoryMap * mapping = (_IOMemoryMap *) __address;
3207 mach_vm_size_t offset = mapping->fOffset + __offset;
3208 mach_vm_size_t length = mapping->fLength;
3209
3210 mapping->fOffset = offset;
3211
3212 LOCK;
3213
3214 do
3215 {
3216 if (kIOMapStatic & options)
3217 {
3218 result = mapping;
3219 addMapping(mapping);
3220 mapping->setMemoryDescriptor(this, 0);
3221 continue;
3222 }
3223
3224 if (kIOMapUnique & options)
3225 {
3226 IOPhysicalAddress phys;
3227 IOByteCount physLen;
3228
3229 // if (owner != this) continue;
3230
3231 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3232 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3233 {
3234 phys = getPhysicalSegment(offset, &physLen);
3235 if (!phys || (physLen < length))
3236 continue;
3237
3238 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
3239 phys, length, _direction);
3240 if (!mapDesc)
3241 continue;
3242 offset = 0;
3243 mapping->fOffset = offset;
3244 }
3245 }
3246 else
3247 {
3248 // look for a compatible existing mapping
3249 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3250 {
3251 _IOMemoryMap * lookMapping;
3252 while ((lookMapping = (_IOMemoryMap *) iter->getNextObject()))
3253 {
3254 if ((result = lookMapping->copyCompatible(mapping)))
3255 {
3256 addMapping(result);
3257 result->setMemoryDescriptor(this, offset);
3258 break;
3259 }
3260 }
3261 iter->release();
3262 }
3263 if (result || (options & kIOMapReference))
3264 continue;
3265 }
3266
3267 if (!mapDesc)
3268 {
3269 mapDesc = this;
3270 mapDesc->retain();
3271 }
3272 IOReturn
3273 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3274 if (kIOReturnSuccess == kr)
3275 {
3276 result = mapping;
3277 mapDesc->addMapping(result);
3278 result->setMemoryDescriptor(mapDesc, offset);
3279 }
3280 else
3281 {
3282 mapping->release();
3283 mapping = NULL;
3284 }
3285 }
3286 while( false );
3287
3288 UNLOCK;
3289
3290 if (mapDesc)
3291 mapDesc->release();
3292
3293 return (result);
3294 }
3295
3296 void IOMemoryDescriptor::addMapping(
3297 IOMemoryMap * mapping )
3298 {
3299 if( mapping)
3300 {
3301 if( 0 == _mappings)
3302 _mappings = OSSet::withCapacity(1);
3303 if( _mappings )
3304 _mappings->setObject( mapping );
3305 }
3306 }
3307
3308 void IOMemoryDescriptor::removeMapping(
3309 IOMemoryMap * mapping )
3310 {
3311 if( _mappings)
3312 _mappings->removeObject( mapping);
3313 }
3314
3315 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3316
3317 #undef super
3318 #define super IOMemoryDescriptor
3319
3320 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
3321
3322 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3323
3324 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
3325 IOByteCount offset, IOByteCount length,
3326 IODirection direction )
3327 {
3328 if( !parent)
3329 return( false);
3330
3331 if( (offset + length) > parent->getLength())
3332 return( false);
3333
3334 /*
3335 * We can check the _parent instance variable before having ever set it
3336 * to an initial value because I/O Kit guarantees that all our instance
3337 * variables are zeroed on an object's allocation.
3338 */
3339
3340 if( !_parent) {
3341 if( !super::init())
3342 return( false );
3343 } else {
3344 /*
3345 * An existing memory descriptor is being retargeted to
3346 * point to somewhere else. Clean up our present state.
3347 */
3348
3349 _parent->release();
3350 _parent = 0;
3351 }
3352
3353 parent->retain();
3354 _parent = parent;
3355 _start = offset;
3356 _length = length;
3357 _direction = direction;
3358 _tag = parent->getTag();
3359
3360 return( true );
3361 }
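/*
 * Illustrative sketch, not part of the xnu sources: clients normally
 * reach initSubRange() through the IOMemoryDescriptor::withSubRange()
 * factory rather than constructing an IOSubMemoryDescriptor directly.
 * The parent descriptor below is a hypothetical placeholder; the block
 * is compiled out.
 */
#if 0
static IOMemoryDescriptor *
ExampleSubRange(IOMemoryDescriptor * parent)
{
    // Describe the second page of the parent. initSubRange() fails if
    // offset + length exceeds parent->getLength().
    return (IOMemoryDescriptor::withSubRange(parent, page_size, page_size,
                                             kIODirectionOutIn));
}
#endif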
3362
3363 void IOSubMemoryDescriptor::free( void )
3364 {
3365 if( _parent)
3366 _parent->release();
3367
3368 super::free();
3369 }
3370
3371
3372 IOReturn
3373 IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3374 {
3375 IOReturn rtn;
3376
3377 if (kIOMDGetCharacteristics == op) {
3378
3379 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3380 if (kIOReturnSuccess == rtn) {
3381 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3382 data->fLength = _length;
3383 data->fSGCount = 0; // XXX gvdl: need to compute SG count and pages
3384 data->fPages = 0;
3385 data->fPageAlign = 0;
3386 }
3387
3388 return rtn;
3389 }
3390 else if (kIOMDWalkSegments & op) {
3391 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
3392 return kIOReturnUnderrun;
3393
3394 IOMDDMAWalkSegmentArgs *data =
3395 reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
3396 UInt offset = data->fOffset;
3397 UInt remain = _length - offset;
3398 if ((int) remain <= 0)
3399 return (!remain)? kIOReturnOverrun : kIOReturnInternalError;
3400
3401 data->fOffset = offset + _start;
3402 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3403 if (data->fLength > remain)
3404 data->fLength = remain;
3405 data->fOffset = offset;
3406
3407 return rtn;
3408 }
3409 else
3410 return kIOReturnBadArgument;
3411 }
3412
3413 addr64_t
3414 IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
3415 {
3416 addr64_t address;
3417 IOByteCount actualLength;
3418
3419 assert(offset <= _length);
3420
3421 if( length)
3422 *length = 0;
3423
3424 if( offset >= _length)
3425 return( 0 );
3426
3427 address = _parent->getPhysicalSegment64( offset + _start, &actualLength );
3428
3429 if( address && length)
3430 *length = min( _length - offset, actualLength );
3431
3432 return( address );
3433 }
3434
3435 IOPhysicalAddress
3436 IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
3437 {
3438 IOPhysicalAddress address;
3439 IOByteCount actualLength;
3440
3441 assert(offset <= _length);
3442
3443 if( length)
3444 *length = 0;
3445
3446 if( offset >= _length)
3447 return( 0 );
3448
3449 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
3450
3451 if( address && length)
3452 *length = min( _length - offset, actualLength );
3453
3454 return( address );
3455 }
3456
3457 IOPhysicalAddress
3458 IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
3459 {
3460 IOPhysicalAddress address;
3461 IOByteCount actualLength;
3462
3463 assert(offset <= _length);
3464
3465 if( length)
3466 *length = 0;
3467
3468 if( offset >= _length)
3469 return( 0 );
3470
3471 address = _parent->getSourceSegment( offset + _start, &actualLength );
3472
3473 if( address && length)
3474 *length = min( _length - offset, actualLength );
3475
3476 return( address );
3477 }
3478
3479 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3480 IOByteCount * lengthOfSegment)
3481 {
3482 return( 0 );
3483 }
3484
3485 IOReturn IOSubMemoryDescriptor::doMap(
3486 vm_map_t addressMap,
3487 IOVirtualAddress * atAddress,
3488 IOOptionBits options,
3489 IOByteCount sourceOffset,
3490 IOByteCount length )
3491 {
3492 panic("IOSubMemoryDescriptor::doMap");
3493 return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length));
3494 }
3495
3496 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3497 void * bytes, IOByteCount length)
3498 {
3499 IOByteCount byteCount;
3500
3501 assert(offset <= _length);
3502
3503 if( offset >= _length)
3504 return( 0 );
3505
3506 LOCK;
3507 byteCount = _parent->readBytes( _start + offset, bytes,
3508 min(length, _length - offset) );
3509 UNLOCK;
3510
3511 return( byteCount );
3512 }
3513
3514 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3515 const void* bytes, IOByteCount length)
3516 {
3517 IOByteCount byteCount;
3518
3519 assert(offset <= _length);
3520
3521 if( offset >= _length)
3522 return( 0 );
3523
3524 LOCK;
3525 byteCount = _parent->writeBytes( _start + offset, bytes,
3526 min(length, _length - offset) );
3527 UNLOCK;
3528
3529 return( byteCount );
3530 }
3531
3532 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3533 IOOptionBits * oldState )
3534 {
3535 IOReturn err;
3536
3537 LOCK;
3538 err = _parent->setPurgeable( newState, oldState );
3539 UNLOCK;
3540
3541 return( err );
3542 }
3543
3544 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3545 IOByteCount offset, IOByteCount length )
3546 {
3547 IOReturn err;
3548
3549 assert(offset <= _length);
3550
3551 if( offset >= _length)
3552 return( kIOReturnOverrun );
3553
3554 LOCK;
3555 err = _parent->performOperation( options, _start + offset,
3556 min(length, _length - offset) );
3557 UNLOCK;
3558
3559 return( err );
3560 }
3561
3562 IOReturn IOSubMemoryDescriptor::prepare(
3563 IODirection forDirection)
3564 {
3565 IOReturn err;
3566
3567 LOCK;
3568 err = _parent->prepare( forDirection);
3569 UNLOCK;
3570
3571 return( err );
3572 }
3573
3574 IOReturn IOSubMemoryDescriptor::complete(
3575 IODirection forDirection)
3576 {
3577 IOReturn err;
3578
3579 LOCK;
3580 err = _parent->complete( forDirection);
3581 UNLOCK;
3582
3583 return( err );
3584 }
3585
3586 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3587 IOMemoryDescriptor * owner,
3588 task_t intoTask,
3589 IOVirtualAddress address,
3590 IOOptionBits options,
3591 IOByteCount offset,
3592 IOByteCount length )
3593 {
3594 IOMemoryMap * mapping = 0;
3595
3596 if (!(kIOMap64Bit & options))
3597 {
3598 panic("IOSubMemoryDescriptor::makeMapping !64bit");
3599 }
3600
3601 mapping = (IOMemoryMap *) _parent->makeMapping(
3602 owner,
3603 intoTask,
3604 address,
3605 options, _start + offset, length );
3606
3607 return( mapping );
3608 }
3609
3610 /* ick */
3611
3612 bool
3613 IOSubMemoryDescriptor::initWithAddress(void * address,
3614 IOByteCount length,
3615 IODirection direction)
3616 {
3617 return( false );
3618 }
3619
3620 bool
3621 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3622 IOByteCount length,
3623 IODirection direction,
3624 task_t task)
3625 {
3626 return( false );
3627 }
3628
3629 bool
3630 IOSubMemoryDescriptor::initWithPhysicalAddress(
3631 IOPhysicalAddress address,
3632 IOByteCount length,
3633 IODirection direction )
3634 {
3635 return( false );
3636 }
3637
3638 bool
3639 IOSubMemoryDescriptor::initWithRanges(
3640 IOVirtualRange * ranges,
3641 UInt32 withCount,
3642 IODirection direction,
3643 task_t task,
3644 bool asReference)
3645 {
3646 return( false );
3647 }
3648
3649 bool
3650 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3651 UInt32 withCount,
3652 IODirection direction,
3653 bool asReference)
3654 {
3655 return( false );
3656 }
3657
3658 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3659
3660 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3661 {
3662 OSSymbol const *keys[2];
3663 OSObject *values[2];
3664 struct SerData {
3665 user_addr_t address;
3666 user_size_t length;
3667 } *vcopy;
3668 unsigned int index, nRanges;
3669 bool result;
3670
3671 IOOptionBits type = _flags & kIOMemoryTypeMask;
3672
3673 if (s == NULL) return false;
3674 if (s->previouslySerialized(this)) return true;
3675
3676 // Pretend we are an array.
3677 if (!s->addXMLStartTag(this, "array")) return false;
3678
3679 nRanges = _rangesCount;
3680 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3681 if (vcopy == 0) return false;
3682
3683 keys[0] = OSSymbol::withCString("address");
3684 keys[1] = OSSymbol::withCString("length");
3685
3686 result = false;
3687 values[0] = values[1] = 0;
3688
3689 // From this point on, errors exit through the bail: label.
3690
3691 // Copy the volatile data so we don't have to allocate memory
3692 // while the lock is held.
3693 LOCK;
3694 if (nRanges == _rangesCount) {
3695 Ranges vec = _ranges;
3696 for (index = 0; index < nRanges; index++) {
3697 user_addr_t addr; IOByteCount len;
3698 getAddrLenForInd(addr, len, type, vec, index);
3699 vcopy[index].address = addr;
3700 vcopy[index].length = len;
3701 }
3702 } else {
3703 // The descriptor changed out from under us. Give up.
3704 UNLOCK;
3705 result = false;
3706 goto bail;
3707 }
3708 UNLOCK;
3709
3710 for (index = 0; index < nRanges; index++)
3711 {
3712 user_addr_t addr = vcopy[index].address;
3713 IOByteCount len = (IOByteCount) vcopy[index].length;
3714 values[0] =
3715 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3716 if (values[0] == 0) {
3717 result = false;
3718 goto bail;
3719 }
3720 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3721 if (values[1] == 0) {
3722 result = false;
3723 goto bail;
3724 }
3725 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3726 if (dict == 0) {
3727 result = false;
3728 goto bail;
3729 }
3730 values[0]->release();
3731 values[1]->release();
3732 values[0] = values[1] = 0;
3733
3734 result = dict->serialize(s);
3735 dict->release();
3736 if (!result) {
3737 goto bail;
3738 }
3739 }
3740 result = s->addXMLEndTag("array");
3741
3742 bail:
3743 if (values[0])
3744 values[0]->release();
3745 if (values[1])
3746 values[1]->release();
3747 if (keys[0])
3748 keys[0]->release();
3749 if (keys[1])
3750 keys[1]->release();
3751 if (vcopy)
3752 IOFree(vcopy, sizeof(SerData) * nRanges);
3753 return result;
3754 }
3755
3756 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3757 {
3758 if (!s) {
3759 return (false);
3760 }
3761 if (s->previouslySerialized(this)) return true;
3762
3763 // Pretend we are a dictionary.
3764 // We must duplicate the functionality of OSDictionary here
3765 // because otherwise object references will not work;
3766 // they are based on the value of the object passed to
3767 // previouslySerialized and addXMLStartTag.
3768
3769 if (!s->addXMLStartTag(this, "dict")) return false;
3770
3771 char const *keys[3] = {"offset", "length", "parent"};
3772
3773 OSObject *values[3];
3774 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3775 if (values[0] == 0)
3776 return false;
3777 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3778 if (values[1] == 0) {
3779 values[0]->release();
3780 return false;
3781 }
3782 values[2] = _parent;
3783
3784 bool result = true;
3785 for (int i=0; i<3; i++) {
3786 if (!s->addString("<key>") ||
3787 !s->addString(keys[i]) ||
3788 !s->addXMLEndTag("key") ||
3789 !values[i]->serialize(s)) {
3790 result = false;
3791 break;
3792 }
3793 }
3794 values[0]->release();
3795 values[1]->release();
3796 if (!result) {
3797 return false;
3798 }
3799
3800 return s->addXMLEndTag("dict");
3801 }
3802
3803 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3804
3805 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3806 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3809 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3811 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3820 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3821
3822 /* ex-inline function implementation */
3823 IOPhysicalAddress
3824 IOMemoryDescriptor::getPhysicalAddress()
3825 { return( getPhysicalSegment( 0, 0 )); }
3826
3827
3828