1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
36
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
43
44 #include <IOKit/IOKitDebug.h>
45 #include <libkern/OSDebug.h>
46
47 #include "IOKitKernelInternal.h"
48 #include "IOCopyMapper.h"
49
50 #include <libkern/c++/OSContainers.h>
51 #include <libkern/c++/OSDictionary.h>
52 #include <libkern/c++/OSArray.h>
53 #include <libkern/c++/OSSymbol.h>
54 #include <libkern/c++/OSNumber.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <vm/vm_fault.h>
67 #include <vm/vm_protos.h>
68
69 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
70 void ipc_port_release_send(ipc_port_t port);
71
72 /* Copy between a physical page and a virtual address in the given vm_map */
73 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
74
75 memory_object_t
76 device_pager_setup(
77 memory_object_t pager,
78 int device_handle,
79 vm_size_t size,
80 int flags);
81 void
82 device_pager_deallocate(
83 memory_object_t);
84 kern_return_t
85 device_pager_populate_object(
86 memory_object_t pager,
87 vm_object_offset_t offset,
88 ppnum_t phys_addr,
89 vm_size_t size);
90 kern_return_t
91 memory_object_iopl_request(
92 ipc_port_t port,
93 memory_object_offset_t offset,
94 vm_size_t *upl_size,
95 upl_t *upl_ptr,
96 upl_page_info_array_t user_page_list,
97 unsigned int *page_list_count,
98 int *flags);
99
100 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
101
102 __END_DECLS
103
104 #define kIOMaximumMappedIOByteCount (512*1024*1024)
105
106 static IOMapper * gIOSystemMapper = NULL;
107
108 IOCopyMapper * gIOCopyMapper = NULL;
109
110 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
111
112 ppnum_t gIOLastPage;
113
114 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
115
116 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
117
118 #define super IOMemoryDescriptor
119
120 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
121
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123
124 static IORecursiveLock * gIOMemoryLock;
125
126 #define LOCK IORecursiveLockLock( gIOMemoryLock)
127 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
128 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
129 #define WAKEUP \
130 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
131
132 #if 0
133 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
134 #else
135 #define DEBG(fmt, args...) {}
136 #endif
137
138 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
139
140 class _IOMemoryMap : public IOMemoryMap
141 {
142 OSDeclareDefaultStructors(_IOMemoryMap)
143 public:
144 IOMemoryDescriptor * fMemory;
145 IOMemoryMap * fSuperMap;
146 mach_vm_size_t fOffset;
147 mach_vm_address_t fAddress;
148 mach_vm_size_t fLength;
149 task_t fAddressTask;
150 vm_map_t fAddressMap;
151 IOOptionBits fOptions;
152 upl_t fRedirUPL;
153 ipc_port_t fRedirEntry;
154 IOMemoryDescriptor * fOwner;
155
156 protected:
157 virtual void taggedRelease(const void *tag = 0) const;
158 virtual void free();
159
160 public:
161
162 // IOMemoryMap methods
163 virtual IOVirtualAddress getVirtualAddress();
164 virtual IOByteCount getLength();
165 virtual task_t getAddressTask();
166 virtual mach_vm_address_t getAddress();
167 virtual mach_vm_size_t getSize();
168 virtual IOMemoryDescriptor * getMemoryDescriptor();
169 virtual IOOptionBits getMapOptions();
170
171 virtual IOReturn unmap();
172 virtual void taskDied();
173
174 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
175 IOOptionBits options,
176 IOByteCount offset = 0);
177
178 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
179 IOOptionBits options,
180 mach_vm_size_t offset = 0);
181
182 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
183 IOByteCount * length);
184
185 // for IOMemoryDescriptor use
186 _IOMemoryMap * copyCompatible( _IOMemoryMap * newMapping );
187
188 bool init(
189 task_t intoTask,
190 mach_vm_address_t toAddress,
191 IOOptionBits options,
192 mach_vm_size_t offset,
193 mach_vm_size_t length );
194
195 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
196
197 IOReturn redirect(
198 task_t intoTask, bool redirect );
199 };
200
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
202
203 // Some data structures and accessor macros used by the initWithOptions
204 // function.
205
206 enum ioPLBlockFlags {
207 kIOPLOnDevice = 0x00000001,
208 kIOPLExternUPL = 0x00000002,
209 };
210
211 struct typePersMDData
212 {
213 const IOGeneralMemoryDescriptor *fMD;
214 ipc_port_t fMemEntry;
215 };
216
217 struct ioPLBlock {
218 upl_t fIOPL;
219 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
220 vm_offset_t fPageInfo; // Pointer to page list or index into it
221 ppnum_t fMappedBase; // Page number of first page in this iopl
222 unsigned int fPageOffset; // Offset within first page of iopl
223 unsigned int fFlags; // Flags
224 };
225
226 struct ioGMDData {
227 IOMapper *fMapper;
228 unsigned int fPageCnt;
229 upl_page_info_t fPageList[];
230 ioPLBlock fBlocks[];
231 };
232
233 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
234 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
235 #define getNumIOPL(osd, d) \
236 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
237 #define getPageList(d) (&(d->fPageList[0]))
238 #define computeDataSize(p, u) \
239 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
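/*
 * Editorial layout note: the OSData referenced by _memoryEntries holds one
 * ioGMDData header, then fPageCnt upl_page_info_t entries, then a variable
 * number of ioPLBlock records appended as UPLs are created. The macro
 * computeDataSize(p, u) sizes that buffer for p pages and u iopl blocks,
 * e.g. computeDataSize(_pages, count * 2) in initWithOptions() below, and
 * getIOPLList()/getPageList() recover the two arrays from the header.
 */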
240
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
245
246
247 extern "C" {
248
249 kern_return_t device_data_action(
250 int device_handle,
251 ipc_port_t device_pager,
252 vm_prot_t protection,
253 vm_object_offset_t offset,
254 vm_size_t size)
255 {
256 struct ExpansionData {
257 void * devicePager;
258 unsigned int pagerContig:1;
259 unsigned int unused:31;
260 IOMemoryDescriptor * memory;
261 };
262 kern_return_t kr;
263 ExpansionData * ref = (ExpansionData *) device_handle;
264 IOMemoryDescriptor * memDesc;
265
266 LOCK;
267 memDesc = ref->memory;
268 if( memDesc)
269 {
270 memDesc->retain();
271 kr = memDesc->handleFault( device_pager, 0, 0,
272 offset, size, kIOMapDefaultCache /*?*/);
273 memDesc->release();
274 }
275 else
276 kr = KERN_ABORTED;
277 UNLOCK;
278
279 return( kr );
280 }
281
282 kern_return_t device_close(
283 int device_handle)
284 {
285 struct ExpansionData {
286 void * devicePager;
287 unsigned int pagerContig:1;
288 unsigned int unused:31;
289 IOMemoryDescriptor * memory;
290 };
291 ExpansionData * ref = (ExpansionData *) device_handle;
292
293 IODelete( ref, ExpansionData, 1 );
294
295 return( kIOReturnSuccess );
296 }
297 }; // end extern "C"
298
299 // Note this inline function uses C++ reference arguments to return values.
300 // This means that pointers are not passed and NULL checks are unnecessary,
301 // since a NULL reference is illegal.
302 static inline void
303 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
304 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
305 {
306 assert(kIOMemoryTypeUIO == type
307 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
308 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
309 if (kIOMemoryTypeUIO == type) {
310 user_size_t us;
311 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
312 }
313 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
314 IOAddressRange cur = r.v64[ind];
315 addr = cur.address;
316 len = cur.length;
317 }
318 else {
319 IOVirtualRange cur = r.v[ind];
320 addr = cur.address;
321 len = cur.length;
322 }
323 }
324
325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
326
327 /*
328 * withAddress:
329 *
330 * Create a new IOMemoryDescriptor. The buffer is a virtual address
331 * relative to the specified task. If no task is supplied, the kernel
332 * task is implied.
333 */
334 IOMemoryDescriptor *
335 IOMemoryDescriptor::withAddress(void * address,
336 IOByteCount length,
337 IODirection direction)
338 {
339 return IOMemoryDescriptor::
340 withAddress((vm_address_t) address, length, direction, kernel_task);
341 }
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withAddress(vm_address_t address,
345 IOByteCount length,
346 IODirection direction,
347 task_t task)
348 {
349 #if TEST_V64
350 if (task)
351 {
352 IOOptionBits options = (IOOptionBits) direction;
353 if (task == kernel_task)
354 options |= kIOMemoryAutoPrepare;
355 return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
356 }
357 #endif
358 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
359 if (that)
360 {
361 if (that->initWithAddress(address, length, direction, task))
362 return that;
363
364 that->release();
365 }
366 return 0;
367 }
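/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): a kernel buffer wrapped by withAddress() and wired for I/O. The
 * buffer, size and direction below are hypothetical placeholders.
 *
 *   void * buf = IOMalloc(4096);
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOutIn);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... perform I/O against md ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 *   IOFree(buf, 4096);
 */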
368
369 IOMemoryDescriptor *
370 IOMemoryDescriptor::withPhysicalAddress(
371 IOPhysicalAddress address,
372 IOByteCount length,
373 IODirection direction )
374 {
375 #if TEST_P64
376 return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
377 #endif
378 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
379 if (self
380 && !self->initWithPhysicalAddress(address, length, direction)) {
381 self->release();
382 return 0;
383 }
384
385 return self;
386 }
387
388 IOMemoryDescriptor *
389 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
390 UInt32 withCount,
391 IODirection direction,
392 task_t task,
393 bool asReference)
394 {
395 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
396 if (that)
397 {
398 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
399 return that;
400
401 that->release();
402 }
403 return 0;
404 }
405
406 IOMemoryDescriptor *
407 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
408 mach_vm_size_t length,
409 IOOptionBits options,
410 task_t task)
411 {
412 IOAddressRange range = { address, length };
413 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
414 }
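/*
 * Illustrative sketch (editorial; assumes a driver holding a user task
 * reference, e.g. in an IOUserClient method - userAddr, userLen and fTask
 * are hypothetical): withAddressRange() is the preferred way to describe a
 * 64-bit user-space buffer, as the backtrace warning in initWithOptions()
 * below also suggests.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userAddr, userLen, kIODirectionOut, fTask);
 *   if (md && (kIOReturnSuccess == md->prepare(kIODirectionOut))) {
 *       // ... DMA from, or readBytes() out of, the wired user memory ...
 *       md->complete(kIODirectionOut);
 *   }
 *   if (md) md->release();
 */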
415
416 IOMemoryDescriptor *
417 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
418 UInt32 rangeCount,
419 IOOptionBits options,
420 task_t task)
421 {
422 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
423 if (that)
424 {
425 if (task)
426 options |= kIOMemoryTypeVirtual64;
427 else
428 options |= kIOMemoryTypePhysical64;
429
430 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
431 return that;
432
433 that->release();
434 }
435
436 return 0;
437 }
438
439
440 /*
441 * withOptions:
442 *
443 * Create a new IOMemoryDescriptor. The buffer is made up of several
444 * virtual address ranges, from a given task.
445 *
446 * Passing the ranges as a reference will avoid an extra allocation.
447 */
448 IOMemoryDescriptor *
449 IOMemoryDescriptor::withOptions(void * buffers,
450 UInt32 count,
451 UInt32 offset,
452 task_t task,
453 IOOptionBits opts,
454 IOMapper * mapper)
455 {
456 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
457
458 if (self
459 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
460 {
461 self->release();
462 return 0;
463 }
464
465 return self;
466 }
467
468 // Can't leave this abstract, but it should never be called directly.
469 bool IOMemoryDescriptor::initWithOptions(void * buffers,
470 UInt32 count,
471 UInt32 offset,
472 task_t task,
473 IOOptionBits options,
474 IOMapper * mapper)
475 {
476 // @@@ gvdl: Should I panic?
477 panic("IOMD::initWithOptions called\n");
478 return 0;
479 }
480
481 IOMemoryDescriptor *
482 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
483 UInt32 withCount,
484 IODirection direction,
485 bool asReference)
486 {
487 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
488 if (that)
489 {
490 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
491 return that;
492
493 that->release();
494 }
495 return 0;
496 }
497
498 IOMemoryDescriptor *
499 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
500 IOByteCount offset,
501 IOByteCount length,
502 IODirection direction)
503 {
504 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
505
506 if (self && !self->initSubRange(of, offset, length, direction)) {
507 self->release();
508 self = 0;
509 }
510 return self;
511 }
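/*
 * Illustrative sketch (editorial; parentMD, the offset and the length are
 * hypothetical): a sub-range descriptor presents a window onto an existing
 * descriptor without copying its ranges.
 *
 *   IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *       parentMD, 4096, 8192, kIODirectionIn);
 *   if (sub) {
 *       // offsets into 'sub' are relative to byte 4096 of parentMD
 *       sub->release();
 *   }
 */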
512
513 IOMemoryDescriptor *
514 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
515 {
516 IOGeneralMemoryDescriptor *origGenMD =
517 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
518
519 if (origGenMD)
520 return IOGeneralMemoryDescriptor::
521 withPersistentMemoryDescriptor(origGenMD);
522 else
523 return 0;
524 }
525
526 IOMemoryDescriptor *
527 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
528 {
529 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
530
531 if (!sharedMem)
532 return 0;
533
534 if (sharedMem == originalMD->_memEntry) {
535 originalMD->retain(); // Add a new reference to ourselves
536 ipc_port_release_send(sharedMem); // Remove extra send right
537 return originalMD;
538 }
539
540 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
541 typePersMDData initData = { originalMD, sharedMem };
542
543 if (self
544 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
545 self->release();
546 self = 0;
547 }
548 return self;
549 }
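/*
 * Illustrative sketch (editorial; originalMD is a hypothetical descriptor
 * created with kIOMemoryPersistent): the persistent copy refers to the same
 * named entry, so it keeps tracking the original physical pages.
 *
 *   IOMemoryDescriptor * persistent =
 *       IOMemoryDescriptor::withPersistentMemoryDescriptor(originalMD);
 *   if (persistent) {
 *       // may be the original (retained) or a new descriptor sharing the
 *       // same named entry; release it when done
 *       persistent->release();
 *   }
 */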
550
551 void *IOGeneralMemoryDescriptor::createNamedEntry()
552 {
553 kern_return_t error;
554 ipc_port_t sharedMem;
555
556 IOOptionBits type = _flags & kIOMemoryTypeMask;
557
558 user_addr_t range0Addr;
559 IOByteCount range0Len;
560 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
561 range0Addr = trunc_page_64(range0Addr);
562
563 vm_size_t size = ptoa_32(_pages);
564 vm_address_t kernelPage = (vm_address_t) range0Addr;
565
566 vm_map_t theMap = ((_task == kernel_task)
567 && (kIOMemoryBufferPageable & _flags))
568 ? IOPageableMapForAddress(kernelPage)
569 : get_task_map(_task);
570
571 memory_object_size_t actualSize = size;
572 vm_prot_t prot = VM_PROT_READ;
573 #if CONFIG_EMBEDDED
574 if (kIODirectionOut != (kIODirectionOutIn & _flags))
575 #endif
576 prot |= VM_PROT_WRITE;
577
578 if (_memEntry)
579 prot |= MAP_MEM_NAMED_REUSE;
580
581 error = mach_make_memory_entry_64(theMap,
582 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
583
584 if (KERN_SUCCESS == error) {
585 if (actualSize == size) {
586 return sharedMem;
587 } else {
588 #if IOASSERT
589 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
590 (UInt64)range0Addr, (UInt32)actualSize, size);
591 #endif
592 ipc_port_release_send( sharedMem );
593 }
594 }
595
596 return MACH_PORT_NULL;
597 }
598
599 /*
600 * initWithAddress:
601 *
602 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
603 * relative to the specified task. If no task is supplied, the kernel
604 * task is implied.
605 *
606 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
607 * initWithRanges again on an existing instance -- note this behavior
608 * is not commonly supported in other I/O Kit classes, although it is
609 * supported here.
610 */
611 bool
612 IOGeneralMemoryDescriptor::initWithAddress(void * address,
613 IOByteCount withLength,
614 IODirection withDirection)
615 {
616 _singleRange.v.address = (vm_address_t) address;
617 _singleRange.v.length = withLength;
618
619 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
620 }
621
622 bool
623 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
624 IOByteCount withLength,
625 IODirection withDirection,
626 task_t withTask)
627 {
628 _singleRange.v.address = address;
629 _singleRange.v.length = withLength;
630
631 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
632 }
633
634 bool
635 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
636 IOPhysicalAddress address,
637 IOByteCount withLength,
638 IODirection withDirection )
639 {
640 _singleRange.p.address = address;
641 _singleRange.p.length = withLength;
642
643 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
644 }
645
646 bool
647 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
648 IOPhysicalRange * ranges,
649 UInt32 count,
650 IODirection direction,
651 bool reference)
652 {
653 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
654
655 if (reference)
656 mdOpts |= kIOMemoryAsReference;
657
658 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
659 }
660
661 bool
662 IOGeneralMemoryDescriptor::initWithRanges(
663 IOVirtualRange * ranges,
664 UInt32 count,
665 IODirection direction,
666 task_t task,
667 bool reference)
668 {
669 IOOptionBits mdOpts = direction;
670
671 if (reference)
672 mdOpts |= kIOMemoryAsReference;
673
674 if (task) {
675 mdOpts |= kIOMemoryTypeVirtual;
676
677 // Auto-prepare if this is a kernel memory descriptor, as very few
678 // clients bother to prepare() kernel memory.
679 // That requirement was never enforced, so auto-preparing is the safe default.
680 if (task == kernel_task)
681 mdOpts |= kIOMemoryAutoPrepare;
682 }
683 else
684 mdOpts |= kIOMemoryTypePhysical;
685
686 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
687 }
688
689 /*
690 * initWithOptions:
691 *
692 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
693 * address ranges from a given task, several physical ranges, a UPL from the
694 * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
695 *
696 * Passing the ranges as a reference will avoid an extra allocation.
697 *
698 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
699 * existing instance -- note this behavior is not commonly supported in other
700 * I/O Kit classes, although it is supported here.
701 */
702
703 bool
704 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
705 UInt32 count,
706 UInt32 offset,
707 task_t task,
708 IOOptionBits options,
709 IOMapper * mapper)
710 {
711 IOOptionBits type = options & kIOMemoryTypeMask;
712
713 // Grab the original MD's configuration data to initialise the
714 // arguments to this function.
715 if (kIOMemoryTypePersistentMD == type) {
716
717 typePersMDData *initData = (typePersMDData *) buffers;
718 const IOGeneralMemoryDescriptor *orig = initData->fMD;
719 ioGMDData *dataP = getDataP(orig->_memoryEntries);
720
721 // Only accept persistent memory descriptors with valid dataP data.
722 assert(orig->_rangesCount == 1);
723 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
724 return false;
725
726 _memEntry = initData->fMemEntry; // Grab the new named entry
727 options = orig->_flags | kIOMemoryAsReference;
728 _singleRange = orig->_singleRange; // Initialise our range
729 buffers = &_singleRange;
730 count = 1;
731
732 // Now grab the original task and whatever mapper was previously used
733 task = orig->_task;
734 mapper = dataP->fMapper;
735
736 // We are ready to go through the original initialisation now
737 }
738
739 switch (type) {
740 case kIOMemoryTypeUIO:
741 case kIOMemoryTypeVirtual:
742 case kIOMemoryTypeVirtual64:
743 assert(task);
744 if (!task)
745 return false;
746
747 if (vm_map_is_64bit(get_task_map(task))
748 && (kIOMemoryTypeVirtual == type)
749 && ((IOVirtualRange *) buffers)->address)
750 {
751 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
752 return false;
753 }
754 break;
755
756 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
757 case kIOMemoryTypePhysical64:
758 mapper = kIOMapperNone;
759
760 case kIOMemoryTypeUPL:
761 assert(!task);
762 break;
763 default:
764 return false; /* bad argument */
765 }
766
767 assert(buffers);
768 assert(count);
769
770 /*
771 * We can check the _initialized instance variable before having ever set
772 * it to an initial value because I/O Kit guarantees that all our instance
773 * variables are zeroed on an object's allocation.
774 */
775
776 if (_initialized) {
777 /*
778 * An existing memory descriptor is being retargeted to point to
779 * somewhere else. Clean up our present state.
780 */
781 IOOptionBits type = _flags & kIOMemoryTypeMask;
782 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
783 {
784 while (_wireCount)
785 complete();
786 }
787 if (_ranges.v && _rangesIsAllocated)
788 {
789 if (kIOMemoryTypeUIO == type)
790 uio_free((uio_t) _ranges.v);
791 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
792 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
793 else
794 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
795 }
796
797 if (_memEntry)
798 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
799 if (_mappings)
800 _mappings->flushCollection();
801 }
802 else {
803 if (!super::init())
804 return false;
805 _initialized = true;
806 }
807
808 // Grab the appropriate mapper
809 if (mapper == kIOMapperNone)
810 mapper = 0; // No Mapper
811 else if (mapper == kIOMapperSystem) {
812 IOMapper::checkForSystemMapper();
813 gIOSystemMapper = mapper = IOMapper::gSystem;
814 }
815
816 // Temp binary compatibility for kIOMemoryThreadSafe
817 if (kIOMemoryReserved6156215 & options)
818 {
819 options &= ~kIOMemoryReserved6156215;
820 options |= kIOMemoryThreadSafe;
821 }
822 // Remove the dynamic internal use flags from the initial setting
823 options &= ~(kIOMemoryPreparedReadOnly);
824 _flags = options;
825 _task = task;
826
827 // DEPRECATED variable initialisation
828 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
829
830 __iomd_reservedA = 0;
831 __iomd_reservedB = 0;
832 _highestPage = 0;
833
834 if (kIOMemoryThreadSafe & options)
835 {
836 if (!_prepareLock)
837 _prepareLock = IOLockAlloc();
838 }
839 else if (_prepareLock)
840 {
841 IOLockFree(_prepareLock);
842 _prepareLock = NULL;
843 }
844
845 if (kIOMemoryTypeUPL == type) {
846
847 ioGMDData *dataP;
848 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
849
850 if (!_memoryEntries) {
851 _memoryEntries = OSData::withCapacity(dataSize);
852 if (!_memoryEntries)
853 return false;
854 }
855 else if (!_memoryEntries->initWithCapacity(dataSize))
856 return false;
857
858 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
859 dataP = getDataP(_memoryEntries);
860 dataP->fMapper = mapper;
861 dataP->fPageCnt = 0;
862
863 // _wireCount++; // UPLs start out life wired
864
865 _length = count;
866 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
867
868 ioPLBlock iopl;
869 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
870
871 iopl.fIOPL = (upl_t) buffers;
872 // Set the flags; kIOPLOnDevice is conveniently equal to 1
873 iopl.fFlags = pageList->device | kIOPLExternUPL;
874 iopl.fIOMDOffset = 0;
875
876 _highestPage = upl_get_highest_page(iopl.fIOPL);
877
878 if (!pageList->device) {
879 // Pre-compute the offset into the UPL's page list
880 pageList = &pageList[atop_32(offset)];
881 offset &= PAGE_MASK;
882 if (mapper) {
883 iopl.fMappedBase = mapper->iovmAlloc(_pages);
884 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
885 }
886 else
887 iopl.fMappedBase = 0;
888 }
889 else
890 iopl.fMappedBase = 0;
891 iopl.fPageInfo = (vm_address_t) pageList;
892 iopl.fPageOffset = offset;
893
894 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
895 }
896 else {
897 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
898 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
899
900 // Initialize the memory descriptor
901 if (options & kIOMemoryAsReference) {
902 _rangesIsAllocated = false;
903
904 // Hack assignment to get the buffer arg into _ranges.
905 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
906 // work, C++ sigh.
907 // This also initialises the uio & physical ranges.
908 _ranges.v = (IOVirtualRange *) buffers;
909 }
910 else {
911 _rangesIsAllocated = true;
912 switch (_flags & kIOMemoryTypeMask)
913 {
914 case kIOMemoryTypeUIO:
915 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
916 break;
917
918 case kIOMemoryTypeVirtual64:
919 case kIOMemoryTypePhysical64:
920 _ranges.v64 = IONew(IOAddressRange, count);
921 if (!_ranges.v64)
922 return false;
923 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
924 break;
925 case kIOMemoryTypeVirtual:
926 case kIOMemoryTypePhysical:
927 _ranges.v = IONew(IOVirtualRange, count);
928 if (!_ranges.v)
929 return false;
930 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
931 break;
932 }
933 }
934
935 // Find starting address within the vector of ranges
936 Ranges vec = _ranges;
937 UInt32 length = 0;
938 UInt32 pages = 0;
939 for (unsigned ind = 0; ind < count; ind++) {
940 user_addr_t addr;
941 UInt32 len;
942
943 // addr & len are returned by this function
944 getAddrLenForInd(addr, len, type, vec, ind);
945 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
946 len += length;
947 assert(len >= length); // Check for 32 bit wrap around
948 length = len;
949
950 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
951 {
952 ppnum_t highPage = atop_64(addr + len - 1);
953 if (highPage > _highestPage)
954 _highestPage = highPage;
955 }
956 }
957 _length = length;
958 _pages = pages;
959 _rangesCount = count;
960
961 // Auto-prepare memory at creation time.
962 // Implied completion when the descriptor is freed
963 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
964 _wireCount++; // Physical MDs are, by definition, wired
965 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
966 ioGMDData *dataP;
967 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
968
969 if (!_memoryEntries) {
970 _memoryEntries = OSData::withCapacity(dataSize);
971 if (!_memoryEntries)
972 return false;
973 }
974 else if (!_memoryEntries->initWithCapacity(dataSize))
975 return false;
976
977 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
978 dataP = getDataP(_memoryEntries);
979 dataP->fMapper = mapper;
980 dataP->fPageCnt = _pages;
981
982 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
983 _memEntry = createNamedEntry();
984
985 if ((_flags & kIOMemoryAutoPrepare)
986 && prepare() != kIOReturnSuccess)
987 return false;
988 }
989 }
990
991 return true;
992 }
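/*
 * Illustrative sketch of the re-initialisation behaviour documented above
 * (editorial; task, addr1, addr2 and len are hypothetical). Calling
 * initWithOptions() again on a live descriptor cleans up the previous state
 * and retargets the object at the new range:
 *
 *   IOAddressRange range = { addr1, len };
 *   IOGeneralMemoryDescriptor * md = new IOGeneralMemoryDescriptor;
 *   if (md && md->initWithOptions(&range, 1, 0, task,
 *                     kIOMemoryTypeVirtual64 | kIODirectionOutIn, 0)) {
 *       // ... use md, then retarget it ...
 *       range.address = addr2;
 *       md->initWithOptions(&range, 1, 0, task,
 *                     kIOMemoryTypeVirtual64 | kIODirectionOutIn, 0);
 *   }
 *   if (md) md->release();
 */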
993
994 /*
995 * free
996 *
997 * Free resources.
998 */
999 void IOGeneralMemoryDescriptor::free()
1000 {
1001 IOOptionBits type = _flags & kIOMemoryTypeMask;
1002
1003 if( reserved)
1004 {
1005 LOCK;
1006 reserved->memory = 0;
1007 UNLOCK;
1008 }
1009
1010 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1011 {
1012 while (_wireCount)
1013 complete();
1014 }
1015 if (_memoryEntries)
1016 _memoryEntries->release();
1017
1018 if (_ranges.v && _rangesIsAllocated)
1019 {
1020 if (kIOMemoryTypeUIO == type)
1021 uio_free((uio_t) _ranges.v);
1022 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1023 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1024 else
1025 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1026
1027 _ranges.v = NULL;
1028 }
1029
1030 if (reserved && reserved->devicePager)
1031 device_pager_deallocate( (memory_object_t) reserved->devicePager );
1032
1033 // memEntry holds a ref on the device pager which owns reserved
1034 // (ExpansionData) so no reserved access after this point
1035 if (_memEntry)
1036 ipc_port_release_send( (ipc_port_t) _memEntry );
1037
1038 if (_prepareLock)
1039 IOLockFree(_prepareLock);
1040
1041 super::free();
1042 }
1043
1044 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
1045 /* DEPRECATED */ {
1046 panic("IOGMD::unmapFromKernel deprecated");
1047 /* DEPRECATED */ }
1048 /* DEPRECATED */
1049 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1050 /* DEPRECATED */ {
1051 panic("IOGMD::mapIntoKernel deprecated");
1052 /* DEPRECATED */ }
1053
1054 /*
1055 * getDirection:
1056 *
1057 * Get the direction of the transfer.
1058 */
1059 IODirection IOMemoryDescriptor::getDirection() const
1060 {
1061 return _direction;
1062 }
1063
1064 /*
1065 * getLength:
1066 *
1067 * Get the length of the transfer (over all ranges).
1068 */
1069 IOByteCount IOMemoryDescriptor::getLength() const
1070 {
1071 return _length;
1072 }
1073
1074 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1075 {
1076 _tag = tag;
1077 }
1078
1079 IOOptionBits IOMemoryDescriptor::getTag( void )
1080 {
1081 return( _tag);
1082 }
1083
1084 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1085 IOPhysicalAddress
1086 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1087 {
1088 addr64_t physAddr = 0;
1089
1090 if( prepare() == kIOReturnSuccess) {
1091 physAddr = getPhysicalSegment64( offset, length );
1092 complete();
1093 }
1094
1095 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1096 }
1097
1098 IOByteCount IOMemoryDescriptor::readBytes
1099 (IOByteCount offset, void *bytes, IOByteCount length)
1100 {
1101 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
1102 IOByteCount remaining;
1103
1104 // Assert that this entire I/O is within the available range
1105 assert(offset < _length);
1106 assert(offset + length <= _length);
1107 if (offset >= _length) {
1108 return 0;
1109 }
1110
1111 remaining = length = min(length, _length - offset);
1112 while (remaining) { // (process another target segment?)
1113 addr64_t srcAddr64;
1114 IOByteCount srcLen;
1115
1116 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
1117 if (!srcAddr64)
1118 break;
1119
1120 // Clip segment length to remaining
1121 if (srcLen > remaining)
1122 srcLen = remaining;
1123
1124 copypv(srcAddr64, dstAddr, srcLen,
1125 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1126
1127 dstAddr += srcLen;
1128 offset += srcLen;
1129 remaining -= srcLen;
1130 }
1131
1132 assert(!remaining);
1133
1134 return length - remaining;
1135 }
1136
1137 IOByteCount IOMemoryDescriptor::writeBytes
1138 (IOByteCount offset, const void *bytes, IOByteCount length)
1139 {
1140 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1141 IOByteCount remaining;
1142
1143 // Assert that this entire I/O is within the available range
1144 assert(offset < _length);
1145 assert(offset + length <= _length);
1146
1147 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1148
1149 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1150 return 0;
1151 }
1152
1153 remaining = length = min(length, _length - offset);
1154 while (remaining) { // (process another target segment?)
1155 addr64_t dstAddr64;
1156 IOByteCount dstLen;
1157
1158 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1159 if (!dstAddr64)
1160 break;
1161
1162 // Clip segment length to remaining
1163 if (dstLen > remaining)
1164 dstLen = remaining;
1165
1166 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1167 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1168
1169 srcAddr += dstLen;
1170 offset += dstLen;
1171 remaining -= dstLen;
1172 }
1173
1174 assert(!remaining);
1175
1176 return length - remaining;
1177 }
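/*
 * Illustrative sketch (editorial; md is a hypothetical descriptor): the
 * readBytes()/writeBytes() methods copy via the physical segments, so the
 * descriptor need not be mapped into the kernel first, but pageable memory
 * should be prepared around the copies.
 *
 *   UInt8 header[64];
 *   if (kIOReturnSuccess == md->prepare()) {
 *       IOByteCount got = md->readBytes(0, header, sizeof(header));
 *       if (got == sizeof(header))
 *           md->writeBytes(0, header, sizeof(header));
 *       md->complete();
 *   }
 */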
1178
1179 // osfmk/device/iokit_rpc.c
1180 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1181
1182 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1183 /* DEPRECATED */ {
1184 panic("IOGMD::setPosition deprecated");
1185 /* DEPRECATED */ }
1186
1187 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1188 {
1189 if (kIOMDGetCharacteristics == op) {
1190
1191 if (dataSize < sizeof(IOMDDMACharacteristics))
1192 return kIOReturnUnderrun;
1193
1194 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1195 data->fLength = _length;
1196 data->fSGCount = _rangesCount;
1197 data->fPages = _pages;
1198 data->fDirection = _direction;
1199 if (!_wireCount)
1200 data->fIsPrepared = false;
1201 else {
1202 data->fIsPrepared = true;
1203 data->fHighestPage = _highestPage;
1204 if (_memoryEntries) {
1205 ioGMDData *gmdData = getDataP(_memoryEntries);
1206 ioPLBlock *ioplList = getIOPLList(gmdData);
1207 UInt count = getNumIOPL(_memoryEntries, gmdData);
1208
1209 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1210 && ioplList[0].fMappedBase);
1211 if (count == 1)
1212 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1213 }
1214 else
1215 data->fIsMapped = false;
1216 }
1217
1218 return kIOReturnSuccess;
1219 }
1220 else if (!(kIOMDWalkSegments & op))
1221 return kIOReturnBadArgument;
1222
1223 // Get the next segment
1224 struct InternalState {
1225 IOMDDMAWalkSegmentArgs fIO;
1226 UInt fOffset2Index;
1227 UInt fIndex;
1228 UInt fNextOffset;
1229 } *isP;
1230
1231 // Find the next segment
1232 if (dataSize < sizeof(*isP))
1233 return kIOReturnUnderrun;
1234
1235 isP = (InternalState *) vData;
1236 UInt offset = isP->fIO.fOffset;
1237 bool mapped = isP->fIO.fMapped;
1238
1239 if (offset >= _length)
1240 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1241
1242 // Validate the previous offset
1243 UInt ind, off2Ind = isP->fOffset2Index;
1244 if ((kIOMDFirstSegment != op)
1245 && offset
1246 && (offset == isP->fNextOffset || off2Ind <= offset))
1247 ind = isP->fIndex;
1248 else
1249 ind = off2Ind = 0; // Start from beginning
1250
1251 UInt length;
1252 UInt64 address;
1253 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1254
1255 // Physical address based memory descriptor
1256 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1257
1258 // Find the range after the one that contains the offset
1259 UInt len;
1260 for (len = 0; off2Ind <= offset; ind++) {
1261 len = physP[ind].length;
1262 off2Ind += len;
1263 }
1264
1265 // Calculate length within range and starting address
1266 length = off2Ind - offset;
1267 address = physP[ind - 1].address + len - length;
1268
1269 // see how far we can coalesce ranges
1270 while (ind < _rangesCount && address + length == physP[ind].address) {
1271 len = physP[ind].length;
1272 length += len;
1273 off2Ind += len;
1274 ind++;
1275 }
1276
1277 // correct contiguous check overshoot
1278 ind--;
1279 off2Ind -= len;
1280 }
1281 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1282
1283 // Physical address based memory descriptor
1284 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1285
1286 // Find the range after the one that contains the offset
1287 mach_vm_size_t len;
1288 for (len = 0; off2Ind <= offset; ind++) {
1289 len = physP[ind].length;
1290 off2Ind += len;
1291 }
1292
1293 // Calculate length within range and starting address
1294 length = off2Ind - offset;
1295 address = physP[ind - 1].address + len - length;
1296
1297 // see how far we can coalesce ranges
1298 while (ind < _rangesCount && address + length == physP[ind].address) {
1299 len = physP[ind].length;
1300 length += len;
1301 off2Ind += len;
1302 ind++;
1303 }
1304
1305 // correct contiguous check overshoot
1306 ind--;
1307 off2Ind -= len;
1308 }
1309 else do {
1310 if (!_wireCount)
1311 panic("IOGMD: not wired for the IODMACommand");
1312
1313 assert(_memoryEntries);
1314
1315 ioGMDData * dataP = getDataP(_memoryEntries);
1316 const ioPLBlock *ioplList = getIOPLList(dataP);
1317 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1318 upl_page_info_t *pageList = getPageList(dataP);
1319
1320 assert(numIOPLs > 0);
1321
1322 // Scan through iopl info blocks looking for block containing offset
1323 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1324 ind++;
1325
1326 // Go back to actual range as search goes past it
1327 ioPLBlock ioplInfo = ioplList[ind - 1];
1328 off2Ind = ioplInfo.fIOMDOffset;
1329
1330 if (ind < numIOPLs)
1331 length = ioplList[ind].fIOMDOffset;
1332 else
1333 length = _length;
1334 length -= offset; // Remainder within iopl
1335
1336 // Rebase the offset to the start of this iopl within the total list
1337 offset -= off2Ind;
1338
1339 // If a mapped address is requested and this is a pre-mapped IOPL
1340 // then we just need to compute an offset relative to the mapped base.
1341 if (mapped && ioplInfo.fMappedBase) {
1342 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1343 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1344 continue; // Done; leave the do/while(false) now
1345 }
1346
1347 // The offset is rebased into the current iopl.
1348 // Now add the iopl 1st page offset.
1349 offset += ioplInfo.fPageOffset;
1350
1351 // For external UPLs the fPageInfo field points directly to
1352 // the upl's upl_page_info_t array.
1353 if (ioplInfo.fFlags & kIOPLExternUPL)
1354 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1355 else
1356 pageList = &pageList[ioplInfo.fPageInfo];
1357
1358 // Check for direct device non-paged memory
1359 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1360 address = ptoa_64(pageList->phys_addr) + offset;
1361 continue; // Done; leave the do/while(false) now
1362 }
1363
1364 // Now we need to compute the index into the pageList
1365 UInt pageInd = atop_32(offset);
1366 offset &= PAGE_MASK;
1367
1368 // Compute the starting address of this segment
1369 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1370 address = ptoa_64(pageAddr) + offset;
1371
1372 // length is currently set to the length of the remainder of the iopl.
1373 // We need to check that the remainder of the iopl is contiguous.
1374 // This is indicated by pageList[pageInd].phys_addr being sequential.
1375 IOByteCount contigLength = PAGE_SIZE - offset;
1376 while (contigLength < length
1377 && ++pageAddr == pageList[++pageInd].phys_addr)
1378 {
1379 contigLength += PAGE_SIZE;
1380 }
1381
1382 if (contigLength < length)
1383 length = contigLength;
1384
1385
1386 assert(address);
1387 assert(length);
1388
1389 } while (false);
1390
1391 // Update return values and state
1392 isP->fIO.fIOVMAddr = address;
1393 isP->fIO.fLength = length;
1394 isP->fIndex = ind;
1395 isP->fOffset2Index = off2Ind;
1396 isP->fNextOffset = isP->fIO.fOffset + length;
1397
1398 return kIOReturnSuccess;
1399 }
1400
1401 addr64_t
1402 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1403 {
1404 IOReturn ret;
1405 IOByteCount length = 0;
1406 addr64_t address = 0;
1407
1408 if (gIOSystemMapper && (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask)))
1409 return (super::getPhysicalSegment64(offset, lengthOfSegment));
1410
1411 if (offset < _length) // (within bounds?)
1412 {
1413 IOMDDMAWalkSegmentState _state;
1414 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1415
1416 state->fOffset = offset;
1417 state->fLength = _length - offset;
1418 state->fMapped = false;
1419
1420 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1421
1422 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1423 DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1424 ret, this, state->fOffset,
1425 state->fIOVMAddr, state->fLength);
1426 if (kIOReturnSuccess == ret)
1427 {
1428 address = state->fIOVMAddr;
1429 length = state->fLength;
1430 }
1431 if (!address)
1432 length = 0;
1433 }
1434
1435 if (lengthOfSegment)
1436 *lengthOfSegment = length;
1437
1438 return (address);
1439 }
1440
1441 IOPhysicalAddress
1442 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1443 {
1444 IOReturn ret;
1445 IOByteCount length = 0;
1446 addr64_t address = 0;
1447
1448 // assert(offset <= _length);
1449
1450 if (offset < _length) // (within bounds?)
1451 {
1452 IOMDDMAWalkSegmentState _state;
1453 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1454
1455 state->fOffset = offset;
1456 state->fLength = _length - offset;
1457 state->fMapped = true;
1458
1459 ret = dmaCommandOperation(
1460 kIOMDFirstSegment, _state, sizeof(_state));
1461
1462 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1463 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1464 ret, this, state->fOffset,
1465 state->fIOVMAddr, state->fLength);
1466 if (kIOReturnSuccess == ret)
1467 {
1468 address = state->fIOVMAddr;
1469 length = state->fLength;
1470 }
1471
1472 if (!address)
1473 length = 0;
1474 }
1475
1476 if ((address + length) > 0x100000000ULL)
1477 {
1478 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1479 address, length, (getMetaClass())->getClassName());
1480 }
1481
1482 if (lengthOfSegment)
1483 *lengthOfSegment = length;
1484
1485 return ((IOPhysicalAddress) address);
1486 }
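/*
 * Illustrative sketch (editorial; md is a hypothetical prepared
 * descriptor): walking the physical segments one at a time. Newer drivers
 * would normally go through IODMACommand, but the underlying walk looks
 * like this.
 *
 *   IOByteCount offset = 0, segLen;
 *   while (offset < md->getLength()) {
 *       addr64_t seg = md->getPhysicalSegment64(offset, &segLen);
 *       if (!seg) break;
 *       // program one scatter/gather element: (seg, segLen)
 *       offset += segLen;
 *   }
 */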
1487
1488 addr64_t
1489 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1490 {
1491 IOPhysicalAddress phys32;
1492 IOByteCount length;
1493 addr64_t phys64;
1494 IOMapper * mapper = 0;
1495
1496 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1497 if (!phys32)
1498 return 0;
1499
1500 if (gIOSystemMapper)
1501 mapper = gIOSystemMapper;
1502
1503 if (mapper)
1504 {
1505 IOByteCount origLen;
1506
1507 phys64 = mapper->mapAddr(phys32);
1508 origLen = *lengthOfSegment;
1509 length = page_size - (phys64 & (page_size - 1));
1510 while ((length < origLen)
1511 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1512 length += page_size;
1513 if (length > origLen)
1514 length = origLen;
1515
1516 *lengthOfSegment = length;
1517 }
1518 else
1519 phys64 = (addr64_t) phys32;
1520
1521 return phys64;
1522 }
1523
1524 IOPhysicalAddress
1525 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1526 {
1527 IOPhysicalAddress address = 0;
1528 IOPhysicalLength length = 0;
1529 IOOptionBits type = _flags & kIOMemoryTypeMask;
1530
1531 assert(offset <= _length);
1532
1533 if ( type == kIOMemoryTypeUPL)
1534 return super::getSourceSegment( offset, lengthOfSegment );
1535 else if ( offset < _length ) // (within bounds?)
1536 {
1537 unsigned rangesIndex = 0;
1538 Ranges vec = _ranges;
1539 user_addr_t addr;
1540
1541 // Find starting address within the vector of ranges
1542 for (;;) {
1543 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1544 if (offset < length)
1545 break;
1546 offset -= length; // (make offset relative)
1547 rangesIndex++;
1548 }
1549
1550 // Now that we have the starting range,
1551 // let's find the last contiguous range
1552 addr += offset;
1553 length -= offset;
1554
1555 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1556 user_addr_t newAddr;
1557 IOPhysicalLength newLen;
1558
1559 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1560 if (addr + length != newAddr)
1561 break;
1562 length += newLen;
1563 }
1564 if (addr)
1565 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1566 else
1567 length = 0;
1568 }
1569
1570 if ( lengthOfSegment ) *lengthOfSegment = length;
1571
1572 return address;
1573 }
1574
1575 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1576 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1577 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1578 /* DEPRECATED */ {
1579 if (_task == kernel_task)
1580 return (void *) getSourceSegment(offset, lengthOfSegment);
1581 else
1582 panic("IOGMD::getVirtualSegment deprecated");
1583
1584 return 0;
1585 /* DEPRECATED */ }
1586 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1587
1588
1589
1590 IOReturn
1591 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1592 {
1593 if (kIOMDGetCharacteristics == op) {
1594 if (dataSize < sizeof(IOMDDMACharacteristics))
1595 return kIOReturnUnderrun;
1596
1597 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1598 data->fLength = getLength();
1599 data->fSGCount = 0;
1600 data->fDirection = _direction;
1601 if (IOMapper::gSystem)
1602 data->fIsMapped = true;
1603 data->fIsPrepared = true; // Assume prepared - fail safe
1604 }
1605 else if (kIOMDWalkSegments & op) {
1606 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1607 return kIOReturnUnderrun;
1608
1609 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1610 IOByteCount offset = (IOByteCount) data->fOffset;
1611
1612 IOPhysicalLength length;
1613 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1614 if (data->fMapped && IOMapper::gSystem)
1615 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1616 else
1617 data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
1618 data->fLength = length;
1619 }
1620 else
1621 return kIOReturnBadArgument;
1622
1623 return kIOReturnSuccess;
1624 }
1625
1626 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1627 IOOptionBits * oldState )
1628 {
1629 IOReturn err = kIOReturnSuccess;
1630 vm_purgable_t control;
1631 int state;
1632
1633 do
1634 {
1635 if (!_memEntry)
1636 {
1637 err = kIOReturnNotReady;
1638 break;
1639 }
1640
1641 control = VM_PURGABLE_SET_STATE;
1642 switch (newState)
1643 {
1644 case kIOMemoryPurgeableKeepCurrent:
1645 control = VM_PURGABLE_GET_STATE;
1646 break;
1647
1648 case kIOMemoryPurgeableNonVolatile:
1649 state = VM_PURGABLE_NONVOLATILE;
1650 break;
1651 case kIOMemoryPurgeableVolatile:
1652 state = VM_PURGABLE_VOLATILE;
1653 break;
1654 case kIOMemoryPurgeableEmpty:
1655 state = VM_PURGABLE_EMPTY;
1656 break;
1657 default:
1658 err = kIOReturnBadArgument;
1659 break;
1660 }
1661
1662 if (kIOReturnSuccess != err)
1663 break;
1664
1665 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1666
1667 if (oldState)
1668 {
1669 if (kIOReturnSuccess == err)
1670 {
1671 switch (state)
1672 {
1673 case VM_PURGABLE_NONVOLATILE:
1674 state = kIOMemoryPurgeableNonVolatile;
1675 break;
1676 case VM_PURGABLE_VOLATILE:
1677 state = kIOMemoryPurgeableVolatile;
1678 break;
1679 case VM_PURGABLE_EMPTY:
1680 state = kIOMemoryPurgeableEmpty;
1681 break;
1682 default:
1683 state = kIOMemoryPurgeableNonVolatile;
1684 err = kIOReturnNotReady;
1685 break;
1686 }
1687 *oldState = state;
1688 }
1689 }
1690 }
1691 while (false);
1692
1693 return (err);
1694 }
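/*
 * Illustrative sketch (editorial; md is a hypothetical descriptor backed
 * by a named entry): marking cached data volatile while idle, then checking
 * whether the pager emptied it before reuse.
 *
 *   IOOptionBits oldState;
 *   md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
 *   // ... later, before touching the contents again ...
 *   md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *   if (kIOMemoryPurgeableEmpty == oldState) {
 *       // contents were reclaimed; regenerate the cached data
 *   }
 */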
1695
1696 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1697 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1698
1699 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1700 IOByteCount offset, IOByteCount length )
1701 {
1702 IOByteCount remaining;
1703 void (*func)(addr64_t pa, unsigned int count) = 0;
1704
1705 switch (options)
1706 {
1707 case kIOMemoryIncoherentIOFlush:
1708 func = &dcache_incoherent_io_flush64;
1709 break;
1710 case kIOMemoryIncoherentIOStore:
1711 func = &dcache_incoherent_io_store64;
1712 break;
1713 }
1714
1715 if (!func)
1716 return (kIOReturnUnsupported);
1717
1718 remaining = length = min(length, getLength() - offset);
1719 while (remaining)
1720 // (process another target segment?)
1721 {
1722 addr64_t dstAddr64;
1723 IOByteCount dstLen;
1724
1725 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1726 if (!dstAddr64)
1727 break;
1728
1729 // Clip segment length to remaining
1730 if (dstLen > remaining)
1731 dstLen = remaining;
1732
1733 (*func)(dstAddr64, dstLen);
1734
1735 offset += dstLen;
1736 remaining -= dstLen;
1737 }
1738
1739 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1740 }
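/*
 * Illustrative sketch (editorial; md is a hypothetical descriptor covering
 * a non-cache-coherent target): flushing dirty cache lines for the whole
 * range before a device reads it directly.
 *
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */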
1741
1742 #if defined(__ppc__) || defined(__arm__)
1743 extern vm_offset_t static_memory_end;
1744 #define io_kernel_static_end static_memory_end
1745 #else
1746 extern vm_offset_t first_avail;
1747 #define io_kernel_static_end first_avail
1748 #endif
1749
1750 static kern_return_t
1751 io_get_kernel_static_upl(
1752 vm_map_t /* map */,
1753 vm_address_t offset,
1754 vm_size_t *upl_size,
1755 upl_t *upl,
1756 upl_page_info_array_t page_list,
1757 unsigned int *count,
1758 ppnum_t *highest_page)
1759 {
1760 unsigned int pageCount, page;
1761 ppnum_t phys;
1762 ppnum_t highestPage = 0;
1763
1764 pageCount = atop_32(*upl_size);
1765 if (pageCount > *count)
1766 pageCount = *count;
1767
1768 *upl = NULL;
1769
1770 for (page = 0; page < pageCount; page++)
1771 {
1772 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1773 if (!phys)
1774 break;
1775 page_list[page].phys_addr = phys;
1776 page_list[page].pageout = 0;
1777 page_list[page].absent = 0;
1778 page_list[page].dirty = 0;
1779 page_list[page].precious = 0;
1780 page_list[page].device = 0;
1781 if (phys > highestPage)
1782 highestPage = phys;
1783 }
1784
1785 *highest_page = highestPage;
1786
1787 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1788 }
1789
1790 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1791 {
1792 IOOptionBits type = _flags & kIOMemoryTypeMask;
1793 IOReturn error = kIOReturnCannotWire;
1794 ioGMDData *dataP;
1795 ppnum_t mapBase = 0;
1796 IOMapper *mapper;
1797 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1798
1799 assert(!_wireCount);
1800 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1801
1802 if (_pages >= gIOMaximumMappedIOPageCount)
1803 return kIOReturnNoResources;
1804
1805 dataP = getDataP(_memoryEntries);
1806 mapper = dataP->fMapper;
1807 if (mapper && _pages)
1808 mapBase = mapper->iovmAlloc(_pages);
1809
1810 // Note that appendBytes(NULL) zeros the data up to the
1811 // desired length.
1812 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1813 dataP = 0; // May no longer be valid so lets not get tempted.
1814
1815 if (forDirection == kIODirectionNone)
1816 forDirection = _direction;
1817
1818 int uplFlags; // This Mem Desc's default flags for upl creation
1819 switch (kIODirectionOutIn & forDirection)
1820 {
1821 case kIODirectionOut:
1822 // Pages do not need to be marked as dirty on commit
1823 uplFlags = UPL_COPYOUT_FROM;
1824 _flags |= kIOMemoryPreparedReadOnly;
1825 break;
1826
1827 case kIODirectionIn:
1828 default:
1829 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1830 break;
1831 }
1832 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1833
1834 #ifdef UPL_NEED_32BIT_ADDR
1835 if (kIODirectionPrepareToPhys32 & forDirection)
1836 uplFlags |= UPL_NEED_32BIT_ADDR;
1837 #endif
1838
1839 // Find the appropriate vm_map for the given task
1840 vm_map_t curMap;
1841 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1842 curMap = 0;
1843 else
1844 { curMap = get_task_map(_task); }
1845
1846 // Iterate over the vector of virtual ranges
1847 Ranges vec = _ranges;
1848 unsigned int pageIndex = 0;
1849 IOByteCount mdOffset = 0;
1850 ppnum_t highestPage = 0;
1851 for (UInt range = 0; range < _rangesCount; range++) {
1852 ioPLBlock iopl;
1853 user_addr_t startPage;
1854 IOByteCount numBytes;
1855 ppnum_t highPage = 0;
1856
1857 // Get the startPage address and length of vec[range]
1858 getAddrLenForInd(startPage, numBytes, type, vec, range);
1859 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1860 numBytes += iopl.fPageOffset;
1861 startPage = trunc_page_64(startPage);
1862
1863 if (mapper)
1864 iopl.fMappedBase = mapBase + pageIndex;
1865 else
1866 iopl.fMappedBase = 0;
1867
1868 // Iterate over the current range, creating UPLs
1869 while (numBytes) {
1870 dataP = getDataP(_memoryEntries);
1871 vm_address_t kernelStart = (vm_address_t) startPage;
1872 vm_map_t theMap;
1873 if (curMap)
1874 theMap = curMap;
1875 else if (!sharedMem) {
1876 assert(_task == kernel_task);
1877 theMap = IOPageableMapForAddress(kernelStart);
1878 }
1879 else
1880 theMap = NULL;
1881
1882 upl_page_info_array_t pageInfo = getPageList(dataP);
1883 int ioplFlags = uplFlags;
1884 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1885
1886 vm_size_t ioplSize = round_page_32(numBytes);
1887 unsigned int numPageInfo = atop_32(ioplSize);
1888
1889 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1890 error = io_get_kernel_static_upl(theMap,
1891 kernelStart,
1892 &ioplSize,
1893 &iopl.fIOPL,
1894 baseInfo,
1895 &numPageInfo,
1896 &highPage);
1897 }
1898 else if (sharedMem) {
1899 error = memory_object_iopl_request(sharedMem,
1900 ptoa_32(pageIndex),
1901 &ioplSize,
1902 &iopl.fIOPL,
1903 baseInfo,
1904 &numPageInfo,
1905 &ioplFlags);
1906 }
1907 else {
1908 assert(theMap);
1909 error = vm_map_create_upl(theMap,
1910 startPage,
1911 &ioplSize,
1912 &iopl.fIOPL,
1913 baseInfo,
1914 &numPageInfo,
1915 &ioplFlags);
1916 }
1917
1918 assert(ioplSize);
1919 if (error != KERN_SUCCESS)
1920 goto abortExit;
1921
1922 if (iopl.fIOPL)
1923 highPage = upl_get_highest_page(iopl.fIOPL);
1924 if (highPage > highestPage)
1925 highestPage = highPage;
1926
1927 error = kIOReturnCannotWire;
1928
1929 if (baseInfo->device) {
1930 numPageInfo = 1;
1931 iopl.fFlags = kIOPLOnDevice;
1932 // Don't translate device memory at all
1933 if (mapper && mapBase) {
1934 mapper->iovmFree(mapBase, _pages);
1935 mapBase = 0;
1936 iopl.fMappedBase = 0;
1937 }
1938 }
1939 else {
1940 iopl.fFlags = 0;
1941 if (mapper)
1942 mapper->iovmInsert(mapBase, pageIndex,
1943 baseInfo, numPageInfo);
1944 }
1945
1946 iopl.fIOMDOffset = mdOffset;
1947 iopl.fPageInfo = pageIndex;
1948
1949 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1950 {
1951 upl_commit(iopl.fIOPL, 0, 0);
1952 upl_deallocate(iopl.fIOPL);
1953 iopl.fIOPL = 0;
1954 }
1955
1956 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1957 // Clean up the partially created and unsaved iopl
1958 if (iopl.fIOPL) {
1959 upl_abort(iopl.fIOPL, 0);
1960 upl_deallocate(iopl.fIOPL);
1961 }
1962 goto abortExit;
1963 }
1964
1965 // Check for multiple iopls in one virtual range
1966 pageIndex += numPageInfo;
1967 mdOffset -= iopl.fPageOffset;
1968 if (ioplSize < numBytes) {
1969 numBytes -= ioplSize;
1970 startPage += ioplSize;
1971 mdOffset += ioplSize;
1972 iopl.fPageOffset = 0;
1973 if (mapper)
1974 iopl.fMappedBase = mapBase + pageIndex;
1975 }
1976 else {
1977 mdOffset += numBytes;
1978 break;
1979 }
1980 }
1981 }
1982
1983 _highestPage = highestPage;
1984
1985 return kIOReturnSuccess;
1986
1987 abortExit:
1988 {
1989 dataP = getDataP(_memoryEntries);
1990 UInt done = getNumIOPL(_memoryEntries, dataP);
1991 ioPLBlock *ioplList = getIOPLList(dataP);
1992
1993 for (UInt range = 0; range < done; range++)
1994 {
1995 if (ioplList[range].fIOPL) {
1996 upl_abort(ioplList[range].fIOPL, 0);
1997 upl_deallocate(ioplList[range].fIOPL);
1998 }
1999 }
2000 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2001
2002 if (mapper && mapBase)
2003 mapper->iovmFree(mapBase, _pages);
2004 }
2005
2006 if (error == KERN_FAILURE)
2007 error = kIOReturnCannotWire;
2008
2009 return error;
2010 }
2011
2012 /*
2013 * prepare
2014 *
2015 * Prepare the memory for an I/O transfer. This involves paging in
2016 * the memory, if necessary, and wiring it down for the duration of
2017 * the transfer. The complete() method completes the processing of
2018 * the memory after the I/O transfer finishes. This method needn't be
2019 * called for non-pageable memory.
2020 */
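/*
 * Usage sketch (hypothetical driver code; assumes a descriptor "md" obtained
 * elsewhere): prepare() and complete() bracket the actual transfer.
 *
 *     IOReturn doTransfer(IOMemoryDescriptor * md)
 *     {
 *         IOReturn ret = md->prepare(kIODirectionOutIn);   // page in and wire
 *         if (kIOReturnSuccess != ret)
 *             return ret;
 *         // ... program the hardware and wait for the I/O to finish ...
 *         return md->complete(kIODirectionOutIn);          // unwire
 *     }
 */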
2021 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2022 {
2023 IOReturn error = kIOReturnSuccess;
2024 IOOptionBits type = _flags & kIOMemoryTypeMask;
2025
2026 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2027 return kIOReturnSuccess;
2028
2029 if (_prepareLock)
2030 IOLockLock(_prepareLock);
2031
2032 if (!_wireCount
2033 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2034 error = wireVirtual(forDirection);
2035 }
2036
2037 if (kIOReturnSuccess == error)
2038 _wireCount++;
2039
2040 if (_prepareLock)
2041 IOLockUnlock(_prepareLock);
2042
2043 return error;
2044 }
2045
2046 /*
2047 * complete
2048 *
2049 * Complete processing of the memory after an I/O transfer finishes.
2050 * This method should not be called unless a prepare was previously
2051 * issued; the prepare() and complete() calls must occur in pairs,
2052 * before and after an I/O transfer involving pageable memory.
2053 */
2054
2055 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2056 {
2057 IOOptionBits type = _flags & kIOMemoryTypeMask;
2058
2059 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2060 return kIOReturnSuccess;
2061
2062 if (_prepareLock)
2063 IOLockLock(_prepareLock);
2064
2065 assert(_wireCount);
2066
2067 if (_wireCount)
2068 {
2069 _wireCount--;
2070 if (!_wireCount)
2071 {
2072 IOOptionBits type = _flags & kIOMemoryTypeMask;
2073 ioGMDData * dataP = getDataP(_memoryEntries);
2074 ioPLBlock *ioplList = getIOPLList(dataP);
2075 UInt count = getNumIOPL(_memoryEntries, dataP);
2076
2077 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2078 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2079
2080 // Only complete iopls that we created which are for TypeVirtual
2081 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2082 for (UInt ind = 0; ind < count; ind++)
2083 if (ioplList[ind].fIOPL) {
2084 upl_commit(ioplList[ind].fIOPL, 0, 0);
2085 upl_deallocate(ioplList[ind].fIOPL);
2086 }
2087 }
2088 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2089 }
2090 }
2091
2092 if (_prepareLock)
2093 IOLockUnlock(_prepareLock);
2094
2095 return kIOReturnSuccess;
2096 }
2097
2098 IOReturn IOGeneralMemoryDescriptor::doMap(
2099 vm_map_t __addressMap,
2100 IOVirtualAddress * __address,
2101 IOOptionBits options,
2102 IOByteCount __offset,
2103 IOByteCount __length )
2104
2105 {
2106 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2107
2108 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2109 mach_vm_size_t offset = mapping->fOffset + __offset;
2110 mach_vm_size_t length = mapping->fLength;
2111
2112 kern_return_t kr;
2113 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2114
2115 IOOptionBits type = _flags & kIOMemoryTypeMask;
2116 Ranges vec = _ranges;
2117
2118 user_addr_t range0Addr = 0;
2119 IOByteCount range0Len = 0;
2120
2121 if (vec.v)
2122 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2123
2124 // mapping source == dest? (could be much better)
2125 if( _task
2126 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2127 && (1 == _rangesCount) && (0 == offset)
2128 && range0Addr && (length <= range0Len) )
2129 {
2130 mapping->fAddress = range0Addr;
2131 mapping->fOptions |= kIOMapStatic;
2132
2133 return( kIOReturnSuccess );
2134 }
2135
2136 if( 0 == sharedMem) {
2137
2138 vm_size_t size = ptoa_32(_pages);
2139
2140 if( _task) {
2141
2142 memory_object_size_t actualSize = size;
2143 vm_prot_t prot = VM_PROT_READ;
2144 if (!(kIOMapReadOnly & options))
2145 prot |= VM_PROT_WRITE;
2146 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2147 prot |= VM_PROT_WRITE;
2148
2149 kr = mach_make_memory_entry_64(get_task_map(_task),
2150 &actualSize, range0Addr,
2151 prot, &sharedMem,
2152 NULL );
2153
2154 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
2155 #if IOASSERT
2156 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2157 range0Addr, (UInt32) actualSize, size);
2158 #endif
2159 kr = kIOReturnVMError;
2160 ipc_port_release_send( sharedMem );
2161 }
2162
2163 if( KERN_SUCCESS != kr)
2164 sharedMem = MACH_PORT_NULL;
2165
2166 } else do { // _task == 0, must be physical
2167
2168 memory_object_t pager;
2169 unsigned int flags = 0;
2170 addr64_t pa;
2171 IOPhysicalLength segLen;
2172
2173 pa = getPhysicalSegment64( offset, &segLen );
2174
2175 if( !reserved) {
2176 reserved = IONew( ExpansionData, 1 );
2177 if( !reserved)
2178 continue;
2179 }
2180 reserved->pagerContig = (1 == _rangesCount);
2181 reserved->memory = this;
2182
2183 /* What cache mode do we need? */
2184 switch(options & kIOMapCacheMask ) {
2185
2186 case kIOMapDefaultCache:
2187 default:
2188 flags = IODefaultCacheBits(pa);
2189 if (DEVICE_PAGER_CACHE_INHIB & flags)
2190 {
2191 if (DEVICE_PAGER_GUARDED & flags)
2192 mapping->fOptions |= kIOMapInhibitCache;
2193 else
2194 mapping->fOptions |= kIOMapWriteCombineCache;
2195 }
2196 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2197 mapping->fOptions |= kIOMapWriteThruCache;
2198 else
2199 mapping->fOptions |= kIOMapCopybackCache;
2200 break;
2201
2202 case kIOMapInhibitCache:
2203 flags = DEVICE_PAGER_CACHE_INHIB |
2204 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2205 break;
2206
2207 case kIOMapWriteThruCache:
2208 flags = DEVICE_PAGER_WRITE_THROUGH |
2209 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2210 break;
2211
2212 case kIOMapCopybackCache:
2213 flags = DEVICE_PAGER_COHERENT;
2214 break;
2215
2216 case kIOMapWriteCombineCache:
2217 flags = DEVICE_PAGER_CACHE_INHIB |
2218 DEVICE_PAGER_COHERENT;
2219 break;
2220 }
2221
2222 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2223
2224 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
2225 size, flags);
2226 assert( pager );
2227
2228 if( pager) {
2229 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2230 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2231
2232 assert( KERN_SUCCESS == kr );
2233 if( KERN_SUCCESS != kr)
2234 {
2235 device_pager_deallocate( pager );
2236 pager = MACH_PORT_NULL;
2237 sharedMem = MACH_PORT_NULL;
2238 }
2239 }
2240 if( pager && sharedMem)
2241 reserved->devicePager = pager;
2242 else {
2243 IODelete( reserved, ExpansionData, 1 );
2244 reserved = 0;
2245 }
2246
2247 } while( false );
2248
2249 _memEntry = (void *) sharedMem;
2250 }
2251
2252 IOReturn result;
2253 if (0 == sharedMem)
2254 result = kIOReturnVMError;
2255 else
2256 result = super::doMap( __addressMap, __address,
2257 options, __offset, __length );
2258
2259 return( result );
2260 }
2261
2262 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2263 vm_map_t addressMap,
2264 IOVirtualAddress __address,
2265 IOByteCount __length )
2266 {
2267 return (super::doUnmap(addressMap, __address, __length));
2268 }
2269
2270 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2271
2272 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
2273
2274 /* inline function implementation */
2275 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2276 { return( getPhysicalSegment( 0, 0 )); }
2277
2278
2279 #undef super
2280 #define super IOMemoryMap
2281
2282 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
2283
2284 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2285
2286 bool _IOMemoryMap::init(
2287 task_t intoTask,
2288 mach_vm_address_t toAddress,
2289 IOOptionBits _options,
2290 mach_vm_size_t _offset,
2291 mach_vm_size_t _length )
2292 {
2293 if (!intoTask)
2294 return( false);
2295
2296 if (!super::init())
2297 return(false);
2298
2299 fAddressMap = get_task_map(intoTask);
2300 if (!fAddressMap)
2301 return(false);
2302 vm_map_reference(fAddressMap);
2303
2304 fAddressTask = intoTask;
2305 fOptions = _options;
2306 fLength = _length;
2307 fOffset = _offset;
2308 fAddress = toAddress;
2309
2310 return (true);
2311 }
2312
2313 bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2314 {
2315 if (!_memory)
2316 return(false);
2317
2318 if (!fSuperMap)
2319 {
2320 if( (_offset + fLength) > _memory->getLength())
2321 return( false);
2322 fOffset = _offset;
2323 }
2324
2325 _memory->retain();
2326 if (fMemory)
2327 {
2328 if (fMemory != _memory)
2329 fMemory->removeMapping(this);
2330 fMemory->release();
2331 }
2332 fMemory = _memory;
2333
2334 return( true );
2335 }
2336
2337 struct IOMemoryDescriptorMapAllocRef
2338 {
2339 ipc_port_t sharedMem;
2340 mach_vm_address_t mapped;
2341 mach_vm_size_t size;
2342 mach_vm_size_t sourceOffset;
2343 IOOptionBits options;
2344 };
2345
2346 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2347 {
2348 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2349 IOReturn err;
2350
2351 do {
2352 if( ref->sharedMem)
2353 {
2354 vm_prot_t prot = VM_PROT_READ
2355 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2356
2357 // VM system requires write access to change cache mode
2358 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2359 prot |= VM_PROT_WRITE;
2360
2361 // set the memory entry's cache mode
2362 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2363 switch (ref->options & kIOMapCacheMask)
2364 {
2365 case kIOMapInhibitCache:
2366 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2367 break;
2368
2369 case kIOMapWriteThruCache:
2370 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2371 break;
2372
2373 case kIOMapWriteCombineCache:
2374 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2375 break;
2376
2377 case kIOMapCopybackCache:
2378 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2379 break;
2380
2381 case kIOMapDefaultCache:
2382 default:
2383 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2384 break;
2385 }
2386
2387 vm_size_t unused = 0;
2388
2389 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2390 memEntryCacheMode, NULL, ref->sharedMem );
2391 if (KERN_SUCCESS != err)
2392 IOLog("MAP_MEM_ONLY failed %d\n", err);
2393
2394 err = mach_vm_map( map,
2395 &ref->mapped,
2396 ref->size, 0 /* mask */,
2397 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2398 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2399 ref->sharedMem, ref->sourceOffset,
2400 false, // copy
2401 prot, // cur
2402 prot, // max
2403 VM_INHERIT_NONE);
2404
2405 if( KERN_SUCCESS != err) {
2406 ref->mapped = 0;
2407 continue;
2408 }
2409
2410 }
2411 else
2412 {
2413 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2414 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2415 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2416 if( KERN_SUCCESS != err) {
2417 ref->mapped = 0;
2418 continue;
2419 }
2420 // we have to make sure that these guys don't get copied if we fork.
2421 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2422 assert( KERN_SUCCESS == err );
2423 }
2424 }
2425 while( false );
2426
2427 return( err );
2428 }
2429
2430 kern_return_t
2431 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2432 mach_vm_size_t offset,
2433 mach_vm_address_t * address, mach_vm_size_t length)
2434 {
2435 IOReturn err;
2436 IOMemoryDescriptorMapAllocRef ref;
2437
2438 ref.sharedMem = entry;
2439 ref.sourceOffset = trunc_page_64(offset);
2440 ref.options = options;
2441
2442 ref.size = length;
2443
2444 if (options & kIOMapAnywhere)
2445 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2446 ref.mapped = 0;
2447 else
2448 ref.mapped = *address;
2449
2450 if( ref.sharedMem && (map == kernel_map) && pageable)
2451 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2452 else
2453 err = IOMemoryDescriptorMapAlloc( map, &ref );
2454
2455 *address = ref.mapped;
2456 return (err);
2457 }
2458
2459
2460 IOReturn IOMemoryDescriptor::doMap(
2461 vm_map_t __addressMap,
2462 IOVirtualAddress * __address,
2463 IOOptionBits options,
2464 IOByteCount __offset,
2465 IOByteCount __length )
2466 {
2467 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2468
2469 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2470 mach_vm_size_t offset = mapping->fOffset + __offset;
2471 mach_vm_size_t length = mapping->fLength;
2472
2473 IOReturn err = kIOReturnSuccess;
2474 memory_object_t pager;
2475 mach_vm_size_t pageOffset;
2476 IOPhysicalAddress sourceAddr;
2477
2478 do
2479 {
2480 sourceAddr = getSourceSegment( offset, NULL );
2481 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2482
2483 if( reserved)
2484 pager = (memory_object_t) reserved->devicePager;
2485 else
2486 pager = MACH_PORT_NULL;
2487
2488 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2489 {
2490 upl_t redirUPL2;
2491 vm_size_t size;
2492 int flags;
2493
2494 if (!_memEntry)
2495 {
2496 err = kIOReturnNotReadable;
2497 continue;
2498 }
2499
2500 size = mapping->fLength + pageOffset;
2501 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2502 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2503
2504 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2505 NULL, NULL,
2506 &flags))
2507 redirUPL2 = NULL;
2508
2509 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2510 if (kIOReturnSuccess != err)
2511 {
2512 IOLog("upl_transpose(%x)\n", err);
2513 err = kIOReturnSuccess;
2514 }
2515
2516 if (redirUPL2)
2517 {
2518 upl_commit(redirUPL2, NULL, 0);
2519 upl_deallocate(redirUPL2);
2520 redirUPL2 = 0;
2521 }
2522 {
2523 // swap the memEntries since they now refer to different vm_objects
2524 void * me = _memEntry;
2525 _memEntry = mapping->fMemory->_memEntry;
2526 mapping->fMemory->_memEntry = me;
2527 }
2528 if (pager)
2529 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2530 }
2531 else
2532 {
2533 mach_vm_address_t address;
2534
2535 if (!(options & kIOMapAnywhere))
2536 {
2537 address = trunc_page_64(mapping->fAddress);
2538 if( (mapping->fAddress - address) != pageOffset)
2539 {
2540 err = kIOReturnVMError;
2541 continue;
2542 }
2543 }
2544
2545 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2546 options, (kIOMemoryBufferPageable & _flags),
2547 offset, &address, round_page_64(length + pageOffset));
2548 if( err != KERN_SUCCESS)
2549 continue;
2550
2551 if (!_memEntry || pager)
2552 {
2553 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2554 if (err != KERN_SUCCESS)
2555 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2556 }
2557
2558 #ifdef DEBUG
2559 if (kIOLogMapping & gIOKitDebug)
2560 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2561 err, this, sourceAddr, mapping, address, offset, length);
2562 #endif
2563
2564 if (err == KERN_SUCCESS)
2565 mapping->fAddress = address + pageOffset;
2566 else
2567 mapping->fAddress = NULL;
2568 }
2569 }
2570 while( false );
2571
2572 return (err);
2573 }
2574
2575 IOReturn IOMemoryDescriptor::handleFault(
2576 void * _pager,
2577 vm_map_t addressMap,
2578 mach_vm_address_t address,
2579 mach_vm_size_t sourceOffset,
2580 mach_vm_size_t length,
2581 IOOptionBits options )
2582 {
2583 IOReturn err = kIOReturnSuccess;
2584 memory_object_t pager = (memory_object_t) _pager;
2585 mach_vm_size_t size;
2586 mach_vm_size_t bytes;
2587 mach_vm_size_t page;
2588 mach_vm_size_t pageOffset;
2589 mach_vm_size_t pagerOffset;
2590 IOPhysicalLength segLen;
2591 addr64_t physAddr;
2592
2593 if( !addressMap)
2594 {
2595 if( kIOMemoryRedirected & _flags)
2596 {
2597 #ifdef DEBUG
2598 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2599 #endif
2600 do {
2601 SLEEP;
2602 } while( kIOMemoryRedirected & _flags );
2603 }
2604
2605 return( kIOReturnSuccess );
2606 }
2607
2608 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2609 assert( physAddr );
2610 pageOffset = physAddr - trunc_page_64( physAddr );
2611 pagerOffset = sourceOffset;
2612
2613 size = length + pageOffset;
2614 physAddr -= pageOffset;
2615
2616 segLen += pageOffset;
2617 bytes = size;
2618 do
2619 {
2620 // in the middle of the loop only map whole pages
2621 if( segLen >= bytes)
2622 segLen = bytes;
2623 else if( segLen != trunc_page_32( segLen))
2624 err = kIOReturnVMError;
2625 if( physAddr != trunc_page_64( physAddr))
2626 err = kIOReturnBadArgument;
2627 if (kIOReturnSuccess != err)
2628 break;
2629
2630 #ifdef DEBUG
2631 if( kIOLogMapping & gIOKitDebug)
2632 IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2633 addressMap, address + pageOffset, physAddr + pageOffset,
2634 segLen - pageOffset);
2635 #endif
2636
2637
2638 if( pager) {
2639 if( reserved && reserved->pagerContig) {
2640 IOPhysicalLength allLen;
2641 addr64_t allPhys;
2642
2643 allPhys = getPhysicalSegment64( 0, &allLen );
2644 assert( allPhys );
2645 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) );
2646 }
2647 else
2648 {
2649
2650 for( page = 0;
2651 (page < segLen) && (KERN_SUCCESS == err);
2652 page += page_size)
2653 {
2654 err = device_pager_populate_object(pager, pagerOffset,
2655 (ppnum_t)(atop_64(physAddr + page)), page_size);
2656 pagerOffset += page_size;
2657 }
2658 }
2659 assert( KERN_SUCCESS == err );
2660 if( err)
2661 break;
2662 }
2663
2664 // This call to vm_fault causes an early pmap level resolution
2665 // of the mappings created above for kernel mappings, since
2666 // faulting in later can't take place from interrupt level.
2667 /* *** ALERT *** */
2668 /* *** Temporary Workaround *** */
2669
2670 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2671 {
2672 vm_fault(addressMap,
2673 (vm_map_offset_t)address,
2674 VM_PROT_READ|VM_PROT_WRITE,
2675 FALSE, THREAD_UNINT, NULL,
2676 (vm_map_offset_t)0);
2677 }
2678
2679 /* *** Temporary Workaround *** */
2680 /* *** ALERT *** */
2681
2682 sourceOffset += segLen - pageOffset;
2683 address += segLen;
2684 bytes -= segLen;
2685 pageOffset = 0;
2686
2687 }
2688 while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2689
2690 if (bytes)
2691 err = kIOReturnBadArgument;
2692
2693 return (err);
2694 }
2695
2696 IOReturn IOMemoryDescriptor::doUnmap(
2697 vm_map_t addressMap,
2698 IOVirtualAddress __address,
2699 IOByteCount __length )
2700 {
2701 IOReturn err;
2702 mach_vm_address_t address;
2703 mach_vm_size_t length;
2704
2705 if (__length)
2706 {
2707 address = __address;
2708 length = __length;
2709 }
2710 else
2711 {
2712 addressMap = ((_IOMemoryMap *) __address)->fAddressMap;
2713 address = ((_IOMemoryMap *) __address)->fAddress;
2714 length = ((_IOMemoryMap *) __address)->fLength;
2715 }
2716
2717 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2718 addressMap = IOPageableMapForAddress( address );
2719
2720 #ifdef DEBUG
2721 if( kIOLogMapping & gIOKitDebug)
2722 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
2723 addressMap, address, length );
2724 #endif
2725
2726 err = mach_vm_deallocate( addressMap, address, length );
2727
2728 return (err);
2729 }
2730
2731 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2732 {
2733 IOReturn err = kIOReturnSuccess;
2734 _IOMemoryMap * mapping = 0;
2735 OSIterator * iter;
2736
2737 LOCK;
2738
2739 if( doRedirect)
2740 _flags |= kIOMemoryRedirected;
2741 else
2742 _flags &= ~kIOMemoryRedirected;
2743
2744 do {
2745 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2746 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2747 mapping->redirect( safeTask, doRedirect );
2748
2749 iter->release();
2750 }
2751 } while( false );
2752
2753 if (!doRedirect)
2754 {
2755 WAKEUP;
2756 }
2757
2758 UNLOCK;
2759
2760 // temporary binary compatibility
2761 IOSubMemoryDescriptor * subMem;
2762 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2763 err = subMem->redirect( safeTask, doRedirect );
2764 else
2765 err = kIOReturnSuccess;
2766
2767 return( err );
2768 }
2769
2770 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2771 {
2772 return( _parent->redirect( safeTask, doRedirect ));
2773 }
2774
2775 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2776 {
2777 IOReturn err = kIOReturnSuccess;
2778
2779 if( fSuperMap) {
2780 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2781 } else {
2782
2783 LOCK;
2784
2785 do
2786 {
2787 if (!fAddress)
2788 break;
2789 if (!fAddressMap)
2790 break;
2791
2792 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
2793 && (0 == (fOptions & kIOMapStatic)))
2794 {
2795 IOUnmapPages( fAddressMap, fAddress, fLength );
2796 if(!doRedirect && safeTask
2797 && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2798 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
2799 {
2800 IOVirtualAddress iova = (IOVirtualAddress) this;
2801 err = mach_vm_deallocate( fAddressMap, fAddress, fLength );
2802 err = fMemory->doMap( fAddressMap, &iova,
2803 (fOptions & ~kIOMapAnywhere) | kIOMap64Bit/*| kIOMapReserve*/,
2804 0, 0 );
2805 } else
2806 err = kIOReturnSuccess;
2807 #ifdef DEBUG
2808 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
2809 #endif
2810 }
2811 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
2812 {
2813 IOOptionBits newMode;
2814 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
2815 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
2816 }
2817 }
2818 while (false);
2819 UNLOCK;
2820 }
2821
2822 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2823 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
2824 && safeTask
2825 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
2826 fMemory->redirect(safeTask, doRedirect);
2827
2828 return( err );
2829 }
2830
2831 IOReturn _IOMemoryMap::unmap( void )
2832 {
2833 IOReturn err;
2834
2835 LOCK;
2836
2837 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
2838 && (0 == (fOptions & kIOMapStatic))) {
2839
2840 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
2841
2842 } else
2843 err = kIOReturnSuccess;
2844
2845 if (fAddressMap)
2846 {
2847 vm_map_deallocate(fAddressMap);
2848 fAddressMap = 0;
2849 }
2850
2851 fAddress = 0;
2852
2853 UNLOCK;
2854
2855 return( err );
2856 }
2857
2858 void _IOMemoryMap::taskDied( void )
2859 {
2860 LOCK;
2861 if( fAddressMap) {
2862 vm_map_deallocate(fAddressMap);
2863 fAddressMap = 0;
2864 }
2865 fAddressTask = 0;
2866 fAddress = 0;
2867 UNLOCK;
2868 }
2869
2870 // Overload the release mechanism. Every mapping must be a member
2871 // of a memory descriptor's _mappings set. This means that we
2872 // always have 2 references on a mapping. When either of these references
2873 // is released we need to free ourselves.
2874 void _IOMemoryMap::taggedRelease(const void *tag) const
2875 {
2876 LOCK;
2877 super::taggedRelease(tag, 2);
2878 UNLOCK;
2879 }
2880
2881 void _IOMemoryMap::free()
2882 {
2883 unmap();
2884
2885 if (fMemory)
2886 {
2887 LOCK;
2888 fMemory->removeMapping(this);
2889 UNLOCK;
2890 fMemory->release();
2891 }
2892
2893 if (fOwner && (fOwner != fMemory))
2894 {
2895 LOCK;
2896 fOwner->removeMapping(this);
2897 UNLOCK;
2898 }
2899
2900 if (fSuperMap)
2901 fSuperMap->release();
2902
2903 if (fRedirUPL) {
2904 upl_commit(fRedirUPL, NULL, 0);
2905 upl_deallocate(fRedirUPL);
2906 }
2907
2908 super::free();
2909 }
2910
2911 IOByteCount _IOMemoryMap::getLength()
2912 {
2913 return( fLength );
2914 }
2915
2916 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2917 {
2918 if (fSuperMap)
2919 fSuperMap->getVirtualAddress();
2920 else if (fAddressMap && vm_map_is_64bit(fAddressMap))
2921 {
2922 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
2923 }
2924
2925 return (fAddress);
2926 }
2927
2928 mach_vm_address_t _IOMemoryMap::getAddress()
2929 {
2930 return( fAddress);
2931 }
2932
2933 mach_vm_size_t _IOMemoryMap::getSize()
2934 {
2935 return( fLength );
2936 }
2937
2938
2939 task_t _IOMemoryMap::getAddressTask()
2940 {
2941 if( fSuperMap)
2942 return( fSuperMap->getAddressTask());
2943 else
2944 return( fAddressTask);
2945 }
2946
2947 IOOptionBits _IOMemoryMap::getMapOptions()
2948 {
2949 return( fOptions);
2950 }
2951
2952 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2953 {
2954 return( fMemory );
2955 }
2956
2957 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2958 _IOMemoryMap * newMapping )
2959 {
2960 task_t task = newMapping->getAddressTask();
2961 mach_vm_address_t toAddress = newMapping->fAddress;
2962 IOOptionBits _options = newMapping->fOptions;
2963 mach_vm_size_t _offset = newMapping->fOffset;
2964 mach_vm_size_t _length = newMapping->fLength;
2965
2966 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
2967 return( 0 );
2968 if( (fOptions ^ _options) & kIOMapReadOnly)
2969 return( 0 );
2970 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2971 && ((fOptions ^ _options) & kIOMapCacheMask))
2972 return( 0 );
2973
2974 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
2975 return( 0 );
2976
2977 if( _offset < fOffset)
2978 return( 0 );
2979
2980 _offset -= fOffset;
2981
2982 if( (_offset + _length) > fLength)
2983 return( 0 );
2984
2985 retain();
2986 if( (fLength == _length) && (!_offset))
2987 {
2988 newMapping->release();
2989 newMapping = this;
2990 }
2991 else
2992 {
2993 newMapping->fSuperMap = this;
2994 newMapping->fOffset = _offset;
2995 newMapping->fAddress = fAddress + _offset;
2996 }
2997
2998 return( newMapping );
2999 }
3000
3001 IOPhysicalAddress
3002 _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
3003 {
3004 IOPhysicalAddress address;
3005
3006 LOCK;
3007 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3008 UNLOCK;
3009
3010 return( address );
3011 }
3012
3013 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3014
3015 #undef super
3016 #define super OSObject
3017
3018 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3019
3020 void IOMemoryDescriptor::initialize( void )
3021 {
3022 if( 0 == gIOMemoryLock)
3023 gIOMemoryLock = IORecursiveLockAlloc();
3024
3025 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3026 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3027 if (!gIOCopyMapper)
3028 {
3029 IOMapper *
3030 mapper = new IOCopyMapper;
3031 if (mapper)
3032 {
3033 if (mapper->init() && mapper->start(NULL))
3034 gIOCopyMapper = (IOCopyMapper *) mapper;
3035 else
3036 mapper->release();
3037 }
3038 }
3039
3040 gIOLastPage = IOGetLastPageNumber();
3041 }
3042
3043 void IOMemoryDescriptor::free( void )
3044 {
3045 if( _mappings)
3046 _mappings->release();
3047
3048 super::free();
3049 }
3050
3051 IOMemoryMap * IOMemoryDescriptor::setMapping(
3052 task_t intoTask,
3053 IOVirtualAddress mapAddress,
3054 IOOptionBits options )
3055 {
3056 return (createMappingInTask( intoTask, mapAddress,
3057 options | kIOMapStatic,
3058 0, getLength() ));
3059 }
3060
3061 IOMemoryMap * IOMemoryDescriptor::map(
3062 IOOptionBits options )
3063 {
3064 return (createMappingInTask( kernel_task, 0,
3065 options | kIOMapAnywhere,
3066 0, getLength() ));
3067 }
3068
3069 IOMemoryMap * IOMemoryDescriptor::map(
3070 task_t intoTask,
3071 IOVirtualAddress atAddress,
3072 IOOptionBits options,
3073 IOByteCount offset,
3074 IOByteCount length )
3075 {
3076 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3077 {
3078 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3079 return (0);
3080 }
3081
3082 return (createMappingInTask(intoTask, atAddress,
3083 options, offset, length));
3084 }
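/*
 * Usage sketch (hypothetical caller; names assumed): 64-bit tasks should use
 * createMappingInTask(), defined below, with the 64-bit getAddress() accessor.
 *
 *     IOMemoryMap * map = md->createMappingInTask(current_task(), 0,
 *                                                 kIOMapAnywhere, 0, 0);
 *     if (map)
 *     {
 *         mach_vm_address_t addr = map->getAddress();
 *         // ... use addr for the length reported by map->getSize() ...
 *         map->release();    // releasing the last reference undoes the mapping
 *     }
 */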
3085
3086 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3087 task_t intoTask,
3088 mach_vm_address_t atAddress,
3089 IOOptionBits options,
3090 mach_vm_size_t offset,
3091 mach_vm_size_t length)
3092 {
3093 IOMemoryMap * result;
3094 _IOMemoryMap * mapping;
3095
3096 if (0 == length)
3097 length = getLength();
3098
3099 mapping = new _IOMemoryMap;
3100
3101 if( mapping
3102 && !mapping->init( intoTask, atAddress,
3103 options, offset, length )) {
3104 mapping->release();
3105 mapping = 0;
3106 }
3107
3108 if (mapping)
3109 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3110 else
3111 result = 0;
3112
3113 #ifdef DEBUG
3114 if (!result)
3115 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3116 this, atAddress, options, offset, length);
3117 #endif
3118
3119 return (result);
3120 }
3121
3122 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3123 IOOptionBits options,
3124 IOByteCount offset)
3125 {
3126 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3127 }
3128
3129 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3130 IOOptionBits options,
3131 mach_vm_size_t offset)
3132 {
3133 IOReturn err = kIOReturnSuccess;
3134 IOMemoryDescriptor * physMem = 0;
3135
3136 LOCK;
3137
3138 if (fAddress && fAddressMap) do
3139 {
3140 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3141 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3142 {
3143 physMem = fMemory;
3144 physMem->retain();
3145 }
3146
3147 if (!fRedirUPL)
3148 {
3149 vm_size_t size = fLength;
3150 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3151 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3152 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3153 NULL, NULL,
3154 &flags))
3155 fRedirUPL = 0;
3156
3157 if (physMem)
3158 {
3159 IOUnmapPages( fAddressMap, fAddress, fLength );
3160 physMem->redirect(0, true);
3161 }
3162 }
3163
3164 if (newBackingMemory)
3165 {
3166 if (newBackingMemory != fMemory)
3167 {
3168 fOffset = 0;
3169 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3170 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3171 offset, fLength))
3172 err = kIOReturnError;
3173 }
3174 if (fRedirUPL)
3175 {
3176 upl_commit(fRedirUPL, NULL, 0);
3177 upl_deallocate(fRedirUPL);
3178 fRedirUPL = 0;
3179 }
3180 if (physMem)
3181 physMem->redirect(0, false);
3182 }
3183 }
3184 while (false);
3185
3186 UNLOCK;
3187
3188 if (physMem)
3189 physMem->release();
3190
3191 return (err);
3192 }
3193
3194 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3195 IOMemoryDescriptor * owner,
3196 task_t __intoTask,
3197 IOVirtualAddress __address,
3198 IOOptionBits options,
3199 IOByteCount __offset,
3200 IOByteCount __length )
3201 {
3202 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3203
3204 IOMemoryDescriptor * mapDesc = 0;
3205 _IOMemoryMap * result = 0;
3206 OSIterator * iter;
3207
3208 _IOMemoryMap * mapping = (_IOMemoryMap *) __address;
3209 mach_vm_size_t offset = mapping->fOffset + __offset;
3210 mach_vm_size_t length = mapping->fLength;
3211
3212 mapping->fOffset = offset;
3213
3214 LOCK;
3215
3216 do
3217 {
3218 if (kIOMapStatic & options)
3219 {
3220 result = mapping;
3221 addMapping(mapping);
3222 mapping->setMemoryDescriptor(this, 0);
3223 continue;
3224 }
3225
3226 if (kIOMapUnique & options)
3227 {
3228 IOPhysicalAddress phys;
3229 IOByteCount physLen;
3230
3231 // if (owner != this) continue;
3232
3233 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3234 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3235 {
3236 phys = getPhysicalSegment(offset, &physLen);
3237 if (!phys || (physLen < length))
3238 continue;
3239
3240 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
3241 phys, length, _direction);
3242 if (!mapDesc)
3243 continue;
3244 offset = 0;
3245 mapping->fOffset = offset;
3246 }
3247 }
3248 else
3249 {
3250 // look for a compatible existing mapping
3251 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3252 {
3253 _IOMemoryMap * lookMapping;
3254 while ((lookMapping = (_IOMemoryMap *) iter->getNextObject()))
3255 {
3256 if ((result = lookMapping->copyCompatible(mapping)))
3257 {
3258 addMapping(result);
3259 result->setMemoryDescriptor(this, offset);
3260 break;
3261 }
3262 }
3263 iter->release();
3264 }
3265 if (result || (options & kIOMapReference))
3266 continue;
3267 }
3268
3269 if (!mapDesc)
3270 {
3271 mapDesc = this;
3272 mapDesc->retain();
3273 }
3274 IOReturn
3275 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3276 if (kIOReturnSuccess == kr)
3277 {
3278 result = mapping;
3279 mapDesc->addMapping(result);
3280 result->setMemoryDescriptor(mapDesc, offset);
3281 }
3282 else
3283 {
3284 mapping->release();
3285 mapping = NULL;
3286 }
3287 }
3288 while( false );
3289
3290 UNLOCK;
3291
3292 if (mapDesc)
3293 mapDesc->release();
3294
3295 return (result);
3296 }
3297
3298 void IOMemoryDescriptor::addMapping(
3299 IOMemoryMap * mapping )
3300 {
3301 if( mapping)
3302 {
3303 if( 0 == _mappings)
3304 _mappings = OSSet::withCapacity(1);
3305 if( _mappings )
3306 _mappings->setObject( mapping );
3307 }
3308 }
3309
3310 void IOMemoryDescriptor::removeMapping(
3311 IOMemoryMap * mapping )
3312 {
3313 if( _mappings)
3314 _mappings->removeObject( mapping);
3315 }
3316
3317 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3318
3319 #undef super
3320 #define super IOMemoryDescriptor
3321
3322 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
3323
3324 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3325
3326 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
3327 IOByteCount offset, IOByteCount length,
3328 IODirection direction )
3329 {
3330 if( !parent)
3331 return( false);
3332
3333 if( (offset + length) > parent->getLength())
3334 return( false);
3335
3336 /*
3337 * We can check the _parent instance variable before having ever set it
3338 * to an initial value because I/O Kit guarantees that all our instance
3339 * variables are zeroed on an object's allocation.
3340 */
3341
3342 if( !_parent) {
3343 if( !super::init())
3344 return( false );
3345 } else {
3346 /*
3347 * An existing memory descriptor is being retargeted to
3348 * point to somewhere else. Clean up our present state.
3349 */
3350
3351 _parent->release();
3352 _parent = 0;
3353 }
3354
3355 parent->retain();
3356 _parent = parent;
3357 _start = offset;
3358 _length = length;
3359 _direction = direction;
3360 _tag = parent->getTag();
3361
3362 return( true );
3363 }
3364
3365 void IOSubMemoryDescriptor::free( void )
3366 {
3367 if( _parent)
3368 _parent->release();
3369
3370 super::free();
3371 }
3372
3373
3374 IOReturn
3375 IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3376 {
3377 IOReturn rtn;
3378
3379 if (kIOMDGetCharacteristics == op) {
3380
3381 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3382 if (kIOReturnSuccess == rtn) {
3383 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3384 data->fLength = _length;
3385 data->fSGCount = 0; // XXX gvdl: need to compute the SG count and pages
3386 data->fPages = 0;
3387 data->fPageAlign = 0;
3388 }
3389
3390 return rtn;
3391 }
3392 else if (kIOMDWalkSegments & op) {
3393 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
3394 return kIOReturnUnderrun;
3395
3396 IOMDDMAWalkSegmentArgs *data =
3397 reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
3398 UInt offset = data->fOffset;
3399 UInt remain = _length - offset;
3400 if ((int) remain <= 0)
3401 return (!remain)? kIOReturnOverrun : kIOReturnInternalError;
3402
3403 data->fOffset = offset + _start;
3404 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3405 if (data->fLength > remain)
3406 data->fLength = remain;
3407 data->fOffset = offset;
3408
3409 return rtn;
3410 }
3411 else
3412 return kIOReturnBadArgument;
3413 }
3414
3415 addr64_t
3416 IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
3417 {
3418 addr64_t address;
3419 IOByteCount actualLength;
3420
3421 assert(offset <= _length);
3422
3423 if( length)
3424 *length = 0;
3425
3426 if( offset >= _length)
3427 return( 0 );
3428
3429 address = _parent->getPhysicalSegment64( offset + _start, &actualLength );
3430
3431 if( address && length)
3432 *length = min( _length - offset, actualLength );
3433
3434 return( address );
3435 }
3436
3437 IOPhysicalAddress
3438 IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
3439 {
3440 IOPhysicalAddress address;
3441 IOByteCount actualLength;
3442
3443 assert(offset <= _length);
3444
3445 if( length)
3446 *length = 0;
3447
3448 if( offset >= _length)
3449 return( 0 );
3450
3451 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
3452
3453 if( address && length)
3454 *length = min( _length - offset, actualLength );
3455
3456 return( address );
3457 }
3458
3459 IOPhysicalAddress
3460 IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
3461 {
3462 IOPhysicalAddress address;
3463 IOByteCount actualLength;
3464
3465 assert(offset <= _length);
3466
3467 if( length)
3468 *length = 0;
3469
3470 if( offset >= _length)
3471 return( 0 );
3472
3473 address = _parent->getSourceSegment( offset + _start, &actualLength );
3474
3475 if( address && length)
3476 *length = min( _length - offset, actualLength );
3477
3478 return( address );
3479 }
3480
3481 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3482 IOByteCount * lengthOfSegment)
3483 {
3484 return( 0 );
3485 }
3486
3487 IOReturn IOSubMemoryDescriptor::doMap(
3488 vm_map_t addressMap,
3489 IOVirtualAddress * atAddress,
3490 IOOptionBits options,
3491 IOByteCount sourceOffset,
3492 IOByteCount length )
3493 {
3494 panic("IOSubMemoryDescriptor::doMap");
3495 return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length));
3496 }
3497
3498 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3499 void * bytes, IOByteCount length)
3500 {
3501 IOByteCount byteCount;
3502
3503 assert(offset <= _length);
3504
3505 if( offset >= _length)
3506 return( 0 );
3507
3508 LOCK;
3509 byteCount = _parent->readBytes( _start + offset, bytes,
3510 min(length, _length - offset) );
3511 UNLOCK;
3512
3513 return( byteCount );
3514 }
3515
3516 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3517 const void* bytes, IOByteCount length)
3518 {
3519 IOByteCount byteCount;
3520
3521 assert(offset <= _length);
3522
3523 if( offset >= _length)
3524 return( 0 );
3525
3526 LOCK;
3527 byteCount = _parent->writeBytes( _start + offset, bytes,
3528 min(length, _length - offset) );
3529 UNLOCK;
3530
3531 return( byteCount );
3532 }
3533
3534 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3535 IOOptionBits * oldState )
3536 {
3537 IOReturn err;
3538
3539 LOCK;
3540 err = _parent->setPurgeable( newState, oldState );
3541 UNLOCK;
3542
3543 return( err );
3544 }
3545
3546 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3547 IOByteCount offset, IOByteCount length )
3548 {
3549 IOReturn err;
3550
3551 assert(offset <= _length);
3552
3553 if( offset >= _length)
3554 return( kIOReturnOverrun );
3555
3556 LOCK;
3557 err = _parent->performOperation( options, _start + offset,
3558 min(length, _length - offset) );
3559 UNLOCK;
3560
3561 return( err );
3562 }
3563
3564 IOReturn IOSubMemoryDescriptor::prepare(
3565 IODirection forDirection)
3566 {
3567 IOReturn err;
3568
3569 LOCK;
3570 err = _parent->prepare( forDirection);
3571 UNLOCK;
3572
3573 return( err );
3574 }
3575
3576 IOReturn IOSubMemoryDescriptor::complete(
3577 IODirection forDirection)
3578 {
3579 IOReturn err;
3580
3581 LOCK;
3582 err = _parent->complete( forDirection);
3583 UNLOCK;
3584
3585 return( err );
3586 }
3587
3588 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3589 IOMemoryDescriptor * owner,
3590 task_t intoTask,
3591 IOVirtualAddress address,
3592 IOOptionBits options,
3593 IOByteCount offset,
3594 IOByteCount length )
3595 {
3596 IOMemoryMap * mapping = 0;
3597
3598 if (!(kIOMap64Bit & options))
3599 {
3600 panic("IOSubMemoryDescriptor::makeMapping !64bit");
3601 }
3602
3603 mapping = (IOMemoryMap *) _parent->makeMapping(
3604 owner,
3605 intoTask,
3606 address,
3607 options, _start + offset, length );
3608
3609 return( mapping );
3610 }
3611
3612 /* ick */
3613
3614 bool
3615 IOSubMemoryDescriptor::initWithAddress(void * address,
3616 IOByteCount length,
3617 IODirection direction)
3618 {
3619 return( false );
3620 }
3621
3622 bool
3623 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3624 IOByteCount length,
3625 IODirection direction,
3626 task_t task)
3627 {
3628 return( false );
3629 }
3630
3631 bool
3632 IOSubMemoryDescriptor::initWithPhysicalAddress(
3633 IOPhysicalAddress address,
3634 IOByteCount length,
3635 IODirection direction )
3636 {
3637 return( false );
3638 }
3639
3640 bool
3641 IOSubMemoryDescriptor::initWithRanges(
3642 IOVirtualRange * ranges,
3643 UInt32 withCount,
3644 IODirection direction,
3645 task_t task,
3646 bool asReference)
3647 {
3648 return( false );
3649 }
3650
3651 bool
3652 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3653 UInt32 withCount,
3654 IODirection direction,
3655 bool asReference)
3656 {
3657 return( false );
3658 }
3659
3660 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3661
3662 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3663 {
3664 OSSymbol const *keys[2];
3665 OSObject *values[2];
3666 struct SerData {
3667 user_addr_t address;
3668 user_size_t length;
3669 } *vcopy;
3670 unsigned int index, nRanges;
3671 bool result;
3672
3673 IOOptionBits type = _flags & kIOMemoryTypeMask;
3674
3675 if (s == NULL) return false;
3676 if (s->previouslySerialized(this)) return true;
3677
3678 // Pretend we are an array.
3679 if (!s->addXMLStartTag(this, "array")) return false;
3680
3681 nRanges = _rangesCount;
3682 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3683 if (vcopy == 0) return false;
3684
3685 keys[0] = OSSymbol::withCString("address");
3686 keys[1] = OSSymbol::withCString("length");
3687
3688 result = false;
3689 values[0] = values[1] = 0;
3690
3691 // From this point on we can go to bail.
3692
3693 // Copy the volatile data so we don't have to allocate memory
3694 // while the lock is held.
3695 LOCK;
3696 if (nRanges == _rangesCount) {
3697 Ranges vec = _ranges;
3698 for (index = 0; index < nRanges; index++) {
3699 user_addr_t addr; IOByteCount len;
3700 getAddrLenForInd(addr, len, type, vec, index);
3701 vcopy[index].address = addr;
3702 vcopy[index].length = len;
3703 }
3704 } else {
3705 // The descriptor changed out from under us. Give up.
3706 UNLOCK;
3707 result = false;
3708 goto bail;
3709 }
3710 UNLOCK;
3711
3712 for (index = 0; index < nRanges; index++)
3713 {
3714 user_addr_t addr = vcopy[index].address;
3715 IOByteCount len = (IOByteCount) vcopy[index].length;
3716 values[0] =
3717 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3718 if (values[0] == 0) {
3719 result = false;
3720 goto bail;
3721 }
3722 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3723 if (values[1] == 0) {
3724 result = false;
3725 goto bail;
3726 }
3727 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3728 if (dict == 0) {
3729 result = false;
3730 goto bail;
3731 }
3732 values[0]->release();
3733 values[1]->release();
3734 values[0] = values[1] = 0;
3735
3736 result = dict->serialize(s);
3737 dict->release();
3738 if (!result) {
3739 goto bail;
3740 }
3741 }
3742 result = s->addXMLEndTag("array");
3743
3744 bail:
3745 if (values[0])
3746 values[0]->release();
3747 if (values[1])
3748 values[1]->release();
3749 if (keys[0])
3750 keys[0]->release();
3751 if (keys[1])
3752 keys[1]->release();
3753 if (vcopy)
3754 IOFree(vcopy, sizeof(SerData) * nRanges);
3755 return result;
3756 }
3757
3758 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3759 {
3760 if (!s) {
3761 return (false);
3762 }
3763 if (s->previouslySerialized(this)) return true;
3764
3765 // Pretend we are a dictionary.
3766 // We must duplicate the functionality of OSDictionary here
3767 // because otherwise object references will not work;
3768 // they are based on the value of the object passed to
3769 // previouslySerialized and addXMLStartTag.
3770
3771 if (!s->addXMLStartTag(this, "dict")) return false;
3772
3773 char const *keys[3] = {"offset", "length", "parent"};
3774
3775 OSObject *values[3];
3776 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3777 if (values[0] == 0)
3778 return false;
3779 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3780 if (values[1] == 0) {
3781 values[0]->release();
3782 return false;
3783 }
3784 values[2] = _parent;
3785
3786 bool result = true;
3787 for (int i=0; i<3; i++) {
3788 if (!s->addString("<key>") ||
3789 !s->addString(keys[i]) ||
3790 !s->addXMLEndTag("key") ||
3791 !values[i]->serialize(s)) {
3792 result = false;
3793 break;
3794 }
3795 }
3796 values[0]->release();
3797 values[1]->release();
3798 if (!result) {
3799 return false;
3800 }
3801
3802 return s->addXMLEndTag("dict");
3803 }
3804
3805 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3806
3807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3809 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3810 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3811 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3812 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3819 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3820 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3821 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3822 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3823
3824 /* ex-inline function implementation */
3825 IOPhysicalAddress
3826 IOMemoryDescriptor::getPhysicalAddress()
3827 { return( getPhysicalSegment( 0, 0 )); }
3828
3829
3830