[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
30 *
31 * HISTORY
32 *
33 */
34 // 45678901234567890123456789012345678901234567890123456789012345678901234567890
35 #include <sys/cdefs.h>
36
37 #include <IOKit/assert.h>
38 #include <IOKit/system.h>
39 #include <IOKit/IOLib.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOMapper.h>
42 #include <IOKit/IOKitKeysPrivate.h>
43
44 #include <IOKit/IOKitDebug.h>
45 #include <libkern/OSDebug.h>
46
47 #include "IOKitKernelInternal.h"
48 #include "IOCopyMapper.h"
49
50 #include <libkern/c++/OSContainers.h>
51 #include <libkern/c++/OSDictionary.h>
52 #include <libkern/c++/OSArray.h>
53 #include <libkern/c++/OSSymbol.h>
54 #include <libkern/c++/OSNumber.h>
55
56 #include <sys/uio.h>
57
58 __BEGIN_DECLS
59 #include <vm/pmap.h>
60 #include <vm/vm_pageout.h>
61 #include <mach/memory_object_types.h>
62 #include <device/device_port.h>
63
64 #include <mach/vm_prot.h>
65 #include <mach/mach_vm.h>
66 #include <vm/vm_fault.h>
67 #include <vm/vm_protos.h>
68
69 extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
70 void ipc_port_release_send(ipc_port_t port);
71
72 /* Copy between a physical page and a virtual address in the given vm_map */
73 kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);
74
75 memory_object_t
76 device_pager_setup(
77 memory_object_t pager,
78 int device_handle,
79 vm_size_t size,
80 int flags);
81 void
82 device_pager_deallocate(
83 memory_object_t);
84 kern_return_t
85 device_pager_populate_object(
86 memory_object_t pager,
87 vm_object_offset_t offset,
88 ppnum_t phys_addr,
89 vm_size_t size);
90 kern_return_t
91 memory_object_iopl_request(
92 ipc_port_t port,
93 memory_object_offset_t offset,
94 vm_size_t *upl_size,
95 upl_t *upl_ptr,
96 upl_page_info_array_t user_page_list,
97 unsigned int *page_list_count,
98 int *flags);
99
100 unsigned int IOTranslateCacheBits(struct phys_entry *pp);
101
102 __END_DECLS
103
104 #define kIOMaximumMappedIOByteCount (512*1024*1024)
105
106 static IOMapper * gIOSystemMapper = NULL;
107
108 IOCopyMapper * gIOCopyMapper = NULL;
109
110 static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);
111
112 ppnum_t gIOLastPage;
113
114 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
115
116 OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
117
118 #define super IOMemoryDescriptor
119
120 OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
121
122 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
123
124 static IORecursiveLock * gIOMemoryLock;
125
126 #define LOCK IORecursiveLockLock( gIOMemoryLock)
127 #define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
128 #define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
129 #define WAKEUP \
130 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
131
132 #if 0
133 #define DEBG(fmt, args...) { kprintf(fmt, ## args); }
134 #else
135 #define DEBG(fmt, args...) {}
136 #endif
137
138 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
139
140 class _IOMemoryMap : public IOMemoryMap
141 {
142 OSDeclareDefaultStructors(_IOMemoryMap)
143 public:
144 IOMemoryDescriptor * fMemory;
145 IOMemoryMap * fSuperMap;
146 mach_vm_size_t fOffset;
147 mach_vm_address_t fAddress;
148 mach_vm_size_t fLength;
149 task_t fAddressTask;
150 vm_map_t fAddressMap;
151 IOOptionBits fOptions;
152 upl_t fRedirUPL;
153 ipc_port_t fRedirEntry;
154 IOMemoryDescriptor * fOwner;
155
156 protected:
157 virtual void taggedRelease(const void *tag = 0) const;
158 virtual void free();
159
160 public:
161
162 // IOMemoryMap methods
163 virtual IOVirtualAddress getVirtualAddress();
164 virtual IOByteCount getLength();
165 virtual task_t getAddressTask();
166 virtual mach_vm_address_t getAddress();
167 virtual mach_vm_size_t getSize();
168 virtual IOMemoryDescriptor * getMemoryDescriptor();
169 virtual IOOptionBits getMapOptions();
170
171 virtual IOReturn unmap();
172 virtual void taskDied();
173
174 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
175 IOOptionBits options,
176 IOByteCount offset = 0);
177
178 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
179 IOOptionBits options,
180 mach_vm_size_t offset = 0);
181
182 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
183 IOByteCount * length);
184
185 // for IOMemoryDescriptor use
186 _IOMemoryMap * copyCompatible( _IOMemoryMap * newMapping );
187
188 bool init(
189 task_t intoTask,
190 mach_vm_address_t toAddress,
191 IOOptionBits options,
192 mach_vm_size_t offset,
193 mach_vm_size_t length );
194
195 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
196
197 IOReturn redirect(
198 task_t intoTask, bool redirect );
199 };
200
201 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
202
203 // Some data structures and accessor macros used by the initWithOptions
204 // Function
205
206 enum ioPLBlockFlags {
207 kIOPLOnDevice = 0x00000001,
208 kIOPLExternUPL = 0x00000002,
209 };
210
211 struct typePersMDData
212 {
213 const IOGeneralMemoryDescriptor *fMD;
214 ipc_port_t fMemEntry;
215 };
216
217 struct ioPLBlock {
218 upl_t fIOPL;
219 vm_address_t fIOMDOffset; // The offset of this iopl in descriptor
220 vm_offset_t fPageInfo; // Pointer to page list or index into it
221 ppnum_t fMappedBase; // Page number of first page in this iopl
222 unsigned int fPageOffset; // Offset within first page of iopl
223 unsigned int fFlags; // Flags
224 };
225
226 struct ioGMDData {
227 IOMapper *fMapper;
228 unsigned int fPageCnt;
229 upl_page_info_t fPageList[];
230 ioPLBlock fBlocks[];
231 };
232
233 #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy())
234 #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
235 #define getNumIOPL(osd, d) \
236 (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
237 #define getPageList(d) (&(d->fPageList[0]))
238 #define computeDataSize(p, u) \
239 (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
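// Layout sketch (editorial note, not part of the original source): for a
// descriptor tracking P pages across U iopl blocks, computeDataSize(P, U)
// sizes one contiguous OSData buffer laid out as
//
//     [ ioGMDData header | P x upl_page_info_t | U x ioPLBlock ]
//
// so getPageList() returns the start of the page array, getIOPLList()
// points just past it, and getNumIOPL() recovers the block count from the
// OSData length. For example, with P = 4 and U = 1 the buffer holds
// sizeof(ioGMDData) + 4*sizeof(upl_page_info_t) + 1*sizeof(ioPLBlock) bytes.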
240
241
242 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
243
244 #define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )
245
246
247 extern "C" {
248
249 kern_return_t device_data_action(
250 int device_handle,
251 ipc_port_t device_pager,
252 vm_prot_t protection,
253 vm_object_offset_t offset,
254 vm_size_t size)
255 {
256 struct ExpansionData {
257 void * devicePager;
258 unsigned int pagerContig:1;
259 unsigned int unused:31;
260 IOMemoryDescriptor * memory;
261 };
262 kern_return_t kr;
263 ExpansionData * ref = (ExpansionData *) device_handle;
264 IOMemoryDescriptor * memDesc;
265
266 LOCK;
267 memDesc = ref->memory;
268 if( memDesc)
269 {
270 memDesc->retain();
271 kr = memDesc->handleFault( device_pager, 0, 0,
272 offset, size, kIOMapDefaultCache /*?*/);
273 memDesc->release();
274 }
275 else
276 kr = KERN_ABORTED;
277 UNLOCK;
278
279 return( kr );
280 }
281
282 kern_return_t device_close(
283 int device_handle)
284 {
285 struct ExpansionData {
286 void * devicePager;
287 unsigned int pagerContig:1;
288 unsigned int unused:31;
289 IOMemoryDescriptor * memory;
290 };
291 ExpansionData * ref = (ExpansionData *) device_handle;
292
293 IODelete( ref, ExpansionData, 1 );
294
295 return( kIOReturnSuccess );
296 }
297 }; // end extern "C"
298
299 // Note this inline function uses C++ reference arguments to return values.
300 // This means that pointers are not passed and NULLs don't have to be
301 // checked for, as a NULL reference is illegal.
302 static inline void
303 getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
304 UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
305 {
306 assert(kIOMemoryTypeUIO == type
307 || kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type
308 || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
309 if (kIOMemoryTypeUIO == type) {
310 user_size_t us;
311 uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
312 }
313 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
314 IOAddressRange cur = r.v64[ind];
315 addr = cur.address;
316 len = cur.length;
317 }
318 else {
319 IOVirtualRange cur = r.v[ind];
320 addr = cur.address;
321 len = cur.length;
322 }
323 }
324
325 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
326
327 /*
328 * withAddress:
329 *
330 * Create a new IOMemoryDescriptor. The buffer is a virtual address
331 * relative to the specified task. If no task is supplied, the kernel
332 * task is implied.
333 */
334 IOMemoryDescriptor *
335 IOMemoryDescriptor::withAddress(void * address,
336 IOByteCount length,
337 IODirection direction)
338 {
339 return IOMemoryDescriptor::
340 withAddress((vm_address_t) address, length, direction, kernel_task);
341 }
342
343 IOMemoryDescriptor *
344 IOMemoryDescriptor::withAddress(vm_address_t address,
345 IOByteCount length,
346 IODirection direction,
347 task_t task)
348 {
349 #if TEST_V64
350 if (task)
351 {
352 IOOptionBits options = (IOOptionBits) direction;
353 if (task == kernel_task)
354 options |= kIOMemoryAutoPrepare;
355 return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
356 }
357 #endif
358 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
359 if (that)
360 {
361 if (that->initWithAddress(address, length, direction, task))
362 return that;
363
364 that->release();
365 }
366 return 0;
367 }
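// Usage sketch (illustrative only, not part of the original source): a
// typical driver wraps a kernel buffer, prepares it for I/O, and releases
// the descriptor when done. The buffer name and size are hypothetical.
//
//     void * buf = IOMalloc(4096);
//     IOMemoryDescriptor * md =
//         IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
//     if (md && (kIOReturnSuccess == md->prepare())) {
//         // program the hardware using md->getPhysicalSegment() ...
//         md->complete();
//     }
//     if (md) md->release();
//     IOFree(buf, 4096);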
368
369 IOMemoryDescriptor *
370 IOMemoryDescriptor::withPhysicalAddress(
371 IOPhysicalAddress address,
372 IOByteCount length,
373 IODirection direction )
374 {
375 #if TEST_P64
376 return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
377 #endif
378 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
379 if (self
380 && !self->initWithPhysicalAddress(address, length, direction)) {
381 self->release();
382 return 0;
383 }
384
385 return self;
386 }
387
388 IOMemoryDescriptor *
389 IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
390 UInt32 withCount,
391 IODirection direction,
392 task_t task,
393 bool asReference)
394 {
395 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
396 if (that)
397 {
398 if (that->initWithRanges(ranges, withCount, direction, task, asReference))
399 return that;
400
401 that->release();
402 }
403 return 0;
404 }
405
406 IOMemoryDescriptor *
407 IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
408 mach_vm_size_t length,
409 IOOptionBits options,
410 task_t task)
411 {
412 IOAddressRange range = { address, length };
413 return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
414 }
415
416 IOMemoryDescriptor *
417 IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
418 UInt32 rangeCount,
419 IOOptionBits options,
420 task_t task)
421 {
422 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
423 if (that)
424 {
425 if (task)
426 options |= kIOMemoryTypeVirtual64;
427 else
428 options |= kIOMemoryTypePhysical64;
429
430 if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
431 return that;
432
433 that->release();
434 }
435
436 return 0;
437 }
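// Usage sketch (illustrative, not part of the original source): for user
// memory in a (possibly 64-bit) task, withAddressRange() is the preferred
// creation call, since the check in initWithOptions() below rejects 32-bit
// virtual ranges created for 64-bit tasks. userAddr, userLen and userTask
// are hypothetical names.
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         userAddr, userLen, kIODirectionInOut, userTask);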
438
439
440 /*
441 * withRanges:
442 *
443 * Create a new IOMemoryDescriptor. The buffer is made up of several
444 * virtual address ranges, from a given task.
445 *
446 * Passing the ranges as a reference will avoid an extra allocation.
447 */
448 IOMemoryDescriptor *
449 IOMemoryDescriptor::withOptions(void * buffers,
450 UInt32 count,
451 UInt32 offset,
452 task_t task,
453 IOOptionBits opts,
454 IOMapper * mapper)
455 {
456 IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
457
458 if (self
459 && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
460 {
461 self->release();
462 return 0;
463 }
464
465 return self;
466 }
467
468 // Can't be left abstract, but this should never be used directly.
469 bool IOMemoryDescriptor::initWithOptions(void * buffers,
470 UInt32 count,
471 UInt32 offset,
472 task_t task,
473 IOOptionBits options,
474 IOMapper * mapper)
475 {
476 // @@@ gvdl: Should I panic?
477 panic("IOMD::initWithOptions called\n");
478 return 0;
479 }
480
481 IOMemoryDescriptor *
482 IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
483 UInt32 withCount,
484 IODirection direction,
485 bool asReference)
486 {
487 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
488 if (that)
489 {
490 if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
491 return that;
492
493 that->release();
494 }
495 return 0;
496 }
497
498 IOMemoryDescriptor *
499 IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
500 IOByteCount offset,
501 IOByteCount length,
502 IODirection direction)
503 {
504 IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;
505
506 if (self && !self->initSubRange(of, offset, length, direction)) {
507 self->release();
508 self = 0;
509 }
510 return self;
511 }
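// Usage sketch (illustrative, not part of the original source): describing a
// window into an existing descriptor without copying its ranges; the offset
// and length shown are hypothetical.
//
//     IOMemoryDescriptor * sub =
//         IOMemoryDescriptor::withSubRange(md, 4096, 4096, kIODirectionInOut);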
512
513 IOMemoryDescriptor *
514 IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
515 {
516 IOGeneralMemoryDescriptor *origGenMD =
517 OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);
518
519 if (origGenMD)
520 return IOGeneralMemoryDescriptor::
521 withPersistentMemoryDescriptor(origGenMD);
522 else
523 return 0;
524 }
525
526 IOMemoryDescriptor *
527 IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
528 {
529 ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();
530
531 if (!sharedMem)
532 return 0;
533
534 if (sharedMem == originalMD->_memEntry) {
535 originalMD->retain(); // Add a new reference to ourselves
536 ipc_port_release_send(sharedMem); // Remove extra send right
537 return originalMD;
538 }
539
540 IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
541 typePersMDData initData = { originalMD, sharedMem };
542
543 if (self
544 && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
545 self->release();
546 self = 0;
547 }
548 return self;
549 }
550
551 void *IOGeneralMemoryDescriptor::createNamedEntry()
552 {
553 kern_return_t error;
554 ipc_port_t sharedMem;
555
556 IOOptionBits type = _flags & kIOMemoryTypeMask;
557
558 user_addr_t range0Addr;
559 IOByteCount range0Len;
560 getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
561 range0Addr = trunc_page_64(range0Addr);
562
563 vm_size_t size = ptoa_32(_pages);
564 vm_address_t kernelPage = (vm_address_t) range0Addr;
565
566 vm_map_t theMap = ((_task == kernel_task)
567 && (kIOMemoryBufferPageable & _flags))
568 ? IOPageableMapForAddress(kernelPage)
569 : get_task_map(_task);
570
571 memory_object_size_t actualSize = size;
572 vm_prot_t prot = VM_PROT_READ;
573 #if CONFIG_EMBEDDED
574 if (kIODirectionOut != (kIODirectionOutIn & _flags))
575 #endif
576 prot |= VM_PROT_WRITE;
577
578 if (_memEntry)
579 prot |= MAP_MEM_NAMED_REUSE;
580
581 error = mach_make_memory_entry_64(theMap,
582 &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);
583
584 if (KERN_SUCCESS == error) {
585 if (actualSize == size) {
586 return sharedMem;
587 } else {
588 #if IOASSERT
589 IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
590 (UInt64)range0Addr, (UInt32)actualSize, size);
591 #endif
592 ipc_port_release_send( sharedMem );
593 }
594 }
595
596 return MACH_PORT_NULL;
597 }
598
599 /*
600 * initWithAddress:
601 *
602 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
603 * relative to the specified task. If no task is supplied, the kernel
604 * task is implied.
605 *
606 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
607 * initWithRanges again on an existing instance -- note this behavior
608 * is not commonly supported in other I/O Kit classes, although it is
609 * supported here.
610 */
611 bool
612 IOGeneralMemoryDescriptor::initWithAddress(void * address,
613 IOByteCount withLength,
614 IODirection withDirection)
615 {
616 _singleRange.v.address = (vm_address_t) address;
617 _singleRange.v.length = withLength;
618
619 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
620 }
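// Re-use sketch (illustrative, not part of the original source): as the
// comment above notes, an existing instance may be re-targeted by calling an
// init method again; newBuf and newLen are hypothetical.
//
//     md->initWithAddress(newBuf, newLen, kIODirectionIn);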
621
622 bool
623 IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
624 IOByteCount withLength,
625 IODirection withDirection,
626 task_t withTask)
627 {
628 _singleRange.v.address = address;
629 _singleRange.v.length = withLength;
630
631 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
632 }
633
634 bool
635 IOGeneralMemoryDescriptor::initWithPhysicalAddress(
636 IOPhysicalAddress address,
637 IOByteCount withLength,
638 IODirection withDirection )
639 {
640 _singleRange.p.address = address;
641 _singleRange.p.length = withLength;
642
643 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
644 }
645
646 bool
647 IOGeneralMemoryDescriptor::initWithPhysicalRanges(
648 IOPhysicalRange * ranges,
649 UInt32 count,
650 IODirection direction,
651 bool reference)
652 {
653 IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;
654
655 if (reference)
656 mdOpts |= kIOMemoryAsReference;
657
658 return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
659 }
660
661 bool
662 IOGeneralMemoryDescriptor::initWithRanges(
663 IOVirtualRange * ranges,
664 UInt32 count,
665 IODirection direction,
666 task_t task,
667 bool reference)
668 {
669 IOOptionBits mdOpts = direction;
670
671 if (reference)
672 mdOpts |= kIOMemoryAsReference;
673
674 if (task) {
675 mdOpts |= kIOMemoryTypeVirtual;
676
677 // Auto-prepare if this is a kernel memory descriptor as very few
678 // clients bother to prepare() kernel memory.
679 // But it was not enforced so what are you going to do?
680 if (task == kernel_task)
681 mdOpts |= kIOMemoryAutoPrepare;
682 }
683 else
684 mdOpts |= kIOMemoryTypePhysical;
685
686 return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
687 }
688
689 /*
690 * initWithOptions:
691 *
692  * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
693  * address ranges from a given task, several physical ranges, a UPL from the
694  * ubc system, or a uio (which may be 64-bit) from the BSD subsystem.
695 *
696 * Passing the ranges as a reference will avoid an extra allocation.
697 *
698 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
699 * existing instance -- note this behavior is not commonly supported in other
700 * I/O Kit classes, although it is supported here.
701 */
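// Call sketch (illustrative, not part of the original source): describing two
// ranges in a 64-bit task; the addresses, lengths and task are hypothetical.
//
//     IOAddressRange ranges[2] = { { addr0, len0 }, { addr1, len1 } };
//     md->initWithOptions(ranges, 2, 0, task,
//                         kIOMemoryTypeVirtual64 | kIODirectionInOut,
//                         /* mapper */ 0);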
702
703 bool
704 IOGeneralMemoryDescriptor::initWithOptions(void * buffers,
705 UInt32 count,
706 UInt32 offset,
707 task_t task,
708 IOOptionBits options,
709 IOMapper * mapper)
710 {
711 IOOptionBits type = options & kIOMemoryTypeMask;
712
713 // Grab the original MD's configuration data to initialise the
714 // arguments to this function.
715 if (kIOMemoryTypePersistentMD == type) {
716
717 typePersMDData *initData = (typePersMDData *) buffers;
718 const IOGeneralMemoryDescriptor *orig = initData->fMD;
719 ioGMDData *dataP = getDataP(orig->_memoryEntries);
720
721 // Only accept persistent memory descriptors with valid dataP data.
722 assert(orig->_rangesCount == 1);
723 if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
724 return false;
725
726 _memEntry = initData->fMemEntry; // Grab the new named entry
727 options = orig->_flags | kIOMemoryAsReference;
728 _singleRange = orig->_singleRange; // Initialise our range
729 buffers = &_singleRange;
730 count = 1;
731
732 // Now grab the original task and whatever mapper was previously used
733 task = orig->_task;
734 mapper = dataP->fMapper;
735
736 // We are ready to go through the original initialisation now
737 }
738
739 switch (type) {
740 case kIOMemoryTypeUIO:
741 case kIOMemoryTypeVirtual:
742 case kIOMemoryTypeVirtual64:
743 assert(task);
744 if (!task)
745 return false;
746
747 if (vm_map_is_64bit(get_task_map(task))
748 && (kIOMemoryTypeVirtual == type)
749 && ((IOVirtualRange *) buffers)->address)
750 {
751 OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
752 return false;
753 }
754 break;
755
756 case kIOMemoryTypePhysical: // Neither Physical nor UPL should have a task
757 case kIOMemoryTypePhysical64:
758 mapper = kIOMapperNone;
759
760 case kIOMemoryTypeUPL:
761 assert(!task);
762 break;
763 default:
764 return false; /* bad argument */
765 }
766
767 assert(buffers);
768 assert(count);
769
770 /*
771 * We can check the _initialized instance variable before having ever set
772 * it to an initial value because I/O Kit guarantees that all our instance
773 * variables are zeroed on an object's allocation.
774 */
775
776 if (_initialized) {
777 /*
778 * An existing memory descriptor is being retargeted to point to
779 * somewhere else. Clean up our present state.
780 */
781 IOOptionBits type = _flags & kIOMemoryTypeMask;
782 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
783 {
784 while (_wireCount)
785 complete();
786 }
787 if (_ranges.v && _rangesIsAllocated)
788 {
789 if (kIOMemoryTypeUIO == type)
790 uio_free((uio_t) _ranges.v);
791 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
792 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
793 else
794 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
795 }
796
797 if (_memEntry)
798 { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
799 if (_mappings)
800 _mappings->flushCollection();
801 }
802 else {
803 if (!super::init())
804 return false;
805 _initialized = true;
806 }
807
808 // Grab the appropriate mapper
809 if (mapper == kIOMapperNone)
810 mapper = 0; // No Mapper
811 else if (mapper == kIOMapperSystem) {
812 IOMapper::checkForSystemMapper();
813 gIOSystemMapper = mapper = IOMapper::gSystem;
814 }
815
816 // Remove the dynamic internal use flags from the initial setting
817 options &= ~(kIOMemoryPreparedReadOnly);
818 _flags = options;
819 _task = task;
820
821 // DEPRECATED variable initialisation
822 _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
823
824 __iomd_reservedA = 0;
825 __iomd_reservedB = 0;
826 _highestPage = 0;
827
828 if (kIOMemoryThreadSafe & options)
829 {
830 if (!_prepareLock)
831 _prepareLock = IOLockAlloc();
832 }
833 else if (_prepareLock)
834 {
835 IOLockFree(_prepareLock);
836 _prepareLock = NULL;
837 }
838
839 if (kIOMemoryTypeUPL == type) {
840
841 ioGMDData *dataP;
842 unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);
843
844 if (!_memoryEntries) {
845 _memoryEntries = OSData::withCapacity(dataSize);
846 if (!_memoryEntries)
847 return false;
848 }
849 else if (!_memoryEntries->initWithCapacity(dataSize))
850 return false;
851
852 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
853 dataP = getDataP(_memoryEntries);
854 dataP->fMapper = mapper;
855 dataP->fPageCnt = 0;
856
857 // _wireCount++; // UPLs start out life wired
858
859 _length = count;
860 _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);
861
862 ioPLBlock iopl;
863 upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);
864
865 iopl.fIOPL = (upl_t) buffers;
866 // The flag kIOPLOnDevice is conveniently defined equal to 1
867 iopl.fFlags = pageList->device | kIOPLExternUPL;
868 iopl.fIOMDOffset = 0;
869
870 _highestPage = upl_get_highest_page(iopl.fIOPL);
871
872 if (!pageList->device) {
873 // Pre-compute the offset into the UPL's page list
874 pageList = &pageList[atop_32(offset)];
875 offset &= PAGE_MASK;
876 if (mapper) {
877 iopl.fMappedBase = mapper->iovmAlloc(_pages);
878 mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
879 }
880 else
881 iopl.fMappedBase = 0;
882 }
883 else
884 iopl.fMappedBase = 0;
885 iopl.fPageInfo = (vm_address_t) pageList;
886 iopl.fPageOffset = offset;
887
888 _memoryEntries->appendBytes(&iopl, sizeof(iopl));
889 }
890 else {
891 // kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
892 // kIOMemoryTypePhysical | kIOMemoryTypePhysical64
893
894 // Initialize the memory descriptor
895 if (options & kIOMemoryAsReference) {
896 _rangesIsAllocated = false;
897
898 // Hack assignment to get the buffer arg into _ranges.
899 // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
900 // work, C++ sigh.
901 // This also initialises the uio & physical ranges.
902 _ranges.v = (IOVirtualRange *) buffers;
903 }
904 else {
905 _rangesIsAllocated = true;
906 switch (_flags & kIOMemoryTypeMask)
907 {
908 case kIOMemoryTypeUIO:
909 _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
910 break;
911
912 case kIOMemoryTypeVirtual64:
913 case kIOMemoryTypePhysical64:
914 _ranges.v64 = IONew(IOAddressRange, count);
915 if (!_ranges.v64)
916 return false;
917 bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
918 break;
919 case kIOMemoryTypeVirtual:
920 case kIOMemoryTypePhysical:
921 _ranges.v = IONew(IOVirtualRange, count);
922 if (!_ranges.v)
923 return false;
924 bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
925 break;
926 }
927 }
928
929 // Find starting address within the vector of ranges
930 Ranges vec = _ranges;
931 UInt32 length = 0;
932 UInt32 pages = 0;
933 for (unsigned ind = 0; ind < count; ind++) {
934 user_addr_t addr;
935 UInt32 len;
936
937 // addr & len are returned by this function
938 getAddrLenForInd(addr, len, type, vec, ind);
939 pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
940 len += length;
941 assert(len >= length); // Check for 32 bit wrap around
942 length = len;
943
944 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
945 {
946 ppnum_t highPage = atop_64(addr + len - 1);
947 if (highPage > _highestPage)
948 _highestPage = highPage;
949 }
950 }
951 _length = length;
952 _pages = pages;
953 _rangesCount = count;
954
955 // Auto-prepare memory at creation time.
956 // Implied completion when the descriptor is freed
957 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
958 _wireCount++; // Physical MDs are, by definition, wired
959 else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
960 ioGMDData *dataP;
961 unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);
962
963 if (!_memoryEntries) {
964 _memoryEntries = OSData::withCapacity(dataSize);
965 if (!_memoryEntries)
966 return false;
967 }
968 else if (!_memoryEntries->initWithCapacity(dataSize))
969 return false;
970
971 _memoryEntries->appendBytes(0, sizeof(ioGMDData));
972 dataP = getDataP(_memoryEntries);
973 dataP->fMapper = mapper;
974 dataP->fPageCnt = _pages;
975
976 if ( (kIOMemoryPersistent & _flags) && !_memEntry)
977 _memEntry = createNamedEntry();
978
979 if ((_flags & kIOMemoryAutoPrepare)
980 && prepare() != kIOReturnSuccess)
981 return false;
982 }
983 }
984
985 return true;
986 }
987
988 /*
989 * free
990 *
991 * Free resources.
992 */
993 void IOGeneralMemoryDescriptor::free()
994 {
995 IOOptionBits type = _flags & kIOMemoryTypeMask;
996
997 if( reserved)
998 {
999 LOCK;
1000 reserved->memory = 0;
1001 UNLOCK;
1002 }
1003
1004 if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
1005 {
1006 while (_wireCount)
1007 complete();
1008 }
1009 if (_memoryEntries)
1010 _memoryEntries->release();
1011
1012 if (_ranges.v && _rangesIsAllocated)
1013 {
1014 if (kIOMemoryTypeUIO == type)
1015 uio_free((uio_t) _ranges.v);
1016 else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
1017 IODelete(_ranges.v64, IOAddressRange, _rangesCount);
1018 else
1019 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
1020 }
1021
1022 if (reserved && reserved->devicePager)
1023 device_pager_deallocate( (memory_object_t) reserved->devicePager );
1024
1025 // _memEntry holds a ref on the device pager, which owns reserved
1026 // (ExpansionData), so reserved must not be accessed after this point
1027 if (_memEntry)
1028 ipc_port_release_send( (ipc_port_t) _memEntry );
1029
1030 if (_prepareLock)
1031 IOLockFree(_prepareLock);
1032
1033 super::free();
1034 }
1035
1036 /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
1037 /* DEPRECATED */ {
1038 panic("IOGMD::unmapFromKernel deprecated");
1039 /* DEPRECATED */ }
1040 /* DEPRECATED */
1041 /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
1042 /* DEPRECATED */ {
1043 panic("IOGMD::mapIntoKernel deprecated");
1044 /* DEPRECATED */ }
1045
1046 /*
1047 * getDirection:
1048 *
1049 * Get the direction of the transfer.
1050 */
1051 IODirection IOMemoryDescriptor::getDirection() const
1052 {
1053 return _direction;
1054 }
1055
1056 /*
1057 * getLength:
1058 *
1059 * Get the length of the transfer (over all ranges).
1060 */
1061 IOByteCount IOMemoryDescriptor::getLength() const
1062 {
1063 return _length;
1064 }
1065
1066 void IOMemoryDescriptor::setTag( IOOptionBits tag )
1067 {
1068 _tag = tag;
1069 }
1070
1071 IOOptionBits IOMemoryDescriptor::getTag( void )
1072 {
1073 return( _tag);
1074 }
1075
1076 // @@@ gvdl: who is using this API? Seems like a weird thing to implement.
1077 IOPhysicalAddress
1078 IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
1079 {
1080 addr64_t physAddr = 0;
1081
1082 if( prepare() == kIOReturnSuccess) {
1083 physAddr = getPhysicalSegment64( offset, length );
1084 complete();
1085 }
1086
1087 return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
1088 }
1089
1090 IOByteCount IOMemoryDescriptor::readBytes
1091 (IOByteCount offset, void *bytes, IOByteCount length)
1092 {
1093 addr64_t dstAddr = (addr64_t) (UInt32) bytes;
1094 IOByteCount remaining;
1095
1096 // Assert that this entire I/O is within the available range
1097 assert(offset < _length);
1098 assert(offset + length <= _length);
1099 if (offset >= _length) {
1100 return 0;
1101 }
1102
1103 remaining = length = min(length, _length - offset);
1104 while (remaining) { // (process another target segment?)
1105 addr64_t srcAddr64;
1106 IOByteCount srcLen;
1107
1108 srcAddr64 = getPhysicalSegment64(offset, &srcLen);
1109 if (!srcAddr64)
1110 break;
1111
1112 // Clip segment length to remaining
1113 if (srcLen > remaining)
1114 srcLen = remaining;
1115
1116 copypv(srcAddr64, dstAddr, srcLen,
1117 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1118
1119 dstAddr += srcLen;
1120 offset += srcLen;
1121 remaining -= srcLen;
1122 }
1123
1124 assert(!remaining);
1125
1126 return length - remaining;
1127 }
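// Usage sketch (illustrative, not part of the original source): copying the
// first 512 bytes of a prepared descriptor into a local buffer; the caller
// compares the returned count against the amount requested.
//
//     char local[512];
//     IOByteCount copied = md->readBytes(0, local, sizeof(local));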
1128
1129 IOByteCount IOMemoryDescriptor::writeBytes
1130 (IOByteCount offset, const void *bytes, IOByteCount length)
1131 {
1132 addr64_t srcAddr = (addr64_t) (UInt32) bytes;
1133 IOByteCount remaining;
1134
1135 // Assert that this entire I/O is within the available range
1136 assert(offset < _length);
1137 assert(offset + length <= _length);
1138
1139 assert( !(kIOMemoryPreparedReadOnly & _flags) );
1140
1141 if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
1142 return 0;
1143 }
1144
1145 remaining = length = min(length, _length - offset);
1146 while (remaining) { // (process another target segment?)
1147 addr64_t dstAddr64;
1148 IOByteCount dstLen;
1149
1150 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1151 if (!dstAddr64)
1152 break;
1153
1154 // Clip segment length to remaining
1155 if (dstLen > remaining)
1156 dstLen = remaining;
1157
1158 copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
1159 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1160
1161 srcAddr += dstLen;
1162 offset += dstLen;
1163 remaining -= dstLen;
1164 }
1165
1166 assert(!remaining);
1167
1168 return length - remaining;
1169 }
1170
1171 // osfmk/device/iokit_rpc.c
1172 extern "C" unsigned int IODefaultCacheBits(addr64_t pa);
1173
1174 /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
1175 /* DEPRECATED */ {
1176 panic("IOGMD::setPosition deprecated");
1177 /* DEPRECATED */ }
1178
1179 IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1180 {
1181 if (kIOMDGetCharacteristics == op) {
1182
1183 if (dataSize < sizeof(IOMDDMACharacteristics))
1184 return kIOReturnUnderrun;
1185
1186 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1187 data->fLength = _length;
1188 data->fSGCount = _rangesCount;
1189 data->fPages = _pages;
1190 data->fDirection = _direction;
1191 if (!_wireCount)
1192 data->fIsPrepared = false;
1193 else {
1194 data->fIsPrepared = true;
1195 data->fHighestPage = _highestPage;
1196 if (_memoryEntries) {
1197 ioGMDData *gmdData = getDataP(_memoryEntries);
1198 ioPLBlock *ioplList = getIOPLList(gmdData);
1199 UInt count = getNumIOPL(_memoryEntries, gmdData);
1200
1201 data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
1202 && ioplList[0].fMappedBase);
1203 if (count == 1)
1204 data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
1205 }
1206 else
1207 data->fIsMapped = false;
1208 }
1209
1210 return kIOReturnSuccess;
1211 }
1212 else if (!(kIOMDWalkSegments & op))
1213 return kIOReturnBadArgument;
1214
1215 // Get the next segment
1216 struct InternalState {
1217 IOMDDMAWalkSegmentArgs fIO;
1218 UInt fOffset2Index;
1219 UInt fIndex;
1220 UInt fNextOffset;
1221 } *isP;
1222
1223 // Find the next segment
1224 if (dataSize < sizeof(*isP))
1225 return kIOReturnUnderrun;
1226
1227 isP = (InternalState *) vData;
1228 UInt offset = isP->fIO.fOffset;
1229 bool mapped = isP->fIO.fMapped;
1230
1231 if (offset >= _length)
1232 return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;
1233
1234 // Validate the previous offset
1235 UInt ind, off2Ind = isP->fOffset2Index;
1236 if ((kIOMDFirstSegment != op)
1237 && offset
1238 && (offset == isP->fNextOffset || off2Ind <= offset))
1239 ind = isP->fIndex;
1240 else
1241 ind = off2Ind = 0; // Start from beginning
1242
1243 UInt length;
1244 UInt64 address;
1245 if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
1246
1247 // Physical address based memory descriptor
1248 const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];
1249
1250 // Find the range after the one that contains the offset
1251 UInt len;
1252 for (len = 0; off2Ind <= offset; ind++) {
1253 len = physP[ind].length;
1254 off2Ind += len;
1255 }
1256
1257 // Calculate length within range and starting address
1258 length = off2Ind - offset;
1259 address = physP[ind - 1].address + len - length;
1260
1261 // see how far we can coalesce ranges
1262 while (ind < _rangesCount && address + length == physP[ind].address) {
1263 len = physP[ind].length;
1264 length += len;
1265 off2Ind += len;
1266 ind++;
1267 }
1268
1269 // correct contiguous check overshoot
1270 ind--;
1271 off2Ind -= len;
1272 }
1273 else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {
1274
1275 // Physical address based memory descriptor
1276 const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];
1277
1278 // Find the range after the one that contains the offset
1279 mach_vm_size_t len;
1280 for (len = 0; off2Ind <= offset; ind++) {
1281 len = physP[ind].length;
1282 off2Ind += len;
1283 }
1284
1285 // Calculate length within range and starting address
1286 length = off2Ind - offset;
1287 address = physP[ind - 1].address + len - length;
1288
1289 // see how far we can coalesce ranges
1290 while (ind < _rangesCount && address + length == physP[ind].address) {
1291 len = physP[ind].length;
1292 length += len;
1293 off2Ind += len;
1294 ind++;
1295 }
1296
1297 // correct contiguous check overshoot
1298 ind--;
1299 off2Ind -= len;
1300 }
1301 else do {
1302 if (!_wireCount)
1303 panic("IOGMD: not wired for the IODMACommand");
1304
1305 assert(_memoryEntries);
1306
1307 ioGMDData * dataP = getDataP(_memoryEntries);
1308 const ioPLBlock *ioplList = getIOPLList(dataP);
1309 UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
1310 upl_page_info_t *pageList = getPageList(dataP);
1311
1312 assert(numIOPLs > 0);
1313
1314 // Scan through iopl info blocks looking for block containing offset
1315 while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
1316 ind++;
1317
1318 // Go back to actual range as search goes past it
1319 ioPLBlock ioplInfo = ioplList[ind - 1];
1320 off2Ind = ioplInfo.fIOMDOffset;
1321
1322 if (ind < numIOPLs)
1323 length = ioplList[ind].fIOMDOffset;
1324 else
1325 length = _length;
1326 length -= offset; // Remainder within iopl
1327
1328 // Subtract this iopl's starting offset within the total list
1329 offset -= off2Ind;
1330
1331 // If a mapped address is requested and this is a pre-mapped IOPL
1332 // then just need to compute an offset relative to the mapped base.
1333 if (mapped && ioplInfo.fMappedBase) {
1334 offset += (ioplInfo.fPageOffset & PAGE_MASK);
1335 address = ptoa_64(ioplInfo.fMappedBase) + offset;
1336 continue; // Done; leave the do/while(false) now
1337 }
1338
1339 // The offset is rebased into the current iopl.
1340 // Now add the iopl 1st page offset.
1341 offset += ioplInfo.fPageOffset;
1342
1343 // For external UPLs the fPageInfo field points directly to
1344 // the upl's upl_page_info_t array.
1345 if (ioplInfo.fFlags & kIOPLExternUPL)
1346 pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
1347 else
1348 pageList = &pageList[ioplInfo.fPageInfo];
1349
1350 // Check for direct device non-paged memory
1351 if ( ioplInfo.fFlags & kIOPLOnDevice ) {
1352 address = ptoa_64(pageList->phys_addr) + offset;
1353 continue; // Done; leave the do/while(false) now
1354 }
1355
1356 // Now we need to compute the index into the pageList
1357 UInt pageInd = atop_32(offset);
1358 offset &= PAGE_MASK;
1359
1360 // Compute the starting address of this segment
1361 IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
1362 address = ptoa_64(pageAddr) + offset;
1363
1364 // length is currently set to the length of the remainder of the iopl.
1365 // We need to check that the remainder of the iopl is contiguous.
1366 // This is indicated by pageList[ind].phys_addr being sequential.
1367 IOByteCount contigLength = PAGE_SIZE - offset;
1368 while (contigLength < length
1369 && ++pageAddr == pageList[++pageInd].phys_addr)
1370 {
1371 contigLength += PAGE_SIZE;
1372 }
1373
1374 if (contigLength < length)
1375 length = contigLength;
1376
1377
1378 assert(address);
1379 assert(length);
1380
1381 } while (false);
1382
1383 // Update return values and state
1384 isP->fIO.fIOVMAddr = address;
1385 isP->fIO.fLength = length;
1386 isP->fIndex = ind;
1387 isP->fOffset2Index = off2Ind;
1388 isP->fNextOffset = isP->fIO.fOffset + length;
1389
1390 return kIOReturnSuccess;
1391 }
1392
1393 addr64_t
1394 IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1395 {
1396 IOReturn ret;
1397 IOByteCount length = 0;
1398 addr64_t address = 0;
1399
1400 if (gIOSystemMapper && (kIOMemoryTypePhysical == (_flags & kIOMemoryTypeMask)))
1401 return (super::getPhysicalSegment64(offset, lengthOfSegment));
1402
1403 if (offset < _length) // (within bounds?)
1404 {
1405 IOMDDMAWalkSegmentState _state;
1406 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1407
1408 state->fOffset = offset;
1409 state->fLength = _length - offset;
1410 state->fMapped = false;
1411
1412 ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));
1413
1414 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1415 DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1416 ret, this, state->fOffset,
1417 state->fIOVMAddr, state->fLength);
1418 if (kIOReturnSuccess == ret)
1419 {
1420 address = state->fIOVMAddr;
1421 length = state->fLength;
1422 }
1423 if (!address)
1424 length = 0;
1425 }
1426
1427 if (lengthOfSegment)
1428 *lengthOfSegment = length;
1429
1430 return (address);
1431 }
1432
1433 IOPhysicalAddress
1434 IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1435 {
1436 IOReturn ret;
1437 IOByteCount length = 0;
1438 addr64_t address = 0;
1439
1440 // assert(offset <= _length);
1441
1442 if (offset < _length) // (within bounds?)
1443 {
1444 IOMDDMAWalkSegmentState _state;
1445 IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;
1446
1447 state->fOffset = offset;
1448 state->fLength = _length - offset;
1449 state->fMapped = true;
1450
1451 ret = dmaCommandOperation(
1452 kIOMDFirstSegment, _state, sizeof(_state));
1453
1454 if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
1455 DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
1456 ret, this, state->fOffset,
1457 state->fIOVMAddr, state->fLength);
1458 if (kIOReturnSuccess == ret)
1459 {
1460 address = state->fIOVMAddr;
1461 length = state->fLength;
1462 }
1463
1464 if (!address)
1465 length = 0;
1466 }
1467
1468 if ((address + length) > 0x100000000ULL)
1469 {
1470 panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
1471 address, length, (getMetaClass())->getClassName());
1472 }
1473
1474 if (lengthOfSegment)
1475 *lengthOfSegment = length;
1476
1477 return ((IOPhysicalAddress) address);
1478 }
1479
1480 addr64_t
1481 IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
1482 {
1483 IOPhysicalAddress phys32;
1484 IOByteCount length;
1485 addr64_t phys64;
1486 IOMapper * mapper = 0;
1487
1488 phys32 = getPhysicalSegment(offset, lengthOfSegment);
1489 if (!phys32)
1490 return 0;
1491
1492 if (gIOSystemMapper)
1493 mapper = gIOSystemMapper;
1494
1495 if (mapper)
1496 {
1497 IOByteCount origLen;
1498
1499 phys64 = mapper->mapAddr(phys32);
1500 origLen = *lengthOfSegment;
1501 length = page_size - (phys64 & (page_size - 1));
1502 while ((length < origLen)
1503 && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
1504 length += page_size;
1505 if (length > origLen)
1506 length = origLen;
1507
1508 *lengthOfSegment = length;
1509 }
1510 else
1511 phys64 = (addr64_t) phys32;
1512
1513 return phys64;
1514 }
1515
1516 IOPhysicalAddress
1517 IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
1518 {
1519 IOPhysicalAddress address = 0;
1520 IOPhysicalLength length = 0;
1521 IOOptionBits type = _flags & kIOMemoryTypeMask;
1522
1523 assert(offset <= _length);
1524
1525 if ( type == kIOMemoryTypeUPL)
1526 return super::getSourceSegment( offset, lengthOfSegment );
1527 else if ( offset < _length ) // (within bounds?)
1528 {
1529 unsigned rangesIndex = 0;
1530 Ranges vec = _ranges;
1531 user_addr_t addr;
1532
1533 // Find starting address within the vector of ranges
1534 for (;;) {
1535 getAddrLenForInd(addr, length, type, vec, rangesIndex);
1536 if (offset < length)
1537 break;
1538 offset -= length; // (make offset relative)
1539 rangesIndex++;
1540 }
1541
1542 // Now that we have the starting range,
1543 // lets find the last contiguous range
1544 addr += offset;
1545 length -= offset;
1546
1547 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
1548 user_addr_t newAddr;
1549 IOPhysicalLength newLen;
1550
1551 getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
1552 if (addr + length != newAddr)
1553 break;
1554 length += newLen;
1555 }
1556 if (addr)
1557 address = (IOPhysicalAddress) addr; // Truncate address to 32bit
1558 else
1559 length = 0;
1560 }
1561
1562 if ( lengthOfSegment ) *lengthOfSegment = length;
1563
1564 return address;
1565 }
1566
1567 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1568 /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
1569 /* DEPRECATED */ IOByteCount * lengthOfSegment)
1570 /* DEPRECATED */ {
1571 if (_task == kernel_task)
1572 return (void *) getSourceSegment(offset, lengthOfSegment);
1573 else
1574 panic("IOGMD::getVirtualSegment deprecated");
1575
1576 return 0;
1577 /* DEPRECATED */ }
1578 /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
1579
1580
1581
1582 IOReturn
1583 IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
1584 {
1585 if (kIOMDGetCharacteristics == op) {
1586 if (dataSize < sizeof(IOMDDMACharacteristics))
1587 return kIOReturnUnderrun;
1588
1589 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
1590 data->fLength = getLength();
1591 data->fSGCount = 0;
1592 data->fDirection = _direction;
1593 if (IOMapper::gSystem)
1594 data->fIsMapped = true;
1595 data->fIsPrepared = true; // Assume prepared - fails safe
1596 }
1597 else if (kIOMDWalkSegments & op) {
1598 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
1599 return kIOReturnUnderrun;
1600
1601 IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
1602 IOByteCount offset = (IOByteCount) data->fOffset;
1603
1604 IOPhysicalLength length;
1605 IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
1606 if (data->fMapped && IOMapper::gSystem)
1607 data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
1608 else
1609 data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
1610 data->fLength = length;
1611 }
1612 else
1613 return kIOReturnBadArgument;
1614
1615 return kIOReturnSuccess;
1616 }
1617
1618 IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
1619 IOOptionBits * oldState )
1620 {
1621 IOReturn err = kIOReturnSuccess;
1622 vm_purgable_t control;
1623 int state;
1624
1625 do
1626 {
1627 if (!_memEntry)
1628 {
1629 err = kIOReturnNotReady;
1630 break;
1631 }
1632
1633 control = VM_PURGABLE_SET_STATE;
1634 switch (newState)
1635 {
1636 case kIOMemoryPurgeableKeepCurrent:
1637 control = VM_PURGABLE_GET_STATE;
1638 break;
1639
1640 case kIOMemoryPurgeableNonVolatile:
1641 state = VM_PURGABLE_NONVOLATILE;
1642 break;
1643 case kIOMemoryPurgeableVolatile:
1644 state = VM_PURGABLE_VOLATILE;
1645 break;
1646 case kIOMemoryPurgeableEmpty:
1647 state = VM_PURGABLE_EMPTY;
1648 break;
1649 default:
1650 err = kIOReturnBadArgument;
1651 break;
1652 }
1653
1654 if (kIOReturnSuccess != err)
1655 break;
1656
1657 err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
1658
1659 if (oldState)
1660 {
1661 if (kIOReturnSuccess == err)
1662 {
1663 switch (state)
1664 {
1665 case VM_PURGABLE_NONVOLATILE:
1666 state = kIOMemoryPurgeableNonVolatile;
1667 break;
1668 case VM_PURGABLE_VOLATILE:
1669 state = kIOMemoryPurgeableVolatile;
1670 break;
1671 case VM_PURGABLE_EMPTY:
1672 state = kIOMemoryPurgeableEmpty;
1673 break;
1674 default:
1675 state = kIOMemoryPurgeableNonVolatile;
1676 err = kIOReturnNotReady;
1677 break;
1678 }
1679 *oldState = state;
1680 }
1681 }
1682 }
1683 while (false);
1684
1685 return (err);
1686 }
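// Usage sketch (illustrative, not part of the original source): marking the
// backing memory volatile while its contents are reconstructible, then, on
// reuse, checking whether the VM system emptied it in the meantime.
//
//     IOOptionBits oldState;
//     md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//     // ... later ...
//     md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//     bool wasPurged = (kIOMemoryPurgeableEmpty == oldState);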
1687
1688 extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1689 extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1690
1691 IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1692 IOByteCount offset, IOByteCount length )
1693 {
1694 IOByteCount remaining;
1695 void (*func)(addr64_t pa, unsigned int count) = 0;
1696
1697 switch (options)
1698 {
1699 case kIOMemoryIncoherentIOFlush:
1700 func = &dcache_incoherent_io_flush64;
1701 break;
1702 case kIOMemoryIncoherentIOStore:
1703 func = &dcache_incoherent_io_store64;
1704 break;
1705 }
1706
1707 if (!func)
1708 return (kIOReturnUnsupported);
1709
1710 remaining = length = min(length, getLength() - offset);
1711 while (remaining)
1712 // (process another target segment?)
1713 {
1714 addr64_t dstAddr64;
1715 IOByteCount dstLen;
1716
1717 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1718 if (!dstAddr64)
1719 break;
1720
1721 // Clip segment length to remaining
1722 if (dstLen > remaining)
1723 dstLen = remaining;
1724
1725 (*func)(dstAddr64, dstLen);
1726
1727 offset += dstLen;
1728 remaining -= dstLen;
1729 }
1730
1731 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1732 }
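// Usage sketch (illustrative, not part of the original source): pushing CPU
// writes out to memory before a device reads the buffer over incoherent I/O,
// covering the descriptor's full length.
//
//     md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());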
1733
1734 #if defined(__ppc__) || defined(__arm__)
1735 extern vm_offset_t static_memory_end;
1736 #define io_kernel_static_end static_memory_end
1737 #else
1738 extern vm_offset_t first_avail;
1739 #define io_kernel_static_end first_avail
1740 #endif
1741
1742 static kern_return_t
1743 io_get_kernel_static_upl(
1744 vm_map_t /* map */,
1745 vm_address_t offset,
1746 vm_size_t *upl_size,
1747 upl_t *upl,
1748 upl_page_info_array_t page_list,
1749 unsigned int *count,
1750 ppnum_t *highest_page)
1751 {
1752 unsigned int pageCount, page;
1753 ppnum_t phys;
1754 ppnum_t highestPage = 0;
1755
1756 pageCount = atop_32(*upl_size);
1757 if (pageCount > *count)
1758 pageCount = *count;
1759
1760 *upl = NULL;
1761
1762 for (page = 0; page < pageCount; page++)
1763 {
1764 phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
1765 if (!phys)
1766 break;
1767 page_list[page].phys_addr = phys;
1768 page_list[page].pageout = 0;
1769 page_list[page].absent = 0;
1770 page_list[page].dirty = 0;
1771 page_list[page].precious = 0;
1772 page_list[page].device = 0;
1773 if (phys > highestPage)
1774 highestPage = phys;
1775 }
1776
1777 *highest_page = highestPage;
1778
1779 return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
1780 }
1781
1782 IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
1783 {
1784 IOOptionBits type = _flags & kIOMemoryTypeMask;
1785 IOReturn error = kIOReturnCannotWire;
1786 ioGMDData *dataP;
1787 ppnum_t mapBase = 0;
1788 IOMapper *mapper;
1789 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1790
1791 assert(!_wireCount);
1792 assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);
1793
1794 if (_pages >= gIOMaximumMappedIOPageCount)
1795 return kIOReturnNoResources;
1796
1797 dataP = getDataP(_memoryEntries);
1798 mapper = dataP->fMapper;
1799 if (mapper && _pages)
1800 mapBase = mapper->iovmAlloc(_pages);
1801
1802 // Note that appendBytes(NULL) zeros the data up to the
1803 // desired length.
1804 _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
1805 dataP = 0; // May no longer be valid, so let's not get tempted.
1806
1807 if (forDirection == kIODirectionNone)
1808 forDirection = _direction;
1809
1810 int uplFlags; // This Mem Desc's default flags for upl creation
1811 switch (kIODirectionOutIn & forDirection)
1812 {
1813 case kIODirectionOut:
1814 // Pages do not need to be marked as dirty on commit
1815 uplFlags = UPL_COPYOUT_FROM;
1816 _flags |= kIOMemoryPreparedReadOnly;
1817 break;
1818
1819 case kIODirectionIn:
1820 default:
1821 uplFlags = 0; // i.e. ~UPL_COPYOUT_FROM
1822 break;
1823 }
1824 uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;
1825
1826 #ifdef UPL_NEED_32BIT_ADDR
1827 if (kIODirectionPrepareToPhys32 & forDirection)
1828 uplFlags |= UPL_NEED_32BIT_ADDR;
1829 #endif
1830
1831 // Find the appropriate vm_map for the given task
1832 vm_map_t curMap;
1833 if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
1834 curMap = 0;
1835 else
1836 { curMap = get_task_map(_task); }
1837
1838 // Iterate over the vector of virtual ranges
1839 Ranges vec = _ranges;
1840 unsigned int pageIndex = 0;
1841 IOByteCount mdOffset = 0;
1842 ppnum_t highestPage = 0;
1843 for (UInt range = 0; range < _rangesCount; range++) {
1844 ioPLBlock iopl;
1845 user_addr_t startPage;
1846 IOByteCount numBytes;
1847 ppnum_t highPage = 0;
1848
1849 // Get the startPage address and length of vec[range]
1850 getAddrLenForInd(startPage, numBytes, type, vec, range);
1851 iopl.fPageOffset = (short) startPage & PAGE_MASK;
1852 numBytes += iopl.fPageOffset;
1853 startPage = trunc_page_64(startPage);
1854
1855 if (mapper)
1856 iopl.fMappedBase = mapBase + pageIndex;
1857 else
1858 iopl.fMappedBase = 0;
1859
1860 // Iterate over the current range, creating UPLs
1861 while (numBytes) {
1862 dataP = getDataP(_memoryEntries);
1863 vm_address_t kernelStart = (vm_address_t) startPage;
1864 vm_map_t theMap;
1865 if (curMap)
1866 theMap = curMap;
1867 else if (!sharedMem) {
1868 assert(_task == kernel_task);
1869 theMap = IOPageableMapForAddress(kernelStart);
1870 }
1871 else
1872 theMap = NULL;
1873
1874 upl_page_info_array_t pageInfo = getPageList(dataP);
1875 int ioplFlags = uplFlags;
1876 upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];
1877
1878 vm_size_t ioplSize = round_page_32(numBytes);
1879 unsigned int numPageInfo = atop_32(ioplSize);
1880
1881 if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
1882 error = io_get_kernel_static_upl(theMap,
1883 kernelStart,
1884 &ioplSize,
1885 &iopl.fIOPL,
1886 baseInfo,
1887 &numPageInfo,
1888 &highPage);
1889 }
1890 else if (sharedMem) {
1891 error = memory_object_iopl_request(sharedMem,
1892 ptoa_32(pageIndex),
1893 &ioplSize,
1894 &iopl.fIOPL,
1895 baseInfo,
1896 &numPageInfo,
1897 &ioplFlags);
1898 }
1899 else {
1900 assert(theMap);
1901 error = vm_map_create_upl(theMap,
1902 startPage,
1903 &ioplSize,
1904 &iopl.fIOPL,
1905 baseInfo,
1906 &numPageInfo,
1907 &ioplFlags);
1908 }
1909
1910 assert(ioplSize);
1911 if (error != KERN_SUCCESS)
1912 goto abortExit;
1913
1914 if (iopl.fIOPL)
1915 highPage = upl_get_highest_page(iopl.fIOPL);
1916 if (highPage > highestPage)
1917 highestPage = highPage;
1918
1919 error = kIOReturnCannotWire;
1920
1921 if (baseInfo->device) {
1922 numPageInfo = 1;
1923 iopl.fFlags = kIOPLOnDevice;
1924 // Don't translate device memory at all
1925 if (mapper && mapBase) {
1926 mapper->iovmFree(mapBase, _pages);
1927 mapBase = 0;
1928 iopl.fMappedBase = 0;
1929 }
1930 }
1931 else {
1932 iopl.fFlags = 0;
1933 if (mapper)
1934 mapper->iovmInsert(mapBase, pageIndex,
1935 baseInfo, numPageInfo);
1936 }
1937
1938 iopl.fIOMDOffset = mdOffset;
1939 iopl.fPageInfo = pageIndex;
1940
1941 if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
1942 {
1943 upl_commit(iopl.fIOPL, 0, 0);
1944 upl_deallocate(iopl.fIOPL);
1945 iopl.fIOPL = 0;
1946 }
1947
1948 if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
1949 // Clean up the partially created and unsaved iopl
1950 if (iopl.fIOPL) {
1951 upl_abort(iopl.fIOPL, 0);
1952 upl_deallocate(iopl.fIOPL);
1953 }
1954 goto abortExit;
1955 }
1956
1957 // Check for multiple iopls in one virtual range
1958 pageIndex += numPageInfo;
1959 mdOffset -= iopl.fPageOffset;
1960 if (ioplSize < numBytes) {
1961 numBytes -= ioplSize;
1962 startPage += ioplSize;
1963 mdOffset += ioplSize;
1964 iopl.fPageOffset = 0;
1965 if (mapper)
1966 iopl.fMappedBase = mapBase + pageIndex;
1967 }
1968 else {
1969 mdOffset += numBytes;
1970 break;
1971 }
1972 }
1973 }
1974
1975 _highestPage = highestPage;
1976
1977 return kIOReturnSuccess;
1978
1979 abortExit:
1980 {
1981 dataP = getDataP(_memoryEntries);
1982 UInt done = getNumIOPL(_memoryEntries, dataP);
1983 ioPLBlock *ioplList = getIOPLList(dataP);
1984
1985 for (UInt range = 0; range < done; range++)
1986 {
1987 if (ioplList[range].fIOPL) {
1988 upl_abort(ioplList[range].fIOPL, 0);
1989 upl_deallocate(ioplList[range].fIOPL);
1990 }
1991 }
1992 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1993
1994 if (mapper && mapBase)
1995 mapper->iovmFree(mapBase, _pages);
1996 }
1997
1998 if (error == KERN_FAILURE)
1999 error = kIOReturnCannotWire;
2000
2001 return error;
2002 }
2003
2004 /*
2005 * prepare
2006 *
2007 * Prepare the memory for an I/O transfer. This involves paging in
2008 * the memory, if necessary, and wiring it down for the duration of
2009 * the transfer. The complete() method completes the processing of
2010 * the memory after the I/O transfer finishes. This method needn't
2011 * be called for non-pageable memory.
2012 */
2013 IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
2014 {
2015 IOReturn error = kIOReturnSuccess;
2016 IOOptionBits type = _flags & kIOMemoryTypeMask;
2017
2018 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2019 return kIOReturnSuccess;
2020
2021 if (_prepareLock)
2022 IOLockLock(_prepareLock);
2023
2024 if (!_wireCount
2025 && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
2026 error = wireVirtual(forDirection);
2027 }
2028
2029 if (kIOReturnSuccess == error)
2030 _wireCount++;
2031
2032 if (_prepareLock)
2033 IOLockUnlock(_prepareLock);
2034
2035 return error;
2036 }
2037
2038 /*
2039 * complete
2040 *
2041 * Complete processing of the memory after an I/O transfer finishes.
2042 * This method should not be called unless a prepare was previously
2043 * issued; the prepare() and complete() must occur in pairs, before
2044 * and after an I/O transfer involving pageable memory.
2045 */
2046
2047 IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
2048 {
2049 IOOptionBits type = _flags & kIOMemoryTypeMask;
2050
2051 if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
2052 return kIOReturnSuccess;
2053
2054 if (_prepareLock)
2055 IOLockLock(_prepareLock);
2056
2057 assert(_wireCount);
2058
2059 if (_wireCount)
2060 {
2061 _wireCount--;
2062 if (!_wireCount)
2063 {
2064 IOOptionBits type = _flags & kIOMemoryTypeMask;
2065 ioGMDData * dataP = getDataP(_memoryEntries);
2066 ioPLBlock *ioplList = getIOPLList(dataP);
2067 UInt count = getNumIOPL(_memoryEntries, dataP);
2068
2069 if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
2070 dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);
2071
2072 // Only complete iopls that we created, which are those for TypeVirtual
2073 if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
2074 for (UInt ind = 0; ind < count; ind++)
2075 if (ioplList[ind].fIOPL) {
2076 upl_commit(ioplList[ind].fIOPL, 0, 0);
2077 upl_deallocate(ioplList[ind].fIOPL);
2078 }
2079 }
2080 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
2081 }
2082 }
2083
2084 if (_prepareLock)
2085 IOLockUnlock(_prepareLock);
2086
2087 return kIOReturnSuccess;
2088 }
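/*
 * Illustrative sketch (not part of this file's implementation, and guarded out
 * of the build): the typical driver-side pairing of prepare() and complete()
 * around an I/O transfer on pageable memory, as described in the comments
 * above. The example function name and the transfer itself are hypothetical
 * placeholders.
 */
#if 0
static IOReturn ExamplePreparedTransfer(IOMemoryDescriptor * md)
{
    // Page in and wire the memory down for the duration of the transfer.
    IOReturn err = md->prepare();
    if (kIOReturnSuccess != err)
        return (err);

    // ... program the hardware / perform the DMA using the descriptor ...

    // Balance the prepare() once the transfer has finished.
    md->complete();

    return (kIOReturnSuccess);
}
#endif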
2089
2090 IOReturn IOGeneralMemoryDescriptor::doMap(
2091 vm_map_t __addressMap,
2092 IOVirtualAddress * __address,
2093 IOOptionBits options,
2094 IOByteCount __offset,
2095 IOByteCount __length )
2096
2097 {
2098 if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
2099
2100 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2101 mach_vm_size_t offset = mapping->fOffset + __offset;
2102 mach_vm_size_t length = mapping->fLength;
2103
2104 kern_return_t kr;
2105 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
2106
2107 IOOptionBits type = _flags & kIOMemoryTypeMask;
2108 Ranges vec = _ranges;
2109
2110 user_addr_t range0Addr = 0;
2111 IOByteCount range0Len = 0;
2112
2113 if (vec.v)
2114 getAddrLenForInd(range0Addr, range0Len, type, vec, 0);
2115
2116 // mapping source == dest? (could be much better)
2117 if( _task
2118 && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
2119 && (1 == _rangesCount) && (0 == offset)
2120 && range0Addr && (length <= range0Len) )
2121 {
2122 mapping->fAddress = range0Addr;
2123 mapping->fOptions |= kIOMapStatic;
2124
2125 return( kIOReturnSuccess );
2126 }
2127
2128 if( 0 == sharedMem) {
2129
2130 vm_size_t size = ptoa_32(_pages);
2131
2132 if( _task) {
2133
2134 memory_object_size_t actualSize = size;
2135 vm_prot_t prot = VM_PROT_READ;
2136 if (!(kIOMapReadOnly & options))
2137 prot |= VM_PROT_WRITE;
2138 else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
2139 prot |= VM_PROT_WRITE;
2140
2141 kr = mach_make_memory_entry_64(get_task_map(_task),
2142 &actualSize, range0Addr,
2143 prot, &sharedMem,
2144 NULL );
2145
2146 if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
2147 #if IOASSERT
2148 IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
2149 range0Addr, (UInt32) actualSize, size);
2150 #endif
2151 kr = kIOReturnVMError;
2152 ipc_port_release_send( sharedMem );
2153 }
2154
2155 if( KERN_SUCCESS != kr)
2156 sharedMem = MACH_PORT_NULL;
2157
2158 } else do { // _task == 0, must be physical
2159
2160 memory_object_t pager;
2161 unsigned int flags = 0;
2162 addr64_t pa;
2163 IOPhysicalLength segLen;
2164
2165 pa = getPhysicalSegment64( offset, &segLen );
2166
2167 if( !reserved) {
2168 reserved = IONew( ExpansionData, 1 );
2169 if( !reserved)
2170 continue;
2171 }
2172 reserved->pagerContig = (1 == _rangesCount);
2173 reserved->memory = this;
2174
2175 /* What cache mode do we need? */
2176 switch(options & kIOMapCacheMask ) {
2177
2178 case kIOMapDefaultCache:
2179 default:
2180 flags = IODefaultCacheBits(pa);
2181 if (DEVICE_PAGER_CACHE_INHIB & flags)
2182 {
2183 if (DEVICE_PAGER_GUARDED & flags)
2184 mapping->fOptions |= kIOMapInhibitCache;
2185 else
2186 mapping->fOptions |= kIOMapWriteCombineCache;
2187 }
2188 else if (DEVICE_PAGER_WRITE_THROUGH & flags)
2189 mapping->fOptions |= kIOMapWriteThruCache;
2190 else
2191 mapping->fOptions |= kIOMapCopybackCache;
2192 break;
2193
2194 case kIOMapInhibitCache:
2195 flags = DEVICE_PAGER_CACHE_INHIB |
2196 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2197 break;
2198
2199 case kIOMapWriteThruCache:
2200 flags = DEVICE_PAGER_WRITE_THROUGH |
2201 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
2202 break;
2203
2204 case kIOMapCopybackCache:
2205 flags = DEVICE_PAGER_COHERENT;
2206 break;
2207
2208 case kIOMapWriteCombineCache:
2209 flags = DEVICE_PAGER_CACHE_INHIB |
2210 DEVICE_PAGER_COHERENT;
2211 break;
2212 }
2213
2214 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
2215
2216 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
2217 size, flags);
2218 assert( pager );
2219
2220 if( pager) {
2221 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
2222 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
2223
2224 assert( KERN_SUCCESS == kr );
2225 if( KERN_SUCCESS != kr)
2226 {
2227 device_pager_deallocate( pager );
2228 pager = MACH_PORT_NULL;
2229 sharedMem = MACH_PORT_NULL;
2230 }
2231 }
2232 if( pager && sharedMem)
2233 reserved->devicePager = pager;
2234 else {
2235 IODelete( reserved, ExpansionData, 1 );
2236 reserved = 0;
2237 }
2238
2239 } while( false );
2240
2241 _memEntry = (void *) sharedMem;
2242 }
2243
2244 IOReturn result;
2245 if (0 == sharedMem)
2246 result = kIOReturnVMError;
2247 else
2248 result = super::doMap( __addressMap, __address,
2249 options, __offset, __length );
2250
2251 return( result );
2252 }
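/*
 * Illustrative sketch (not part of this file's implementation, and guarded out
 * of the build): how a client selects one of the cache modes handled by the
 * switch in doMap() above when mapping device memory. The example function
 * name and descriptor are hypothetical placeholders; a frame buffer would more
 * likely use kIOMapWriteCombineCache, and ordinary RAM kIOMapDefaultCache.
 */
#if 0
static IOMemoryMap * ExampleMapDeviceRegisters(IOMemoryDescriptor * md)
{
    // Register apertures are normally mapped with caching inhibited.
    return (md->createMappingInTask(kernel_task, 0,
                                    kIOMapAnywhere | kIOMapInhibitCache,
                                    0, 0));
}
#endif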
2253
2254 IOReturn IOGeneralMemoryDescriptor::doUnmap(
2255 vm_map_t addressMap,
2256 IOVirtualAddress __address,
2257 IOByteCount __length )
2258 {
2259 return (super::doUnmap(addressMap, __address, __length));
2260 }
2261
2262 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2263
2264 OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
2265
2266 /* inline function implementation */
2267 IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
2268 { return( getPhysicalSegment( 0, 0 )); }
2269
2270
2271 #undef super
2272 #define super IOMemoryMap
2273
2274 OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
2275
2276 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2277
2278 bool _IOMemoryMap::init(
2279 task_t intoTask,
2280 mach_vm_address_t toAddress,
2281 IOOptionBits _options,
2282 mach_vm_size_t _offset,
2283 mach_vm_size_t _length )
2284 {
2285 if (!intoTask)
2286 return( false);
2287
2288 if (!super::init())
2289 return(false);
2290
2291 fAddressMap = get_task_map(intoTask);
2292 if (!fAddressMap)
2293 return(false);
2294 vm_map_reference(fAddressMap);
2295
2296 fAddressTask = intoTask;
2297 fOptions = _options;
2298 fLength = _length;
2299 fOffset = _offset;
2300 fAddress = toAddress;
2301
2302 return (true);
2303 }
2304
2305 bool _IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
2306 {
2307 if (!_memory)
2308 return(false);
2309
2310 if (!fSuperMap)
2311 {
2312 if( (_offset + fLength) > _memory->getLength())
2313 return( false);
2314 fOffset = _offset;
2315 }
2316
2317 _memory->retain();
2318 if (fMemory)
2319 {
2320 if (fMemory != _memory)
2321 fMemory->removeMapping(this);
2322 fMemory->release();
2323 }
2324 fMemory = _memory;
2325
2326 return( true );
2327 }
2328
2329 struct IOMemoryDescriptorMapAllocRef
2330 {
2331 ipc_port_t sharedMem;
2332 mach_vm_address_t mapped;
2333 mach_vm_size_t size;
2334 mach_vm_size_t sourceOffset;
2335 IOOptionBits options;
2336 };
2337
2338 static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
2339 {
2340 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
2341 IOReturn err;
2342
2343 do {
2344 if( ref->sharedMem)
2345 {
2346 vm_prot_t prot = VM_PROT_READ
2347 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
2348
2349 // VM system requires write access to change cache mode
2350 if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
2351 prot |= VM_PROT_WRITE;
2352
2353 // set memory entry cache
2354 vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
2355 switch (ref->options & kIOMapCacheMask)
2356 {
2357 case kIOMapInhibitCache:
2358 SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
2359 break;
2360
2361 case kIOMapWriteThruCache:
2362 SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
2363 break;
2364
2365 case kIOMapWriteCombineCache:
2366 SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
2367 break;
2368
2369 case kIOMapCopybackCache:
2370 SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
2371 break;
2372
2373 case kIOMapDefaultCache:
2374 default:
2375 SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
2376 break;
2377 }
2378
2379 vm_size_t unused = 0;
2380
2381 err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
2382 memEntryCacheMode, NULL, ref->sharedMem );
2383 if (KERN_SUCCESS != err)
2384 IOLog("MAP_MEM_ONLY failed %d\n", err);
2385
2386 err = mach_vm_map( map,
2387 &ref->mapped,
2388 ref->size, 0 /* mask */,
2389 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2390 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
2391 ref->sharedMem, ref->sourceOffset,
2392 false, // copy
2393 prot, // cur
2394 prot, // max
2395 VM_INHERIT_NONE);
2396
2397 if( KERN_SUCCESS != err) {
2398 ref->mapped = 0;
2399 continue;
2400 }
2401
2402 }
2403 else
2404 {
2405 err = mach_vm_allocate( map, &ref->mapped, ref->size,
2406 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
2407 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
2408 if( KERN_SUCCESS != err) {
2409 ref->mapped = 0;
2410 continue;
2411 }
2412 // we have to make sure that these pages don't get copied if we fork.
2413 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
2414 assert( KERN_SUCCESS == err );
2415 }
2416 }
2417 while( false );
2418
2419 return( err );
2420 }
2421
2422 kern_return_t
2423 IOMemoryDescriptorMapMemEntry(vm_map_t map, ipc_port_t entry, IOOptionBits options, bool pageable,
2424 mach_vm_size_t offset,
2425 mach_vm_address_t * address, mach_vm_size_t length)
2426 {
2427 IOReturn err;
2428 IOMemoryDescriptorMapAllocRef ref;
2429
2430 ref.sharedMem = entry;
2431 ref.sourceOffset = offset;
2432 ref.options = options;
2433
2434 ref.size = length;
2435
2436 if (options & kIOMapAnywhere)
2437 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
2438 ref.mapped = 0;
2439 else
2440 ref.mapped = *address;
2441
2442 if( ref.sharedMem && (map == kernel_map) && pageable)
2443 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
2444 else
2445 err = IOMemoryDescriptorMapAlloc( map, &ref );
2446
2447 *address = ref.mapped;
2448 return (err);
2449 }
2450
2451
2452 IOReturn IOMemoryDescriptor::doMap(
2453 vm_map_t __addressMap,
2454 IOVirtualAddress * __address,
2455 IOOptionBits options,
2456 IOByteCount __offset,
2457 IOByteCount __length )
2458 {
2459 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
2460
2461 _IOMemoryMap * mapping = (_IOMemoryMap *) *__address;
2462 mach_vm_size_t offset = mapping->fOffset + __offset;
2463 mach_vm_size_t length = mapping->fLength;
2464
2465 IOReturn err = kIOReturnSuccess;
2466 memory_object_t pager;
2467 mach_vm_size_t pageOffset;
2468 IOPhysicalAddress sourceAddr;
2469
2470 do
2471 {
2472 sourceAddr = getSourceSegment( offset, NULL );
2473 pageOffset = sourceAddr - trunc_page_32( sourceAddr );
2474
2475 if( reserved)
2476 pager = (memory_object_t) reserved->devicePager;
2477 else
2478 pager = MACH_PORT_NULL;
2479
2480 if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
2481 {
2482 upl_t redirUPL2;
2483 vm_size_t size;
2484 int flags;
2485
2486 if (!_memEntry)
2487 {
2488 err = kIOReturnNotReadable;
2489 continue;
2490 }
2491
2492 size = mapping->fLength + pageOffset;
2493 flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
2494 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
2495
2496 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
2497 NULL, NULL,
2498 &flags))
2499 redirUPL2 = NULL;
2500
2501 err = upl_transpose(redirUPL2, mapping->fRedirUPL);
2502 if (kIOReturnSuccess != err)
2503 {
2504 IOLog("upl_transpose(%x)\n", err);
2505 err = kIOReturnSuccess;
2506 }
2507
2508 if (redirUPL2)
2509 {
2510 upl_commit(redirUPL2, NULL, 0);
2511 upl_deallocate(redirUPL2);
2512 redirUPL2 = 0;
2513 }
2514 {
2515 // swap the memEntries since they now refer to different vm_objects
2516 void * me = _memEntry;
2517 _memEntry = mapping->fMemory->_memEntry;
2518 mapping->fMemory->_memEntry = me;
2519 }
2520 if (pager)
2521 err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
2522 }
2523 else
2524 {
2525 mach_vm_address_t address;
2526
2527 if (!(options & kIOMapAnywhere))
2528 {
2529 address = trunc_page_64(mapping->fAddress);
2530 if( (mapping->fAddress - address) != pageOffset)
2531 {
2532 err = kIOReturnVMError;
2533 continue;
2534 }
2535 }
2536
2537 err = IOMemoryDescriptorMapMemEntry(mapping->fAddressMap, (ipc_port_t) _memEntry,
2538 options, (kIOMemoryBufferPageable & _flags),
2539 offset, &address, round_page_64(length + pageOffset));
2540 if( err != KERN_SUCCESS)
2541 continue;
2542
2543 if (!_memEntry || pager)
2544 {
2545 err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
2546 if (err != KERN_SUCCESS)
2547 doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
2548 }
2549
2550 #ifdef DEBUG
2551 if (kIOLogMapping & gIOKitDebug)
2552 IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
2553 err, this, sourceAddr, mapping, address, offset, length);
2554 #endif
2555
2556 if (err == KERN_SUCCESS)
2557 mapping->fAddress = address + pageOffset;
2558 else
2559 mapping->fAddress = NULL;
2560 }
2561 }
2562 while( false );
2563
2564 return (err);
2565 }
2566
2567 enum {
2568 kIOMemoryRedirected = 0x00010000
2569 };
2570
2571 IOReturn IOMemoryDescriptor::handleFault(
2572 void * _pager,
2573 vm_map_t addressMap,
2574 mach_vm_address_t address,
2575 mach_vm_size_t sourceOffset,
2576 mach_vm_size_t length,
2577 IOOptionBits options )
2578 {
2579 IOReturn err = kIOReturnSuccess;
2580 memory_object_t pager = (memory_object_t) _pager;
2581 mach_vm_size_t size;
2582 mach_vm_size_t bytes;
2583 mach_vm_size_t page;
2584 mach_vm_size_t pageOffset;
2585 mach_vm_size_t pagerOffset;
2586 IOPhysicalLength segLen;
2587 addr64_t physAddr;
2588
2589 if( !addressMap)
2590 {
2591 if( kIOMemoryRedirected & _flags)
2592 {
2593 #ifdef DEBUG
2594 IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
2595 #endif
2596 do {
2597 SLEEP;
2598 } while( kIOMemoryRedirected & _flags );
2599 }
2600
2601 return( kIOReturnSuccess );
2602 }
2603
2604 physAddr = getPhysicalSegment64( sourceOffset, &segLen );
2605 assert( physAddr );
2606 pageOffset = physAddr - trunc_page_64( physAddr );
2607 pagerOffset = sourceOffset;
2608
2609 size = length + pageOffset;
2610 physAddr -= pageOffset;
2611
2612 segLen += pageOffset;
2613 bytes = size;
2614 do
2615 {
2616 // in the middle of the loop only map whole pages
2617 if( segLen >= bytes)
2618 segLen = bytes;
2619 else if( segLen != trunc_page_32( segLen))
2620 err = kIOReturnVMError;
2621 if( physAddr != trunc_page_64( physAddr))
2622 err = kIOReturnBadArgument;
2623 if (kIOReturnSuccess != err)
2624 break;
2625
2626 #ifdef DEBUG
2627 if( kIOLogMapping & gIOKitDebug)
2628 IOLog("_IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
2629 addressMap, address + pageOffset, physAddr + pageOffset,
2630 segLen - pageOffset);
2631 #endif
2632
2633
2634 if( pager) {
2635 if( reserved && reserved->pagerContig) {
2636 IOPhysicalLength allLen;
2637 addr64_t allPhys;
2638
2639 allPhys = getPhysicalSegment64( 0, &allLen );
2640 assert( allPhys );
2641 err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page_32(allLen) );
2642 }
2643 else
2644 {
2645
2646 for( page = 0;
2647 (page < segLen) && (KERN_SUCCESS == err);
2648 page += page_size)
2649 {
2650 err = device_pager_populate_object(pager, pagerOffset,
2651 (ppnum_t)(atop_64(physAddr + page)), page_size);
2652 pagerOffset += page_size;
2653 }
2654 }
2655 assert( KERN_SUCCESS == err );
2656 if( err)
2657 break;
2658 }
2659
2660 // This call to vm_fault causes an early pmap-level resolution
2661 // of the kernel mappings created above, since faulting them
2662 // in later can't take place from interrupt level.
2663 /* *** ALERT *** */
2664 /* *** Temporary Workaround *** */
2665
2666 if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
2667 {
2668 vm_fault(addressMap,
2669 (vm_map_offset_t)address,
2670 VM_PROT_READ|VM_PROT_WRITE,
2671 FALSE, THREAD_UNINT, NULL,
2672 (vm_map_offset_t)0);
2673 }
2674
2675 /* *** Temporary Workaround *** */
2676 /* *** ALERT *** */
2677
2678 sourceOffset += segLen - pageOffset;
2679 address += segLen;
2680 bytes -= segLen;
2681 pageOffset = 0;
2682
2683 }
2684 while (bytes && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));
2685
2686 if (bytes)
2687 err = kIOReturnBadArgument;
2688
2689 return (err);
2690 }
2691
2692 IOReturn IOMemoryDescriptor::doUnmap(
2693 vm_map_t addressMap,
2694 IOVirtualAddress __address,
2695 IOByteCount __length )
2696 {
2697 IOReturn err;
2698 mach_vm_address_t address;
2699 mach_vm_size_t length;
2700
2701 if (__length)
2702 {
2703 address = __address;
2704 length = __length;
2705 }
2706 else
2707 {
2708 addressMap = ((_IOMemoryMap *) __address)->fAddressMap;
2709 address = ((_IOMemoryMap *) __address)->fAddress;
2710 length = ((_IOMemoryMap *) __address)->fLength;
2711 }
2712
2713 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
2714 addressMap = IOPageableMapForAddress( address );
2715
2716 #ifdef DEBUG
2717 if( kIOLogMapping & gIOKitDebug)
2718 IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
2719 addressMap, address, length );
2720 #endif
2721
2722 err = mach_vm_deallocate( addressMap, address, length );
2723
2724 return (err);
2725 }
2726
2727 IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2728 {
2729 IOReturn err = kIOReturnSuccess;
2730 _IOMemoryMap * mapping = 0;
2731 OSIterator * iter;
2732
2733 LOCK;
2734
2735 if( doRedirect)
2736 _flags |= kIOMemoryRedirected;
2737 else
2738 _flags &= ~kIOMemoryRedirected;
2739
2740 do {
2741 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2742 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
2743 mapping->redirect( safeTask, doRedirect );
2744
2745 iter->release();
2746 }
2747 } while( false );
2748
2749 if (!doRedirect)
2750 {
2751 WAKEUP;
2752 }
2753
2754 UNLOCK;
2755
2756 // temporary binary compatibility
2757 IOSubMemoryDescriptor * subMem;
2758 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
2759 err = subMem->redirect( safeTask, doRedirect );
2760 else
2761 err = kIOReturnSuccess;
2762
2763 return( err );
2764 }
2765
2766 IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
2767 {
2768 return( _parent->redirect( safeTask, doRedirect ));
2769 }
2770
2771 IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
2772 {
2773 IOReturn err = kIOReturnSuccess;
2774
2775 if( fSuperMap) {
2776 // err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
2777 } else {
2778
2779 LOCK;
2780
2781 do
2782 {
2783 if (!fAddress)
2784 break;
2785 if (!fAddressMap)
2786 break;
2787
2788 if ((!safeTask || (get_task_map(safeTask) != fAddressMap))
2789 && (0 == (fOptions & kIOMapStatic)))
2790 {
2791 IOUnmapPages( fAddressMap, fAddress, fLength );
2792 if(!doRedirect && safeTask
2793 && (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2794 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
2795 {
2796 IOVirtualAddress iova = (IOVirtualAddress) this;
2797 err = mach_vm_deallocate( fAddressMap, fAddress, fLength );
2798 err = fMemory->doMap( fAddressMap, &iova,
2799 (fOptions & ~kIOMapAnywhere) | kIOMap64Bit/*| kIOMapReserve*/,
2800 0, 0 );
2801 } else
2802 err = kIOReturnSuccess;
2803 #ifdef DEBUG
2804 IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap);
2805 #endif
2806 }
2807 else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask))
2808 {
2809 IOOptionBits newMode;
2810 newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
2811 IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode);
2812 }
2813 }
2814 while (false);
2815 UNLOCK;
2816 }
2817
2818 if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
2819 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
2820 && safeTask
2821 && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected))))
2822 fMemory->redirect(safeTask, doRedirect);
2823
2824 return( err );
2825 }
2826
2827 IOReturn _IOMemoryMap::unmap( void )
2828 {
2829 IOReturn err;
2830
2831 LOCK;
2832
2833 if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory
2834 && (0 == (fOptions & kIOMapStatic))) {
2835
2836 err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0);
2837
2838 } else
2839 err = kIOReturnSuccess;
2840
2841 if (fAddressMap)
2842 {
2843 vm_map_deallocate(fAddressMap);
2844 fAddressMap = 0;
2845 }
2846
2847 fAddress = 0;
2848
2849 UNLOCK;
2850
2851 return( err );
2852 }
2853
2854 void _IOMemoryMap::taskDied( void )
2855 {
2856 LOCK;
2857 if( fAddressMap) {
2858 vm_map_deallocate(fAddressMap);
2859 fAddressMap = 0;
2860 }
2861 fAddressTask = 0;
2862 fAddress = 0;
2863 UNLOCK;
2864 }
2865
2866 // Overload the release mechanism. All mappings must be members
2867 // of a memory descriptor's _mappings set. This means that we
2868 // always have 2 references on a mapping. When either of these
2869 // references is released we need to free ourselves.
2870 void _IOMemoryMap::taggedRelease(const void *tag) const
2871 {
2872 LOCK;
2873 super::taggedRelease(tag, 2);
2874 UNLOCK;
2875 }
2876
2877 void _IOMemoryMap::free()
2878 {
2879 unmap();
2880
2881 if (fMemory)
2882 {
2883 LOCK;
2884 fMemory->removeMapping(this);
2885 UNLOCK;
2886 fMemory->release();
2887 }
2888
2889 if (fOwner && (fOwner != fMemory))
2890 {
2891 LOCK;
2892 fOwner->removeMapping(this);
2893 UNLOCK;
2894 }
2895
2896 if (fSuperMap)
2897 fSuperMap->release();
2898
2899 if (fRedirUPL) {
2900 upl_commit(fRedirUPL, NULL, 0);
2901 upl_deallocate(fRedirUPL);
2902 }
2903
2904 super::free();
2905 }
2906
2907 IOByteCount _IOMemoryMap::getLength()
2908 {
2909 return( fLength );
2910 }
2911
2912 IOVirtualAddress _IOMemoryMap::getVirtualAddress()
2913 {
2914 if (fSuperMap)
2915 fSuperMap->getVirtualAddress();
2916 else if (fAddressMap && vm_map_is_64bit(fAddressMap))
2917 {
2918 OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress);
2919 }
2920
2921 return (fAddress);
2922 }
2923
2924 mach_vm_address_t _IOMemoryMap::getAddress()
2925 {
2926 return( fAddress);
2927 }
2928
2929 mach_vm_size_t _IOMemoryMap::getSize()
2930 {
2931 return( fLength );
2932 }
2933
2934
2935 task_t _IOMemoryMap::getAddressTask()
2936 {
2937 if( fSuperMap)
2938 return( fSuperMap->getAddressTask());
2939 else
2940 return( fAddressTask);
2941 }
2942
2943 IOOptionBits _IOMemoryMap::getMapOptions()
2944 {
2945 return( fOptions);
2946 }
2947
2948 IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
2949 {
2950 return( fMemory );
2951 }
2952
2953 _IOMemoryMap * _IOMemoryMap::copyCompatible(
2954 _IOMemoryMap * newMapping )
2955 {
2956 task_t task = newMapping->getAddressTask();
2957 mach_vm_address_t toAddress = newMapping->fAddress;
2958 IOOptionBits _options = newMapping->fOptions;
2959 mach_vm_size_t _offset = newMapping->fOffset;
2960 mach_vm_size_t _length = newMapping->fLength;
2961
2962 if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task)))
2963 return( 0 );
2964 if( (fOptions ^ _options) & kIOMapReadOnly)
2965 return( 0 );
2966 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
2967 && ((fOptions ^ _options) & kIOMapCacheMask))
2968 return( 0 );
2969
2970 if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress))
2971 return( 0 );
2972
2973 if( _offset < fOffset)
2974 return( 0 );
2975
2976 _offset -= fOffset;
2977
2978 if( (_offset + _length) > fLength)
2979 return( 0 );
2980
2981 retain();
2982 if( (fLength == _length) && (!_offset))
2983 {
2984 newMapping->release();
2985 newMapping = this;
2986 }
2987 else
2988 {
2989 newMapping->fSuperMap = this;
2990 newMapping->fOffset = _offset;
2991 newMapping->fAddress = fAddress + _offset;
2992 }
2993
2994 return( newMapping );
2995 }
2996
2997 IOPhysicalAddress
2998 _IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
2999 {
3000 IOPhysicalAddress address;
3001
3002 LOCK;
3003 address = fMemory->getPhysicalSegment( fOffset + _offset, _length );
3004 UNLOCK;
3005
3006 return( address );
3007 }
3008
3009 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3010
3011 #undef super
3012 #define super OSObject
3013
3014 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3015
3016 void IOMemoryDescriptor::initialize( void )
3017 {
3018 if( 0 == gIOMemoryLock)
3019 gIOMemoryLock = IORecursiveLockAlloc();
3020
3021 IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
3022 ptoa_64(gIOMaximumMappedIOPageCount), 64);
3023 if (!gIOCopyMapper)
3024 {
3025 IOMapper *
3026 mapper = new IOCopyMapper;
3027 if (mapper)
3028 {
3029 if (mapper->init() && mapper->start(NULL))
3030 gIOCopyMapper = (IOCopyMapper *) mapper;
3031 else
3032 mapper->release();
3033 }
3034 }
3035
3036 gIOLastPage = IOGetLastPageNumber();
3037 }
3038
3039 void IOMemoryDescriptor::free( void )
3040 {
3041 if( _mappings)
3042 _mappings->release();
3043
3044 super::free();
3045 }
3046
3047 IOMemoryMap * IOMemoryDescriptor::setMapping(
3048 task_t intoTask,
3049 IOVirtualAddress mapAddress,
3050 IOOptionBits options )
3051 {
3052 return (createMappingInTask( intoTask, mapAddress,
3053 options | kIOMapStatic,
3054 0, getLength() ));
3055 }
3056
3057 IOMemoryMap * IOMemoryDescriptor::map(
3058 IOOptionBits options )
3059 {
3060 return (createMappingInTask( kernel_task, 0,
3061 options | kIOMapAnywhere,
3062 0, getLength() ));
3063 }
3064
3065 IOMemoryMap * IOMemoryDescriptor::map(
3066 task_t intoTask,
3067 IOVirtualAddress atAddress,
3068 IOOptionBits options,
3069 IOByteCount offset,
3070 IOByteCount length )
3071 {
3072 if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask)))
3073 {
3074 OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()");
3075 return (0);
3076 }
3077
3078 return (createMappingInTask(intoTask, atAddress,
3079 options, offset, length));
3080 }
3081
3082 IOMemoryMap * IOMemoryDescriptor::createMappingInTask(
3083 task_t intoTask,
3084 mach_vm_address_t atAddress,
3085 IOOptionBits options,
3086 mach_vm_size_t offset,
3087 mach_vm_size_t length)
3088 {
3089 IOMemoryMap * result;
3090 _IOMemoryMap * mapping;
3091
3092 if (0 == length)
3093 length = getLength();
3094
3095 mapping = new _IOMemoryMap;
3096
3097 if( mapping
3098 && !mapping->init( intoTask, atAddress,
3099 options, offset, length )) {
3100 mapping->release();
3101 mapping = 0;
3102 }
3103
3104 if (mapping)
3105 result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0);
3106 else
3107 result = 0;
3108
3109 #ifdef DEBUG
3110 if (!result)
3111 IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n",
3112 this, atAddress, options, offset, length);
3113 #endif
3114
3115 return (result);
3116 }
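/*
 * Illustrative sketch (not part of this file's implementation, and guarded out
 * of the build): mapping a descriptor into a (possibly 64-bit) user task.
 * createMappingInTask() is the preferred entry point; the older map(task, ...)
 * overload above refuses fixed-address mappings in 64-bit tasks. The example
 * function name and the task argument are hypothetical placeholders.
 */
#if 0
static IOReturn ExampleMapIntoUserTask(IOMemoryDescriptor * md, task_t userTask)
{
    IOMemoryMap * map = md->createMappingInTask(userTask, 0,
                                                kIOMapAnywhere | kIOMapReadOnly,
                                                0, 0);
    if (!map)
        return (kIOReturnVMError);

    // 64-bit safe address of the mapping in the target task.
    mach_vm_address_t userAddress = map->getAddress();
    (void) userAddress;

    // Releasing the last reference tears the mapping down again.
    map->release();

    return (kIOReturnSuccess);
}
#endif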
3117
3118 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3119 IOOptionBits options,
3120 IOByteCount offset)
3121 {
3122 return (redirect(newBackingMemory, options, (mach_vm_size_t)offset));
3123 }
3124
3125 IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
3126 IOOptionBits options,
3127 mach_vm_size_t offset)
3128 {
3129 IOReturn err = kIOReturnSuccess;
3130 IOMemoryDescriptor * physMem = 0;
3131
3132 LOCK;
3133
3134 if (fAddress && fAddressMap) do
3135 {
3136 if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3137 || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3138 {
3139 physMem = fMemory;
3140 physMem->retain();
3141 }
3142
3143 if (!fRedirUPL)
3144 {
3145 vm_size_t size = fLength;
3146 int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
3147 | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
3148 if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL,
3149 NULL, NULL,
3150 &flags))
3151 fRedirUPL = 0;
3152
3153 if (physMem)
3154 {
3155 IOUnmapPages( fAddressMap, fAddress, fLength );
3156 physMem->redirect(0, true);
3157 }
3158 }
3159
3160 if (newBackingMemory)
3161 {
3162 if (newBackingMemory != fMemory)
3163 {
3164 fOffset = 0;
3165 if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this,
3166 options | kIOMapUnique | kIOMapReference | kIOMap64Bit,
3167 offset, fLength))
3168 err = kIOReturnError;
3169 }
3170 if (fRedirUPL)
3171 {
3172 upl_commit(fRedirUPL, NULL, 0);
3173 upl_deallocate(fRedirUPL);
3174 fRedirUPL = 0;
3175 }
3176 if (physMem)
3177 physMem->redirect(0, false);
3178 }
3179 }
3180 while (false);
3181
3182 UNLOCK;
3183
3184 if (physMem)
3185 physMem->release();
3186
3187 return (err);
3188 }
3189
3190 IOMemoryMap * IOMemoryDescriptor::makeMapping(
3191 IOMemoryDescriptor * owner,
3192 task_t __intoTask,
3193 IOVirtualAddress __address,
3194 IOOptionBits options,
3195 IOByteCount __offset,
3196 IOByteCount __length )
3197 {
3198 if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit");
3199
3200 IOMemoryDescriptor * mapDesc = 0;
3201 _IOMemoryMap * result = 0;
3202 OSIterator * iter;
3203
3204 _IOMemoryMap * mapping = (_IOMemoryMap *) __address;
3205 mach_vm_size_t offset = mapping->fOffset + __offset;
3206 mach_vm_size_t length = mapping->fLength;
3207
3208 mapping->fOffset = offset;
3209
3210 LOCK;
3211
3212 do
3213 {
3214 if (kIOMapStatic & options)
3215 {
3216 result = mapping;
3217 addMapping(mapping);
3218 mapping->setMemoryDescriptor(this, 0);
3219 continue;
3220 }
3221
3222 if (kIOMapUnique & options)
3223 {
3224 IOPhysicalAddress phys;
3225 IOByteCount physLen;
3226
3227 // if (owner != this) continue;
3228
3229 if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
3230 || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
3231 {
3232 phys = getPhysicalSegment(offset, &physLen);
3233 if (!phys || (physLen < length))
3234 continue;
3235
3236 mapDesc = IOMemoryDescriptor::withPhysicalAddress(
3237 phys, length, _direction);
3238 if (!mapDesc)
3239 continue;
3240 offset = 0;
3241 mapping->fOffset = offset;
3242 }
3243 }
3244 else
3245 {
3246 // look for a compatible existing mapping
3247 if( (iter = OSCollectionIterator::withCollection(_mappings)))
3248 {
3249 _IOMemoryMap * lookMapping;
3250 while ((lookMapping = (_IOMemoryMap *) iter->getNextObject()))
3251 {
3252 if ((result = lookMapping->copyCompatible(mapping)))
3253 {
3254 addMapping(result);
3255 result->setMemoryDescriptor(this, offset);
3256 break;
3257 }
3258 }
3259 iter->release();
3260 }
3261 if (result || (options & kIOMapReference))
3262 continue;
3263 }
3264
3265 if (!mapDesc)
3266 {
3267 mapDesc = this;
3268 mapDesc->retain();
3269 }
3270 IOReturn
3271 kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 );
3272 if (kIOReturnSuccess == kr)
3273 {
3274 result = mapping;
3275 mapDesc->addMapping(result);
3276 result->setMemoryDescriptor(mapDesc, offset);
3277 }
3278 else
3279 {
3280 mapping->release();
3281 mapping = NULL;
3282 }
3283 }
3284 while( false );
3285
3286 UNLOCK;
3287
3288 if (mapDesc)
3289 mapDesc->release();
3290
3291 return (result);
3292 }
3293
3294 void IOMemoryDescriptor::addMapping(
3295 IOMemoryMap * mapping )
3296 {
3297 if( mapping)
3298 {
3299 if( 0 == _mappings)
3300 _mappings = OSSet::withCapacity(1);
3301 if( _mappings )
3302 _mappings->setObject( mapping );
3303 }
3304 }
3305
3306 void IOMemoryDescriptor::removeMapping(
3307 IOMemoryMap * mapping )
3308 {
3309 if( _mappings)
3310 _mappings->removeObject( mapping);
3311 }
3312
3313 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3314
3315 #undef super
3316 #define super IOMemoryDescriptor
3317
3318 OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
3319
3320 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3321
3322 bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
3323 IOByteCount offset, IOByteCount length,
3324 IODirection direction )
3325 {
3326 if( !parent)
3327 return( false);
3328
3329 if( (offset + length) > parent->getLength())
3330 return( false);
3331
3332 /*
3333 * We can check the _parent instance variable before having ever set it
3334 * to an initial value because I/O Kit guarantees that all our instance
3335 * variables are zeroed on an object's allocation.
3336 */
3337
3338 if( !_parent) {
3339 if( !super::init())
3340 return( false );
3341 } else {
3342 /*
3343 * An existing memory descriptor is being retargeted to
3344 * point to somewhere else. Clean up our present state.
3345 */
3346
3347 _parent->release();
3348 _parent = 0;
3349 }
3350
3351 parent->retain();
3352 _parent = parent;
3353 _start = offset;
3354 _length = length;
3355 _direction = direction;
3356 _tag = parent->getTag();
3357
3358 return( true );
3359 }
3360
3361 void IOSubMemoryDescriptor::free( void )
3362 {
3363 if( _parent)
3364 _parent->release();
3365
3366 super::free();
3367 }
3368
3369
3370 IOReturn
3371 IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
3372 {
3373 IOReturn rtn;
3374
3375 if (kIOMDGetCharacteristics == op) {
3376
3377 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3378 if (kIOReturnSuccess == rtn) {
3379 IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
3380 data->fLength = _length;
3381 data->fSGCount = 0; // XXX gvdl: need to compute SG count and pages
3382 data->fPages = 0;
3383 data->fPageAlign = 0;
3384 }
3385
3386 return rtn;
3387 }
3388 else if (kIOMDWalkSegments & op) {
3389 if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
3390 return kIOReturnUnderrun;
3391
3392 IOMDDMAWalkSegmentArgs *data =
3393 reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
3394 UInt offset = data->fOffset;
3395 UInt remain = _length - offset;
3396 if ((int) remain <= 0)
3397 return (!remain)? kIOReturnOverrun : kIOReturnInternalError;
3398
3399 data->fOffset = offset + _start;
3400 rtn = _parent->dmaCommandOperation(op, vData, dataSize);
3401 if (data->fLength > remain)
3402 data->fLength = remain;
3403 data->fOffset = offset;
3404
3405 return rtn;
3406 }
3407 else
3408 return kIOReturnBadArgument;
3409 }
3410
3411 addr64_t
3412 IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
3413 {
3414 addr64_t address;
3415 IOByteCount actualLength;
3416
3417 assert(offset <= _length);
3418
3419 if( length)
3420 *length = 0;
3421
3422 if( offset >= _length)
3423 return( 0 );
3424
3425 address = _parent->getPhysicalSegment64( offset + _start, &actualLength );
3426
3427 if( address && length)
3428 *length = min( _length - offset, actualLength );
3429
3430 return( address );
3431 }
3432
3433 IOPhysicalAddress
3434 IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
3435 {
3436 IOPhysicalAddress address;
3437 IOByteCount actualLength;
3438
3439 assert(offset <= _length);
3440
3441 if( length)
3442 *length = 0;
3443
3444 if( offset >= _length)
3445 return( 0 );
3446
3447 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
3448
3449 if( address && length)
3450 *length = min( _length - offset, actualLength );
3451
3452 return( address );
3453 }
3454
3455 IOPhysicalAddress
3456 IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
3457 {
3458 IOPhysicalAddress address;
3459 IOByteCount actualLength;
3460
3461 assert(offset <= _length);
3462
3463 if( length)
3464 *length = 0;
3465
3466 if( offset >= _length)
3467 return( 0 );
3468
3469 address = _parent->getSourceSegment( offset + _start, &actualLength );
3470
3471 if( address && length)
3472 *length = min( _length - offset, actualLength );
3473
3474 return( address );
3475 }
3476
3477 void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
3478 IOByteCount * lengthOfSegment)
3479 {
3480 return( 0 );
3481 }
3482
3483 IOReturn IOSubMemoryDescriptor::doMap(
3484 vm_map_t addressMap,
3485 IOVirtualAddress * atAddress,
3486 IOOptionBits options,
3487 IOByteCount sourceOffset,
3488 IOByteCount length )
3489 {
3490 panic("IOSubMemoryDescriptor::doMap");
3491 return (IOMemoryDescriptor::doMap(addressMap, atAddress, options, sourceOffset, length));
3492 }
3493
3494 IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
3495 void * bytes, IOByteCount length)
3496 {
3497 IOByteCount byteCount;
3498
3499 assert(offset <= _length);
3500
3501 if( offset >= _length)
3502 return( 0 );
3503
3504 LOCK;
3505 byteCount = _parent->readBytes( _start + offset, bytes,
3506 min(length, _length - offset) );
3507 UNLOCK;
3508
3509 return( byteCount );
3510 }
3511
3512 IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
3513 const void* bytes, IOByteCount length)
3514 {
3515 IOByteCount byteCount;
3516
3517 assert(offset <= _length);
3518
3519 if( offset >= _length)
3520 return( 0 );
3521
3522 LOCK;
3523 byteCount = _parent->writeBytes( _start + offset, bytes,
3524 min(length, _length - offset) );
3525 UNLOCK;
3526
3527 return( byteCount );
3528 }
3529
3530 IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
3531 IOOptionBits * oldState )
3532 {
3533 IOReturn err;
3534
3535 LOCK;
3536 err = _parent->setPurgeable( newState, oldState );
3537 UNLOCK;
3538
3539 return( err );
3540 }
3541
3542 IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
3543 IOByteCount offset, IOByteCount length )
3544 {
3545 IOReturn err;
3546
3547 assert(offset <= _length);
3548
3549 if( offset >= _length)
3550 return( kIOReturnOverrun );
3551
3552 LOCK;
3553 err = _parent->performOperation( options, _start + offset,
3554 min(length, _length - offset) );
3555 UNLOCK;
3556
3557 return( err );
3558 }
3559
3560 IOReturn IOSubMemoryDescriptor::prepare(
3561 IODirection forDirection)
3562 {
3563 IOReturn err;
3564
3565 LOCK;
3566 err = _parent->prepare( forDirection);
3567 UNLOCK;
3568
3569 return( err );
3570 }
3571
3572 IOReturn IOSubMemoryDescriptor::complete(
3573 IODirection forDirection)
3574 {
3575 IOReturn err;
3576
3577 LOCK;
3578 err = _parent->complete( forDirection);
3579 UNLOCK;
3580
3581 return( err );
3582 }
3583
3584 IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
3585 IOMemoryDescriptor * owner,
3586 task_t intoTask,
3587 IOVirtualAddress address,
3588 IOOptionBits options,
3589 IOByteCount offset,
3590 IOByteCount length )
3591 {
3592 IOMemoryMap * mapping = 0;
3593
3594 if (!(kIOMap64Bit & options))
3595 {
3596 panic("IOSubMemoryDescriptor::makeMapping !64bit");
3597 }
3598
3599 mapping = (IOMemoryMap *) _parent->makeMapping(
3600 owner,
3601 intoTask,
3602 address,
3603 options, _start + offset, length );
3604
3605 return( mapping );
3606 }
3607
3608 /* ick */
3609
3610 bool
3611 IOSubMemoryDescriptor::initWithAddress(void * address,
3612 IOByteCount length,
3613 IODirection direction)
3614 {
3615 return( false );
3616 }
3617
3618 bool
3619 IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
3620 IOByteCount length,
3621 IODirection direction,
3622 task_t task)
3623 {
3624 return( false );
3625 }
3626
3627 bool
3628 IOSubMemoryDescriptor::initWithPhysicalAddress(
3629 IOPhysicalAddress address,
3630 IOByteCount length,
3631 IODirection direction )
3632 {
3633 return( false );
3634 }
3635
3636 bool
3637 IOSubMemoryDescriptor::initWithRanges(
3638 IOVirtualRange * ranges,
3639 UInt32 withCount,
3640 IODirection direction,
3641 task_t task,
3642 bool asReference)
3643 {
3644 return( false );
3645 }
3646
3647 bool
3648 IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
3649 UInt32 withCount,
3650 IODirection direction,
3651 bool asReference)
3652 {
3653 return( false );
3654 }
3655
3656 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3657
3658 bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3659 {
3660 OSSymbol const *keys[2];
3661 OSObject *values[2];
3662 struct SerData {
3663 user_addr_t address;
3664 user_size_t length;
3665 } *vcopy;
3666 unsigned int index, nRanges;
3667 bool result;
3668
3669 IOOptionBits type = _flags & kIOMemoryTypeMask;
3670
3671 if (s == NULL) return false;
3672 if (s->previouslySerialized(this)) return true;
3673
3674 // Pretend we are an array.
3675 if (!s->addXMLStartTag(this, "array")) return false;
3676
3677 nRanges = _rangesCount;
3678 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
3679 if (vcopy == 0) return false;
3680
3681 keys[0] = OSSymbol::withCString("address");
3682 keys[1] = OSSymbol::withCString("length");
3683
3684 result = false;
3685 values[0] = values[1] = 0;
3686
3687 // From this point on we can go to the bail: label on error.
3688
3689 // Copy the volatile data so we don't have to allocate memory
3690 // while the lock is held.
3691 LOCK;
3692 if (nRanges == _rangesCount) {
3693 Ranges vec = _ranges;
3694 for (index = 0; index < nRanges; index++) {
3695 user_addr_t addr; IOByteCount len;
3696 getAddrLenForInd(addr, len, type, vec, index);
3697 vcopy[index].address = addr;
3698 vcopy[index].length = len;
3699 }
3700 } else {
3701 // The descriptor changed out from under us. Give up.
3702 UNLOCK;
3703 result = false;
3704 goto bail;
3705 }
3706 UNLOCK;
3707
3708 for (index = 0; index < nRanges; index++)
3709 {
3710 user_addr_t addr = vcopy[index].address;
3711 IOByteCount len = (IOByteCount) vcopy[index].length;
3712 values[0] =
3713 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
3714 if (values[0] == 0) {
3715 result = false;
3716 goto bail;
3717 }
3718 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
3719 if (values[1] == 0) {
3720 result = false;
3721 goto bail;
3722 }
3723 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3724 if (dict == 0) {
3725 result = false;
3726 goto bail;
3727 }
3728 values[0]->release();
3729 values[1]->release();
3730 values[0] = values[1] = 0;
3731
3732 result = dict->serialize(s);
3733 dict->release();
3734 if (!result) {
3735 goto bail;
3736 }
3737 }
3738 result = s->addXMLEndTag("array");
3739
3740 bail:
3741 if (values[0])
3742 values[0]->release();
3743 if (values[1])
3744 values[1]->release();
3745 if (keys[0])
3746 keys[0]->release();
3747 if (keys[1])
3748 keys[1]->release();
3749 if (vcopy)
3750 IOFree(vcopy, sizeof(SerData) * nRanges);
3751 return result;
3752 }
3753
3754 bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3755 {
3756 if (!s) {
3757 return (false);
3758 }
3759 if (s->previouslySerialized(this)) return true;
3760
3761 // Pretend we are a dictionary.
3762 // We must duplicate the functionality of OSDictionary here
3763 // because otherwise object references will not work;
3764 // they are based on the value of the object passed to
3765 // previouslySerialized and addXMLStartTag.
3766
3767 if (!s->addXMLStartTag(this, "dict")) return false;
3768
3769 char const *keys[3] = {"offset", "length", "parent"};
3770
3771 OSObject *values[3];
3772 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3773 if (values[0] == 0)
3774 return false;
3775 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3776 if (values[1] == 0) {
3777 values[0]->release();
3778 return false;
3779 }
3780 values[2] = _parent;
3781
3782 bool result = true;
3783 for (int i=0; i<3; i++) {
3784 if (!s->addString("<key>") ||
3785 !s->addString(keys[i]) ||
3786 !s->addXMLEndTag("key") ||
3787 !values[i]->serialize(s)) {
3788 result = false;
3789 break;
3790 }
3791 }
3792 values[0]->release();
3793 values[1]->release();
3794 if (!result) {
3795 return false;
3796 }
3797
3798 return s->addXMLEndTag("dict");
3799 }
3800
3801 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3802
3803 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
3804 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
3805 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
3806 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
3807 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
3808 OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
3809 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
3810 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
3811 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
3812 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
3813 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
3814 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
3815 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
3816 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
3817 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
3818 OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
3819
3820 /* ex-inline function implementation */
3821 IOPhysicalAddress
3822 IOMemoryDescriptor::getPhysicalAddress()
3823 { return( getPhysicalSegment( 0, 0 )); }
3824
3825
3826