// apple/xnu (release xnu-792.10.96)
// iokit/Kernel/IOMemoryDescriptor.cpp
/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <vm/vm_fault.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
        memory_object_t         pager,
        int                     device_handle,
        vm_size_t               size,
        int                     flags);
void
device_pager_deallocate(
        memory_object_t);
kern_return_t
device_pager_populate_object(
        memory_object_t         pager,
        vm_object_offset_t      offset,
        ppnum_t                 phys_addr,
        vm_size_t               size);
kern_return_t
memory_object_iopl_request(
        ipc_port_t              port,
        memory_object_offset_t  offset,
        vm_size_t               *upl_size,
        upl_t                   *upl_ptr,
        upl_page_info_array_t   user_page_list,
        unsigned int            *page_list_count,
        int                     *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS
#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

IOCopyMapper *    gIOCopyMapper = NULL;

static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

ppnum_t           gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)
public:
    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;
    upl_t                redirUPL;
    ipc_port_t           redirEntry;
    IOMemoryDescriptor * owner;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits         options,
                                          IOByteCount          offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor *    owner,
                task_t                  intoTask,
                IOVirtualAddress        toAddress,
                IOOptionBits            options,
                IOByteCount             offset,
                IOByteCount             length );

    bool initCompatible(
        IOMemoryDescriptor *    memory,
        IOMemoryMap *           superMap,
        IOByteCount             offset,
        IOByteCount             length );

    bool initWithDescriptor(
        IOMemoryDescriptor *    memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length );

    IOReturn redirect(
        task_t                  intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// Some data structures and accessor macros used by the initWithOptions
// Function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t fPageInfo;      // Pointer to page list or index into it
    ppnum_t fMappedBase;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[];
    ioPLBlock fBlocks[];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
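// Layout note: the OSData buffer behind these macros holds one
// variable-length blob:
//
//     [ ioGMDData header | fPageCnt upl_page_info_t entries | ioPLBlock entries ]
//
// fPageList and fBlocks are overlapping flexible arrays: getIOPLList() skips
// past the page list to reach the ioPLBlock array, and getNumIOPL() derives
// the block count from the OSData's current length, so no explicit count is
// stored anywhere.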
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )


extern "C" {

kern_return_t device_data_action(
               int                   device_handle,
               ipc_port_t            device_pager,
               vm_prot_t             protection,
               vm_object_offset_t    offset,
               vm_size_t             size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"
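// device_data_action() and device_close() are the callbacks invoked through
// the device pager created by the device-memory mapping path elsewhere in
// this file: the former services a paging request by re-entering the owning
// memory descriptor's handleFault() (under the global memory lock, since the
// descriptor may be going away), and the latter frees the reference
// structure when the pager is shut down. Both re-declare ExpansionData
// locally to match the descriptor's private reserved-data layout.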
// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(addr64_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO      == type
        || kIOMemoryTypeVirtual  == type || kIOMemoryTypeVirtual64  == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
#if TEST_V64
    if (task)
    {
        IOOptionBits options = (IOOptionBits) direction;
        if (task == kernel_task)
            options |= kIOMemoryAutoPrepare;
        return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
    }
#endif
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress       address,
                                IOByteCount             length,
                                IODirection             direction )
{
#if TEST_P64
    return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
#endif
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
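// Illustrative caller-side sketch (not part of this file; userAddr, userLen
// and userTask are the caller's values): wrapping a user buffer for I/O with
// the 64-bit range factory might look like
//
//     IOMemoryDescriptor * md =
//         IOMemoryDescriptor::withAddressRange(userAddr, userLen,
//                                              kIODirectionOut, userTask);
//     if (md && (kIOReturnSuccess == md->prepare())) {
//         // ... drive DMA from the descriptor's physical segments ...
//         md->complete();
//     }
//     if (md)
//         md->release();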
/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

// Can't leave abstract, but this should never be used directly.
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
        self->release();
        self = 0;
    }
    return self;
}
IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t  actualSize = size;
    vm_prot_t             prot       = VM_PROT_READ | VM_PROT_WRITE;
    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
#if IOASSERT
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                  (UInt64)range0Addr, (UInt32)actualSize, size);
#endif
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}
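// Note on the persistent-descriptor path above: createNamedEntry() asks the
// VM for a named entry covering this descriptor's pages, passing any
// existing _memEntry with MAP_MEM_NAMED_REUSE. If the VM hands back that
// same entry, the original descriptor still describes the same memory and
// is simply retained; otherwise a fresh descriptor is built around the new
// entry through the kIOMemoryTypePersistentMD initWithOptions() path.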
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                    IOByteCount withLength,
                                    IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                    IOByteCount withLength,
                                    IODirection withDirection,
                                    task_t      withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress      address,
                                 IOByteCount            withLength,
                                 IODirection            withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges from a given task, several physical ranges, a UPL
 * from the ubc system, or a uio (may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;
        count = 1;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
    case kIOMemoryTypeVirtual64:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:         // Neither Physical nor UPL should have a task
    case kIOMemoryTypePhysical64:
        mapper = kIOMapperNone;

    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);
    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        while (_wireCount)
            complete();
        if (_ranges.v && _rangesIsAllocated)
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }
        if (_memEntry)
            { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }
    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options           &= ~(kIOMemoryPreparedReadOnly);
    _flags             = options;
    _task              = task;

    // DEPRECATED variable initialisation
    _direction         = (IODirection) (_flags & kIOMemoryDirectionMask);

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    __iomd_reservedC = 0;

    _highestPage = 0;
    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

 //     _wireCount++;   // UPLs start out life wired

        _length    = count;
        _pages    += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
            _rangesIsAllocated = true;
            switch (_flags & kIOMemoryTypeMask)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
              case kIOMemoryTypeVirtual:
                _ranges.v = IONew(IOVirtualRange, count);
                if (!_ranges.v)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            UInt32 len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);      // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is free-ed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}
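// Sizing note: computeDataSize(_pages, count * 2) above reserves capacity
// for one upl_page_info_t per page plus up to two ioPLBlock records per
// range, since wireVirtual() may split a range across more than one UPL;
// wireVirtual() still checks each appendBytes() result in case that
// estimate is exceeded.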
/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
        reserved->memory = 0;
    UNLOCK;

    while (_wireCount)
        complete();
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
    {
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
    panic("IOGMD::unmapFromKernel deprecated");
/* DEPRECATED */ }
/* DEPRECATED */
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
    panic("IOGMD::mapIntoKernel deprecated");
/* DEPRECATED */ }
/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

// @@@ gvdl: who is using this API?  Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    assert(!remaining);

    return length - remaining;
}
IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}
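// readBytes()/writeBytes() copy through physical addresses via copypv(), so
// they work whether or not the descriptor is mapped anywhere. Illustrative
// use (names are the caller's): after md->prepare() succeeds,
// md->readBytes(0, localBuf, len) pulls the first len bytes of the
// descriptor's memory into the kernel buffer localBuf.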
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
    panic("IOGMD::setPosition deprecated");
/* DEPRECATED */ }
IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {

	if (dataSize < sizeof(IOMDDMACharacteristics))
	    return kIOReturnUnderrun;

	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
	data->fLength = _length;
	data->fSGCount = _rangesCount;
	data->fPages = _pages;
	data->fDirection = _direction;
	if (!_wireCount)
	    data->fIsPrepared = false;
	else {
	    data->fIsPrepared = true;
	    data->fHighestPage = _highestPage;
	    if (_memoryEntries) {
		ioGMDData *gmdData = getDataP(_memoryEntries);
		ioPLBlock *ioplList = getIOPLList(gmdData);
		UInt count = getNumIOPL(_memoryEntries, gmdData);

		data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
			       && ioplList[0].fMappedBase);
		if (count == 1)
		    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
	    }
	    else
		data->fIsMapped = false;
	}

	return kIOReturnSuccess;
    }
    else if (!(kIOMDWalkSegments & op))
	return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
	IOMDDMAWalkSegmentArgs fIO;
	UInt fOffset2Index;
	UInt fIndex;
	UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
	return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (offset >= _length)
	return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if ((kIOMDFirstSegment != op)
	&& offset
	&& (offset == isP->fNextOffset || off2Ind <= offset))
	ind = isP->fIndex;
    else
	ind = off2Ind = 0;	// Start from beginning

    UInt length;
    UInt64 address;
    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

	// Physical address based memory descriptor
	const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

	// Find the range after the one that contains the offset
	UInt len;
	for (len = 0; off2Ind <= offset; ind++) {
	    len = physP[ind].length;
	    off2Ind += len;
	}

	// Calculate length within range and starting address
	length   = off2Ind - offset;
	address  = physP[ind - 1].address + len - length;

	// see how far we can coalesce ranges
	while (ind < _rangesCount && address + length == physP[ind].address) {
	    len = physP[ind].length;
	    length += len;
	    off2Ind += len;
	    ind++;
	}

	// correct contiguous check overshoot
	ind--;
	off2Ind -= len;
    }
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

	// Physical address based memory descriptor
	const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

	// Find the range after the one that contains the offset
	mach_vm_size_t len;
	for (len = 0; off2Ind <= offset; ind++) {
	    len = physP[ind].length;
	    off2Ind += len;
	}

	// Calculate length within range and starting address
	length   = off2Ind - offset;
	address  = physP[ind - 1].address + len - length;

	// see how far we can coalesce ranges
	while (ind < _rangesCount && address + length == physP[ind].address) {
	    len = physP[ind].length;
	    length += len;
	    off2Ind += len;
	    ind++;
	}

	// correct contiguous check overshoot
	ind--;
	off2Ind -= len;
    }
    else do {
	if (!_wireCount)
	    panic("IOGMD: not wired for the IODMACommand");

	assert(_memoryEntries);

	ioGMDData * dataP = getDataP(_memoryEntries);
	const ioPLBlock *ioplList = getIOPLList(dataP);
	UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
	upl_page_info_t *pageList = getPageList(dataP);

	assert(numIOPLs > 0);

	// Scan through iopl info blocks looking for block containing offset
	while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
	    ind++;

	// Go back to actual range as search goes past it
	ioPLBlock ioplInfo = ioplList[ind - 1];
	off2Ind = ioplInfo.fIOMDOffset;

	if (ind < numIOPLs)
	    length = ioplList[ind].fIOMDOffset;
	else
	    length = _length;
	length -= offset;			// Remainder within iopl

	// Subtract offset till this iopl in total list
	offset -= off2Ind;

	// If a mapped address is requested and this is a pre-mapped IOPL
	// then just need to compute an offset relative to the mapped base.
	if (mapped && ioplInfo.fMappedBase) {
	    offset += (ioplInfo.fPageOffset & PAGE_MASK);
	    address = ptoa_64(ioplInfo.fMappedBase) + offset;
	    continue;	// Done; leave the do/while(false) now
	}

	// The offset is rebased into the current iopl.
	// Now add the iopl 1st page offset.
	offset += ioplInfo.fPageOffset;

	// For external UPLs the fPageInfo field points directly to
	// the upl's upl_page_info_t array.
	if (ioplInfo.fFlags & kIOPLExternUPL)
	    pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
	else
	    pageList = &pageList[ioplInfo.fPageInfo];

	// Check for direct device non-paged memory
	if ( ioplInfo.fFlags & kIOPLOnDevice ) {
	    address = ptoa_64(pageList->phys_addr) + offset;
	    continue;	// Done; leave the do/while(false) now
	}

	// Now we need to compute the index into the pageList
	UInt pageInd = atop_32(offset);
	offset &= PAGE_MASK;

	// Compute the starting address of this segment
	IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
	address = ptoa_64(pageAddr) + offset;

	// length is currently set to the length of the remainder of the iopl.
	// We need to check that the remainder of the iopl is contiguous.
	// This is indicated by pageList[ind].phys_addr being sequential.
	IOByteCount contigLength = PAGE_SIZE - offset;
	while (contigLength < length
		&& ++pageAddr == pageList[++pageInd].phys_addr)
	{
	    contigLength += PAGE_SIZE;
	}

	if (contigLength < length)
	    length = contigLength;

	assert(address);
	assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
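// Walk-segment protocol: an IODMACommand drives dmaCommandOperation()
// repeatedly. kIOMDFirstSegment restarts the walk at the requested offset;
// on subsequent kIOMDWalkSegments calls the cached fIndex / fOffset2Index /
// fNextOffset values let the scan resume where the previous segment ended
// instead of re-searching the range list from offset zero each time.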
addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

    if (offset < _length)	// (within bounds?)
    {
	IOMDDMAWalkSegmentState _state;
	IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

	state->fOffset = offset;
	state->fLength = _length - offset;
	state->fMapped = false;

	ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

	if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
	    DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			ret, this, state->fOffset,
			state->fIOVMAddr, state->fLength);
	if (kIOReturnSuccess == ret)
	{
	    address = state->fIOVMAddr;
	    length  = state->fLength;
	}
	if (!address)
	    length = 0;
    }

    if (lengthOfSegment)
	*lengthOfSegment = length;

    return (address);
}
IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

//  assert(offset <= _length);

    if (offset < _length)	// (within bounds?)
    {
	IOMDDMAWalkSegmentState _state;
	IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

	state->fOffset = offset;
	state->fLength = _length - offset;
	state->fMapped = true;

	ret = dmaCommandOperation(
		kIOMDFirstSegment, _state, sizeof(_state));

	if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
	    DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
			ret, this, state->fOffset,
			state->fIOVMAddr, state->fLength);
	if (kIOReturnSuccess == ret)
	{
	    address = state->fIOVMAddr;
	    length  = state->fLength;
	}

	if (!address)
	    length = 0;
    }

    if ((address + length) > 0x100000000ULL)
    {
	panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%x, class %s",
		    address, length, (getMetaClass())->getClassName());
    }

    if (lengthOfSegment)
	*lengthOfSegment = length;

    return ((IOPhysicalAddress) address);
}
addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
	return 0;

    if (gIOSystemMapper)
	mapper = gIOSystemMapper;

    if (mapper)
    {
	IOByteCount origLen;

	phys64 = mapper->mapAddr(phys32);
	origLen = *lengthOfSegment;
	length = page_size - (phys64 & (page_size - 1));
	while ((length < origLen)
	    && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
	    length += page_size;
	if (length > origLen)
	    length = origLen;

	*lengthOfSegment = length;
    }
    else
	phys64 = (addr64_t) phys32;

    return phys64;
}
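// Translation note: when a system IOMapper is active, the 32-bit address
// returned by getPhysicalSegment() is really an IOVM (mapped) address, so it
// is translated back page by page with mapAddr(); the loop above grows the
// segment only while successive pages translate to contiguous true physical
// addresses.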
IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;
    IOOptionBits      type    = _flags & kIOMemoryTypeMask;

    assert(offset <= _length);

    if ( type == kIOMemoryTypeUPL)
	return super::getSourceSegment( offset, lengthOfSegment );
    else if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;
	Ranges vec = _ranges;
	user_addr_t addr;

	// Find starting address within the vector of ranges
	for (;;) {
	    getAddrLenForInd(addr, length, type, vec, rangesIndex);
	    if (offset < length)
		break;
	    offset -= length; // (make offset relative)
	    rangesIndex++;
	}

	// Now that we have the starting range,
	// let's find the last contiguous range
        addr   += offset;
        length -= offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) {
	    user_addr_t      newAddr;
	    IOPhysicalLength newLen;

	    getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
	    if (addr + length != newAddr)
		break;
	    length += newLen;
	}
        if (addr)
	    address = (IOPhysicalAddress) addr;	// Truncate address to 32bit
	else
	    length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}

/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {
	if (dataSize < sizeof(IOMDDMACharacteristics))
	    return kIOReturnUnderrun;

	IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
	data->fLength = getLength();
	data->fSGCount = 0;
	data->fDirection = _direction;
	if (IOMapper::gSystem)
	    data->fIsMapped = true;
	data->fIsPrepared = true;	// Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments & op) {
	if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
	    return kIOReturnUnderrun;

	IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
	IOByteCount offset  = (IOByteCount) data->fOffset;

	IOPhysicalLength length;
	IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
	if (data->fMapped && IOMapper::gSystem)
	    data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
	else
	    data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
	data->fLength = length;
    }
    else
	return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }

	control = VM_PURGABLE_SET_STATE;
	switch (newState)
	{
	    case kIOMemoryPurgeableKeepCurrent:
		control = VM_PURGABLE_GET_STATE;
		break;

	    case kIOMemoryPurgeableNonVolatile:
		state = VM_PURGABLE_NONVOLATILE;
		break;
	    case kIOMemoryPurgeableVolatile:
		state = VM_PURGABLE_VOLATILE;
		break;
	    case kIOMemoryPurgeableEmpty:
		state = VM_PURGABLE_EMPTY;
		break;
	    default:
		err = kIOReturnBadArgument;
		break;
	}

	if (kIOReturnSuccess != err)
	    break;

	err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

	if (oldState)
	{
	    if (kIOReturnSuccess == err)
	    {
		switch (state)
		{
		    case VM_PURGABLE_NONVOLATILE:
			state = kIOMemoryPurgeableNonVolatile;
			break;
		    case VM_PURGABLE_VOLATILE:
			state = kIOMemoryPurgeableVolatile;
			break;
		    case VM_PURGABLE_EMPTY:
			state = kIOMemoryPurgeableEmpty;
			break;
		    default:
			state = kIOMemoryPurgeableNonVolatile;
			err = kIOReturnNotReady;
			break;
		}
		*oldState = state;
	    }
	}
    }
    while (false);

    return (err);
}
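// Illustrative use (caller-side sketch, names are the caller's): querying
// without changing state passes kIOMemoryPurgeableKeepCurrent with a
// non-NULL oldState:
//
//     IOOptionBits oldState;
//     IOReturn rc = md->setPurgeable(kIOMemoryPurgeableKeepCurrent, &oldState);
//
// The descriptor must have a named memory entry (_memEntry), otherwise
// kIOReturnNotReady is returned.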
1647extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
1648extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);
1649
1650IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
1651 IOByteCount offset, IOByteCount length )
1652{
1653 IOByteCount remaining;
1654 void (*func)(addr64_t pa, unsigned int count) = 0;
1655
1656 switch (options)
1657 {
1658 case kIOMemoryIncoherentIOFlush:
1659 func = &dcache_incoherent_io_flush64;
1660 break;
1661 case kIOMemoryIncoherentIOStore:
1662 func = &dcache_incoherent_io_store64;
1663 break;
1664 }
1665
1666 if (!func)
1667 return (kIOReturnUnsupported);
1668
1669 remaining = length = min(length, getLength() - offset);
1670 while (remaining)
1671 // (process another target segment?)
1672 {
1673 addr64_t dstAddr64;
1674 IOByteCount dstLen;
1675
1676 dstAddr64 = getPhysicalSegment64(offset, &dstLen);
1677 if (!dstAddr64)
1678 break;
1679
1680 // Clip segment length to remaining
1681 if (dstLen > remaining)
1682 dstLen = remaining;
1683
1684 (*func)(dstAddr64, dstLen);
1685
1686 offset += dstLen;
1687 remaining -= dstLen;
1688 }
1689
1690 return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
1691}
1692
#ifdef __ppc__
extern vm_offset_t              static_memory_end;
#define io_kernel_static_end    static_memory_end
#else
extern vm_offset_t              first_avail;
#define io_kernel_static_end    first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
        vm_map_t                /* map */,
        vm_address_t            offset,
        vm_size_t               *upl_size,
        upl_t                   *upl,
        upl_page_info_array_t   page_list,
        unsigned int            *count,
        ppnum_t                 *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}
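// io_get_kernel_static_upl() stands in for vm_map_create_upl() when wiring
// memory below io_kernel_static_end: the kernel's static/bootstrap region is
// not managed by the VM, so no real UPL can be created for it. The page list
// is instead synthesized straight from the pmap, and *upl is left NULL so
// callers know there is nothing to commit or abort later.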
IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if (_pages >= gIOMaximumMappedIOPageCount)
	return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;	// May no longer be valid so lets not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

#ifdef UPL_NEED_32BIT_ADDR
    if (kIODirectionPrepareToPhys32 & forDirection)
	uplFlags |= UPL_NEED_32BIT_ADDR;
#endif

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }
    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    ppnum_t highestPage = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
	user_addr_t startPage;
        IOByteCount numBytes;
	ppnum_t highPage = 0;

	// Get the startPage address and length of vec[range]
	getAddrLenForInd(startPage, numBytes, type, vec, range);
	iopl.fPageOffset = (short) startPage & PAGE_MASK;
	numBytes += iopl.fPageOffset;
	startPage = trunc_page_64(startPage);

	if (mapper)
	    iopl.fMappedBase = mapBase + pageIndex;
	else
	    iopl.fMappedBase = 0;

	// Iterate over the current range, creating UPLs
        while (numBytes) {
            dataP = getDataP(_memoryEntries);
	    vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
	    if (curMap)
		theMap = curMap;
	    else if (!sharedMem) {
		assert(_task == kernel_task);
		theMap = IOPageableMapForAddress(kernelStart);
	    }
	    else
		theMap = NULL;

            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

	    if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
		error = io_get_kernel_static_upl(theMap,
						kernelStart,
						&ioplSize,
						&iopl.fIOPL,
						baseInfo,
						&numPageInfo,
						&highPage);
	    }
	    else if (sharedMem) {
		error = memory_object_iopl_request(sharedMem,
						ptoa_32(pageIndex),
						&ioplSize,
						&iopl.fIOPL,
						baseInfo,
						&numPageInfo,
						&ioplFlags);
	    }
	    else {
		assert(theMap);
		error = vm_map_create_upl(theMap,
						startPage,
						&ioplSize,
						&iopl.fIOPL,
						baseInfo,
						&numPageInfo,
						&ioplFlags);
	    }

            assert(ioplSize);
            if (error != KERN_SUCCESS)
                goto abortExit;

	    if (iopl.fIOPL)
		highPage = upl_get_highest_page(iopl.fIOPL);
	    if (highPage > highestPage)
		highestPage = highPage;

            error = kIOReturnNoMemory;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
		if (mapper && mapBase) {
		    mapper->iovmFree(mapBase, _pages);
		    mapBase = 0;
		    iopl.fMappedBase = 0;
		}
            }
            else {
                iopl.fFlags = 0;
		if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

	    if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
	    {
		upl_commit(iopl.fIOPL, 0, 0);
		upl_deallocate(iopl.fIOPL);
		iopl.fIOPL = 0;
	    }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up partially created and unsaved iopl
		if (iopl.fIOPL) {
		    upl_abort(iopl.fIOPL, 0);
		    upl_deallocate(iopl.fIOPL);
		}
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
		if (mapper)
		    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    return kIOReturnSuccess;
1938abortExit:
55e303ae
A
1939 {
1940 dataP = getDataP(_memoryEntries);
91447636 1941 UInt done = getNumIOPL(_memoryEntries, dataP);
55e303ae
A
1942 ioPLBlock *ioplList = getIOPLList(dataP);
1943
1944 for (UInt range = 0; range < done; range++)
1945 {
91447636
A
1946 if (ioplList[range].fIOPL) {
1947 upl_abort(ioplList[range].fIOPL, 0);
1948 upl_deallocate(ioplList[range].fIOPL);
1949 }
55e303ae 1950 }
91447636 1951 (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
1c79356b 1952
55e303ae
A
1953 if (mapper && mapBase)
1954 mapper->iovmFree(mapBase, _pages);
1c79356b
A
1955 }
1956
55e303ae
A
1957 return error;
1958}
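
/*
 * Illustrative sketch (not part of the original source): wireVirtual()
 * above marks kIODirectionOut preparations with UPL_COPYOUT_FROM and
 * kIOMemoryPreparedReadOnly, so an out-bound descriptor is wired on the
 * assumption that the hardware only reads it. The buffer, length and
 * task names below are hypothetical.
 */
#if 0 /* example only */
IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
                              (vm_address_t) userBuffer, userLength,
                              kIODirectionOut, userTask);
if (md && (kIOReturnSuccess == md->prepare(kIODirectionOut))) {
    // The device may now DMA *from* the wired pages; on complete()
    // they need not be marked dirty, per UPL_COPYOUT_FROM above.
    md->complete(kIODirectionOut);
}
if (md)
    md->release();
#endif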

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method need not
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (!_wireCount
    && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}

/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; prepare() and complete() must occur in pairs, before and
 * after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        IOOptionBits type = _flags & kIOMemoryTypeMask;

        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
            /* kIOMemoryTypePhysical */
            // DO NOTHING
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }

            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
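
/*
 * Illustrative sketch (not part of the original source): prepare() and
 * complete() nest via _wireCount, so balanced pairs issued from
 * independent code paths are safe; the pages stay wired until the last
 * complete(). The descriptor 'md' below is hypothetical.
 */
#if 0 /* example only */
if (kIOReturnSuccess == md->prepare()) {        // _wireCount: 0 -> 1, wires
    if (kIOReturnSuccess == md->prepare()) {    // _wireCount: 1 -> 2, no rewiring
        // ... issue the I/O transfer against the wired pages ...
        md->complete();                         // _wireCount: 2 -> 1, still wired
    }
    md->complete();                             // _wireCount: 1 -> 0, unwires
}
#endif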

IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len = 0;

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task
    && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
    && (1 == _rangesCount) && (0 == sourceOffset)
    && range0Addr && (length <= range0Len) ) {
        if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
            return kIOReturnOverrun;    // Doesn't fit in 32bit return field
        else {
            *atAddress = range0Addr;
            return( kIOReturnSuccess );
        }
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            kr = mach_make_memory_entry_64(get_task_map(_task),
                        &actualSize, range0Addr,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
#if IOASSERT
                IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                      range0Addr, (UInt32) actualSize, size);
#endif
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {     // _task == 0, must be physical

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( sourceOffset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch(options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    if( 0 == sharedMem)
        kr = kIOReturnVMError;
    else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}
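
/*
 * Illustrative sketch (not part of the original source): the
 * kIOMapCacheMask options handled above are supplied by the caller of
 * map(); e.g. a driver mapping a physical-range descriptor of device
 * registers would typically inhibit caching. The physical address and
 * register access below are hypothetical.
 */
#if 0 /* example only */
IOMemoryDescriptor * regs = IOMemoryDescriptor::withPhysicalAddress(
                                0xF0000000, page_size, kIODirectionOutIn);
IOMemoryMap * map = regs ? regs->map(kernel_task, 0,
                                kIOMapAnywhere | kIOMapInhibitCache) : 0;
if (map) {
    volatile UInt32 * reg = (volatile UInt32 *) map->getVirtualAddress();
    (void) *reg;        // uncached, guarded access per the flags above
    map->release();
}
if (regs)
    regs->release();
#endif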

IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {

        IOOptionBits type = _flags & kIOMemoryTypeMask;
        user_addr_t range0Addr;
        IOByteCount range0Len;

        getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
        if (logical == range0Addr && length <= range0Len)
            return( kIOReturnSuccess );
    }

    return( super::doUnmap( addressMap, logical, length ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }


#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor *    _memory,
        IOMemoryMap *           _superMap,
        IOByteCount             _offset,
        IOByteCount             _length )
{

    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

    return( true );
}

bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor *    _memory,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    bool ok;
    bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));

    if ((!_memory) || (!intoTask))
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    if (!redir)
    {
        if (!super::init())
            return(false);
        addressMap = get_task_map(intoTask);
        if( !addressMap)
            return( false);
        vm_map_reference(addressMap);
        addressTask = intoTask;
        logical     = toAddress;
        options     = _options;
    }

    _memory->retain();

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
                                                  _options, offset, length ));
    if (ok || redir)
    {
        if (memory)
            memory->release();
        memory  = _memory;
        logical = toAddress;
    }
    else
    {
        _memory->release();
        if (!redir)
        {
            logical = 0;
            memory  = 0;
            vm_map_deallocate(addressMap);
            addressMap = 0;
        }
    }

    return( ok );
}

/* LP64todo - these need to expand */
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t          sharedMem;
    vm_size_t           size;
    vm_offset_t         mapped;
    IOByteCount         sourceOffset;
    IOOptionBits        options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                           | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot, // cur
                          prot, // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}

IOReturn IOMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalAddress   sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
            ref.mapped = mapping->getVirtualAddress();

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = length;
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                  | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            err = upl_transpose(redirUPL2, mapping->redirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->memory->_memEntry;
                mapping->memory->_memEntry = me;
            }
        }
        else
        {

            logical = *atAddress;
            if( options & kIOMapAnywhere)
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            else {
                ref.mapped = trunc_page_32( logical );
                if( (logical - ref.mapped) != pageOffset) {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
            else
                err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
        }

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}

enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        void *           _pager,
        vm_map_t         addressMap,
        IOVirtualAddress address,
        IOByteCount      sourceOffset,
        IOByteCount      length,
        IOOptionBits     options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOByteCount         pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
#ifdef DEBUG
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
#endif
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;

#ifdef DEBUG
        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        /* *** ALERT *** */
        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution   */
        /* of the mappings created above.  Need for this is in absolute  */
        /* violation of the basic tenet that the pmap layer is a cache.  */
        /* Further, it implies a serious I/O architectural violation on  */
        /* the part of some user of the mapping.  As of this writing,    */
        /* the call to vm_fault is needed because the NVIDIA driver      */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be  */
        /* fixed as soon as possible.  The NVIDIA driver should not      */
        /* need to query for this info as it should know from the doMap  */
        /* call where the physical memory is mapped.  When a query is    */
        /* necessary to find a physical mapping, it should be done       */
        /* through an iokit call which includes the mapped memory        */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /* *** Temporary Workaround *** */
        /* *** ALERT *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
             && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}

IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t                addressMap,
        IOVirtualAddress        logical,
        IOByteCount             length )
{
    IOReturn err;

#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );
#endif

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn       err = kIOReturnSuccess;
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, doRedirect );

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}

IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!logical)
                break;
            if (!addressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != addressMap))
                && (0 == (options & kIOMapStatic)))
            {
                IOUnmapPages( addressMap, logical, length );
                if(!doRedirect && safeTask
                   && (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                       || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
                {
                    err = vm_deallocate( addressMap, logical, length );
                    err = memory->doMap( addressMap, &logical,
                                         (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                         offset, length );
                } else
                    err = kIOReturnSuccess;
#ifdef DEBUG
                IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
#endif
            }
            else if (kIOMapWriteCombineCache == (options & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (options & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(addressMap, logical, length, newMode);
            }
        }
        while (false);

        UNLOCK;
    }

    if ((((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        && safeTask
        && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
        memory->redirect(safeTask, doRedirect);

    return( err );
}

IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical = 0;
    UNLOCK;
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if (owner && (owner != memory))
    {
        LOCK;
        owner->removeMapping(this);
        UNLOCK;
    }

    if( superMap)
        superMap->release();

    if (redirUPL) {
        upl_commit(redirUPL, NULL, 0);
        upl_deallocate(redirUPL);
    }

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}

_IOMemoryMap * _IOMemoryMap::copyCompatible(
        IOMemoryDescriptor *    owner,
        task_t                  task,
        IOVirtualAddress        toAddress,
        IOOptionBits            _options,
        IOByteCount             _offset,
        IOByteCount             _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( options & kIOMapUnique)
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
        && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
            && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, _length );
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper * mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t                  intoTask,
        IOVirtualAddress        mapAddress,
        IOOptionBits            options )
{
    _IOMemoryMap * newMap;

    newMap = new _IOMemoryMap;

    LOCK;

    if( newMap
        && !newMap->initWithDescriptor( this, intoTask, mapAddress,
                                        options | kIOMapStatic, 0, getLength() )) {
        newMap->release();
        newMap = 0;
    }

    addMapping( newMap);

    UNLOCK;

    return( newMap);
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits options )
{

    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
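
/*
 * Illustrative sketch (not part of the original source): the common way
 * to use the map() overloads above. With no arguments the descriptor is
 * mapped anywhere in the kernel task, and the returned IOMemoryMap
 * keeps the mapping alive until it is released. The descriptor 'md'
 * below is hypothetical.
 */
#if 0 /* example only */
IOMemoryMap * kmap = md->map();     // kernel_task, kIOMapAnywhere
if (kmap) {
    void * va = (void *) kmap->getVirtualAddress();
    // ... access the buffer through va ...
    kmap->release();                // undoes the mapping
}
#endif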

IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (logical && addressMap) do
    {
        if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = memory;
            physMem->retain();
        }

        if (!redirUPL)
        {
            vm_size_t size = length;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                      | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( addressMap, logical, length );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != memory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference,
                                                          offset, length))
                    err = kIOReturnError;
            }
            if (redirUPL)
            {
                upl_commit(redirUPL, NULL, 0);
                upl_deallocate(redirUPL);
                redirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}

IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       mapping = 0;
    OSIterator *         iter;

    LOCK;

    do
    {
        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

            if (owner != this)
                continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                            phys, length, _direction);
                if (!mapDesc)
                    continue;
                offset = 0;
            }
            else
            {
                mapDesc = this;
                mapDesc->retain();
            }

            if (kIOMapReference & options)
            {
                mapping = (_IOMemoryMap *) toAddress;
                mapping->retain();

#if 1
                uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
                pageOffset1 -= trunc_page_32( pageOffset1 );

                uint32_t pageOffset2 = mapping->getVirtualAddress();
                pageOffset2 -= trunc_page_32( pageOffset2 );

                if (pageOffset1 != pageOffset2)
                    IOLog("::redirect can't map offset %x to addr %x\n",
                          pageOffset1, mapping->getVirtualAddress());
#endif

                if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                                  offset, length ))
                {
#ifdef DEBUG
                    IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
#endif
                }

                if (mapping->owner)
                    mapping->owner->removeMapping(mapping);
                continue;
            }
        }
        else
        {
            // look for an existing mapping
            if( (iter = OSCollectionIterator::withCollection( _mappings))) {

                while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                    if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                        break;
                }
                iter->release();
            }

            if (mapping)
                mapping->retain();

            if( mapping || (options & kIOMapReference))
                continue;

            mapDesc = owner;
            mapDesc->retain();
        }
        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
            && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                             offset, length )) {
#ifdef DEBUG
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
#endif
            mapping->release();
            mapping = 0;
        }

        if (mapping)
            mapping->retain();

    } while( false );

    if (mapping)
    {
        mapping->owner = owner;
        owner->addMapping( mapping);
        mapping->release();
    }

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return( mapping);
}

void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings )
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
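
/*
 * Illustrative sketch (not part of the original source): sub-range
 * descriptors are normally obtained through the factory method
 * IOMemoryDescriptor::withSubRange() rather than by calling
 * initSubRange() directly. The parent descriptor 'parentMD' below is
 * hypothetical.
 */
#if 0 /* example only */
IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
                               parentMD,           // existing descriptor
                               page_size,          // offset into parent
                               2 * page_size,      // length of sub range
                               parentMD->getDirection());
if (sub) {
    // Reads/writes at offset 0 of 'sub' hit offset page_size of parentMD.
    sub->release();
}
#endif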

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}


IOReturn
IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn rtn;

    if (kIOMDGetCharacteristics == op) {

        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (kIOReturnSuccess == rtn) {
            IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
            data->fLength = _length;
            data->fSGCount = 0;     // XXX gvdl: need to compute this and fPages
            data->fPages = 0;
            data->fPageAlign = 0;
        }

        return rtn;
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data =
            reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
        UInt offset = data->fOffset;
        UInt remain = _length - offset;
        if ((int) remain <= 0)
            return (!remain)? kIOReturnOverrun : kIOReturnInternalError;

        data->fOffset = offset + _start;
        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (data->fLength > remain)
            data->fLength = remain;
        data->fOffset = offset;

        return rtn;
    }
    else
        return kIOReturnBadArgument;
}

addr64_t
IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
{
    addr64_t    address;
    IOByteCount actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment64( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}


IOReturn IOSubMemoryDescriptor::doMap(
        vm_map_t                addressMap,
        IOVirtualAddress *      atAddress,
        IOOptionBits            options,
        IOByteCount             sourceOffset,
        IOByteCount             length )
{
    if( sourceOffset >= _length)
        return( kIOReturnOverrun );
    return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
}

IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                              IOOptionBits * oldState )
{
    IOReturn err;

    LOCK;
    err = _parent->setPurgeable( newState, oldState );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                                  IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    LOCK;
    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}

IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMapUnique & options))
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        _parent, intoTask,
                        toAddress - (_start + offset),
                        options | kIOMapReference,
                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        _parent, intoTask,
                        toAddress,
                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}

/* ick */

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount length,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       length,
                                 IODirection       direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                 IOVirtualRange * ranges,
                                 UInt32           withCount,
                                 IODirection      direction,
                                 task_t           task,
                                 bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}
3578/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
3579
9bccf70c
A
3580bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
3581{
3582 OSSymbol const *keys[2];
3583 OSObject *values[2];
91447636
A
3584 struct SerData {
3585 user_addr_t address;
3586 user_size_t length;
3587 } *vcopy;
9bccf70c
A
3588 unsigned int index, nRanges;
3589 bool result;
3590
91447636
A
3591 IOOptionBits type = _flags & kIOMemoryTypeMask;
3592
9bccf70c
A
3593 if (s == NULL) return false;
3594 if (s->previouslySerialized(this)) return true;
3595
3596 // Pretend we are an array.
3597 if (!s->addXMLStartTag(this, "array")) return false;
3598
3599 nRanges = _rangesCount;
91447636 3600 vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
9bccf70c
A
3601 if (vcopy == 0) return false;
3602
3603 keys[0] = OSSymbol::withCString("address");
3604 keys[1] = OSSymbol::withCString("length");
3605
3606 result = false;
3607 values[0] = values[1] = 0;
3608
3609 // From this point on we can go to bail.
3610
3611 // Copy the volatile data so we don't have to allocate memory
3612 // while the lock is held.
3613 LOCK;
3614 if (nRanges == _rangesCount) {
91447636 3615 Ranges vec = _ranges;
9bccf70c 3616 for (index = 0; index < nRanges; index++) {
91447636
A
3617 user_addr_t addr; IOByteCount len;
3618 getAddrLenForInd(addr, len, type, vec, index);
3619 vcopy[index].address = addr;
3620 vcopy[index].length = len;
9bccf70c
A
3621 }
3622 } else {
3623 // The descriptor changed out from under us. Give up.
3624 UNLOCK;
3625 result = false;
3626 goto bail;
3627 }
3628 UNLOCK;
3629
3630 for (index = 0; index < nRanges; index++)
3631 {
91447636
A
3632 user_addr_t addr = vcopy[index].address;
3633 IOByteCount len = (IOByteCount) vcopy[index].length;
3634 values[0] =
3635 OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
9bccf70c
A
3636 if (values[0] == 0) {
3637 result = false;
3638 goto bail;
3639 }
91447636 3640 values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
9bccf70c
A
3641 if (values[1] == 0) {
3642 result = false;
3643 goto bail;
3644 }
3645 OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
3646 if (dict == 0) {
3647 result = false;
3648 goto bail;
3649 }
3650 values[0]->release();
3651 values[1]->release();
3652 values[0] = values[1] = 0;
3653
3654 result = dict->serialize(s);
3655 dict->release();
3656 if (!result) {
3657 goto bail;
3658 }
3659 }
3660 result = s->addXMLEndTag("array");
3661
3662 bail:
3663 if (values[0])
3664 values[0]->release();
3665 if (values[1])
3666 values[1]->release();
3667 if (keys[0])
3668 keys[0]->release();
3669 if (keys[1])
3670 keys[1]->release();
3671 if (vcopy)
3672 IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
3673 return result;
3674}
3675
3676bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
3677{
3678 if (!s) {
3679 return (false);
3680 }
3681 if (s->previouslySerialized(this)) return true;
3682
3683 // Pretend we are a dictionary.
3684 // We must duplicate the functionality of OSDictionary here
3685 // because otherwise object references will not work;
3686 // they are based on the value of the object passed to
3687 // previouslySerialized and addXMLStartTag.
3688
3689 if (!s->addXMLStartTag(this, "dict")) return false;
3690
3691 char const *keys[3] = {"offset", "length", "parent"};
3692
3693 OSObject *values[3];
3694 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
3695 if (values[0] == 0)
3696 return false;
3697 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
3698 if (values[1] == 0) {
3699 values[0]->release();
3700 return false;
3701 }
3702 values[2] = _parent;
3703
3704 bool result = true;
3705 for (int i=0; i<3; i++) {
3706 if (!s->addString("<key>") ||
3707 !s->addString(keys[i]) ||
3708 !s->addXMLEndTag("key") ||
3709 !values[i]->serialize(s)) {
3710 result = false;
3711 break;
3712 }
3713 }
3714 values[0]->release();
3715 values[1]->release();
3716 if (!result) {
3717 return false;
3718 }
3719
3720 return s->addXMLEndTag("dict");
3721}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }
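
/*
 * Illustrative sketch (not part of the original source):
 * getPhysicalAddress() above only returns the first segment; a prepared
 * descriptor that is not physically contiguous must be walked with
 * getPhysicalSegment() to build a scatter/gather list. The descriptor
 * 'md' and the S/G programming step below are hypothetical.
 */
#if 0 /* example only */
IOByteCount offset = 0;
while (offset < md->getLength()) {
    IOByteCount       segLen;
    IOPhysicalAddress segPhys = md->getPhysicalSegment(offset, &segLen);
    if (!segPhys)
        break;                      // no further physical backing
    // ... program one hardware S/G entry with (segPhys, segLen) ...
    offset += segLen;
}
#endif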