[apple/xnu.git] / iokit / Kernel / IOMemoryDescriptor.cpp
1/*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
24 *
25 * HISTORY
26 *
27 */
28
29#include <IOKit/assert.h>
30#include <IOKit/system.h>
31#include <IOKit/IOLib.h>
32#include <IOKit/IOMemoryDescriptor.h>
33
34#include <IOKit/IOKitDebug.h>
35
36#include <libkern/c++/OSContainers.h>
37#include <libkern/c++/OSDictionary.h>
38#include <libkern/c++/OSArray.h>
39#include <libkern/c++/OSSymbol.h>
40#include <libkern/c++/OSNumber.h>
41#include <sys/cdefs.h>
42
43__BEGIN_DECLS
44#include <vm/pmap.h>
45#include <device/device_port.h>
46void bcopy_phys(char *from, char *to, int size);
47void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
48 vm_prot_t prot, unsigned int flags, boolean_t wired);
49#ifndef i386
50struct phys_entry *pmap_find_physentry(vm_offset_t pa);
51#endif
52void ipc_port_release_send(ipc_port_t port);
53vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);
54
55memory_object_t
56device_pager_setup(
57 memory_object_t pager,
58 int device_handle,
59 vm_size_t size,
60 int flags);
61void
62device_pager_deallocate(
63 memory_object_t);
64kern_return_t
65device_pager_populate_object(
66 memory_object_t pager,
67 vm_object_offset_t offset,
68 vm_offset_t phys_addr,
69 vm_size_t size);
70
71/*
72 * Page fault handling based on vm_map (or entries therein)
73 */
74extern kern_return_t vm_fault(
75 vm_map_t map,
76 vm_offset_t vaddr,
77 vm_prot_t fault_type,
78 boolean_t change_wiring,
79 int interruptible,
80 pmap_t caller_pmap,
81 vm_offset_t caller_pmap_addr);
82
83__END_DECLS
84
85/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
86
87OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )
88
89#define super IOMemoryDescriptor
90
91OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)
92
93extern "C" {
94
95vm_map_t IOPageableMapForAddress( vm_address_t address );
96
97typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);
98
99kern_return_t IOIteratePageableMaps(vm_size_t size,
100 IOIteratePageableMapsCallback callback, void * ref);
101
102}
103
104/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
105
106static IORecursiveLock * gIOMemoryLock;
107
108#define LOCK IORecursiveLockLock( gIOMemoryLock)
109#define UNLOCK IORecursiveLockUnlock( gIOMemoryLock)
110#define SLEEP IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
111#define WAKEUP \
112 IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)
113
114/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
115
116inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
117{
118 if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
119 return( IOPageableMapForAddress( address ) );
120 else
121 return( get_task_map( task ));
122}
123
124inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
125{
126 vm_offset_t pa = pmap_extract(get_task_pmap(task), va);
127
128 if ( pa == 0 )
129 {
130 pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
131 if ( pa ) pa += va - trunc_page(va);
132 }
133
134 return pa;
135}
136
137inline void bcopy_phys_safe(char * from, char * to, int size)
138{
139 boolean_t enabled = ml_set_interrupts_enabled(FALSE);
140
141 bcopy_phys(from, to, size);
142
143 ml_set_interrupts_enabled(enabled);
144}
145
146#define next_page(a) ( trunc_page(a) + page_size )
147
148
149extern "C" {
150
151kern_return_t device_data_action(
152 int device_handle,
153 ipc_port_t device_pager,
154 vm_prot_t protection,
155 vm_object_offset_t offset,
156 vm_size_t size)
157{
158 struct ExpansionData {
159 void * devicePager;
160 unsigned int pagerContig:1;
161 unsigned int unused:31;
162 IOMemoryDescriptor * memory;
163 };
164 kern_return_t kr;
165 ExpansionData * ref = (ExpansionData *) device_handle;
166 IOMemoryDescriptor * memDesc;
167
168 LOCK;
169 memDesc = ref->memory;
170 if( memDesc)
171 kr = memDesc->handleFault( device_pager, 0, 0,
172 offset, size, kIOMapDefaultCache /*?*/);
173 else
174 kr = KERN_ABORTED;
175 UNLOCK;
176
177 return( kr );
178}
179
180kern_return_t device_close(
181 int device_handle)
182{
183 struct ExpansionData {
184 void * devicePager;
185 unsigned int pagerContig:1;
186 unsigned int unused:31;
187 IOMemoryDescriptor * memory;
188 };
189 ExpansionData * ref = (ExpansionData *) device_handle;
190
191 IODelete( ref, ExpansionData, 1 );
192
193 return( kIOReturnSuccess );
194}
195
196}
197
198/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
199
200/*
201 * withAddress:
202 *
203 * Create a new IOMemoryDescriptor. The buffer is a virtual address
204 * relative to the specified task. If no task is supplied, the kernel
205 * task is implied.
206 */
207IOMemoryDescriptor *
208IOMemoryDescriptor::withAddress(void * address,
209 IOByteCount withLength,
210 IODirection withDirection)
211{
212 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
213 if (that)
214 {
215 if (that->initWithAddress(address, withLength, withDirection))
216 return that;
217
218 that->release();
219 }
220 return 0;
221}
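
/*
 * Illustrative usage (not part of the original file): a minimal sketch of the
 * withAddress() factory above for a kernel-allocated buffer. The function and
 * buffer names are hypothetical.
 */
#if 0 /* usage sketch only */
static void exampleWithAddress( void )
{
    IOByteCount bufSize = 4096;
    void *      buf     = IOMalloc( bufSize );
    if( !buf)
        return;

    // No task argument, so the buffer is taken to be a kernel virtual address.
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
                                    buf, bufSize, kIODirectionOut );
    if( md) {
        // ... hand md to a controller, then drop the reference ...
        md->release();
    }
    IOFree( buf, bufSize );
}
#endif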
222
223IOMemoryDescriptor *
224IOMemoryDescriptor::withAddress(vm_address_t address,
225 IOByteCount withLength,
226 IODirection withDirection,
227 task_t withTask)
228{
229 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
230 if (that)
231 {
232 if (that->initWithAddress(address, withLength, withDirection, withTask))
233 return that;
234
235 that->release();
236 }
237 return 0;
238}
239
240IOMemoryDescriptor *
241IOMemoryDescriptor::withPhysicalAddress(
242 IOPhysicalAddress address,
243 IOByteCount withLength,
244 IODirection withDirection )
245{
246 return( IOMemoryDescriptor::withAddress( address, withLength,
247 withDirection, (task_t) 0 ));
248}
249
250
251/*
252 * withRanges:
253 *
254 * Create a new IOMemoryDescriptor. The buffer is made up of several
255 * virtual address ranges, from a given task.
256 *
257 * Passing the ranges as a reference will avoid an extra allocation.
258 */
259IOMemoryDescriptor *
260IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
261 UInt32 withCount,
262 IODirection withDirection,
263 task_t withTask,
264 bool asReference = false)
265{
266 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
267 if (that)
268 {
269 if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
270 return that;
271
272 that->release();
273 }
274 return 0;
275}
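
/*
 * Illustrative usage (not part of the original file): building a discontiguous
 * descriptor with withRanges(). The task handle and addresses are hypothetical;
 * with asReference == true the range array must outlive the descriptor.
 */
#if 0 /* usage sketch only */
static IOMemoryDescriptor * exampleWithRanges( task_t userTask,
                                               vm_address_t addr0,
                                               vm_address_t addr1 )
{
    static IOVirtualRange ranges[ 2 ];      // static: safe to pass by reference

    ranges[0].address = addr0;  ranges[0].length = page_size;
    ranges[1].address = addr1;  ranges[1].length = 2 * page_size;

    // The descriptor keeps a pointer to 'ranges' rather than copying it.
    return( IOMemoryDescriptor::withRanges( ranges, 2, kIODirectionIn,
                                            userTask, true ));
}
#endif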
276
277IOMemoryDescriptor *
278IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
279 UInt32 withCount,
280 IODirection withDirection,
281 bool asReference = false)
282{
283 IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
284 if (that)
285 {
286 if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
287 return that;
288
289 that->release();
290 }
291 return 0;
292}
293
294IOMemoryDescriptor *
295IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
296 IOByteCount offset,
297 IOByteCount length,
298 IODirection withDirection)
299{
300 IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;
301
302 if (that && !that->initSubRange(of, offset, length, withDirection)) {
303 that->release();
304 that = 0;
305 }
306 return that;
307}
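
/*
 * Illustrative usage (not part of the original file): carving a window out of
 * an existing descriptor with withSubRange(). The offset and length chosen
 * here are hypothetical.
 */
#if 0 /* usage sketch only */
static IOMemoryDescriptor * exampleSubRange( IOMemoryDescriptor * parent )
{
    // Describe only the second page of 'parent'; no ranges are copied and the
    // sub-descriptor holds a reference to its parent while it exists.
    return( IOMemoryDescriptor::withSubRange( parent, page_size, page_size,
                                              parent->getDirection() ));
}
#endif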
308
309/*
310 * initWithAddress:
311 *
312 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
313 * relative to the specified task. If no task is supplied, the kernel
314 * task is implied.
315 *
316 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
317 * initWithRanges again on an existing instance -- note this behavior
318 * is not commonly supported in other I/O Kit classes, although it is
319 * supported here.
320 */
321bool
322IOGeneralMemoryDescriptor::initWithAddress(void * address,
323 IOByteCount withLength,
324 IODirection withDirection)
325{
326 _singleRange.v.address = (vm_address_t) address;
327 _singleRange.v.length = withLength;
328
329 return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
330}
331
332bool
333IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
334 IOByteCount withLength,
335 IODirection withDirection,
336 task_t withTask)
337{
338 _singleRange.v.address = address;
339 _singleRange.v.length = withLength;
340
341 return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
342}
343
344bool
345IOGeneralMemoryDescriptor::initWithPhysicalAddress(
346 IOPhysicalAddress address,
347 IOByteCount withLength,
348 IODirection withDirection )
349{
350 _singleRange.p.address = address;
351 _singleRange.p.length = withLength;
352
353 return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
354}
355
356/*
357 * initWithRanges:
358 *
359 * Initialize an IOMemoryDescriptor. The buffer is made up of several
360 * virtual address ranges, from a given task
361 *
362 * Passing the ranges as a reference will avoid an extra allocation.
363 *
364 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
365 * initWithRanges again on an existing instance -- note this behavior
366 * is not commonly supported in other I/O Kit classes, although it is
367 * supported here.
368 */
369bool
370IOGeneralMemoryDescriptor::initWithRanges(
371 IOVirtualRange * ranges,
372 UInt32 withCount,
373 IODirection withDirection,
374 task_t withTask,
375 bool asReference = false)
376{
377 assert(ranges);
378 assert(withCount);
379
380 /*
381 * We can check the _initialized instance variable before having ever set
382 * it to an initial value because I/O Kit guarantees that all our instance
383 * variables are zeroed on an object's allocation.
384 */
385
386 if (_initialized == false)
387 {
388 if (super::init() == false) return false;
389 _initialized = true;
390 }
391 else
392 {
393 /*
394 * An existing memory descriptor is being retargeted to point to
395 * somewhere else. Clean up our present state.
396 */
397
398 assert(_wireCount == 0);
399
400 while (_wireCount)
401 complete();
402 if (_kernPtrAligned)
403 unmapFromKernel();
404 if (_ranges.v && _rangesIsAllocated)
405 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
406 }
407
408 /*
409 * Initialize the memory descriptor.
410 */
411
412 _ranges.v = 0;
413 _rangesCount = withCount;
414 _rangesIsAllocated = asReference ? false : true;
415 _direction = withDirection;
416 _length = 0;
417 _task = withTask;
418 _position = 0;
419 _positionAtIndex = 0;
420 _positionAtOffset = 0;
421 _kernPtrAligned = 0;
422 _cachedPhysicalAddress = 0;
423 _cachedVirtualAddress = 0;
424 _flags = 0;
425
426 if (withTask && (withTask != kernel_task))
427 _flags |= kIOMemoryRequiresWire;
428
429 if (asReference)
430 _ranges.v = ranges;
431 else
432 {
433 _ranges.v = IONew(IOVirtualRange, withCount);
434 if (_ranges.v == 0) return false;
435 bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
436 }
437
438 for (unsigned index = 0; index < _rangesCount; index++)
439 {
440 _length += _ranges.v[index].length;
441 }
442
443 return true;
444}
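
/*
 * Illustrative usage (not part of the original file): the re-initialization
 * behaviour described in the comment above, sketched with hypothetical buffers.
 */
#if 0 /* usage sketch only */
static bool exampleReinit( IOGeneralMemoryDescriptor * md,
                           void * bufA, void * bufB, IOByteCount size )
{
    // First initialization targets bufA ...
    if( !md->initWithAddress( bufA, size, kIODirectionOut ))
        return( false );

    // ... and the same instance may later be retargeted at bufB, provided its
    // wire count is zero (no outstanding prepare()).
    return( md->initWithAddress( bufB, size, kIODirectionOut ));
}
#endif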
445
446bool
447IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
448 UInt32 withCount,
449 IODirection withDirection,
450 bool asReference = false)
451{
452#warning assuming virtual, physical addresses same size
453 return( initWithRanges( (IOVirtualRange *) ranges,
454 withCount, withDirection, (task_t) 0, asReference ));
455}
456
457/*
458 * free
459 *
460 * Free resources.
461 */
462void IOGeneralMemoryDescriptor::free()
463{
464 LOCK;
465 if( reserved)
466 reserved->memory = 0;
467 UNLOCK;
468
469 while (_wireCount)
470 complete();
471 if (_kernPtrAligned)
472 unmapFromKernel();
473 if (_ranges.v && _rangesIsAllocated)
474 IODelete(_ranges.v, IOVirtualRange, _rangesCount);
475
476 if( reserved && reserved->devicePager)
477 device_pager_deallocate( reserved->devicePager );
478
479 // memEntry holds a ref on the device pager which owns reserved (ExpansionData)
480 // so no reserved access after this point
481 if( _memEntry)
482 ipc_port_release_send( (ipc_port_t) _memEntry );
483 super::free();
484}
485
486/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
487/* DEPRECATED */ {
488/* DEPRECATED */ kern_return_t krtn;
489/* DEPRECATED */ vm_offset_t off;
490/* DEPRECATED */ // Pull the shared pages out of the task map
491/* DEPRECATED */ // Do we need to unwire it first?
492/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
493/* DEPRECATED */ {
494/* DEPRECATED */ pmap_change_wiring(
495/* DEPRECATED */ kernel_pmap,
496/* DEPRECATED */ _kernPtrAligned + off,
497/* DEPRECATED */ FALSE);
498/* DEPRECATED */
499/* DEPRECATED */ pmap_remove(
500/* DEPRECATED */ kernel_pmap,
501/* DEPRECATED */ _kernPtrAligned + off,
502/* DEPRECATED */ _kernPtrAligned + off + page_size);
503/* DEPRECATED */ }
504/* DEPRECATED */ // Free the former shmem area in the task
505/* DEPRECATED */ krtn = vm_deallocate(kernel_map,
506/* DEPRECATED */ _kernPtrAligned,
507/* DEPRECATED */ _kernSize );
508/* DEPRECATED */ assert(krtn == KERN_SUCCESS);
509/* DEPRECATED */ _kernPtrAligned = 0;
510/* DEPRECATED */ }
511/* DEPRECATED */
512/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
513/* DEPRECATED */ {
514/* DEPRECATED */ kern_return_t krtn;
515/* DEPRECATED */ vm_offset_t off;
516/* DEPRECATED */
517/* DEPRECATED */ if (_kernPtrAligned)
518/* DEPRECATED */ {
519/* DEPRECATED */ if (_kernPtrAtIndex == rangeIndex) return;
520/* DEPRECATED */ unmapFromKernel();
521/* DEPRECATED */ assert(_kernPtrAligned == 0);
522/* DEPRECATED */ }
523/* DEPRECATED */
524/* DEPRECATED */ vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
525/* DEPRECATED */
526/* DEPRECATED */ _kernSize = trunc_page(_ranges.v[rangeIndex].address +
527/* DEPRECATED */ _ranges.v[rangeIndex].length +
528/* DEPRECATED */ page_size - 1) - srcAlign;
529/* DEPRECATED */
530/* DEPRECATED */ /* Find some memory of the same size in kernel task. We use vm_allocate() */
531/* DEPRECATED */ /* to do this. vm_allocate inserts the found memory object in the */
532/* DEPRECATED */ /* target task's map as a side effect. */
533/* DEPRECATED */ krtn = vm_allocate( kernel_map,
534/* DEPRECATED */ &_kernPtrAligned,
535/* DEPRECATED */ _kernSize,
536/* DEPRECATED */ VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) ); // Find first fit
537/* DEPRECATED */ assert(krtn == KERN_SUCCESS);
538/* DEPRECATED */ if(krtn) return;
539/* DEPRECATED */
540/* DEPRECATED */ /* For each page in the area allocated from the kernel map, */
541/* DEPRECATED */ /* find the physical address of the page. */
542/* DEPRECATED */ /* Enter the page in the target task's pmap, at the */
543/* DEPRECATED */ /* appropriate target task virtual address. */
544/* DEPRECATED */ for ( off = 0; off < _kernSize; off += page_size )
545/* DEPRECATED */ {
546/* DEPRECATED */ vm_offset_t kern_phys_addr, phys_addr;
547/* DEPRECATED */ if( _task)
548/* DEPRECATED */ phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
549/* DEPRECATED */ else
550/* DEPRECATED */ phys_addr = srcAlign + off;
551/* DEPRECATED */ assert(phys_addr);
552/* DEPRECATED */ if(phys_addr == 0) return;
553/* DEPRECATED */
554/* DEPRECATED */ // Check original state.
555/* DEPRECATED */ kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
556/* DEPRECATED */ // Set virtual page to point to the right physical one
557/* DEPRECATED */ pmap_enter(
558/* DEPRECATED */ kernel_pmap,
559/* DEPRECATED */ _kernPtrAligned + off,
560/* DEPRECATED */ phys_addr,
561/* DEPRECATED */ VM_PROT_READ|VM_PROT_WRITE,
562/* DEPRECATED */ VM_WIMG_USE_DEFAULT,
563/* DEPRECATED */ TRUE);
564/* DEPRECATED */ }
565/* DEPRECATED */ _kernPtrAtIndex = rangeIndex;
566/* DEPRECATED */ }
567
568/*
569 * getDirection:
570 *
571 * Get the direction of the transfer.
572 */
573IODirection IOMemoryDescriptor::getDirection() const
574{
575 return _direction;
576}
577
578/*
579 * getLength:
580 *
581 * Get the length of the transfer (over all ranges).
582 */
583IOByteCount IOMemoryDescriptor::getLength() const
584{
585 return _length;
586}
587
588void IOMemoryDescriptor::setTag(
589 IOOptionBits tag )
590{
591 _tag = tag;
592}
593
594IOOptionBits IOMemoryDescriptor::getTag( void )
595{
596 return( _tag);
597}
598
599IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset,
600 IOByteCount * length )
601{
602 IOPhysicalAddress physAddr = 0;
603
604 if( prepare() == kIOReturnSuccess) {
605 physAddr = getPhysicalSegment( offset, length );
606 complete();
607 }
608
609 return( physAddr );
610}
611
612IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
613 void * bytes,
614 IOByteCount withLength )
615{
616 IOByteCount bytesCopied = 0;
617
618 assert(offset <= _length);
619 assert(offset <= _length - withLength);
620
621 if ( offset < _length )
622 {
623 withLength = min(withLength, _length - offset);
624
625 while ( withLength ) // (process another source segment?)
626 {
627 IOPhysicalAddress sourceSegment;
628 IOByteCount sourceSegmentLength;
629
630 sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
631 if ( sourceSegment == 0 ) goto readBytesErr;
632
633 sourceSegmentLength = min(sourceSegmentLength, withLength);
634
635 while ( sourceSegmentLength ) // (process another target segment?)
636 {
637 IOPhysicalAddress targetSegment;
638 IOByteCount targetSegmentLength;
639
640 targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
641 if ( targetSegment == 0 ) goto readBytesErr;
642
643 targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);
644
645 if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
646 {
647 IOByteCount pageLength;
648
649 pageLength = next_page(sourceSegment) - sourceSegment;
650
651 bcopy_phys_safe( /* from */ (char *) sourceSegment,
652 /* to */ (char *) targetSegment,
653 /* size */ (int ) pageLength );
654
655 ((UInt8 *) bytes) += pageLength;
656 bytesCopied += pageLength;
657 offset += pageLength;
658 sourceSegment += pageLength;
659 sourceSegmentLength -= pageLength;
660 targetSegment += pageLength;
661 targetSegmentLength -= pageLength;
662 withLength -= pageLength;
663 }
664
665 bcopy_phys_safe( /* from */ (char *) sourceSegment,
666 /* to */ (char *) targetSegment,
667 /* size */ (int ) targetSegmentLength );
668
669 ((UInt8 *) bytes) += targetSegmentLength;
670 bytesCopied += targetSegmentLength;
671 offset += targetSegmentLength;
672 sourceSegment += targetSegmentLength;
673 sourceSegmentLength -= targetSegmentLength;
674 withLength -= targetSegmentLength;
675 }
676 }
677 }
678
679readBytesErr:
680
681 if ( bytesCopied )
682 {
683 // We mark the destination pages as modified, just
684 // in case they are made pageable later on in life.
685
686 pmap_modify_pages( /* pmap */ kernel_pmap,
687 /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
688 /* end */ round_page(((vm_offset_t) bytes)) );
689 }
690
691 return bytesCopied;
692}
693
694IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount offset,
695 const void * bytes,
696 IOByteCount withLength )
697{
698 IOByteCount bytesCopied = 0;
699
700 assert(offset <= _length);
701 assert(offset <= _length - withLength);
702
703 if ( offset < _length )
704 {
705 withLength = min(withLength, _length - offset);
706
707 while ( withLength ) // (process another target segment?)
708 {
709 IOPhysicalAddress targetSegment;
710 IOByteCount targetSegmentLength;
711
712 targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
713 if ( targetSegment == 0 ) goto writeBytesErr;
714
715 targetSegmentLength = min(targetSegmentLength, withLength);
716
717 while ( targetSegmentLength ) // (process another source segment?)
718 {
719 IOPhysicalAddress sourceSegment;
720 IOByteCount sourceSegmentLength;
721
722 sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
723 if ( sourceSegment == 0 ) goto writeBytesErr;
724
725 sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);
726
727 if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
728 {
729 IOByteCount pageLength;
730
731 pageLength = next_page(targetSegment) - targetSegment;
732
733 bcopy_phys_safe( /* from */ (char *) sourceSegment,
734 /* to */ (char *) targetSegment,
735 /* size */ (int ) pageLength );
736
737 // We flush the data cache in case it is code we've copied,
738 // so that the instruction cache stays coherent with it.
739
740 flush_dcache(targetSegment, pageLength, true);
741
742 ((UInt8 *) bytes) += pageLength;
743 bytesCopied += pageLength;
744 offset += pageLength;
745 sourceSegment += pageLength;
746 sourceSegmentLength -= pageLength;
747 targetSegment += pageLength;
748 targetSegmentLength -= pageLength;
749 withLength -= pageLength;
750 }
751
752 bcopy_phys_safe( /* from */ (char *) sourceSegment,
753 /* to */ (char *) targetSegment,
754 /* size */ (int ) sourceSegmentLength );
755
756 // We flush the data cache in case it is code we've copied,
757 // so that the instruction cache stays coherent with it.
758
759 flush_dcache(targetSegment, sourceSegmentLength, true);
760
761 ((UInt8 *) bytes) += sourceSegmentLength;
762 bytesCopied += sourceSegmentLength;
763 offset += sourceSegmentLength;
764 targetSegment += sourceSegmentLength;
765 targetSegmentLength -= sourceSegmentLength;
766 withLength -= sourceSegmentLength;
767 }
768 }
769 }
770
771writeBytesErr:
772
773 return bytesCopied;
774}
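
/*
 * Illustrative usage (not part of the original file): copying through a
 * descriptor with readBytes()/writeBytes(). The buffer size is hypothetical;
 * both calls return the number of bytes actually moved.
 */
#if 0 /* usage sketch only */
static IOByteCount exampleCopy( IOMemoryDescriptor * md )
{
    char        local[ 64 ];
    IOByteCount got;

    // Pull the first bytes described by md into a kernel buffer ...
    got = md->readBytes( 0, local, sizeof( local ));

    // ... and push the same bytes back to the start of the descriptor.
    if( got)
        md->writeBytes( 0, local, got );

    return( got );
}
#endif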
775
776extern "C" {
777// osfmk/device/iokit_rpc.c
778extern unsigned int IOTranslateCacheBits(struct phys_entry *pp);
779};
780
781/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
782/* DEPRECATED */ {
783/* DEPRECATED */ assert(position <= _length);
784/* DEPRECATED */
785/* DEPRECATED */ if (position >= _length)
786/* DEPRECATED */ {
787/* DEPRECATED */ _position = _length;
788/* DEPRECATED */ _positionAtIndex = _rangesCount; /* careful: out-of-bounds */
789/* DEPRECATED */ _positionAtOffset = 0;
790/* DEPRECATED */ return;
791/* DEPRECATED */ }
792/* DEPRECATED */
793/* DEPRECATED */ if (position < _position)
794/* DEPRECATED */ {
795/* DEPRECATED */ _positionAtOffset = position;
796/* DEPRECATED */ _positionAtIndex = 0;
797/* DEPRECATED */ }
798/* DEPRECATED */ else
799/* DEPRECATED */ {
800/* DEPRECATED */ _positionAtOffset += (position - _position);
801/* DEPRECATED */ }
802/* DEPRECATED */ _position = position;
803/* DEPRECATED */
804/* DEPRECATED */ while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
805/* DEPRECATED */ {
806/* DEPRECATED */ _positionAtOffset -= _ranges.v[_positionAtIndex].length;
807/* DEPRECATED */ _positionAtIndex++;
808/* DEPRECATED */ }
809/* DEPRECATED */ }
810
811IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
812 IOByteCount * lengthOfSegment )
813{
814 IOPhysicalAddress address = 0;
815 IOPhysicalLength length = 0;
816
817
818// assert(offset <= _length);
819
820 if ( offset < _length ) // (within bounds?)
821 {
822 unsigned rangesIndex = 0;
823
824 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
825 {
826 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
827 }
828
829 if ( _task == 0 ) // (physical memory?)
830 {
831 address = _ranges.v[rangesIndex].address + offset;
832 length = _ranges.v[rangesIndex].length - offset;
833
834 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
835 {
836 if ( address + length != _ranges.v[rangesIndex].address ) break;
837
838 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
839 }
840 }
841 else // (virtual memory?)
842 {
843 vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;
844
845 assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);
846
847 address = pmap_extract_safe(_task, addressVirtual);
848 length = next_page(addressVirtual) - addressVirtual;
849 length = min(_ranges.v[rangesIndex].length - offset, length);
850 }
851
852 assert(address);
853 if ( address == 0 ) length = 0;
854 }
855
856 if ( lengthOfSegment ) *lengthOfSegment = length;
857
858 return address;
859}
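
/*
 * Illustrative usage (not part of the original file): the usual scatter/gather
 * walk over getPhysicalSegment(), e.g. when building a DMA program. For
 * pageable memory the descriptor should already have been prepare()d.
 */
#if 0 /* usage sketch only */
static void exampleSegmentWalk( IOMemoryDescriptor * md )
{
    IOByteCount offset = 0;
    IOByteCount total  = md->getLength();

    while( offset < total) {
        IOByteCount       segLen;
        IOPhysicalAddress segPhys = md->getPhysicalSegment( offset, &segLen );

        if( !segPhys)
            break;
        // ... program one scatter/gather element: (segPhys, segLen) ...
        offset += segLen;
    }
}
#endif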
860
861IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount offset,
862 IOByteCount * lengthOfSegment )
863{
864 IOPhysicalAddress address = 0;
865 IOPhysicalLength length = 0;
866
867 assert(offset <= _length);
868
869 if ( offset < _length ) // (within bounds?)
870 {
871 unsigned rangesIndex = 0;
872
873 for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
874 {
875 offset -= _ranges.v[rangesIndex].length; // (make offset relative)
876 }
877
878 address = _ranges.v[rangesIndex].address + offset;
879 length = _ranges.v[rangesIndex].length - offset;
880
881 for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
882 {
883 if ( address + length != _ranges.v[rangesIndex].address ) break;
884
885 length += _ranges.v[rangesIndex].length; // (coalesce ranges)
886 }
887
888 assert(address);
889 if ( address == 0 ) length = 0;
890 }
891
892 if ( lengthOfSegment ) *lengthOfSegment = length;
893
894 return address;
895}
896
897/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
898/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
899/* DEPRECATED */ IOByteCount * lengthOfSegment)
900/* DEPRECATED */ {
901/* DEPRECATED */ if( offset != _position)
902/* DEPRECATED */ setPosition( offset );
903/* DEPRECATED */
904/* DEPRECATED */ assert(_position <= _length);
905/* DEPRECATED */
906/* DEPRECATED */ /* Fail gracefully if the position is at (or past) the end-of-buffer. */
907/* DEPRECATED */ if (_position >= _length)
908/* DEPRECATED */ {
909/* DEPRECATED */ *lengthOfSegment = 0;
910/* DEPRECATED */ return 0;
911/* DEPRECATED */ }
912/* DEPRECATED */
913/* DEPRECATED */ /* Compute the relative length to the end of this virtual segment. */
914/* DEPRECATED */ *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
915/* DEPRECATED */
916/* DEPRECATED */ /* Compute the relative address of this virtual segment. */
917/* DEPRECATED */ if (_task == kernel_task)
918/* DEPRECATED */ return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
919/* DEPRECATED */ else
920/* DEPRECATED */ {
921/* DEPRECATED */ vm_offset_t off;
922/* DEPRECATED */
923/* DEPRECATED */ mapIntoKernel(_positionAtIndex);
924/* DEPRECATED */
925/* DEPRECATED */ off = _ranges.v[_kernPtrAtIndex].address;
926/* DEPRECATED */ off -= trunc_page(off);
927/* DEPRECATED */
928/* DEPRECATED */ return (void *) (_kernPtrAligned + off + _positionAtOffset);
929/* DEPRECATED */ }
930/* DEPRECATED */ }
931/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
932
933/*
934 * prepare
935 *
936 * Prepare the memory for an I/O transfer. This involves paging in
937 * the memory, if necessary, and wiring it down for the duration of
938 * the transfer. The complete() method completes the processing of
939 * the memory after the I/O transfer finishes. This method needn't be
940 * called for non-pageable memory.
941 */
942IOReturn IOGeneralMemoryDescriptor::prepare(
943 IODirection forDirection = kIODirectionNone)
944{
945 UInt rangeIndex = 0;
946
947 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
948 kern_return_t rc;
949
950 if(forDirection == kIODirectionNone)
951 forDirection = _direction;
952
953 vm_prot_t access;
954
955 switch (forDirection)
956 {
957 case kIODirectionIn:
958 access = VM_PROT_WRITE;
959 break;
960
961 case kIODirectionOut:
962 access = VM_PROT_READ;
963 break;
964
965 default:
966 access = VM_PROT_READ | VM_PROT_WRITE;
967 break;
968 }
969
970 //
971 // Check user read/write access to the data buffer.
972 //
973
974 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
975 {
976 vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
977 vm_size_t checkSize = round_page(_ranges.v[rangeIndex].length );
978
979 while (checkSize)
980 {
981 vm_region_basic_info_data_t regionInfo;
982 mach_msg_type_number_t regionInfoSize = sizeof(regionInfo);
983 vm_size_t regionSize;
984
985 if ( (vm_region(
986 /* map */ getMapForTask(_task, checkBase),
987 /* address */ &checkBase,
988 /* size */ &regionSize,
989 /* flavor */ VM_REGION_BASIC_INFO,
990 /* info */ (vm_region_info_t) &regionInfo,
991 /* info size */ &regionInfoSize,
992 /* object name */ 0 ) != KERN_SUCCESS ) ||
993 ( (forDirection & kIODirectionIn ) &&
994 !(regionInfo.protection & VM_PROT_WRITE) ) ||
995 ( (forDirection & kIODirectionOut) &&
996 !(regionInfo.protection & VM_PROT_READ ) ) )
997 {
998 return kIOReturnVMError;
999 }
1000
1001 assert((regionSize & PAGE_MASK) == 0);
1002
1003 regionSize = min(regionSize, checkSize);
1004 checkSize -= regionSize;
1005 checkBase += regionSize;
1006 } // (for each vm region)
1007 } // (for each io range)
1008
1009 for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
1010
1011 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
1012 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
1013 _ranges.v[rangeIndex].length +
1014 page_size - 1);
1015
1016 vm_map_t taskVMMap = getMapForTask(_task, srcAlign);
1017
1018 // If this I/O is for a user land task then protect ourselves
1019 // against COW and other vm_shenanigans
1020 if (_task && _task != kernel_task) {
1021 // setup a data object to hold the 'named' memory regions
1022 // @@@ gvdl: If we fail to allocate an OSData we will just
1023 // hope for the best for the time being. Lets not fail a
1024 // prepare at this late stage in product release.
1025 if (!_memoryEntries)
1026 _memoryEntries = OSData::withCapacity(16);
1027 if (_memoryEntries) {
1028 vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
1029 vm_object_offset_t entryStart = srcAlign;
1030 ipc_port_t memHandle;
1031
1032 do {
1033 vm_object_offset_t actualSize = desiredSize;
1034
1035 rc = mach_make_memory_entry_64
1036 (taskVMMap, &actualSize, entryStart,
1037 forDirection, &memHandle, NULL);
1038 if (KERN_SUCCESS != rc) {
1039 IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
1040 goto abortExit;
1041 }
1042
1043 _memoryEntries->
1044 appendBytes(&memHandle, sizeof(memHandle));
1045 desiredSize -= actualSize;
1046 entryStart += actualSize;
1047 } while (desiredSize);
1048 }
1049 }
1050
1051 rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
1052 if (KERN_SUCCESS != rc) {
1053 IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
1054 goto abortExit;
1055 }
1056 }
1057 }
1058 _wireCount++;
1059 return kIOReturnSuccess;
1060
1061abortExit:
1062 UInt doneIndex;
1063
1064
1065 for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
1066 vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
1067 IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
1068 _ranges.v[doneIndex].length +
1069 page_size - 1);
1070
1071 vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
1072 srcAlignEnd, FALSE);
1073 }
1074
1075 if (_memoryEntries) {
1076 ipc_port_t *handles, *handlesEnd;
1077
1078 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
1079 handlesEnd = (ipc_port_t *)
1080 ((vm_address_t) handles + _memoryEntries->getLength());
1081 while (handles < handlesEnd)
1082 ipc_port_release_send(*handles++);
1083 _memoryEntries->release();
1084 _memoryEntries = 0;
1085 }
1086
1087 return kIOReturnVMError;
1088}
1089
1090/*
1091 * complete
1092 *
1093 * Complete processing of the memory after an I/O transfer finishes.
1094 * This method should not be called unless a prepare was previously
1095 * issued; the prepare() and complete() calls must occur in pairs,
1096 * before and after an I/O transfer involving pageable memory.
1097 */
1098
1099IOReturn IOGeneralMemoryDescriptor::complete(
1100 IODirection forDirection = kIODirectionNone)
1101{
1102 assert(_wireCount);
1103
1104 if(0 == _wireCount)
1105 return kIOReturnSuccess;
1106
1107 _wireCount--;
1108 if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
1109 UInt rangeIndex;
1110 kern_return_t rc;
1111
1112 if(forDirection == kIODirectionNone)
1113 forDirection = _direction;
1114
1115 for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {
1116
1117 vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
1118 IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
1119 _ranges.v[rangeIndex].length +
1120 page_size - 1);
1121
1122 if(forDirection == kIODirectionIn)
1123 pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);
1124
1125 rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
1126 srcAlignEnd, FALSE);
1127 if(rc != KERN_SUCCESS)
1128 IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
1129 }
1130
1131 if (_memoryEntries) {
1132 ipc_port_t *handles, *handlesEnd;
1133
1134 handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
1135 handlesEnd = (ipc_port_t *)
1136 ((vm_address_t) handles + _memoryEntries->getLength());
1137 while (handles < handlesEnd)
1138 ipc_port_release_send(*handles++);
1139
1140 _memoryEntries->release();
1141 _memoryEntries = 0;
1142 }
1143 }
1144 return kIOReturnSuccess;
1145}
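
/*
 * Illustrative usage (not part of the original file): the prepare()/complete()
 * pairing described above, bracketing a transfer on pageable memory.
 */
#if 0 /* usage sketch only */
static IOReturn exampleWiredTransfer( IOMemoryDescriptor * md )
{
    IOReturn err = md->prepare();               // page in and wire the buffer
    if( kIOReturnSuccess != err)
        return( err );

    // ... perform the I/O, e.g. walk getPhysicalSegment() for DMA ...

    md->complete();                             // balance the prepare()
    return( kIOReturnSuccess );
}
#endif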
1146
1147IOReturn IOGeneralMemoryDescriptor::doMap(
1148 vm_map_t addressMap,
1149 IOVirtualAddress * atAddress,
1150 IOOptionBits options,
1151 IOByteCount sourceOffset = 0,
1152 IOByteCount length = 0 )
1153{
1154 kern_return_t kr;
1155 ipc_port_t sharedMem = (ipc_port_t) _memEntry;
1156
1157 // mapping source == dest? (could be much better)
1158 if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
1159 && (1 == _rangesCount) && (0 == sourceOffset)
1160 && (length <= _ranges.v[0].length) ) {
1161 *atAddress = _ranges.v[0].address;
1162 return( kIOReturnSuccess );
1163 }
1164
1165 if( 0 == sharedMem) {
1166
1167 vm_size_t size = 0;
1168
1169 for (unsigned index = 0; index < _rangesCount; index++)
1170 size += round_page(_ranges.v[index].address + _ranges.v[index].length)
1171 - trunc_page(_ranges.v[index].address);
1172
1173 if( _task) {
1174#ifndef i386
1175 vm_size_t actualSize = size;
1176 kr = mach_make_memory_entry( get_task_map(_task),
1177 &actualSize, _ranges.v[0].address,
1178 VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
1179 NULL );
1180
1181 if( (KERN_SUCCESS == kr) && (actualSize != round_page(size))) {
1182#if IOASSERT
1183 IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
1184 _ranges.v[0].address, (UInt32)actualSize, size);
1185#endif
1186 kr = kIOReturnVMError;
1187 ipc_port_release_send( sharedMem );
1188 }
1189
1190 if( KERN_SUCCESS != kr)
1191#endif /* i386 */
1192 sharedMem = MACH_PORT_NULL;
1193
1194 } else do {
1195
1196 memory_object_t pager;
1197 unsigned int flags=0;
1198 struct phys_entry *pp;
1199 IOPhysicalAddress pa;
1200 IOPhysicalLength segLen;
1201
1202 pa = getPhysicalSegment( sourceOffset, &segLen );
1203
1204 if( !reserved) {
1205 reserved = IONew( ExpansionData, 1 );
1206 if( !reserved)
1207 continue;
1208 }
1209 reserved->pagerContig = (1 == _rangesCount);
1210 reserved->memory = this;
1211
1212#ifndef i386
1213 switch(options & kIOMapCacheMask ) { /*What cache mode do we need*/
1214
1215 case kIOMapDefaultCache:
1216 default:
1217 if((pp = pmap_find_physentry(pa))) {/* Find physical address */
1218 /* Use physical attributes as default */
1219 flags = IOTranslateCacheBits(pp);
1220
1221 }
1222 else { /* If no physical, just hard code attributes */
1223 flags = DEVICE_PAGER_CACHE_INHIB |
1224 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1225 }
1226 break;
1227
1228 case kIOMapInhibitCache:
1229 flags = DEVICE_PAGER_CACHE_INHIB |
1230 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1231 break;
1232
1233 case kIOMapWriteThruCache:
1234 flags = DEVICE_PAGER_WRITE_THROUGH |
1235 DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
1236 break;
1237
1238 case kIOMapCopybackCache:
1239 flags = DEVICE_PAGER_COHERENT;
1240 break;
1241 }
1242
1243 flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1244#else
1245 flags = reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;
1246#endif
1247
1248 pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
1249 size, flags);
1250 assert( pager );
1251
1252 if( pager) {
1253 kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
1254 size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );
1255
1256 assert( KERN_SUCCESS == kr );
1257 if( KERN_SUCCESS != kr) {
1258 device_pager_deallocate( pager );
1259 pager = MACH_PORT_NULL;
1260 sharedMem = MACH_PORT_NULL;
1261 }
1262 }
1263 if( pager && sharedMem)
1264 reserved->devicePager = pager;
1265 else {
1266 IODelete( reserved, ExpansionData, 1 );
1267 reserved = 0;
1268 }
1269
1270 } while( false );
1271
1272 _memEntry = (void *) sharedMem;
1273 }
1274
1275#ifndef i386
1276 if( 0 == sharedMem)
1277 kr = kIOReturnVMError;
1278 else
1279#endif
1280 kr = super::doMap( addressMap, atAddress,
1281 options, sourceOffset, length );
1282
1283 return( kr );
1284}
1285
1286IOReturn IOGeneralMemoryDescriptor::doUnmap(
1287 vm_map_t addressMap,
1288 IOVirtualAddress logical,
1289 IOByteCount length )
1290{
1291 // could be much better
1292 if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
1293 && (logical == _ranges.v[0].address)
1294 && (length <= _ranges.v[0].length) )
1295 return( kIOReturnSuccess );
1296
1297 return( super::doUnmap( addressMap, logical, length ));
1298}
1299
1300/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1301
1302extern "C" {
1303// osfmk/device/iokit_rpc.c
1304extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
1305 vm_size_t length, unsigned int mapFlags);
1306extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
1307};
1308
1309/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1310
1311OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )
1312
1313/* inline function implementation */
1314IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
1315 { return( getPhysicalSegment( 0, 0 )); }
1316
1317/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1318
1319class _IOMemoryMap : public IOMemoryMap
1320{
1321 OSDeclareDefaultStructors(_IOMemoryMap)
1322
1323 IOMemoryDescriptor * memory;
1324 IOMemoryMap * superMap;
1325 IOByteCount offset;
1326 IOByteCount length;
1327 IOVirtualAddress logical;
1328 task_t addressTask;
1329 vm_map_t addressMap;
1330 IOOptionBits options;
1331
1332protected:
1333 virtual void taggedRelease(const void *tag = 0) const;
1334 virtual void free();
1335
1336public:
1337
1338 // IOMemoryMap methods
1339 virtual IOVirtualAddress getVirtualAddress();
1340 virtual IOByteCount getLength();
1341 virtual task_t getAddressTask();
1342 virtual IOMemoryDescriptor * getMemoryDescriptor();
1343 virtual IOOptionBits getMapOptions();
1344
1345 virtual IOReturn unmap();
1346 virtual void taskDied();
1347
1348 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1349 IOByteCount * length);
1350
1351 // for IOMemoryDescriptor use
1352 _IOMemoryMap * copyCompatible(
1353 IOMemoryDescriptor * owner,
1354 task_t intoTask,
1355 IOVirtualAddress toAddress,
1356 IOOptionBits options,
1357 IOByteCount offset,
1358 IOByteCount length );
1359
1360 bool initCompatible(
1361 IOMemoryDescriptor * memory,
1362 IOMemoryMap * superMap,
1363 IOByteCount offset,
1364 IOByteCount length );
1365
1366 bool initWithDescriptor(
1367 IOMemoryDescriptor * memory,
1368 task_t intoTask,
1369 IOVirtualAddress toAddress,
1370 IOOptionBits options,
1371 IOByteCount offset,
1372 IOByteCount length );
1373
1374 IOReturn redirect(
1375 task_t intoTask, bool redirect );
1376};
1377
1378/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1379
1380#undef super
1381#define super IOMemoryMap
1382
1383OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)
1384
1385/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1386
1387bool _IOMemoryMap::initCompatible(
1388 IOMemoryDescriptor * _memory,
1389 IOMemoryMap * _superMap,
1390 IOByteCount _offset,
1391 IOByteCount _length )
1392{
1393
1394 if( !super::init())
1395 return( false);
1396
1397 if( (_offset + _length) > _superMap->getLength())
1398 return( false);
1399
1400 _memory->retain();
1401 memory = _memory;
1402 _superMap->retain();
1403 superMap = _superMap;
1404
1405 offset = _offset;
1406 if( _length)
1407 length = _length;
1408 else
1409 length = _memory->getLength();
1410
1411 options = superMap->getMapOptions();
1412 logical = superMap->getVirtualAddress() + offset;
1413
1414 return( true );
1415}
1416
1417bool _IOMemoryMap::initWithDescriptor(
1418 IOMemoryDescriptor * _memory,
1419 task_t intoTask,
1420 IOVirtualAddress toAddress,
1421 IOOptionBits _options,
1422 IOByteCount _offset,
1423 IOByteCount _length )
1424{
1425 bool ok;
1426
1427 if( (!_memory) || (!intoTask) || !super::init())
1428 return( false);
1429
1430 if( (_offset + _length) > _memory->getLength())
1431 return( false);
1432
1433 addressMap = get_task_map(intoTask);
1434 if( !addressMap)
1435 return( false);
1436 vm_map_reference(addressMap);
1437
1438 _memory->retain();
1439 memory = _memory;
1440
1441 offset = _offset;
1442 if( _length)
1443 length = _length;
1444 else
1445 length = _memory->getLength();
1446
1447 addressTask = intoTask;
1448 logical = toAddress;
1449 options = _options;
1450
1451 if( options & kIOMapStatic)
1452 ok = true;
1453 else
1454 ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
1455 options, offset, length ));
1456 if( !ok) {
1457 logical = 0;
1458 memory->release();
1459 memory = 0;
1460 vm_map_deallocate(addressMap);
1461 addressMap = 0;
1462 }
1463 return( ok );
1464}
1465
1466struct IOMemoryDescriptorMapAllocRef
1467{
1468 ipc_port_t sharedMem;
1469 vm_size_t size;
1470 vm_offset_t mapped;
1471 IOByteCount sourceOffset;
1472 IOOptionBits options;
1473};
1474
1475static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
1476{
1477 IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
1478 IOReturn err;
1479
1480 do {
1481 if( ref->sharedMem) {
1482 vm_prot_t prot = VM_PROT_READ
1483 | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);
1484
1485 err = vm_map( map,
1486 &ref->mapped,
1487 ref->size, 0 /* mask */,
1488 (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1489 | VM_MAKE_TAG(VM_MEMORY_IOKIT),
1490 ref->sharedMem, ref->sourceOffset,
1491 false, // copy
1492 prot, // cur
1493 prot, // max
1494 VM_INHERIT_NONE);
1495
1496 if( KERN_SUCCESS != err) {
1497 ref->mapped = 0;
1498 continue;
1499 }
1500
1501 } else {
1502
1503 err = vm_allocate( map, &ref->mapped, ref->size,
1504 ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
1505 | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
1506
1507 if( KERN_SUCCESS != err) {
1508 ref->mapped = 0;
1509 continue;
1510 }
1511
1512 // we have to make sure that these guys don't get copied if we fork.
1513 err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
1514 assert( KERN_SUCCESS == err );
1515 }
1516
1517 } while( false );
1518
1519 return( err );
1520}
1521
1522
1523IOReturn IOMemoryDescriptor::doMap(
1524 vm_map_t addressMap,
1525 IOVirtualAddress * atAddress,
1526 IOOptionBits options,
1527 IOByteCount sourceOffset = 0,
1528 IOByteCount length = 0 )
1529{
1530 IOReturn err = kIOReturnSuccess;
1531 memory_object_t pager;
1532 vm_address_t logical;
1533 IOByteCount pageOffset;
1534 IOPhysicalAddress sourceAddr;
1535 IOMemoryDescriptorMapAllocRef ref;
1536
1537 ref.sharedMem = (ipc_port_t) _memEntry;
1538 ref.sourceOffset = sourceOffset;
1539 ref.options = options;
1540
1541 do {
1542
1543 if( 0 == length)
1544 length = getLength();
1545
1546 sourceAddr = getSourceSegment( sourceOffset, NULL );
1547 assert( sourceAddr );
1548 pageOffset = sourceAddr - trunc_page( sourceAddr );
1549
1550 ref.size = round_page( length + pageOffset );
1551
1552 logical = *atAddress;
1553 if( options & kIOMapAnywhere)
1554 // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
1555 ref.mapped = 0;
1556 else {
1557 ref.mapped = trunc_page( logical );
1558 if( (logical - ref.mapped) != pageOffset) {
1559 err = kIOReturnVMError;
1560 continue;
1561 }
1562 }
1563
1564 if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
1565 err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
1566 else
1567 err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
1568
1569 if( err != KERN_SUCCESS)
1570 continue;
1571
1572 if( reserved)
1573 pager = (memory_object_t) reserved->devicePager;
1574 else
1575 pager = MACH_PORT_NULL;
1576
1577 if( !ref.sharedMem || pager )
1578 err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );
1579
1580 } while( false );
1581
1582 if( err != KERN_SUCCESS) {
1583 if( ref.mapped)
1584 doUnmap( addressMap, ref.mapped, ref.size );
1585 *atAddress = NULL;
1586 } else
1587 *atAddress = ref.mapped + pageOffset;
1588
1589 return( err );
1590}
1591
1592enum {
1593 kIOMemoryRedirected = 0x00010000
1594};
1595
1596IOReturn IOMemoryDescriptor::handleFault(
1597 void * _pager,
1598 vm_map_t addressMap,
1599 IOVirtualAddress address,
1600 IOByteCount sourceOffset,
1601 IOByteCount length,
1602 IOOptionBits options )
1603{
1604 IOReturn err = kIOReturnSuccess;
1605 memory_object_t pager = (memory_object_t) _pager;
1606 vm_size_t size;
1607 vm_size_t bytes;
1608 vm_size_t page;
1609 IOByteCount pageOffset;
1610 IOPhysicalLength segLen;
1611 IOPhysicalAddress physAddr;
1612
1613 if( !addressMap) {
1614
1615 if( kIOMemoryRedirected & _flags) {
1616#ifdef DEBUG
1617 IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
1618#endif
1619 do {
1620 SLEEP;
1621 } while( kIOMemoryRedirected & _flags );
1622 }
1623
1624 return( kIOReturnSuccess );
1625 }
1626
1627 physAddr = getPhysicalSegment( sourceOffset, &segLen );
1628 assert( physAddr );
1629 pageOffset = physAddr - trunc_page( physAddr );
1630
1631 size = length + pageOffset;
1632 physAddr -= pageOffset;
1633
1634 segLen += pageOffset;
1635 bytes = size;
1636 do {
1637 // in the middle of the loop only map whole pages
1638 if( segLen >= bytes)
1639 segLen = bytes;
1640 else if( segLen != trunc_page( segLen))
1641 err = kIOReturnVMError;
1642 if( physAddr != trunc_page( physAddr))
1643 err = kIOReturnBadArgument;
1644
1645#ifdef DEBUG
1646 if( kIOLogMapping & gIOKitDebug)
1647 IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n",
1648 addressMap, address + pageOffset, physAddr + pageOffset,
1649 segLen - pageOffset);
1650#endif
1651
1652
1653
1654
1655
1656#ifdef i386
1657 /* i386 doesn't support faulting on device memory yet */
1658 if( addressMap && (kIOReturnSuccess == err))
1659 err = IOMapPages( addressMap, address, physAddr, segLen, options );
1660 assert( KERN_SUCCESS == err );
1661 if( err)
1662 break;
1663#endif
1664
1665 if( pager) {
1666 if( reserved && reserved->pagerContig) {
1667 IOPhysicalLength allLen;
1668 IOPhysicalAddress allPhys;
1669
1670 allPhys = getPhysicalSegment( 0, &allLen );
1671 assert( allPhys );
1672 err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allLen) );
1673
1674 } else {
1675
1676 for( page = 0;
1677 (page < segLen) && (KERN_SUCCESS == err);
1678 page += page_size) {
1679 err = device_pager_populate_object( pager, sourceOffset + page,
1680 physAddr + page, page_size );
1681 }
1682 }
1683 assert( KERN_SUCCESS == err );
1684 if( err)
1685 break;
1686 }
1687#ifndef i386
1688 /* *** ALERT *** */
1689 /* *** Temporary Workaround *** */
1690
1691 /* This call to vm_fault causes an early pmap level resolution */
1692 /* of the mappings created above. Need for this is in absolute */
1693 /* violation of the basic tenet that the pmap layer is a cache. */
1694 /* Further, it implies a serious I/O architectural violation on */
1695 /* the part of some user of the mapping. As of this writing, */
1696 /* the call to vm_fault is needed because the NVIDIA driver */
1697 /* makes a call to pmap_extract. The NVIDIA driver needs to be */
1698 /* fixed as soon as possible. The NVIDIA driver should not */
1699 /* need to query for this info as it should know from the doMap */
1700 /* call where the physical memory is mapped. When a query is */
1701 /* necessary to find a physical mapping, it should be done */
1702 /* through an iokit call which includes the mapped memory */
1703 /* handle. This is required for machine architecture independence.*/
1704
1705 if(!(kIOMemoryRedirected & _flags)) {
1706 vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
1707 }
1708
1709 /* *** Temporary Workaround *** */
1710 /* *** ALERT *** */
1711#endif
1712 sourceOffset += segLen - pageOffset;
1713 address += segLen;
1714 bytes -= segLen;
1715 pageOffset = 0;
1716
1717 } while( bytes
1718 && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));
1719
1720 if( bytes)
1721 err = kIOReturnBadArgument;
1722
1723 return( err );
1724}
1725
1726IOReturn IOMemoryDescriptor::doUnmap(
1727 vm_map_t addressMap,
1728 IOVirtualAddress logical,
1729 IOByteCount length )
1730{
1731 IOReturn err;
1732
1733#ifdef DEBUG
1734 if( kIOLogMapping & gIOKitDebug)
1735 kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
1736 addressMap, logical, length );
1737#endif
1738
1739 if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) {
1740
1741 if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
1742 addressMap = IOPageableMapForAddress( logical );
1743
1744 err = vm_deallocate( addressMap, logical, length );
1745
1746 } else
1747 err = kIOReturnSuccess;
1748
1749 return( err );
1750}
1751
1752IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1753{
1754 IOReturn err;
1755 _IOMemoryMap * mapping = 0;
1756 OSIterator * iter;
1757
1758 LOCK;
1759
1760 do {
1761 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
1762 while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
1763 mapping->redirect( safeTask, redirect );
1764
1765 iter->release();
1766 }
1767 } while( false );
1768
1769 if( redirect)
1770 _flags |= kIOMemoryRedirected;
1771 else {
1772 _flags &= ~kIOMemoryRedirected;
1773 WAKEUP;
1774 }
1775
1776 UNLOCK;
1777
1778 // temporary binary compatibility
1779 IOSubMemoryDescriptor * subMem;
1780 if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
1781 err = subMem->redirect( safeTask, redirect );
1782 else
1783 err = kIOReturnSuccess;
1784
1785 return( err );
1786}
1787
1788IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
1789{
1790 return( _parent->redirect( safeTask, redirect ));
1791}
1792
1793IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
1794{
1795 IOReturn err = kIOReturnSuccess;
1796
1797 if( superMap) {
1798// err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
1799 } else {
1800
1801 LOCK;
1802 if( logical && addressMap
1803 && (get_task_map( safeTask) != addressMap)
1804 && (0 == (options & kIOMapStatic))) {
1805
1806 IOUnmapPages( addressMap, logical, length );
1807 if( !redirect) {
1808 err = vm_deallocate( addressMap, logical, length );
1809 err = memory->doMap( addressMap, &logical,
1810 (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
1811 offset, length );
1812 } else
1813 err = kIOReturnSuccess;
1814#ifdef DEBUG
1815 IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
1816#endif
1817 }
1818 UNLOCK;
1819 }
1820
1821 return( err );
1822}
1823
1824IOReturn _IOMemoryMap::unmap( void )
1825{
1826 IOReturn err;
1827
1828 LOCK;
1829
1830 if( logical && addressMap && (0 == superMap)
1831 && (0 == (options & kIOMapStatic))) {
1832
1833 err = memory->doUnmap( addressMap, logical, length );
1834 vm_map_deallocate(addressMap);
1835 addressMap = 0;
1836
1837 } else
1838 err = kIOReturnSuccess;
1839
1840 logical = 0;
1841
1842 UNLOCK;
1843
1844 return( err );
1845}
1846
1847void _IOMemoryMap::taskDied( void )
1848{
1849 LOCK;
1850 if( addressMap) {
1851 vm_map_deallocate(addressMap);
1852 addressMap = 0;
1853 }
1854 addressTask = 0;
1855 logical = 0;
1856 UNLOCK;
1857}
1858
1859// Overload the release mechanism. All mappings must be a member
1860// of a memory descriptors _mappings set. This means that we
1861// always have 2 references on a mapping. When either of these mappings
1862// are released we need to free ourselves.
1863void _IOMemoryMap::taggedRelease(const void *tag = 0) const
1864{
1865 super::taggedRelease(tag, 2);
1866}
1867
1868void _IOMemoryMap::free()
1869{
1870 unmap();
1871
1872 if( memory) {
1873 LOCK;
1874 memory->removeMapping( this);
1875 UNLOCK;
1876 memory->release();
1877 }
1878
1879 if( superMap)
1880 superMap->release();
1881
1882 super::free();
1883}
1884
1885IOByteCount _IOMemoryMap::getLength()
1886{
1887 return( length );
1888}
1889
1890IOVirtualAddress _IOMemoryMap::getVirtualAddress()
1891{
1892 return( logical);
1893}
1894
1895task_t _IOMemoryMap::getAddressTask()
1896{
1897 if( superMap)
1898 return( superMap->getAddressTask());
1899 else
1900 return( addressTask);
1901}
1902
1903IOOptionBits _IOMemoryMap::getMapOptions()
1904{
1905 return( options);
1906}
1907
1908IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
1909{
1910 return( memory );
1911}
1912
9bccf70c 1913_IOMemoryMap * _IOMemoryMap::copyCompatible(
1914 IOMemoryDescriptor * owner,
1915 task_t task,
1916 IOVirtualAddress toAddress,
1917 IOOptionBits _options,
1918 IOByteCount _offset,
1919 IOByteCount _length )
1920{
1921 _IOMemoryMap * mapping;
1922
1923 if( (!task) || (task != getAddressTask()))
1924 return( 0 );
1925 if( (options ^ _options) & kIOMapReadOnly)
1926 return( 0 );
1927 if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
1928 && ((options ^ _options) & kIOMapCacheMask))
1929 return( 0 );
1930
1931 if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
1932 return( 0 );
1933
1934 if( _offset < offset)
1935 return( 0 );
1936
1937 _offset -= offset;
1938
1939 if( (_offset + _length) > length)
1940 return( 0 );
1941
1942 if( (length == _length) && (!_offset)) {
1943 retain();
1944 mapping = this;
1945
1946 } else {
1947 mapping = new _IOMemoryMap;
1948 if( mapping
9bccf70c 1949 && !mapping->initCompatible( owner, this, _offset, _length )) {
1950 mapping->release();
1951 mapping = 0;
1952 }
1953 }
1954
1955 return( mapping );
1956}
1957
1958IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
1959 IOPhysicalLength * length)
1960{
1961 IOPhysicalAddress address;
1962
1963 LOCK;
1964 address = memory->getPhysicalSegment( offset + _offset, length );
1965 UNLOCK;
1966
1967 return( address );
1968}
1969
1970/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1971
1972#undef super
1973#define super OSObject
1974
1975/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1976
1977void IOMemoryDescriptor::initialize( void )
1978{
1979 if( 0 == gIOMemoryLock)
1980 gIOMemoryLock = IORecursiveLockAlloc();
1981}
1982
1983void IOMemoryDescriptor::free( void )
1984{
1985 if( _mappings)
1986 _mappings->release();
1987
1988 super::free();
1989}
1990
1991IOMemoryMap * IOMemoryDescriptor::setMapping(
1992 task_t intoTask,
1993 IOVirtualAddress mapAddress,
1994 IOOptionBits options = 0 )
1995{
1996 _IOMemoryMap * map;
1997
1998 map = new _IOMemoryMap;
1999
2000 LOCK;
2001
2002 if( map
9bccf70c 2003 && !map->initWithDescriptor( this, intoTask, mapAddress,
2004 options | kIOMapStatic, 0, getLength() )) {
2005 map->release();
2006 map = 0;
2007 }
2008
2009 addMapping( map);
2010
2011 UNLOCK;
2012
2013 return( map);
2014}
2015
2016IOMemoryMap * IOMemoryDescriptor::map(
2017 IOOptionBits options = 0 )
2018{
2019
2020 return( makeMapping( this, kernel_task, 0,
2021 options | kIOMapAnywhere,
2022 0, getLength() ));
2023}
2024
2025IOMemoryMap * IOMemoryDescriptor::map(
2026 task_t intoTask,
2027 IOVirtualAddress toAddress,
2028 IOOptionBits options,
2029 IOByteCount offset = 0,
2030 IOByteCount length = 0 )
2031{
2032 if( 0 == length)
2033 length = getLength();
2034
2035 return( makeMapping( this, intoTask, toAddress, options, offset, length ));
2036}
2037
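/*
 * Editor's illustrative sketch (not part of the original file): typical use of
 * the mapping interface above.  "md" is assumed to be an existing descriptor
 * created elsewhere by one of the IOMemoryDescriptor factory methods.
 */
#if 0
static void exampleMapIntoKernel( IOMemoryDescriptor * md )
{
    IOMemoryMap * map = md->map();              // kernel_task, kIOMapAnywhere
    if( map) {
        IOVirtualAddress va = map->getVirtualAddress();
        IOLog("mapped %d bytes at %08x\n",
              (int) map->getLength(), (unsigned int) va);
        map->release();                         // last client reference; unmaps
    }
}
#endif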
2038IOMemoryMap * IOMemoryDescriptor::makeMapping(
2039 IOMemoryDescriptor * owner,
2040 task_t intoTask,
2041 IOVirtualAddress toAddress,
2042 IOOptionBits options,
2043 IOByteCount offset,
2044 IOByteCount length )
2045{
2046 _IOMemoryMap * mapping = 0;
2047 OSIterator * iter;
2048
2049 LOCK;
2050
2051 do {
2052 // look for an existing mapping
2053 if( (iter = OSCollectionIterator::withCollection( _mappings))) {
2054
2055 while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {
2056
9bccf70c 2057 if( (mapping = mapping->copyCompatible(
2058 owner, intoTask, toAddress,
2059 options | kIOMapReference,
2060 offset, length )))
2061 break;
2062 }
2063 iter->release();
2064 if( mapping)
2065 continue;
2066 }
2067
2068
2069 if( mapping || (options & kIOMapReference))
2070 continue;
2071
2072 owner = this;
2073
2074 mapping = new _IOMemoryMap;
2075 if( mapping
9bccf70c 2076 && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
1c79356b 2077 offset, length )) {
9bccf70c 2078#ifdef DEBUG
1c79356b 2079 IOLog("Didn't make map %08lx : %08lx\n", offset, length );
9bccf70c 2080#endif
2081 mapping->release();
2082 mapping = 0;
2083 }
2084
2085 } while( false );
2086
2087 owner->addMapping( mapping);
2088
2089 UNLOCK;
2090
2091 return( mapping);
2092}
2093
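/*
 * Editor's note (illustrative, not original source): because makeMapping()
 * first offers the request to each existing mapping via copyCompatible(), a
 * second, compatible map() call can hand back the same _IOMemoryMap, retained,
 * instead of creating a new VM mapping.
 */
#if 0
static void exampleMappingReuse( IOMemoryDescriptor * md )
{
    IOMemoryMap * first  = md->map();   // creates the kernel mapping
    IOMemoryMap * second = md->map();   // compatible request: the existing
                                        // mapping is retained and returned
    second->release();
    first->release();
}
#endif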
2094void IOMemoryDescriptor::addMapping(
2095 IOMemoryMap * mapping )
2096{
2097 if( mapping) {
2098 if( 0 == _mappings)
2099 _mappings = OSSet::withCapacity(1);
2100 if( _mappings )
2101 _mappings->setObject( mapping );
2102 }
2103}
2104
2105void IOMemoryDescriptor::removeMapping(
2106 IOMemoryMap * mapping )
2107{
9bccf70c 2108 if( _mappings)
1c79356b 2109 _mappings->removeObject( mapping);
2110}
2111
2112/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2113
2114#undef super
2115#define super IOMemoryDescriptor
2116
2117OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)
2118
2119/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2120
2121bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
2122 IOByteCount offset, IOByteCount length,
2123 IODirection withDirection )
2124{
2125 if( !super::init())
2126 return( false );
2127
2128 if( !parent)
2129 return( false);
2130
2131 if( (offset + length) > parent->getLength())
2132 return( false);
2133
2134 parent->retain();
2135 _parent = parent;
2136 _start = offset;
2137 _length = length;
2138 _direction = withDirection;
2139 _tag = parent->getTag();
2140
2141 return( true );
2142}
2143
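/*
 * Editor's illustrative sketch (not part of the original file): constructing a
 * sub-range descriptor directly with initSubRange() above.  Callers would
 * normally go through an IOMemoryDescriptor factory method; this form simply
 * mirrors the init routine and assumes the parent's direction is reused.
 */
#if 0
static IOMemoryDescriptor * exampleSubRange( IOMemoryDescriptor * parent,
                                             IOByteCount offset, IOByteCount length )
{
    IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;

    if( sub && !sub->initSubRange( parent, offset, length, parent->getDirection())) {
        sub->release();
        sub = 0;
    }
    return( sub );
}
#endif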
2144void IOSubMemoryDescriptor::free( void )
2145{
2146 if( _parent)
2147 _parent->release();
2148
2149 super::free();
2150}
2151
2152
2153IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
2154 IOByteCount * length )
2155{
2156 IOPhysicalAddress address;
2157 IOByteCount actualLength;
2158
2159 assert(offset <= _length);
2160
2161 if( length)
2162 *length = 0;
2163
2164 if( offset >= _length)
2165 return( 0 );
2166
2167 address = _parent->getPhysicalSegment( offset + _start, &actualLength );
2168
2169 if( address && length)
2170 *length = min( _length - offset, actualLength );
2171
2172 return( address );
2173}
2174
2175IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
2176 IOByteCount * length )
2177{
2178 IOPhysicalAddress address;
2179 IOByteCount actualLength;
2180
2181 assert(offset <= _length);
2182
2183 if( length)
2184 *length = 0;
2185
2186 if( offset >= _length)
2187 return( 0 );
2188
2189 address = _parent->getSourceSegment( offset + _start, &actualLength );
2190
2191 if( address && length)
2192 *length = min( _length - offset, actualLength );
2193
2194 return( address );
2195}
2196
2197void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
2198 IOByteCount * lengthOfSegment)
2199{
2200 return( 0 );
2201}
2202
2203IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
2204 void * bytes, IOByteCount withLength)
2205{
2206 IOByteCount byteCount;
2207
2208 assert(offset <= _length);
2209
2210 if( offset >= _length)
2211 return( 0 );
2212
2213 LOCK;
2214 byteCount = _parent->readBytes( _start + offset, bytes,
2215 min(withLength, _length - offset) );
2216 UNLOCK;
2217
2218 return( byteCount );
2219}
2220
2221IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
2222 const void* bytes, IOByteCount withLength)
2223{
2224 IOByteCount byteCount;
2225
2226 assert(offset <= _length);
2227
2228 if( offset >= _length)
2229 return( 0 );
2230
2231 LOCK;
2232 byteCount = _parent->writeBytes( _start + offset, bytes,
2233 min(withLength, _length - offset) );
2234 UNLOCK;
2235
2236 return( byteCount );
2237}
2238
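/*
 * Editor's illustrative sketch (not part of the original file): readBytes()
 * and writeBytes() on a sub-range descriptor are clipped to the sub-range and
 * forwarded to the parent at (_start + offset), as implemented above.
 */
#if 0
static void exampleCopyOut( IOMemoryDescriptor * sub, void * buffer, IOByteCount size )
{
    IOByteCount copied = sub->readBytes( 0, buffer, size );
    if( copied < size)
        IOLog("short read: %d of %d bytes\n", (int) copied, (int) size);
}
#endif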
2239IOReturn IOSubMemoryDescriptor::prepare(
2240 IODirection forDirection = kIODirectionNone)
2241{
2242 IOReturn err;
2243
2244 LOCK;
2245 err = _parent->prepare( forDirection);
2246 UNLOCK;
2247
2248 return( err );
2249}
2250
2251IOReturn IOSubMemoryDescriptor::complete(
2252 IODirection forDirection = kIODirectionNone)
2253{
2254 IOReturn err;
2255
2256 LOCK;
2257 err = _parent->complete( forDirection);
2258 UNLOCK;
2259
2260 return( err );
2261}
2262
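/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * prepare() / getPhysicalSegment() / complete() pattern, shown against a
 * sub-range descriptor, which simply forwards prepare and complete to its
 * parent.  Direction and error handling are simplified assumptions.
 */
#if 0
static void exampleSegmentWalk( IOMemoryDescriptor * md )
{
    if( kIOReturnSuccess != md->prepare())
        return;

    IOByteCount       offset = 0;
    IOByteCount       segLen;
    IOPhysicalAddress phys;

    while( (phys = md->getPhysicalSegment( offset, &segLen))) {
        IOLog("segment at %08x, length %d\n", (unsigned int) phys, (int) segLen);
        offset += segLen;
    }

    md->complete();
}
#endif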
2263IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
2264 IOMemoryDescriptor * owner,
2265 task_t intoTask,
2266 IOVirtualAddress toAddress,
2267 IOOptionBits options,
2268 IOByteCount offset,
2269 IOByteCount length )
2270{
2271 IOMemoryMap * mapping;
2272
2273 mapping = (IOMemoryMap *) _parent->makeMapping(
2274 _parent, intoTask,
2275 toAddress - (_start + offset),
2276 options | kIOMapReference,
2277 _start + offset, length );
2278
2279 if( !mapping)
2280 mapping = (IOMemoryMap *) _parent->makeMapping(
2281 _parent, intoTask,
2282 toAddress,
2283 options, _start + offset, length );
2284
2285 if( !mapping)
2286 mapping = super::makeMapping( owner, intoTask, toAddress, options,
2287 offset, length );
2288
2289 return( mapping );
2290}
2291
2292/* ick */
2293
2294bool
2295IOSubMemoryDescriptor::initWithAddress(void * address,
2296 IOByteCount withLength,
2297 IODirection withDirection)
2298{
2299 return( false );
2300}
2301
2302bool
2303IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
2304 IOByteCount withLength,
2305 IODirection withDirection,
2306 task_t withTask)
2307{
2308 return( false );
2309}
2310
2311bool
2312IOSubMemoryDescriptor::initWithPhysicalAddress(
2313 IOPhysicalAddress address,
2314 IOByteCount withLength,
2315 IODirection withDirection )
2316{
2317 return( false );
2318}
2319
2320bool
2321IOSubMemoryDescriptor::initWithRanges(
2322 IOVirtualRange * ranges,
2323 UInt32 withCount,
2324 IODirection withDirection,
2325 task_t withTask,
2326 bool asReference = false)
2327{
2328 return( false );
2329}
2330
2331bool
2332IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
2333 UInt32 withCount,
2334 IODirection withDirection,
2335 bool asReference = false)
2336{
2337 return( false );
2338}
2339
2340/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2341
2342bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
2343{
2344 OSSymbol const *keys[2];
2345 OSObject *values[2];
2346 OSDictionary *dict;
2347 IOVirtualRange *vcopy;
2348 unsigned int index, nRanges;
2349 bool result;
2350
2351 if (s == NULL) return false;
2352 if (s->previouslySerialized(this)) return true;
2353
2354 // Pretend we are an array.
2355 if (!s->addXMLStartTag(this, "array")) return false;
2356
2357 nRanges = _rangesCount;
2358 vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
2359 if (vcopy == 0) return false;
2360
2361 keys[0] = OSSymbol::withCString("address");
2362 keys[1] = OSSymbol::withCString("length");
2363
2364 result = false;
2365 values[0] = values[1] = 0;
2366
2367 // From this point on, error paths can 'goto bail' to clean up.
2368
2369 // Copy the volatile data so we don't have to allocate memory
2370 // while the lock is held.
2371 LOCK;
2372 if (nRanges == _rangesCount) {
2373 for (index = 0; index < nRanges; index++) {
2374 vcopy[index] = _ranges.v[index];
2375 }
2376 } else {
2377 // The descriptor changed out from under us. Give up.
2378 UNLOCK;
2379 result = false;
2380 goto bail;
2381 }
2382 UNLOCK;
2383
2384 for (index = 0; index < nRanges; index++)
2385 {
2386 values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
2387 if (values[0] == 0) {
2388 result = false;
2389 goto bail;
2390 }
2391 values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
2392 if (values[1] == 0) {
2393 result = false;
2394 goto bail;
2395 }
2396 dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
2397 if (dict == 0) {
2398 result = false;
2399 goto bail;
2400 }
2401 values[0]->release();
2402 values[1]->release();
2403 values[0] = values[1] = 0;
2404
2405 result = dict->serialize(s);
2406 dict->release();
2407 if (!result) {
2408 goto bail;
2409 }
2410 }
2411 result = s->addXMLEndTag("array");
2412
2413 bail:
2414 if (values[0])
2415 values[0]->release();
2416 if (values[1])
2417 values[1]->release();
2418 if (keys[0])
2419 keys[0]->release();
2420 if (keys[1])
2421 keys[1]->release();
2422 if (vcopy)
2423 IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
2424 return result;
2425}
2426
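/*
 * Editor's note (illustrative, not original source): for a descriptor with two
 * virtual ranges, the routine above emits roughly the following shape, each
 * range becoming a dictionary of "address" and "length" numbers inside the
 * enclosing array.
 *
 *   <array>
 *     <dict><key>address</key><integer>...</integer>
 *           <key>length</key><integer>...</integer></dict>
 *     <dict> ... second range ... </dict>
 *   </array>
 */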
2427bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
2428{
2429 if (!s) {
2430 return (false);
2431 }
2432 if (s->previouslySerialized(this)) return true;
2433
2434 // Pretend we are a dictionary.
2435 // We must duplicate the functionality of OSDictionary here
2436 // because otherwise object references will not work;
2437 // they are based on the value of the object passed to
2438 // previouslySerialized and addXMLStartTag.
2439
2440 if (!s->addXMLStartTag(this, "dict")) return false;
2441
2442 char const *keys[3] = {"offset", "length", "parent"};
2443
2444 OSObject *values[3];
2445 values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
2446 if (values[0] == 0)
2447 return false;
2448 values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
2449 if (values[1] == 0) {
2450 values[0]->release();
2451 return false;
2452 }
2453 values[2] = _parent;
2454
2455 bool result = true;
2456 for (int i=0; i<3; i++) {
2457 if (!s->addString("<key>") ||
2458 !s->addString(keys[i]) ||
2459 !s->addXMLEndTag("key") ||
2460 !values[i]->serialize(s)) {
2461 result = false;
2462 break;
2463 }
2464 }
2465 values[0]->release();
2466 values[1]->release();
2467 if (!result) {
2468 return false;
2469 }
2470
2471 return s->addXMLEndTag("dict");
2472}
2473
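/*
 * Editor's note (illustrative, not original source): the hand-rolled dictionary
 * above serializes to roughly the following, with the parent written through
 * its own serialize() so that repeated references to the same object collapse
 * correctly.
 *
 *   <dict>
 *     <key>offset</key><integer>...</integer>
 *     <key>length</key><integer>...</integer>
 *     <key>parent</key> ... parent's serialization or back-reference ...
 *   </dict>
 */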
2474/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
2475
0b4e3aa0 2476OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
2477OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
2478OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
2479OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
2480OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
2481OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
2482OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
2483OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
2484OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
2485OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
2486OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
2487OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
2488OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
2489OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
2490OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
2491OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);
2492
2493/* inline function implementation */
2494IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
2495 { return( getPhysicalSegment( 0, 0 )); }