/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <sys/cdefs.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <device/device_port.h>
void bcopy_phys(char *from, char *to, int size);
void pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa,
                vm_prot_t prot, boolean_t wired);
void ipc_port_release_send(ipc_port_t port);
vm_offset_t vm_map_get_phys_page(vm_map_t map, vm_offset_t offset);

memory_object_t
device_pager_setup(
        memory_object_t    pager,
        int                device_handle,
        vm_size_t          size,
        int                flags);
kern_return_t
device_pager_populate_object(
        memory_object_t    pager,
        vm_object_offset_t offset,
        vm_offset_t        phys_addr,
        vm_size_t          size);

__END_DECLS

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryDescriptor, OSObject )
OSDefineAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

extern "C" {

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);

}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

inline vm_map_t IOGeneralMemoryDescriptor::getMapForTask( task_t task, vm_address_t address )
{
    if( (task == kernel_task) && (kIOMemoryRequiresWire & _flags))
        return( IOPageableMapForAddress( address ) );
    else
        return( get_task_map( task ));
}

inline vm_offset_t pmap_extract_safe(task_t task, vm_offset_t va)
{
    vm_offset_t pa = pmap_extract(get_task_pmap(task), va);

    if ( pa == 0 )
    {
        pa = vm_map_get_phys_page(get_task_map(task), trunc_page(va));
        if ( pa )  pa += va - trunc_page(va);
    }

    return pa;
}

inline void bcopy_phys_safe(char * from, char * to, int size)
{
    boolean_t enabled = ml_set_interrupts_enabled(FALSE);

    bcopy_phys(from, to, size);

    ml_set_interrupts_enabled(enabled);
}

#define next_page(a) ( trunc_page(a) + page_size )


extern "C" {

kern_return_t device_data_action(
               int                 device_handle,
               ipc_port_t          device_pager,
               vm_prot_t           protection,
               vm_object_offset_t  offset,
               vm_size_t           size)
{
    IOMemoryDescriptor * memDesc = (IOMemoryDescriptor *) device_handle;

    assert( OSDynamicCast( IOMemoryDescriptor, memDesc ));

    return( memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/));
}

kern_return_t device_close(
               int device_handle)
{
    IOMemoryDescriptor * memDesc = (IOMemoryDescriptor *) device_handle;

    assert( OSDynamicCast( IOMemoryDescriptor, memDesc ));

    memDesc->release();

    return( kIOReturnSuccess );
}

}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 */
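/*
 * Illustrative sketch (editor's example, not part of the original
 * source): a typical client wraps an existing buffer in a descriptor
 * and releases it when done. `buffer' and `length' are assumed to
 * describe a valid kernel allocation.
 *
 *     IOMemoryDescriptor * md =
 *         IOMemoryDescriptor::withAddress(buffer, length, kIODirectionOut);
 *     if (md) {
 *         // ... hand md to a driver for an output transfer ...
 *         md->release();
 *     }
 */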
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount withLength,
                                IODirection withDirection)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  withLength,
                                IODirection  withDirection,
                                task_t       withTask)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, withLength, withDirection, withTask))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       withLength,
                                IODirection       withDirection )
{
    return( IOMemoryDescriptor::withAddress( address, withLength,
                                        withDirection, (task_t) 0 ));
}


/*
 * withRanges:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
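/*
 * Illustrative sketch (editor's example, not part of the original
 * source): building a descriptor over two discontiguous ranges. The
 * `ranges' array is assumed valid in `userTask'; with asReference ==
 * true the caller must keep the array alive for the descriptor's
 * lifetime, since no copy is made.
 *
 *     IOVirtualRange ranges[2] = {
 *         { userVA1, length1 },
 *         { userVA2, length2 }
 *     };
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withRanges(
 *         ranges, 2, kIODirectionIn, userTask, true);
 */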
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      withDirection,
                                task_t           withTask,
                                bool             asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, withDirection, withTask, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       withDirection,
                                        bool              asReference = false)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, withDirection, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          withDirection)
{
    IOSubMemoryDescriptor * that = new IOSubMemoryDescriptor;

    if (that && !that->initSubRange(of, offset, length, withDirection)) {
        that->release();
        that = 0;
    }
    return that;
}

/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

/*
 * initWithRanges:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
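/*
 * Illustrative note (editor's example, not from the original source):
 * because re-initialization is supported, an idle instance (not wired,
 * not mapped) can be retargeted rather than reallocated:
 *
 *     md->initWithAddress(newBuffer, newLength, kIODirectionOut);
 */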
bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           withCount,
                                   IODirection      withDirection,
                                   task_t           withTask,
                                   bool             asReference = false)
{
    assert(ranges);
    assert(withCount);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized == false)
    {
        if (super::init() == false)  return false;
        _initialized = true;
    }
    else
    {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        assert(_wireCount == 0);

        while (_wireCount)
            complete();
        if (_kernPtrAligned)
            unmapFromKernel();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    /*
     * Initialize the memory descriptor.
     */

    _ranges.v              = 0;
    _rangesCount           = withCount;
    _rangesIsAllocated     = asReference ? false : true;
    _direction             = withDirection;
    _length                = 0;
    _task                  = withTask;
    _position              = 0;
    _positionAtIndex       = 0;
    _positionAtOffset      = 0;
    _kernPtrAligned        = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress  = 0;
    _flags                 = 0;

    if (withTask && (withTask != kernel_task))
        _flags |= kIOMemoryRequiresWire;

    if (asReference)
        _ranges.v = ranges;
    else
    {
        _ranges.v = IONew(IOVirtualRange, withCount);
        if (_ranges.v == 0)  return false;
        bcopy(/* from */ ranges, _ranges.v, withCount * sizeof(IOVirtualRange));
    }

    for (unsigned index = 0; index < _rangesCount; index++)
    {
        _length += _ranges.v[index].length;
    }

    return true;
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                                   UInt32            withCount,
                                                   IODirection       withDirection,
                                                   bool              asReference = false)
{
#warning assuming virtual, physical addresses same size
    return( initWithRanges( (IOVirtualRange *) ranges,
                        withCount, withDirection, (task_t) 0, asReference ));
}

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    while (_wireCount)
        complete();
    if (_kernPtrAligned)
        unmapFromKernel();
    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    if( _memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );
    super::free();
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */     // Pull the shared pages out of the task map
/* DEPRECATED */     // Do we need to unwire it first?
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         pmap_change_wiring(
/* DEPRECATED */                 kernel_pmap,
/* DEPRECATED */                 _kernPtrAligned + off,
/* DEPRECATED */                 FALSE);
/* DEPRECATED */
/* DEPRECATED */         pmap_remove(
/* DEPRECATED */                 kernel_pmap,
/* DEPRECATED */                 _kernPtrAligned + off,
/* DEPRECATED */                 _kernPtrAligned + off + page_size);
/* DEPRECATED */     }
/* DEPRECATED */     // Free the former shmem area in the task
/* DEPRECATED */     krtn = vm_deallocate(kernel_map,
/* DEPRECATED */                          _kernPtrAligned,
/* DEPRECATED */                          _kernSize );
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     _kernPtrAligned = 0;
/* DEPRECATED */ }
/* DEPRECATED */
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
/* DEPRECATED */     kern_return_t krtn;
/* DEPRECATED */     vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */     if (_kernPtrAligned)
/* DEPRECATED */     {
/* DEPRECATED */         if (_kernPtrAtIndex == rangeIndex)  return;
/* DEPRECATED */         unmapFromKernel();
/* DEPRECATED */         assert(_kernPtrAligned == 0);
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
/* DEPRECATED */
/* DEPRECATED */     _kernSize = trunc_page(_ranges.v[rangeIndex].address +
/* DEPRECATED */                            _ranges.v[rangeIndex].length +
/* DEPRECATED */                            page_size - 1) - srcAlign;
/* DEPRECATED */
/* DEPRECATED */     /* Find some memory of the same size in kernel task.  We use vm_allocate() */
/* DEPRECATED */     /* to do this. vm_allocate inserts the found memory object in the */
/* DEPRECATED */     /* target task's map as a side effect. */
/* DEPRECATED */     krtn = vm_allocate( kernel_map,
/* DEPRECATED */             &_kernPtrAligned,
/* DEPRECATED */             _kernSize,
/* DEPRECATED */             VM_FLAGS_ANYWHERE|VM_MAKE_TAG(VM_MEMORY_IOKIT) );  // Find first fit
/* DEPRECATED */     assert(krtn == KERN_SUCCESS);
/* DEPRECATED */     if(krtn)  return;
/* DEPRECATED */
/* DEPRECATED */     /* For each page in the area allocated from the kernel map, */
/* DEPRECATED */     /* find the physical address of the page. */
/* DEPRECATED */     /* Enter the page in the target task's pmap, at the */
/* DEPRECATED */     /* appropriate target task virtual address. */
/* DEPRECATED */     for ( off = 0; off < _kernSize; off += page_size )
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t kern_phys_addr, phys_addr;
/* DEPRECATED */         if( _task)
/* DEPRECATED */             phys_addr = pmap_extract( get_task_pmap(_task), srcAlign + off );
/* DEPRECATED */         else
/* DEPRECATED */             phys_addr = srcAlign + off;
/* DEPRECATED */         assert(phys_addr);
/* DEPRECATED */         if(phys_addr == 0)  return;
/* DEPRECATED */
/* DEPRECATED */         // Check original state.
/* DEPRECATED */         kern_phys_addr = pmap_extract( kernel_pmap, _kernPtrAligned + off );
/* DEPRECATED */         // Set virtual page to point to the right physical one
/* DEPRECATED */         pmap_enter(
/* DEPRECATED */             kernel_pmap,
/* DEPRECATED */             _kernPtrAligned + off,
/* DEPRECATED */             phys_addr,
/* DEPRECATED */             VM_PROT_READ|VM_PROT_WRITE,
/* DEPRECATED */             TRUE);
/* DEPRECATED */     }
/* DEPRECATED */     _kernPtrAtIndex = rangeIndex;
/* DEPRECATED */ }

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag(
        IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                        IOByteCount * length )
{
    IOPhysicalAddress physAddr;

    prepare();
    physAddr = getPhysicalSegment( offset, length );
    complete();

    return( physAddr );
}

IOByteCount IOMemoryDescriptor::readBytes( IOByteCount offset,
                                           void *      bytes,
                                           IOByteCount withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another source segment?)
        {
            IOPhysicalAddress sourceSegment;
            IOByteCount       sourceSegmentLength;

            sourceSegment = getPhysicalSegment(offset, &sourceSegmentLength);
            if ( sourceSegment == 0 )  goto readBytesErr;

            sourceSegmentLength = min(sourceSegmentLength, withLength);

            while ( sourceSegmentLength ) // (process another target segment?)
            {
                IOPhysicalAddress targetSegment;
                IOByteCount       targetSegmentLength;

                targetSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( targetSegment == 0 )  goto readBytesErr;

                targetSegmentLength = min(next_page(targetSegment) - targetSegment, sourceSegmentLength);

                if ( sourceSegment + targetSegmentLength > next_page(sourceSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(sourceSegment) - sourceSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    ((UInt8 *) bytes)   += pageLength;
                    bytesCopied         += pageLength;
                    offset              += pageLength;
                    sourceSegment       += pageLength;
                    sourceSegmentLength -= pageLength;
                    targetSegment       += pageLength;
                    targetSegmentLength -= pageLength;
                    withLength          -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) targetSegmentLength );

                ((UInt8 *) bytes)   += targetSegmentLength;
                bytesCopied         += targetSegmentLength;
                offset              += targetSegmentLength;
                sourceSegment       += targetSegmentLength;
                sourceSegmentLength -= targetSegmentLength;
                withLength          -= targetSegmentLength;
            }
        }
    }

readBytesErr:

    if ( bytesCopied )
    {
        // We mark the destination pages as modified, just
        // in case they are made pageable later on in life.

        pmap_modify_pages( /* pmap  */ kernel_pmap,
                           /* start */ trunc_page(((vm_offset_t) bytes) - bytesCopied),
                           /* end   */ round_page(((vm_offset_t) bytes)) );
    }

    return bytesCopied;
}

IOByteCount IOMemoryDescriptor::writeBytes( IOByteCount  offset,
                                            const void * bytes,
                                            IOByteCount  withLength )
{
    IOByteCount bytesCopied = 0;

    assert(offset <= _length);
    assert(offset <= _length - withLength);

    if ( offset < _length )
    {
        withLength = min(withLength, _length - offset);

        while ( withLength ) // (process another target segment?)
        {
            IOPhysicalAddress targetSegment;
            IOByteCount       targetSegmentLength;

            targetSegment = getPhysicalSegment(offset, &targetSegmentLength);
            if ( targetSegment == 0 )  goto writeBytesErr;

            targetSegmentLength = min(targetSegmentLength, withLength);

            while ( targetSegmentLength ) // (process another source segment?)
            {
                IOPhysicalAddress sourceSegment;
                IOByteCount       sourceSegmentLength;

                sourceSegment = pmap_extract_safe(kernel_task, (vm_offset_t) bytes);
                if ( sourceSegment == 0 )  goto writeBytesErr;

                sourceSegmentLength = min(next_page(sourceSegment) - sourceSegment, targetSegmentLength);

                if ( targetSegment + sourceSegmentLength > next_page(targetSegment) )
                {
                    IOByteCount pageLength;

                    pageLength = next_page(targetSegment) - targetSegment;

                    bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                     /* to   */ (char *) targetSegment,
                                     /* size */ (int   ) pageLength );

                    // We flush the data cache in case it is code we've copied,
                    // such that the instruction cache is in the know about it.

                    flush_dcache(targetSegment, pageLength, true);

                    ((UInt8 *) bytes)   += pageLength;
                    bytesCopied         += pageLength;
                    offset              += pageLength;
                    sourceSegment       += pageLength;
                    sourceSegmentLength -= pageLength;
                    targetSegment       += pageLength;
                    targetSegmentLength -= pageLength;
                    withLength          -= pageLength;
                }

                bcopy_phys_safe( /* from */ (char *) sourceSegment,
                                 /* to   */ (char *) targetSegment,
                                 /* size */ (int   ) sourceSegmentLength );

                // We flush the data cache in case it is code we've copied,
                // such that the instruction cache is in the know about it.

                flush_dcache(targetSegment, sourceSegmentLength, true);

                ((UInt8 *) bytes)   += sourceSegmentLength;
                bytesCopied         += sourceSegmentLength;
                offset              += sourceSegmentLength;
                targetSegment       += sourceSegmentLength;
                targetSegmentLength -= sourceSegmentLength;
                withLength          -= sourceSegmentLength;
            }
        }
    }

writeBytesErr:

    return bytesCopied;
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
/* DEPRECATED */     assert(position <= _length);
/* DEPRECATED */
/* DEPRECATED */     if (position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         _position         = _length;
/* DEPRECATED */         _positionAtIndex  = _rangesCount; /* careful: out-of-bounds */
/* DEPRECATED */         _positionAtOffset = 0;
/* DEPRECATED */         return;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     if (position < _position)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset = position;
/* DEPRECATED */         _positionAtIndex  = 0;
/* DEPRECATED */     }
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset += (position - _position);
/* DEPRECATED */     }
/* DEPRECATED */     _position = position;
/* DEPRECATED */
/* DEPRECATED */     while (_positionAtOffset >= _ranges.v[_positionAtIndex].length)
/* DEPRECATED */     {
/* DEPRECATED */         _positionAtOffset -= _ranges.v[_positionAtIndex].length;
/* DEPRECATED */         _positionAtIndex++;
/* DEPRECATED */     }
/* DEPRECATED */ }

IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment( IOByteCount   offset,
                                                                 IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;


//  assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        if ( _task == 0 ) // (physical memory?)
        {
            address = _ranges.v[rangesIndex].address + offset;
            length  = _ranges.v[rangesIndex].length  - offset;

            for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
            {
                if ( address + length != _ranges.v[rangesIndex].address )  break;

                length += _ranges.v[rangesIndex].length; // (coalesce ranges)
            }
        }
        else // (virtual memory?)
        {
            vm_address_t addressVirtual = _ranges.v[rangesIndex].address + offset;

            assert((0 == (kIOMemoryRequiresWire & _flags)) || _wireCount);

            address = pmap_extract_safe(_task, addressVirtual);
            length  = next_page(addressVirtual) - addressVirtual;
            length  = min(_ranges.v[rangesIndex].length - offset, length);
        }

        assert(address);
        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}

IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment( IOByteCount   offset,
                                                               IOByteCount * lengthOfSegment )
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

    assert(offset <= _length);

    if ( offset < _length ) // (within bounds?)
    {
        unsigned rangesIndex = 0;

        for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ )
        {
            offset -= _ranges.v[rangesIndex].length; // (make offset relative)
        }

        address = _ranges.v[rangesIndex].address + offset;
        length  = _ranges.v[rangesIndex].length  - offset;

        for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ )
        {
            if ( address + length != _ranges.v[rangesIndex].address )  break;

            length += _ranges.v[rangesIndex].length; // (coalesce ranges)
        }

        assert(address);
        if ( address == 0 )  length = 0;
    }

    if ( lengthOfSegment )  *lengthOfSegment = length;

    return address;
}

/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
/* DEPRECATED */     if( offset != _position)
/* DEPRECATED */         setPosition( offset );
/* DEPRECATED */
/* DEPRECATED */     assert(_position <= _length);
/* DEPRECATED */
/* DEPRECATED */     /* Fail gracefully if the position is at (or past) the end-of-buffer. */
/* DEPRECATED */     if (_position >= _length)
/* DEPRECATED */     {
/* DEPRECATED */         *lengthOfSegment = 0;
/* DEPRECATED */         return 0;
/* DEPRECATED */     }
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative length to the end of this virtual segment. */
/* DEPRECATED */     *lengthOfSegment = _ranges.v[_positionAtIndex].length - _positionAtOffset;
/* DEPRECATED */
/* DEPRECATED */     /* Compute the relative address of this virtual segment. */
/* DEPRECATED */     if (_task == kernel_task)
/* DEPRECATED */         return (void *)(_ranges.v[_positionAtIndex].address + _positionAtOffset);
/* DEPRECATED */     else
/* DEPRECATED */     {
/* DEPRECATED */         vm_offset_t off;
/* DEPRECATED */
/* DEPRECATED */         mapIntoKernel(_positionAtIndex);
/* DEPRECATED */
/* DEPRECATED */         off  = _ranges.v[_kernPtrAtIndex].address;
/* DEPRECATED */         off -= trunc_page(off);
/* DEPRECATED */
/* DEPRECATED */         return (void *) (_kernPtrAligned + off + _positionAtOffset);
/* DEPRECATED */     }
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
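/*
 * Illustrative sketch (editor's example, not from the original
 * source): the usual prepare()/complete() bracket around a transfer,
 * assuming `md' describes pageable user memory:
 *
 *     if (md->prepare() == kIOReturnSuccess) {
 *         // ... program the DMA engine with md's physical segments ...
 *         md->complete();
 *     }
 */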
IOReturn IOGeneralMemoryDescriptor::prepare(
                IODirection forDirection = kIODirectionNone)
{
    UInt rangeIndex = 0;

    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        vm_prot_t access;

        switch (forDirection)
        {
            case kIODirectionIn:
                access = VM_PROT_WRITE;
                break;

            case kIODirectionOut:
                access = VM_PROT_READ;
                break;

            default:
                access = VM_PROT_READ | VM_PROT_WRITE;
                break;
        }

        //
        // Check user read/write access to the data buffer.
        //

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++)
        {
            vm_offset_t checkBase = trunc_page(_ranges.v[rangeIndex].address);
            vm_size_t   checkSize = round_page(_ranges.v[rangeIndex].length );

            while (checkSize)
            {
                vm_region_basic_info_data_t regionInfo;
                mach_msg_type_number_t      regionInfoSize = sizeof(regionInfo);
                vm_size_t                   regionSize;

                if ( (vm_region(
                          /* map         */ getMapForTask(_task, checkBase),
                          /* address     */ &checkBase,
                          /* size        */ &regionSize,
                          /* flavor      */ VM_REGION_BASIC_INFO,
                          /* info        */ (vm_region_info_t) &regionInfo,
                          /* info size   */ &regionInfoSize,
                          /* object name */ 0 ) != KERN_SUCCESS             ) ||
                     ( (forDirection & kIODirectionIn ) &&
                                   !(regionInfo.protection & VM_PROT_WRITE) ) ||
                     ( (forDirection & kIODirectionOut) &&
                                   !(regionInfo.protection & VM_PROT_READ ) ) )
                {
                    return kIOReturnVMError;
                }

                assert((regionSize & PAGE_MASK) == 0);

                regionSize = min(regionSize, checkSize);
                checkSize -= regionSize;
                checkBase += regionSize;
            } // (for each vm region)
        } // (for each io range)

        for (rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length +
                                page_size - 1);

            vm_map_t taskVMMap = getMapForTask(_task, srcAlign);

            // If this I/O is for a user land task then protect ourselves
            // against COW and other vm_shenanigans
            if (_task && _task != kernel_task) {
                // setup a data object to hold the 'named' memory regions
                // @@@ gvdl: If we fail to allocate an OSData we will just
                // hope for the best for the time being.  Lets not fail a
                // prepare at this late stage in product release.
                if (!_memoryEntries)
                    _memoryEntries = OSData::withCapacity(16);
                if (_memoryEntries) {
                    vm_object_offset_t desiredSize = srcAlignEnd - srcAlign;
                    vm_object_offset_t entryStart = srcAlign;
                    ipc_port_t memHandle;

                    do {
                        vm_object_offset_t actualSize = desiredSize;

                        rc = mach_make_memory_entry_64
                            (taskVMMap, &actualSize, entryStart,
                            forDirection, &memHandle, NULL);
                        if (KERN_SUCCESS != rc) {
                            IOLog("IOMemoryDescriptor::prepare mach_make_memory_entry_64 failed: %d\n", rc);
                            goto abortExit;
                        }

                        _memoryEntries->
                            appendBytes(&memHandle, sizeof(memHandle));
                        desiredSize -= actualSize;
                        entryStart += actualSize;
                    } while (desiredSize);
                }
            }

            rc = vm_map_wire(taskVMMap, srcAlign, srcAlignEnd, access, FALSE);
            if (KERN_SUCCESS != rc) {
                IOLog("IOMemoryDescriptor::prepare vm_map_wire failed: %d\n", rc);
                goto abortExit;
            }
        }
    }
    _wireCount++;
    return kIOReturnSuccess;

abortExit:
    UInt doneIndex;


    for(doneIndex = 0; doneIndex < rangeIndex; doneIndex++) {
        vm_offset_t srcAlign = trunc_page(_ranges.v[doneIndex].address);
        IOByteCount srcAlignEnd = trunc_page(_ranges.v[doneIndex].address +
                            _ranges.v[doneIndex].length +
                            page_size - 1);

        vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                            srcAlignEnd, FALSE);
    }

    if (_memoryEntries) {
        ipc_port_t *handles, *handlesEnd;

        handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
        handlesEnd = (ipc_port_t *)
                    ((vm_address_t) handles + _memoryEntries->getLength());
        while (handles < handlesEnd)
            ipc_port_release_send(*handles++);
        _memoryEntries->release();
        _memoryEntries = 0;
    }

    return kIOReturnVMError;
}

/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(
                IODirection forDirection = kIODirectionNone)
{
    assert(_wireCount);

    if(0 == _wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if((_wireCount == 0) && (kIOMemoryRequiresWire & _flags)) {
        UInt rangeIndex;
        kern_return_t rc;

        if(forDirection == kIODirectionNone)
            forDirection = _direction;

        for(rangeIndex = 0; rangeIndex < _rangesCount; rangeIndex++) {

            vm_offset_t srcAlign = trunc_page(_ranges.v[rangeIndex].address);
            IOByteCount srcAlignEnd = trunc_page(_ranges.v[rangeIndex].address +
                                _ranges.v[rangeIndex].length +
                                page_size - 1);

            if(forDirection == kIODirectionIn)
                pmap_modify_pages(get_task_pmap(_task), srcAlign, srcAlignEnd);

            rc = vm_map_unwire(getMapForTask(_task, srcAlign), srcAlign,
                               srcAlignEnd, FALSE);
            if(rc != KERN_SUCCESS)
                IOLog("IOMemoryDescriptor::complete: vm_map_unwire failed: %d\n", rc);
        }

        if (_memoryEntries) {
            ipc_port_t *handles, *handlesEnd;

            handles = (ipc_port_t *) _memoryEntries->getBytesNoCopy();
            handlesEnd = (ipc_port_t *)
                        ((vm_address_t) handles + _memoryEntries->getLength());
            while (handles < handlesEnd)
                ipc_port_release_send(*handles++);

            _memoryEntries->release();
            _memoryEntries = 0;
        }
    }
    return kIOReturnSuccess;
}

IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset = 0,
        IOByteCount        length = 0 )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    // mapping source == dest? (could be much better)
    if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == sourceOffset)
        && (length <= _ranges.v[0].length) ) {
        *atAddress = _ranges.v[0].address;
        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = 0;

        for (unsigned index = 0; index < _rangesCount; index++)
            size += round_page(_ranges.v[index].address + _ranges.v[index].length)
                  - trunc_page(_ranges.v[index].address);

        if( _task) {
#if NOTYET
            vm_object_offset_t actualSize = size;
            kr = mach_make_memory_entry_64( get_task_map(_task),
                        &actualSize, _ranges.v[0].address,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != size)) {
#if IOASSERT
                IOLog("mach_make_memory_entry_64 (%08lx) size (%08lx:%08lx)\n",
                            _ranges.v[0].address, (UInt32)actualSize, size);
#endif
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
#endif /* NOTYET */
                sharedMem = MACH_PORT_NULL;

        } else do {

            memory_object_t pager;

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);

            pager = device_pager_setup( (memory_object_t) 0, (int) this, size,
                        reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0 );
            assert( pager );

            if( pager) {
                retain();	// pager has a ref
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    // chris?
                    // ipc_port_release_send( (ipc_port_t) pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            reserved->devicePager = pager;

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    kr = super::doMap( addressMap, atAddress,
                       options, sourceOffset, length );

    return( kr );
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    // could be much better
    if( _task && (addressMap == getMapForTask(_task, _ranges.v[0].address)) && (1 == _rangesCount)
         && (logical == _ranges.v[0].address)
         && (length <= _ranges.v[0].length) )
        return( kIOReturnSuccess );

    return( super::doUnmap( addressMap, logical, length ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

extern "C" {
// osfmk/device/iokit_rpc.c
extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa,
                                 vm_size_t length, unsigned int mapFlags);
extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length);
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClass( IOMemoryMap, OSObject )
OSDefineAbstractStructors( IOMemoryMap, OSObject )

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)

    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;

public:
    virtual void free();

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * isCompatible(
                IOMemoryDescriptor * owner,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );

    bool init(
        IOMemoryDescriptor * memory,
        IOMemoryMap *        superMap,
        IOByteCount          offset,
        IOByteCount          length );

    bool init(
        IOMemoryDescriptor * memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length );

    IOReturn redirect(
        task_t               intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool _IOMemoryMap::init(
        IOMemoryDescriptor * _memory,
        IOMemoryMap *        _superMap,
        IOByteCount          _offset,
        IOByteCount          _length )
{

    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory   = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset   = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options  = superMap->getMapOptions();
    logical  = superMap->getVirtualAddress() + offset;

    return( true );
}

bool _IOMemoryMap::init(
        IOMemoryDescriptor * _memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    kernel_vm_map_reference(addressMap);

    _memory->retain();
    memory      = _memory;

    offset      = _offset;
    if( _length)
        length  = _length;
    else
        length  = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        logical = 0;
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}

struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t   sharedMem;
    vm_size_t    size;
    vm_offset_t  mapped;
    IOByteCount  sourceOffset;
    IOOptionBits options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot, // cur
                          prot, // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}

IOReturn IOMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset = 0,
        IOByteCount        length = 0 )
{
    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    vm_address_t      logical;
    IOByteCount       pageOffset;
    IOPhysicalAddress sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        ref.size = round_page( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}

1474 | enum { | |
1475 | kIOMemoryRedirected = 0x00010000 | |
1476 | }; | |
1477 | ||
1478 | IOReturn IOMemoryDescriptor::handleFault( | |
1479 | void * _pager, | |
1480 | vm_map_t addressMap, | |
1481 | IOVirtualAddress address, | |
1482 | IOByteCount sourceOffset, | |
1483 | IOByteCount length, | |
1484 | IOOptionBits options ) | |
1485 | { | |
1486 | IOReturn err = kIOReturnSuccess; | |
1487 | memory_object_t pager = (memory_object_t) _pager; | |
1488 | vm_size_t size; | |
1489 | vm_size_t bytes; | |
1490 | vm_size_t page; | |
1491 | IOByteCount pageOffset; | |
1492 | IOPhysicalLength segLen; | |
1493 | IOPhysicalAddress physAddr; | |
1494 | ||
1495 | if( !addressMap) { | |
1496 | ||
1497 | LOCK; | |
1498 | ||
1499 | if( kIOMemoryRedirected & _flags) { | |
1c79356b | 1500 | #ifdef DEBUG |
0b4e3aa0 | 1501 | IOLog("sleep mem redirect %x, %lx\n", address, sourceOffset); |
1c79356b | 1502 | #endif |
0b4e3aa0 A |
1503 | do { |
1504 | assert_wait( (event_t) this, THREAD_UNINT ); | |
1505 | UNLOCK; | |
1506 | thread_block((void (*)(void)) 0); | |
1507 | LOCK; | |
1508 | } while( kIOMemoryRedirected & _flags ); | |
1509 | } | |
1c79356b | 1510 | |
0b4e3aa0 A |
1511 | UNLOCK; |
1512 | return( kIOReturnSuccess ); | |
1c79356b A |
1513 | } |
1514 | ||
0b4e3aa0 A |
1515 | physAddr = getPhysicalSegment( sourceOffset, &segLen ); |
1516 | assert( physAddr ); | |
1517 | pageOffset = physAddr - trunc_page( physAddr ); | |
1518 | ||
1519 | size = length + pageOffset; | |
1520 | physAddr -= pageOffset; | |
1c79356b A |
1521 | |
1522 | segLen += pageOffset; | |
0b4e3aa0 | 1523 | bytes = size; |
1c79356b A |
1524 | do { |
1525 | // in the middle of the loop only map whole pages | |
1526 | if( segLen >= bytes) | |
1527 | segLen = bytes; | |
1528 | else if( segLen != trunc_page( segLen)) | |
1529 | err = kIOReturnVMError; | |
1530 | if( physAddr != trunc_page( physAddr)) | |
1531 | err = kIOReturnBadArgument; | |
1532 | ||
1533 | #ifdef DEBUG | |
1534 | if( kIOLogMapping & gIOKitDebug) | |
0b4e3aa0 A |
1535 | IOLog("_IOMemoryMap::map(%p) %08lx->%08lx:%08lx\n", |
1536 | addressMap, address + pageOffset, physAddr + pageOffset, | |
1c79356b A |
1537 | segLen - pageOffset); |
1538 | #endif | |
1539 | ||
0b4e3aa0 A |
1540 | if( addressMap && (kIOReturnSuccess == err)) |
1541 | err = IOMapPages( addressMap, address, physAddr, segLen, options ); | |
1542 | assert( KERN_SUCCESS == err ); | |
1c79356b A |
1543 | if( err) |
1544 | break; | |
1545 | ||
0b4e3aa0 A |
1546 | if( pager) { |
1547 | if( reserved && reserved->pagerContig) { | |
1548 | IOPhysicalLength allLen; | |
1549 | IOPhysicalAddress allPhys; | |
1550 | ||
1551 | allPhys = getPhysicalSegment( 0, &allLen ); | |
1552 | assert( allPhys ); | |
1553 | err = device_pager_populate_object( pager, 0, trunc_page(allPhys), round_page(allPhys + allLen) ); | |
1554 | ||
            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object( pager, sourceOffset + page,
                            physAddr + page, page_size );
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }
        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}

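/*
 * Worked example of the alignment math above (illustrative values,
 * not taken from any caller): with a 0x1000-byte page, a first
 * segment at physAddr 0x12345680 and a fault length of 0x2000 give
 *
 *     pageOffset = 0x12345680 - trunc_page(0x12345680) = 0x680
 *     size       = 0x2000 + 0x680                      = 0x2680
 *     physAddr   = 0x12345680 - 0x680                  = 0x12345000
 *
 * so the loop maps whole pages starting at the page base, and the
 * extra pageOffset bytes are skipped by offsetting the caller's
 * address on the first iteration.
 */
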
IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t		addressMap,
    IOVirtualAddress	logical,
    IOByteCount		length )
{
    IOReturn	err;

#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );
#endif

    if( (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryRequiresWire & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn		err;
    _IOMemoryMap *	mapping = 0;
    OSIterator *	iter;

    LOCK;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, redirect );

            iter->release();
        }
    } while( false );

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        thread_wakeup( (event_t) this);
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

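/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * a driver can revoke client mappings across a power transition and
 * restore them afterwards.  While kIOMemoryRedirected is set, the
 * pager path in handleFault() sleeps on this object until the
 * thread_wakeup() above runs.
 *
 *     md->redirect( clientTask, true );     // unmap, hold off pager faults
 *     // ... power the device down and back up ...
 *     md->redirect( clientTask, false );    // allow remapping on next fault
 */
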
IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}

IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        LOCK;
        if( logical && addressMap
            && (get_task_map( safeTask) != addressMap)
            && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;
#ifdef DEBUG
            IOLog("IOMemoryMap::redirect(%d, %x) %x from %p\n", redirect, err, logical, addressMap);
#endif
        }
        UNLOCK;
    }

    return( err );
}

IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn	err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical = 0;
    UNLOCK;
}

void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}

_IOMemoryMap * _IOMemoryMap::isCompatible(
    IOMemoryDescriptor *	owner,
    task_t			task,
    IOVirtualAddress		toAddress,
    IOOptionBits		_options,
    IOByteCount			_offset,
    IOByteCount			_length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (task != getAddressTask()))
        return( 0 );
    if( (options ^ _options) & (kIOMapCacheMask | kIOMapReadOnly))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
            && !mapping->init( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

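/*
 * Illustrative outcomes for isCompatible() (hypothetical values):
 * given an existing mapping of offset 0, length 0x4000 in the same
 * task with matching cache and read-only bits,
 *
 *   - a request for offset 0, length 0x4000 returns this mapping,
 *     retained;
 *   - a request for offset 0x1000, length 0x2000 returns a new
 *     _IOMemoryMap sub-view backed by this one;
 *   - a request for offset 0x3000, length 0x2000 returns 0, since
 *     it runs past the end of the existing mapping.
 */
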
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress	address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    if( reserved)
        IODelete( reserved, ExpansionData, 1 );

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
    task_t		intoTask,
    IOVirtualAddress	mapAddress,
    IOOptionBits	options = 0 )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
        && !map->init( this, intoTask, mapAddress,
                       options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    UNLOCK;

    return( map);
}

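/*
 * Usage sketch (hypothetical address): setMapping() records a
 * mapping that already exists -- for example one set up by platform
 * code -- rather than creating a new one.  kIOMapStatic keeps
 * unmap() from ever deallocating it.
 *
 *     IOMemoryMap * map = md->setMapping( kernel_task, existingVA );
 */
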
IOMemoryMap * IOMemoryDescriptor::map(
    IOOptionBits options = 0 )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
    task_t		intoTask,
    IOVirtualAddress	toAddress,
    IOOptionBits	options,
    IOByteCount		offset = 0,
    IOByteCount		length = 0 )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}

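/*
 * Usage sketch (hypothetical client code): the short form maps the
 * whole descriptor anywhere in the kernel task; releasing the map
 * object undoes the mapping via _IOMemoryMap::free().
 *
 *     IOMemoryMap * map = md->map();
 *     if( map) {
 *         IOVirtualAddress va = map->getVirtualAddress();
 *         // ... access getLength() bytes at va ...
 *         map->release();
 *     }
 */
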
IOMemoryMap * IOMemoryDescriptor::makeMapping(
    IOMemoryDescriptor *	owner,
    task_t			intoTask,
    IOVirtualAddress		toAddress,
    IOOptionBits		options,
    IOByteCount			offset,
    IOByteCount			length )
{
    _IOMemoryMap *	mapping = 0;
    OSIterator *	iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->isCompatible(
                                owner, intoTask, toAddress,
                                options | kIOMapReference,
                                offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
            && !mapping->init( owner, intoTask, toAddress, options,
                               offset, length )) {

            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
            mapping->release();
            mapping = 0;
        }

    } while( false );

    owner->addMapping( mapping);

    UNLOCK;

    return( mapping);
}

void IOMemoryDescriptor::addMapping(
    IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings && _mappings->setObject( mapping ))
            mapping->release(); /* really */
    }
}

void IOMemoryDescriptor::removeMapping(
    IOMemoryMap * mapping )
{
    if( _mappings) {
        mapping->retain();
        mapping->retain();
        _mappings->removeObject( mapping);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection withDirection )
{
    if( !super::init())
        return( false );

    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    parent->retain();
    _parent	= parent;
    _start	= offset;
    _length	= length;
    _direction	= withDirection;
    _tag	= parent->getTag();

    return( true );
}

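/*
 * Usage sketch (hypothetical parent descriptor): a sub-descriptor
 * is a window onto its parent; _start translates every offset in
 * the accessors below.
 *
 *     IOSubMemoryDescriptor * sub = new IOSubMemoryDescriptor;
 *     if( sub && !sub->initSubRange( parentMD, 0x1000, 0x2000,
 *                                    parentMD->getDirection())) {
 *         sub->release();
 *         sub = 0;
 *     }
 */
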
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress	address;
    IOByteCount		actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress	address;
    IOByteCount		actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount withLength)
{
    IOByteCount	byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void * bytes, IOByteCount withLength)
{
    IOByteCount	byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(withLength, _length - offset) );
    UNLOCK;

    return( byteCount );
}

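/*
 * Worked example of the offset translation above (illustrative
 * values): with _start = 0x1000 and _length = 0x2000, a call to
 * readBytes( 0x1800, buf, 0x1000 ) becomes
 * _parent->readBytes( 0x2800, buf, 0x800 ) -- the length is clipped
 * to the 0x800 bytes remaining in the sub-range.
 */
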
IOReturn IOSubMemoryDescriptor::prepare(
    IODirection forDirection = kIODirectionNone)
{
    IOReturn	err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
    IODirection forDirection = kIODirectionNone)
{
    IOReturn	err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}

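/*
 * Usage sketch (hypothetical DMA code): prepare() and complete()
 * bracket any physical access so the parent's pages stay wired for
 * the whole transfer; the sub-descriptor simply forwards both calls.
 *
 *     if( kIOReturnSuccess == sub->prepare( kIODirectionOut )) {
 *         // ... program DMA using sub->getPhysicalSegment() ...
 *         sub->complete( kIODirectionOut );
 *     }
 */
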
IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
    IOMemoryDescriptor *	owner,
    task_t			intoTask,
    IOVirtualAddress		toAddress,
    IOOptionBits		options,
    IOByteCount			offset,
    IOByteCount			length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                    _parent, intoTask,
                    toAddress - (_start + offset),
                    options | kIOMapReference,
                    _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                    _parent, intoTask,
                    toAddress,
                    options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}

/* ick */

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount withLength,
                                       IODirection withDirection)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  withLength,
                                       IODirection  withDirection,
                                       task_t       withTask)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                       IOPhysicalAddress address,
                                       IOByteCount       withLength,
                                       IODirection       withDirection )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                       IOVirtualRange * ranges,
                                       UInt32           withCount,
                                       IODirection      withDirection,
                                       task_t           withTask,
                                       bool             asReference = false)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       withDirection,
                                               bool              asReference = false)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);