/*
 * Copyright (c) 1998-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <vm/vm_shared_memory_server.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <vm/vm_fault.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
        memory_object_t    pager,
        int                device_handle,
        vm_size_t          size,
        int                flags);
void
device_pager_deallocate(
        memory_object_t);
kern_return_t
device_pager_populate_object(
        memory_object_t    pager,
        vm_object_offset_t offset,
        ppnum_t            phys_addr,
        vm_size_t          size);
kern_return_t
memory_object_iopl_request(
        ipc_port_t             port,
        memory_object_offset_t offset,
        vm_size_t              *upl_size,
        upl_t                  *upl_ptr,
        upl_page_info_array_t  user_page_list,
        unsigned int           *page_list_count,
        int                    *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMaximumMappedIOByteCount     (512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

IOCopyMapper *    gIOCopyMapper = NULL;

static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

ppnum_t           gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

class _IOMemoryMap : public IOMemoryMap
{
    OSDeclareDefaultStructors(_IOMemoryMap)
public:
    IOMemoryDescriptor * memory;
    IOMemoryMap *        superMap;
    IOByteCount          offset;
    IOByteCount          length;
    IOVirtualAddress     logical;
    task_t               addressTask;
    vm_map_t             addressMap;
    IOOptionBits         options;
    upl_t                redirUPL;
    ipc_port_t           redirEntry;
    IOMemoryDescriptor * owner;

protected:
    virtual void taggedRelease(const void *tag = 0) const;
    virtual void free();

public:

    // IOMemoryMap methods
    virtual IOVirtualAddress     getVirtualAddress();
    virtual IOByteCount          getLength();
    virtual task_t               getAddressTask();
    virtual IOMemoryDescriptor * getMemoryDescriptor();
    virtual IOOptionBits         getMapOptions();

    virtual IOReturn             unmap();
    virtual void                 taskDied();

    virtual IOReturn             redirect(IOMemoryDescriptor * newBackingMemory,
                                          IOOptionBits         options,
                                          IOByteCount          offset = 0);

    virtual IOPhysicalAddress    getPhysicalSegment(IOByteCount offset,
                                                    IOByteCount * length);

    // for IOMemoryDescriptor use
    _IOMemoryMap * copyCompatible(
                IOMemoryDescriptor * owner,
                task_t               intoTask,
                IOVirtualAddress     toAddress,
                IOOptionBits         options,
                IOByteCount          offset,
                IOByteCount          length );

    bool initCompatible(
        IOMemoryDescriptor * memory,
        IOMemoryMap *        superMap,
        IOByteCount          offset,
        IOByteCount          length );

    bool initWithDescriptor(
        IOMemoryDescriptor * memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length );

    IOReturn redirect(
        task_t               intoTask, bool redirect );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the initWithOptions
// function

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fIOMDOffset;   // The offset of this iopl in descriptor
    vm_offset_t fPageInfo;      // Pointer to page list or index into it
    ppnum_t fMappedBase;        // Page number of first page in this iopl
    unsigned int fPageOffset;   // Offset within first page of iopl
    unsigned int fFlags;        // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    unsigned int fPageCnt;
    upl_page_info_t fPageList[];
    ioPLBlock fBlocks[];
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))

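// Illustrative sketch (not part of the original file): how the accessor
// macros above cooperate.  ioGMDData lives inside an OSData and is followed
// by fPageCnt upl_page_info_t entries and then a variable number of
// ioPLBlocks; the iopl count is recovered from the OSData's length.
//
//   OSData *md = OSData::withCapacity(computeDataSize(pageCount, uplCount));
//   md->appendBytes(0, sizeof(ioGMDData));      // zero-filled header
//   ioGMDData *dataP = getDataP(md);
//   dataP->fPageCnt = pageCount;
//   // ... each UPL appended with md->appendBytes(&iopl, sizeof(iopl)) ...
//   ioPLBlock *iopls = getIOPLList(dataP);      // past fPageList[fPageCnt]
//   UInt numIOPLs    = getNumIOPL(md, dataP);   // derived from OSData length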
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )


extern "C" {

kern_return_t device_data_action(
               int                   device_handle,
               ipc_port_t            device_pager,
               vm_prot_t             protection,
               vm_object_offset_t    offset,
               vm_size_t             size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               int     device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData *   ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
};      // end extern "C"

// Note this inline function uses C++ reference arguments to return values
// This means that pointers are not passed and NULLs don't have to be
// checked for as a NULL reference is illegal.
static inline void
getAddrLenForInd(addr64_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO       == type
        || kIOMemoryTypeVirtual   == type || kIOMemoryTypeVirtual64 == type
        || kIOMemoryTypePhysical  == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

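// Illustrative sketch: how callers in this file walk a Ranges union with
// getAddrLenForInd(), independent of which representation (uio, 64-bit, or
// 32-bit ranges) the descriptor was created with.
//
//   addr64_t         addr;
//   IOPhysicalLength len;
//   for (UInt32 ind = 0; ind < _rangesCount; ind++) {
//       getAddrLenForInd(addr, len, _flags & kIOMemoryTypeMask, _ranges, ind);
//       // ... accumulate length / page counts for (addr, len) ...
//   }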
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
#if TEST_V64
    if (task)
    {
        IOOptionBits options = (IOOptionBits) direction;
        if (task == kernel_task)
            options |= kIOMemoryAutoPrepare;
        return (IOMemoryDescriptor::withAddressRange(address, length, options, task));
    }
#endif
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}

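/*
 * Illustrative usage sketch (not from the original source): wrapping a
 * kernel buffer for a device-to-memory transfer.  "buffer" and "bufferSize"
 * are hypothetical; the prepare()/complete() pairing is the standard
 * I/O Kit discipline around DMA.
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(buffer, bufferSize, kIODirectionIn);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... program DMA using md's physical segments ...
 *       md->complete();
 *   }
 *   if (md)
 *       md->release();
 */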
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                        IOPhysicalAddress address,
                        IOByteCount       length,
                        IODirection       direction )
{
#if TEST_P64
    return (IOMemoryDescriptor::withAddressRange(address, length, (IOOptionBits) direction, NULL));
#endif
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;
    if (self
    && !self->initWithPhysicalAddress(address, length, direction)) {
        self->release();
        return 0;
    }

    return self;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}

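/*
 * Illustrative usage sketch: describing two discontiguous ranges in a user
 * task with the 64-bit-clean factory above.  The range values and "task"
 * are hypothetical.
 *
 *   IOAddressRange ranges[2] = {
 *       { userAddr0, length0 },
 *       { userAddr1, length1 }
 *   };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRanges(
 *       ranges, 2, kIODirectionOutIn, task);
 */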

/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

// Can't leave abstract but this should never be used directly.
bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    // @@@ gvdl: Should I panic?
    panic("IOMD::initWithOptions called\n");
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges,
                                        UInt32            withCount,
                                        IODirection       direction,
                                        bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor;

    if (self && !self->initSubRange(of, offset, length, direction)) {
        self->release();
        self = 0;
    }
    return self;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();               // Add a new reference to ourselves
        ipc_port_release_send(sharedMem);   // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}

void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
            &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
#if IOASSERT
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                        (UInt64)range0Addr, (UInt32)actualSize, size);
#endif
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}

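/*
 * Illustrative usage sketch: withPersistentMemoryDescriptor() either returns
 * the original descriptor retained (when mach_make_memory_entry_64 hands back
 * the same named entry, i.e. the backing object is unchanged) or a fresh
 * IOGeneralMemoryDescriptor holding its own send right on the new entry.
 *
 *   IOMemoryDescriptor * persistent =
 *       IOMemoryDescriptor::withPersistentMemoryDescriptor(originalMD);
 *   if (persistent && (persistent != originalMD)) {
 *       // backing memory was replaced; use the new descriptor from here on
 *   }
 */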
/*
 * initWithAddress:
 *
 * Initialize an IOMemoryDescriptor. The buffer is a virtual address
 * relative to the specified task. If no task is supplied, the kernel
 * task is implied.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithAddress or
 * initWithRanges again on an existing instance -- note this behavior
 * is not commonly supported in other I/O Kit classes, although it is
 * supported here.
 */
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_address_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address,
                                           IOByteCount  withLength,
                                           IODirection  withDirection,
                                           task_t       withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                   IOVirtualRange * ranges,
                                   UInt32           count,
                                   IODirection      direction,
                                   task_t           task,
                                   bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}

/*
 * initWithOptions:
 *
 * Initialize an IOMemoryDescriptor. The buffer is made up of several virtual
 * address ranges from a given task, several physical ranges, a UPL from the
 * ubc system, or a uio (may be 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;        // Grab the new named entry
        options = orig->_flags | kIOMemoryAsReference;
        _singleRange = orig->_singleRange;      // Initialise our range
        buffers = &_singleRange;
        count = 1;

        // Now grab the original task and whatever mapper was previously used
        task = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
    case kIOMemoryTypeVirtual64:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:   // Neither Physical nor UPL should have a task
    case kIOMemoryTypePhysical64:
        mapper = kIOMapperNone;

    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */

        while (_wireCount)
            complete();
        if (_ranges.v && _rangesIsAllocated)
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }
        if (_memEntry)
            { ipc_port_release_send((ipc_port_t) _memEntry); _memEntry = 0; }
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Remove the dynamic internal use flags from the initial setting
    options   &= ~(kIOMemoryPreparedReadOnly);
    _flags     = options;
    _task      = task;

    // DEPRECATED variable initialisation
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    __iomd_reservedC = 0;

    _highestPage = 0;

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

 //       _wireCount++;  // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
            _rangesIsAllocated = false;

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
            _rangesIsAllocated = true;
            switch (_flags & kIOMemoryTypeMask)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
              case kIOMemoryTypeVirtual:
                _ranges.v = IONew(IOVirtualRange, count);
                if (!_ranges.v)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Find starting address within the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            UInt32 len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);      // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;       // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}

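/*
 * Illustrative sketch of the re-use behaviour documented above: calling
 * initWithOptions() on an already-initialized descriptor completes any
 * outstanding prepare()s and releases the old ranges before retargeting.
 * The range values and "task" are hypothetical.
 *
 *   IOAddressRange range = { newAddr, newLen };
 *   md->initWithOptions(&range, 1, 0, task,
 *                       kIOMemoryTypeVirtual64 | kIODirectionOutIn, 0);
 */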
/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
        reserved->memory = 0;
    UNLOCK;

    while (_wireCount)
        complete();
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && _rangesIsAllocated)
    {
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}

/* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel()
/* DEPRECATED */ {
                    panic("IOGMD::unmapFromKernel deprecated");
/* DEPRECATED */ }
/* DEPRECATED */
/* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
/* DEPRECATED */ {
                    panic("IOGMD::mapIntoKernel deprecated");
/* DEPRECATED */ }

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}

IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment64(offset, &srcLen);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
                            cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    assert(!remaining);

    return length - remaining;
}

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);    // @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
                            cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}

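/*
 * Illustrative usage sketch: copying the first bytes a descriptor covers
 * into a local buffer.  readBytes()/writeBytes() resolve physical segments
 * and use copypv(), so the descriptor should be prepared (wired) first.
 *
 *   UInt8 header[64];
 *   if (kIOReturnSuccess == md->prepare()) {
 *       IOByteCount got = md->readBytes(0, header, sizeof(header));
 *       md->complete();
 *   }
 */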
// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

/* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
/* DEPRECATED */ {
                    panic("IOGMD::setPosition deprecated");
/* DEPRECATED */ }

IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength = _length;
        data->fSGCount = _rangesCount;
        data->fPages = _pages;
        data->fDirection = _direction;
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries) {
                ioGMDData *gmdData = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(gmdData);
                UInt count = getNumIOPL(_memoryEntries, gmdData);

                data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
                               && ioplList[0].fMappedBase);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
            else
                data->fIsMapped = false;
        }

        return kIOReturnSuccess;
    }
    else if (!(kIOMDWalkSegments & op))
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if ((kIOMDFirstSegment != op)
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;      // Start from beginning

    UInt length;
    UInt64 address;
    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        UInt len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length   = off2Ind - offset;
        address  = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        ioGMDData * dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;       // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && ioplInfo.fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = ptoa_64(ioplInfo.fMappedBase) + offset;
            continue;   // Done leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
                && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;


        assert(address);
        assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = false;

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment64 dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                    ret, this, state->fOffset,
                    state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }
        if (!address)
            length = 0;
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOReturn    ret;
    IOByteCount length  = 0;
    addr64_t    address = 0;

//  assert(offset <= _length);

    if (offset < _length) // (within bounds?)
    {
        IOMDDMAWalkSegmentState _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = true;

        ret = dmaCommandOperation(
                kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                    ret, this, state->fOffset,
                    state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        if (!address)
            length = 0;
    }

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%x, class %s",
                    address, length, (getMetaClass())->getClassName());
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return ((IOPhysicalAddress) address);
}

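/*
 * Illustrative usage sketch: walking every physical segment of a prepared
 * descriptor, the usual way a driver builds a scatter/gather list.
 *
 *   IOByteCount offset = 0;
 *   while (offset < md->getLength()) {
 *       IOByteCount       segLen;
 *       IOPhysicalAddress segAddr = md->getPhysicalSegment(offset, &segLen);
 *       if (!segAddr)
 *           break;
 *       // ... append (segAddr, segLen) to the device's s/g list ...
 *       offset += segLen;
 *   }
 */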
5d5c5d0d A |
1447 | addr64_t |
1448 | IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment) | |
55e303ae A |
1449 | { |
1450 | IOPhysicalAddress phys32; | |
1451 | IOByteCount length; | |
1452 | addr64_t phys64; | |
5d5c5d0d | 1453 | IOMapper * mapper = 0; |
0b4e3aa0 | 1454 | |
55e303ae A |
1455 | phys32 = getPhysicalSegment(offset, lengthOfSegment); |
1456 | if (!phys32) | |
1457 | return 0; | |
0b4e3aa0 | 1458 | |
55e303ae | 1459 | if (gIOSystemMapper) |
5d5c5d0d A |
1460 | mapper = gIOSystemMapper; |
1461 | ||
1462 | if (mapper) | |
1c79356b | 1463 | { |
55e303ae A |
1464 | IOByteCount origLen; |
1465 | ||
5d5c5d0d | 1466 | phys64 = mapper->mapAddr(phys32); |
55e303ae A |
1467 | origLen = *lengthOfSegment; |
1468 | length = page_size - (phys64 & (page_size - 1)); | |
1469 | while ((length < origLen) | |
5d5c5d0d | 1470 | && ((phys64 + length) == mapper->mapAddr(phys32 + length))) |
55e303ae A |
1471 | length += page_size; |
1472 | if (length > origLen) | |
1473 | length = origLen; | |
1474 | ||
1475 | *lengthOfSegment = length; | |
0b4e3aa0 | 1476 | } |
55e303ae A |
1477 | else |
1478 | phys64 = (addr64_t) phys32; | |
1c79356b | 1479 | |
55e303ae | 1480 | return phys64; |
0b4e3aa0 A |
1481 | } |
1482 | ||
5d5c5d0d A |
1483 | IOPhysicalAddress |
1484 | IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment) | |
1c79356b | 1485 | { |
0b4e3aa0 A |
1486 | IOPhysicalAddress address = 0; |
1487 | IOPhysicalLength length = 0; | |
91447636 | 1488 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
1c79356b | 1489 | |
0b4e3aa0 | 1490 | assert(offset <= _length); |
1c79356b | 1491 | |
91447636 | 1492 | if ( type == kIOMemoryTypeUPL) |
55e303ae | 1493 | return super::getSourceSegment( offset, lengthOfSegment ); |
91447636 | 1494 | else if ( offset < _length ) // (within bounds?) |
1c79356b | 1495 | { |
0b4e3aa0 | 1496 | unsigned rangesIndex = 0; |
91447636 A |
1497 | Ranges vec = _ranges; |
1498 | user_addr_t addr; | |
1499 | ||
1500 | // Find starting address within the vector of ranges | |
1501 | for (;;) { | |
1502 | getAddrLenForInd(addr, length, type, vec, rangesIndex); | |
1503 | if (offset < length) | |
1504 | break; | |
1505 | offset -= length; // (make offset relative) | |
1506 | rangesIndex++; | |
1507 | } | |
1508 | ||
1509 | // Now that we have the starting range, | |
1510 | // lets find the last contiguous range | |
1511 | addr += offset; | |
1512 | length -= offset; | |
1513 | ||
1514 | for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) { | |
1515 | user_addr_t newAddr; | |
1516 | IOPhysicalLength newLen; | |
1517 | ||
1518 | getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex); | |
1519 | if (addr + length != newAddr) | |
1520 | break; | |
1521 | length += newLen; | |
1522 | } | |
1523 | if (addr) | |
1524 | address = (IOPhysicalAddress) addr; // Truncate address to 32bit | |
1525 | else | |
1526 | length = 0; | |
1c79356b | 1527 | } |
0b4e3aa0 A |
1528 | |
1529 | if ( lengthOfSegment ) *lengthOfSegment = length; | |
1530 | ||
1531 | return address; | |
1532 | } | |
1533 | ||

/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */
/* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
/* DEPRECATED */                                                     IOByteCount * lengthOfSegment)
/* DEPRECATED */ {
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
/* DEPRECATED */ }
/* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */

IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength    = getLength();
        data->fSGCount   = 0;
        data->fDirection = _direction;
        if (IOMapper::gSystem)
            data->fIsMapped = true;
        data->fIsPrepared = true; // Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = ncmd->getPhysicalSegment64(offset, &length);
        data->fLength = length;
    }
    else
        return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
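
/*
 * An illustrative sketch, not from this file: fetching one segment through
 * the dmaCommandOperation() interface.  'md' is a hypothetical descriptor;
 * only the fields read above are shown.
 *
 *     IOMDDMAWalkSegmentArgs args;
 *     args.fOffset = 0;       // byte offset into the descriptor
 *     args.fMapped = true;    // prefer system-mapper (32-bit) addresses
 *     if (kIOReturnSuccess == md->dmaCommandOperation(kIOMDWalkSegments,
 *                                                     &args, sizeof(args))) {
 *         // args.fIOVMAddr / args.fLength now describe the segment
 *     }
 */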

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }

        control = VM_PURGABLE_SET_STATE;
        switch (newState)
        {
            case kIOMemoryPurgeableKeepCurrent:
                control = VM_PURGABLE_GET_STATE;
                break;

            case kIOMemoryPurgeableNonVolatile:
                state = VM_PURGABLE_NONVOLATILE;
                break;
            case kIOMemoryPurgeableVolatile:
                state = VM_PURGABLE_VOLATILE;
                break;
            case kIOMemoryPurgeableEmpty:
                state = VM_PURGABLE_EMPTY;
                break;
            default:
                err = kIOReturnBadArgument;
                break;
        }

        if (kIOReturnSuccess != err)
            break;

        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);

        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                switch (state)
                {
                    case VM_PURGABLE_NONVOLATILE:
                        state = kIOMemoryPurgeableNonVolatile;
                        break;
                    case VM_PURGABLE_VOLATILE:
                        state = kIOMemoryPurgeableVolatile;
                        break;
                    case VM_PURGABLE_EMPTY:
                        state = kIOMemoryPurgeableEmpty;
                        break;
                    default:
                        state = kIOMemoryPurgeableNonVolatile;
                        err = kIOReturnNotReady;
                        break;
                }
                *oldState = state;
            }
        }
    }
    while (false);

    return (err);
}
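
/*
 * An illustrative sketch, hypothetical caller not in this file: marking a
 * descriptor's backing memory volatile while it is idle, then restoring it;
 * oldState reports the previous kIOMemoryPurgeable* state.
 *
 *     IOOptionBits oldState;
 *     if (kIOReturnSuccess == md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState)) {
 *         // ... later, before the memory is used again:
 *         md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *     }
 */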

extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
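
/*
 * An illustrative sketch, hypothetical caller: flushing a descriptor's whole
 * range from the data cache ahead of a DMA that is not cache coherent.
 *
 *     md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */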

#ifdef __ppc__
extern vm_offset_t static_memory_end;
#define io_kernel_static_end    static_memory_end
#else
extern vm_offset_t first_avail;
#define io_kernel_static_end    first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
    vm_map_t              /* map */,
    vm_address_t          offset,
    vm_size_t             *upl_size,
    upl_t                 *upl,
    upl_page_info_array_t page_list,
    unsigned int          *count,
    ppnum_t               *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t phys;
    ppnum_t highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;   // record the highest physical page seen
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}

IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;  // May no longer be valid, so let's not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;   // i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

#ifdef UPL_NEED_32BIT_ADDR
    if (kIODirectionPrepareToPhys32 & forDirection)
        uplFlags |= UPL_NEED_32BIT_ADDR;
#endif

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    // Iterate over the vector of virtual ranges
    Ranges vec = _ranges;
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    ppnum_t highestPage = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        user_addr_t startPage;
        IOByteCount numBytes;
        ppnum_t highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = (short) startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t theMap;
            if (curMap)
                theMap = curMap;
            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }
            else
                theMap = NULL;

            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &highPage);
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          &ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            assert(ioplSize);
            if (error != KERN_SUCCESS)
                goto abortExit;

            if (iopl.fIOPL)
                highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnNoMemory;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else {
                iopl.fFlags = 0;
                if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up a partially created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    return error;
}

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error    = kIOReturnSuccess;
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (!_wireCount
    &&  (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type)) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}

/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    assert(_wireCount);

    if (!_wireCount)
        return kIOReturnSuccess;

    _wireCount--;
    if (!_wireCount) {
        IOOptionBits type = _flags & kIOMemoryTypeMask;

        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)) {
            /* kIOMemoryTypePhysical */
            // DO NOTHING
        }
        else {
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            }

            (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength()
        }
    }
    return kIOReturnSuccess;
}
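
/*
 * An illustrative sketch, hypothetical caller: prepare() and complete()
 * bracketing a transfer, as the comments above require.
 *
 *     if (kIOReturnSuccess == md->prepare(kIODirectionOut)) {
 *         // ... run the I/O against the wired pages ...
 *         md->complete(kIODirectionOut);
 *     }
 */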

IOReturn IOGeneralMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset,
        IOByteCount        length )
{
    kern_return_t kr;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges vec = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len = 0;

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task
     && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
     && (1 == _rangesCount) && (0 == sourceOffset)
     && range0Addr && (length <= range0Len) ) {
        if (sizeof(user_addr_t) > 4 && ((UInt64) range0Addr) >> 32)
            return kIOReturnOverrun;    // Doesn't fit in 32bit return field
        else {
            *atAddress = range0Addr;
            return( kIOReturnSuccess );
        }
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            kr = mach_make_memory_entry_64(get_task_map(_task),
                        &actualSize, range0Addr,
                        VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                        NULL );

            if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) {
#if IOASSERT
                IOLog("mach_make_memory_entry_64 (%08llx) size (%08lx:%08x)\n",
                        range0Addr, (UInt32) actualSize, size);
#endif
                kr = kIOReturnVMError;
                ipc_port_release_send( sharedMem );
            }

            if( KERN_SUCCESS != kr)
                sharedMem = MACH_PORT_NULL;

        } else do {    // _task == 0, must be physical

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment64( sourceOffset, &segLen );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch( options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (int) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                            size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr) {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    if( 0 == sharedMem)
        kr = kIOReturnVMError;
    else
        kr = super::doMap( addressMap, atAddress,
                           options, sourceOffset, length );

    return( kr );
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    // could be much better
    if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount)) {

        IOOptionBits type = _flags & kIOMemoryTypeMask;
        user_addr_t  range0Addr;
        IOByteCount  range0Len;

        getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
        if (logical == range0Addr && length <= range0Len)
            return( kIOReturnSuccess );
    }

    return( super::doUnmap( addressMap, logical, length ));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject )

/* inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

#undef super
#define super IOMemoryMap

OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor * _memory,
        IOMemoryMap *        _superMap,
        IOByteCount          _offset,
        IOByteCount          _length )
{

    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

    return( true );
}

bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor * _memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    bool ok;
    bool redir = ((kIOMapUnique|kIOMapReference) == ((kIOMapUnique|kIOMapReference) & _options));

    if ((!_memory) || (!intoTask))
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    if (!redir)
    {
        if (!super::init())
            return(false);
        addressMap = get_task_map(intoTask);
        if( !addressMap)
            return( false);
        vm_map_reference(addressMap);
        addressTask = intoTask;
        logical     = toAddress;
        options     = _options;
    }

    _memory->retain();

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == _memory->doMap( addressMap, &toAddress,
                                                  _options, offset, length ));
    if (ok || redir)
    {
        if (memory)
            memory->release();
        memory  = _memory;
        logical = toAddress;
    }
    else
    {
        _memory->release();
        if (!redir)
        {
            logical = 0;
            memory  = 0;
            vm_map_deallocate(addressMap);
            addressMap = 0;
        }
    }

    return( ok );
}

/* LP64todo - these need to expand */
struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t   sharedMem;
    vm_size_t    size;
    vm_offset_t  mapped;
    IOByteCount  sourceOffset;
    IOOptionBits options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                           | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot,  // cur
                          prot,  // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}

IOReturn IOMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset,
        IOByteCount        length )
{
    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    vm_address_t      logical;
    IOByteCount       pageOffset;
    IOPhysicalAddress sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            _IOMemoryMap * mapping = (_IOMemoryMap *) *atAddress;
            ref.mapped = mapping->getVirtualAddress();

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = length;
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                  | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            err = upl_transpose(redirUPL2, mapping->redirUPL);
            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->memory->_memEntry;
                mapping->memory->_memEntry = me;
            }
        }
        else
        {

            logical = *atAddress;
            if( options & kIOMapAnywhere)
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            else {
                ref.mapped = trunc_page_32( logical );
                if( (logical - ref.mapped) != pageOffset) {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
            else
                err = IOMemoryDescriptorMapAlloc( addressMap, &ref );
        }

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}

enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        void *           _pager,
        vm_map_t         addressMap,
        IOVirtualAddress address,
        IOByteCount      sourceOffset,
        IOByteCount      length,
        IOOptionBits     options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    vm_size_t        size;
    vm_size_t        bytes;
    vm_size_t        page;
    IOByteCount      pageOffset;
    IOByteCount      pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
#ifdef DEBUG
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
#endif
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;

#ifdef DEBUG
        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        /* *** ALERT *** */
        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution  */
        /* of the mappings created above.  Need for this is in absolute */
        /* violation of the basic tenet that the pmap layer is a cache. */
        /* Further, it implies a serious I/O architectural violation on */
        /* the part of some user of the mapping.  As of this writing,   */
        /* the call to vm_fault is needed because the NVIDIA driver     */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be */
        /* fixed as soon as possible.  The NVIDIA driver should not     */
        /* need to query for this info as it should know from the doMap */
        /* call where the physical memory is mapped.  When a query is   */
        /* necessary to find a physical mapping, it should be done      */
        /* through an iokit call which includes the mapped memory       */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /* *** Temporary Workaround *** */
        /* *** ALERT *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}

IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t         addressMap,
        IOVirtualAddress logical,
        IOByteCount      length )
{
    IOReturn err;

#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );
#endif

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}
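
// redirect() below unmaps (doRedirect == true) or re-establishes
// (doRedirect == false) every mapping of this descriptor; threads that
// fault on a redirected descriptor block in handleFault() on the
// kIOMemoryRedirected flag until the WAKEUP here releases them.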

IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn       err = kIOReturnSuccess;
    _IOMemoryMap * mapping = 0;
    OSIterator *   iter;

    LOCK;

    if( doRedirect)
        _flags |= kIOMemoryRedirected;
    else
        _flags &= ~kIOMemoryRedirected;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, doRedirect );

            iter->release();
        }
    } while( false );

    if (!doRedirect)
    {
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, doRedirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool doRedirect )
{
    return( _parent->redirect( safeTask, doRedirect ));
}

IOReturn _IOMemoryMap::redirect( task_t safeTask, bool doRedirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, doRedirect );
    } else {

        LOCK;

        do
        {
            if (!logical)
                break;
            if (!addressMap)
                break;

            if ((!safeTask || (get_task_map(safeTask) != addressMap))
             && (0 == (options & kIOMapStatic)))
            {
                IOUnmapPages( addressMap, logical, length );
                if(!doRedirect && safeTask
                 && (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                     || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)))
                {
                    err = vm_deallocate( addressMap, logical, length );
                    err = memory->doMap( addressMap, &logical,
                                         (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                         offset, length );
                } else
                    err = kIOReturnSuccess;
#ifdef DEBUG
                IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", doRedirect, this, logical, length, addressMap);
#endif
            }
            else if (kIOMapWriteCombineCache == (options & kIOMapCacheMask))
            {
                IOOptionBits newMode;
                newMode = (options & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache);
                IOProtectCacheMode(addressMap, logical, length, newMode);
            }
        }
        while (false);

        UNLOCK;
    }

    if ((((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
         || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
     && safeTask
     && (doRedirect != (0 != (memory->_flags & kIOMemoryRedirected))))
        memory->redirect(safeTask, doRedirect);

    return( err );
}

IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
    UNLOCK;
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if (owner && (owner != memory))
    {
        LOCK;
        owner->removeMapping(this);
        UNLOCK;
    }

    if( superMap)
        superMap->release();

    if (redirUPL) {
        upl_commit(redirUPL, NULL, 0);
        upl_deallocate(redirUPL);
    }

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}

_IOMemoryMap * _IOMemoryMap::copyCompatible(
        IOMemoryDescriptor * owner,
        task_t               task,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( options & kIOMapUnique)
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

IOPhysicalAddress
_IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, _length );
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
    if (!gIOCopyMapper)
    {
        IOMapper *
        mapper = new IOCopyMapper;
        if (mapper)
        {
            if (mapper->init() && mapper->start(NULL))
                gIOCopyMapper = (IOCopyMapper *) mapper;
            else
                mapper->release();
        }
    }

    gIOLastPage = IOGetLastPageNumber();
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t           intoTask,
        IOVirtualAddress mapAddress,
        IOOptionBits     options )
{
    _IOMemoryMap * newMap;

    newMap = new _IOMemoryMap;

    LOCK;

    if( newMap
     && !newMap->initWithDescriptor( this, intoTask, mapAddress,
                    options | kIOMapStatic, 0, getLength() )) {
        newMap->release();
        newMap = 0;
    }

    addMapping( newMap);

    UNLOCK;

    return( newMap);
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits options )
{

    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t           intoTask,
        IOVirtualAddress toAddress,
        IOOptionBits     options,
        IOByteCount      offset,
        IOByteCount      length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}
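
/*
 * An illustrative sketch, hypothetical caller: the returned map object
 * holds a reference on the descriptor and the mapping is torn down when
 * the map object is released.
 *
 *     IOMemoryMap * map = md->map();
 *     if (map) {
 *         IOVirtualAddress va = map->getVirtualAddress();
 *         // ... access the memory through va ...
 *         map->release();
 *     }
 */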

IOReturn _IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory,
                                IOOptionBits         options,
                                IOByteCount          offset)
{
    IOReturn err = kIOReturnSuccess;
    IOMemoryDescriptor * physMem = 0;

    LOCK;

    if (logical && addressMap) do
    {
        if (((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            || ((memory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
        {
            physMem = memory;
            physMem->retain();
        }

        if (!redirUPL)
        {
            vm_size_t size = length;
            int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                      | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;
            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) memory->_memEntry, 0, &size, &redirUPL,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL = 0;

            if (physMem)
            {
                IOUnmapPages( addressMap, logical, length );
                physMem->redirect(0, true);
            }
        }

        if (newBackingMemory)
        {
            if (newBackingMemory != memory)
            {
                if (this != newBackingMemory->makeMapping(newBackingMemory, addressTask, (IOVirtualAddress) this,
                                                          options | kIOMapUnique | kIOMapReference,
                                                          offset, length))
                    err = kIOReturnError;
            }
            if (redirUPL)
            {
                upl_commit(redirUPL, NULL, 0);
                upl_deallocate(redirUPL);
                redirUPL = 0;
            }
            if (physMem)
                physMem->redirect(0, false);
        }
    }
    while (false);

    UNLOCK;

    if (physMem)
        physMem->release();

    return (err);
}
3083 | ||
1c79356b A |
3084 | IOMemoryMap * IOMemoryDescriptor::makeMapping( |
3085 | IOMemoryDescriptor * owner, | |
3086 | task_t intoTask, | |
3087 | IOVirtualAddress toAddress, | |
3088 | IOOptionBits options, | |
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryDescriptor * mapDesc = 0;
    _IOMemoryMap *       mapping = 0;
    OSIterator *         iter;

    LOCK;

    do
    {
        if (kIOMapUnique & options)
        {
            IOPhysicalAddress phys;
            IOByteCount       physLen;

            if (owner != this)
                continue;

            if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
                || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64))
            {
                phys = getPhysicalSegment(offset, &physLen);
                if (!phys || (physLen < length))
                    continue;

                mapDesc = IOMemoryDescriptor::withPhysicalAddress(
                                phys, length, _direction);
                if (!mapDesc)
                    continue;
                offset = 0;
            }
            else
            {
                mapDesc = this;
                mapDesc->retain();
            }

            if (kIOMapReference & options)
            {
                mapping = (_IOMemoryMap *) toAddress;
                mapping->retain();

#if 1
                // Sanity check: the source and the destination must agree on
                // the offset within the page.
                uint32_t pageOffset1 = mapDesc->getSourceSegment( offset, NULL );
                pageOffset1 -= trunc_page_32( pageOffset1 );

                uint32_t pageOffset2 = mapping->getVirtualAddress();
                pageOffset2 -= trunc_page_32( pageOffset2 );

                if (pageOffset1 != pageOffset2)
                    IOLog("::redirect can't map offset %x to addr %x\n",
                            pageOffset1, mapping->getVirtualAddress());
#endif

                if (!mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                                  offset, length ))
                {
#ifdef DEBUG
                    IOLog("Didn't redirect map %08lx : %08lx\n", offset, length );
#endif
                }

                if (mapping->owner)
                    mapping->owner->removeMapping(mapping);
                continue;
            }
        }
        else
        {
            // Look for an existing compatible mapping before making a new one.
            if( (iter = OSCollectionIterator::withCollection( _mappings))) {

                while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                    if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                        break;
                }
                iter->release();
            }

            if (mapping)
                mapping->retain();

            if( mapping || (options & kIOMapReference))
                continue;

            mapDesc = owner;
            mapDesc->retain();
        }
        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
            && !mapping->initWithDescriptor( mapDesc, intoTask, toAddress, options,
                                             offset, length )) {
#ifdef DEBUG
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
#endif
            mapping->release();
            mapping = 0;
        }

        if (mapping)
            mapping->retain();

    } while( false );

    if (mapping)
    {
        mapping->owner = owner;
        owner->addMapping( mapping);
        mapping->release();
    }

    UNLOCK;

    if (mapDesc)
        mapDesc->release();

    return( mapping);
}
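
/*
 * Illustrative sketch ('md' is a hypothetical, already-created descriptor):
 * clients reach makeMapping() indirectly through IOMemoryDescriptor::map().
 * Because makeMapping() first searches _mappings via copyCompatible(), a
 * second, compatible map() call can be satisfied from the existing mapping
 * rather than creating a fresh virtual range.
 *
 *   IOMemoryMap * a = md->map();
 *   IOMemoryMap * b = md->map();   // may reuse the range mapped for 'a'
 *   if (b) b->release();
 *   if (a) a->release();
 */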

void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings )
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}
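
/*
 * A minimal sketch of the bookkeeping above, with hypothetical names: since
 * _mappings is an OSSet, each live mapping is recorded at most once.
 *
 *   md->addMapping(map);      // done by makeMapping() on success
 *   // ...
 *   md->removeMapping(map);   // done as the mapping is torn down
 */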

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}
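
/*
 * Illustrative sketch (hypothetical names): clients normally obtain a
 * sub-range through the IOMemoryDescriptor::withSubRange() factory rather
 * than calling initSubRange() directly.
 *
 *   // Expose bytes [0x100, 0x300) of 'parentMD' as its own descriptor.
 *   IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
 *                                  parentMD, 0x100, 0x200, kIODirectionOutIn);
 *   if (sub) {
 *       assert(sub->getLength() == 0x200);
 *       sub->release();           // also drops its retain on parentMD
 *   }
 */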

void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOReturn
IOSubMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    IOReturn rtn;

    if (kIOMDGetCharacteristics == op) {

        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (kIOReturnSuccess == rtn) {
            IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
            data->fLength    = _length;
            data->fSGCount   = 0;   // XXX gvdl: still need to compute the SG and page counts
            data->fPages     = 0;
            data->fPageAlign = 0;
        }

        return rtn;
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data =
            reinterpret_cast<IOMDDMAWalkSegmentArgs *>(vData);
        UInt offset = data->fOffset;
        UInt remain = _length - offset;
        if ((int) remain <= 0)
            return (!remain)? kIOReturnOverrun : kIOReturnInternalError;

        // Rebase the walk onto the parent, then restore the caller's offset
        // and clip the returned length to this sub-range.
        data->fOffset = offset + _start;
        rtn = _parent->dmaCommandOperation(op, vData, dataSize);
        if (data->fLength > remain)
            data->fLength = remain;
        data->fOffset = offset;

        return rtn;
    }
    else
        return kIOReturnBadArgument;
}
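
/*
 * Worked example of the rebasing above, with hypothetical values: for a
 * sub-range with _start = 0x400 and _length = 0x200, a walk at
 * fOffset = 0x180 is forwarded to the parent at 0x580 (= 0x180 + 0x400).
 * If the parent reports fLength = 0x100, it is clipped to the 0x80 bytes
 * remaining in the sub-range, and fOffset is restored to 0x180.
 */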

addr64_t
IOSubMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount * length)
{
    addr64_t    address;
    IOByteCount actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment64( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress
IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}
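
/*
 * Illustrative sketch (hypothetical names): walking every physical segment
 * of a prepared descriptor. Each call returns the segment containing 'pos'
 * and its contiguous length, already clipped to the sub-range by the code
 * above.
 *
 *   IOByteCount       pos = 0, segLen;
 *   IOPhysicalAddress seg;
 *   while ((seg = sub->getPhysicalSegment(pos, &segLen))) {
 *       IOLog("segment %08lx, %ld bytes\n", (long) seg, (long) segLen);
 *       pos += segLen;
 *   }
 */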

IOReturn IOSubMemoryDescriptor::doMap(
        vm_map_t           addressMap,
        IOVirtualAddress * atAddress,
        IOOptionBits       options,
        IOByteCount        sourceOffset,
        IOByteCount        length )
{
    if( sourceOffset >= _length)
        return( kIOReturnOverrun );
    return (_parent->doMap(addressMap, atAddress, options, sourceOffset + _start, length));
}

IOPhysicalAddress
IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    IOPhysicalAddress address;
    IOByteCount       actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    // Virtual segments are not provided by sub-range descriptors.
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}
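
/*
 * Illustrative sketch (hypothetical names): reads are rebased by _start and
 * clipped at the end of the sub-range, so a caller can never read past it.
 *
 *   char buf[64];
 *   // On a sub-range of length 0x40, this returns at most 0x10 bytes:
 *   IOByteCount got = sub->readBytes(0x30, buf, sizeof(buf));
 */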

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOReturn IOSubMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                              IOOptionBits * oldState )
{
    IOReturn err;

    LOCK;
    err = _parent->setPurgeable( newState, oldState );
    UNLOCK;

    return( err );
}
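
/*
 * Illustrative sketch (hypothetical names): purgeable state is forwarded to
 * the parent, so volatility applies to the parent descriptor's memory as a
 * whole.
 *
 *   IOOptionBits oldState;
 *   if (kIOReturnSuccess == sub->setPurgeable(kIOMemoryPurgeableVolatile,
 *                                             &oldState)) {
 *       // ... later, before touching the memory again:
 *       sub->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
 *       if (kIOMemoryPurgeableEmpty == oldState) {
 *           // contents were reclaimed while volatile; reinitialize them
 *       }
 *   }
 */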

IOReturn IOSubMemoryDescriptor::performOperation( IOOptionBits options,
                                                  IOByteCount offset, IOByteCount length )
{
    IOReturn err;

    assert(offset <= _length);

    if( offset >= _length)
        return( kIOReturnOverrun );

    LOCK;
    err = _parent->performOperation( options, _start + offset,
                                     min(length, _length - offset) );
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}
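
/*
 * Illustrative sketch (hypothetical names): prepare() wires down the
 * parent's pages for I/O and must be balanced by complete(), even when
 * issued through a sub-range descriptor.
 *
 *   if (kIOReturnSuccess == sub->prepare(kIODirectionIn)) {
 *       // ... program the DMA engine and wait for it to finish ...
 *       sub->complete(kIODirectionIn);
 *   }
 */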

IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor * owner,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         options,
        IOByteCount          offset,
        IOByteCount          length )
{
    IOMemoryMap * mapping = 0;

    if (!(kIOMapUnique & options))
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        _parent, intoTask,
                        toAddress - (_start + offset),
                        options | kIOMapReference,
                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                        _parent, intoTask,
                        toAddress,
                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}

/* ick: a sub-range descriptor may only be set up via initSubRange(), so
   every inherited initializer below is deliberately disabled. */

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount length,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                       IOPhysicalAddress address,
                                       IOByteCount       length,
                                       IODirection       direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                       IOVirtualRange * ranges,
                                       UInt32           withCount,
                                       IODirection      direction,
                                       task_t           task,
                                       bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    struct SerData {
        user_addr_t address;
        user_size_t length;
    } *vcopy;
    unsigned int index, nRanges;
    bool result;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on we can go to bail.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        Ranges vec = _ranges;
        for (index = 0; index < nRanges; index++) {
            user_addr_t addr; IOByteCount len;
            getAddrLenForInd(addr, len, type, vec, index);
            vcopy[index].address = addr;
            vcopy[index].length  = len;
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        user_addr_t addr = vcopy[index].address;
        IOByteCount len  = (IOByteCount) vcopy[index].length;
        values[0] =
            OSNumber::withNumber(addr, (((UInt64) addr) >> 32)? 64 : 32);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(len, sizeof(len) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(SerData) * nRanges);  // must match the IOMalloc size above
    return result;
}
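
/*
 * Illustrative sketch (hypothetical names): driving serialize() by hand;
 * normally the registry invokes it when a descriptor is published as a
 * property.
 *
 *   OSSerialize * s = OSSerialize::withCapacity(4096);
 *   if (s) {
 *       if (md->serialize(s))
 *           IOLog("%s\n", s->text());   // XML array of {address, length}
 *       s->release();
 *   }
 */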

bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s) {
        return (false);
    }
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result) {
        return false;
    }

    return s->addXMLEndTag("dict");
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress
IOMemoryDescriptor::getPhysicalAddress()
        { return( getPhysicalSegment( 0, 0 )); }