/*
 * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include "IOKitKernelInternal.h"

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>

#include <sys/uio.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ipc_port_release_send(ipc_port_t port);

kern_return_t
memory_object_iopl_request(
    ipc_port_t             port,
    memory_object_offset_t offset,
    vm_size_t              *upl_size,
    upl_t                  *upl_ptr,
    upl_page_info_array_t  user_page_list,
    unsigned int           *page_list_count,
    int                    *flags);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

__END_DECLS

#define kIOMaximumMappedIOByteCount (512*1024*1024)

static IOMapper * gIOSystemMapper = NULL;

static ppnum_t gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

ppnum_t gIOLastPage;

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK    IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK  IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP   IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP  \
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

#define IOMD_DEBUG_DMAACTIVE    1
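
// When IOMD_DEBUG_DMAACTIVE is set, dmaCommandOperation() (below) accepts the
// kIOMDSetDMAActive/kIOMDSetDMAInactive ops and counts outstanding DMA
// transactions in __iomd_reservedA.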

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

// Some data structures and accessor macros used by the
// initWithOptions function.

enum ioPLBlockFlags {
    kIOPLOnDevice  = 0x00000001,
    kIOPLExternUPL = 0x00000002,
};

struct typePersMDData
{
    const IOGeneralMemoryDescriptor *fMD;
    ipc_port_t fMemEntry;
};

struct ioPLBlock {
    upl_t fIOPL;
    vm_address_t fPageInfo;   // Pointer to page list or index into it
    uint32_t fIOMDOffset;     // The offset of this iopl in descriptor
    ppnum_t fMappedBase;      // Page number of first page in this iopl
    unsigned int fPageOffset; // Offset within first page of iopl
    unsigned int fFlags;      // Flags
};

struct ioGMDData {
    IOMapper *fMapper;
    uint64_t fPreparationID;
    unsigned int fPageCnt;
#if __LP64__
    // align arrays to 8 bytes so following macros work
    unsigned int fPad;
#endif
    upl_page_info_t fPageList[1]; /* variable length */
    ioPLBlock fBlocks[1];         /* variable length */
};

#define getDataP(osd)   ((ioGMDData *) (osd)->getBytesNoCopy())
#define getIOPLList(d)  ((ioPLBlock *) &(d->fPageList[d->fPageCnt]))
#define getNumIOPL(osd, d)      \
    (((osd)->getLength() - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock))
#define getPageList(d)  (&(d->fPageList[0]))
#define computeDataSize(p, u) \
    (offsetof(ioGMDData, fPageList) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock))
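
// The ioGMDData record lives inside the _memoryEntries OSData, laid out as a
// fixed header followed by two variable-length arrays:
//
//     [ header | fPageList[fPageCnt] | ioPLBlock[...] ]
//
// fPageList holds one upl_page_info_t per page of the descriptor, and the
// ioPLBlock array that follows describes each IOPL created at prepare() time.
// getIOPLList() locates the block array by skipping fPageCnt page entries,
// and getNumIOPL() derives the block count from the OSData's current length,
// so appending an ioPLBlock to the OSData implicitly grows the array.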

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page(a) + PAGE_SIZE )


extern "C" {
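
// Upcalls from the device pager: device_data_action is invoked to resolve a
// fault on device memory and forwards it to the owning descriptor's
// handleFault(); device_close is invoked when the pager is torn down. The
// local ExpansionData definitions mirror the layout of the descriptor's
// reserved (ExpansionData) state that was registered with the pager.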

kern_return_t device_data_action(
               uintptr_t          device_handle,
               ipc_port_t         device_pager,
               vm_prot_t          protection,
               vm_object_offset_t offset,
               vm_size_t          size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
    {
        memDesc->retain();
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
        memDesc->release();
    }
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               uintptr_t device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData * ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}
};  // end extern "C"

// Note this inline function uses C++ reference arguments to return values.
// This means that pointers are not passed and NULLs don't have to be
// checked for, as a NULL reference is illegal.
static inline void
getAddrLenForInd(user_addr_t &addr, IOPhysicalLength &len, // Output variables
     UInt32 type, IOGeneralMemoryDescriptor::Ranges r, UInt32 ind)
{
    assert(kIOMemoryTypeUIO      == type
        || kIOMemoryTypeVirtual  == type || kIOMemoryTypeVirtual64  == type
        || kIOMemoryTypePhysical == type || kIOMemoryTypePhysical64 == type);
    if (kIOMemoryTypeUIO == type) {
        user_size_t us;
        uio_getiov((uio_t) r.uio, ind, &addr, &us); len = us;
    }
#ifndef __LP64__
    else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type)) {
        IOAddressRange cur = r.v64[ind];
        addr = cur.address;
        len  = cur.length;
    }
#endif /* !__LP64__ */
    else {
        IOVirtualRange cur = r.v[ind];
        addr = cur.address;
        len  = cur.length;
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddressRange((IOVirtualAddress) address, length, direction | kIOMemoryAutoPrepare, kernel_task);
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(IOVirtualAddress address,
                                IOByteCount      length,
                                IODirection      direction,
                                task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalAddress(
                                IOPhysicalAddress address,
                                IOByteCount       length,
                                IODirection       direction )
{
    return (IOMemoryDescriptor::withAddressRange(address, length, direction, TASK_NULL));
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withRanges( IOVirtualRange * ranges,
                                UInt32           withCount,
                                IODirection      direction,
                                task_t           task,
                                bool             asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithRanges(ranges, withCount, direction, task, asReference))
            return that;

        that->release();
    }
    return 0;
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRange(mach_vm_address_t address,
                                     mach_vm_size_t    length,
                                     IOOptionBits      options,
                                     task_t            task)
{
    IOAddressRange range = { address, length };
    return (IOMemoryDescriptor::withAddressRanges(&range, 1, options, task));
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddressRanges(IOAddressRange * ranges,
                                      UInt32           rangeCount,
                                      IOOptionBits     options,
                                      task_t           task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (task)
            options |= kIOMemoryTypeVirtual64;
        else
            options |= kIOMemoryTypePhysical64;

        if (that->initWithOptions(ranges, rangeCount, 0, task, options, /* mapper */ 0))
            return that;

        that->release();
    }

    return 0;
}
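
// Illustrative only (not part of the original file): typical client usage of
// the factory above. A driver wrapping a user buffer would create the
// descriptor against the client task, wire it with prepare(), and balance
// with complete()/release() when the I/O finishes. userAddr, userLen and
// clientTask are hypothetical values supplied by the caller.
//
//     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
//         userAddr, userLen, kIODirectionOutIn, clientTask);
//     if (md && (kIOReturnSuccess == md->prepare()))
//     {
//         // ... program DMA or use readBytes()/writeBytes() ...
//         md->complete();
//     }
//     if (md)
//         md->release();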

/*
 * withOptions:
 *
 * Create a new IOMemoryDescriptor. The buffer is made up of several
 * virtual address ranges, from a given task.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withOptions(void *       buffers,
                                UInt32       count,
                                UInt32       offset,
                                task_t       task,
                                IOOptionBits opts,
                                IOMapper *   mapper)
{
    IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor;

    if (self
    && !self->initWithOptions(buffers, count, offset, task, opts, mapper))
    {
        self->release();
        return 0;
    }

    return self;
}

bool IOMemoryDescriptor::initWithOptions(void *       buffers,
                                         UInt32       count,
                                         UInt32       offset,
                                         task_t       task,
                                         IOOptionBits options,
                                         IOMapper *   mapper)
{
    return( false );
}

#ifndef __LP64__
IOMemoryDescriptor *
IOMemoryDescriptor::withPhysicalRanges(IOPhysicalRange * ranges,
                                       UInt32            withCount,
                                       IODirection       direction,
                                       bool              asReference)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference))
            return that;

        that->release();
    }
    return 0;
}

IOMemoryDescriptor *
IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of,
                                 IOByteCount          offset,
                                 IOByteCount          length,
                                 IODirection          direction)
{
    return (IOSubMemoryDescriptor::withSubRange(of, offset, length, direction | kIOMemoryThreadSafe));
}
#endif /* !__LP64__ */

IOMemoryDescriptor *
IOMemoryDescriptor::withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD)
{
    IOGeneralMemoryDescriptor *origGenMD =
        OSDynamicCast(IOGeneralMemoryDescriptor, originalMD);

    if (origGenMD)
        return IOGeneralMemoryDescriptor::
            withPersistentMemoryDescriptor(origGenMD);
    else
        return 0;
}

IOMemoryDescriptor *
IOGeneralMemoryDescriptor::withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD)
{
    ipc_port_t sharedMem = (ipc_port_t) originalMD->createNamedEntry();

    if (!sharedMem)
        return 0;

    if (sharedMem == originalMD->_memEntry) {
        originalMD->retain();             // Add a new reference to ourselves
        ipc_port_release_send(sharedMem); // Remove extra send right
        return originalMD;
    }

    IOGeneralMemoryDescriptor * self = new IOGeneralMemoryDescriptor;
    typePersMDData initData = { originalMD, sharedMem };

    if (self
    && !self->initWithOptions(&initData, 1, 0, 0, kIOMemoryTypePersistentMD, 0)) {
        self->release();
        self = 0;
    }
    return self;
}
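
// createNamedEntry() below backs the persistent-descriptor machinery: it asks
// the VM for a Mach named memory entry covering the descriptor's first range,
// passing MAP_MEM_NAMED_REUSE so that, if the backing memory is unchanged,
// the existing entry is handed back. The identity test above
// (sharedMem == _memEntry) relies on that reuse to detect when the original
// descriptor can simply be retained and returned.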

void *IOGeneralMemoryDescriptor::createNamedEntry()
{
    kern_return_t error;
    ipc_port_t sharedMem;

    IOOptionBits type = _flags & kIOMemoryTypeMask;

    user_addr_t range0Addr;
    IOByteCount range0Len;
    getAddrLenForInd(range0Addr, range0Len, type, _ranges, 0);
    range0Addr = trunc_page_64(range0Addr);

    vm_size_t size = ptoa_32(_pages);
    vm_address_t kernelPage = (vm_address_t) range0Addr;

    vm_map_t theMap = ((_task == kernel_task)
                        && (kIOMemoryBufferPageable & _flags))
                    ? IOPageableMapForAddress(kernelPage)
                    : get_task_map(_task);

    memory_object_size_t actualSize = size;
    vm_prot_t prot = VM_PROT_READ;
    if (kIODirectionOut != (kIODirectionOutIn & _flags))
        prot |= VM_PROT_WRITE;

    if (_memEntry)
        prot |= MAP_MEM_NAMED_REUSE;

    error = mach_make_memory_entry_64(theMap,
                &actualSize, range0Addr, prot, &sharedMem, (ipc_port_t) _memEntry);

    if (KERN_SUCCESS == error) {
        if (actualSize == size) {
            return sharedMem;
        } else {
#if IOASSERT
            IOLog("IOGMD::mach_make_memory_entry_64 (%08llx) size (%08llx:%08llx)\n",
                  (UInt64)range0Addr, (UInt64)actualSize, (UInt64)size);
#endif
            ipc_port_release_send( sharedMem );
        }
    }

    return MACH_PORT_NULL;
}

#ifndef __LP64__
bool
IOGeneralMemoryDescriptor::initWithAddress(void *      address,
                                           IOByteCount withLength,
                                           IODirection withDirection)
{
    _singleRange.v.address = (vm_offset_t) address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true);
}

bool
IOGeneralMemoryDescriptor::initWithAddress(IOVirtualAddress address,
                                           IOByteCount      withLength,
                                           IODirection      withDirection,
                                           task_t           withTask)
{
    _singleRange.v.address = address;
    _singleRange.v.length  = withLength;

    return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       withLength,
                                 IODirection       withDirection )
{
    _singleRange.p.address = address;
    _singleRange.p.length  = withLength;

    return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true);
}

bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           count,
                                IODirection      direction,
                                task_t           task,
                                bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;

        // Auto-prepare if this is a kernel memory descriptor as very few
        // clients bother to prepare() kernel memory.
        // But it was not enforced, so what are you going to do?
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
#endif /* !__LP64__ */

/*
 * initWithOptions:
 *
 * IOMemoryDescriptor. The buffer is made up of several virtual address ranges,
 * from a given task, several physical ranges, a UPL from the UBC
 * system, or a uio (possibly 64-bit) from the BSD subsystem.
 *
 * Passing the ranges as a reference will avoid an extra allocation.
 *
 * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an
 * existing instance -- note this behavior is not commonly supported in other
 * I/O Kit classes, although it is supported here.
 */

bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{
    IOOptionBits type = options & kIOMemoryTypeMask;

#ifndef __LP64__
    if (task
        && (kIOMemoryTypeVirtual == type)
        && vm_map_is_64bit(get_task_map(task))
        && ((IOVirtualRange *) buffers)->address)
    {
        OSReportWithBacktrace("IOMemoryDescriptor: attempt to create 32b virtual in 64b task, use ::withAddressRange()");
        return false;
    }
#endif /* !__LP64__ */

    // Grab the original MD's configuration data to initialise the
    // arguments to this function.
    if (kIOMemoryTypePersistentMD == type) {

        typePersMDData *initData = (typePersMDData *) buffers;
        const IOGeneralMemoryDescriptor *orig = initData->fMD;
        ioGMDData *dataP = getDataP(orig->_memoryEntries);

        // Only accept persistent memory descriptors with valid dataP data.
        assert(orig->_rangesCount == 1);
        if ( !(orig->_flags & kIOMemoryPersistent) || !dataP)
            return false;

        _memEntry = initData->fMemEntry;    // Grab the new named entry
        options = orig->_flags & ~kIOMemoryAsReference;
        type    = options & kIOMemoryTypeMask;
        buffers = orig->_ranges.v;
        count   = orig->_rangesCount;

        // Now grab the original task and whatever mapper was previously used
        task   = orig->_task;
        mapper = dataP->fMapper;

        // We are ready to go through the original initialisation now
    }

    switch (type) {
    case kIOMemoryTypeUIO:
    case kIOMemoryTypeVirtual:
#ifndef __LP64__
    case kIOMemoryTypeVirtual64:
#endif /* !__LP64__ */
        assert(task);
        if (!task)
            return false;
        break;

    case kIOMemoryTypePhysical:     // Neither Physical nor UPL should have a task
#ifndef __LP64__
    case kIOMemoryTypePhysical64:
#endif /* !__LP64__ */
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        return false;   /* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else. Clean up our present state.
         */
        IOOptionBits type = _flags & kIOMemoryTypeMask;
        if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
        {
            while (_wireCount)
                complete();
        }
        if (_ranges.v && !(kIOMemoryAsReference & _flags))
        {
            if (kIOMemoryTypeUIO == type)
                uio_free((uio_t) _ranges.v);
#ifndef __LP64__
            else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
                IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
            else
                IODelete(_ranges.v, IOVirtualRange, _rangesCount);
        }

        if (_memEntry)
        {
            ipc_port_release_send((ipc_port_t) _memEntry);
            _memEntry = 0;
        }
        if (_mappings)
            _mappings->flushCollection();
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (kIOMemoryMapperNone & options)
        mapper = 0;     // No Mapper
    else if (mapper == kIOMapperSystem) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    // Temp binary compatibility for kIOMemoryThreadSafe
    if (kIOMemoryReserved6156215 & options)
    {
        options &= ~kIOMemoryReserved6156215;
        options |= kIOMemoryThreadSafe;
    }
    // Remove the dynamic internal use flags from the initial setting
    options &= ~(kIOMemoryPreparedReadOnly);
    _flags = options;
    _task  = task;

#ifndef __LP64__
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */

    __iomd_reservedA = 0;
    __iomd_reservedB = 0;
    _highestPage = 0;

    if (kIOMemoryThreadSafe & options)
    {
        if (!_prepareLock)
            _prepareLock = IOLockAlloc();
    }
    else if (_prepareLock)
    {
        IOLockFree(_prepareLock);
        _prepareLock = NULL;
    }
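
    // Two construction paths follow: an external UPL handed in by the UBC is
    // simply wrapped in a single ioPLBlock, while the range-based types
    // record their ranges here and defer wiring to prepare() (unless
    // kIOMemoryAutoPrepare forces it at the end of this function).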

    if (kIOMemoryTypeUPL == type) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, computeDataSize(0, 0));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

        //       _wireCount++;    // UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        iopl.fIOPL = (upl_t) buffers;
        upl_set_referenced(iopl.fIOPL, true);
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST(iopl.fIOPL);

        if (upl_get_size(iopl.fIOPL) < (count + offset))
            panic("short external upl");

        // Set the flag kIOPLOnDevice, which is conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;

        _highestPage = upl_get_highest_page(iopl.fIOPL);

        if (!pageList->device) {
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else {
        // kIOMemoryTypeVirtual  | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO
        // kIOMemoryTypePhysical | kIOMemoryTypePhysical64

        // Initialize the memory descriptor
        if (options & kIOMemoryAsReference) {
#ifndef __LP64__
            _rangesIsAllocated = false;
#endif /* !__LP64__ */

            // Hack assignment to get the buffer arg into _ranges.
            // I'd prefer to do _ranges = (Ranges) buffers, but that doesn't
            // work, C++ sigh.
            // This also initialises the uio & physical ranges.
            _ranges.v = (IOVirtualRange *) buffers;
        }
        else {
#ifndef __LP64__
            _rangesIsAllocated = true;
#endif /* !__LP64__ */
            switch (type)
            {
              case kIOMemoryTypeUIO:
                _ranges.v = (IOVirtualRange *) uio_duplicate((uio_t) buffers);
                break;

#ifndef __LP64__
              case kIOMemoryTypeVirtual64:
              case kIOMemoryTypePhysical64:
                if (count == 1
                    && (((IOAddressRange *) buffers)->address + ((IOAddressRange *) buffers)->length) <= 0x100000000ULL
                    ) {
                    if (kIOMemoryTypeVirtual64 == type)
                        type = kIOMemoryTypeVirtual;
                    else
                        type = kIOMemoryTypePhysical;
                    _flags = (_flags & ~kIOMemoryTypeMask) | type | kIOMemoryAsReference;
                    _rangesIsAllocated = false;
                    _ranges.v = &_singleRange.v;
                    _singleRange.v.address = ((IOAddressRange *) buffers)->address;
                    _singleRange.v.length  = ((IOAddressRange *) buffers)->length;
                    break;
                }
                _ranges.v64 = IONew(IOAddressRange, count);
                if (!_ranges.v64)
                    return false;
                bcopy(buffers, _ranges.v, count * sizeof(IOAddressRange));
                break;
#endif /* !__LP64__ */
              case kIOMemoryTypeVirtual:
              case kIOMemoryTypePhysical:
                if (count == 1) {
                    _flags |= kIOMemoryAsReference;
#ifndef __LP64__
                    _rangesIsAllocated = false;
#endif /* !__LP64__ */
                    _ranges.v = &_singleRange.v;
                } else {
                    _ranges.v = IONew(IOVirtualRange, count);
                    if (!_ranges.v)
                        return false;
                }
                bcopy(buffers, _ranges.v, count * sizeof(IOVirtualRange));
                break;
            }
        }

        // Compute the total length and page count over the vector of ranges
        Ranges vec = _ranges;
        UInt32 length = 0;
        UInt32 pages = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            user_addr_t addr;
            IOPhysicalLength len;

            // addr & len are returned by this function
            getAddrLenForInd(addr, len, type, vec, ind);
            pages += (atop_64(addr + len + PAGE_MASK) - atop_64(addr));
            len += length;
            assert(len >= length);  // Check for 32 bit wrap around
            length = len;

            if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            {
                ppnum_t highPage = atop_64(addr + len - 1);
                if (highPage > _highestPage)
                    _highestPage = highPage;
            }
        }
        _length      = length;
        _pages       = pages;
        _rangesCount = count;

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed.
        if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
            _wireCount++;   // Physical MDs are, by definition, wired
        else { /* kIOMemoryTypeVirtual | kIOMemoryTypeVirtual64 | kIOMemoryTypeUIO */
            ioGMDData *dataP;
            unsigned dataSize = computeDataSize(_pages, /* upls */ count * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, computeDataSize(0, 0));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if ( (kIOMemoryPersistent & _flags) && !_memEntry)
                _memEntry = createNamedEntry();

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if( reserved)
    {
        LOCK;
        reserved->memory = 0;
        UNLOCK;
    }

    if ((kIOMemoryTypePhysical != type) && (kIOMemoryTypePhysical64 != type))
    {
        while (_wireCount)
            complete();
    }
    if (_memoryEntries)
        _memoryEntries->release();

    if (_ranges.v && !(kIOMemoryAsReference & _flags))
    {
        if (kIOMemoryTypeUIO == type)
            uio_free((uio_t) _ranges.v);
#ifndef __LP64__
        else if ((kIOMemoryTypeVirtual64 == type) || (kIOMemoryTypePhysical64 == type))
            IODelete(_ranges.v64, IOAddressRange, _rangesCount);
#endif /* !__LP64__ */
        else
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);

        _ranges.v = NULL;
    }

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    if (_prepareLock)
        IOLockFree(_prepareLock);

    super::free();
}

#ifndef __LP64__
void IOGeneralMemoryDescriptor::unmapFromKernel()
{
    panic("IOGMD::unmapFromKernel deprecated");
}

void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex)
{
    panic("IOGMD::mapIntoKernel deprecated");
}
#endif /* !__LP64__ */

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
#ifndef __LP64__
    if (_direction)
        return _direction;
#endif /* !__LP64__ */
    return (IODirection) (_flags & kIOMemoryDirectionMask);
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}

#ifndef __LP64__
// @@@ gvdl: who is using this API? Seems like a weird thing to implement.
IOPhysicalAddress
IOMemoryDescriptor::getSourceSegment( IOByteCount offset, IOByteCount * length )
{
    addr64_t physAddr = 0;

    if( prepare() == kIOReturnSuccess) {
        physAddr = getPhysicalSegment64( offset, length );
        complete();
    }

    return( (IOPhysicalAddress) physAddr ); // truncated but only page offset is used
}
#endif /* !__LP64__ */
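
// readBytes()/writeBytes() copy between a kernel buffer and the descriptor's
// memory one physically contiguous segment at a time. Each iteration asks
// getPhysicalSegment(..., kIOMemoryMapperNone) for the next physical range
// and hands it to copypv(), which copies by physical address through the
// kernel map (cppvPsrc/cppvPsnk select which side is physical).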

IOByteCount IOMemoryDescriptor::readBytes
                (IOByteCount offset, void *bytes, IOByteCount length)
{
    addr64_t dstAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);
    if (offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    srcAddr64;
        IOByteCount srcLen;

        srcAddr64 = getPhysicalSegment(offset, &srcLen, kIOMemoryMapperNone);
        if (!srcAddr64)
            break;

        // Clip segment length to remaining
        if (srcLen > remaining)
            srcLen = remaining;

        copypv(srcAddr64, dstAddr, srcLen,
               cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);

        dstAddr   += srcLen;
        offset    += srcLen;
        remaining -= srcLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = CAST_DOWN(addr64_t, bytes);
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        return 0;
    }

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, _length - offset);
    while (remaining) { // (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    assert(!remaining);

    return length - remaining;
}

// osfmk/device/iokit_rpc.c
extern "C" unsigned int IODefaultCacheBits(addr64_t pa);

#ifndef __LP64__
void IOGeneralMemoryDescriptor::setPosition(IOByteCount position)
{
    panic("IOGMD::setPosition deprecated");
}
#endif /* !__LP64__ */

static volatile SInt64 gIOMDPreparationID __attribute__((aligned(8))) = (1ULL << 32);
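
// Preparation IDs let an IODMACommand detect that a descriptor has been
// completed and re-prepared underneath it. IDs are drawn from a global
// 64-bit counter that starts at 1ULL << 32, presumably so a live ID can
// never collide with the small sentinel values (kIOPreparationIDUnprepared,
// kIOPreparationIDAlwaysPrepared, kIOPreparationIDUnsupported).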

uint64_t
IOGeneralMemoryDescriptor::getPreparationID( void )
{
    ioGMDData *dataP;

    if (!_wireCount)
        return (kIOPreparationIDUnprepared);

    if (_flags & (kIOMemoryTypePhysical | kIOMemoryTypePhysical64))
        return (kIOPreparationIDAlwaysPrepared);

    if (!_memoryEntries || !(dataP = getDataP(_memoryEntries)))
        return (kIOPreparationIDUnprepared);

    if (kIOPreparationIDUnprepared == dataP->fPreparationID)
    {
        dataP->fPreparationID = OSIncrementAtomic64(&gIOMDPreparationID);
    }
    return (dataP->fPreparationID);
}

uint64_t
IOMemoryDescriptor::getPreparationID( void )
{
    return (kIOPreparationIDUnsupported);
}
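
// dmaCommandOperation() is the private back end used by IODMACommand. It
// dispatches on op: kIOMDGetCharacteristics reports length/page/mapping
// state; kIOMDSetDMAActive/kIOMDSetDMAInactive (when IOMD_DEBUG_DMAACTIVE is
// built in) track outstanding DMA; and kIOMDFirstSegment/kIOMDWalkSegments
// return one physically contiguous segment per call. The InternalState kept
// in the caller's buffer caches the range index and cumulative offset of the
// previous lookup, so a sequential walk resumes where it left off instead of
// rescanning from range zero.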

IOReturn IOGeneralMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {

        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength    = _length;
        data->fSGCount   = _rangesCount;
        data->fPages     = _pages;
        data->fDirection = getDirection();
        if (!_wireCount)
            data->fIsPrepared = false;
        else {
            data->fIsPrepared = true;
            data->fHighestPage = _highestPage;
            if (_memoryEntries) {
                ioGMDData *gmdData  = getDataP(_memoryEntries);
                ioPLBlock *ioplList = getIOPLList(gmdData);
                UInt count = getNumIOPL(_memoryEntries, gmdData);

                data->fIsMapped = (gmdData->fMapper && _pages && (count > 0)
                                   && ioplList[0].fMappedBase);
                if (count == 1)
                    data->fPageAlign = (ioplList[0].fPageOffset & PAGE_MASK) | ~PAGE_MASK;
            }
            else
                data->fIsMapped = false;
        }

        return kIOReturnSuccess;

#if IOMD_DEBUG_DMAACTIVE
    } else if (kIOMDSetDMAActive == op) {
        IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
        OSIncrementAtomic(&md->__iomd_reservedA);
    } else if (kIOMDSetDMAInactive == op) {
        IOGeneralMemoryDescriptor * md = const_cast<IOGeneralMemoryDescriptor *>(this);
        if (md->__iomd_reservedA)
            OSDecrementAtomic(&md->__iomd_reservedA);
        else
            panic("kIOMDSetDMAInactive");
#endif /* IOMD_DEBUG_DMAACTIVE */

    } else if (!(kIOMDWalkSegments & op))
        return kIOReturnBadArgument;

    // Get the next segment
    struct InternalState {
        IOMDDMAWalkSegmentArgs fIO;
        UInt fOffset2Index;
        UInt fIndex;
        UInt fNextOffset;
    } *isP;

    // Find the next segment
    if (dataSize < sizeof(*isP))
        return kIOReturnUnderrun;

    isP = (InternalState *) vData;
    UInt offset = isP->fIO.fOffset;
    bool mapped = isP->fIO.fMapped;

    if (offset >= _length)
        return (offset == _length)? kIOReturnOverrun : kIOReturnInternalError;

    // Validate the previous offset
    UInt ind, off2Ind = isP->fOffset2Index;
    if ((kIOMDFirstSegment != op)
        && offset
        && (offset == isP->fNextOffset || off2Ind <= offset))
        ind = isP->fIndex;
    else
        ind = off2Ind = 0;  // Start from beginning

    UInt length;
    UInt64 address;
    if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {

        // Physical address based memory descriptor
        const IOPhysicalRange *physP = (IOPhysicalRange *) &_ranges.p[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#ifndef __LP64__
    else if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64) {

        // Physical address based memory descriptor
        const IOAddressRange *physP = (IOAddressRange *) &_ranges.v64[0];

        // Find the range after the one that contains the offset
        mach_vm_size_t len;
        for (len = 0; off2Ind <= offset; ind++) {
            len = physP[ind].length;
            off2Ind += len;
        }

        // Calculate length within range and starting address
        length  = off2Ind - offset;
        address = physP[ind - 1].address + len - length;

        // see how far we can coalesce ranges
        while (ind < _rangesCount && address + length == physP[ind].address) {
            len = physP[ind].length;
            length += len;
            off2Ind += len;
            ind++;
        }

        // correct contiguous check overshoot
        ind--;
        off2Ind -= len;
    }
#endif /* !__LP64__ */
    else do {
        if (!_wireCount)
            panic("IOGMD: not wired for the IODMACommand");

        assert(_memoryEntries);

        ioGMDData * dataP = getDataP(_memoryEntries);
        const ioPLBlock *ioplList = getIOPLList(dataP);
        UInt numIOPLs = getNumIOPL(_memoryEntries, dataP);
        upl_page_info_t *pageList = getPageList(dataP);

        assert(numIOPLs > 0);

        // Scan through iopl info blocks looking for block containing offset
        while (ind < numIOPLs && offset >= ioplList[ind].fIOMDOffset)
            ind++;

        // Go back to actual range as search goes past it
        ioPLBlock ioplInfo = ioplList[ind - 1];
        off2Ind = ioplInfo.fIOMDOffset;

        if (ind < numIOPLs)
            length = ioplList[ind].fIOMDOffset;
        else
            length = _length;
        length -= offset;   // Remainder within iopl

        // Subtract offset till this iopl in total list
        offset -= off2Ind;

        // If a mapped address is requested and this is a pre-mapped IOPL
        // then just need to compute an offset relative to the mapped base.
        if (mapped && ioplInfo.fMappedBase) {
            offset += (ioplInfo.fPageOffset & PAGE_MASK);
            address = ptoa_64(ioplInfo.fMappedBase) + offset;
            continue;   // Done; leave do/while(false) now
        }

        // The offset is rebased into the current iopl.
        // Now add the iopl 1st page offset.
        offset += ioplInfo.fPageOffset;

        // For external UPLs the fPageInfo field points directly to
        // the upl's upl_page_info_t array.
        if (ioplInfo.fFlags & kIOPLExternUPL)
            pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
        else
            pageList = &pageList[ioplInfo.fPageInfo];

        // Check for direct device non-paged memory
        if ( ioplInfo.fFlags & kIOPLOnDevice ) {
            address = ptoa_64(pageList->phys_addr) + offset;
            continue;   // Done; leave do/while(false) now
        }

        // Now we need to compute the index into the pageList
        UInt pageInd = atop_32(offset);
        offset &= PAGE_MASK;

        // Compute the starting address of this segment
        IOPhysicalAddress pageAddr = pageList[pageInd].phys_addr;
        if (!pageAddr) {
            panic("!pageList phys_addr");
        }

        address = ptoa_64(pageAddr) + offset;

        // length is currently set to the length of the remainder of the iopl.
        // We need to check that the remainder of the iopl is contiguous.
        // This is indicated by pageList[ind].phys_addr being sequential.
        IOByteCount contigLength = PAGE_SIZE - offset;
        while (contigLength < length
            && ++pageAddr == pageList[++pageInd].phys_addr)
        {
            contigLength += PAGE_SIZE;
        }

        if (contigLength < length)
            length = contigLength;


        assert(address);
        assert(length);

    } while (false);

    // Update return values and state
    isP->fIO.fIOVMAddr = address;
    isP->fIO.fLength   = length;
    isP->fIndex        = ind;
    isP->fOffset2Index = off2Ind;
    isP->fNextOffset   = isP->fIO.fOffset + length;

    return kIOReturnSuccess;
}
1404 | addr64_t | |
b0d623f7 | 1405 | IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options) |
0c530ab8 | 1406 | { |
b0d623f7 A |
1407 | IOReturn ret; |
    addr64_t     address = 0;
    IOByteCount  length  = 0;
    IOMapper *   mapper  = gIOSystemMapper;
    IOOptionBits type    = _flags & kIOMemoryTypeMask;

    if (lengthOfSegment)
        *lengthOfSegment = 0;

    if (offset >= _length)
        return 0;

    // IOMemoryDescriptor::doMap() cannot use getPhysicalSegment() to obtain the page offset, since it must
    // support the unwired memory case in IOGeneralMemoryDescriptor, and hibernate_write_image() cannot use
    // map()->getVirtualAddress() to obtain the kernel pointer, since it must prevent the memory allocation
    // due to IOMemoryMap, so _kIOMemorySourceSegment is a necessary evil until all of this gets cleaned up

    if ((options & _kIOMemorySourceSegment) && (kIOMemoryTypeUPL != type))
    {
        unsigned    rangesIndex = 0;
        Ranges      vec = _ranges;
        user_addr_t addr;

        // Find the starting address within the vector of ranges
        for (;;) {
            getAddrLenForInd(addr, length, type, vec, rangesIndex);
            if (offset < length)
                break;
            offset -= length; // (make offset relative)
            rangesIndex++;
        }

        // Now that we have the starting range,
        // let's find the last contiguous range
        addr   += offset;
        length -= offset;

        for (++rangesIndex; rangesIndex < _rangesCount; rangesIndex++) {
            user_addr_t      newAddr;
            IOPhysicalLength newLen;

            getAddrLenForInd(newAddr, newLen, type, vec, rangesIndex);
            if (addr + length != newAddr)
                break;
            length += newLen;
        }
        if (addr)
            address = (IOPhysicalAddress) addr;	// Truncate address to 32 bits
    }
    else
    {
        IOMDDMAWalkSegmentState  _state;
        IOMDDMAWalkSegmentArgs * state = (IOMDDMAWalkSegmentArgs *) &_state;

        state->fOffset = offset;
        state->fLength = _length - offset;
        state->fMapped = (0 == (options & kIOMemoryMapperNone));

        ret = dmaCommandOperation(kIOMDFirstSegment, _state, sizeof(_state));

        if ((kIOReturnSuccess != ret) && (kIOReturnOverrun != ret))
            DEBG("getPhysicalSegment dmaCommandOperation(%lx), %p, offset %qx, addr %qx, len %qx\n",
                 ret, this, state->fOffset,
                 state->fIOVMAddr, state->fLength);
        if (kIOReturnSuccess == ret)
        {
            address = state->fIOVMAddr;
            length  = state->fLength;
        }

        // dmaCommandOperation() does not distinguish between "mapped" and "unmapped" physical memory, even
        // with fMapped set correctly, so we must handle the transformation here until this gets cleaned up

        if (mapper && ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type)))
        {
            if ((options & kIOMemoryMapperNone) && !(_flags & kIOMemoryMapperNone))
            {
                addr64_t    origAddr = address;
                IOByteCount origLen  = length;

                address = mapper->mapAddr(origAddr);
                length  = page_size - (address & (page_size - 1));
                while ((length < origLen)
                    && ((address + length) == mapper->mapAddr(origAddr + length)))
                    length += page_size;
                if (length > origLen)
                    length = origLen;
            }
#ifdef __LP64__
            else if (!(options & kIOMemoryMapperNone) && (_flags & kIOMemoryMapperNone))
            {
                panic("getPhysicalSegment not mapped for I/O");
            }
#endif /* __LP64__ */
        }
    }

    if (!address)
        length = 0;

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return (address);
}
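
/*
 * Usage sketch (illustrative only, not part of the original source): a caller
 * can walk all physical segments of a descriptor by advancing the offset by
 * each returned segment length; "md" is a hypothetical, already prepared
 * IOMemoryDescriptor.
 *
 *    IOByteCount offset = 0, segLen = 0;
 *    addr64_t    segPhys;
 *    while ((segPhys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone)))
 *    {
 *        // [segPhys, segPhys + segLen) is physically contiguous
 *        offset += segLen;
 *    }
 */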

#ifndef __LP64__
addr64_t
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment, IOOptionBits options)
{
    addr64_t address = 0;

    if (options & _kIOMemorySourceSegment)
    {
        address = getSourceSegment(offset, lengthOfSegment);
    }
    else if (options & kIOMemoryMapperNone)
    {
        address = getPhysicalSegment64(offset, lengthOfSegment);
    }
    else
    {
        address = getPhysicalSegment(offset, lengthOfSegment);
    }

    return (address);
}

addr64_t
IOGeneralMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return (getPhysicalSegment(offset, lengthOfSegment, kIOMemoryMapperNone));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    addr64_t    address = 0;
    IOByteCount length  = 0;

    address = getPhysicalSegment(offset, lengthOfSegment, 0);

    if (lengthOfSegment)
        length = *lengthOfSegment;

    if ((address + length) > 0x100000000ULL)
    {
        panic("getPhysicalSegment() out of 32b range 0x%qx, len 0x%lx, class %s",
              address, (long) length, (getMetaClass())->getClassName());
    }

    return ((IOPhysicalAddress) address);
}

addr64_t
IOMemoryDescriptor::getPhysicalSegment64(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;
    IOMapper *        mapper = 0;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
        mapper = gIOSystemMapper;

    if (mapper)
    {
        IOByteCount origLen;

        phys64  = mapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length  = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == mapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}

IOPhysicalAddress
IOMemoryDescriptor::getPhysicalSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, 0));
}

IOPhysicalAddress
IOGeneralMemoryDescriptor::getSourceSegment(IOByteCount offset, IOByteCount *lengthOfSegment)
{
    return ((IOPhysicalAddress) getPhysicalSegment(offset, lengthOfSegment, _kIOMemorySourceSegment));
}

void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                    IOByteCount * lengthOfSegment)
{
    if (_task == kernel_task)
        return (void *) getSourceSegment(offset, lengthOfSegment);
    else
        panic("IOGMD::getVirtualSegment deprecated");

    return 0;
}
#endif /* !__LP64__ */

IOReturn
IOMemoryDescriptor::dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const
{
    if (kIOMDGetCharacteristics == op) {
        if (dataSize < sizeof(IOMDDMACharacteristics))
            return kIOReturnUnderrun;

        IOMDDMACharacteristics *data = (IOMDDMACharacteristics *) vData;
        data->fLength    = getLength();
        data->fSGCount   = 0;
        data->fDirection = getDirection();
        if (IOMapper::gSystem)
            data->fIsMapped = true;
        data->fIsPrepared = true;	// Assume prepared - fails safe
    }
    else if (kIOMDWalkSegments & op) {
        if (dataSize < sizeof(IOMDDMAWalkSegmentArgs))
            return kIOReturnUnderrun;

        IOMDDMAWalkSegmentArgs *data = (IOMDDMAWalkSegmentArgs *) vData;
        IOByteCount offset = (IOByteCount) data->fOffset;

        IOPhysicalLength length;
        IOMemoryDescriptor *ncmd = const_cast<IOMemoryDescriptor *>(this);
        if (data->fMapped && IOMapper::gSystem)
            data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length);
        else
            data->fIOVMAddr = ncmd->getPhysicalSegment(offset, &length, kIOMemoryMapperNone);
        data->fLength = length;
    }
    else
        return kIOReturnBadArgument;

    return kIOReturnSuccess;
}
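
/*
 * Usage sketch (illustrative only): IODMACommand reaches this operation to
 * walk a descriptor segment by segment. A minimal equivalent, mirroring the
 * call pattern used by getPhysicalSegment() above ("md" is hypothetical):
 *
 *    IOMDDMAWalkSegmentState  walkState;
 *    IOMDDMAWalkSegmentArgs * walk = (IOMDDMAWalkSegmentArgs *) &walkState;
 *    walk->fOffset = 0;
 *    walk->fLength = md->getLength();
 *    walk->fMapped = true;
 *    if (kIOReturnSuccess == md->dmaCommandOperation(kIOMDFirstSegment,
 *                                                    &walkState, sizeof(walkState)))
 *    {
 *        // walk->fIOVMAddr / walk->fLength now describe the first segment;
 *        // repeating with kIOMDWalkSegments and a larger fOffset yields the next.
 *    }
 */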

static IOReturn
purgeableControlBits(IOOptionBits newState, vm_purgable_t * control, int * state)
{
    IOReturn err = kIOReturnSuccess;

    *control = VM_PURGABLE_SET_STATE;
    switch (newState)
    {
        case kIOMemoryPurgeableKeepCurrent:
            *control = VM_PURGABLE_GET_STATE;
            break;

        case kIOMemoryPurgeableNonVolatile:
            *state = VM_PURGABLE_NONVOLATILE;
            break;
        case kIOMemoryPurgeableVolatile:
            *state = VM_PURGABLE_VOLATILE;
            break;
        case kIOMemoryPurgeableEmpty:
            *state = VM_PURGABLE_EMPTY;
            break;
        default:
            err = kIOReturnBadArgument;
            break;
    }
    return (err);
}

static IOReturn
purgeableStateBits(int * state)
{
    IOReturn err = kIOReturnSuccess;

    switch (*state)
    {
        case VM_PURGABLE_NONVOLATILE:
            *state = kIOMemoryPurgeableNonVolatile;
            break;
        case VM_PURGABLE_VOLATILE:
            *state = kIOMemoryPurgeableVolatile;
            break;
        case VM_PURGABLE_EMPTY:
            *state = kIOMemoryPurgeableEmpty;
            break;
        default:
            *state = kIOMemoryPurgeableNonVolatile;
            err = kIOReturnNotReady;
            break;
    }
    return (err);
}

IOReturn
IOGeneralMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                         IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (_memEntry)
    {
        err = super::setPurgeable(newState, oldState);
    }
    else
    {
        if (kIOMemoryThreadSafe & _flags)
            LOCK;
        do
        {
            // Find the appropriate vm_map for the given task
            vm_map_t curMap;
            if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
            {
                err = kIOReturnNotReady;
                break;
            }
            else
                curMap = get_task_map(_task);

            // can only do one range
            Ranges       vec  = _ranges;
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            user_addr_t  addr;
            IOByteCount  len;
            getAddrLenForInd(addr, len, type, vec, 0);

            err = purgeableControlBits(newState, &control, &state);
            if (kIOReturnSuccess != err)
                break;
            err = mach_vm_purgable_control(curMap, addr, control, &state);
            if (oldState)
            {
                if (kIOReturnSuccess == err)
                {
                    err = purgeableStateBits(&state);
                    *oldState = state;
                }
            }
        }
        while (false);
        if (kIOMemoryThreadSafe & _flags)
            UNLOCK;
    }
    return (err);
}

IOReturn IOMemoryDescriptor::setPurgeable( IOOptionBits newState,
                                           IOOptionBits * oldState )
{
    IOReturn      err = kIOReturnSuccess;
    vm_purgable_t control;
    int           state;

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    do
    {
        if (!_memEntry)
        {
            err = kIOReturnNotReady;
            break;
        }
        err = purgeableControlBits(newState, &control, &state);
        if (kIOReturnSuccess != err)
            break;
        err = mach_memory_entry_purgable_control((ipc_port_t) _memEntry, control, &state);
        if (oldState)
        {
            if (kIOReturnSuccess == err)
            {
                err = purgeableStateBits(&state);
                *oldState = state;
            }
        }
    }
    while (false);

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (err);
}
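
/*
 * Usage sketch (illustrative only): a client can let the VM reclaim the
 * backing store of an idle cache and restore it before reuse ("md" is
 * hypothetical):
 *
 *    IOOptionBits oldState;
 *    md->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);    // reclaimable
 *    ...
 *    md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState); // pin again
 *    // kIOMemoryPurgeableKeepCurrent only queries the current state.
 */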

extern "C" void dcache_incoherent_io_flush64(addr64_t pa, unsigned int count);
extern "C" void dcache_incoherent_io_store64(addr64_t pa, unsigned int count);

static void SetEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_clear_noencrypt(page);
    }
}

static void ClearEncryptOp(addr64_t pa, unsigned int count)
{
    ppnum_t page, end;

    page = atop_64(round_page_64(pa));
    end  = atop_64(trunc_page_64(pa + count));
    for (; page < end; page++)
    {
        pmap_set_noencrypt(page);
    }
}

IOReturn IOMemoryDescriptor::performOperation( IOOptionBits options,
                                               IOByteCount offset, IOByteCount length )
{
    IOByteCount remaining;
    void (*func)(addr64_t pa, unsigned int count) = 0;

    switch (options)
    {
        case kIOMemoryIncoherentIOFlush:
            func = &dcache_incoherent_io_flush64;
            break;
        case kIOMemoryIncoherentIOStore:
            func = &dcache_incoherent_io_store64;
            break;

        case kIOMemorySetEncrypted:
            func = &SetEncryptOp;
            break;
        case kIOMemoryClearEncrypted:
            func = &ClearEncryptOp;
            break;
    }

    if (!func)
        return (kIOReturnUnsupported);

    if (kIOMemoryThreadSafe & _flags)
        LOCK;

    remaining = length = min(length, getLength() - offset);
    while (remaining)
    // (process another target segment?)
    {
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment(offset, &dstLen, kIOMemoryMapperNone);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        (*func)(dstAddr64, dstLen);

        offset    += dstLen;
        remaining -= dstLen;
    }

    if (kIOMemoryThreadSafe & _flags)
        UNLOCK;

    return (remaining ? kIOReturnUnderrun : kIOReturnSuccess);
}
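
/*
 * Usage sketch (illustrative only; exact cache semantics are
 * architecture-dependent): a driver for a device without coherent DMA might
 * store dirty lines back before the device reads a buffer, and flush the
 * range once the device has written it ("md" is hypothetical):
 *
 *    md->performOperation(kIOMemoryIncoherentIOStore, 0, md->getLength());
 *    ... run the transfer ...
 *    md->performOperation(kIOMemoryIncoherentIOFlush, 0, md->getLength());
 */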

extern vm_offset_t first_avail;
#define io_kernel_static_end	first_avail

static kern_return_t
io_get_kernel_static_upl(
    vm_map_t              /* map */,
    uintptr_t             offset,
    vm_size_t             *upl_size,
    upl_t                 *upl,
    upl_page_info_array_t page_list,
    unsigned int          *count,
    ppnum_t               *highest_page)
{
    unsigned int pageCount, page;
    ppnum_t      phys;
    ppnum_t      highestPage = 0;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
        if (phys > highestPage)
            highestPage = phys;
    }

    *highest_page = highestPage;

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}

IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOOptionBits type  = _flags & kIOMemoryTypeMask;
    IOReturn     error = kIOReturnCannotWire;
    ioGMDData *  dataP;
    ppnum_t      mapBase = 0;
    IOMapper *   mapper;
    ipc_port_t   sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);
    assert(kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type);

    if (_pages > gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;	// May no longer be valid, so let's not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = getDirection();

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (kIODirectionOutIn & forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

#ifdef UPL_NEED_32BIT_ADDR
    if (kIODirectionPrepareToPhys32 & forDirection)
        uplFlags |= UPL_NEED_32BIT_ADDR;
#endif

    // Find the appropriate vm_map for the given task
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    // Iterate over the vector of virtual ranges
    Ranges       vec = _ranges;
    unsigned int pageIndex   = 0;
    IOByteCount  mdOffset    = 0;
    ppnum_t      highestPage = 0;
    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock   iopl;
        user_addr_t startPage;
        IOByteCount numBytes;
        ppnum_t     highPage = 0;

        // Get the startPage address and length of vec[range]
        getAddrLenForInd(startPage, numBytes, type, vec, range);
        iopl.fPageOffset = startPage & PAGE_MASK;
        numBytes += iopl.fPageOffset;
        startPage = trunc_page_64(startPage);

        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;

        // Iterate over the current range, creating UPLs
        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_address_t kernelStart = (vm_address_t) startPage;
            vm_map_t     theMap;
            if (curMap)
                theMap = curMap;
            else if (!sharedMem) {
                assert(_task == kernel_task);
                theMap = IOPageableMapForAddress(kernelStart);
            }
            else
                theMap = NULL;

            upl_page_info_array_t pageInfo  = getPageList(dataP);
            int                   ioplFlags = uplFlags;
            upl_page_list_ptr_t   baseInfo  = &pageInfo[pageIndex];

            vm_size_t    ioplSize    = round_page(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if (theMap == kernel_map && kernelStart < io_kernel_static_end) {
                error = io_get_kernel_static_upl(theMap,
                                                 kernelStart,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &highPage);
            }
            else if (sharedMem) {
                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);
            }
            else {
                assert(theMap);
                error = vm_map_create_upl(theMap,
                                          startPage,
                                          (upl_size_t*)&ioplSize,
                                          &iopl.fIOPL,
                                          baseInfo,
                                          &numPageInfo,
                                          &ioplFlags);
            }

            assert(ioplSize);
            if (error != KERN_SUCCESS)
                goto abortExit;

            if (iopl.fIOPL)
                highPage = upl_get_highest_page(iopl.fIOPL);
            if (highPage > highestPage)
                highestPage = highPage;

            error = kIOReturnCannotWire;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else {
                iopl.fFlags = 0;
                if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo   = pageIndex;

#if 0
            // used to remove the upl for auto prepares here, for some errant code
            // that freed memory before the descriptor pointing at it
            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                upl_commit(iopl.fIOPL, 0, 0);
                upl_deallocate(iopl.fIOPL);
                iopl.fIOPL = 0;
            }
#endif

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up the partially created and unsaved iopl
                if (iopl.fIOPL) {
                    upl_abort(iopl.fIOPL, 0);
                    upl_deallocate(iopl.fIOPL);
                }
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset  -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes  -= ioplSize;
                startPage += ioplSize;
                mdOffset  += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    _highestPage = highestPage;

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(_memoryEntries, dataP);
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL) {
                upl_abort(ioplList[range].fIOPL, 0);
                upl_deallocate(ioplList[range].fIOPL);
            }
        }
        (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    if (error == KERN_FAILURE)
        error = kIOReturnCannotWire;

    return error;
}

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn     error = kIOReturnSuccess;
    IOOptionBits type  = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    if (!_wireCount
        && (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) ) {
        error = wireVirtual(forDirection);
    }

    if (kIOReturnSuccess == error)
        _wireCount++;

    if (1 == _wireCount)
    {
        if (kIOMemoryClearEncrypt & _flags)
        {
            performOperation(kIOMemoryClearEncrypted, 0, _length);
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return error;
}

/*
 * complete
 *
 * Complete processing of the memory after an I/O transfer finishes.
 * This method should not be called unless a prepare was previously
 * issued; the prepare() and complete() must occur in pairs, before
 * and after an I/O transfer involving pageable memory.
 */

IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */)
{
    IOOptionBits type = _flags & kIOMemoryTypeMask;

    if ((kIOMemoryTypePhysical == type) || (kIOMemoryTypePhysical64 == type))
        return kIOReturnSuccess;

    if (_prepareLock)
        IOLockLock(_prepareLock);

    assert(_wireCount);

    if (_wireCount)
    {
        if ((kIOMemoryClearEncrypt & _flags) && (1 == _wireCount))
        {
            performOperation(kIOMemorySetEncrypted, 0, _length);
        }

        _wireCount--;
        if (!_wireCount)
        {
            IOOptionBits type = _flags & kIOMemoryTypeMask;
            ioGMDData * dataP = getDataP(_memoryEntries);
            ioPLBlock *ioplList = getIOPLList(dataP);
            UInt count = getNumIOPL(_memoryEntries, dataP);

#if IOMD_DEBUG_DMAACTIVE
            if (__iomd_reservedA) panic("complete() while dma active");
#endif /* IOMD_DEBUG_DMAACTIVE */

            if (dataP->fMapper && _pages && ioplList[0].fMappedBase)
                dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages);

            // Only complete iopls that we created which are for TypeVirtual
            if (kIOMemoryTypeVirtual == type || kIOMemoryTypeVirtual64 == type || kIOMemoryTypeUIO == type) {
                for (UInt ind = 0; ind < count; ind++)
                    if (ioplList[ind].fIOPL) {
                        upl_commit(ioplList[ind].fIOPL, 0, 0);
                        upl_deallocate(ioplList[ind].fIOPL);
                    }
            } else if (kIOMemoryTypeUPL == type) {
                upl_set_referenced(ioplList[0].fIOPL, false);
            }

            (void) _memoryEntries->initWithBytes(dataP, computeDataSize(0, 0)); // == setLength()

            dataP->fPreparationID = kIOPreparationIDUnprepared;
        }
    }

    if (_prepareLock)
        IOLockUnlock(_prepareLock);

    return kIOReturnSuccess;
}
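
/*
 * Usage sketch (illustrative only): prepare() and complete() bracket the I/O
 * lifetime of a descriptor and may nest, but every prepare() must be balanced
 * by a complete() ("md" is hypothetical):
 *
 *    if (kIOReturnSuccess == md->prepare())
 *    {
 *        // memory is wired; its physical segments may be handed to hardware
 *        ...
 *        md->complete();
 *    }
 */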

IOReturn IOGeneralMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )

{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOGeneralMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    kern_return_t kr        = kIOReturnVMError;
    ipc_port_t    sharedMem = (ipc_port_t) _memEntry;

    IOOptionBits type = _flags & kIOMemoryTypeMask;
    Ranges       vec  = _ranges;

    user_addr_t range0Addr = 0;
    IOByteCount range0Len  = 0;

    if ((offset >= _length) || ((offset + length) > _length))
        return( kIOReturnBadArgument );

    if (vec.v)
        getAddrLenForInd(range0Addr, range0Len, type, vec, 0);

    // mapping source == dest? (could be much better)
    if( _task
        && (mapping->fAddressMap == get_task_map(_task)) && (options & kIOMapAnywhere)
        && (1 == _rangesCount) && (0 == offset)
        && range0Addr && (length <= range0Len) )
    {
        mapping->fAddress = range0Addr;
        mapping->fOptions |= kIOMapStatic;

        return( kIOReturnSuccess );
    }

    if( 0 == sharedMem) {

        vm_size_t size = ptoa_32(_pages);

        if( _task) {

            memory_object_size_t actualSize = size;
            vm_prot_t            prot       = VM_PROT_READ;
            if (!(kIOMapReadOnly & options))
                prot |= VM_PROT_WRITE;
            else if (kIOMapDefaultCache != (options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            if (_rangesCount == 1)
            {
                kr = mach_make_memory_entry_64(get_task_map(_task),
                                               &actualSize, range0Addr,
                                               prot, &sharedMem,
                                               NULL);
            }
            if( (_rangesCount != 1)
                || ((KERN_SUCCESS == kr) && (actualSize != round_page(size))))
            do
            {
#if IOASSERT
                IOLog("mach_vm_remap path for ranges %d size (%08llx:%08llx)\n",
                      _rangesCount, (UInt64)actualSize, (UInt64)size);
#endif
                kr = kIOReturnVMError;
                if (sharedMem)
                {
                    ipc_port_release_send(sharedMem);
                    sharedMem = MACH_PORT_NULL;
                }

                mach_vm_address_t address, segDestAddr;
                mach_vm_size_t    mapLength;
                unsigned          rangesIndex;
                IOOptionBits      type = _flags & kIOMemoryTypeMask;
                user_addr_t       srcAddr;
                IOPhysicalLength  segLen = 0;

                // Find starting address within the vector of ranges
                for (rangesIndex = 0; rangesIndex < _rangesCount; rangesIndex++) {
                    getAddrLenForInd(srcAddr, segLen, type, _ranges, rangesIndex);
                    if (offset < segLen)
                        break;
                    offset -= segLen; // (make offset relative)
                }

                mach_vm_size_t pageOffset = (srcAddr & PAGE_MASK);
                address = trunc_page_64(mapping->fAddress);

                if ((options & kIOMapAnywhere) || ((mapping->fAddress - address) == pageOffset))
                {
                    vm_map_t map = mapping->fAddressMap;
                    kr = IOMemoryDescriptorMapCopy(&map,
                                                   options,
                                                   offset, &address, round_page_64(length + pageOffset));
                    if (kr == KERN_SUCCESS)
                    {
                        segDestAddr = address;
                        segLen     -= offset;
                        mapLength   = length;

                        while (true)
                        {
                            vm_prot_t cur_prot, max_prot;
                            kr = mach_vm_remap(map, &segDestAddr, round_page_64(segLen), PAGE_MASK,
                                               VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE,
                                               get_task_map(_task), trunc_page_64(srcAddr),
                                               FALSE /* copy */,
                                               &cur_prot,
                                               &max_prot,
                                               VM_INHERIT_NONE);
                            if (KERN_SUCCESS == kr)
                            {
                                if ((!(VM_PROT_READ & cur_prot))
                                    || (!(kIOMapReadOnly & options) && !(VM_PROT_WRITE & cur_prot)))
                                {
                                    kr = KERN_PROTECTION_FAILURE;
                                }
                            }
                            if (KERN_SUCCESS != kr)
                                break;
                            segDestAddr += segLen;
                            mapLength   -= segLen;
                            if (!mapLength)
                                break;
                            rangesIndex++;
                            if (rangesIndex >= _rangesCount)
                            {
                                kr = kIOReturnBadArgument;
                                break;
                            }
                            getAddrLenForInd(srcAddr, segLen, type, vec, rangesIndex);
                            if (srcAddr & PAGE_MASK)
                            {
                                kr = kIOReturnBadArgument;
                                break;
                            }
                            if (segLen > mapLength)
                                segLen = mapLength;
                        }
                        if (KERN_SUCCESS != kr)
                        {
                            mach_vm_deallocate(mapping->fAddressMap, address, round_page_64(length + pageOffset));
                        }
                    }

                    if (KERN_SUCCESS == kr)
                        mapping->fAddress = address + pageOffset;
                    else
                        mapping->fAddress = NULL;
                }
            }
            while (false);
        }
        else do
        {   // _task == 0, must be physical

            memory_object_t  pager;
            unsigned int     flags = 0;
            addr64_t         pa;
            IOPhysicalLength segLen;

            pa = getPhysicalSegment( offset, &segLen, kIOMemoryMapperNone );

            if( !reserved) {
                reserved = IONew( ExpansionData, 1 );
                if( !reserved)
                    continue;
            }
            reserved->pagerContig = (1 == _rangesCount);
            reserved->memory = this;

            /* What cache mode do we need? */
            switch( options & kIOMapCacheMask ) {

                case kIOMapDefaultCache:
                default:
                    flags = IODefaultCacheBits(pa);
                    if (DEVICE_PAGER_CACHE_INHIB & flags)
                    {
                        if (DEVICE_PAGER_GUARDED & flags)
                            mapping->fOptions |= kIOMapInhibitCache;
                        else
                            mapping->fOptions |= kIOMapWriteCombineCache;
                    }
                    else if (DEVICE_PAGER_WRITE_THROUGH & flags)
                        mapping->fOptions |= kIOMapWriteThruCache;
                    else
                        mapping->fOptions |= kIOMapCopybackCache;
                    break;

                case kIOMapInhibitCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapWriteThruCache:
                    flags = DEVICE_PAGER_WRITE_THROUGH |
                            DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED;
                    break;

                case kIOMapCopybackCache:
                    flags = DEVICE_PAGER_COHERENT;
                    break;

                case kIOMapWriteCombineCache:
                    flags = DEVICE_PAGER_CACHE_INHIB |
                            DEVICE_PAGER_COHERENT;
                    break;
            }

            flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0;

            pager = device_pager_setup( (memory_object_t) 0, (uintptr_t) reserved,
                                        size, flags);
            assert( pager );

            if( pager) {
                kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/,
                                                         size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem );

                assert( KERN_SUCCESS == kr );
                if( KERN_SUCCESS != kr)
                {
                    device_pager_deallocate( pager );
                    pager = MACH_PORT_NULL;
                    sharedMem = MACH_PORT_NULL;
                }
            }
            if( pager && sharedMem)
                reserved->devicePager = pager;
            else {
                IODelete( reserved, ExpansionData, 1 );
                reserved = 0;
            }

        } while( false );

        _memEntry = (void *) sharedMem;
    }

    IOReturn result;
    if (0 == sharedMem)
        result = kr;
    else
        result = super::doMap( __addressMap, __address,
                               options, __offset, __length );

    return( result );
}

IOReturn IOGeneralMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    return (super::doUnmap(addressMap, __address, __length));
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

OSDefineMetaClassAndStructors( IOMemoryMap, OSObject )

OSMetaClassDefineReservedUnused(IOMemoryMap, 0);
OSMetaClassDefineReservedUnused(IOMemoryMap, 1);
OSMetaClassDefineReservedUnused(IOMemoryMap, 2);
OSMetaClassDefineReservedUnused(IOMemoryMap, 3);
OSMetaClassDefineReservedUnused(IOMemoryMap, 4);
OSMetaClassDefineReservedUnused(IOMemoryMap, 5);
OSMetaClassDefineReservedUnused(IOMemoryMap, 6);
OSMetaClassDefineReservedUnused(IOMemoryMap, 7);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryMap::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOMemoryMap::init(
        task_t            intoTask,
        mach_vm_address_t toAddress,
        IOOptionBits      _options,
        mach_vm_size_t    _offset,
        mach_vm_size_t    _length )
{
    if (!intoTask)
        return( false);

    if (!super::init())
        return(false);

    fAddressMap = get_task_map(intoTask);
    if (!fAddressMap)
        return(false);
    vm_map_reference(fAddressMap);

    fAddressTask = intoTask;
    fOptions     = _options;
    fLength      = _length;
    fOffset      = _offset;
    fAddress     = toAddress;

    return (true);
}

bool IOMemoryMap::setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset)
{
    if (!_memory)
        return(false);

    if (!fSuperMap)
    {
        if( (_offset + fLength) > _memory->getLength())
            return( false);
        fOffset = _offset;
    }

    _memory->retain();
    if (fMemory)
    {
        if (fMemory != _memory)
            fMemory->removeMapping(this);
        fMemory->release();
    }
    fMemory = _memory;

    return( true );
}

struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t        sharedMem;
    vm_map_t          map;
    mach_vm_address_t mapped;
    mach_vm_size_t    size;
    mach_vm_size_t    sourceOffset;
    IOOptionBits      options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem)
        {
            vm_prot_t prot = VM_PROT_READ
                           | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // VM system requires write access to change cache mode
            if (kIOMapDefaultCache != (ref->options & kIOMapCacheMask))
                prot |= VM_PROT_WRITE;

            // set memory entry cache
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = mach_vm_map( map,
                               &ref->mapped,
                               ref->size, 0 /* mask */,
                               (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                               ref->sharedMem, ref->sourceOffset,
                               false, // copy
                               prot, // cur
                               prot, // max
                               VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
        }
        else
        {
            err = mach_vm_allocate(map, &ref->mapped, ref->size,
                                   ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                                   | VM_MAKE_TAG(VM_MEMORY_IOKIT) );
            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }
            ref->map = map;
            // we have to make sure that these guys don't get copied if we fork.
            err = vm_inherit(map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }
    }
    while( false );

    return( err );
}

kern_return_t
IOMemoryDescriptorMapMemEntry(vm_map_t * map, ipc_port_t entry, IOOptionBits options, bool pageable,
                              mach_vm_size_t offset,
                              mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = entry;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if( ref.sharedMem && (ref.map == kernel_map) && pageable)
        err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
    else
        err = IOMemoryDescriptorMapAlloc( ref.map, &ref );

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}

kern_return_t
IOMemoryDescriptorMapCopy(vm_map_t * map,
                          IOOptionBits options,
                          mach_vm_size_t offset,
                          mach_vm_address_t * address, mach_vm_size_t length)
{
    IOReturn err;
    IOMemoryDescriptorMapAllocRef ref;

    ref.map          = *map;
    ref.sharedMem    = NULL;
    ref.sourceOffset = trunc_page_64(offset);
    ref.options      = options;
    ref.size         = length;

    if (options & kIOMapAnywhere)
        // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
        ref.mapped = 0;
    else
        ref.mapped = *address;

    if (ref.map == kernel_map)
        err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
    else
        err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

    *address = ref.mapped;
    *map     = ref.map;

    return (err);
}

IOReturn IOMemoryDescriptor::doMap(
    vm_map_t           __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits       options,
    IOByteCount        __offset,
    IOByteCount        __length )
{
#ifndef __LP64__
    if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::doMap !64bit");
#endif /* !__LP64__ */

    IOMemoryMap *  mapping = (IOMemoryMap *) *__address;
    mach_vm_size_t offset  = mapping->fOffset + __offset;
    mach_vm_size_t length  = mapping->fLength;

    IOReturn          err = kIOReturnSuccess;
    memory_object_t   pager;
    mach_vm_size_t    pageOffset;
    IOPhysicalAddress sourceAddr;
    unsigned int      lock_count;

    do
    {
        sourceAddr = getPhysicalSegment( offset, NULL, _kIOMemorySourceSegment );
        pageOffset = sourceAddr - trunc_page( sourceAddr );

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if ((kIOMapReference|kIOMapUnique) == ((kIOMapReference|kIOMapUnique) & options))
        {
            upl_t     redirUPL2;
            vm_size_t size;
            int       flags;

            if (!_memEntry)
            {
                err = kIOReturnNotReadable;
                continue;
            }

            size = round_page(mapping->fLength + pageOffset);
            flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL
                  | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS;

            if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) _memEntry, 0, &size, &redirUPL2,
                                                           NULL, NULL,
                                                           &flags))
                redirUPL2 = NULL;

            for (lock_count = 0;
                 IORecursiveLockHaveLock(gIOMemoryLock);
                 lock_count++) {
                UNLOCK;
            }
            err = upl_transpose(redirUPL2, mapping->fRedirUPL);
            for (;
                 lock_count;
                 lock_count--) {
                LOCK;
            }

            if (kIOReturnSuccess != err)
            {
                IOLog("upl_transpose(%x)\n", err);
                err = kIOReturnSuccess;
            }

            if (redirUPL2)
            {
                upl_commit(redirUPL2, NULL, 0);
                upl_deallocate(redirUPL2);
                redirUPL2 = 0;
            }
            {
                // swap the memEntries since they now refer to different vm_objects
                void * me = _memEntry;
                _memEntry = mapping->fMemory->_memEntry;
                mapping->fMemory->_memEntry = me;
            }
            if (pager)
                err = handleFault( reserved->devicePager, mapping->fAddressMap, mapping->fAddress, offset, length, options );
        }
        else
        {
            mach_vm_address_t address;

            if (!(options & kIOMapAnywhere))
            {
                address = trunc_page_64(mapping->fAddress);
                if( (mapping->fAddress - address) != pageOffset)
                {
                    err = kIOReturnVMError;
                    continue;
                }
            }

            vm_map_t map = mapping->fAddressMap;
            err = IOMemoryDescriptorMapMemEntry(&map, (ipc_port_t) _memEntry,
                                                options, (kIOMemoryBufferPageable & _flags),
                                                offset, &address, round_page_64(length + pageOffset));
            if( err != KERN_SUCCESS)
                continue;

            if (!_memEntry || pager)
            {
                err = handleFault( pager, mapping->fAddressMap, address, offset, length, options );
                if (err != KERN_SUCCESS)
                    doUnmap( mapping->fAddressMap, (IOVirtualAddress) mapping, 0 );
            }

#if DEBUG
            if (kIOLogMapping & gIOKitDebug)
                IOLog("mapping(%x) desc %p @ %lx, map %p, address %qx, offset %qx, length %qx\n",
                      err, this, sourceAddr, mapping, address, offset, length);
#endif

            if (err == KERN_SUCCESS)
                mapping->fAddress = address + pageOffset;
            else
                mapping->fAddress = NULL;
        }
    }
    while( false );

    return (err);
}
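
/*
 * Usage sketch (illustrative only): doMap() is normally reached through the
 * public mapping entry points rather than called directly, e.g. the
 * map()->getVirtualAddress() pattern mentioned in the comments above
 * ("md" is hypothetical):
 *
 *    IOMemoryMap * mm = md->map();
 *    if (mm)
 *    {
 *        IOVirtualAddress va = mm->getVirtualAddress();
 *        ...
 *        mm->release(); // tears the mapping down
 *    }
 */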

IOReturn IOMemoryDescriptor::handleFault(
    void *            _pager,
    vm_map_t          addressMap,
    mach_vm_address_t address,
    mach_vm_size_t    sourceOffset,
    mach_vm_size_t    length,
    IOOptionBits      options )
{
    IOReturn         err = kIOReturnSuccess;
    memory_object_t  pager = (memory_object_t) _pager;
    mach_vm_size_t   size;
    mach_vm_size_t   bytes;
    mach_vm_size_t   page;
    mach_vm_size_t   pageOffset;
    mach_vm_size_t   pagerOffset;
    IOPhysicalLength segLen;
    addr64_t         physAddr;

    if( !addressMap)
    {
        if( kIOMemoryRedirected & _flags)
        {
#if DEBUG
            IOLog("sleep mem redirect %p, %qx\n", this, sourceOffset);
#endif
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone );
    assert( physAddr );
    pageOffset  = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do
    {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;
        if (kIOReturnSuccess != err)
            break;

#if DEBUG
        if( kIOLogMapping & gIOKitDebug)
            IOLog("IOMemoryMap::map(%p) 0x%qx->0x%qx:0x%qx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength allLen;
                addr64_t         allPhys;

                allPhys = getPhysicalSegment( 0, &allLen, kIOMemoryMapperNone );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, atop_64(allPhys), round_page(allLen) );
            }
            else
            {
                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size)
                {
                    err = device_pager_populate_object(pager, pagerOffset,
                                                       (ppnum_t)(atop_64(physAddr + page)), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }

        // This call to vm_fault causes an early pmap level resolution
        // of the mappings created above for kernel mappings, since
        // faulting in later can't take place from interrupt level.
        /* *** ALERT *** */
        /* *** Temporary Workaround *** */

        if ((addressMap == kernel_map) && !(kIOMemoryRedirected & _flags))
        {
            vm_fault(addressMap,
                     (vm_map_offset_t)address,
                     VM_PROT_READ|VM_PROT_WRITE,
                     FALSE, THREAD_UNINT, NULL,
                     (vm_map_offset_t)0);
        }

        /* *** Temporary Workaround *** */
        /* *** ALERT *** */

        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    }
    while (bytes && (physAddr = getPhysicalSegment( sourceOffset, &segLen, kIOMemoryMapperNone )));

    if (bytes)
        err = kIOReturnBadArgument;

    return (err);
}

IOReturn IOMemoryDescriptor::doUnmap(
    vm_map_t         addressMap,
    IOVirtualAddress __address,
    IOByteCount      __length )
{
    IOReturn          err;
    mach_vm_address_t address;
    mach_vm_size_t    length;

    if (__length)
    {
        address = __address;
        length  = __length;
    }
    else
    {
        addressMap = ((IOMemoryMap *) __address)->fAddressMap;
        address    = ((IOMemoryMap *) __address)->fAddress;
        length     = ((IOMemoryMap *) __address)->fLength;
    }

    if ((addressMap == kernel_map)
        && ((kIOMemoryBufferPageable & _flags) || !_memEntry))
        addressMap = IOPageableMapForAddress( address );

#if DEBUG
    if( kIOLogMapping & gIOKitDebug)
        IOLog("IOMemoryDescriptor::doUnmap map %p, 0x%qx:0x%qx\n",
              addressMap, address, length );
#endif

    err = mach_vm_deallocate( addressMap, address, length );

    return (err);
}
3045 | ||
91447636 | 3046 | IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool doRedirect ) |
e3027f41 | 3047 | { |
91447636 | 3048 | IOReturn err = kIOReturnSuccess; |
b0d623f7 | 3049 | IOMemoryMap * mapping = 0; |
e3027f41 A |
3050 | OSIterator * iter; |
3051 | ||
3052 | LOCK; | |
3053 | ||
91447636 A |
3054 | if( doRedirect) |
3055 | _flags |= kIOMemoryRedirected; | |
3056 | else | |
3057 | _flags &= ~kIOMemoryRedirected; | |
3058 | ||
e3027f41 A |
3059 | do { |
3060 | if( (iter = OSCollectionIterator::withCollection( _mappings))) { | |
b0d623f7 | 3061 | while( (mapping = (IOMemoryMap *) iter->getNextObject())) |
91447636 | 3062 | mapping->redirect( safeTask, doRedirect ); |
e3027f41 | 3063 | |
91447636 A |
3064 | iter->release(); |
3065 | } | |
e3027f41 A |
3066 | } while( false ); |
3067 | ||
91447636 A |
3068 | if (!doRedirect) |
3069 | { | |
9bccf70c | 3070 | WAKEUP; |
0b4e3aa0 A |
3071 | } |
3072 | ||
e3027f41 A |
3073 | UNLOCK; |
3074 | ||
b0d623f7 | 3075 | #ifndef __LP64__ |
e3027f41 A |
3076 | // temporary binary compatibility |
3077 | IOSubMemoryDescriptor * subMem; | |
3078 | if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this))) | |
91447636 | 3079 | err = subMem->redirect( safeTask, doRedirect ); |
e3027f41 | 3080 | else |
91447636 | 3081 | err = kIOReturnSuccess; |
b0d623f7 | 3082 | #endif /* !__LP64__ */ |
e3027f41 A |
3083 | |
3084 | return( err ); | |
3085 | } | |
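/*
 * Editorial reading of redirect(): with doRedirect true, every mapping
 * not belonging to safeTask (and not kIOMapStatic) has its pmap entries
 * torn down by IOMemoryMap::redirect() below, so stray accesses fault
 * rather than reach the pages (write-combined mappings instead flip to
 * inhibited caching); with doRedirect false the flag is cleared and
 * WAKEUP releases any threads waiting on kIOMemoryRedirected.
 */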
3086 | ||
b0d623f7 | 3087 | IOReturn IOMemoryMap::redirect( task_t safeTask, bool doRedirect ) |
e3027f41 A |
3088 | { |
3089 | IOReturn err = kIOReturnSuccess; | |
3090 | ||
2d21ac55 | 3091 | if( fSuperMap) { |
b0d623f7 | 3092 | // err = ((IOMemoryMap *)superMap)->redirect( safeTask, doRedirect ); |
e3027f41 A |
3093 | } else { |
3094 | ||
3095 | LOCK; | |
0c530ab8 A |
3096 | |
3097 | do | |
91447636 | 3098 | { |
2d21ac55 | 3099 | if (!fAddress) |
0c530ab8 | 3100 | break; |
2d21ac55 | 3101 | if (!fAddressMap) |
0c530ab8 A |
3102 | break; |
3103 | ||
2d21ac55 A |
3104 | if ((!safeTask || (get_task_map(safeTask) != fAddressMap)) |
3105 | && (0 == (fOptions & kIOMapStatic))) | |
0c530ab8 | 3106 | { |
2d21ac55 | 3107 | IOUnmapPages( fAddressMap, fAddress, fLength ); |
b0d623f7 A |
3108 | err = kIOReturnSuccess; |
3109 | #if DEBUG | |
2d21ac55 | 3110 | IOLog("IOMemoryMap::redirect(%d, %p) 0x%qx:0x%qx from %p\n", doRedirect, this, fAddress, fLength, fAddressMap); |
e3027f41 | 3111 | #endif |
0c530ab8 | 3112 | } |
2d21ac55 | 3113 | else if (kIOMapWriteCombineCache == (fOptions & kIOMapCacheMask)) |
0c530ab8 A |
3114 | { |
3115 | IOOptionBits newMode; | |
2d21ac55 A |
3116 | newMode = (fOptions & ~kIOMapCacheMask) | (doRedirect ? kIOMapInhibitCache : kIOMapWriteCombineCache); |
3117 | IOProtectCacheMode(fAddressMap, fAddress, fLength, newMode); | |
0c530ab8 A |
3118 | } |
3119 | } | |
3120 | while (false); | |
0c530ab8 | 3121 | UNLOCK; |
e3027f41 A |
3122 | } |
3123 | ||
2d21ac55 A |
3124 | if ((((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
3125 | || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) | |
91447636 | 3126 | && safeTask |
2d21ac55 A |
3127 | && (doRedirect != (0 != (fMemory->_flags & kIOMemoryRedirected)))) |
3128 | fMemory->redirect(safeTask, doRedirect); | |
91447636 | 3129 | |
e3027f41 A |
3130 | return( err ); |
3131 | } | |
3132 | ||
b0d623f7 | 3133 | IOReturn IOMemoryMap::unmap( void ) |
1c79356b A |
3134 | { |
3135 | IOReturn err; | |
3136 | ||
3137 | LOCK; | |
3138 | ||
2d21ac55 A |
3139 | if( fAddress && fAddressMap && (0 == fSuperMap) && fMemory |
3140 | && (0 == (fOptions & kIOMapStatic))) { | |
1c79356b | 3141 | |
2d21ac55 | 3142 | err = fMemory->doUnmap(fAddressMap, (IOVirtualAddress) this, 0); |
1c79356b A |
3143 | |
3144 | } else | |
3145 | err = kIOReturnSuccess; | |
3146 | ||
2d21ac55 A |
3147 | if (fAddressMap) |
3148 | { | |
3149 | vm_map_deallocate(fAddressMap); | |
3150 | fAddressMap = 0; | |
3151 | } | |
3152 | ||
3153 | fAddress = 0; | |
1c79356b A |
3154 | |
3155 | UNLOCK; | |
3156 | ||
3157 | return( err ); | |
3158 | } | |
3159 | ||
b0d623f7 | 3160 | void IOMemoryMap::taskDied( void ) |
1c79356b A |
3161 | { |
3162 | LOCK; | |
b0d623f7 A |
3163 | if (fUserClientUnmap) |
3164 | unmap(); | |
2d21ac55 A |
3165 | if( fAddressMap) { |
3166 | vm_map_deallocate(fAddressMap); | |
3167 | fAddressMap = 0; | |
1c79356b | 3168 | } |
2d21ac55 A |
3169 | fAddressTask = 0; |
3170 | fAddress = 0; | |
1c79356b A |
3171 | UNLOCK; |
3172 | } | |
3173 | ||
b0d623f7 A |
3174 | IOReturn IOMemoryMap::userClientUnmap( void ) |
3175 | { | |
3176 | fUserClientUnmap = true; | |
3177 | return (kIOReturnSuccess); | |
3178 | } | |
3179 | ||
9bccf70c A |
3180 | // Overload the release mechanism. Every mapping must be a member
3181 | // of its memory descriptor's _mappings set, which means there are | |
3182 | // always 2 references on a mapping. When either of these references | |
3183 | // is released we need to free ourselves. | |
b0d623f7 | 3184 | void IOMemoryMap::taggedRelease(const void *tag) const |
9bccf70c | 3185 | { |
55e303ae | 3186 | LOCK; |
9bccf70c | 3187 | super::taggedRelease(tag, 2); |
55e303ae | 3188 | UNLOCK; |
9bccf70c A |
3189 | } |
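/*
 * Illustrative consequence of the scheme above (a sketch, not from this
 * file): a mapping handed out by createMappingInTask() is retained once
 * by the caller and once by the descriptor's _mappings set, so a single
 * client release() drops the count below the freeWhen value of 2 and
 * triggers ::free().
 *
 *     IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
 *                                                 kIOMapAnywhere, 0, 0);
 *     // ... use the mapping ...
 *     map->release();    // the _mappings set held the other reference
 */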
3190 | ||
b0d623f7 | 3191 | void IOMemoryMap::free() |
1c79356b A |
3192 | { |
3193 | unmap(); | |
3194 | ||
2d21ac55 A |
3195 | if (fMemory) |
3196 | { | |
1c79356b | 3197 | LOCK; |
2d21ac55 | 3198 | fMemory->removeMapping(this); |
1c79356b | 3199 | UNLOCK; |
2d21ac55 | 3200 | fMemory->release(); |
1c79356b A |
3201 | } |
3202 | ||
2d21ac55 | 3203 | if (fOwner && (fOwner != fMemory)) |
91447636 A |
3204 | { |
3205 | LOCK; | |
2d21ac55 | 3206 | fOwner->removeMapping(this); |
91447636 A |
3207 | UNLOCK; |
3208 | } | |
3209 | ||
2d21ac55 A |
3210 | if (fSuperMap) |
3211 | fSuperMap->release(); | |
1c79356b | 3212 | |
2d21ac55 A |
3213 | if (fRedirUPL) { |
3214 | upl_commit(fRedirUPL, NULL, 0); | |
3215 | upl_deallocate(fRedirUPL); | |
91447636 A |
3216 | } |
3217 | ||
1c79356b A |
3218 | super::free(); |
3219 | } | |
3220 | ||
b0d623f7 | 3221 | IOByteCount IOMemoryMap::getLength() |
1c79356b | 3222 | { |
2d21ac55 | 3223 | return( fLength ); |
1c79356b A |
3224 | } |
3225 | ||
b0d623f7 | 3226 | IOVirtualAddress IOMemoryMap::getVirtualAddress() |
1c79356b | 3227 | { |
b0d623f7 | 3228 | #ifndef __LP64__ |
2d21ac55 A |
3229 | if (fSuperMap) |
3230 | fSuperMap->getVirtualAddress(); | |
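	// note: the result is discarded here; execution falls through to
	// the return of fAddress below, the call only runs the superMap's
	// own diagnostics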
b0d623f7 A |
3231 | else if (fAddressMap |
3232 | && vm_map_is_64bit(fAddressMap) | |
3233 | && (sizeof(IOVirtualAddress) < 8)) | |
2d21ac55 A |
3234 | { |
3235 | OSReportWithBacktrace("IOMemoryMap::getVirtualAddress(0x%qx) called on 64b map; use ::getAddress()", fAddress); | |
3236 | } | |
b0d623f7 | 3237 | #endif /* !__LP64__ */ |
2d21ac55 A |
3238 | |
3239 | return (fAddress); | |
3240 | } | |
3241 | ||
b0d623f7 A |
3242 | #ifndef __LP64__ |
3243 | mach_vm_address_t IOMemoryMap::getAddress() | |
2d21ac55 A |
3244 | { |
3245 | return( fAddress); | |
3246 | } | |
3247 | ||
b0d623f7 | 3248 | mach_vm_size_t IOMemoryMap::getSize() |
2d21ac55 A |
3249 | { |
3250 | return( fLength ); | |
1c79356b | 3251 | } |
b0d623f7 | 3252 | #endif /* !__LP64__ */ |
1c79356b | 3253 | |
2d21ac55 | 3254 | |
b0d623f7 | 3255 | task_t IOMemoryMap::getAddressTask() |
1c79356b | 3256 | { |
2d21ac55 A |
3257 | if( fSuperMap) |
3258 | return( fSuperMap->getAddressTask()); | |
1c79356b | 3259 | else |
2d21ac55 | 3260 | return( fAddressTask); |
1c79356b A |
3261 | } |
3262 | ||
b0d623f7 | 3263 | IOOptionBits IOMemoryMap::getMapOptions() |
1c79356b | 3264 | { |
2d21ac55 | 3265 | return( fOptions); |
1c79356b A |
3266 | } |
3267 | ||
b0d623f7 | 3268 | IOMemoryDescriptor * IOMemoryMap::getMemoryDescriptor() |
1c79356b | 3269 | { |
2d21ac55 | 3270 | return( fMemory ); |
1c79356b A |
3271 | } |
3272 | ||
b0d623f7 A |
3273 | IOMemoryMap * IOMemoryMap::copyCompatible( |
3274 | IOMemoryMap * newMapping ) | |
1c79356b | 3275 | { |
2d21ac55 A |
3276 | task_t task = newMapping->getAddressTask(); |
3277 | mach_vm_address_t toAddress = newMapping->fAddress; | |
3278 | IOOptionBits _options = newMapping->fOptions; | |
3279 | mach_vm_size_t _offset = newMapping->fOffset; | |
3280 | mach_vm_size_t _length = newMapping->fLength; | |
1c79356b | 3281 | |
2d21ac55 | 3282 | if( (!task) || (!fAddressMap) || (fAddressMap != get_task_map(task))) |
1c79356b | 3283 | return( 0 ); |
2d21ac55 | 3284 | if( (fOptions ^ _options) & kIOMapReadOnly) |
9bccf70c A |
3285 | return( 0 ); |
3286 | if( (kIOMapDefaultCache != (_options & kIOMapCacheMask)) | |
2d21ac55 | 3287 | && ((fOptions ^ _options) & kIOMapCacheMask)) |
1c79356b A |
3288 | return( 0 ); |
3289 | ||
2d21ac55 | 3290 | if( (0 == (_options & kIOMapAnywhere)) && (fAddress != toAddress)) |
1c79356b A |
3291 | return( 0 ); |
3292 | ||
2d21ac55 | 3293 | if( _offset < fOffset) |
1c79356b A |
3294 | return( 0 ); |
3295 | ||
2d21ac55 | 3296 | _offset -= fOffset; |
1c79356b | 3297 | |
2d21ac55 | 3298 | if( (_offset + _length) > fLength) |
1c79356b A |
3299 | return( 0 ); |
3300 | ||
2d21ac55 A |
3301 | retain(); |
3302 | if( (fLength == _length) && (!_offset)) | |
3303 | { | |
2d21ac55 A |
3304 | newMapping = this; |
3305 | } | |
3306 | else | |
3307 | { | |
3308 | newMapping->fSuperMap = this; | |
6d2010ae | 3309 | newMapping->fOffset = fOffset + _offset; |
2d21ac55 | 3310 | newMapping->fAddress = fAddress + _offset; |
1c79356b A |
3311 | } |
3312 | ||
2d21ac55 | 3313 | return( newMapping ); |
1c79356b A |
3314 | } |
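/*
 * Worked example of the checks above (editorial, with made-up sizes):
 * suppose this map covers fOffset 0, fLength 0x4000 in the requesting
 * task with compatible options. A new request for offset 0x1000, length
 * 0x1000 is then served without creating a fresh VM entry: the new map
 * becomes a submap with fSuperMap = this and fAddress = fAddress +
 * 0x1000. A request for the identical range just returns this map,
 * retained.
 */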
3315 | ||
0c530ab8 | 3316 | IOPhysicalAddress |
b0d623f7 A |
3317 | #ifdef __LP64__ |
3318 | IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length, IOOptionBits _options) | |
3319 | #else /* !__LP64__ */ | |
3320 | IOMemoryMap::getPhysicalSegment( IOByteCount _offset, IOPhysicalLength * _length) | |
3321 | #endif /* !__LP64__ */ | |
1c79356b A |
3322 | { |
3323 | IOPhysicalAddress address; | |
3324 | ||
3325 | LOCK; | |
b0d623f7 A |
3326 | #ifdef __LP64__ |
3327 | address = fMemory->getPhysicalSegment( fOffset + _offset, _length, _options ); | |
3328 | #else /* !__LP64__ */ | |
2d21ac55 | 3329 | address = fMemory->getPhysicalSegment( fOffset + _offset, _length ); |
b0d623f7 | 3330 | #endif /* !__LP64__ */ |
1c79356b A |
3331 | UNLOCK; |
3332 | ||
3333 | return( address ); | |
3334 | } | |
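/*
 * Usage sketch for the accessor above (hypothetical offset): translate a
 * byte offset within the mapping to a physical address, with segLen
 * reporting how long the physically contiguous run is:
 *
 *     IOPhysicalLength   segLen;
 *     IOPhysicalAddress  phys = map->getPhysicalSegment(0, &segLen);
 */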
3335 | ||
3336 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
3337 | ||
3338 | #undef super | |
3339 | #define super OSObject | |
3340 | ||
3341 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
3342 | ||
3343 | void IOMemoryDescriptor::initialize( void ) | |
3344 | { | |
3345 | if( 0 == gIOMemoryLock) | |
3346 | gIOMemoryLock = IORecursiveLockAlloc(); | |
55e303ae A |
3347 | |
3348 | IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey, | |
3349 | ptoa_64(gIOMaximumMappedIOPageCount), 64); | |
0c530ab8 | 3350 | gIOLastPage = IOGetLastPageNumber(); |
1c79356b A |
3351 | } |
3352 | ||
3353 | void IOMemoryDescriptor::free( void ) | |
3354 | { | |
3355 | if( _mappings) | |
3356 | _mappings->release(); | |
3357 | ||
3358 | super::free(); | |
3359 | } | |
3360 | ||
3361 | IOMemoryMap * IOMemoryDescriptor::setMapping( | |
3362 | task_t intoTask, | |
3363 | IOVirtualAddress mapAddress, | |
55e303ae | 3364 | IOOptionBits options ) |
1c79356b | 3365 | { |
2d21ac55 A |
3366 | return (createMappingInTask( intoTask, mapAddress, |
3367 | options | kIOMapStatic, | |
3368 | 0, getLength() )); | |
1c79356b A |
3369 | } |
3370 | ||
3371 | IOMemoryMap * IOMemoryDescriptor::map( | |
55e303ae | 3372 | IOOptionBits options ) |
1c79356b | 3373 | { |
2d21ac55 A |
3374 | return (createMappingInTask( kernel_task, 0, |
3375 | options | kIOMapAnywhere, | |
3376 | 0, getLength() )); | |
1c79356b A |
3377 | } |
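/*
 * Typical use of the convenience wrapper above; a hedged sketch in which
 * userVA, userLen and userTask are hypothetical names, not from this
 * file:
 *
 *     IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *                                   userVA, userLen,
 *                                   kIODirectionOutIn, userTask);
 *     IOMemoryMap * map = md ? md->map(0) : 0;  // kernel_task, anywhere
 *     if (map)
 *     {
 *         void * kva = (void *) map->getVirtualAddress();
 *         // ... access the buffer through kva ...
 *         map->release();
 *     }
 *     if (md) md->release();
 */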
3378 | ||
b0d623f7 | 3379 | #ifndef __LP64__ |
2d21ac55 A |
3380 | IOMemoryMap * IOMemoryDescriptor::map( |
3381 | task_t intoTask, | |
3382 | IOVirtualAddress atAddress, | |
1c79356b | 3383 | IOOptionBits options, |
55e303ae A |
3384 | IOByteCount offset, |
3385 | IOByteCount length ) | |
1c79356b | 3386 | { |
2d21ac55 A |
3387 | if ((!(kIOMapAnywhere & options)) && vm_map_is_64bit(get_task_map(intoTask))) |
3388 | { | |
3389 | OSReportWithBacktrace("IOMemoryDescriptor::map() in 64b task, use ::createMappingInTask()"); | |
3390 | return (0); | |
3391 | } | |
3392 | ||
3393 | return (createMappingInTask(intoTask, atAddress, | |
3394 | options, offset, length)); | |
3395 | } | |
b0d623f7 | 3396 | #endif /* !__LP64__ */ |
2d21ac55 A |
3397 | |
3398 | IOMemoryMap * IOMemoryDescriptor::createMappingInTask( | |
3399 | task_t intoTask, | |
3400 | mach_vm_address_t atAddress, | |
3401 | IOOptionBits options, | |
3402 | mach_vm_size_t offset, | |
3403 | mach_vm_size_t length) | |
3404 | { | |
b0d623f7 A |
3405 | IOMemoryMap * result; |
3406 | IOMemoryMap * mapping; | |
2d21ac55 A |
3407 | |
3408 | if (0 == length) | |
1c79356b A |
3409 | length = getLength(); |
3410 | ||
b0d623f7 | 3411 | mapping = new IOMemoryMap; |
2d21ac55 A |
3412 | |
3413 | if( mapping | |
3414 | && !mapping->init( intoTask, atAddress, | |
3415 | options, offset, length )) { | |
3416 | mapping->release(); | |
3417 | mapping = 0; | |
3418 | } | |
3419 | ||
3420 | if (mapping) | |
3421 | result = makeMapping(this, intoTask, (IOVirtualAddress) mapping, options | kIOMap64Bit, 0, 0); | |
3422 | else | |
3423 | result = 0; | |
3424 | ||
b0d623f7 | 3425 | #if DEBUG |
2d21ac55 A |
3426 | if (!result) |
3427 | IOLog("createMappingInTask failed desc %p, addr %qx, options %lx, offset %qx, length %qx\n", | |
3428 | this, atAddress, options, offset, length); | |
3429 | #endif | |
3430 | ||
3431 | return (result); | |
1c79356b A |
3432 | } |
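/*
 * Sketch of the designated entry point above used to map into a user
 * task; kIOMapAnywhere and kIOMapReadOnly are real options, while
 * userTask and the read-only choice are illustrative:
 *
 *     IOMemoryMap * map = md->createMappingInTask(
 *         userTask,
 *         0,                                  // let the kernel choose
 *         kIOMapAnywhere | kIOMapReadOnly,
 *         0, 0);                              // whole descriptor
 *     mach_vm_address_t uva = map ? map->getAddress() : 0;
 */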
3433 | ||
b0d623f7 A |
3434 | #ifndef __LP64__ // on LP64 only the 64-bit version exists
3435 | IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, | |
91447636 A |
3436 | IOOptionBits options, |
3437 | IOByteCount offset) | |
2d21ac55 A |
3438 | { |
3439 | return (redirect(newBackingMemory, options, (mach_vm_size_t)offset)); | |
3440 | } | |
b0d623f7 | 3441 | #endif |
2d21ac55 | 3442 | |
b0d623f7 | 3443 | IOReturn IOMemoryMap::redirect(IOMemoryDescriptor * newBackingMemory, |
2d21ac55 A |
3444 | IOOptionBits options, |
3445 | mach_vm_size_t offset) | |
91447636 A |
3446 | { |
3447 | IOReturn err = kIOReturnSuccess; | |
3448 | IOMemoryDescriptor * physMem = 0; | |
3449 | ||
3450 | LOCK; | |
3451 | ||
2d21ac55 | 3452 | if (fAddress && fAddressMap) do |
91447636 | 3453 | { |
2d21ac55 A |
3454 | if (((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
3455 | || ((fMemory->_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) | |
91447636 | 3456 | { |
2d21ac55 | 3457 | physMem = fMemory; |
91447636 A |
3458 | physMem->retain(); |
3459 | } | |
3460 | ||
2d21ac55 | 3461 | if (!fRedirUPL) |
91447636 | 3462 | { |
b0d623f7 | 3463 | vm_size_t size = round_page(fLength); |
91447636 A |
3464 | int flags = UPL_COPYOUT_FROM | UPL_SET_INTERNAL |
3465 | | UPL_SET_LITE | UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS; | |
2d21ac55 | 3466 | if (KERN_SUCCESS != memory_object_iopl_request((ipc_port_t) fMemory->_memEntry, 0, &size, &fRedirUPL, |
91447636 A |
3467 | NULL, NULL, |
3468 | &flags)) | |
2d21ac55 | 3469 | fRedirUPL = 0; |
91447636 A |
3470 | |
3471 | if (physMem) | |
3472 | { | |
2d21ac55 | 3473 | IOUnmapPages( fAddressMap, fAddress, fLength ); |
b0d623f7 A |
3474 | if (false) |
3475 | physMem->redirect(0, true); | |
91447636 A |
3476 | } |
3477 | } | |
3478 | ||
3479 | if (newBackingMemory) | |
3480 | { | |
2d21ac55 | 3481 | if (newBackingMemory != fMemory) |
91447636 | 3482 | { |
2d21ac55 A |
3483 | fOffset = 0; |
3484 | if (this != newBackingMemory->makeMapping(newBackingMemory, fAddressTask, (IOVirtualAddress) this, | |
3485 | options | kIOMapUnique | kIOMapReference | kIOMap64Bit, | |
3486 | offset, fLength)) | |
91447636 A |
3487 | err = kIOReturnError; |
3488 | } | |
2d21ac55 | 3489 | if (fRedirUPL) |
91447636 | 3490 | { |
2d21ac55 A |
3491 | upl_commit(fRedirUPL, NULL, 0); |
3492 | upl_deallocate(fRedirUPL); | |
3493 | fRedirUPL = 0; | |
91447636 | 3494 | } |
b0d623f7 | 3495 | if (false && physMem) |
91447636 A |
3496 | physMem->redirect(0, false); |
3497 | } | |
3498 | } | |
3499 | while (false); | |
3500 | ||
3501 | UNLOCK; | |
3502 | ||
3503 | if (physMem) | |
3504 | physMem->release(); | |
3505 | ||
3506 | return (err); | |
3507 | } | |
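/*
 * Editorial reading of the flow above: the UPL taken with
 * UPL_BLOCK_ACCESS fences off the existing pages while the mapping is
 * re-pointed; once new backing memory is wired in via makeMapping() with
 * kIOMapUnique | kIOMapReference, upl_commit()/upl_deallocate() lift the
 * block. A caller would swap backing stores with something like:
 *
 *     map->redirect(newMD, 0, (mach_vm_size_t) 0);
 */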
3508 | ||
1c79356b A |
3509 | IOMemoryMap * IOMemoryDescriptor::makeMapping( |
3510 | IOMemoryDescriptor * owner, | |
2d21ac55 A |
3511 | task_t __intoTask, |
3512 | IOVirtualAddress __address, | |
1c79356b | 3513 | IOOptionBits options, |
2d21ac55 A |
3514 | IOByteCount __offset, |
3515 | IOByteCount __length ) | |
1c79356b | 3516 | { |
b0d623f7 | 3517 | #ifndef __LP64__ |
2d21ac55 | 3518 | if (!(kIOMap64Bit & options)) panic("IOMemoryDescriptor::makeMapping !64bit"); |
b0d623f7 | 3519 | #endif /* !__LP64__ */ |
2d21ac55 | 3520 | |
91447636 | 3521 | IOMemoryDescriptor * mapDesc = 0; |
b0d623f7 | 3522 | IOMemoryMap * result = 0; |
2d21ac55 A |
3523 | OSIterator * iter; |
3524 | ||
b0d623f7 | 3525 | IOMemoryMap * mapping = (IOMemoryMap *) __address; |
2d21ac55 A |
3526 | mach_vm_size_t offset = mapping->fOffset + __offset; |
3527 | mach_vm_size_t length = mapping->fLength; | |
3528 | ||
3529 | mapping->fOffset = offset; | |
1c79356b A |
3530 | |
3531 | LOCK; | |
3532 | ||
91447636 A |
3533 | do |
3534 | { | |
2d21ac55 A |
3535 | if (kIOMapStatic & options) |
3536 | { | |
3537 | result = mapping; | |
3538 | addMapping(mapping); | |
3539 | mapping->setMemoryDescriptor(this, 0); | |
3540 | continue; | |
3541 | } | |
3542 | ||
91447636 A |
3543 | if (kIOMapUnique & options) |
3544 | { | |
060df5ea | 3545 | addr64_t phys; |
91447636 | 3546 | IOByteCount physLen; |
1c79356b | 3547 | |
2d21ac55 | 3548 | // if (owner != this) continue; |
1c79356b | 3549 | |
0c530ab8 A |
3550 | if (((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) |
3551 | || ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical64)) | |
91447636 | 3552 | { |
b0d623f7 | 3553 | phys = getPhysicalSegment(offset, &physLen, kIOMemoryMapperNone); |
91447636 A |
3554 | if (!phys || (physLen < length)) |
3555 | continue; | |
3556 | ||
b0d623f7 A |
3557 | mapDesc = IOMemoryDescriptor::withAddressRange( |
3558 | phys, length, getDirection() | kIOMemoryMapperNone, NULL); | |
91447636 A |
3559 | if (!mapDesc) |
3560 | continue; | |
3561 | offset = 0; | |
2d21ac55 | 3562 | mapping->fOffset = offset; |
91447636 A |
3563 | } |
3564 | } | |
3565 | else | |
3566 | { | |
2d21ac55 A |
3567 | // look for a compatible existing mapping |
3568 | if( (iter = OSCollectionIterator::withCollection(_mappings))) | |
3569 | { | |
b0d623f7 A |
3570 | IOMemoryMap * lookMapping; |
3571 | while ((lookMapping = (IOMemoryMap *) iter->getNextObject())) | |
2d21ac55 A |
3572 | { |
3573 | if ((result = lookMapping->copyCompatible(mapping))) | |
3574 | { | |
3575 | addMapping(result); | |
3576 | result->setMemoryDescriptor(this, offset); | |
91447636 | 3577 | break; |
2d21ac55 | 3578 | } |
91447636 A |
3579 | } |
3580 | iter->release(); | |
3581 | } | |
2d21ac55 | 3582 | if (result || (options & kIOMapReference)) |
6d2010ae A |
3583 | { |
3584 | if (result != mapping) | |
3585 | { | |
3586 | mapping->release(); | |
3587 | mapping = NULL; | |
3588 | } | |
91447636 | 3589 | continue; |
6d2010ae | 3590 | } |
2d21ac55 | 3591 | } |
91447636 | 3592 | |
2d21ac55 A |
3593 | if (!mapDesc) |
3594 | { | |
3595 | mapDesc = this; | |
91447636 A |
3596 | mapDesc->retain(); |
3597 | } | |
2d21ac55 A |
3598 | IOReturn |
3599 | kr = mapDesc->doMap( 0, (IOVirtualAddress *) &mapping, options, 0, 0 ); | |
3600 | if (kIOReturnSuccess == kr) | |
3601 | { | |
3602 | result = mapping; | |
3603 | mapDesc->addMapping(result); | |
3604 | result->setMemoryDescriptor(mapDesc, offset); | |
3605 | } | |
3606 | else | |
3607 | { | |
1c79356b | 3608 | mapping->release(); |
2d21ac55 | 3609 | mapping = NULL; |
1c79356b | 3610 | } |
91447636 | 3611 | } |
2d21ac55 | 3612 | while( false ); |
1c79356b A |
3613 | |
3614 | UNLOCK; | |
3615 | ||
91447636 A |
3616 | if (mapDesc) |
3617 | mapDesc->release(); | |
3618 | ||
2d21ac55 | 3619 | return (result); |
1c79356b A |
3620 | } |
3621 | ||
3622 | void IOMemoryDescriptor::addMapping( | |
3623 | IOMemoryMap * mapping ) | |
3624 | { | |
2d21ac55 A |
3625 | if( mapping) |
3626 | { | |
1c79356b A |
3627 | if( 0 == _mappings) |
3628 | _mappings = OSSet::withCapacity(1); | |
9bccf70c A |
3629 | if( _mappings ) |
3630 | _mappings->setObject( mapping ); | |
1c79356b A |
3631 | } |
3632 | } | |
3633 | ||
3634 | void IOMemoryDescriptor::removeMapping( | |
3635 | IOMemoryMap * mapping ) | |
3636 | { | |
9bccf70c | 3637 | if( _mappings) |
1c79356b | 3638 | _mappings->removeObject( mapping); |
1c79356b A |
3639 | } |
3640 | ||
b0d623f7 A |
3641 | #ifndef __LP64__ |
3642 | // obsolete initializers | |
3643 | // - initWithOptions is the designated initializer | |
1c79356b | 3644 | bool |
b0d623f7 | 3645 | IOMemoryDescriptor::initWithAddress(void * address, |
55e303ae A |
3646 | IOByteCount length, |
3647 | IODirection direction) | |
1c79356b A |
3648 | { |
3649 | return( false ); | |
3650 | } | |
3651 | ||
3652 | bool | |
b0d623f7 | 3653 | IOMemoryDescriptor::initWithAddress(IOVirtualAddress address, |
55e303ae A |
3654 | IOByteCount length, |
3655 | IODirection direction, | |
3656 | task_t task) | |
1c79356b A |
3657 | { |
3658 | return( false ); | |
3659 | } | |
3660 | ||
3661 | bool | |
b0d623f7 | 3662 | IOMemoryDescriptor::initWithPhysicalAddress( |
1c79356b | 3663 | IOPhysicalAddress address, |
55e303ae A |
3664 | IOByteCount length, |
3665 | IODirection direction ) | |
1c79356b A |
3666 | { |
3667 | return( false ); | |
3668 | } | |
3669 | ||
3670 | bool | |
b0d623f7 | 3671 | IOMemoryDescriptor::initWithRanges( |
1c79356b A |
3672 | IOVirtualRange * ranges, |
3673 | UInt32 withCount, | |
55e303ae A |
3674 | IODirection direction, |
3675 | task_t task, | |
3676 | bool asReference) | |
1c79356b A |
3677 | { |
3678 | return( false ); | |
3679 | } | |
3680 | ||
3681 | bool | |
b0d623f7 | 3682 | IOMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges, |
1c79356b | 3683 | UInt32 withCount, |
55e303ae A |
3684 | IODirection direction, |
3685 | bool asReference) | |
1c79356b A |
3686 | { |
3687 | return( false ); | |
3688 | } | |
3689 | ||
b0d623f7 A |
3690 | void * IOMemoryDescriptor::getVirtualSegment(IOByteCount offset, |
3691 | IOByteCount * lengthOfSegment) | |
3692 | { | |
3693 | return( 0 ); | |
3694 | } | |
3695 | #endif /* !__LP64__ */ | |
3696 | ||
1c79356b A |
3697 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
3698 | ||
9bccf70c A |
3699 | bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const |
3700 | { | |
3701 | OSSymbol const *keys[2]; | |
3702 | OSObject *values[2]; | |
91447636 A |
3703 | struct SerData { |
3704 | user_addr_t address; | |
3705 | user_size_t length; | |
3706 | } *vcopy; | |
9bccf70c A |
3707 | unsigned int index, nRanges; |
3708 | bool result; | |
3709 | ||
91447636 A |
3710 | IOOptionBits type = _flags & kIOMemoryTypeMask; |
3711 | ||
9bccf70c A |
3712 | if (s == NULL) return false; |
3713 | if (s->previouslySerialized(this)) return true; | |
3714 | ||
3715 | // Pretend we are an array. | |
3716 | if (!s->addXMLStartTag(this, "array")) return false; | |
3717 | ||
3718 | nRanges = _rangesCount; | |
91447636 | 3719 | vcopy = (SerData *) IOMalloc(sizeof(SerData) * nRanges); |
9bccf70c A |
3720 | if (vcopy == 0) return false; |
3721 | ||
3722 | keys[0] = OSSymbol::withCString("address"); | |
3723 | keys[1] = OSSymbol::withCString("length"); | |
3724 | ||
3725 | result = false; | |
3726 | values[0] = values[1] = 0; | |
3727 | ||
3728 | // From this point on we can go to bail. | |
3729 | ||
3730 | // Copy the volatile data so we don't have to allocate memory | |
3731 | // while the lock is held. | |
3732 | LOCK; | |
3733 | if (nRanges == _rangesCount) { | |
91447636 | 3734 | Ranges vec = _ranges; |
9bccf70c | 3735 | for (index = 0; index < nRanges; index++) { |
91447636 A |
3736 | user_addr_t addr; IOByteCount len; |
3737 | getAddrLenForInd(addr, len, type, vec, index); | |
3738 | vcopy[index].address = addr; | |
3739 | vcopy[index].length = len; | |
9bccf70c A |
3740 | } |
3741 | } else { | |
3742 | // The descriptor changed out from under us. Give up. | |
3743 | UNLOCK; | |
3744 | result = false; | |
3745 | goto bail; | |
3746 | } | |
3747 | UNLOCK; | |
3748 | ||
3749 | for (index = 0; index < nRanges; index++) | |
3750 | { | |
91447636 A |
3751 | user_addr_t addr = vcopy[index].address; |
3752 | IOByteCount len = (IOByteCount) vcopy[index].length; | |
3753 | values[0] = | |
060df5ea | 3754 | OSNumber::withNumber(addr, sizeof(addr) * 8); |
9bccf70c A |
3755 | if (values[0] == 0) { |
3756 | result = false; | |
3757 | goto bail; | |
3758 | } | |
91447636 | 3759 | values[1] = OSNumber::withNumber(len, sizeof(len) * 8); |
9bccf70c A |
3760 | if (values[1] == 0) { |
3761 | result = false; | |
3762 | goto bail; | |
3763 | } | |
3764 | OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2); | |
3765 | if (dict == 0) { | |
3766 | result = false; | |
3767 | goto bail; | |
3768 | } | |
3769 | values[0]->release(); | |
3770 | values[1]->release(); | |
3771 | values[0] = values[1] = 0; | |
3772 | ||
3773 | result = dict->serialize(s); | |
3774 | dict->release(); | |
3775 | if (!result) { | |
3776 | goto bail; | |
3777 | } | |
3778 | } | |
3779 | result = s->addXMLEndTag("array"); | |
3780 | ||
3781 | bail: | |
3782 | if (values[0]) | |
3783 | values[0]->release(); | |
3784 | if (values[1]) | |
3785 | values[1]->release(); | |
3786 | if (keys[0]) | |
3787 | keys[0]->release(); | |
3788 | if (keys[1]) | |
3789 | keys[1]->release(); | |
3790 | if (vcopy) | |
2d21ac55 | 3791 | IOFree(vcopy, sizeof(SerData) * nRanges); |
9bccf70c A |
3792 | return result; |
3793 | } | |
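/*
 * Shape of the serialization produced above: one <dict> per range with
 * "address" and "length" OSNumber values, wrapped in an <array>. The
 * values below are illustrative and the exact attributes depend on
 * OSSerialize/OSNumber formatting:
 *
 *     <array>
 *       <dict>
 *         <key>address</key><integer size="64">0x16fe00000</integer>
 *         <key>length</key><integer size="64">0x1000</integer>
 *       </dict>
 *     </array>
 */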
3794 | ||
9bccf70c A |
3795 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ |
3796 | ||
0b4e3aa0 | 3797 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0); |
b0d623f7 A |
3798 | #ifdef __LP64__ |
3799 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 1); | |
3800 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 2); | |
3801 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3); | |
3802 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4); | |
3803 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5); | |
3804 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6); | |
3805 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7); | |
3806 | #else /* !__LP64__ */ | |
55e303ae A |
3807 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1); |
3808 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2); | |
91447636 A |
3809 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 3); |
3810 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 4); | |
0c530ab8 | 3811 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 5); |
b0d623f7 A |
3812 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 6); |
3813 | OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 7); | |
3814 | #endif /* !__LP64__ */ | |
1c79356b A |
3815 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8); |
3816 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9); | |
3817 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10); | |
3818 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11); | |
3819 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12); | |
3820 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13); | |
3821 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14); | |
3822 | OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15); | |
9bccf70c | 3823 | |
55e303ae | 3824 | /* ex-inline function implementation */ |
0c530ab8 A |
3825 | IOPhysicalAddress |
3826 | IOMemoryDescriptor::getPhysicalAddress() | |
9bccf70c | 3827 | { return( getPhysicalSegment( 0, 0 )); } |