/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1998 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOKitKeysPrivate.h>

#include <IOKit/IOKitDebug.h>

#include <libkern/c++/OSContainers.h>
#include <libkern/c++/OSDictionary.h>
#include <libkern/c++/OSArray.h>
#include <libkern/c++/OSSymbol.h>
#include <libkern/c++/OSNumber.h>
#include <sys/cdefs.h>

__BEGIN_DECLS
#include <vm/pmap.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#ifndef i386
struct phys_entry *pmap_find_physentry(ppnum_t pa);
#endif
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
void ipc_port_release_send(ipc_port_t port);

/* Copy between a physical page and a virtual address in the given vm_map */
kern_return_t copypv(addr64_t source, addr64_t sink, unsigned int size, int which);

memory_object_t
device_pager_setup(
    memory_object_t pager,
    int             device_handle,
    vm_size_t       size,
    int             flags);
void
device_pager_deallocate(
    memory_object_t);
kern_return_t
device_pager_populate_object(
    memory_object_t    pager,
    vm_object_offset_t offset,
    ppnum_t            phys_addr,
    vm_size_t          size);
kern_return_t
memory_object_iopl_request(
    ipc_port_t             port,
    memory_object_offset_t offset,
    vm_size_t              *upl_size,
    upl_t                  *upl_ptr,
    upl_page_info_array_t  user_page_list,
    unsigned int           *page_list_count,
    int                    *flags);

/*
 * Page fault handling based on vm_map (or entries therein)
 */
extern kern_return_t vm_fault(
    vm_map_t    map,
    vm_offset_t vaddr,
    vm_prot_t   fault_type,
    boolean_t   change_wiring,
    int         interruptible,
    pmap_t      caller_pmap,
    vm_offset_t caller_pmap_addr);

unsigned int IOTranslateCacheBits(struct phys_entry *pp);

vm_map_t IOPageableMapForAddress( vm_address_t address );

typedef kern_return_t (*IOIteratePageableMapsCallback)(vm_map_t map, void * ref);

kern_return_t IOIteratePageableMaps(vm_size_t size,
                    IOIteratePageableMapsCallback callback, void * ref);
__END_DECLS

#define kIOMaximumMappedIOByteCount	(512*1024*1024)

static IOMapper * gIOSystemMapper;
static ppnum_t    gIOMaximumMappedIOPageCount = atop_32(kIOMaximumMappedIOByteCount);

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSDefineMetaClassAndAbstractStructors( IOMemoryDescriptor, OSObject )

#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOGeneralMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

static IORecursiveLock * gIOMemoryLock;

#define LOCK	IORecursiveLockLock( gIOMemoryLock)
#define UNLOCK	IORecursiveLockUnlock( gIOMemoryLock)
#define SLEEP	IORecursiveLockSleep( gIOMemoryLock, (void *)this, THREAD_UNINT)
#define WAKEUP	\
    IORecursiveLockWakeup( gIOMemoryLock, (void *)this, /* one-thread */ false)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#define next_page(a) ( trunc_page_32(a) + PAGE_SIZE )

extern "C" {

kern_return_t device_data_action(
               int                device_handle,
               ipc_port_t         device_pager,
               vm_prot_t          protection,
               vm_object_offset_t offset,
               vm_size_t          size)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    kern_return_t        kr;
    ExpansionData *      ref = (ExpansionData *) device_handle;
    IOMemoryDescriptor * memDesc;

    LOCK;
    memDesc = ref->memory;
    if( memDesc)
        kr = memDesc->handleFault( device_pager, 0, 0,
                offset, size, kIOMapDefaultCache /*?*/);
    else
        kr = KERN_ABORTED;
    UNLOCK;

    return( kr );
}

kern_return_t device_close(
               int device_handle)
{
    struct ExpansionData {
        void *               devicePager;
        unsigned int         pagerContig:1;
        unsigned int         unused:31;
        IOMemoryDescriptor * memory;
    };
    ExpansionData * ref = (ExpansionData *) device_handle;

    IODelete( ref, ExpansionData, 1 );

    return( kIOReturnSuccess );
}

}	// extern "C"

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*
 * withAddress:
 *
 * Create a new IOMemoryDescriptor.  The buffer is a virtual address
 * relative to the specified task.  If no task is supplied, the kernel
 * task is implied.
 */
IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(void *      address,
                                IOByteCount length,
                                IODirection direction)
{
    return IOMemoryDescriptor::
        withAddress((vm_address_t) address, length, direction, kernel_task);
}

IOMemoryDescriptor *
IOMemoryDescriptor::withAddress(vm_address_t address,
                                IOByteCount  length,
                                IODirection  direction,
                                task_t       task)
{
    IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor;
    if (that)
    {
        if (that->initWithAddress(address, length, direction, task))
            return that;

        that->release();
    }
    return 0;
}
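
/*
 * Usage sketch (editorial illustration, not part of the original sources):
 * wrapping an existing kernel buffer for an outbound transfer.  The buffer
 * and length below are hypothetical values.
 *
 *	void * buf = ...;                      // hypothetical kernel buffer
 *	IOMemoryDescriptor * md =
 *	    IOMemoryDescriptor::withAddress(buf, 4096, kIODirectionOut);
 *	if (md) {
 *	    // hand md to a driver, which brackets I/O with prepare()/complete()
 *	    md->release();
 *	}
 */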
222 | ||
223 | IOMemoryDescriptor * | |
55e303ae A |
224 | IOMemoryDescriptor::withPhysicalAddress( |
225 | IOPhysicalAddress address, | |
226 | IOByteCount length, | |
227 | IODirection direction ) | |
228 | { | |
229 | IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; | |
230 | if (self | |
231 | && !self->initWithPhysicalAddress(address, length, direction)) { | |
232 | self->release(); | |
233 | return 0; | |
234 | } | |
235 | ||
236 | return self; | |
237 | } | |
238 | ||
239 | IOMemoryDescriptor * | |
240 | IOMemoryDescriptor::withRanges( IOVirtualRange * ranges, | |
241 | UInt32 withCount, | |
242 | IODirection direction, | |
243 | task_t task, | |
244 | bool asReference) | |
1c79356b A |
245 | { |
246 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; | |
247 | if (that) | |
248 | { | |
55e303ae | 249 | if (that->initWithRanges(ranges, withCount, direction, task, asReference)) |
1c79356b A |
250 | return that; |
251 | ||
252 | that->release(); | |
253 | } | |
254 | return 0; | |
255 | } | |
256 | ||
1c79356b A |
257 | |
258 | /* | |
259 | * withRanges: | |
260 | * | |
261 | * Create a new IOMemoryDescriptor. The buffer is made up of several | |
262 | * virtual address ranges, from a given task. | |
263 | * | |
264 | * Passing the ranges as a reference will avoid an extra allocation. | |
265 | */ | |
266 | IOMemoryDescriptor * | |
55e303ae A |
267 | IOMemoryDescriptor::withOptions(void * buffers, |
268 | UInt32 count, | |
269 | UInt32 offset, | |
270 | task_t task, | |
271 | IOOptionBits opts, | |
272 | IOMapper * mapper) | |
1c79356b | 273 | { |
55e303ae | 274 | IOGeneralMemoryDescriptor *self = new IOGeneralMemoryDescriptor; |
d7e50217 | 275 | |
55e303ae A |
276 | if (self |
277 | && !self->initWithOptions(buffers, count, offset, task, opts, mapper)) | |
278 | { | |
279 | self->release(); | |
280 | return 0; | |
de355530 | 281 | } |
55e303ae A |
282 | |
283 | return self; | |
284 | } | |
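
/*
 * Usage sketch (editorial illustration): the same factory expressed via
 * withOptions(), passing two virtual ranges by reference.  The addresses,
 * lengths, and flag combination are hypothetical; the flags mirror what
 * initWithRanges() below builds internally.
 *
 *	IOVirtualRange ranges[2] = { { addr0, len0 }, { addr1, len1 } };
 *	IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *	    ranges, 2, 0, current_task(),
 *	    kIOMemoryTypeVirtual | kIODirectionIn | kIOMemoryAsReference,
 *	    0);                                 // 0 mapper = system mapper
 */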
285 | ||
286 | // Can't leave abstract but this should never be used directly, | |
287 | bool IOMemoryDescriptor::initWithOptions(void * buffers, | |
288 | UInt32 count, | |
289 | UInt32 offset, | |
290 | task_t task, | |
291 | IOOptionBits options, | |
292 | IOMapper * mapper) | |
293 | { | |
294 | // @@@ gvdl: Should I panic? | |
295 | panic("IOMD::initWithOptions called\n"); | |
1c79356b A |
296 | return 0; |
297 | } | |
298 | ||
299 | IOMemoryDescriptor * | |
300 | IOMemoryDescriptor::withPhysicalRanges( IOPhysicalRange * ranges, | |
301 | UInt32 withCount, | |
55e303ae A |
302 | IODirection direction, |
303 | bool asReference) | |
1c79356b A |
304 | { |
305 | IOGeneralMemoryDescriptor * that = new IOGeneralMemoryDescriptor; | |
306 | if (that) | |
307 | { | |
55e303ae | 308 | if (that->initWithPhysicalRanges(ranges, withCount, direction, asReference)) |
1c79356b A |
309 | return that; |
310 | ||
311 | that->release(); | |
312 | } | |
313 | return 0; | |
314 | } | |
315 | ||
316 | IOMemoryDescriptor * | |
317 | IOMemoryDescriptor::withSubRange(IOMemoryDescriptor * of, | |
318 | IOByteCount offset, | |
319 | IOByteCount length, | |
55e303ae | 320 | IODirection direction) |
1c79356b | 321 | { |
55e303ae | 322 | IOSubMemoryDescriptor *self = new IOSubMemoryDescriptor; |
1c79356b | 323 | |
55e303ae A |
324 | if (self && !self->initSubRange(of, offset, length, direction)) { |
325 | self->release(); | |
326 | self = 0; | |
1c79356b | 327 | } |
55e303ae | 328 | return self; |
1c79356b A |
329 | } |
330 | ||
331 | /* | |
332 | * initWithAddress: | |
333 | * | |
334 | * Initialize an IOMemoryDescriptor. The buffer is a virtual address | |
335 | * relative to the specified task. If no task is supplied, the kernel | |
336 | * task is implied. | |
337 | * | |
338 | * An IOMemoryDescriptor can be re-used by calling initWithAddress or | |
339 | * initWithRanges again on an existing instance -- note this behavior | |
340 | * is not commonly supported in other I/O Kit classes, although it is | |
341 | * supported here. | |
342 | */ | |
343 | bool | |
344 | IOGeneralMemoryDescriptor::initWithAddress(void * address, | |
345 | IOByteCount withLength, | |
346 | IODirection withDirection) | |
347 | { | |
348 | _singleRange.v.address = (vm_address_t) address; | |
349 | _singleRange.v.length = withLength; | |
350 | ||
351 | return initWithRanges(&_singleRange.v, 1, withDirection, kernel_task, true); | |
352 | } | |
353 | ||
354 | bool | |
355 | IOGeneralMemoryDescriptor::initWithAddress(vm_address_t address, | |
356 | IOByteCount withLength, | |
357 | IODirection withDirection, | |
358 | task_t withTask) | |
359 | { | |
360 | _singleRange.v.address = address; | |
361 | _singleRange.v.length = withLength; | |
362 | ||
363 | return initWithRanges(&_singleRange.v, 1, withDirection, withTask, true); | |
364 | } | |
365 | ||
366 | bool | |
367 | IOGeneralMemoryDescriptor::initWithPhysicalAddress( | |
368 | IOPhysicalAddress address, | |
369 | IOByteCount withLength, | |
370 | IODirection withDirection ) | |
371 | { | |
372 | _singleRange.p.address = address; | |
373 | _singleRange.p.length = withLength; | |
374 | ||
375 | return initWithPhysicalRanges( &_singleRange.p, 1, withDirection, true); | |
376 | } | |
377 | ||
bool
IOGeneralMemoryDescriptor::initWithPhysicalRanges(
                                IOPhysicalRange * ranges,
                                UInt32            count,
                                IODirection       direction,
                                bool              reference)
{
    IOOptionBits mdOpts = direction | kIOMemoryTypePhysical;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    return initWithOptions(ranges, count, 0, 0, mdOpts, /* mapper */ 0);
}

bool
IOGeneralMemoryDescriptor::initWithRanges(
                                IOVirtualRange * ranges,
                                UInt32           count,
                                IODirection      direction,
                                task_t           task,
                                bool             reference)
{
    IOOptionBits mdOpts = direction;

    if (reference)
        mdOpts |= kIOMemoryAsReference;

    if (task) {
        mdOpts |= kIOMemoryTypeVirtual;
        if (task == kernel_task)
            mdOpts |= kIOMemoryAutoPrepare;
    }
    else
        mdOpts |= kIOMemoryTypePhysical;

    // @@@ gvdl: Need to remove this
    // Auto-prepare if this is a kernel memory descriptor as very few
    // clients bother to prepare() kernel memory.
    // But it has been enforced so what are you going to do?

    return initWithOptions(ranges, count, 0, task, mdOpts, /* mapper */ 0);
}
421 | ||
1c79356b | 422 | /* |
55e303ae | 423 | * initWithOptions: |
1c79356b | 424 | * |
55e303ae A |
425 | * IOMemoryDescriptor. The buffer is made up of several virtual address ranges, |
426 | * from a given task or several physical ranges or finally an UPL from the ubc | |
427 | * system. | |
1c79356b A |
428 | * |
429 | * Passing the ranges as a reference will avoid an extra allocation. | |
430 | * | |
55e303ae A |
431 | * An IOMemoryDescriptor can be re-used by calling initWithOptions again on an |
432 | * existing instance -- note this behavior is not commonly supported in other | |
433 | * I/O Kit classes, although it is supported here. | |
1c79356b | 434 | */ |
55e303ae A |
435 | |
436 | enum ioPLBlockFlags { | |
437 | kIOPLOnDevice = 0x00000001, | |
438 | kIOPLExternUPL = 0x00000002, | |
439 | }; | |
440 | ||
441 | struct ioPLBlock { | |
442 | upl_t fIOPL; | |
443 | vm_address_t fIOMDOffset; // The offset of this iopl in descriptor | |
444 | vm_offset_t fPageInfo; // Pointer to page list or index into it | |
445 | ppnum_t fMappedBase; // Page number of first page in this iopl | |
446 | unsigned int fPageOffset; // Offset within first page of iopl | |
447 | unsigned int fFlags; // Flags | |
448 | }; | |
449 | ||
450 | struct ioGMDData { | |
451 | IOMapper *fMapper; | |
452 | unsigned int fPageCnt; | |
453 | upl_page_info_t fPageList[0]; // @@@ gvdl need to get rid of this | |
454 | // should be able to use upl directly | |
455 | ioPLBlock fBlocks[0]; | |
456 | }; | |
457 | ||
458 | #define getDataP(osd) ((ioGMDData *) (osd)->getBytesNoCopy()) | |
459 | #define getIOPLList(d) ((ioPLBlock *) &(d->fPageList[d->fPageCnt])) | |
460 | #define getNumIOPL(d,len) \ | |
461 | ((len - ((char *) getIOPLList(d) - (char *) d)) / sizeof(ioPLBlock)) | |
462 | #define getPageList(d) (&(d->fPageList[0])) | |
463 | #define computeDataSize(p, u) \ | |
464 | (sizeof(ioGMDData) + p * sizeof(upl_page_info_t) + u * sizeof(ioPLBlock)) | |
465 | ||
bool
IOGeneralMemoryDescriptor::initWithOptions(void *       buffers,
                                           UInt32       count,
                                           UInt32       offset,
                                           task_t       task,
                                           IOOptionBits options,
                                           IOMapper *   mapper)
{

    switch (options & kIOMemoryTypeMask) {
    case kIOMemoryTypeVirtual:
        assert(task);
        if (!task)
            return false;
        else
            break;

    case kIOMemoryTypePhysical:		// Neither Physical nor UPL should have a task
        mapper = kIOMapperNone;
    case kIOMemoryTypeUPL:
        assert(!task);
        break;
    default:
        panic("IOGMD::iWO(): bad type");	// @@@ gvdl: for testing
        return false;	/* bad argument */
    }

    assert(buffers);
    assert(count);

    /*
     * We can check the _initialized instance variable before having ever set
     * it to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if (_initialized) {
        /*
         * An existing memory descriptor is being retargeted to point to
         * somewhere else.  Clean up our present state.
         */

        while (_wireCount)
            complete();
        if (_kernPtrAligned)
            unmapFromKernel();
        if (_ranges.v && _rangesIsAllocated)
            IODelete(_ranges.v, IOVirtualRange, _rangesCount);
    }
    else {
        if (!super::init())
            return false;
        _initialized = true;
    }

    // Grab the appropriate mapper
    if (mapper == kIOMapperNone)
        mapper = 0;	// No Mapper
    else if (!mapper) {
        IOMapper::checkForSystemMapper();
        gIOSystemMapper = mapper = IOMapper::gSystem;
    }

    _flags = options;
    _task = task;

    // DEPRECATED variable initialisation
    _direction = (IODirection) (_flags & kIOMemoryDirectionMask);
    _position = 0;
    _kernPtrAligned = 0;
    _cachedPhysicalAddress = 0;
    _cachedVirtualAddress = 0;

    if ( (options & kIOMemoryTypeMask) == kIOMemoryTypeUPL) {

        ioGMDData *dataP;
        unsigned int dataSize = computeDataSize(/* pages */ 0, /* upls */ 1);

        if (!_memoryEntries) {
            _memoryEntries = OSData::withCapacity(dataSize);
            if (!_memoryEntries)
                return false;
        }
        else if (!_memoryEntries->initWithCapacity(dataSize))
            return false;

        _memoryEntries->appendBytes(0, sizeof(ioGMDData));
        dataP = getDataP(_memoryEntries);
        dataP->fMapper = mapper;
        dataP->fPageCnt = 0;

        _wireCount++;	// UPLs start out life wired

        _length = count;
        _pages += atop_32(offset + count + PAGE_MASK) - atop_32(offset);

        ioPLBlock iopl;
        upl_page_info_t *pageList = UPL_GET_INTERNAL_PAGE_LIST((upl_t) buffers);

        iopl.fIOPL = (upl_t) buffers;
        // Set the flag kIOPLOnDevice, conveniently equal to 1
        iopl.fFlags = pageList->device | kIOPLExternUPL;
        iopl.fIOMDOffset = 0;
        if (!pageList->device) {
            // @@@ gvdl: Ask JoeS: are the pages contiguous with the list,
            // or is there a chance that we may be inserting 0 phys_addrs?
            // Pre-compute the offset into the UPL's page list
            pageList = &pageList[atop_32(offset)];
            offset &= PAGE_MASK;
            if (mapper) {
                iopl.fMappedBase = mapper->iovmAlloc(_pages);
                mapper->iovmInsert(iopl.fMappedBase, 0, pageList, _pages);
            }
            else
                iopl.fMappedBase = 0;
        }
        else
            iopl.fMappedBase = 0;
        iopl.fPageInfo = (vm_address_t) pageList;
        iopl.fPageOffset = offset;

        _memoryEntries->appendBytes(&iopl, sizeof(iopl));
    }
    else { /* kIOMemoryTypeVirtual | kIOMemoryTypePhysical */
        IOVirtualRange *ranges = (IOVirtualRange *) buffers;

        /*
         * Initialize the memory descriptor.
         */

        _length = 0;
        _pages  = 0;
        for (unsigned ind = 0; ind < count; ind++) {
            IOVirtualRange cur = ranges[ind];

            _length += cur.length;
            _pages  += atop_32(cur.address + cur.length + PAGE_MASK)
                    -  atop_32(cur.address);
        }

        _ranges.v = 0;
        _rangesIsAllocated = !(options & kIOMemoryAsReference);
        _rangesCount = count;

        if (options & kIOMemoryAsReference)
            _ranges.v = ranges;
        else {
            _ranges.v = IONew(IOVirtualRange, count);
            if (!_ranges.v)
                return false;
            bcopy(/* from */ ranges, _ranges.v,
                  count * sizeof(IOVirtualRange));
        }

        // Auto-prepare memory at creation time.
        // Implied completion when descriptor is freed.
        if ( (options & kIOMemoryTypeMask) == kIOMemoryTypePhysical)
            _wireCount++;	// Physical MDs start out wired
        else { /* kIOMemoryTypeVirtual */
            ioGMDData *dataP;
            unsigned int dataSize =
                computeDataSize(_pages, /* upls */ _rangesCount * 2);

            if (!_memoryEntries) {
                _memoryEntries = OSData::withCapacity(dataSize);
                if (!_memoryEntries)
                    return false;
            }
            else if (!_memoryEntries->initWithCapacity(dataSize))
                return false;

            _memoryEntries->appendBytes(0, sizeof(ioGMDData));
            dataP = getDataP(_memoryEntries);
            dataP->fMapper = mapper;
            dataP->fPageCnt = _pages;

            if (kIOMemoryPersistent & _flags)
            {
                kern_return_t error;
                ipc_port_t sharedMem;

                vm_size_t size = _pages << PAGE_SHIFT;
                vm_address_t startPage;

                startPage = trunc_page_32(_ranges.v[0].address);

                vm_map_t theMap = ((_task == kernel_task) && (kIOMemoryBufferPageable & _flags))
                                ? IOPageableMapForAddress(startPage)
                                : get_task_map(_task);

                vm_size_t actualSize = size;
                error = mach_make_memory_entry( theMap,
                            &actualSize, startPage,
                            VM_PROT_READ | VM_PROT_WRITE, &sharedMem,
                            NULL );

                if (KERN_SUCCESS == error) {
                    if (actualSize == round_page_32(size)) {
                        _memEntry = (void *) sharedMem;
                    } else {
#if IOASSERT
                        IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n",
                              startPage, (UInt32)actualSize, size);
#endif
                        ipc_port_release_send( sharedMem );
                    }
                }
            }

            if ((_flags & kIOMemoryAutoPrepare)
             && prepare() != kIOReturnSuccess)
                return false;
        }
    }

    return true;
}

/*
 * free
 *
 * Free resources.
 */
void IOGeneralMemoryDescriptor::free()
{
    LOCK;
    if( reserved)
        reserved->memory = 0;
    UNLOCK;

    while (_wireCount)
        complete();
    if (_memoryEntries)
        _memoryEntries->release();

    if (_kernPtrAligned)
        unmapFromKernel();
    if (_ranges.v && _rangesIsAllocated)
        IODelete(_ranges.v, IOVirtualRange, _rangesCount);

    if (reserved && reserved->devicePager)
        device_pager_deallocate( (memory_object_t) reserved->devicePager );

    // memEntry holds a ref on the device pager which owns reserved
    // (ExpansionData) so no reserved access after this point
    if (_memEntry)
        ipc_port_release_send( (ipc_port_t) _memEntry );

    super::free();
}
716 | ||
0b4e3aa0 A |
717 | /* DEPRECATED */ void IOGeneralMemoryDescriptor::unmapFromKernel() |
718 | /* DEPRECATED */ { | |
55e303ae | 719 | panic("IOGMD::unmapFromKernel deprecated"); |
0b4e3aa0 A |
720 | /* DEPRECATED */ } |
721 | /* DEPRECATED */ | |
722 | /* DEPRECATED */ void IOGeneralMemoryDescriptor::mapIntoKernel(unsigned rangeIndex) | |
723 | /* DEPRECATED */ { | |
55e303ae | 724 | panic("IOGMD::mapIntoKernel deprecated"); |
0b4e3aa0 | 725 | /* DEPRECATED */ } |

/*
 * getDirection:
 *
 * Get the direction of the transfer.
 */
IODirection IOMemoryDescriptor::getDirection() const
{
    return _direction;
}

/*
 * getLength:
 *
 * Get the length of the transfer (over all ranges).
 */
IOByteCount IOMemoryDescriptor::getLength() const
{
    return _length;
}

void IOMemoryDescriptor::setTag( IOOptionBits tag )
{
    _tag = tag;
}

IOOptionBits IOMemoryDescriptor::getTag( void )
{
    return( _tag);
}
756 | ||
55e303ae | 757 | // @@@ gvdl: who is using this API? Seems like a wierd thing to implement. |
0b4e3aa0 A |
758 | IOPhysicalAddress IOMemoryDescriptor::getSourceSegment( IOByteCount offset, |
759 | IOByteCount * length ) | |
760 | { | |
9bccf70c | 761 | IOPhysicalAddress physAddr = 0; |
1c79356b | 762 | |
9bccf70c A |
763 | if( prepare() == kIOReturnSuccess) { |
764 | physAddr = getPhysicalSegment( offset, length ); | |
765 | complete(); | |
766 | } | |
0b4e3aa0 A |
767 | |
768 | return( physAddr ); | |
769 | } | |
770 | ||
55e303ae A |
771 | IOByteCount IOMemoryDescriptor::readBytes |
772 | (IOByteCount offset, void *bytes, IOByteCount length) | |
1c79356b | 773 | { |
55e303ae A |
774 | addr64_t dstAddr = (addr64_t) (UInt32) bytes; |
775 | IOByteCount remaining; | |
1c79356b | 776 | |
55e303ae A |
777 | // Assert that this entire I/O is withing the available range |
778 | assert(offset < _length); | |
779 | assert(offset + length <= _length); | |
780 | if (offset >= _length) { | |
781 | IOLog("IOGMD(%p): rB = o%lx, l%lx\n", this, offset, length); // @@@ gvdl | |
782 | return 0; | |
783 | } | |
1c79356b | 784 | |
55e303ae A |
785 | remaining = length = min(length, _length - offset); |
786 | while (remaining) { // (process another target segment?) | |
787 | addr64_t srcAddr64; | |
788 | IOByteCount srcLen; | |
1c79356b | 789 | |
55e303ae A |
790 | srcAddr64 = getPhysicalSegment64(offset, &srcLen); |
791 | if (!srcAddr64) | |
792 | break; | |
1c79356b | 793 | |
55e303ae A |
794 | // Clip segment length to remaining |
795 | if (srcLen > remaining) | |
796 | srcLen = remaining; | |
1c79356b | 797 | |
55e303ae A |
798 | copypv(srcAddr64, dstAddr, srcLen, |
799 | cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap); | |
1c79356b | 800 | |
55e303ae A |
801 | dstAddr += srcLen; |
802 | offset += srcLen; | |
803 | remaining -= srcLen; | |
804 | } | |
1c79356b | 805 | |
55e303ae | 806 | assert(!remaining); |
1c79356b | 807 | |
55e303ae A |
808 | return length - remaining; |
809 | } | |
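
/*
 * Usage sketch (editorial illustration): copying the head of a descriptor
 * into a local structure.  The structure type and offset are hypothetical.
 *
 *	SomeHeader hdr;                        // hypothetical type
 *	IOByteCount got = md->readBytes(0, &hdr, sizeof(hdr));
 *	if (got != sizeof(hdr))
 *	    ; // short copy: a segment lookup failed mid-transfer
 */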

IOByteCount IOMemoryDescriptor::writeBytes
                (IOByteCount offset, const void *bytes, IOByteCount length)
{
    addr64_t srcAddr = (addr64_t) (UInt32) bytes;
    IOByteCount remaining;

    // Assert that this entire I/O is within the available range
    assert(offset < _length);
    assert(offset + length <= _length);

    assert( !(kIOMemoryPreparedReadOnly & _flags) );

    if ( (kIOMemoryPreparedReadOnly & _flags) || offset >= _length) {
        IOLog("IOGMD(%p): wB = o%lx, l%lx\n", this, offset, length);	// @@@ gvdl
        return 0;
    }

    remaining = length = min(length, _length - offset);
    while (remaining) {	// (process another target segment?)
        addr64_t    dstAddr64;
        IOByteCount dstLen;

        dstAddr64 = getPhysicalSegment64(offset, &dstLen);
        if (!dstAddr64)
            break;

        // Clip segment length to remaining
        if (dstLen > remaining)
            dstLen = remaining;

        copypv(srcAddr, (addr64_t) dstAddr64, dstLen,
               cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);

        srcAddr   += dstLen;
        offset    += dstLen;
        remaining -= dstLen;
    }

    assert(!remaining);

    return length - remaining;
}
853 | ||
55e303ae A |
854 | // osfmk/device/iokit_rpc.c |
855 | extern "C" unsigned int IODefaultCacheBits(addr64_t pa); | |
1c79356b | 856 | |
55e303ae A |
857 | /* DEPRECATED */ void IOGeneralMemoryDescriptor::setPosition(IOByteCount position) |
858 | /* DEPRECATED */ { | |
859 | panic("IOGMD::setPosition deprecated"); | |
860 | /* DEPRECATED */ } | |

IOPhysicalAddress IOGeneralMemoryDescriptor::getPhysicalSegment
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress address = 0;
    IOPhysicalLength  length  = 0;

//  assert(offset <= _length);
    if (offset < _length) // (within bounds?)
    {
        if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) {
            unsigned int ind;

            // Physical address based memory descriptor

            // Find offset within descriptor and make it relative
            // to the current _range.
            for (ind = 0 ; offset >= _ranges.p[ind].length; ind++ )
                offset -= _ranges.p[ind].length;

            IOPhysicalRange cur = _ranges.p[ind];
            address = cur.address + offset;
            length  = cur.length  - offset;

            // see how far we can coalesce ranges
            for (++ind; ind < _rangesCount; ind++) {
                cur = _ranges.p[ind];

                if (address + length != cur.address)
                    break;

                length += cur.length;
            }

            // @@@ gvdl: should assert(address);
            // but can't as NVidia GeForce creates a bogus physical mem
            {
                assert(address || /*nvidia*/(!_ranges.p[0].address && 1 == _rangesCount));
            }
            assert(length);
        }
        else do {
            // We need wiring & we are wired.
            assert(_wireCount);

            if (!_wireCount)
            {
                panic("IOGMD: not wired for getPhysicalSegment()");
                continue;
            }

            assert(_memoryEntries);

            ioGMDData * dataP = getDataP(_memoryEntries);
            const ioPLBlock *ioplList = getIOPLList(dataP);
            UInt ind, numIOPLs = getNumIOPL(dataP, _memoryEntries->getLength());
            upl_page_info_t *pageList = getPageList(dataP);

            assert(numIOPLs > 0);

            // Scan through iopl info blocks looking for block containing offset
            for (ind = 1; ind < numIOPLs; ind++) {
                if (offset < ioplList[ind].fIOMDOffset)
                    break;
            }

            // Go back to actual range as search goes past it
            ioPLBlock ioplInfo = ioplList[ind - 1];

            if (ind < numIOPLs)
                length = ioplList[ind].fIOMDOffset;
            else
                length = _length;
            length -= offset;			// Remainder within iopl

            // Subtract offset till this iopl in total list
            offset -= ioplInfo.fIOMDOffset;

            // This is a mapped IOPL so we just need to compute an offset
            // relative to the mapped base.
            if (ioplInfo.fMappedBase) {
                offset += (ioplInfo.fPageOffset & PAGE_MASK);
                address = ptoa_32(ioplInfo.fMappedBase) + offset;
                continue;
            }

            // Currently the offset is rebased into the current iopl.
            // Now add the iopl 1st page offset.
            offset += ioplInfo.fPageOffset;

            // For external UPLs the fPageInfo field points directly to
            // the upl's upl_page_info_t array.
            if (ioplInfo.fFlags & kIOPLExternUPL)
                pageList = (upl_page_info_t *) ioplInfo.fPageInfo;
            else
                pageList = &pageList[ioplInfo.fPageInfo];

            // Check for direct device non-paged memory
            if ( ioplInfo.fFlags & kIOPLOnDevice ) {
                address = ptoa_32(pageList->phys_addr) + offset;
                continue;
            }

            // Now we need to compute the index into the pageList
            ind = atop_32(offset);
            offset &= PAGE_MASK;

            IOPhysicalAddress pageAddr = pageList[ind].phys_addr;
            address = ptoa_32(pageAddr) + offset;

            // Check for the remaining data in this upl being longer than the
            // remainder on the current page.  This should be checked for
            // contiguous pages
            if (length > PAGE_SIZE - offset) {
                // See if the next page is contiguous.  Stop looking when we hit
                // the end of this upl, which is indicated by the
                // contigLength >= length.
                IOByteCount contigLength = PAGE_SIZE - offset;

                // Look for contiguous segment
                while (contigLength < length
                &&     ++pageAddr == pageList[++ind].phys_addr) {
                    contigLength += PAGE_SIZE;
                }
                if (length > contigLength)
                    length = contigLength;
            }

            assert(address);
            assert(length);

        } while (0);

        if (!address)
            length = 0;
    }

    if (lengthOfSegment)
        *lengthOfSegment = length;

    return address;
}
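
/*
 * Usage sketch (editorial illustration): walking a prepared descriptor's
 * physical segments, e.g. to build a scatter/gather list.  The helper
 * appendToSGList() is hypothetical.
 *
 *	IOByteCount offset = 0, segLen;
 *	IOPhysicalAddress segPhys;
 *	while ((segPhys = md->getPhysicalSegment(offset, &segLen))) {
 *	    appendToSGList(segPhys, segLen);   // hypothetical helper
 *	    offset += segLen;
 *	}
 */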

addr64_t IOMemoryDescriptor::getPhysicalSegment64
                        (IOByteCount offset, IOByteCount *lengthOfSegment)
{
    IOPhysicalAddress phys32;
    IOByteCount       length;
    addr64_t          phys64;

    phys32 = getPhysicalSegment(offset, lengthOfSegment);
    if (!phys32)
        return 0;

    if (gIOSystemMapper)
    {
        IOByteCount origLen;

        phys64 = gIOSystemMapper->mapAddr(phys32);
        origLen = *lengthOfSegment;
        length = page_size - (phys64 & (page_size - 1));
        while ((length < origLen)
            && ((phys64 + length) == gIOSystemMapper->mapAddr(phys32 + length)))
            length += page_size;
        if (length > origLen)
            length = origLen;

        *lengthOfSegment = length;
    }
    else
        phys64 = (addr64_t) phys32;

    return phys64;
}
1035 | ||
55e303ae A |
1036 | IOPhysicalAddress IOGeneralMemoryDescriptor::getSourceSegment |
1037 | (IOByteCount offset, IOByteCount *lengthOfSegment) | |
1c79356b | 1038 | { |
0b4e3aa0 A |
1039 | IOPhysicalAddress address = 0; |
1040 | IOPhysicalLength length = 0; | |
1c79356b | 1041 | |
0b4e3aa0 | 1042 | assert(offset <= _length); |
1c79356b | 1043 | |
55e303ae A |
1044 | if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeUPL) |
1045 | return super::getSourceSegment( offset, lengthOfSegment ); | |
1046 | ||
0b4e3aa0 | 1047 | if ( offset < _length ) // (within bounds?) |
1c79356b | 1048 | { |
0b4e3aa0 | 1049 | unsigned rangesIndex = 0; |
1c79356b | 1050 | |
0b4e3aa0 A |
1051 | for ( ; offset >= _ranges.v[rangesIndex].length; rangesIndex++ ) |
1052 | { | |
1053 | offset -= _ranges.v[rangesIndex].length; // (make offset relative) | |
1054 | } | |
1c79356b | 1055 | |
0b4e3aa0 A |
1056 | address = _ranges.v[rangesIndex].address + offset; |
1057 | length = _ranges.v[rangesIndex].length - offset; | |
1c79356b | 1058 | |
0b4e3aa0 A |
1059 | for ( ++rangesIndex; rangesIndex < _rangesCount; rangesIndex++ ) |
1060 | { | |
1061 | if ( address + length != _ranges.v[rangesIndex].address ) break; | |
1c79356b | 1062 | |
0b4e3aa0 A |
1063 | length += _ranges.v[rangesIndex].length; // (coalesce ranges) |
1064 | } | |
1c79356b | 1065 | |
0b4e3aa0 A |
1066 | assert(address); |
1067 | if ( address == 0 ) length = 0; | |
1c79356b | 1068 | } |
0b4e3aa0 A |
1069 | |
1070 | if ( lengthOfSegment ) *lengthOfSegment = length; | |
1071 | ||
1072 | return address; | |
1073 | } | |
1074 | ||
1075 | /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ | |
1076 | /* DEPRECATED */ void * IOGeneralMemoryDescriptor::getVirtualSegment(IOByteCount offset, | |
1077 | /* DEPRECATED */ IOByteCount * lengthOfSegment) | |
1078 | /* DEPRECATED */ { | |
55e303ae A |
1079 | if (_task == kernel_task) |
1080 | return (void *) getSourceSegment(offset, lengthOfSegment); | |
1081 | else | |
1082 | panic("IOGMD::getVirtualSegment deprecated"); | |
1083 | ||
1084 | return 0; | |
0b4e3aa0 A |
1085 | /* DEPRECATED */ } |
1086 | /* DEPRECATED */ /* USE INSTEAD: map(), readBytes(), writeBytes() */ | |
1c79356b | 1087 | |
#ifdef __ppc__
extern vm_offset_t static_memory_end;
#define io_kernel_static_end	static_memory_end
#else
extern vm_offset_t first_avail;
#define io_kernel_static_end	first_avail
#endif

static kern_return_t
io_get_kernel_static_upl(
    vm_map_t              map,
    vm_address_t          offset,
    vm_size_t             *upl_size,
    upl_t                 *upl,
    upl_page_info_array_t page_list,
    unsigned int          *count,
    int                   *flags,
    int                   force_data_sync)
{
    unsigned int pageCount, page;
    ppnum_t phys;

    pageCount = atop_32(*upl_size);
    if (pageCount > *count)
        pageCount = *count;

    *upl = NULL;

    for (page = 0; page < pageCount; page++)
    {
        phys = pmap_find_phys(kernel_pmap, ((addr64_t)offset) + ptoa_64(page));
        if (!phys)
            break;
        page_list[page].phys_addr = phys;
        page_list[page].pageout   = 0;
        page_list[page].absent    = 0;
        page_list[page].dirty     = 0;
        page_list[page].precious  = 0;
        page_list[page].device    = 0;
    }

    return ((page >= pageCount) ? kIOReturnSuccess : kIOReturnVMError);
}

IOReturn IOGeneralMemoryDescriptor::wireVirtual(IODirection forDirection)
{
    IOReturn error = kIOReturnNoMemory;
    ioGMDData *dataP;
    ppnum_t mapBase = 0;
    IOMapper *mapper;
    ipc_port_t sharedMem = (ipc_port_t) _memEntry;

    assert(!_wireCount);

    if (_pages >= gIOMaximumMappedIOPageCount)
        return kIOReturnNoResources;

    dataP = getDataP(_memoryEntries);
    mapper = dataP->fMapper;
    if (mapper && _pages)
        mapBase = mapper->iovmAlloc(_pages);

    // Note that appendBytes(NULL) zeros the data up to the
    // desired length.
    _memoryEntries->appendBytes(0, dataP->fPageCnt * sizeof(upl_page_info_t));
    dataP = 0;	// May no longer be valid so lets not get tempted.

    if (forDirection == kIODirectionNone)
        forDirection = _direction;

    int uplFlags;    // This Mem Desc's default flags for upl creation
    switch (forDirection)
    {
    case kIODirectionOut:
        // Pages do not need to be marked as dirty on commit
        uplFlags = UPL_COPYOUT_FROM;
        _flags |= kIOMemoryPreparedReadOnly;
        break;

    case kIODirectionIn:
    default:
        uplFlags = 0;	// i.e. ~UPL_COPYOUT_FROM
        break;
    }
    uplFlags |= UPL_SET_IO_WIRE | UPL_SET_LITE;

    //
    // Check user read/write access to the data buffer.
    //
    unsigned int pageIndex = 0;
    IOByteCount mdOffset = 0;
    vm_map_t curMap;
    if (_task == kernel_task && (kIOMemoryBufferPageable & _flags))
        curMap = 0;
    else
        { curMap = get_task_map(_task); }

    for (UInt range = 0; range < _rangesCount; range++) {
        ioPLBlock iopl;
        IOVirtualRange curRange = _ranges.v[range];
        vm_address_t startPage;
        IOByteCount numBytes;

        startPage = trunc_page_32(curRange.address);
        iopl.fPageOffset = (short) curRange.address & PAGE_MASK;
        if (mapper)
            iopl.fMappedBase = mapBase + pageIndex;
        else
            iopl.fMappedBase = 0;
        numBytes = iopl.fPageOffset + curRange.length;

        while (numBytes) {
            dataP = getDataP(_memoryEntries);
            vm_map_t theMap =
                (curMap)? curMap
                        : IOPageableMapForAddress(startPage);
            upl_page_info_array_t pageInfo = getPageList(dataP);
            int ioplFlags = uplFlags;
            upl_page_list_ptr_t baseInfo = &pageInfo[pageIndex];

            vm_size_t ioplSize = round_page_32(numBytes);
            unsigned int numPageInfo = atop_32(ioplSize);

            if ((theMap == kernel_map) && (startPage < io_kernel_static_end))
            {
                error = io_get_kernel_static_upl(theMap,
                                                 startPage,
                                                 &ioplSize,
                                                 &iopl.fIOPL,
                                                 baseInfo,
                                                 &numPageInfo,
                                                 &ioplFlags,
                                                 false);

            } else if (sharedMem && (kIOMemoryPersistent & _flags)) {

                error = memory_object_iopl_request(sharedMem,
                                                   ptoa_32(pageIndex),
                                                   &ioplSize,
                                                   &iopl.fIOPL,
                                                   baseInfo,
                                                   &numPageInfo,
                                                   &ioplFlags);

            } else {
                error = vm_map_get_upl(theMap,
                                       startPage,
                                       &ioplSize,
                                       &iopl.fIOPL,
                                       baseInfo,
                                       &numPageInfo,
                                       &ioplFlags,
                                       false);
            }

            assert(ioplSize);
            if (error != KERN_SUCCESS)
                goto abortExit;

            error = kIOReturnNoMemory;

            if (baseInfo->device) {
                numPageInfo = 1;
                iopl.fFlags = kIOPLOnDevice;
                // Don't translate device memory at all
                if (mapper && mapBase) {
                    mapper->iovmFree(mapBase, _pages);
                    mapBase = 0;
                    iopl.fMappedBase = 0;
                }
            }
            else {
                iopl.fFlags = 0;
                if (mapper)
                    mapper->iovmInsert(mapBase, pageIndex,
                                       baseInfo, numPageInfo);
            }

            iopl.fIOMDOffset = mdOffset;
            iopl.fPageInfo = pageIndex;

            if ((_flags & kIOMemoryAutoPrepare) && iopl.fIOPL)
            {
                kernel_upl_commit(iopl.fIOPL, 0, 0);
                iopl.fIOPL = 0;
            }

            if (!_memoryEntries->appendBytes(&iopl, sizeof(iopl))) {
                // Clean up a partially created and unsaved iopl
                if (iopl.fIOPL)
                    kernel_upl_abort(iopl.fIOPL, 0);
                goto abortExit;
            }

            // Check for multiple iopls in one virtual range
            pageIndex += numPageInfo;
            mdOffset -= iopl.fPageOffset;
            if (ioplSize < numBytes) {
                numBytes -= ioplSize;
                startPage += ioplSize;
                mdOffset += ioplSize;
                iopl.fPageOffset = 0;
                if (mapper)
                    iopl.fMappedBase = mapBase + pageIndex;
            }
            else {
                mdOffset += numBytes;
                break;
            }
        }
    }

    return kIOReturnSuccess;

abortExit:
    {
        dataP = getDataP(_memoryEntries);
        UInt done = getNumIOPL(dataP, _memoryEntries->getLength());
        ioPLBlock *ioplList = getIOPLList(dataP);

        for (UInt range = 0; range < done; range++)
        {
            if (ioplList[range].fIOPL)
                kernel_upl_abort(ioplList[range].fIOPL, 0);
        }

        if (mapper && mapBase)
            mapper->iovmFree(mapBase, _pages);
    }

    return error;
}

/*
 * prepare
 *
 * Prepare the memory for an I/O transfer.  This involves paging in
 * the memory, if necessary, and wiring it down for the duration of
 * the transfer.  The complete() method completes the processing of
 * the memory after the I/O transfer finishes.  This method needn't
 * be called for non-pageable memory.
 */
IOReturn IOGeneralMemoryDescriptor::prepare(IODirection forDirection)
{
    IOReturn error = kIOReturnSuccess;

    if (!_wireCount && (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) {
        error = wireVirtual(forDirection);
        if (error)
            return error;
    }

    _wireCount++;

    return kIOReturnSuccess;
}
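
/*
 * Usage sketch (editorial illustration): prepare() and complete() must
 * bracket the transfer; doRead() stands in for a hypothetical I/O that
 * uses the wired pages.
 *
 *	if (md->prepare() == kIOReturnSuccess) {
 *	    doRead(md);                        // hypothetical I/O
 *	    md->complete();
 *	}
 */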
1344 | ||
1345 | /* | |
1346 | * complete | |
1347 | * | |
1348 | * Complete processing of the memory after an I/O transfer finishes. | |
1349 | * This method should not be called unless a prepare was previously | |
1350 | * issued; the prepare() and complete() must occur in pairs, before | |
1351 | * before and after an I/O transfer involving pageable memory. | |
1352 | */ | |
1353 | ||
55e303ae | 1354 | IOReturn IOGeneralMemoryDescriptor::complete(IODirection /* forDirection */) |
1c79356b A |
1355 | { |
1356 | assert(_wireCount); | |
1357 | ||
55e303ae | 1358 | if (!_wireCount) |
1c79356b A |
1359 | return kIOReturnSuccess; |
1360 | ||
1361 | _wireCount--; | |
55e303ae A |
1362 | if (!_wireCount) { |
1363 | if ((_flags & kIOMemoryTypeMask) == kIOMemoryTypePhysical) { | |
1364 | /* kIOMemoryTypePhysical */ | |
1365 | // DO NOTHING | |
d7e50217 | 1366 | } |
55e303ae A |
1367 | else { |
1368 | ioGMDData * dataP = getDataP(_memoryEntries); | |
1369 | ioPLBlock *ioplList = getIOPLList(dataP); | |
1370 | UInt count = getNumIOPL(dataP, _memoryEntries->getLength()); | |
1371 | ||
1372 | if (dataP->fMapper && _pages && ioplList[0].fMappedBase) | |
1373 | dataP->fMapper->iovmFree(ioplList[0].fMappedBase, _pages); | |
1374 | ||
1375 | // Only complete iopls that we created which are for TypeVirtual | |
1376 | if ( (_flags & kIOMemoryTypeMask) == kIOMemoryTypeVirtual) { | |
1377 | for (UInt ind = 0; ind < count; ind++) | |
1378 | if (ioplList[ind].fIOPL) | |
1379 | kernel_upl_commit(ioplList[ind].fIOPL, 0, 0); | |
1380 | } | |
de355530 | 1381 | |
55e303ae A |
1382 | (void) _memoryEntries->initWithBytes(dataP, sizeof(ioGMDData)); // == setLength() |
1383 | } | |
1c79356b A |
1384 | } |
1385 | return kIOReturnSuccess; | |
1386 | } | |
1387 | ||
1388 | IOReturn IOGeneralMemoryDescriptor::doMap( | |
1389 | vm_map_t addressMap, | |
1390 | IOVirtualAddress * atAddress, | |
1391 | IOOptionBits options, | |
55e303ae A |
1392 | IOByteCount sourceOffset, |
1393 | IOByteCount length ) | |
1c79356b A |
1394 | { |
1395 | kern_return_t kr; | |
0b4e3aa0 | 1396 | ipc_port_t sharedMem = (ipc_port_t) _memEntry; |
1c79356b A |
1397 | |
1398 | // mapping source == dest? (could be much better) | |
1399 | if( _task && (addressMap == get_task_map(_task)) && (options & kIOMapAnywhere) | |
1400 | && (1 == _rangesCount) && (0 == sourceOffset) | |
1401 | && (length <= _ranges.v[0].length) ) { | |
1402 | *atAddress = _ranges.v[0].address; | |
1403 | return( kIOReturnSuccess ); | |
1404 | } | |
1405 | ||
0b4e3aa0 | 1406 | if( 0 == sharedMem) { |
1c79356b | 1407 | |
55e303ae | 1408 | vm_size_t size = _pages << PAGE_SHIFT; |
1c79356b | 1409 | |
0b4e3aa0 | 1410 | if( _task) { |
9bccf70c A |
1411 | #ifndef i386 |
1412 | vm_size_t actualSize = size; | |
1413 | kr = mach_make_memory_entry( get_task_map(_task), | |
0b4e3aa0 A |
1414 | &actualSize, _ranges.v[0].address, |
1415 | VM_PROT_READ | VM_PROT_WRITE, &sharedMem, | |
1416 | NULL ); | |
1417 | ||
55e303ae | 1418 | if( (KERN_SUCCESS == kr) && (actualSize != round_page_32(size))) { |
0b4e3aa0 | 1419 | #if IOASSERT |
55e303ae | 1420 | IOLog("mach_make_memory_entry_64 (%08x) size (%08lx:%08x)\n", |
0b4e3aa0 A |
1421 | _ranges.v[0].address, (UInt32)actualSize, size); |
1422 | #endif | |
1423 | kr = kIOReturnVMError; | |
1424 | ipc_port_release_send( sharedMem ); | |
1c79356b A |
1425 | } |
1426 | ||
0b4e3aa0 | 1427 | if( KERN_SUCCESS != kr) |
9bccf70c | 1428 | #endif /* i386 */ |
0b4e3aa0 | 1429 | sharedMem = MACH_PORT_NULL; |
1c79356b | 1430 | |
0b4e3aa0 A |
1431 | } else do { |
1432 | ||
55e303ae A |
1433 | memory_object_t pager; |
1434 | unsigned int flags = 0; | |
1435 | addr64_t pa; | |
9bccf70c A |
1436 | IOPhysicalLength segLen; |
1437 | ||
55e303ae | 1438 | pa = getPhysicalSegment64( sourceOffset, &segLen ); |
0b4e3aa0 A |
1439 | |
1440 | if( !reserved) { | |
1441 | reserved = IONew( ExpansionData, 1 ); | |
1442 | if( !reserved) | |
1443 | continue; | |
1444 | } | |
1445 | reserved->pagerContig = (1 == _rangesCount); | |
9bccf70c A |
1446 | reserved->memory = this; |
1447 | ||
55e303ae A |
1448 | /*What cache mode do we need*/ |
1449 | switch(options & kIOMapCacheMask ) { | |
9bccf70c A |
1450 | |
1451 | case kIOMapDefaultCache: | |
1452 | default: | |
55e303ae A |
1453 | flags = IODefaultCacheBits(pa); |
1454 | break; | |
9bccf70c A |
1455 | |
1456 | case kIOMapInhibitCache: | |
55e303ae A |
1457 | flags = DEVICE_PAGER_CACHE_INHIB | |
1458 | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; | |
1459 | break; | |
9bccf70c A |
1460 | |
1461 | case kIOMapWriteThruCache: | |
55e303ae A |
1462 | flags = DEVICE_PAGER_WRITE_THROUGH | |
1463 | DEVICE_PAGER_COHERENT | DEVICE_PAGER_GUARDED; | |
1464 | break; | |
9bccf70c A |
1465 | |
1466 | case kIOMapCopybackCache: | |
55e303ae A |
1467 | flags = DEVICE_PAGER_COHERENT; |
1468 | break; | |
1469 | ||
1470 | case kIOMapWriteCombineCache: | |
1471 | flags = DEVICE_PAGER_CACHE_INHIB | | |
1472 | DEVICE_PAGER_COHERENT; | |
1473 | break; | |
9bccf70c A |
1474 | } |
1475 | ||
1476 | flags |= reserved->pagerContig ? DEVICE_PAGER_CONTIGUOUS : 0; | |
9bccf70c A |
1477 | |
1478 | pager = device_pager_setup( (memory_object_t) 0, (int) reserved, | |
1479 | size, flags); | |
0b4e3aa0 A |
1480 | assert( pager ); |
1481 | ||
1482 | if( pager) { | |
0b4e3aa0 A |
1483 | kr = mach_memory_object_memory_entry_64( (host_t) 1, false /*internal*/, |
1484 | size, VM_PROT_READ | VM_PROT_WRITE, pager, &sharedMem ); | |
1485 | ||
1486 | assert( KERN_SUCCESS == kr ); | |
1487 | if( KERN_SUCCESS != kr) { | |
9bccf70c | 1488 | device_pager_deallocate( pager ); |
0b4e3aa0 A |
1489 | pager = MACH_PORT_NULL; |
1490 | sharedMem = MACH_PORT_NULL; | |
1491 | } | |
1492 | } | |
9bccf70c A |
1493 | if( pager && sharedMem) |
1494 | reserved->devicePager = pager; | |
1495 | else { | |
1496 | IODelete( reserved, ExpansionData, 1 ); | |
1497 | reserved = 0; | |
1498 | } | |
1c79356b | 1499 | |
1c79356b A |
1500 | } while( false ); |
1501 | ||
0b4e3aa0 A |
1502 | _memEntry = (void *) sharedMem; |
1503 | } | |
1504 | ||
9bccf70c A |
1505 | #ifndef i386 |
1506 | if( 0 == sharedMem) | |
1507 | kr = kIOReturnVMError; | |
1508 | else | |
1509 | #endif | |
1510 | kr = super::doMap( addressMap, atAddress, | |
1c79356b | 1511 | options, sourceOffset, length ); |
0b4e3aa0 | 1512 | |
1c79356b A |
1513 | return( kr ); |
1514 | } | |
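
/*
 * Usage sketch (editorial illustration): doMap() is normally reached via
 * the public map() call; a driver asking for an uncached mapping might
 * write something like the following (values hypothetical).
 *
 *	IOMemoryMap * map = md->map(kIOMapAnywhere | kIOMapInhibitCache);
 *	if (map) {
 *	    IOVirtualAddress va = map->getVirtualAddress();
 *	    // ... use va ...
 *	    map->release();
 *	}
 */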
1515 | ||
1516 | IOReturn IOGeneralMemoryDescriptor::doUnmap( | |
1517 | vm_map_t addressMap, | |
1518 | IOVirtualAddress logical, | |
1519 | IOByteCount length ) | |
1520 | { | |
1521 | // could be much better | |
55e303ae | 1522 | if( _task && (addressMap == get_task_map(_task)) && (1 == _rangesCount) |
1c79356b A |
1523 | && (logical == _ranges.v[0].address) |
1524 | && (length <= _ranges.v[0].length) ) | |
1525 | return( kIOReturnSuccess ); | |
1526 | ||
1527 | return( super::doUnmap( addressMap, logical, length )); | |
1528 | } | |
1529 | ||
1530 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1531 | ||
1532 | extern "C" { | |
1533 | // osfmk/device/iokit_rpc.c | |
1534 | extern kern_return_t IOMapPages( vm_map_t map, vm_offset_t va, vm_offset_t pa, | |
1535 | vm_size_t length, unsigned int mapFlags); | |
e3027f41 | 1536 | extern kern_return_t IOUnmapPages(vm_map_t map, vm_offset_t va, vm_size_t length); |
1c79356b A |
1537 | }; |
1538 | ||
1539 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1540 | ||
9bccf70c | 1541 | OSDefineMetaClassAndAbstractStructors( IOMemoryMap, OSObject ) |
1c79356b | 1542 | |
9bccf70c A |
1543 | /* inline function implementation */ |
1544 | IOPhysicalAddress IOMemoryMap::getPhysicalAddress() | |
1545 | { return( getPhysicalSegment( 0, 0 )); } | |
1c79356b A |
1546 | |
1547 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1548 | ||
1549 | class _IOMemoryMap : public IOMemoryMap | |
1550 | { | |
1551 | OSDeclareDefaultStructors(_IOMemoryMap) | |
1552 | ||
1553 | IOMemoryDescriptor * memory; | |
1554 | IOMemoryMap * superMap; | |
1555 | IOByteCount offset; | |
1556 | IOByteCount length; | |
1557 | IOVirtualAddress logical; | |
1558 | task_t addressTask; | |
1559 | vm_map_t addressMap; | |
1560 | IOOptionBits options; | |
1561 | ||
9bccf70c A |
1562 | protected: |
1563 | virtual void taggedRelease(const void *tag = 0) const; | |
1c79356b A |
1564 | virtual void free(); |
1565 | ||
9bccf70c A |
1566 | public: |
1567 | ||
1c79356b A |
1568 | // IOMemoryMap methods |
1569 | virtual IOVirtualAddress getVirtualAddress(); | |
1570 | virtual IOByteCount getLength(); | |
1571 | virtual task_t getAddressTask(); | |
1572 | virtual IOMemoryDescriptor * getMemoryDescriptor(); | |
1573 | virtual IOOptionBits getMapOptions(); | |
1574 | ||
1575 | virtual IOReturn unmap(); | |
1576 | virtual void taskDied(); | |
1577 | ||
1578 | virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset, | |
1579 | IOByteCount * length); | |
1580 | ||
1581 | // for IOMemoryDescriptor use | |
9bccf70c | 1582 | _IOMemoryMap * copyCompatible( |
1c79356b A |
1583 | IOMemoryDescriptor * owner, |
1584 | task_t intoTask, | |
1585 | IOVirtualAddress toAddress, | |
1586 | IOOptionBits options, | |
1587 | IOByteCount offset, | |
1588 | IOByteCount length ); | |
1589 | ||
9bccf70c | 1590 | bool initCompatible( |
1c79356b A |
1591 | IOMemoryDescriptor * memory, |
1592 | IOMemoryMap * superMap, | |
1593 | IOByteCount offset, | |
1594 | IOByteCount length ); | |
1595 | ||
9bccf70c | 1596 | bool initWithDescriptor( |
1c79356b A |
1597 | IOMemoryDescriptor * memory, |
1598 | task_t intoTask, | |
1599 | IOVirtualAddress toAddress, | |
1600 | IOOptionBits options, | |
1601 | IOByteCount offset, | |
1602 | IOByteCount length ); | |
e3027f41 A |
1603 | |
1604 | IOReturn redirect( | |
1605 | task_t intoTask, bool redirect ); | |
1c79356b A |
1606 | }; |
1607 | ||
1608 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1609 | ||
1610 | #undef super | |
1611 | #define super IOMemoryMap | |
1612 | ||
1613 | OSDefineMetaClassAndStructors(_IOMemoryMap, IOMemoryMap) | |
1614 | ||
1615 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | |
1616 | ||
bool _IOMemoryMap::initCompatible(
        IOMemoryDescriptor * _memory,
        IOMemoryMap *        _superMap,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    if( !super::init())
        return( false);

    if( (_offset + _length) > _superMap->getLength())
        return( false);

    _memory->retain();
    memory = _memory;
    _superMap->retain();
    superMap = _superMap;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    options = superMap->getMapOptions();
    logical = superMap->getVirtualAddress() + offset;

    return( true );
}

bool _IOMemoryMap::initWithDescriptor(
        IOMemoryDescriptor * _memory,
        task_t               intoTask,
        IOVirtualAddress     toAddress,
        IOOptionBits         _options,
        IOByteCount          _offset,
        IOByteCount          _length )
{
    bool ok;

    if( (!_memory) || (!intoTask) || !super::init())
        return( false);

    if( (_offset + _length) > _memory->getLength())
        return( false);

    addressMap = get_task_map(intoTask);
    if( !addressMap)
        return( false);
    vm_map_reference(addressMap);

    _memory->retain();
    memory = _memory;

    offset = _offset;
    if( _length)
        length = _length;
    else
        length = _memory->getLength();

    addressTask = intoTask;
    logical     = toAddress;
    options     = _options;

    if( options & kIOMapStatic)
        ok = true;
    else
        ok = (kIOReturnSuccess == memory->doMap( addressMap, &logical,
                                                 options, offset, length ));
    if( !ok) {
        logical = 0;
        memory->release();
        memory = 0;
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    return( ok );
}

struct IOMemoryDescriptorMapAllocRef
{
    ipc_port_t    sharedMem;
    vm_size_t     size;
    vm_offset_t   mapped;
    IOByteCount   sourceOffset;
    IOOptionBits  options;
};

static kern_return_t IOMemoryDescriptorMapAlloc(vm_map_t map, void * _ref)
{
    IOMemoryDescriptorMapAllocRef * ref = (IOMemoryDescriptorMapAllocRef *)_ref;
    IOReturn                        err;

    do {
        if( ref->sharedMem) {
            vm_prot_t prot = VM_PROT_READ
                            | ((ref->options & kIOMapReadOnly) ? 0 : VM_PROT_WRITE);

            // set the memory entry's cache mode from the map options
            vm_prot_t memEntryCacheMode = prot | MAP_MEM_ONLY;
            switch (ref->options & kIOMapCacheMask)
            {
                case kIOMapInhibitCache:
                    SET_MAP_MEM(MAP_MEM_IO, memEntryCacheMode);
                    break;

                case kIOMapWriteThruCache:
                    SET_MAP_MEM(MAP_MEM_WTHRU, memEntryCacheMode);
                    break;

                case kIOMapWriteCombineCache:
                    SET_MAP_MEM(MAP_MEM_WCOMB, memEntryCacheMode);
                    break;

                case kIOMapCopybackCache:
                    SET_MAP_MEM(MAP_MEM_COPYBACK, memEntryCacheMode);
                    break;

                case kIOMapDefaultCache:
                default:
                    SET_MAP_MEM(MAP_MEM_NOOP, memEntryCacheMode);
                    break;
            }

            vm_size_t unused = 0;

            err = mach_make_memory_entry( NULL /*unused*/, &unused, 0 /*unused*/,
                                          memEntryCacheMode, NULL, ref->sharedMem );
            if (KERN_SUCCESS != err)
                IOLog("MAP_MEM_ONLY failed %d\n", err);

            err = vm_map( map,
                          &ref->mapped,
                          ref->size, 0 /* mask */,
                          (( ref->options & kIOMapAnywhere ) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                          | VM_MAKE_TAG(VM_MEMORY_IOKIT),
                          ref->sharedMem, ref->sourceOffset,
                          false, // copy
                          prot, // cur
                          prot, // max
                          VM_INHERIT_NONE);

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

        } else {

            err = vm_allocate( map, &ref->mapped, ref->size,
                               ((ref->options & kIOMapAnywhere) ? VM_FLAGS_ANYWHERE : VM_FLAGS_FIXED)
                               | VM_MAKE_TAG(VM_MEMORY_IOKIT) );

            if( KERN_SUCCESS != err) {
                ref->mapped = 0;
                continue;
            }

            // prevent the allocation from being copied into a child on fork()
            err = vm_inherit( map, ref->mapped, ref->size, VM_INHERIT_NONE);
            assert( KERN_SUCCESS == err );
        }

    } while( false );

    return( err );
}

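/*
 * Illustrative sketch (not part of the original source): the cache-mode
 * switch above is driven by the kIOMapCacheMask bits of the caller's map
 * options.  A client wanting a write-combined user mapping of a frame
 * buffer descriptor would pass, for example ('md' and 'userTask' are
 * hypothetical):
 */
#if 0
    IOMemoryMap * map = md->map( userTask, 0,
                                 kIOMapAnywhere | kIOMapWriteCombineCache );
    // kIOMapInhibitCache, kIOMapWriteThruCache and kIOMapCopybackCache
    // select MAP_MEM_IO, MAP_MEM_WTHRU and MAP_MEM_COPYBACK respectively.
#endif
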
IOReturn IOMemoryDescriptor::doMap(
        vm_map_t            addressMap,
        IOVirtualAddress *  atAddress,
        IOOptionBits        options,
        IOByteCount         sourceOffset,
        IOByteCount         length )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager;
    vm_address_t        logical;
    IOByteCount         pageOffset;
    IOPhysicalAddress   sourceAddr;
    IOMemoryDescriptorMapAllocRef ref;

    ref.sharedMem    = (ipc_port_t) _memEntry;
    ref.sourceOffset = sourceOffset;
    ref.options      = options;

    do {

        if( 0 == length)
            length = getLength();

        sourceAddr = getSourceSegment( sourceOffset, NULL );
        assert( sourceAddr );
        pageOffset = sourceAddr - trunc_page_32( sourceAddr );

        ref.size = round_page_32( length + pageOffset );

        logical = *atAddress;
        if( options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else {
            ref.mapped = trunc_page_32( logical );
            if( (logical - ref.mapped) != pageOffset) {
                err = kIOReturnVMError;
                continue;
            }
        }

        if( ref.sharedMem && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps( ref.size, &IOMemoryDescriptorMapAlloc, &ref );
        else
            err = IOMemoryDescriptorMapAlloc( addressMap, &ref );

        if( err != KERN_SUCCESS)
            continue;

        if( reserved)
            pager = (memory_object_t) reserved->devicePager;
        else
            pager = MACH_PORT_NULL;

        if( !ref.sharedMem || pager )
            err = handleFault( pager, addressMap, ref.mapped, sourceOffset, length, options );

    } while( false );

    if( err != KERN_SUCCESS) {
        if( ref.mapped)
            doUnmap( addressMap, ref.mapped, ref.size );
        *atAddress = NULL;
    } else
        *atAddress = ref.mapped + pageOffset;

    return( err );
}

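/*
 * Worked example (added for clarity, not in the original source): the
 * rounding above preserves sub-page offsets.  With 4K pages, a source
 * segment at sourceAddr = 0x12345800 and length = 0x1000 gives
 *
 *     pageOffset = 0x12345800 - trunc_page_32(0x12345800) = 0x800
 *     ref.size   = round_page_32(0x1000 + 0x800)          = 0x2000
 *
 * so two pages are mapped, and *atAddress returns ref.mapped + 0x800,
 * pointing at the first requested byte rather than at the page base.
 */
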
enum {
    kIOMemoryRedirected = 0x00010000
};

IOReturn IOMemoryDescriptor::handleFault(
        void *              _pager,
        vm_map_t            addressMap,
        IOVirtualAddress    address,
        IOByteCount         sourceOffset,
        IOByteCount         length,
        IOOptionBits        options )
{
    IOReturn            err = kIOReturnSuccess;
    memory_object_t     pager = (memory_object_t) _pager;
    vm_size_t           size;
    vm_size_t           bytes;
    vm_size_t           page;
    IOByteCount         pageOffset;
    IOByteCount         pagerOffset;
    IOPhysicalLength    segLen;
    addr64_t            physAddr;

    if( !addressMap) {

        if( kIOMemoryRedirected & _flags) {
#ifdef DEBUG
            IOLog("sleep mem redirect %p, %lx\n", this, sourceOffset);
#endif
            do {
                SLEEP;
            } while( kIOMemoryRedirected & _flags );
        }

        return( kIOReturnSuccess );
    }

    physAddr = getPhysicalSegment64( sourceOffset, &segLen );
    assert( physAddr );
    pageOffset = physAddr - trunc_page_64( physAddr );
    pagerOffset = sourceOffset;

    size = length + pageOffset;
    physAddr -= pageOffset;

    segLen += pageOffset;
    bytes = size;
    do {
        // in the middle of the loop only map whole pages
        if( segLen >= bytes)
            segLen = bytes;
        else if( segLen != trunc_page_32( segLen))
            err = kIOReturnVMError;
        if( physAddr != trunc_page_64( physAddr))
            err = kIOReturnBadArgument;

#ifdef DEBUG
        if( kIOLogMapping & gIOKitDebug)
            IOLog("_IOMemoryMap::map(%p) %08lx->%08qx:%08lx\n",
                  addressMap, address + pageOffset, physAddr + pageOffset,
                  segLen - pageOffset);
#endif

#ifdef i386
        /* i386 doesn't support faulting on device memory yet */
        if( addressMap && (kIOReturnSuccess == err))
            err = IOMapPages( addressMap, address, (IOPhysicalAddress) physAddr, segLen, options );
        assert( KERN_SUCCESS == err );
        if( err)
            break;
#endif

        if( pager) {
            if( reserved && reserved->pagerContig) {
                IOPhysicalLength    allLen;
                addr64_t            allPhys;

                allPhys = getPhysicalSegment64( 0, &allLen );
                assert( allPhys );
                err = device_pager_populate_object( pager, 0, allPhys >> PAGE_SHIFT, round_page_32(allLen) );

            } else {

                for( page = 0;
                     (page < segLen) && (KERN_SUCCESS == err);
                     page += page_size) {
                    err = device_pager_populate_object(pager, pagerOffset,
                            (ppnum_t)((physAddr + page) >> PAGE_SHIFT), page_size);
                    pagerOffset += page_size;
                }
            }
            assert( KERN_SUCCESS == err );
            if( err)
                break;
        }
#ifndef i386
        /* *** ALERT *** */
        /* *** Temporary Workaround *** */

        /* This call to vm_fault causes an early pmap level resolution  */
        /* of the mappings created above.  Need for this is in absolute */
        /* violation of the basic tenet that the pmap layer is a cache. */
        /* Further, it implies a serious I/O architectural violation on */
        /* the part of some user of the mapping.  As of this writing,   */
        /* the call to vm_fault is needed because the NVIDIA driver     */
        /* makes a call to pmap_extract.  The NVIDIA driver needs to be */
        /* fixed as soon as possible.  The NVIDIA driver should not     */
        /* need to query for this info as it should know from the doMap */
        /* call where the physical memory is mapped.  When a query is   */
        /* necessary to find a physical mapping, it should be done      */
        /* through an iokit call which includes the mapped memory       */
        /* handle.  This is required for machine architecture independence.*/

        if(!(kIOMemoryRedirected & _flags)) {
            vm_fault(addressMap, address, 3, FALSE, FALSE, NULL, 0);
        }

        /* *** Temporary Workaround *** */
        /* *** ALERT *** */
#endif
        sourceOffset += segLen - pageOffset;
        address += segLen;
        bytes -= segLen;
        pageOffset = 0;

    } while( bytes
        && (physAddr = getPhysicalSegment64( sourceOffset, &segLen )));

    if( bytes)
        err = kIOReturnBadArgument;

    return( err );
}

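/*
 * Note (added for clarity, not in the original source): the two pager
 * branches above differ only in granularity.  A physically contiguous
 * descriptor (reserved->pagerContig) is populated with one call covering
 * the whole range; otherwise each page is pushed separately, advancing
 * pagerOffset by page_size and handing the pager the page number
 * (ppnum_t)((physAddr + page) >> PAGE_SHIFT) for every page of segLen.
 */
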
IOReturn IOMemoryDescriptor::doUnmap(
        vm_map_t            addressMap,
        IOVirtualAddress    logical,
        IOByteCount         length )
{
    IOReturn err;

#ifdef DEBUG
    if( kIOLogMapping & gIOKitDebug)
        kprintf("IOMemoryDescriptor::doUnmap(%x) %08x:%08x\n",
                addressMap, logical, length );
#endif

    if( true /* && (addressMap == kernel_map) || (addressMap == get_task_map(current_task()))*/) {

        if( _memEntry && (addressMap == kernel_map) && (kIOMemoryBufferPageable & _flags))
            addressMap = IOPageableMapForAddress( logical );

        err = vm_deallocate( addressMap, logical, length );

    } else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    IOReturn        err;
    _IOMemoryMap *  mapping = 0;
    OSIterator *    iter;

    LOCK;

    do {
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {
            while( (mapping = (_IOMemoryMap *) iter->getNextObject()))
                mapping->redirect( safeTask, redirect );

            iter->release();
        }
    } while( false );

    if( redirect)
        _flags |= kIOMemoryRedirected;
    else {
        _flags &= ~kIOMemoryRedirected;
        WAKEUP;
    }

    UNLOCK;

    // temporary binary compatibility
    IOSubMemoryDescriptor * subMem;
    if( (subMem = OSDynamicCast( IOSubMemoryDescriptor, this)))
        err = subMem->redirect( safeTask, redirect );
    else
        err = kIOReturnSuccess;

    return( err );
}

IOReturn IOSubMemoryDescriptor::redirect( task_t safeTask, bool redirect )
{
    return( _parent->redirect( safeTask, redirect ));
}

IOReturn _IOMemoryMap::redirect( task_t safeTask, bool redirect )
{
    IOReturn err = kIOReturnSuccess;

    if( superMap) {
//      err = ((_IOMemoryMap *)superMap)->redirect( safeTask, redirect );
    } else {

        LOCK;
        if( logical && addressMap
            && (get_task_map( safeTask) != addressMap)
            && (0 == (options & kIOMapStatic))) {

            IOUnmapPages( addressMap, logical, length );
            if( !redirect) {
                err = vm_deallocate( addressMap, logical, length );
                err = memory->doMap( addressMap, &logical,
                                     (options & ~kIOMapAnywhere) /*| kIOMapReserve*/,
                                     offset, length );
            } else
                err = kIOReturnSuccess;
#ifdef DEBUG
            IOLog("IOMemoryMap::redirect(%d, %p) %x:%lx from %p\n", redirect, this, logical, length, addressMap);
#endif
        }
        UNLOCK;
    }

    return( err );
}

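/*
 * Illustrative sketch (not part of the original source): redirect() is
 * the sleep/wake hook.  A driver whose device memory must not be touched
 * across a power transition can pull the mappings before sleeping and
 * re-establish them on wake; clients faulting in between block in
 * handleFault() on kIOMemoryRedirected (the SLEEP loop above), and the
 * WAKEUP in IOMemoryDescriptor::redirect() releases them.
 */
#if 0
    // 'md' and 'clientTask' are hypothetical.
    md->redirect( clientTask, true );       // going to sleep: unmap pages
    // ... power transition ...
    md->redirect( clientTask, false );      // awake: remap and WAKEUP
#endif
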
IOReturn _IOMemoryMap::unmap( void )
{
    IOReturn err;

    LOCK;

    if( logical && addressMap && (0 == superMap)
        && (0 == (options & kIOMapStatic))) {

        err = memory->doUnmap( addressMap, logical, length );
        vm_map_deallocate(addressMap);
        addressMap = 0;

    } else
        err = kIOReturnSuccess;

    logical = 0;

    UNLOCK;

    return( err );
}

void _IOMemoryMap::taskDied( void )
{
    LOCK;
    if( addressMap) {
        vm_map_deallocate(addressMap);
        addressMap = 0;
    }
    addressTask = 0;
    logical     = 0;
    UNLOCK;
}

// Overload the release mechanism.  All mappings must be a member
// of a memory descriptor's _mappings set.  This means that we
// always have 2 references on a mapping.  When either of these
// references is released we need to free ourselves.
void _IOMemoryMap::taggedRelease(const void *tag) const
{
    LOCK;
    super::taggedRelease(tag, 2);
    UNLOCK;
}

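/*
 * Illustrative sketch (not part of the original source): the retain-count
 * bookkeeping the comment above describes.  A freshly created mapping
 * carries two references - the caller's and the one in its descriptor's
 * _mappings set - so super::taggedRelease(tag, 2) frees the map as soon
 * as either holder lets go.
 */
#if 0
    IOMemoryMap * map = md->map();  // 'md' hypothetical; retain count is 2
    map->release();                 // count reaches the freeWhen threshold:
                                    // free() runs, unmaps, and removes the
                                    // map from md's _mappings set
#endif
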
void _IOMemoryMap::free()
{
    unmap();

    if( memory) {
        LOCK;
        memory->removeMapping( this);
        UNLOCK;
        memory->release();
    }

    if( superMap)
        superMap->release();

    super::free();
}

IOByteCount _IOMemoryMap::getLength()
{
    return( length );
}

IOVirtualAddress _IOMemoryMap::getVirtualAddress()
{
    return( logical);
}

task_t _IOMemoryMap::getAddressTask()
{
    if( superMap)
        return( superMap->getAddressTask());
    else
        return( addressTask);
}

IOOptionBits _IOMemoryMap::getMapOptions()
{
    return( options);
}

IOMemoryDescriptor * _IOMemoryMap::getMemoryDescriptor()
{
    return( memory );
}

_IOMemoryMap * _IOMemoryMap::copyCompatible(
                IOMemoryDescriptor * owner,
                task_t               task,
                IOVirtualAddress     toAddress,
                IOOptionBits         _options,
                IOByteCount          _offset,
                IOByteCount          _length )
{
    _IOMemoryMap * mapping;

    if( (!task) || (!addressMap) || (addressMap != get_task_map(task)))
        return( 0 );
    if( (options ^ _options) & kIOMapReadOnly)
        return( 0 );
    if( (kIOMapDefaultCache != (_options & kIOMapCacheMask))
     && ((options ^ _options) & kIOMapCacheMask))
        return( 0 );

    if( (0 == (_options & kIOMapAnywhere)) && (logical != toAddress))
        return( 0 );

    if( _offset < offset)
        return( 0 );

    _offset -= offset;

    if( (_offset + _length) > length)
        return( 0 );

    if( (length == _length) && (!_offset)) {
        retain();
        mapping = this;

    } else {
        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initCompatible( owner, this, _offset, _length )) {
            mapping->release();
            mapping = 0;
        }
    }

    return( mapping );
}

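/*
 * Illustrative sketch (not part of the original source): the net effect
 * of the compatibility checks above is that a second map() request
 * against the same descriptor reuses an existing mapping - retained, not
 * rebuilt - when the task, protection, cache mode and range all fit.
 */
#if 0
    // 'md' and 'task' are hypothetical; the second call can return the
    // first mapping, retained, since the options and range are identical.
    IOMemoryMap * a = md->map( task, 0, kIOMapAnywhere );
    IOMemoryMap * b = md->map( task, 0, kIOMapAnywhere );
    b->release();
    a->release();
#endif
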
IOPhysicalAddress _IOMemoryMap::getPhysicalSegment( IOByteCount _offset,
                                                    IOPhysicalLength * length)
{
    IOPhysicalAddress address;

    LOCK;
    address = memory->getPhysicalSegment( offset + _offset, length );
    UNLOCK;

    return( address );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super OSObject

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

void IOMemoryDescriptor::initialize( void )
{
    if( 0 == gIOMemoryLock)
        gIOMemoryLock = IORecursiveLockAlloc();

    IORegistryEntry::getRegistryRoot()->setProperty(kIOMaximumMappedIOByteCountKey,
                                                    ptoa_64(gIOMaximumMappedIOPageCount), 64);
}

void IOMemoryDescriptor::free( void )
{
    if( _mappings)
        _mappings->release();

    super::free();
}

IOMemoryMap * IOMemoryDescriptor::setMapping(
        task_t              intoTask,
        IOVirtualAddress    mapAddress,
        IOOptionBits        options )
{
    _IOMemoryMap * map;

    map = new _IOMemoryMap;

    LOCK;

    if( map
     && !map->initWithDescriptor( this, intoTask, mapAddress,
                                  options | kIOMapStatic, 0, getLength() )) {
        map->release();
        map = 0;
    }

    addMapping( map);

    UNLOCK;

    return( map);
}

IOMemoryMap * IOMemoryDescriptor::map(
        IOOptionBits        options )
{
    return( makeMapping( this, kernel_task, 0,
                         options | kIOMapAnywhere,
                         0, getLength() ));
}

IOMemoryMap * IOMemoryDescriptor::map(
        task_t              intoTask,
        IOVirtualAddress    toAddress,
        IOOptionBits        options,
        IOByteCount         offset,
        IOByteCount         length )
{
    if( 0 == length)
        length = getLength();

    return( makeMapping( this, intoTask, toAddress, options, offset, length ));
}

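/*
 * Illustrative sketch (not part of the original source): the typical
 * client sequence over the two map() flavors above - wire the memory,
 * map it (kIOMapAnywhere picks the address), use it, then tear down in
 * reverse order.  The task and buffer below are hypothetical.
 */
#if 0
static IOReturn exampleAccessTaskBuffer( task_t owner, vm_address_t buf,
                                         IOByteCount len )
{
    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(
                                    buf, len, kIODirectionOutIn, owner );
    if( !md)
        return( kIOReturnNoMemory );

    IOReturn err = md->prepare();
    if( kIOReturnSuccess == err) {
        IOMemoryMap * map = md->map();      // kernel_task, kIOMapAnywhere
        if( map) {
            void * p = (void *) map->getVirtualAddress();
            // ... access len bytes at p ...
            map->release();
        } else
            err = kIOReturnVMError;
        md->complete();
    }
    md->release();
    return( err );
}
#endif
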
IOMemoryMap * IOMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    _IOMemoryMap *  mapping = 0;
    OSIterator *    iter;

    LOCK;

    do {
        // look for an existing mapping
        if( (iter = OSCollectionIterator::withCollection( _mappings))) {

            while( (mapping = (_IOMemoryMap *) iter->getNextObject())) {

                if( (mapping = mapping->copyCompatible(
                                        owner, intoTask, toAddress,
                                        options | kIOMapReference,
                                        offset, length )))
                    break;
            }
            iter->release();
            if( mapping)
                continue;
        }

        if( mapping || (options & kIOMapReference))
            continue;

        owner = this;

        mapping = new _IOMemoryMap;
        if( mapping
        && !mapping->initWithDescriptor( owner, intoTask, toAddress, options,
                                         offset, length )) {
#ifdef DEBUG
            IOLog("Didn't make map %08lx : %08lx\n", offset, length );
#endif
            mapping->release();
            mapping = 0;
        }

    } while( false );

    owner->addMapping( mapping);

    UNLOCK;

    return( mapping);
}

void IOMemoryDescriptor::addMapping(
        IOMemoryMap * mapping )
{
    if( mapping) {
        if( 0 == _mappings)
            _mappings = OSSet::withCapacity(1);
        if( _mappings )
            _mappings->setObject( mapping );
    }
}

void IOMemoryDescriptor::removeMapping(
        IOMemoryMap * mapping )
{
    if( _mappings)
        _mappings->removeObject( mapping);
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#undef super
#define super IOMemoryDescriptor

OSDefineMetaClassAndStructors(IOSubMemoryDescriptor, IOMemoryDescriptor)

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOSubMemoryDescriptor::initSubRange( IOMemoryDescriptor * parent,
                                          IOByteCount offset, IOByteCount length,
                                          IODirection direction )
{
    if( !parent)
        return( false);

    if( (offset + length) > parent->getLength())
        return( false);

    /*
     * We can check the _parent instance variable before having ever set it
     * to an initial value because I/O Kit guarantees that all our instance
     * variables are zeroed on an object's allocation.
     */

    if( !_parent) {
        if( !super::init())
            return( false );
    } else {
        /*
         * An existing memory descriptor is being retargeted to
         * point to somewhere else.  Clean up our present state.
         */

        _parent->release();
        _parent = 0;
    }

    parent->retain();
    _parent    = parent;
    _start     = offset;
    _length    = length;
    _direction = direction;
    _tag       = parent->getTag();

    return( true );
}

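/*
 * Illustrative sketch (not part of the original source): carving a window
 * out of a parent descriptor through the initSubRange() path above.  The
 * usual entry point is IOMemoryDescriptor::withSubRange(), which
 * allocates the IOSubMemoryDescriptor and calls initSubRange().
 */
#if 0
    // 'parentMD' is hypothetical; the child covers bytes 0x100..0x2ff of
    // the parent and forwards readBytes/writeBytes/prepare/complete to it.
    IOMemoryDescriptor * sub = IOMemoryDescriptor::withSubRange(
                parentMD, 0x100, 0x200, parentMD->getDirection() );
#endif
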
void IOSubMemoryDescriptor::free( void )
{
    if( _parent)
        _parent->release();

    super::free();
}

IOPhysicalAddress IOSubMemoryDescriptor::getPhysicalSegment( IOByteCount offset,
                                                             IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getPhysicalSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

IOPhysicalAddress IOSubMemoryDescriptor::getSourceSegment( IOByteCount offset,
                                                           IOByteCount * length )
{
    IOPhysicalAddress   address;
    IOByteCount         actualLength;

    assert(offset <= _length);

    if( length)
        *length = 0;

    if( offset >= _length)
        return( 0 );

    address = _parent->getSourceSegment( offset + _start, &actualLength );

    if( address && length)
        *length = min( _length - offset, actualLength );

    return( address );
}

void * IOSubMemoryDescriptor::getVirtualSegment(IOByteCount offset,
                                                IOByteCount * lengthOfSegment)
{
    return( 0 );
}

IOByteCount IOSubMemoryDescriptor::readBytes(IOByteCount offset,
                                             void * bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->readBytes( _start + offset, bytes,
                                    min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOByteCount IOSubMemoryDescriptor::writeBytes(IOByteCount offset,
                                              const void* bytes, IOByteCount length)
{
    IOByteCount byteCount;

    assert(offset <= _length);

    if( offset >= _length)
        return( 0 );

    LOCK;
    byteCount = _parent->writeBytes( _start + offset, bytes,
                                     min(length, _length - offset) );
    UNLOCK;

    return( byteCount );
}

IOReturn IOSubMemoryDescriptor::prepare(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->prepare( forDirection);
    UNLOCK;

    return( err );
}

IOReturn IOSubMemoryDescriptor::complete(
        IODirection forDirection)
{
    IOReturn err;

    LOCK;
    err = _parent->complete( forDirection);
    UNLOCK;

    return( err );
}

IOMemoryMap * IOSubMemoryDescriptor::makeMapping(
        IOMemoryDescriptor *    owner,
        task_t                  intoTask,
        IOVirtualAddress        toAddress,
        IOOptionBits            options,
        IOByteCount             offset,
        IOByteCount             length )
{
    IOMemoryMap * mapping;

    mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress - (_start + offset),
                                        options | kIOMapReference,
                                        _start + offset, length );

    if( !mapping)
        mapping = (IOMemoryMap *) _parent->makeMapping(
                                        _parent, intoTask,
                                        toAddress,
                                        options, _start + offset, length );

    if( !mapping)
        mapping = super::makeMapping( owner, intoTask, toAddress, options,
                                      offset, length );

    return( mapping );
}

/* ick */

bool
IOSubMemoryDescriptor::initWithAddress(void *      address,
                                       IOByteCount length,
                                       IODirection direction)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithAddress(vm_address_t address,
                                       IOByteCount  length,
                                       IODirection  direction,
                                       task_t       task)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalAddress(
                                 IOPhysicalAddress address,
                                 IOByteCount       length,
                                 IODirection       direction )
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithRanges(
                                 IOVirtualRange * ranges,
                                 UInt32           withCount,
                                 IODirection      direction,
                                 task_t           task,
                                 bool             asReference)
{
    return( false );
}

bool
IOSubMemoryDescriptor::initWithPhysicalRanges( IOPhysicalRange * ranges,
                                               UInt32            withCount,
                                               IODirection       direction,
                                               bool              asReference)
{
    return( false );
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

bool IOGeneralMemoryDescriptor::serialize(OSSerialize * s) const
{
    OSSymbol const *keys[2];
    OSObject *values[2];
    IOVirtualRange *vcopy;
    unsigned int index, nRanges;
    bool result;

    if (s == NULL) return false;
    if (s->previouslySerialized(this)) return true;

    // Pretend we are an array.
    if (!s->addXMLStartTag(this, "array")) return false;

    nRanges = _rangesCount;
    vcopy = (IOVirtualRange *) IOMalloc(sizeof(IOVirtualRange) * nRanges);
    if (vcopy == 0) return false;

    keys[0] = OSSymbol::withCString("address");
    keys[1] = OSSymbol::withCString("length");

    result = false;
    values[0] = values[1] = 0;

    // From this point on, exit through the bail label on failure.

    // Copy the volatile data so we don't have to allocate memory
    // while the lock is held.
    LOCK;
    if (nRanges == _rangesCount) {
        for (index = 0; index < nRanges; index++) {
            vcopy[index] = _ranges.v[index];
        }
    } else {
        // The descriptor changed out from under us.  Give up.
        UNLOCK;
        result = false;
        goto bail;
    }
    UNLOCK;

    for (index = 0; index < nRanges; index++)
    {
        // Serialize from the snapshot in vcopy, not the live _ranges,
        // which may change once the lock is dropped.
        values[0] = OSNumber::withNumber(vcopy[index].address, sizeof(vcopy[index].address) * 8);
        if (values[0] == 0) {
            result = false;
            goto bail;
        }
        values[1] = OSNumber::withNumber(vcopy[index].length, sizeof(vcopy[index].length) * 8);
        if (values[1] == 0) {
            result = false;
            goto bail;
        }
        OSDictionary *dict = OSDictionary::withObjects((const OSObject **)values, (const OSSymbol **)keys, 2);
        if (dict == 0) {
            result = false;
            goto bail;
        }
        values[0]->release();
        values[1]->release();
        values[0] = values[1] = 0;

        result = dict->serialize(s);
        dict->release();
        if (!result) {
            goto bail;
        }
    }
    result = s->addXMLEndTag("array");

 bail:
    if (values[0])
        values[0]->release();
    if (values[1])
        values[1]->release();
    if (keys[0])
        keys[0]->release();
    if (keys[1])
        keys[1]->release();
    if (vcopy)
        IOFree(vcopy, sizeof(IOVirtualRange) * nRanges);
    return result;
}

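/*
 * Note (added for clarity, not in the original source): the method above
 * serializes the descriptor as an array of one dict per range, roughly:
 *
 *     <array>
 *       <dict>
 *         <key>address</key> <integer size="32">...</integer>
 *         <key>length</key>  <integer size="32">...</integer>
 *       </dict>
 *       ...
 *     </array>
 *
 * where the integer width follows sizeof the IOVirtualRange fields.
 */
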
bool IOSubMemoryDescriptor::serialize(OSSerialize * s) const
{
    if (!s) {
        return (false);
    }
    if (s->previouslySerialized(this)) return true;

    // Pretend we are a dictionary.
    // We must duplicate the functionality of OSDictionary here
    // because otherwise object references will not work;
    // they are based on the value of the object passed to
    // previouslySerialized and addXMLStartTag.

    if (!s->addXMLStartTag(this, "dict")) return false;

    char const *keys[3] = {"offset", "length", "parent"};

    OSObject *values[3];
    values[0] = OSNumber::withNumber(_start, sizeof(_start) * 8);
    if (values[0] == 0)
        return false;
    values[1] = OSNumber::withNumber(_length, sizeof(_length) * 8);
    if (values[1] == 0) {
        values[0]->release();
        return false;
    }
    values[2] = _parent;

    bool result = true;
    for (int i = 0; i < 3; i++) {
        if (!s->addString("<key>") ||
            !s->addString(keys[i]) ||
            !s->addXMLEndTag("key") ||
            !values[i]->serialize(s)) {
            result = false;
            break;
        }
    }
    values[0]->release();
    values[1]->release();
    if (!result) {
        return false;
    }

    return s->addXMLEndTag("dict");
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 0);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDefineReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 7);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDefineReservedUnused(IOMemoryDescriptor, 15);

/* ex-inline function implementation */
IOPhysicalAddress IOMemoryDescriptor::getPhysicalAddress()
    { return( getPhysicalSegment( 0, 0 )); }