/*
* Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* compliance with the License. The rights granted to you under the License
* may not be used to create, or enable the creation or redistribution of,
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
*
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#ifndef _IOMEMORYDESCRIPTOR_H
#define _IOMEMORYDESCRIPTOR_H

#include <sys/cdefs.h>

#include <IOKit/IOTypes.h>
#include <IOKit/IOLocks.h>
#include <libkern/c++/OSContainers.h>

#include <mach/memory_object_types.h>

class IOMemoryMap;
class IOMapper;
class IOService;

/*
* Direction of transfer, with respect to the described memory.
*/
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
kIODirectionNone = 0x0, // same as VM_PROT_NONE
kIODirectionIn = 0x1, // User land 'read', same as VM_PROT_READ
kIODirectionOut = 0x2, // User land 'write', same as VM_PROT_WRITE
kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
kIODirectionInOut = kIODirectionIn | kIODirectionOut,

// these flags are valid for the prepare() method only
kIODirectionPrepareToPhys32 = 0x00000004,
kIODirectionPrepareNoFault = 0x00000008,
kIODirectionPrepareReserved1 = 0x00000010,
#define IODIRECTIONPREPARENONCOHERENTDEFINED 1
kIODirectionPrepareNonCoherent = 0x00000020,

// these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
kIODirectionCompleteWithError = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
kIODirectionCompleteWithDataValid = 0x00000080,
};
#ifdef __LP64__
typedef IOOptionBits IODirection;
#endif /* __LP64__ */

/*
* IOOptionBits used in the withOptions variant
*/
enum {
kIOMemoryDirectionMask = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
kIOMemoryAutoPrepare = 0x00000008, // Shared with Buffer MD
#endif

kIOMemoryTypeVirtual = 0x00000010,
kIOMemoryTypePhysical = 0x00000020,
kIOMemoryTypeUPL = 0x00000030,
kIOMemoryTypePersistentMD = 0x00000040, // Persistent Memory Descriptor
kIOMemoryTypeUIO = 0x00000050,
#ifdef __LP64__
kIOMemoryTypeVirtual64 = kIOMemoryTypeVirtual,
kIOMemoryTypePhysical64 = kIOMemoryTypePhysical,
#else /* !__LP64__ */
kIOMemoryTypeVirtual64 = 0x00000060,
kIOMemoryTypePhysical64 = 0x00000070,
#endif /* !__LP64__ */
kIOMemoryTypeMask = 0x000000f0,

kIOMemoryAsReference = 0x00000100,
kIOMemoryBufferPageable = 0x00000400,
kIOMemoryMapperNone = 0x00000800, // Shared with Buffer MD
kIOMemoryHostOnly = 0x00001000, // Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
kIOMemoryRedirected = 0x00004000,
kIOMemoryPreparedReadOnly = 0x00008000,
#endif
kIOMemoryPersistent = 0x00010000,
#ifdef XNU_KERNEL_PRIVATE
kIOMemoryReserved6156215 = 0x00020000,
#endif
kIOMemoryThreadSafe = 0x00100000, // Shared with Buffer MD
kIOMemoryClearEncrypt = 0x00200000, // Shared with Buffer MD

#ifdef XNU_KERNEL_PRIVATE
kIOMemoryBufferPurgeable = 0x00400000,
kIOMemoryBufferCacheMask = 0x70000000,
kIOMemoryBufferCacheShift = 28,
#endif
};

#define kIOMapperSystem ((IOMapper *) 0)

enum
{
kIOMemoryPurgeableKeepCurrent = 1,

kIOMemoryPurgeableNonVolatile = 2,
kIOMemoryPurgeableVolatile = 3,
kIOMemoryPurgeableEmpty = 4,

// modifiers for kIOMemoryPurgeableVolatile behavior
kIOMemoryPurgeableVolatileGroup0 = VM_VOLATILE_GROUP_0,
kIOMemoryPurgeableVolatileGroup1 = VM_VOLATILE_GROUP_1,
kIOMemoryPurgeableVolatileGroup2 = VM_VOLATILE_GROUP_2,
kIOMemoryPurgeableVolatileGroup3 = VM_VOLATILE_GROUP_3,
kIOMemoryPurgeableVolatileGroup4 = VM_VOLATILE_GROUP_4,
kIOMemoryPurgeableVolatileGroup5 = VM_VOLATILE_GROUP_5,
kIOMemoryPurgeableVolatileGroup6 = VM_VOLATILE_GROUP_6,
kIOMemoryPurgeableVolatileGroup7 = VM_VOLATILE_GROUP_7,
kIOMemoryPurgeableVolatileBehaviorFifo = VM_PURGABLE_BEHAVIOR_FIFO,
kIOMemoryPurgeableVolatileBehaviorLifo = VM_PURGABLE_BEHAVIOR_LIFO,
kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
kIOMemoryPurgeableVolatileOrderingNormal = VM_PURGABLE_ORDERING_NORMAL,
};
enum
{
kIOMemoryIncoherentIOFlush = 1,
kIOMemoryIncoherentIOStore = 2,

kIOMemoryClearEncrypted = 50,
kIOMemorySetEncrypted = 51,
};

#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1

struct IODMAMapSpecification
{
uint64_t alignment;
IOService * device;
uint32_t options;
uint8_t numAddressBits;
uint8_t resvA[3];
uint32_t resvB[4];
};

enum
{
kIODMAMapWriteAccess = 0x00000002,
kIODMAMapPhysicallyContiguous = 0x00000010,
kIODMAMapDeviceMemory = 0x00000020,
kIODMAMapPagingPath = 0x00000040,
kIODMAMapIdentityMap = 0x00000080,
};


enum
{
kIOPreparationIDUnprepared = 0,
kIOPreparationIDUnsupported = 1,
kIOPreparationIDAlwaysPrepared = 2,
};

#ifdef XNU_KERNEL_PRIVATE
struct IOMemoryReference;
#endif


/*! @class IOMemoryDescriptor : public OSObject
@abstract An abstract base class defining common methods for describing physical or virtual memory.
@discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */

class IOMemoryDescriptor : public OSObject
{
friend class IOMemoryMap;

OSDeclareDefaultStructors(IOMemoryDescriptor);

protected:

/*! @var reserved
Reserved for future use. (Internal use only) */
struct IOMemoryDescriptorReserved * reserved;

protected:
OSSet * _mappings;
IOOptionBits _flags;


#ifdef XNU_KERNEL_PRIVATE
public:
struct IOMemoryReference * _memRef;
protected:
#else
void * __iomd_reserved5;
#endif

#ifdef __LP64__
uint64_t __iomd_reserved1;
uint64_t __iomd_reserved2;
uint64_t __iomd_reserved3;
uint64_t __iomd_reserved4;
#else /* !__LP64__ */
IODirection _direction; /* use _flags instead */
#endif /* !__LP64__ */
IOByteCount _length; /* length of all ranges */
IOOptionBits _tag;

public:
typedef IOOptionBits DMACommandOps;
#ifndef __LP64__
virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
IOByteCount * length ) APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

/*! @function initWithOptions
@abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
@discussion Note this function can be used to re-init a previously created memory descriptor.
@result true on success, false on failure. */
virtual bool initWithOptions(void * buffers,
UInt32 count,
UInt32 offset,
task_t task,
IOOptionBits options,
IOMapper * mapper = kIOMapperSystem);

#ifndef __LP64__
virtual addr64_t getPhysicalSegment64( IOByteCount offset,
IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
#endif /* !__LP64__ */

/*! @function setPurgeable
@abstract Control the purgeable status of a memory descriptor's memory.
@discussion Buffers may be allocated with the ability to have their purgeable status changed; IOBufferMemoryDescriptor with the kIOMemoryPurgeable option creates such buffers in the kernel, and VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
@param newState - the desired new purgeable state of the memory:<br>
kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
@param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
@result An IOReturn code. */

virtual IOReturn setPurgeable( IOOptionBits newState,
IOOptionBits * oldState );
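
/* Example (illustrative sketch): toggling the purgeable state of a buffer that was
created with the kIOMemoryPurgeable option, and using the returned old state to
detect whether its contents were reclaimed. The descriptor 'md' and its creation
are assumed; error handling is abbreviated.

    IOOptionBits oldState;

    // Allow the VM system to reclaim the pages while they are not needed.
    md->setPurgeable(kIOMemoryPurgeableVolatile, NULL);

    // ... later, make the memory nonvolatile again before touching it.
    IOReturn ret = md->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
    if ((kIOReturnSuccess == ret) && (kIOMemoryPurgeableEmpty == oldState)) {
        // The pages were discarded while volatile; the previous contents are
        // lost and the buffer must be reinitialized before use.
    }
*/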


/*! @function getPageCounts
@abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
@discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
@param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
@param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
@result An IOReturn code. */

IOReturn getPageCounts( IOByteCount * residentPageCount,
IOByteCount * dirtyPageCount);
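
/* Example (illustrative sketch): querying resident and dirty page counts, e.g. for
accounting or debug logging. Either parameter may be passed as NULL if only one
count is of interest; 'md' is an assumed, already created descriptor.

    IOByteCount resident = 0, dirty = 0;
    if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty))
        IOLog("md %p: %llu resident, %llu dirty pages\n",
              md, (uint64_t) resident, (uint64_t) dirty);
*/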

/*! @function performOperation
@abstract Perform an operation on the memory descriptor's memory.
@discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
@param options The operation to perform on the memory:<br>
kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
@param offset A byte offset into the memory descriptor's memory.
@param length The length of the data range.
@result An IOReturn code. */

virtual IOReturn performOperation( IOOptionBits options,
IOByteCount offset, IOByteCount length );
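
/* Example (illustrative sketch): flushing processor caches over the whole descriptor
before a non-coherent DMA engine reads the buffer. The descriptor 'md' is assumed
to have been prepared already, as kIOMemoryIncoherentIOFlush requires.

    IOReturn ret = md->performOperation(kIOMemoryIncoherentIOFlush,
                                        0, md->getLength());
    if (kIOReturnSuccess != ret) {
        // The operation may be unsupported on this architecture; fall back to a
        // coherent transfer strategy.
    }
*/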

// Used for dedicated communications for IODMACommand
virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;

/*! @function getPhysicalSegment
@abstract Break a memory descriptor into its physically contiguous segments.
@discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
@param offset A byte offset into the memory whose physical address to return.
@param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
@result A physical address, or zero if the offset is beyond the length of the memory. */

#ifdef __LP64__
virtual addr64_t getPhysicalSegment( IOByteCount offset,
IOByteCount * length,
IOOptionBits options = 0 ) = 0;
#else /* !__LP64__ */
virtual addr64_t getPhysicalSegment( IOByteCount offset,
IOByteCount * length,
IOOptionBits options );
#endif /* !__LP64__ */
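
/* Example (illustrative sketch): walking the physically contiguous segments of a
prepared descriptor 'md' to build a scatter/gather list by hand. Production drivers
would normally use IODMACommand or IOMemoryCursor instead; kIOMemoryMapperNone
requests CPU physical addresses rather than mapper (I/O space) addresses.

    IOByteCount offset = 0, segLen = 0;
    while (offset < md->getLength()) {
        addr64_t segPhys = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone);
        if (!segPhys || !segLen)
            break;
        // ... program one scatter/gather element with (segPhys, segLen) ...
        offset += segLen;
    }
*/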

virtual uint64_t getPreparationID( void );
void setPreparationID( void );

#ifdef XNU_KERNEL_PRIVATE
IOMemoryDescriptorReserved * getKernelReserved( void );
IOReturn dmaMap(
IOMapper * mapper,
const IODMAMapSpecification * mapSpec,
uint64_t offset,
uint64_t length,
uint64_t * address,
ppnum_t * mapPages);
#endif

private:
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6);
OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);

protected:
virtual void free();
public:
static void initialize( void );

public:
/*! @function withAddress
@abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
@discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
@param address The virtual address of the first byte in the memory.
@param withLength The length of memory.
@param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
@result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

static IOMemoryDescriptor * withAddress(void * address,
IOByteCount withLength,
IODirection withDirection);
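
/* Example (illustrative sketch): describing a driver-owned kernel buffer for an
outbound transfer. 'buffer' and 'size' stand in for the driver's own allocation.

    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddress(buffer, size,
                                                              kIODirectionOut);
    if (md) {
        if (kIOReturnSuccess == md->prepare()) {
            // ... perform the transfer ...
            md->complete();
        }
        md->release();
    }
*/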

#ifndef __LP64__
static IOMemoryDescriptor * withAddress(IOVirtualAddress address,
IOByteCount withLength,
IODirection withDirection,
task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
#endif /* !__LP64__ */

/*! @function withPhysicalAddress
@abstract Create an IOMemoryDescriptor to describe one physical range.
@discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
@param address The physical address of the first byte in the memory.
@param withLength The length of memory.
@param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
@result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

static IOMemoryDescriptor * withPhysicalAddress(
IOPhysicalAddress address,
IOByteCount withLength,
IODirection withDirection );

#ifndef __LP64__
static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges,
UInt32 withCount,
IODirection withDirection,
task_t withTask,
bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
#endif /* !__LP64__ */

/*! @function withAddressRange
@abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
@discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
@param address The virtual address of the first byte in the memory.
@param withLength The length of memory.
@param options
kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
@param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
@result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

static IOMemoryDescriptor * withAddressRange(
mach_vm_address_t address,
mach_vm_size_t length,
IOOptionBits options,
task_t task);
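
/* Example (illustrative sketch): wrapping a user-space buffer supplied by a user
client, then wiring it for I/O. 'userAddr', 'userLen' and 'clientTask' are assumed
to come from the client request.

    IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
        userAddr, userLen, kIODirectionInOut, clientTask);
    if (md) {
        if (kIOReturnSuccess == md->prepare()) {    // wires the user pages
            // ... hand the descriptor to the DMA or copy machinery ...
            md->complete();
        }
        md->release();
    }
*/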

/*! @function withAddressRanges
@abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
@discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
@param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64-bit version of IOVirtualRange.
@param rangeCount The member count of the ranges array.
@param options
kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
@param task The task each of the virtual ranges is mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
@result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

static IOMemoryDescriptor * withAddressRanges(
IOAddressRange * ranges,
UInt32 rangeCount,
IOOptionBits options,
task_t task);

/*! @function withOptions
@abstract Master initialiser for all variants of memory descriptors.
@discussion This method creates and initializes an IOMemoryDescriptor for memory. It has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.


@param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64, or on a 64-bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h APIs, primarily used internally by the UBC. IOVirtualRange or IOPhysicalRange are 32-bit-only types, for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32-bit kernels.

@param count For options:type = Virtual or Physical, count contains the number of entries in the buffers array. For options:type = UPL this field contains a total length.

@param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.

@param task Only used when options:type = Virtual; the task each of the virtual ranges is mapped into.

@param options
kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL. Indicates what type of basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.

@param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.

@result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */

static IOMemoryDescriptor *withOptions(void * buffers,
UInt32 count,
UInt32 offset,
task_t task,
IOOptionBits options,
IOMapper * mapper = kIOMapperSystem);
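
/* Example (illustrative sketch): building a descriptor from an array of 64-bit
virtual ranges with withOptions(). The same memory could equally be described with
withAddressRanges(); this form just shows how the options:type sub-field selects
the interpretation of buffers and count. 'addr0/len0', 'addr1/len1' and 'task' are
assumed values.

    IOAddressRange ranges[2] = { { addr0, len0 }, { addr1, len1 } };
    IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
        ranges, 2, 0, task,
        kIOMemoryTypeVirtual64 | kIODirectionInOut);
*/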

#ifndef __LP64__
static IOMemoryDescriptor * withPhysicalRanges(
IOPhysicalRange * ranges,
UInt32 withCount,
IODirection withDirection,
bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
#endif /* !__LP64__ */

#ifndef __LP64__
static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of,
IOByteCount offset,
IOByteCount length,
IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
#endif /* !__LP64__ */

/*! @function withPersistentMemoryDescriptor
@abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
@discussion If the original memory descriptor's address and length are still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference. Otherwise a totally new memory descriptor is built with the same characteristics as the previous one but with a new view of the VM. Note that it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
@param originalMD The memory descriptor to be duplicated.
@result Either the original memory descriptor with an additional retain or a new memory descriptor, or 0 for a bad original memory descriptor or some other resource shortage. */
static IOMemoryDescriptor *
withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);

#ifndef __LP64__
// obsolete initializers
// - initWithOptions is the designated initializer
virtual bool initWithAddress(void * address,
IOByteCount withLength,
IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
virtual bool initWithAddress(IOVirtualAddress address,
IOByteCount withLength,
IODirection withDirection,
task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
virtual bool initWithPhysicalAddress(
IOPhysicalAddress address,
IOByteCount withLength,
IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
virtual bool initWithRanges(IOVirtualRange * ranges,
UInt32 withCount,
IODirection withDirection,
task_t withTask,
bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
UInt32 withCount,
IODirection withDirection,
bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
#endif /* !__LP64__ */

/*! @function getDirection
@abstract Accessor to get the direction the memory descriptor was created with.
@discussion This method returns the direction the memory descriptor was created with.
@result The direction. */

virtual IODirection getDirection() const;

/*! @function getLength
@abstract Accessor to get the length of the memory descriptor (over all its ranges).
@discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
@result The byte count. */

virtual IOByteCount getLength() const;

/*! @function setTag
@abstract Set the tag for the memory descriptor.
@discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
@param tag The tag. */

virtual void setTag( IOOptionBits tag );

/*! @function getTag
@abstract Accessor to retrieve the tag for the memory descriptor.
@discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
@result The tag. */

virtual IOOptionBits getTag( void );

/*! @function readBytes
@abstract Copy data from the memory descriptor's buffer to the specified buffer.
@discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
@param offset A byte offset into the memory descriptor's memory.
@param bytes The caller supplied buffer to copy the data to.
@param withLength The length of the data to copy.
@result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

virtual IOByteCount readBytes(IOByteCount offset,
void * bytes, IOByteCount withLength);

/*! @function writeBytes
@abstract Copy data to the memory descriptor's buffer from the specified buffer.
@discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
@param offset A byte offset into the memory descriptor's memory.
@param bytes The caller supplied buffer to copy the data from.
@param withLength The length of the data to copy.
@result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */

virtual IOByteCount writeBytes(IOByteCount offset,
const void * bytes, IOByteCount withLength);
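
/* Example (illustrative sketch): PIO-style access to the described memory without
mapping it. readBytes() pulls data out of a descriptor prepared for output
(kIODirectionOut); writeBytes() fills a descriptor prepared for input
(kIODirectionIn). 'md', 'mdIn', 'deviceData' and 'deviceDataLength' are assumed.

    uint8_t header[64];
    IOByteCount copied = md->readBytes(0, header, sizeof(header));
    if (copied != sizeof(header)) {
        // the offset/length exceeded the descriptor; handle the short copy
    }

    // Completing an inbound PIO transfer:
    mdIn->writeBytes(0, deviceData, deviceDataLength);
*/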

#ifndef __LP64__
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
@abstract Return the physical address of the first byte in the memory.
@discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
@result A physical address. */

IOPhysicalAddress getPhysicalAddress();

#ifndef __LP64__
virtual void * getVirtualSegment(IOByteCount offset,
IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
#endif /* !__LP64__ */

/*! @function prepare
@abstract Prepare the memory for an I/O transfer.
@discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe; it is expected that the client can more easily guarantee single-threaded access to a particular memory descriptor.
@param forDirection The direction of the upcoming I/O, or kIODirectionNone for the direction specified by the memory descriptor.
@result An IOReturn code. */

virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;

/*! @function complete
@abstract Complete processing of the memory after an I/O transfer finishes.
@discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
@param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
@result An IOReturn code. */

virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
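
/* Example (illustrative sketch): the prepare()/complete() pairing around a transfer.
Because prepare() is not thread safe, the driver is assumed to serialize access to
the descriptor (or to have created it with kIOMemoryThreadSafe);
StartAndWaitForDMA() is a hypothetical driver routine.

    IOReturn ret = md->prepare();
    if (kIOReturnSuccess == ret) {
        ret = StartAndWaitForDMA(md);               // hypothetical transfer
        md->complete(ret ? kIODirectionCompleteWithError : kIODirectionNone);
    }
*/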

/*
* Mapping functions.
*/

/*! @function createMappingInTask
@abstract Maps an IOMemoryDescriptor into a task.
@discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to an IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
@param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
@param atAddress If a placed mapping is requested, atAddress specifies its address, and kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
@param options Mapping options are defined in IOTypes.h,<br>
kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
kIOMapReadOnly to allow only read-only accesses to the memory - writes will cause an access fault.<br>
kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
@param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
@param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
@result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */

IOMemoryMap * createMappingInTask(
task_t intoTask,
mach_vm_address_t atAddress,
IOOptionBits options,
mach_vm_size_t offset = 0,
mach_vm_size_t length = 0 );
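
/* Example (illustrative sketch): mapping a descriptor's memory into the kernel
address space and using the IOMemoryMap accessors. Releasing the map is what tears
the mapping down, so it must be retained for as long as the virtual address is in
use; 'md' is an assumed descriptor.

    IOMemoryMap * map = md->createMappingInTask(kernel_task, 0,
                                                kIOMapAnywhere | kIOMapReadOnly);
    if (map) {
        const void * p = (const void *) map->getVirtualAddress();
        mach_vm_size_t len = map->getLength();
        // ... read from p[0 .. len-1] ...
        map->release();                 // destroys the mapping
    }
*/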

#ifndef __LP64__
virtual IOMemoryMap * map(
task_t intoTask,
IOVirtualAddress atAddress,
IOOptionBits options,
IOByteCount offset = 0,
IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */
#endif /* !__LP64__ */

/*! @function map
@abstract Maps an IOMemoryDescriptor into the kernel map.
@discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
@param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
@result See the full version of the createMappingInTask method. */

virtual IOMemoryMap * map(
IOOptionBits options = 0 );

/*! @function setMapping
@abstract Establishes an already existing mapping.
@discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
@param task Address space in which the mapping exists.
@param mapAddress Virtual address of the mapping.
@param options Caching and read-only attributes of the mapping.
@result An IOMemoryMap object created to represent the mapping. */

virtual IOMemoryMap * setMapping(
task_t task,
IOVirtualAddress mapAddress,
IOOptionBits options = 0 );

// Following methods are private implementation

#ifdef __LP64__
virtual
#endif /* __LP64__ */
IOReturn redirect( task_t safeTask, bool redirect );

IOReturn handleFault(
void * _pager,
mach_vm_size_t sourceOffset,
mach_vm_size_t length);

IOReturn populateDevicePager(
void * pager,
vm_map_t addressMap,
mach_vm_address_t address,
mach_vm_size_t sourceOffset,
mach_vm_size_t length,
IOOptionBits options );

virtual IOMemoryMap * makeMapping(
IOMemoryDescriptor * owner,
task_t intoTask,
IOVirtualAddress atAddress,
IOOptionBits options,
IOByteCount offset,
IOByteCount length );

protected:
virtual void addMapping(
IOMemoryMap * mapping );

virtual void removeMapping(
IOMemoryMap * mapping );

virtual IOReturn doMap(
vm_map_t addressMap,
IOVirtualAddress * atAddress,
IOOptionBits options,
IOByteCount sourceOffset = 0,
IOByteCount length = 0 );

virtual IOReturn doUnmap(
vm_map_t addressMap,
IOVirtualAddress logical,
IOByteCount length );
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

/*! @class IOMemoryMap : public OSObject
@abstract A class defining common methods for describing a memory mapping.
@discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */

class IOMemoryMap : public OSObject
{
OSDeclareDefaultStructors(IOMemoryMap)
#ifdef XNU_KERNEL_PRIVATE
public:
IOMemoryDescriptor * fMemory;
IOMemoryMap * fSuperMap;
mach_vm_size_t fOffset;
mach_vm_address_t fAddress;
mach_vm_size_t fLength;
task_t fAddressTask;
vm_map_t fAddressMap;
IOOptionBits fOptions;
upl_t fRedirUPL;
ipc_port_t fRedirEntry;
IOMemoryDescriptor * fOwner;
uint8_t fUserClientUnmap;
#endif /* XNU_KERNEL_PRIVATE */

protected:
virtual void taggedRelease(const void *tag = 0) const;
virtual void free();

public:
/*! @function getVirtualAddress
@abstract Accessor to the virtual address of the first byte in the mapping.
@discussion This method returns the virtual address of the first byte in the mapping. Since IOVirtualAddress is only 32 bits wide on 32-bit kernels, the getAddress() method should be used for compatibility with 64-bit task mappings.
@result A virtual address. */

virtual IOVirtualAddress getVirtualAddress();

/*! @function getPhysicalSegment
@abstract Break a mapping into its physically contiguous segments.
@discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
@param offset A byte offset into the mapping whose physical address to return.
@param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
@result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
IOByteCount * length,
IOOptionBits options = 0);
#else /* !__LP64__ */
virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
@abstract Return the physical address of the first byte in the mapping.
@discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
@result A physical address. */

IOPhysicalAddress getPhysicalAddress();

/*! @function getLength
@abstract Accessor to the length of the mapping.
@discussion This method returns the length of the mapping.
@result A byte count. */

virtual IOByteCount getLength();

/*! @function getAddressTask
@abstract Accessor to the task of the mapping.
@discussion This method returns the mach task the mapping exists in.
@result A mach task_t. */

virtual task_t getAddressTask();

/*! @function getMemoryDescriptor
@abstract Accessor to the IOMemoryDescriptor the mapping was created from.
@discussion This method returns the IOMemoryDescriptor the mapping was created from.
@result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
@abstract Accessor to the options the mapping was created with.
@discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
@result Options for the mapping, including cache settings. */

virtual IOOptionBits getMapOptions();

/*! @function unmap
@abstract Force the IOMemoryMap to unmap, without destroying the object.
@discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
@result An IOReturn code. */

virtual IOReturn unmap();

virtual void taskDied();

/*! @function redirect
@abstract Replace the memory mapped in a process with new backing memory.
@discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
@param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
@param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
@param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
@result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
IOOptionBits options,
IOByteCount offset = 0);
#endif
virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
IOOptionBits options,
mach_vm_size_t offset = 0);

#ifdef __LP64__
/*! @function getAddress
@abstract Accessor to the virtual address of the first byte in the mapping.
@discussion This method returns the virtual address of the first byte in the mapping.
@result A virtual address. */
/*! @function getSize
@abstract Accessor to the length of the mapping.
@discussion This method returns the length of the mapping.
@result A byte count. */
inline mach_vm_address_t getAddress() __attribute__((always_inline));
inline mach_vm_size_t getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
@abstract Accessor to the virtual address of the first byte in the mapping.
@discussion This method returns the virtual address of the first byte in the mapping.
@result A virtual address. */
/*! @function getSize
@abstract Accessor to the length of the mapping.
@discussion This method returns the length of the mapping.
@result A byte count. */
virtual mach_vm_address_t getAddress();
virtual mach_vm_size_t getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
// for IOMemoryDescriptor use
IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );

bool init(
task_t intoTask,
mach_vm_address_t toAddress,
IOOptionBits options,
mach_vm_size_t offset,
mach_vm_size_t length );

bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

IOReturn redirect(
task_t intoTask, bool redirect );

IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

IOReturn wireRange(
uint32_t options,
mach_vm_size_t offset,
mach_vm_size_t length);

OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef XNU_KERNEL_PRIVATE
// Also these flags should not overlap with the options to
// IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
_kIOMemorySourceSegment = 0x00002000
};
#endif /* XNU_KERNEL_PRIVATE */

// The following classes are private implementation of IOMemoryDescriptor - they
// should not be referenced directly, just through the public APIs in the
// IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
// might be created by IOMemoryDescriptor::withAddressRange(), but there should be
// no need to reference it as anything but a generic IOMemoryDescriptor *.

class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
{
OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);

public:
union Ranges {
IOVirtualRange *v;
IOAddressRange *v64;
IOPhysicalRange *p;
void *uio;
};
protected:
Ranges _ranges;
unsigned _rangesCount; /* number of address ranges in list */
#ifndef __LP64__
bool _rangesIsAllocated; /* is list allocated by us? */
#endif /* !__LP64__ */

task_t _task; /* task where all ranges are mapped to */

union {
IOVirtualRange v;
IOPhysicalRange p;
} _singleRange; /* storage space for a single range */

unsigned _wireCount; /* number of outstanding wires */

#ifndef __LP64__
uintptr_t _cachedVirtualAddress;

IOPhysicalAddress _cachedPhysicalAddress;
#endif /* !__LP64__ */

bool _initialized; /* has superclass been initialized? */

public:
virtual void free();

virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;

virtual uint64_t getPreparationID( void );

#ifdef XNU_KERNEL_PRIVATE
// Internal APIs may be made virtual at some time in the future.
IOReturn wireVirtual(IODirection forDirection);
IOReturn dmaMap(
IOMapper * mapper,
const IODMAMapSpecification * mapSpec,
uint64_t offset,
uint64_t length,
uint64_t * address,
ppnum_t * mapPages);
bool initMemoryEntries(size_t size, IOMapper * mapper);

IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
IOMemoryReference * realloc);
void memoryReferenceFree(IOMemoryReference * ref);
void memoryReferenceRelease(IOMemoryReference * ref);

IOReturn memoryReferenceCreate(
IOOptionBits options,
IOMemoryReference ** reference);

IOReturn memoryReferenceMap(IOMemoryReference * ref,
vm_map_t map,
mach_vm_size_t inoffset,
mach_vm_size_t size,
IOOptionBits options,
mach_vm_address_t * inaddr);

static IOReturn memoryReferenceSetPurgeable(
IOMemoryReference * ref,
IOOptionBits newState,
IOOptionBits * oldState);
static IOReturn memoryReferenceGetPageCounts(
IOMemoryReference * ref,
IOByteCount * residentPageCount,
IOByteCount * dirtyPageCount);
#endif

private:

#ifndef __LP64__
virtual void setPosition(IOByteCount position);
virtual void mapIntoKernel(unsigned rangeIndex);
virtual void unmapFromKernel();
#endif /* !__LP64__ */

// Internal
OSData * _memoryEntries;
unsigned int _pages;
ppnum_t _highestPage;
uint32_t __iomd_reservedA;
uint32_t __iomd_reservedB;

IOLock * _prepareLock;

public:
/*
* IOMemoryDescriptor required methods
*/

// Master initialiser
virtual bool initWithOptions(void * buffers,
UInt32 count,
UInt32 offset,
task_t task,
IOOptionBits options,
IOMapper * mapper = kIOMapperSystem);

#ifndef __LP64__
// Secondary initialisers
virtual bool initWithAddress(void * address,
IOByteCount withLength,
IODirection withDirection) APPLE_KEXT_DEPRECATED;

virtual bool initWithAddress(IOVirtualAddress address,
IOByteCount withLength,
IODirection withDirection,
task_t withTask) APPLE_KEXT_DEPRECATED;

virtual bool initWithPhysicalAddress(
IOPhysicalAddress address,
IOByteCount withLength,
IODirection withDirection ) APPLE_KEXT_DEPRECATED;

virtual bool initWithRanges( IOVirtualRange * ranges,
UInt32 withCount,
IODirection withDirection,
task_t withTask,
bool asReference = false) APPLE_KEXT_DEPRECATED;

virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
UInt32 withCount,
IODirection withDirection,
bool asReference = false) APPLE_KEXT_DEPRECATED;

virtual addr64_t getPhysicalSegment64( IOByteCount offset,
IOByteCount * length ) APPLE_KEXT_DEPRECATED;

virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
IOByteCount * length);

virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
IOByteCount * length) APPLE_KEXT_DEPRECATED;

virtual void * getVirtualSegment(IOByteCount offset,
IOByteCount * length) APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

virtual IOReturn setPurgeable( IOOptionBits newState,
IOOptionBits * oldState );

virtual addr64_t getPhysicalSegment( IOByteCount offset,
IOByteCount * length,
#ifdef __LP64__
IOOptionBits options = 0 );
#else /* !__LP64__ */
IOOptionBits options );
#endif /* !__LP64__ */

virtual IOReturn prepare(IODirection forDirection = kIODirectionNone);

virtual IOReturn complete(IODirection forDirection = kIODirectionNone);

virtual IOReturn doMap(
vm_map_t addressMap,
IOVirtualAddress * atAddress,
IOOptionBits options,
IOByteCount sourceOffset = 0,
IOByteCount length = 0 );

virtual IOReturn doUnmap(
vm_map_t addressMap,
IOVirtualAddress logical,
IOByteCount length );

virtual bool serialize(OSSerialize *s) const;

// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
static IOMemoryDescriptor *
withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);

};

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#ifdef __LP64__
mach_vm_address_t IOMemoryMap::getAddress()
{
return (getVirtualAddress());
}

mach_vm_size_t IOMemoryMap::getSize()
{
return (getLength());
}
#else /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#endif /* !_IOMEMORYDESCRIPTOR_H */