]> git.saurik.com Git - apple/xnu.git/blob - iokit/IOKit/IOMemoryDescriptor.h
xnu-3789.21.4.tar.gz
[apple/xnu.git] / iokit / IOKit / IOMemoryDescriptor.h
1 /*
2 * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSContainers.h>
36 #ifdef XNU_KERNEL_PRIVATE
37 #include <IOKit/IOKitDebug.h>
38 #endif
39
40 #include <mach/memory_object_types.h>
41
42 class IOMemoryMap;
43 class IOMapper;
44 class IOService;
45 class IODMACommand;
46
47 /*
48 * Direction of transfer, with respect to the described memory.
49 */
// On __LP64__ the direction flags form an anonymous enum and IODirection
// itself is typedef'd to IOOptionBits further below in this header.
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
    kIODirectionNone  = 0x0,        //  same as VM_PROT_NONE
    kIODirectionIn    = 0x1,        // User land 'read', same as VM_PROT_READ
    kIODirectionOut   = 0x2,        // User land 'write', same as VM_PROT_WRITE
    kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
    kIODirectionInOut = kIODirectionIn  | kIODirectionOut,

    // these flags are valid for the prepare() method only
    kIODirectionPrepareToPhys32    = 0x00000004,
    kIODirectionPrepareNoFault     = 0x00000008,
    kIODirectionPrepareReserved1   = 0x00000010,
    // The *DEFINED macros let clients detect availability of the
    // corresponding flag at compile time.
#define IODIRECTIONPREPARENONCOHERENTDEFINED    1
    kIODirectionPrepareNonCoherent = 0x00000020,

    // these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED     1
    kIODirectionCompleteWithError  = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
    kIODirectionCompleteWithDataValid = 0x00000080,
};
75
76
#if XNU_KERNEL_PRIVATE
// Kernel-private direction bits layered on top of the public flags above.
enum
{
    // prepare/complete() notify DMA command active
    kIODirectionDMACommand      = 0x00000100,
    // 8-bit field occupying bits 9..16 (0x0001FE00 >> 9 == 0xFF);
    // presumably tracks active DMA command state — confirm in the .cpp.
    kIODirectionDMACommandMask  = 0x0001FE00,
    kIODirectionDMACommandShift = 9,   // shift to extract the masked field
};
#endif


#ifdef __LP64__
// LP64: the direction enum above is anonymous, so IODirection is simply
// an IOOptionBits value.
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
91
92 /*
93 * IOOptionBits used in the withOptions variant
94 */
// Option bits accepted by IOMemoryDescriptor::withOptions()/initWithOptions().
// Low nibble carries the IODirection; the 0xf0 nibble selects the descriptor
// type; higher bits are behavior modifiers.
enum {
    kIOMemoryDirectionMask      = 0x00000007,   // IODirection in the low bits
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryAutoPrepare        = 0x00000008,   // Shared with Buffer MD
#endif

    // options:type — selects the interpretation of buffers/count/offset/task
    kIOMemoryTypeVirtual        = 0x00000010,
    kIOMemoryTypePhysical       = 0x00000020,
    kIOMemoryTypeUPL            = 0x00000030,
    kIOMemoryTypePersistentMD   = 0x00000040,   // Persistent Memory Descriptor
    kIOMemoryTypeUIO            = 0x00000050,
#ifdef __LP64__
    // On 64-bit kernels the 64-bit types are the native types.
    kIOMemoryTypeVirtual64      = kIOMemoryTypeVirtual,
    kIOMemoryTypePhysical64     = kIOMemoryTypePhysical,
#else /* !__LP64__ */
    kIOMemoryTypeVirtual64      = 0x00000060,
    kIOMemoryTypePhysical64     = 0x00000070,
#endif /* !__LP64__ */
    kIOMemoryTypeMask           = 0x000000f0,

    kIOMemoryAsReference        = 0x00000100,   // don't copy the ranges array
    kIOMemoryBufferPageable     = 0x00000400,
    kIOMemoryMapperNone         = 0x00000800,   // Shared with Buffer MD
    kIOMemoryHostOnly           = 0x00001000,   // Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryRedirected         = 0x00004000,
    kIOMemoryPreparedReadOnly   = 0x00008000,
#endif
    kIOMemoryPersistent         = 0x00010000,
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryReserved6156215    = 0x00020000,   // reserved for a specific internal fix (see name)
#endif
    kIOMemoryThreadSafe         = 0x00100000,   // Shared with Buffer MD
    kIOMemoryClearEncrypt       = 0x00200000,   // Shared with Buffer MD
    kIOMemoryUseReserve         = 0x00800000,   // Shared with Buffer MD
    // Compile-time availability marker for kIOMemoryUseReserve.
#define IOMEMORYUSERESERVEDEFINED   1

#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryBufferPurgeable    = 0x00400000,
    kIOMemoryBufferCacheMask    = 0x70000000,   // cache mode field (3 bits)
    kIOMemoryBufferCacheShift   = 28,           // shift to extract the cache field
#endif
};
138
139 #define kIOMapperSystem ((IOMapper *) 0)
140
// Purgeable states for IOMemoryDescriptor::setPurgeable() (see that method's
// documentation below).  The volatile-group/behavior/ordering modifiers map
// directly onto the Mach VM_VOLATILE_* / VM_PURGABLE_* constants.
enum
{
    kIOMemoryPurgeableKeepCurrent = 1,   // query only; leave state unchanged

    kIOMemoryPurgeableNonVolatile = 2,
    kIOMemoryPurgeableVolatile    = 3,
    kIOMemoryPurgeableEmpty       = 4,

    // modifiers for kIOMemoryPurgeableVolatile behavior
    kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
    kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
    kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
    kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
    kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
    kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
    kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
    kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
    kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
    kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
    kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
    kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
    kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
// Operation codes for IOMemoryDescriptor::performOperation() (documented on
// that method below): cache flush/store for non-coherent I/O, plus
// encryption-state control.
enum
{
    kIOMemoryIncoherentIOFlush = 1,
    kIOMemoryIncoherentIOStore = 2,

    kIOMemoryClearEncrypted    = 50,
    kIOMemorySetEncrypted      = 51,
};
172
173 #define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
174
// Constraints a mapper must honor when creating a DMA mapping
// (consumed by dmaMap()/kIOMDAddDMAMapSpec).  Layout is ABI: do not reorder.
struct IODMAMapSpecification
{
    uint64_t    alignment;       // required alignment of the mapping
    IOService * device;          // device the mapping is for
    uint32_t    options;
    uint8_t     numAddressBits;  // addressable bits of the target (e.g. 32)
    uint8_t     resvA[3];        // reserved / pad
    uint32_t    resvB[4];        // reserved
};
184
// A (possibly offset) run of physical pages handed to a mapper.
struct IODMAMapPageList
{
    uint32_t                pageOffset;     // byte offset into the first page
    uint32_t                pageListCount;  // number of entries in pageList
    const upl_page_info_t * pageList;       // physical page descriptors
};
191
192 // mapOptions for iovmMapMemory
enum
{
    kIODMAMapReadAccess           = 0x00000001,
    kIODMAMapWriteAccess          = 0x00000002,
    kIODMAMapPhysicallyContiguous = 0x00000010,
    kIODMAMapDeviceMemory         = 0x00000020,
    kIODMAMapPagingPath           = 0x00000040,
    kIODMAMapIdentityMap          = 0x00000080,

    kIODMAMapPageListFullyOccupied = 0x00000100,
    kIODMAMapFixedAddress          = 0x00000200,
};
205
206 #ifdef KERNEL_PRIVATE
207
208 // Used for dmaCommandOperation communications for IODMACommand and mappers
209
// Op codes for dmaCommandOperation(); the top byte
// (kIOMDDMACommandOperationMask) selects the operation, low bits modify it.
enum {
    kIOMDWalkSegments             = 0x01000000,
    kIOMDFirstSegment             = 1 | kIOMDWalkSegments,
    kIOMDGetCharacteristics       = 0x02000000,
    kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
    kIOMDDMAActive                = 0x03000000,
    kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
    // NOTE: intentionally the same value as kIOMDDMAActive (low bit clear).
    kIOMDSetDMAInactive           = kIOMDDMAActive,
    kIOMDAddDMAMapSpec            = 0x04000000,
    kIOMDDMAMap                   = 0x05000000,
    kIOMDDMACommandOperationMask  = 0xFF000000,
};
// Result buffer for the kIOMDGetCharacteristics operation.
struct IOMDDMACharacteristics {
    UInt64      fLength;       // total byte length of the descriptor
    UInt32      fSGCount;      // scatter/gather segment count — TODO confirm exact semantics
    UInt32      fPages;
    UInt32      fPageAlign;
    ppnum_t     fHighestPage;  // highest physical page number touched
    IODirection fDirection;
    UInt8       fIsPrepared;   // non-zero if the descriptor is prepared
};
231
// Argument buffer for the kIOMDDMAMap operation (see also dmaMap()'s
// matching parameter list below).
struct IOMDDMAMapArgs {
    IOMapper            * fMapper;       // mapper to create the mapping with
    IODMACommand        * fCommand;      // associated DMA command, if any
    IODMAMapSpecification fMapSpec;      // constraints for the mapping
    uint64_t              fOffset;       // byte offset into the descriptor
    uint64_t              fLength;       // byte length to map
    uint64_t              fAlloc;        // presumably the resulting IOVM address — confirm in implementation
    uint64_t              fAllocLength;  // presumably the resulting mapped length — confirm in implementation
    uint8_t               fMapContig;
};
242
// Argument buffer for kIOMDWalkSegments / kIOMDFirstSegment.
struct IOMDDMAWalkSegmentArgs {
    UInt64 fOffset;             // Input/Output offset
    UInt64 fIOVMAddr, fLength;  // Output variables
    UInt8 fMapped;              // Input Variable, Require mapped IOVMA
};
// Opaque 128-byte state carried between successive segment-walk calls.
typedef UInt8 IOMDDMAWalkSegmentState[128];
249
250 #endif /* KERNEL_PRIVATE */
251
// Distinguished values returned by getPreparationID() for descriptors that
// have no per-prepare identity.
enum
{
    kIOPreparationIDUnprepared     = 0,
    kIOPreparationIDUnsupported    = 1,
    kIOPreparationIDAlwaysPrepared = 2,
};
258
259 #ifdef XNU_KERNEL_PRIVATE
260 struct IOMemoryReference;
261 #endif
262
263
264 /*! @class IOMemoryDescriptor : public OSObject
265 @abstract An abstract base class defining common methods for describing physical or virtual memory.
266 @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
267
268 class IOMemoryDescriptor : public OSObject
269 {
270 friend class IOMemoryMap;
271 friend class IOMultiMemoryDescriptor;
272
273 OSDeclareDefaultStructors(IOMemoryDescriptor);
274
275 protected:
276
277 /*! @var reserved
278 Reserved for future use. (Internal use only) */
279 struct IOMemoryDescriptorReserved * reserved;
280
281 protected:
282 OSSet * _mappings;
283 IOOptionBits _flags;
284
285
286 #ifdef XNU_KERNEL_PRIVATE
287 public:
288 struct IOMemoryReference * _memRef;
289 protected:
290 #else
291 void * __iomd_reserved5;
292 #endif
293
294 #ifdef __LP64__
295 uint64_t __iomd_reserved1;
296 uint64_t __iomd_reserved2;
297 uint64_t __iomd_reserved3;
298 uint64_t __iomd_reserved4;
299 #else /* !__LP64__ */
300 IODirection _direction; /* use _flags instead */
301 #endif /* !__LP64__ */
302 IOByteCount _length; /* length of all ranges */
303 IOOptionBits _tag;
304
305 public:
306 typedef IOOptionBits DMACommandOps;
307 #ifndef __LP64__
308 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
309 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
310 #endif /* !__LP64__ */
311
312 /*! @function initWithOptions
313 @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
314 @discussion Note this function can be used to re-init a previously created memory descriptor.
315 @result true on success, false on failure. */
316 virtual bool initWithOptions(void * buffers,
317 UInt32 count,
318 UInt32 offset,
319 task_t task,
320 IOOptionBits options,
321 IOMapper * mapper = kIOMapperSystem);
322
323 #ifndef __LP64__
324 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
325 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
326 #endif /* !__LP64__ */
327
328 /*! @function setPurgeable
329 @abstract Control the purgeable status of a memory descriptors memory.
330 @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
331 @param newState - the desired new purgeable state of the memory:<br>
332 kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
333 kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
334 kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
335 kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
336 @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
337 kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
338 kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
339 kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
340 @result An IOReturn code. */
341
342 virtual IOReturn setPurgeable( IOOptionBits newState,
343 IOOptionBits * oldState );
344
345
346 /*! @function getPageCounts
347 @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
348 @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
349 @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
350 @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
351 @result An IOReturn code. */
352
353 IOReturn getPageCounts( IOByteCount * residentPageCount,
354 IOByteCount * dirtyPageCount);
355
356 /*! @function performOperation
357 @abstract Perform an operation on the memory descriptor's memory.
358 @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
359 @param options The operation to perform on the memory:<br>
360 kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
361 kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
362 @param offset A byte offset into the memory descriptor's memory.
363 @param length The length of the data range.
364 @result An IOReturn code. */
365
366 virtual IOReturn performOperation( IOOptionBits options,
367 IOByteCount offset, IOByteCount length );
368
369 // Used for dedicated communications for IODMACommand
370 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
371
372 /*! @function getPhysicalSegment
373 @abstract Break a memory descriptor into its physically contiguous segments.
374 @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
375 @param offset A byte offset into the memory whose physical address to return.
376 @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
377 @result A physical address, or zero if the offset is beyond the length of the memory. */
378
379 #ifdef __LP64__
380 virtual addr64_t getPhysicalSegment( IOByteCount offset,
381 IOByteCount * length,
382 IOOptionBits options = 0 ) = 0;
383 #else /* !__LP64__ */
384 virtual addr64_t getPhysicalSegment( IOByteCount offset,
385 IOByteCount * length,
386 IOOptionBits options );
387 #endif /* !__LP64__ */
388
389 virtual uint64_t getPreparationID( void );
390 void setPreparationID( void );
391
392 #ifdef XNU_KERNEL_PRIVATE
393 IOMemoryDescriptorReserved * getKernelReserved( void );
394 IOReturn dmaMap(
395 IOMapper * mapper,
396 IODMACommand * command,
397 const IODMAMapSpecification * mapSpec,
398 uint64_t offset,
399 uint64_t length,
400 uint64_t * mapAddress,
401 uint64_t * mapLength);
402
403 void setVMTags(vm_tag_t kernelTag, vm_tag_t userTag);
404 vm_tag_t getVMTag(vm_map_t map);
405 #endif
406
407 private:
408 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0);
409 #ifdef __LP64__
410 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
411 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
412 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
413 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
414 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
415 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
416 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
417 #else /* !__LP64__ */
418 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1);
419 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2);
420 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3);
421 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4);
422 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5);
423 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6);
424 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7);
425 #endif /* !__LP64__ */
426 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
427 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
428 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
429 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
430 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
431 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
432 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
433 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
434
435 protected:
436 virtual void free() APPLE_KEXT_OVERRIDE;
437 public:
438 static void initialize( void );
439
440 public:
441 /*! @function withAddress
442 @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
443 @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
444 @param address The virtual address of the first byte in the memory.
445 @param withLength The length of memory.
446 @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
447 @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
448
449 static IOMemoryDescriptor * withAddress(void * address,
450 IOByteCount withLength,
451 IODirection withDirection);
452
453 #ifndef __LP64__
454 static IOMemoryDescriptor * withAddress(IOVirtualAddress address,
455 IOByteCount withLength,
456 IODirection withDirection,
457 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
458 #endif /* !__LP64__ */
459
460 /*! @function withPhysicalAddress
461 @abstract Create an IOMemoryDescriptor to describe one physical range.
462 @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
463 @param address The physical address of the first byte in the memory.
464 @param withLength The length of memory.
465 @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
466 @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
467
468 static IOMemoryDescriptor * withPhysicalAddress(
469 IOPhysicalAddress address,
470 IOByteCount withLength,
471 IODirection withDirection );
472
473 #ifndef __LP64__
474 static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges,
475 UInt32 withCount,
476 IODirection withDirection,
477 task_t withTask,
478 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
479 #endif /* !__LP64__ */
480
481 /*! @function withAddressRange
482 @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
483 @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
484 @param address The virtual address of the first byte in the memory.
485 @param length The length of memory.
486 @param options
487 kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
488 @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
489 @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
490
491 static IOMemoryDescriptor * withAddressRange(
492 mach_vm_address_t address,
493 mach_vm_size_t length,
494 IOOptionBits options,
495 task_t task);
496
497 /*! @function withAddressRanges
498 @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
499 @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
500 @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
501 @param rangeCount The member count of the ranges array.
502 @param options
503 kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
504 kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
505 @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
506 @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
507
508 static IOMemoryDescriptor * withAddressRanges(
509 IOAddressRange * ranges,
510 UInt32 rangeCount,
511 IOOptionBits options,
512 task_t task);
513
514 /*! @function withOptions
515 @abstract Master initialiser for all variants of memory descriptors.
516 @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
517
518
519 @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
520
521 @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array. For options:type = UPL this field contains a total length.
522
523 @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
524
525 @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
526
527 @param options
528 kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
529 kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
530 kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
531 kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
532
533 @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
534
535 @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
536
537 static IOMemoryDescriptor *withOptions(void * buffers,
538 UInt32 count,
539 UInt32 offset,
540 task_t task,
541 IOOptionBits options,
542 IOMapper * mapper = kIOMapperSystem);
543
544 #ifndef __LP64__
545 static IOMemoryDescriptor * withPhysicalRanges(
546 IOPhysicalRange * ranges,
547 UInt32 withCount,
548 IODirection withDirection,
549 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
550 #endif /* !__LP64__ */
551
552 #ifndef __LP64__
553 static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of,
554 IOByteCount offset,
555 IOByteCount length,
556 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
557 #endif /* !__LP64__ */
558
559 /*! @function withPersistentMemoryDescriptor
560 @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
561 @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
562 @param originalMD The memory descriptor to be duplicated.
563 @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
564 static IOMemoryDescriptor *
565 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
566
567 #ifndef __LP64__
568 // obsolete initializers
569 // - initWithOptions is the designated initializer
570 virtual bool initWithAddress(void * address,
571 IOByteCount withLength,
572 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
573 virtual bool initWithAddress(IOVirtualAddress address,
574 IOByteCount withLength,
575 IODirection withDirection,
576 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
577 virtual bool initWithPhysicalAddress(
578 IOPhysicalAddress address,
579 IOByteCount withLength,
580 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
581 virtual bool initWithRanges(IOVirtualRange * ranges,
582 UInt32 withCount,
583 IODirection withDirection,
584 task_t withTask,
585 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
586 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
587 UInt32 withCount,
588 IODirection withDirection,
589 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
590 #endif /* __LP64__ */
591
592 /*! @function getDirection
593 @abstract Accessor to get the direction the memory descriptor was created with.
594 @discussion This method returns the direction the memory descriptor was created with.
595 @result The direction. */
596
597 virtual IODirection getDirection() const;
598
599 /*! @function getLength
600 @abstract Accessor to get the length of the memory descriptor (over all its ranges).
601 @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
602 @result The byte count. */
603
604 virtual IOByteCount getLength() const;
605
606 /*! @function setTag
607 @abstract Set the tag for the memory descriptor.
608 @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
609 @param tag The tag. */
610
611 virtual void setTag( IOOptionBits tag );
612
613 /*! @function getTag
614 @abstract Accessor to the retrieve the tag for the memory descriptor.
615 @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
616 @result The tag. */
617
618 virtual IOOptionBits getTag( void );
619
620 /*! @function readBytes
621 @abstract Copy data from the memory descriptor's buffer to the specified buffer.
622 @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
623 @param offset A byte offset into the memory descriptor's memory.
624 @param bytes The caller supplied buffer to copy the data to.
625 @param withLength The length of the data to copy.
626 @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
627
628 virtual IOByteCount readBytes(IOByteCount offset,
629 void * bytes, IOByteCount withLength);
630
631 /*! @function writeBytes
632 @abstract Copy data to the memory descriptor's buffer from the specified buffer.
633 @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
634 @param offset A byte offset into the memory descriptor's memory.
635 @param bytes The caller supplied buffer to copy the data from.
636 @param withLength The length of the data to copy.
637 @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
638
639 virtual IOByteCount writeBytes(IOByteCount offset,
640 const void * bytes, IOByteCount withLength);
641
642 #ifndef __LP64__
643 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
644 IOByteCount * length); /* 32-bit kernels only; LP64 uses the options-taking getPhysicalSegment overload */
645 #endif /* !__LP64__ */
646
647 /*! @function getPhysicalAddress
648 @abstract Return the physical address of the first byte in the memory.
649 @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
650 @result A physical address. */
651
652 IOPhysicalAddress getPhysicalAddress();
653
654 #ifndef __LP64__
655 virtual void * getVirtualSegment(IOByteCount offset,
656 IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
657 #endif /* !__LP64__ */
658
659 /*! @function prepare
660 @abstract Prepare the memory for an I/O transfer.
661 @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
662 @param forDirection The direction of the I/O about to be performed, or kIODirectionNone for the direction specified by the memory descriptor.
663 @result An IOReturn code. */
664
665 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;
666
667 /*! @function complete
668 @abstract Complete processing of the memory after an I/O transfer finishes.
669 @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
670 @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
671 @result An IOReturn code. */
672
673 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
674
675 /*
676 * Mapping functions.
677 */
678
679 /*! @function createMappingInTask
680 @abstract Maps an IOMemoryDescriptor into a task.
681 @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
682 @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
683 @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
684 @param options Mapping options are defined in IOTypes.h,<br>
685 kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
686 kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
687 kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
688 kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
689 kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
690 kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
691 kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
692 @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
693 @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
694 @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */
695
696 IOMemoryMap * createMappingInTask(
697 task_t intoTask,
698 mach_vm_address_t atAddress,
699 IOOptionBits options,
700 mach_vm_size_t offset = 0,
701 mach_vm_size_t length = 0 );
702
703 #ifndef __LP64__
704 virtual IOMemoryMap * map(
705 task_t intoTask,
706 IOVirtualAddress atAddress,
707 IOOptionBits options,
708 IOByteCount offset = 0,
709 IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED; /* use createMappingInTask() instead */
710 #endif /* !__LP64__ */
711
712 /*! @function map
713 @abstract Maps an IOMemoryDescriptor into the kernel map.
714 @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
715 @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
716 @result See the full version of the createMappingInTask method. */
717
718 virtual IOMemoryMap * map(
719 IOOptionBits options = 0 );
720
721 /*! @function setMapping
722 @abstract Establishes an already existing mapping.
723 @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
724 @param task Address space in which the mapping exists.
725 @param mapAddress Virtual address of the mapping.
726 @param options Caching and read-only attributes of the mapping.
727 @result An IOMemoryMap object created to represent the mapping. */
728
729 virtual IOMemoryMap * setMapping(
730 task_t task,
731 IOVirtualAddress mapAddress,
732 IOOptionBits options = 0 );
733
734 // Following methods are private implementation - not intended for use by kext clients
735
736 #ifdef __LP64__
737 virtual
738 #endif /* __LP64__ */
739 IOReturn redirect( task_t safeTask, bool redirect );
740
741 IOReturn handleFault(
742 void * _pager,
743 mach_vm_size_t sourceOffset,
744 mach_vm_size_t length);
745
746 IOReturn populateDevicePager(
747 void * pager,
748 vm_map_t addressMap,
749 mach_vm_address_t address,
750 mach_vm_size_t sourceOffset,
751 mach_vm_size_t length,
752 IOOptionBits options );
753
754 virtual IOMemoryMap * makeMapping(
755 IOMemoryDescriptor * owner,
756 task_t intoTask,
757 IOVirtualAddress atAddress,
758 IOOptionBits options,
759 IOByteCount offset,
760 IOByteCount length );
761
762 protected:
763 virtual void addMapping(
764 IOMemoryMap * mapping );
765
766 virtual void removeMapping(
767 IOMemoryMap * mapping );
768 // doMap/doUnmap are overridden by IOGeneralMemoryDescriptor (see below)
769 virtual IOReturn doMap(
770 vm_map_t addressMap,
771 IOVirtualAddress * atAddress,
772 IOOptionBits options,
773 IOByteCount sourceOffset = 0,
774 IOByteCount length = 0 );
775
776 virtual IOReturn doUnmap(
777 vm_map_t addressMap,
778 IOVirtualAddress logical,
779 IOByteCount length );
780 };
781
782 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
783
784 /*! @class IOMemoryMap : public OSObject
785 @abstract A class defining common methods for describing a memory mapping.
786 @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */
787
788 class IOMemoryMap : public OSObject
789 {
790 OSDeclareDefaultStructors(IOMemoryMap)
791 #ifdef XNU_KERNEL_PRIVATE
792 public:
793 IOMemoryDescriptor * fMemory;
794 IOMemoryMap * fSuperMap;
795 mach_vm_size_t fOffset;
796 mach_vm_address_t fAddress;
797 mach_vm_size_t fLength;
798 task_t fAddressTask;
799 vm_map_t fAddressMap;
800 IOOptionBits fOptions;
801 upl_t fRedirUPL;
802 ipc_port_t fRedirEntry;
803 IOMemoryDescriptor * fOwner;
804 uint8_t fUserClientUnmap;
805 #if IOTRACKING
806 IOTrackingUser fTracking;
807 #endif
808 #endif /* XNU_KERNEL_PRIVATE */
809
810 protected:
811 virtual void taggedRelease(const void *tag = 0) const APPLE_KEXT_OVERRIDE;
812 virtual void free() APPLE_KEXT_OVERRIDE;
813
814 public:
815 /*! @function getVirtualAddress
816 @abstract Accessor to the virtual address of the first byte in the mapping.
817 @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
818 @result A virtual address. */
819
820 virtual IOVirtualAddress getVirtualAddress();
821
822 /*! @function getPhysicalSegment
823 @abstract Break a mapping into its physically contiguous segments.
824 @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
825 @param offset A byte offset into the mapping whose physical address to return.
826 @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
827 @result A physical address, or zero if the offset is beyond the length of the mapping. */
828
829 #ifdef __LP64__
830 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
831 IOByteCount * length,
832 IOOptionBits options = 0);
833 #else /* !__LP64__ */
834 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
835 IOByteCount * length);
836 #endif /* !__LP64__ */
837
838 /*! @function getPhysicalAddress
839 @abstract Return the physical address of the first byte in the mapping.
840 @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
841 @result A physical address. */
842
843 IOPhysicalAddress getPhysicalAddress();
844
845 /*! @function getLength
846 @abstract Accessor to the length of the mapping.
847 @discussion This method returns the length of the mapping.
848 @result A byte count. */
849
850 virtual IOByteCount getLength();
851
852 /*! @function getAddressTask
853 @abstract Accessor to the task of the mapping.
854 @discussion This method returns the mach task the mapping exists in.
855 @result A mach task_t. */
856
857 virtual task_t getAddressTask();
858
859 /*! @function getMemoryDescriptor
860 @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
861 @discussion This method returns the IOMemoryDescriptor the mapping was created from.
862 @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */
863
864 virtual IOMemoryDescriptor * getMemoryDescriptor();
865
866 /*! @function getMapOptions
867 @abstract Accessor to the options the mapping was created with.
868 @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
869 @result Options for the mapping, including cache settings. */
870
871 virtual IOOptionBits getMapOptions();
872
873 /*! @function unmap
874 @abstract Force the IOMemoryMap to unmap, without destroying the object.
875 @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
876 @result An IOReturn code. */
877
878 virtual IOReturn unmap();
879
880 virtual void taskDied();
881
882 /*! @function redirect
883 @abstract Replace the memory mapped in a process with new backing memory.
884 @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
885 @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
886 @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
887 @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
888 @result An IOReturn code. */
889
890 #ifndef __LP64__
891 // For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
892 // for 64 bit, these fall together on the 64 bit one.
893 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
894 IOOptionBits options,
895 IOByteCount offset = 0);
896 #endif
897 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
898 IOOptionBits options,
899 mach_vm_size_t offset = 0);
900
901 #ifdef __LP64__
902 /*! @function getAddress
903 @abstract Accessor to the virtual address of the first byte in the mapping.
904 @discussion This method returns the virtual address of the first byte in the mapping.
905 @result A virtual address. */
906 inline mach_vm_address_t getAddress() __attribute__((always_inline));
907 /*! @function getSize
908 @abstract Accessor to the length of the mapping.
909 @discussion This method returns the length of the mapping.
910 @result A byte count. */
911 inline mach_vm_size_t getSize() __attribute__((always_inline));
912 #else /* !__LP64__ */
913 /*! @function getAddress
914 @abstract Accessor to the virtual address of the first byte in the mapping.
915 @discussion This method returns the virtual address of the first byte in the mapping.
916 @result A virtual address. */
917 virtual mach_vm_address_t getAddress();
918 /*! @function getSize
919 @abstract Accessor to the length of the mapping.
920 @discussion This method returns the length of the mapping.
921 @result A byte count. */
922 virtual mach_vm_size_t getSize();
923 #endif /* !__LP64__ */
924
925 #ifdef XNU_KERNEL_PRIVATE
926 // for IOMemoryDescriptor use
927 IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );
928
929 bool init(
930 task_t intoTask,
931 mach_vm_address_t toAddress,
932 IOOptionBits options,
933 mach_vm_size_t offset,
934 mach_vm_size_t length );
935
936 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
937
938 IOReturn redirect(
939 task_t intoTask, bool redirect );
940
941 IOReturn userClientUnmap();
942 #endif /* XNU_KERNEL_PRIVATE */
943
944 IOReturn wireRange(
945 uint32_t options,
946 mach_vm_size_t offset,
947 mach_vm_size_t length);
948
949 OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
950 OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
951 OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
952 OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
953 OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
954 OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
955 OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
956 OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
957 };
958
959 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
960 #ifdef XNU_KERNEL_PRIVATE
961 // Also these flags should not overlap with the options to
962 // IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
963 enum {
964 _kIOMemorySourceSegment = 0x00002000
965 };
966 #endif /* XNU_KERNEL_PRIVATE */
967
968 // The following classes are private implementation of IOMemoryDescriptor - they
969 // should not be referenced directly, just through the public API's in the
970 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
971 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
972 // no need to reference as anything but a generic IOMemoryDescriptor *.
973
974 class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
975 {
976 OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);
977
978 public:
979 union Ranges {
980 IOVirtualRange *v;
981 IOAddressRange *v64;
982 IOPhysicalRange *p;
983 void *uio;
984 };
985 protected:
986 Ranges _ranges;
987 unsigned _rangesCount; /* number of address ranges in list */
988 #ifndef __LP64__
989 bool _rangesIsAllocated; /* is list allocated by us? */
990 #endif /* !__LP64__ */
991
992 task_t _task; /* task where all ranges are mapped to */
993
994 union {
995 IOVirtualRange v;
996 IOPhysicalRange p;
997 } _singleRange; /* storage space for a single range */
998
999 unsigned _wireCount; /* number of outstanding wires */
1000
1001 #ifndef __LP64__
1002 uintptr_t _cachedVirtualAddress;
1003
1004 IOPhysicalAddress _cachedPhysicalAddress;
1005 #endif /* !__LP64__ */
1006
1007 bool _initialized; /* has superclass been initialized? */
1008
1009 public:
1010 virtual void free() APPLE_KEXT_OVERRIDE;
1011
1012 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;
1013
1014 virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;
1015
1016 #ifdef XNU_KERNEL_PRIVATE
1017 // Internal APIs may be made virtual at some time in the future.
1018 IOReturn wireVirtual(IODirection forDirection);
1019 IOReturn dmaMap(
1020 IOMapper * mapper,
1021 IODMACommand * command,
1022 const IODMAMapSpecification * mapSpec,
1023 uint64_t offset,
1024 uint64_t length,
1025 uint64_t * mapAddress,
1026 uint64_t * mapLength);
1027 bool initMemoryEntries(size_t size, IOMapper * mapper);
1028
1029 IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
1030 IOMemoryReference * realloc);
1031 void memoryReferenceFree(IOMemoryReference * ref);
1032 void memoryReferenceRelease(IOMemoryReference * ref);
1033
1034 IOReturn memoryReferenceCreate(
1035 IOOptionBits options,
1036 IOMemoryReference ** reference);
1037
1038 IOReturn memoryReferenceMap(IOMemoryReference * ref,
1039 vm_map_t map,
1040 mach_vm_size_t inoffset,
1041 mach_vm_size_t size,
1042 IOOptionBits options,
1043 mach_vm_address_t * inaddr);
1044
1045 static IOReturn memoryReferenceSetPurgeable(
1046 IOMemoryReference * ref,
1047 IOOptionBits newState,
1048 IOOptionBits * oldState);
1049 static IOReturn memoryReferenceGetPageCounts(
1050 IOMemoryReference * ref,
1051 IOByteCount * residentPageCount,
1052 IOByteCount * dirtyPageCount);
1053 #endif
1054
1055 private:
1056
1057 #ifndef __LP64__
1058 virtual void setPosition(IOByteCount position);
1059 virtual void mapIntoKernel(unsigned rangeIndex);
1060 virtual void unmapFromKernel();
1061 #endif /* !__LP64__ */
1062
1063 // Internal
1064 OSData * _memoryEntries;
1065 unsigned int _pages;
1066 ppnum_t _highestPage;
1067 uint32_t __iomd_reservedA;
1068 uint32_t __iomd_reservedB;
1069
1070 IOLock * _prepareLock;
1071
1072 public:
1073 /*
1074 * IOMemoryDescriptor required methods
1075 */
1076
1077 // Master initialiser
1078 virtual bool initWithOptions(void * buffers,
1079 UInt32 count,
1080 UInt32 offset,
1081 task_t task,
1082 IOOptionBits options,
1083 IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;
1084
1085 #ifndef __LP64__
1086 // Secondary initialisers
1087 virtual bool initWithAddress(void * address,
1088 IOByteCount withLength,
1089 IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1090
1091 virtual bool initWithAddress(IOVirtualAddress address,
1092 IOByteCount withLength,
1093 IODirection withDirection,
1094 task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1095
1096 virtual bool initWithPhysicalAddress(
1097 IOPhysicalAddress address,
1098 IOByteCount withLength,
1099 IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1100
1101 virtual bool initWithRanges( IOVirtualRange * ranges,
1102 UInt32 withCount,
1103 IODirection withDirection,
1104 task_t withTask,
1105 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1106
1107 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
1108 UInt32 withCount,
1109 IODirection withDirection,
1110 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1111
1112 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
1113 IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1114
1115 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1116 IOByteCount * length) APPLE_KEXT_OVERRIDE;
1117
1118 virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
1119 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1120
1121 virtual void * getVirtualSegment(IOByteCount offset,
1122 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1123 #endif /* !__LP64__ */
1124
1125 virtual IOReturn setPurgeable( IOOptionBits newState,
1126 IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;
1127
1128 virtual addr64_t getPhysicalSegment( IOByteCount offset,
1129 IOByteCount * length,
1130 #ifdef __LP64__
1131 IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
1132 #else /* !__LP64__ */
1133 IOOptionBits options ) APPLE_KEXT_OVERRIDE;
1134 #endif /* !__LP64__ */
1135
1136 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1137
1138 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1139
1140 virtual IOReturn doMap(
1141 vm_map_t addressMap,
1142 IOVirtualAddress * atAddress,
1143 IOOptionBits options,
1144 IOByteCount sourceOffset = 0,
1145 IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;
1146
1147 virtual IOReturn doUnmap(
1148 vm_map_t addressMap,
1149 IOVirtualAddress logical,
1150 IOByteCount length ) APPLE_KEXT_OVERRIDE;
1151
1152 virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;
1153
1154 // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
1155 static IOMemoryDescriptor *
1156 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
1157
1158 };
1159
1160 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1161
1162 #ifdef __LP64__
1163 mach_vm_address_t IOMemoryMap::getAddress()
1164 {
1165 return getVirtualAddress(); // inline forward; on LP64 IOVirtualAddress is already 64-bit wide
1166 }
1167
1168 mach_vm_size_t IOMemoryMap::getSize()
1169 {
1170 return getLength(); // inline forward to getLength()
1171 }
1172 #else /* !__LP64__ */
1173 #include <IOKit/IOSubMemoryDescriptor.h>
1174 #endif /* !__LP64__ */
1175
1176 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1177
1178 #endif /* !_IOMEMORYDESCRIPTOR_H */