]> git.saurik.com Git - apple/xnu.git/blob - iokit/IOKit/IOMemoryDescriptor.h
xnu-7195.60.75.tar.gz
[apple/xnu.git] / iokit / IOKit / IOMemoryDescriptor.h
1 /*
2 * Copyright (c) 1998-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSPtr.h>
36 #include <libkern/c++/OSContainers.h>
37 #include <DriverKit/IOMemoryDescriptor.h>
38 #include <DriverKit/IOMemoryMap.h>
39 #ifdef XNU_KERNEL_PRIVATE
40 #include <IOKit/IOKitDebug.h>
41 #endif
42
43 #include <mach/memory_object_types.h>
44
45 class IOMemoryDescriptor;
46 class IOMemoryMap;
47 class IOMapper;
48 class IOService;
49 class IODMACommand;
50
51 /*
52 * Direction of transfer, with respect to the described memory.
53 */
54 #ifdef __LP64__
55 enum
56 #else /* !__LP64__ */
57 enum IODirection
58 #endif /* !__LP64__ */
59 {
60 kIODirectionNone = 0x0,// same as VM_PROT_NONE
61 kIODirectionIn = 0x1,// User land 'read', same as VM_PROT_READ
62 kIODirectionOut = 0x2,// User land 'write', same as VM_PROT_WRITE
63 kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
64 kIODirectionInOut = kIODirectionIn | kIODirectionOut,
65
66 // these flags are valid for the prepare() method only
67 kIODirectionPrepareToPhys32 = 0x00000004,
68 kIODirectionPrepareNoFault = 0x00000008,
69 kIODirectionPrepareReserved1 = 0x00000010,
70 #define IODIRECTIONPREPARENONCOHERENTDEFINED 1
71 kIODirectionPrepareNonCoherent = 0x00000020,
72 #if KERNEL_PRIVATE
73 #define IODIRECTIONPREPAREAVOIDTHROTTLING 1
74 kIODirectionPrepareAvoidThrottling = 0x00000100,
75 #endif
76
77 // these flags are valid for the complete() method only
78 #define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
79 kIODirectionCompleteWithError = 0x00000040,
80 #define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
81 kIODirectionCompleteWithDataValid = 0x00000080,
82 };
83
84 #ifdef __LP64__
85 typedef IOOptionBits IODirection;
86 #endif /* __LP64__ */
87
88 /*
89 * IOOptionBits used in the withOptions variant
90 */
91 enum {
92 kIOMemoryDirectionMask = 0x00000007,
93 #ifdef XNU_KERNEL_PRIVATE
94 kIOMemoryAutoPrepare = 0x00000008,// Shared with Buffer MD
95 #endif
96
97 kIOMemoryTypeVirtual = 0x00000010,
98 kIOMemoryTypePhysical = 0x00000020,
99 kIOMemoryTypeUPL = 0x00000030,
100 kIOMemoryTypePersistentMD = 0x00000040,// Persistent Memory Descriptor
101 kIOMemoryTypeUIO = 0x00000050,
102 #ifdef __LP64__
103 kIOMemoryTypeVirtual64 = kIOMemoryTypeVirtual,
104 kIOMemoryTypePhysical64 = kIOMemoryTypePhysical,
105 #else /* !__LP64__ */
106 kIOMemoryTypeVirtual64 = 0x00000060,
107 kIOMemoryTypePhysical64 = 0x00000070,
108 #endif /* !__LP64__ */
109 kIOMemoryTypeMask = 0x000000f0,
110
111 kIOMemoryAsReference = 0x00000100,
112 kIOMemoryBufferPageable = 0x00000400,
113 kIOMemoryMapperNone = 0x00000800,// Shared with Buffer MD
114 kIOMemoryHostOnly = 0x00001000,// Never DMA accessible
115 #ifdef XNU_KERNEL_PRIVATE
116 kIOMemoryRedirected = 0x00004000,
117 kIOMemoryPreparedReadOnly = 0x00008000,
118 #endif
119 kIOMemoryPersistent = 0x00010000,
120 kIOMemoryMapCopyOnWrite = 0x00020000,
121 kIOMemoryRemote = 0x00040000,
122 kIOMemoryThreadSafe = 0x00100000,// Shared with Buffer MD
123 kIOMemoryClearEncrypt = 0x00200000,// Shared with Buffer MD
124 kIOMemoryUseReserve = 0x00800000,// Shared with Buffer MD
125 #define IOMEMORYUSERESERVEDEFINED 1
126
127 #ifdef XNU_KERNEL_PRIVATE
128 kIOMemoryBufferPurgeable = 0x00400000,
129 kIOMemoryBufferCacheMask = 0x70000000,
130 kIOMemoryBufferCacheShift = 28,
131 #endif
132 };
133
134 #define kIOMapperSystem ((IOMapper *) NULL)
135
136 enum{
137 kIOMemoryLedgerTagDefault = VM_LEDGER_TAG_DEFAULT,
138 kIOmemoryLedgerTagNetwork = VM_LEDGER_TAG_NETWORK,
139 kIOMemoryLedgerTagMedia = VM_LEDGER_TAG_MEDIA,
140 kIOMemoryLedgerTagGraphics = VM_LEDGER_TAG_GRAPHICS,
141 kIOMemoryLedgerTagNeural = VM_LEDGER_TAG_NEURAL,
142 };
143 enum{
144 kIOMemoryLedgerFlagNoFootprint = VM_LEDGER_FLAG_NO_FOOTPRINT,
145 };
146
147 enum{
148 kIOMemoryPurgeableKeepCurrent = 1,
149
150 kIOMemoryPurgeableNonVolatile = 2,
151 kIOMemoryPurgeableVolatile = 3,
152 kIOMemoryPurgeableEmpty = 4,
153
154 // modifiers for kIOMemoryPurgeableVolatile behavior
155 kIOMemoryPurgeableVolatileGroup0 = VM_VOLATILE_GROUP_0,
156 kIOMemoryPurgeableVolatileGroup1 = VM_VOLATILE_GROUP_1,
157 kIOMemoryPurgeableVolatileGroup2 = VM_VOLATILE_GROUP_2,
158 kIOMemoryPurgeableVolatileGroup3 = VM_VOLATILE_GROUP_3,
159 kIOMemoryPurgeableVolatileGroup4 = VM_VOLATILE_GROUP_4,
160 kIOMemoryPurgeableVolatileGroup5 = VM_VOLATILE_GROUP_5,
161 kIOMemoryPurgeableVolatileGroup6 = VM_VOLATILE_GROUP_6,
162 kIOMemoryPurgeableVolatileGroup7 = VM_VOLATILE_GROUP_7,
163 kIOMemoryPurgeableVolatileBehaviorFifo = VM_PURGABLE_BEHAVIOR_FIFO,
164 kIOMemoryPurgeableVolatileBehaviorLifo = VM_PURGABLE_BEHAVIOR_LIFO,
165 kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
166 kIOMemoryPurgeableVolatileOrderingNormal = VM_PURGABLE_ORDERING_NORMAL,
167 kIOMemoryPurgeableFaultOnAccess = VM_PURGABLE_DEBUG_FAULT,
168 };
169 enum{
170 kIOMemoryIncoherentIOFlush = 1,
171 kIOMemoryIncoherentIOStore = 2,
172
173 kIOMemoryClearEncrypted = 50,
174 kIOMemorySetEncrypted = 51,
175 };
176
177 #define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
178
179 struct IODMAMapSpecification {
180 uint64_t alignment;
181 IOService * device;
182 uint32_t options;
183 uint8_t numAddressBits;
184 uint8_t resvA[3];
185 uint32_t resvB[4];
186 };
187
188 struct IODMAMapPageList {
189 uint32_t pageOffset;
190 uint32_t pageListCount;
191 const upl_page_info_t * pageList;
192 };
193
194 // mapOptions for iovmMapMemory
195 enum{
196 kIODMAMapReadAccess = 0x00000001,
197 kIODMAMapWriteAccess = 0x00000002,
198 kIODMAMapPhysicallyContiguous = 0x00000010,
199 kIODMAMapDeviceMemory = 0x00000020,
200 kIODMAMapPagingPath = 0x00000040,
201 kIODMAMapIdentityMap = 0x00000080,
202
203 kIODMAMapPageListFullyOccupied = 0x00000100,
204 kIODMAMapFixedAddress = 0x00000200,
205 };
206
207 #ifdef KERNEL_PRIVATE
208
209 // Used for dmaCommandOperation communications for IODMACommand and mappers
210
211 enum {
212 kIOMDWalkSegments = 0x01000000,
213 kIOMDFirstSegment = 1 | kIOMDWalkSegments,
214 kIOMDGetCharacteristics = 0x02000000,
215 kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
216 kIOMDDMAActive = 0x03000000,
217 kIOMDSetDMAActive = 1 | kIOMDDMAActive,
218 kIOMDSetDMAInactive = kIOMDDMAActive,
219 kIOMDAddDMAMapSpec = 0x04000000,
220 kIOMDDMAMap = 0x05000000,
221 kIOMDDMAUnmap = 0x06000000,
222 kIOMDDMACommandOperationMask = 0xFF000000,
223 };
224 struct IOMDDMACharacteristics {
225 UInt64 fLength;
226 UInt32 fSGCount;
227 UInt32 fPages;
228 UInt32 fPageAlign;
229 ppnum_t fHighestPage;
230 IODirection fDirection;
231 UInt8 fIsPrepared;
232 };
233
234 struct IOMDDMAMapArgs {
235 IOMapper * fMapper;
236 IODMACommand * fCommand;
237 IODMAMapSpecification fMapSpec;
238 uint64_t fOffset;
239 uint64_t fLength;
240 uint64_t fAlloc;
241 uint64_t fAllocLength;
242 };
243
244 struct IOMDDMAWalkSegmentArgs {
245 UInt64 fOffset; // Input/Output offset
246 UInt64 fIOVMAddr, fLength; // Output variables
247 UInt8 fMapped; // Input Variable, Require mapped IOVMA
248 UInt64 fMappedBase; // Input base of mapping
249 };
250 typedef UInt8 IOMDDMAWalkSegmentState[128];
251
252 #endif /* KERNEL_PRIVATE */
253
254 enum{
255 kIOPreparationIDUnprepared = 0,
256 kIOPreparationIDUnsupported = 1,
257 kIOPreparationIDAlwaysPrepared = 2,
258 };
259
260 #ifdef KERNEL_PRIVATE
261 #define kIODescriptorIDInvalid (0)
262 #endif
263
264 #ifdef XNU_KERNEL_PRIVATE
265 struct IOMemoryReference;
266 #endif
267
268
269 /*! @class IOMemoryDescriptor : public OSObject
270 * @abstract An abstract base class defining common methods for describing physical or virtual memory.
271 * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
272
273 class IOMemoryDescriptor : public OSObject
274 {
275 friend class IOMemoryMap;
276 friend class IOMultiMemoryDescriptor;
277
278 OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
279
280 protected:
281
282 /*! @var reserved
283 * Reserved for future use. (Internal use only) */
284 struct IOMemoryDescriptorReserved * reserved;
285
286 protected:
287 OSPtr<OSSet> _mappings;
288 IOOptionBits _flags;
289
290
291 #ifdef XNU_KERNEL_PRIVATE
292 public:
293 struct IOMemoryReference * _memRef;
294 vm_tag_t _kernelTag;
295 vm_tag_t _userTag;
296 int16_t _dmaReferences;
297 uint16_t _internalFlags;
298 kern_allocation_name_t _mapName;
299 protected:
300 #else /* XNU_KERNEL_PRIVATE */
301 void * __iomd_reserved5;
302 uint16_t __iomd_reserved1[4];
303 uintptr_t __iomd_reserved2;
304 #endif /* XNU_KERNEL_PRIVATE */
305
306 uintptr_t __iomd_reserved3;
307 uintptr_t __iomd_reserved4;
308
309 #ifndef __LP64__
310 IODirection _direction; /* use _flags instead */
311 #endif /* !__LP64__ */
312 IOByteCount _length; /* length of all ranges */
313 IOOptionBits _tag;
314
315 public:
316 typedef IOOptionBits DMACommandOps;
317 #ifndef __LP64__
318 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
319 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
320 #endif /* !__LP64__ */
321
322 /*! @function initWithOptions
323 * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
324 * @discussion Note this function can be used to re-init a previously created memory descriptor.
325 * @result true on success, false on failure. */
326 virtual bool initWithOptions(void * buffers,
327 UInt32 count,
328 UInt32 offset,
329 task_t task,
330 IOOptionBits options,
331 IOMapper * mapper = kIOMapperSystem);
332
333 #ifndef __LP64__
334 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
335 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
336 #endif /* !__LP64__ */
337
338 /*! @function setPurgeable
339 * @abstract Control the purgeable status of a memory descriptors memory.
340 * @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
341 * @param newState - the desired new purgeable state of the memory:<br>
342 * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
343 * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
344 * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
345 * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
346 * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
347 * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
348 * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
349 * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
350 * @result An IOReturn code. */
351
352 virtual IOReturn setPurgeable( IOOptionBits newState,
353 IOOptionBits * oldState );
354
355 /*! @function setOwnership
356 * @abstract Control the ownership of a memory descriptors memory.
357 * @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
358 * @param newOwner - the task to be the new owner of the memory.
359 * @param newLedgerTag - the ledger this memory should be accounted in.
360 * @param newLedgerOptions - accounting options
361 * @result An IOReturn code. */
362
363 IOReturn setOwnership( task_t newOwner,
364 int newLedgerTag,
365 IOOptionBits newLedgerOptions );
366
367 /*! @function getPageCounts
368 * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
369 * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
370 * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
371 * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
372 * @result An IOReturn code. */
373
374 IOReturn getPageCounts( IOByteCount * residentPageCount,
375 IOByteCount * dirtyPageCount);
376
377 /*! @function performOperation
378 * @abstract Perform an operation on the memory descriptor's memory.
379 * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
380 * @param options The operation to perform on the memory:<br>
381 * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
382 * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
383 * @param offset A byte offset into the memory descriptor's memory.
384 * @param length The length of the data range.
385 * @result An IOReturn code. */
386
387 virtual IOReturn performOperation( IOOptionBits options,
388 IOByteCount offset, IOByteCount length );
389
390 // Used for dedicated communications for IODMACommand
391 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
392
393 /*! @function getPhysicalSegment
394 * @abstract Break a memory descriptor into its physically contiguous segments.
395 * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
396 * @param offset A byte offset into the memory whose physical address to return.
397 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
398 * @result A physical address, or zero if the offset is beyond the length of the memory. */
399
400 #ifdef __LP64__
401 virtual addr64_t getPhysicalSegment( IOByteCount offset,
402 IOByteCount * length,
403 IOOptionBits options = 0 ) = 0;
404 #else /* !__LP64__ */
405 virtual addr64_t getPhysicalSegment( IOByteCount offset,
406 IOByteCount * length,
407 IOOptionBits options );
408 #endif /* !__LP64__ */
409
410 virtual uint64_t getPreparationID( void );
411 void setPreparationID( void );
412
413 void setVMTags(uint32_t kernelTag, uint32_t userTag);
414 uint32_t getVMTag(vm_map_t map);
415
416 #ifdef KERNEL_PRIVATE
417 uint64_t getDescriptorID( void );
418 void setDescriptorID( void );
419
420 IOReturn ktraceEmitPhysicalSegments( void );
421 #endif
422
423 #ifdef XNU_KERNEL_PRIVATE
424 IOMemoryDescriptorReserved * getKernelReserved( void );
425 void cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
426 IOReturn dmaMap(
427 IOMapper * mapper,
428 IOMemoryDescriptor * memory,
429 IODMACommand * command,
430 const IODMAMapSpecification * mapSpec,
431 uint64_t offset,
432 uint64_t length,
433 uint64_t * mapAddress,
434 uint64_t * mapLength);
435 IOReturn dmaUnmap(
436 IOMapper * mapper,
437 IODMACommand * command,
438 uint64_t offset,
439 uint64_t mapAddress,
440 uint64_t mapLength);
441 void dmaMapRecord(
442 IOMapper * mapper,
443 IODMACommand * command,
444 uint64_t mapLength);
445 #endif
446
447 private:
448 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 0);
449 #ifdef __LP64__
450 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
451 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
452 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
453 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
454 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
455 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
456 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
457 #else /* !__LP64__ */
458 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 1);
459 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 2);
460 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 3);
461 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 4);
462 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 5);
463 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 6);
464 OSMetaClassDeclareReservedUsedX86(IOMemoryDescriptor, 7);
465 #endif /* !__LP64__ */
466 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
467 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
468 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
469 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
470 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
471 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
472 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
473 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
474
475 protected:
476 virtual void free(void) APPLE_KEXT_OVERRIDE;
477 public:
478 static void initialize( void );
479
480 public:
481 /*! @function withAddress
482 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
483 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
484 * @param address The virtual address of the first byte in the memory.
485 * @param withLength The length of memory.
486 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
487 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
488
489 static OSPtr<IOMemoryDescriptor> withAddress(void * address,
490 IOByteCount withLength,
491 IODirection withDirection);
492
493 #ifndef __LP64__
494 static OSPtr<IOMemoryDescriptor> withAddress(IOVirtualAddress address,
495 IOByteCount withLength,
496 IODirection withDirection,
497 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
498 #endif /* !__LP64__ */
499
500 /*! @function withPhysicalAddress
501 * @abstract Create an IOMemoryDescriptor to describe one physical range.
502 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
503 * @param address The physical address of the first byte in the memory.
504 * @param withLength The length of memory.
505 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
506 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
507
508 static OSPtr<IOMemoryDescriptor> withPhysicalAddress(
509 IOPhysicalAddress address,
510 IOByteCount withLength,
511 IODirection withDirection );
512
513 #ifndef __LP64__
514 static OSPtr<IOMemoryDescriptor> withRanges(IOVirtualRange * ranges,
515 UInt32 withCount,
516 IODirection withDirection,
517 task_t withTask,
518 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
519 #endif /* !__LP64__ */
520
521 /*! @function withAddressRange
522 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
523 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
524 * @param address The virtual address of the first byte in the memory.
525 * @param length The length of memory.
526 * @param options
527 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
528 * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
529 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
530
531 static OSPtr<IOMemoryDescriptor> withAddressRange(
532 mach_vm_address_t address,
533 mach_vm_size_t length,
534 IOOptionBits options,
535 task_t task);
536
537 /*! @function withAddressRanges
538 * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
539 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
540 * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
541 * @param rangeCount The member count of the ranges array.
542 * @param options
543 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
544 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
545 * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
546 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
547
548 static OSPtr<IOMemoryDescriptor> withAddressRanges(
549 IOAddressRange * ranges,
550 UInt32 rangeCount,
551 IOOptionBits options,
552 task_t task);
553
554 /*! @function withOptions
555 * @abstract Master initialiser for all variants of memory descriptors.
556 * @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
557 *
558 *
559 * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
560 *
561 * @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array. For options:type = UPL this field contains a total length.
562 *
563 * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
564 *
565 * @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
566 *
567 * @param options
568 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
569 * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
570 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
571 * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
572 *
573 * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
574 *
575 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
576
577 static OSPtr<IOMemoryDescriptor> withOptions(void * buffers,
578 UInt32 count,
579 UInt32 offset,
580 task_t task,
581 IOOptionBits options,
582 IOMapper * mapper = kIOMapperSystem);
583
584 #ifndef __LP64__
585 static OSPtr<IOMemoryDescriptor> withPhysicalRanges(
586 IOPhysicalRange * ranges,
587 UInt32 withCount,
588 IODirection withDirection,
589 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
590 #endif /* !__LP64__ */
591
592 #ifndef __LP64__
593 static OSPtr<IOMemoryDescriptor> withSubRange(IOMemoryDescriptor *of,
594 IOByteCount offset,
595 IOByteCount length,
596 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
597 #endif /* !__LP64__ */
598
599 /*! @function withPersistentMemoryDescriptor
600 * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
601 * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
602 * @param originalMD The memory descriptor to be duplicated.
603 * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
604 static OSPtr<IOMemoryDescriptor>
605 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
606
607 #ifndef __LP64__
608 // obsolete initializers
609 // - initWithOptions is the designated initializer
610 virtual bool initWithAddress(void * address,
611 IOByteCount withLength,
612 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
613 virtual bool initWithAddress(IOVirtualAddress address,
614 IOByteCount withLength,
615 IODirection withDirection,
616 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
617 virtual bool initWithPhysicalAddress(
618 IOPhysicalAddress address,
619 IOByteCount withLength,
620 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
621 virtual bool initWithRanges(IOVirtualRange * ranges,
622 UInt32 withCount,
623 IODirection withDirection,
624 task_t withTask,
625 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
626 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
627 UInt32 withCount,
628 IODirection withDirection,
629 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
630 #endif /* __LP64__ */
631
632 /*! @function getDirection
633 * @abstract Accessor to get the direction the memory descriptor was created with.
634 * @discussion This method returns the direction the memory descriptor was created with.
635 * @result The direction. */
636
637 virtual IODirection getDirection() const;
638
639 /*! @function getLength
640 * @abstract Accessor to get the length of the memory descriptor (over all its ranges).
641 * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
642 * @result The byte count. */
643
644 virtual IOByteCount getLength() const;
645
646 #define IOMEMORYDESCRIPTOR_SUPPORTS_GETDMAMAPLENGTH
647 uint64_t getDMAMapLength(uint64_t * offset = NULL);
648
649 /*! @function setTag
650 * @abstract Set the tag for the memory descriptor.
651 * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
652 * @param tag The tag. */
653
654 virtual void setTag( IOOptionBits tag );
655
656 /*! @function getTag
657 * @abstract Accessor to the retrieve the tag for the memory descriptor.
658 * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
659 * @result The tag. */
660
661 virtual IOOptionBits getTag( void );
662
663 /*! @function getFlags
664 * @abstract Accessor to the retrieve the options the memory descriptor was created with.
665 * @discussion Accessor to the retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
666 * @result The flags bitfield. */
667
668 uint64_t getFlags(void);
669
670 /*! @function readBytes
671 * @abstract Copy data from the memory descriptor's buffer to the specified buffer.
672 * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direcction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
673 * @param offset A byte offset into the memory descriptor's memory.
674 * @param bytes The caller supplied buffer to copy the data to.
675 * @param withLength The length of the data to copy.
676 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
677
678 virtual IOByteCount readBytes(IOByteCount offset,
679 void * bytes, IOByteCount withLength);
680
681 /*! @function writeBytes
682 * @abstract Copy data to the memory descriptor's buffer from the specified buffer.
683 * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direcction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
684 * @param offset A byte offset into the memory descriptor's memory.
685 * @param bytes The caller supplied buffer to copy the data from.
686 * @param withLength The length of the data to copy.
687 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
688
689 virtual IOByteCount writeBytes(IOByteCount offset,
690 const void * bytes, IOByteCount withLength);
691
692 #ifndef __LP64__
693 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
694 IOByteCount * length);
695 #endif /* !__LP64__ */
696
697 /*! @function getPhysicalAddress
698 * @abstract Return the physical address of the first byte in the memory.
699 * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
700 * @result A physical address. */
701
702 IOPhysicalAddress getPhysicalAddress();
703
704 #ifndef __LP64__
705 virtual void * getVirtualSegment(IOByteCount offset,
706 IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
707 #endif /* !__LP64__ */
708
709 /*! @function prepare
710 * @abstract Prepare the memory for an I/O transfer.
711 * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
712 * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
713 * @result An IOReturn code. */
714
715 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;
716
717 /*! @function complete
718 * @abstract Complete processing of the memory after an I/O transfer finishes.
719 * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is not longer respected. The direction is totally determined at prepare() time.
720 * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
721 * @result An IOReturn code. */
722
723 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
724
725 /*
726 * Mapping functions.
727 */
728
729 /*! @function createMappingInTask
730 * @abstract Maps a IOMemoryDescriptor into a task.
731 * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
732 * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
733 * @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
734 * @param options Mapping options are defined in IOTypes.h,<br>
735 * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
736 * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
737 * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
738 * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause and access fault.<br>
739 * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
740 * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
741 * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
742 * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
743 * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
744 * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */
745
746 OSPtr<IOMemoryMap> createMappingInTask(
747 task_t intoTask,
748 mach_vm_address_t atAddress,
749 IOOptionBits options,
750 mach_vm_size_t offset = 0,
751 mach_vm_size_t length = 0 );
752
753 #ifndef __LP64__
754 virtual OSPtr<IOMemoryMap> map(
755 task_t intoTask,
756 IOVirtualAddress atAddress,
757 IOOptionBits options,
758 IOByteCount offset = 0,
759 IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */
760 #endif /* !__LP64__ */
761
762 /*! @function map
763 * @abstract Maps a IOMemoryDescriptor into the kernel map.
764 * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
765 * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
766 * @result See the full version of the createMappingInTask method. */
767
768 virtual OSPtr<IOMemoryMap> map(
769 IOOptionBits options = 0 );
770
771 /*! @function setMapping
772 * @abstract Establishes an already existing mapping.
773 * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
774 * @param task Address space in which the mapping exists.
775 * @param mapAddress Virtual address of the mapping.
776 * @param options Caching and read-only attributes of the mapping.
777 * @result A IOMemoryMap object created to represent the mapping. */
778
779 virtual OSPtr<IOMemoryMap> setMapping(
780 task_t task,
781 IOVirtualAddress mapAddress,
782 IOOptionBits options = 0 );
783
784 // Following methods are private implementation
785
786 #ifdef __LP64__
787 virtual
788 #endif /* __LP64__ */
789 IOReturn redirect( task_t safeTask, bool redirect );
790
791 IOReturn handleFault(
792 void * _pager,
793 mach_vm_size_t sourceOffset,
794 mach_vm_size_t length);
795
796 IOReturn populateDevicePager(
797 void * pager,
798 vm_map_t addressMap,
799 mach_vm_address_t address,
800 mach_vm_size_t sourceOffset,
801 mach_vm_size_t length,
802 IOOptionBits options );
803
804 virtual IOMemoryMap * makeMapping(
805 IOMemoryDescriptor * owner,
806 task_t intoTask,
807 IOVirtualAddress atAddress,
808 IOOptionBits options,
809 IOByteCount offset,
810 IOByteCount length );
811
812 protected:
813 virtual void addMapping(
814 IOMemoryMap * mapping );
815
816 virtual void removeMapping(
817 IOMemoryMap * mapping );
818
819 virtual IOReturn doMap(
820 vm_map_t addressMap,
821 IOVirtualAddress * atAddress,
822 IOOptionBits options,
823 IOByteCount sourceOffset = 0,
824 IOByteCount length = 0 );
825
826 virtual IOReturn doUnmap(
827 vm_map_t addressMap,
828 IOVirtualAddress logical,
829 IOByteCount length );
830 };
831
832 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
833
834 /*! @class IOMemoryMap : public OSObject
835 * @abstract A class defining common methods for describing a memory mapping.
836 * @discussion The IOMemoryMap object represents a mapped range of memory, described by a IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */
837
838 class IOMemoryMap : public OSObject
839 {
840 OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
841 #ifdef XNU_KERNEL_PRIVATE
842 public:
843 OSPtr<IOMemoryDescriptor> fMemory;
844 OSPtr<IOMemoryMap> fSuperMap;
845 mach_vm_size_t fOffset;
846 mach_vm_address_t fAddress;
847 mach_vm_size_t fLength;
848 task_t fAddressTask;
849 vm_map_t fAddressMap;
850 IOOptionBits fOptions;
851 upl_t fRedirUPL;
852 ipc_port_t fRedirEntry;
853 IOMemoryDescriptor * fOwner;
854 uint8_t fUserClientUnmap;
855 #if IOTRACKING
856 IOTrackingUser fTracking;
857 #endif
858 #endif /* XNU_KERNEL_PRIVATE */
859
860 protected:
861 virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
862 virtual void free(void) APPLE_KEXT_OVERRIDE;
863
864 public:
865 /*! @function getVirtualAddress
866 * @abstract Accessor to the virtual address of the first byte in the mapping.
867 * @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
868 * @result A virtual address. */
869
870 virtual IOVirtualAddress getVirtualAddress(void);
871
872 /*! @function getPhysicalSegment
873 * @abstract Break a mapping into its physically contiguous segments.
874 * @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
875 * @param offset A byte offset into the mapping whose physical address to return.
876 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
877 * @result A physical address, or zero if the offset is beyond the length of the mapping. */
878
879 #ifdef __LP64__
880 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
881 IOByteCount * length,
882 IOOptionBits options = 0);
883 #else /* !__LP64__ */
884 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
885 IOByteCount * length);
886 #endif /* !__LP64__ */
887
888 /*! @function getPhysicalAddress
889 * @abstract Return the physical address of the first byte in the mapping.
890 * @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
891 * @result A physical address. */
892
893 IOPhysicalAddress getPhysicalAddress(void);
894
895 /*! @function getLength
896 * @abstract Accessor to the length of the mapping.
897 * @discussion This method returns the length of the mapping.
898 * @result A byte count. */
899
900 virtual IOByteCount getLength(void);
901
902 /*! @function getAddressTask
903 * @abstract Accessor to the task of the mapping.
904 * @discussion This method returns the mach task the mapping exists in.
905 * @result A mach task_t. */
906
907 virtual task_t getAddressTask();
908
909 /*! @function getMemoryDescriptor
910 * @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
911 * @discussion This method returns the IOMemoryDescriptor the mapping was created from.
912 * @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */
913
914 virtual IOMemoryDescriptor * getMemoryDescriptor();
915
916 /*! @function getMapOptions
917 * @abstract Accessor to the options the mapping was created with.
918 * @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
919 * @result Options for the mapping, including cache settings. */
920
921 virtual IOOptionBits getMapOptions();
922
923 /*! @function unmap
924 * @abstract Force the IOMemoryMap to unmap, without destroying the object.
925 * @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
926 * @result An IOReturn code. */
927
928 virtual IOReturn unmap();
929
930 virtual void taskDied();
931
932 /*! @function redirect
933 * @abstract Replace the memory mapped in a process with new backing memory.
934 * @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
935 * @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
936 * @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
937 * @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
938 * @result An IOReturn code. */
939
940 #ifndef __LP64__
941 // For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
942 // for 64 bit, these fall together on the 64 bit one.
943 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
944 IOOptionBits options,
945 IOByteCount offset = 0);
946 #endif
947 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
948 IOOptionBits options,
949 mach_vm_size_t offset = 0);
950
951 #ifdef __LP64__
952 /*! @function getAddress
953 * @abstract Accessor to the virtual address of the first byte in the mapping.
954 * @discussion This method returns the virtual address of the first byte in the mapping.
955 * @result A virtual address. */
956 inline mach_vm_address_t getAddress() __attribute__((always_inline));
957 /*! @function getSize
958 * @abstract Accessor to the length of the mapping.
959 * @discussion This method returns the length of the mapping.
960 * @result A byte count. */
961 inline mach_vm_size_t getSize() __attribute__((always_inline));
962 #else /* !__LP64__ */
963 /*! @function getAddress
964 * @abstract Accessor to the virtual address of the first byte in the mapping.
965 * @discussion This method returns the virtual address of the first byte in the mapping.
966 * @result A virtual address. */
967 virtual mach_vm_address_t getAddress();
968 /*! @function getSize
969 * @abstract Accessor to the length of the mapping.
970 * @discussion This method returns the length of the mapping.
971 * @result A byte count. */
972 virtual mach_vm_size_t getSize();
973 #endif /* !__LP64__ */
974
975 #ifdef XNU_KERNEL_PRIVATE
976 // for IOMemoryDescriptor use
977 IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );
978
979 bool init(
980 task_t intoTask,
981 mach_vm_address_t toAddress,
982 IOOptionBits options,
983 mach_vm_size_t offset,
984 mach_vm_size_t length );
985
986 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
987
988 IOReturn redirect(
989 task_t intoTask, bool redirect );
990
991 IOReturn userClientUnmap();
992 #endif /* XNU_KERNEL_PRIVATE */
993
994 IOReturn wireRange(
995 uint32_t options,
996 mach_vm_size_t offset,
997 mach_vm_size_t length);
998
999 OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
1000 OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
1001 OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
1002 OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
1003 OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
1004 OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
1005 OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
1006 OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
1007 };
1008
1009 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1010 #ifdef XNU_KERNEL_PRIVATE
1011 // Also these flags should not overlap with the options to
1012 // IOMemoryDescriptor::initWithRanges(... IOOptionsBits options);
1013 enum {
1014 _kIOMemorySourceSegment = 0x00002000
1015 };
1016 #endif /* XNU_KERNEL_PRIVATE */
1017
1018 // The following classes are private implementation of IOMemoryDescriptor - they
1019 // should not be referenced directly, just through the public API's in the
1020 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1021 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
1022 // no need to reference as anything but a generic IOMemoryDescriptor *.
1023
1024 class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
1025 {
1026 OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);
1027
1028 public:
1029 union Ranges {
1030 IOVirtualRange *v;
1031 IOAddressRange *v64;
1032 IOPhysicalRange *p;
1033 void *uio;
1034 };
1035 protected:
1036 Ranges _ranges;
1037 unsigned _rangesCount; /* number of address ranges in list */
1038 #ifndef __LP64__
1039 bool _rangesIsAllocated;/* is list allocated by us? */
1040 #endif /* !__LP64__ */
1041
1042 task_t _task; /* task where all ranges are mapped to */
1043
1044 union {
1045 IOVirtualRange v;
1046 IOPhysicalRange p;
1047 } _singleRange; /* storage space for a single range */
1048
1049 unsigned _wireCount; /* number of outstanding wires */
1050
1051 #ifndef __LP64__
1052 uintptr_t _cachedVirtualAddress;
1053
1054 IOPhysicalAddress _cachedPhysicalAddress;
1055 #endif /* !__LP64__ */
1056
1057 bool _initialized; /* has superclass been initialized? */
1058
1059 public:
1060 virtual void free() APPLE_KEXT_OVERRIDE;
1061
1062 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;
1063
1064 virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;
1065
1066 #ifdef XNU_KERNEL_PRIVATE
1067 // Internal APIs may be made virtual at some time in the future.
1068 IOReturn wireVirtual(IODirection forDirection);
1069 IOReturn dmaMap(
1070 IOMapper * mapper,
1071 IOMemoryDescriptor * memory,
1072 IODMACommand * command,
1073 const IODMAMapSpecification * mapSpec,
1074 uint64_t offset,
1075 uint64_t length,
1076 uint64_t * mapAddress,
1077 uint64_t * mapLength);
1078 bool initMemoryEntries(size_t size, IOMapper * mapper);
1079
1080 IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
1081 IOMemoryReference * realloc);
1082 void memoryReferenceFree(IOMemoryReference * ref);
1083 void memoryReferenceRelease(IOMemoryReference * ref);
1084
1085 IOReturn memoryReferenceCreate(
1086 IOOptionBits options,
1087 IOMemoryReference ** reference);
1088
1089 IOReturn memoryReferenceMap(IOMemoryReference * ref,
1090 vm_map_t map,
1091 mach_vm_size_t inoffset,
1092 mach_vm_size_t size,
1093 IOOptionBits options,
1094 mach_vm_address_t * inaddr);
1095
1096 IOReturn memoryReferenceMapNew(IOMemoryReference * ref,
1097 vm_map_t map,
1098 mach_vm_size_t inoffset,
1099 mach_vm_size_t size,
1100 IOOptionBits options,
1101 mach_vm_address_t * inaddr);
1102
1103 static IOReturn memoryReferenceSetPurgeable(
1104 IOMemoryReference * ref,
1105 IOOptionBits newState,
1106 IOOptionBits * oldState);
1107 static IOReturn memoryReferenceSetOwnership(
1108 IOMemoryReference * ref,
1109 task_t newOwner,
1110 int newLedgerTag,
1111 IOOptionBits newLedgerOptions);
1112 static IOReturn memoryReferenceGetPageCounts(
1113 IOMemoryReference * ref,
1114 IOByteCount * residentPageCount,
1115 IOByteCount * dirtyPageCount);
1116
1117 static uint64_t memoryReferenceGetDMAMapLength(
1118 IOMemoryReference * ref,
1119 uint64_t * offset);
1120
1121 #endif
1122
1123 private:
1124
1125 #ifndef __LP64__
1126 virtual void setPosition(IOByteCount position);
1127 virtual void mapIntoKernel(unsigned rangeIndex);
1128 virtual void unmapFromKernel();
1129 #endif /* !__LP64__ */
1130
1131 // Internal
1132 OSPtr<OSData> _memoryEntries;
1133 unsigned int _pages;
1134 ppnum_t _highestPage;
1135 uint32_t __iomd_reservedA;
1136 uint32_t __iomd_reservedB;
1137
1138 IOLock * _prepareLock;
1139
1140 public:
1141 /*
1142 * IOMemoryDescriptor required methods
1143 */
1144
1145 // Master initaliser
1146 virtual bool initWithOptions(void * buffers,
1147 UInt32 count,
1148 UInt32 offset,
1149 task_t task,
1150 IOOptionBits options,
1151 IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;
1152
1153 #ifndef __LP64__
1154 // Secondary initialisers
1155 virtual bool initWithAddress(void * address,
1156 IOByteCount withLength,
1157 IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1158
1159 virtual bool initWithAddress(IOVirtualAddress address,
1160 IOByteCount withLength,
1161 IODirection withDirection,
1162 task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1163
1164 virtual bool initWithPhysicalAddress(
1165 IOPhysicalAddress address,
1166 IOByteCount withLength,
1167 IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1168
1169 virtual bool initWithRanges( IOVirtualRange * ranges,
1170 UInt32 withCount,
1171 IODirection withDirection,
1172 task_t withTask,
1173 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1174
1175 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
1176 UInt32 withCount,
1177 IODirection withDirection,
1178 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1179
1180 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
1181 IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1182
1183 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1184 IOByteCount * length) APPLE_KEXT_OVERRIDE;
1185
1186 virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
1187 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1188
1189 virtual void * getVirtualSegment(IOByteCount offset,
1190 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1191 #endif /* !__LP64__ */
1192
1193 virtual IOReturn setPurgeable( IOOptionBits newState,
1194 IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;
1195
1196 IOReturn setOwnership( task_t newOwner,
1197 int newLedgerTag,
1198 IOOptionBits newLedgerOptions );
1199
1200 virtual addr64_t getPhysicalSegment( IOByteCount offset,
1201 IOByteCount * length,
1202 #ifdef __LP64__
1203 IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
1204 #else /* !__LP64__ */
1205 IOOptionBits options)APPLE_KEXT_OVERRIDE;
1206 #endif /* !__LP64__ */
1207
1208 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1209
1210 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1211
1212 virtual IOReturn doMap(
1213 vm_map_t addressMap,
1214 IOVirtualAddress * atAddress,
1215 IOOptionBits options,
1216 IOByteCount sourceOffset = 0,
1217 IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;
1218
1219 virtual IOReturn doUnmap(
1220 vm_map_t addressMap,
1221 IOVirtualAddress logical,
1222 IOByteCount length ) APPLE_KEXT_OVERRIDE;
1223
1224 virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;
1225
1226 // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
1227 static OSPtr<IOMemoryDescriptor>
1228 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
1229 };
1230
1231 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1232
1233 #ifdef __LP64__
1234 mach_vm_address_t
1235 IOMemoryMap::getAddress()
1236 {
1237 return getVirtualAddress();
1238 }
1239
1240 mach_vm_size_t
1241 IOMemoryMap::getSize()
1242 {
1243 return getLength();
1244 }
1245 #else /* !__LP64__ */
1246 #include <IOKit/IOSubMemoryDescriptor.h>
1247 #endif /* !__LP64__ */
1248
1249 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1250
1251 extern bool iokit_iomd_setownership_enabled;
1252
1253 #endif /* !_IOMEMORYDESCRIPTOR_H */