1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSContainers.h>
36 #include <DriverKit/IOMemoryDescriptor.h>
37 #include <DriverKit/IOMemoryMap.h>
38 #ifdef XNU_KERNEL_PRIVATE
39 #include <IOKit/IOKitDebug.h>
40 #endif
41
42 #include <mach/memory_object_types.h>
43
44 class IOMemoryMap;
45 class IOMapper;
46 class IOService;
47 class IODMACommand;
48
49 /*
50 * Direction of transfer, with respect to the described memory.
51 */
52 #ifdef __LP64__
53 enum
54 #else /* !__LP64__ */
55 enum IODirection
56 #endif /* !__LP64__ */
57 {
58 kIODirectionNone = 0x0,// same as VM_PROT_NONE
59 kIODirectionIn = 0x1,// User land 'read', same as VM_PROT_READ
60 kIODirectionOut = 0x2,// User land 'write', same as VM_PROT_WRITE
61 kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
62 kIODirectionInOut = kIODirectionIn | kIODirectionOut,
63
64 // these flags are valid for the prepare() method only
65 kIODirectionPrepareToPhys32 = 0x00000004,
66 kIODirectionPrepareNoFault = 0x00000008,
67 kIODirectionPrepareReserved1 = 0x00000010,
68 #define IODIRECTIONPREPARENONCOHERENTDEFINED 1
69 kIODirectionPrepareNonCoherent = 0x00000020,
70
71 // these flags are valid for the complete() method only
72 #define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
73 kIODirectionCompleteWithError = 0x00000040,
74 #define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
75 kIODirectionCompleteWithDataValid = 0x00000080,
76 };
77
78 #ifdef __LP64__
79 typedef IOOptionBits IODirection;
80 #endif /* __LP64__ */
81
82 /*
83 * IOOptionBits used in the withOptions variant
84 */
85 enum {
86 kIOMemoryDirectionMask = 0x00000007,
87 #ifdef XNU_KERNEL_PRIVATE
88 kIOMemoryAutoPrepare = 0x00000008,// Shared with Buffer MD
89 #endif
90
91 kIOMemoryTypeVirtual = 0x00000010,
92 kIOMemoryTypePhysical = 0x00000020,
93 kIOMemoryTypeUPL = 0x00000030,
94 kIOMemoryTypePersistentMD = 0x00000040,// Persistent Memory Descriptor
95 kIOMemoryTypeUIO = 0x00000050,
96 #ifdef __LP64__
97 kIOMemoryTypeVirtual64 = kIOMemoryTypeVirtual,
98 kIOMemoryTypePhysical64 = kIOMemoryTypePhysical,
99 #else /* !__LP64__ */
100 kIOMemoryTypeVirtual64 = 0x00000060,
101 kIOMemoryTypePhysical64 = 0x00000070,
102 #endif /* !__LP64__ */
103 kIOMemoryTypeMask = 0x000000f0,
104
105 kIOMemoryAsReference = 0x00000100,
106 kIOMemoryBufferPageable = 0x00000400,
107 kIOMemoryMapperNone = 0x00000800,// Shared with Buffer MD
108 kIOMemoryHostOnly = 0x00001000,// Never DMA accessible
109 #ifdef XNU_KERNEL_PRIVATE
110 kIOMemoryRedirected = 0x00004000,
111 kIOMemoryPreparedReadOnly = 0x00008000,
112 #endif
113 kIOMemoryPersistent = 0x00010000,
114 kIOMemoryMapCopyOnWrite = 0x00020000,
115 kIOMemoryRemote = 0x00040000,
116 kIOMemoryThreadSafe = 0x00100000,// Shared with Buffer MD
117 kIOMemoryClearEncrypt = 0x00200000,// Shared with Buffer MD
118 kIOMemoryUseReserve = 0x00800000,// Shared with Buffer MD
119 #define IOMEMORYUSERESERVEDEFINED 1
120
121 #ifdef XNU_KERNEL_PRIVATE
122 kIOMemoryBufferPurgeable = 0x00400000,
123 kIOMemoryBufferCacheMask = 0x70000000,
124 kIOMemoryBufferCacheShift = 28,
125 #endif
126 };
127
128 #define kIOMapperSystem ((IOMapper *) NULL)
129
130 enum{
131 kIOMemoryLedgerTagDefault = VM_LEDGER_TAG_DEFAULT,
132 kIOmemoryLedgerTagNetwork = VM_LEDGER_TAG_NETWORK,
133 kIOMemoryLedgerTagMedia = VM_LEDGER_TAG_MEDIA,
134 kIOMemoryLedgerTagGraphics = VM_LEDGER_TAG_GRAPHICS,
135 kIOMemoryLedgerTagNeural = VM_LEDGER_TAG_NEURAL,
136 };
137 enum{
138 kIOMemoryLedgerFlagNoFootprint = VM_LEDGER_FLAG_NO_FOOTPRINT,
139 };
140
141 enum{
142 kIOMemoryPurgeableKeepCurrent = 1,
143
144 kIOMemoryPurgeableNonVolatile = 2,
145 kIOMemoryPurgeableVolatile = 3,
146 kIOMemoryPurgeableEmpty = 4,
147
148 // modifiers for kIOMemoryPurgeableVolatile behavior
149 kIOMemoryPurgeableVolatileGroup0 = VM_VOLATILE_GROUP_0,
150 kIOMemoryPurgeableVolatileGroup1 = VM_VOLATILE_GROUP_1,
151 kIOMemoryPurgeableVolatileGroup2 = VM_VOLATILE_GROUP_2,
152 kIOMemoryPurgeableVolatileGroup3 = VM_VOLATILE_GROUP_3,
153 kIOMemoryPurgeableVolatileGroup4 = VM_VOLATILE_GROUP_4,
154 kIOMemoryPurgeableVolatileGroup5 = VM_VOLATILE_GROUP_5,
155 kIOMemoryPurgeableVolatileGroup6 = VM_VOLATILE_GROUP_6,
156 kIOMemoryPurgeableVolatileGroup7 = VM_VOLATILE_GROUP_7,
157 kIOMemoryPurgeableVolatileBehaviorFifo = VM_PURGABLE_BEHAVIOR_FIFO,
158 kIOMemoryPurgeableVolatileBehaviorLifo = VM_PURGABLE_BEHAVIOR_LIFO,
159 kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
160 kIOMemoryPurgeableVolatileOrderingNormal = VM_PURGABLE_ORDERING_NORMAL,
161 kIOMemoryPurgeableFaultOnAccess = VM_PURGABLE_DEBUG_FAULT,
162 };
163 enum{
164 kIOMemoryIncoherentIOFlush = 1,
165 kIOMemoryIncoherentIOStore = 2,
166
167 kIOMemoryClearEncrypted = 50,
168 kIOMemorySetEncrypted = 51,
169 };
170
171 #define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
172
173 struct IODMAMapSpecification {
174 uint64_t alignment;
175 IOService * device;
176 uint32_t options;
177 uint8_t numAddressBits;
178 uint8_t resvA[3];
179 uint32_t resvB[4];
180 };
181
182 struct IODMAMapPageList {
183 uint32_t pageOffset;
184 uint32_t pageListCount;
185 const upl_page_info_t * pageList;
186 };
187
188 // mapOptions for iovmMapMemory
189 enum{
190 kIODMAMapReadAccess = 0x00000001,
191 kIODMAMapWriteAccess = 0x00000002,
192 kIODMAMapPhysicallyContiguous = 0x00000010,
193 kIODMAMapDeviceMemory = 0x00000020,
194 kIODMAMapPagingPath = 0x00000040,
195 kIODMAMapIdentityMap = 0x00000080,
196
197 kIODMAMapPageListFullyOccupied = 0x00000100,
198 kIODMAMapFixedAddress = 0x00000200,
199 };
200
201 #ifdef KERNEL_PRIVATE
202
203 // Used for dmaCommandOperation communications for IODMACommand and mappers
204
205 enum {
206 kIOMDWalkSegments = 0x01000000,
207 kIOMDFirstSegment = 1 | kIOMDWalkSegments,
208 kIOMDGetCharacteristics = 0x02000000,
209 kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
210 kIOMDDMAActive = 0x03000000,
211 kIOMDSetDMAActive = 1 | kIOMDDMAActive,
212 kIOMDSetDMAInactive = kIOMDDMAActive,
213 kIOMDAddDMAMapSpec = 0x04000000,
214 kIOMDDMAMap = 0x05000000,
215 kIOMDDMAUnmap = 0x06000000,
216 kIOMDDMACommandOperationMask = 0xFF000000,
217 };
218 struct IOMDDMACharacteristics {
219 UInt64 fLength;
220 UInt32 fSGCount;
221 UInt32 fPages;
222 UInt32 fPageAlign;
223 ppnum_t fHighestPage;
224 IODirection fDirection;
225 UInt8 fIsPrepared;
226 };
227
228 struct IOMDDMAMapArgs {
229 IOMapper * fMapper;
230 IODMACommand * fCommand;
231 IODMAMapSpecification fMapSpec;
232 uint64_t fOffset;
233 uint64_t fLength;
234 uint64_t fAlloc;
235 uint64_t fAllocLength;
236 uint8_t fMapContig;
237 };
238
239 struct IOMDDMAWalkSegmentArgs {
240 UInt64 fOffset; // Input/Output offset
241 UInt64 fIOVMAddr, fLength; // Output variables
242 UInt8 fMapped; // Input Variable, Require mapped IOVMA
243 UInt64 fMappedBase; // Input base of mapping
244 };
245 typedef UInt8 IOMDDMAWalkSegmentState[128];
246 // fMapped:
247 enum{
248 kIOMDDMAWalkMappedLocal = 2
249 };
250
251 #endif /* KERNEL_PRIVATE */
252
253 enum{
254 kIOPreparationIDUnprepared = 0,
255 kIOPreparationIDUnsupported = 1,
256 kIOPreparationIDAlwaysPrepared = 2,
257 };
258
259 #ifdef XNU_KERNEL_PRIVATE
260 struct IOMemoryReference;
261 #endif
262
263
264 /*! @class IOMemoryDescriptor : public OSObject
265 * @abstract An abstract base class defining common methods for describing physical or virtual memory.
266 * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
267
268 class IOMemoryDescriptor : public OSObject
269 {
270 friend class IOMemoryMap;
271 friend class IOMultiMemoryDescriptor;
272
273 OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
274
275 protected:
276
277 /*! @var reserved
278 * Reserved for future use. (Internal use only) */
279 struct IOMemoryDescriptorReserved * reserved;
280
281 protected:
282 OSSet * _mappings;
283 IOOptionBits _flags;
284
285
286 #ifdef XNU_KERNEL_PRIVATE
287 public:
288 struct IOMemoryReference * _memRef;
289 vm_tag_t _kernelTag;
290 vm_tag_t _userTag;
291 int16_t _dmaReferences;
292 uint16_t _internalFlags;
293 kern_allocation_name_t _mapName;
294 protected:
295 #else /* XNU_KERNEL_PRIVATE */
296 void * __iomd_reserved5;
297 uint16_t __iomd_reserved1[4];
298 uintptr_t __iomd_reserved2;
299 #endif /* XNU_KERNEL_PRIVATE */
300
301 uintptr_t __iomd_reserved3;
302 uintptr_t __iomd_reserved4;
303
304 #ifndef __LP64__
305 IODirection _direction; /* use _flags instead */
306 #endif /* !__LP64__ */
307 IOByteCount _length; /* length of all ranges */
308 IOOptionBits _tag;
309
310 public:
311 typedef IOOptionBits DMACommandOps;
312 #ifndef __LP64__
313 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
314 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
315 #endif /* !__LP64__ */
316
317 /*! @function initWithOptions
318 * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
319 * @discussion Note this function can be used to re-init a previously created memory descriptor.
320 * @result true on success, false on failure. */
321 virtual bool initWithOptions(void * buffers,
322 UInt32 count,
323 UInt32 offset,
324 task_t task,
325 IOOptionBits options,
326 IOMapper * mapper = kIOMapperSystem);
327
328 #ifndef __LP64__
329 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
330 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
331 #endif /* !__LP64__ */
332
333 /*! @function setPurgeable
334  * @abstract Control the purgeable status of a memory descriptor's memory.
335  * @discussion Buffers may be allocated with the ability to have their purgeable status changed - an IOBufferMemoryDescriptor created with the kIOMemoryPurgeable option is such a buffer, and in user space VM_FLAGS_PURGEABLE may be passed to vm_allocate() to allocate one. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
336 * @param newState - the desired new purgeable state of the memory:<br>
337 * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
338 * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
339 * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
340 * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
341 * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
342 * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
343 * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
344 * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
345 * @result An IOReturn code. */
346
347 virtual IOReturn setPurgeable( IOOptionBits newState,
348 IOOptionBits * oldState );
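/* A minimal, hypothetical sketch of setPurgeable() use; "buffer" stands for an
 * IOBufferMemoryDescriptor created elsewhere with the kIOMemoryPurgeable option
 * and is not part of this header.
 *
 *   IOOptionBits oldState;
 *   // Mark the buffer volatile; the VM system may now reclaim its pages.
 *   buffer->setPurgeable(kIOMemoryPurgeableVolatile, NULL);
 *   // ... later, take it back for use and check whether the contents survived.
 *   if ((kIOReturnSuccess == buffer->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState))
 *       && (kIOMemoryPurgeableEmpty == oldState)) {
 *       // pages were reclaimed while volatile - the contents must be regenerated
 *   }
 */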
349
350 /*! @function setOwnership
351  * @abstract Control the ownership of a memory descriptor's memory.
352  * @discussion An IOBufferMemoryDescriptor is owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
353 * @param newOwner - the task to be the new owner of the memory.
354 * @param newLedgerTag - the ledger this memory should be accounted in.
355 * @param newLedgerOptions - accounting options
356 * @result An IOReturn code. */
357
358 IOReturn setOwnership( task_t newOwner,
359 int newLedgerTag,
360 IOOptionBits newLedgerOptions );
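/* A hypothetical sketch of setOwnership(); "buffer" and "clientTask" are
 * illustrative names, not part of this header.
 *
 *   // Account the buffer against clientTask's media ledger, without adding it
 *   // to that task's memory footprint.
 *   buffer->setOwnership(clientTask, kIOMemoryLedgerTagMedia,
 *                        kIOMemoryLedgerFlagNoFootprint);
 */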
361
362 /*! @function getPageCounts
363 * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
364 * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
365 * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
366 * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
367 * @result An IOReturn code. */
368
369 IOReturn getPageCounts( IOByteCount * residentPageCount,
370 IOByteCount * dirtyPageCount);
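/* A minimal sketch of getPageCounts(); "md" is an illustrative IOMemoryDescriptor,
 * not part of this header.
 *
 *   IOByteCount resident, dirty;
 *   if (kIOReturnSuccess == md->getPageCounts(&resident, &dirty)) {
 *       IOLog("resident pages: %llu, dirty pages: %llu\n",
 *             (uint64_t) resident, (uint64_t) dirty);
 *   }
 */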
371
372 /*! @function performOperation
373 * @abstract Perform an operation on the memory descriptor's memory.
374 * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
375 * @param options The operation to perform on the memory:<br>
376 * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
377 * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
378 * @param offset A byte offset into the memory descriptor's memory.
379 * @param length The length of the data range.
380 * @result An IOReturn code. */
381
382 virtual IOReturn performOperation( IOOptionBits options,
383 IOByteCount offset, IOByteCount length );
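/* A hypothetical sketch of performOperation() for non-coherent DMA; "md" is an
 * illustrative descriptor that has already been prepared.
 *
 *   // Store and flush the processor cache for the first page of the memory
 *   // before handing it to a non-coherent bus master.
 *   md->performOperation(kIOMemoryIncoherentIOFlush, 0, page_size);
 */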
384
385 // Used for dedicated communications for IODMACommand
386 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
387
388 /*! @function getPhysicalSegment
389 * @abstract Break a memory descriptor into its physically contiguous segments.
390 * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
391 * @param offset A byte offset into the memory whose physical address to return.
392  * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
393 * @result A physical address, or zero if the offset is beyond the length of the memory. */
394
395 #ifdef __LP64__
396 virtual addr64_t getPhysicalSegment( IOByteCount offset,
397 IOByteCount * length,
398 IOOptionBits options = 0 ) = 0;
399 #else /* !__LP64__ */
400 virtual addr64_t getPhysicalSegment( IOByteCount offset,
401 IOByteCount * length,
402 IOOptionBits options );
403 #endif /* !__LP64__ */
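/* A minimal sketch of walking a descriptor's physically contiguous segments;
 * "md" is illustrative and must have been prepared first. For device DMA an
 * IODMACommand is generally preferred over using raw physical addresses.
 *
 *   IOByteCount offset = 0, segLen;
 *   addr64_t    segAddr;
 *   while ((segAddr = md->getPhysicalSegment(offset, &segLen, kIOMemoryMapperNone))) {
 *       // [segAddr, segAddr + segLen) is one contiguous physical range
 *       offset += segLen;
 *   }
 */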
404
405 virtual uint64_t getPreparationID( void );
406 void setPreparationID( void );
407
408 void setVMTags(uint32_t kernelTag, uint32_t userTag);
409 uint32_t getVMTag(vm_map_t map);
410
411 #ifdef XNU_KERNEL_PRIVATE
412 IOMemoryDescriptorReserved * getKernelReserved( void );
413 void cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
414 IOReturn dmaMap(
415 IOMapper * mapper,
416 IODMACommand * command,
417 const IODMAMapSpecification * mapSpec,
418 uint64_t offset,
419 uint64_t length,
420 uint64_t * mapAddress,
421 uint64_t * mapLength);
422 IOReturn dmaUnmap(
423 IOMapper * mapper,
424 IODMACommand * command,
425 uint64_t offset,
426 uint64_t mapAddress,
427 uint64_t mapLength);
428 void dmaMapRecord(
429 IOMapper * mapper,
430 IODMACommand * command,
431 uint64_t mapLength);
432 #endif
433
434 private:
435 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0);
436 #ifdef __LP64__
437 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
438 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
439 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
440 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
441 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
442 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
443 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
444 #else /* !__LP64__ */
445 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1);
446 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2);
447 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3);
448 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4);
449 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5);
450 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6);
451 OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7);
452 #endif /* !__LP64__ */
453 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
454 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
455 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
456 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
457 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
458 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
459 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
460 OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
461
462 protected:
463 virtual void free(void) APPLE_KEXT_OVERRIDE;
464 public:
465 static void initialize( void );
466
467 public:
468 /*! @function withAddress
469 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
470 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
471 * @param address The virtual address of the first byte in the memory.
472 * @param withLength The length of memory.
473 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
474 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
475
476 static IOMemoryDescriptor * withAddress(void * address,
477 IOByteCount withLength,
478 IODirection withDirection);
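/* A minimal sketch of withAddress() for a kernel-virtual buffer; "data" and
 * "kDataSize" are illustrative names, not part of this header.
 *
 *   IOMemoryDescriptor * md =
 *       IOMemoryDescriptor::withAddress(data, kDataSize, kIODirectionOutIn);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // ... perform the I/O ...
 *       md->complete();
 *   }
 *   if (md) md->release();
 */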
479
480 #ifndef __LP64__
481 static IOMemoryDescriptor * withAddress(IOVirtualAddress address,
482 IOByteCount withLength,
483 IODirection withDirection,
484 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
485 #endif /* !__LP64__ */
486
487 /*! @function withPhysicalAddress
488 * @abstract Create an IOMemoryDescriptor to describe one physical range.
489 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
490 * @param address The physical address of the first byte in the memory.
491 * @param withLength The length of memory.
492 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
493 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
494
495 static IOMemoryDescriptor * withPhysicalAddress(
496 IOPhysicalAddress address,
497 IOByteCount withLength,
498 IODirection withDirection );
499
500 #ifndef __LP64__
501 static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges,
502 UInt32 withCount,
503 IODirection withDirection,
504 task_t withTask,
505 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
506 #endif /* !__LP64__ */
507
508 /*! @function withAddressRange
509 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
510 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
511 * @param address The virtual address of the first byte in the memory.
512 * @param length The length of memory.
513 * @param options
514 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
515  * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this API. The task argument may be NULL to specify memory by physical address.
516 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
517
518 static IOMemoryDescriptor * withAddressRange(
519 mach_vm_address_t address,
520 mach_vm_size_t length,
521 IOOptionBits options,
522 task_t task);
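/* A hypothetical sketch describing one range of a user task's address space;
 * "userAddr", "userLen" and "clientTask" are illustrative (e.g. taken from an
 * IOUserClient external method), not part of this header.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRange(
 *       userAddr, userLen, kIODirectionOutIn, clientTask);
 *   if (md && (kIOReturnSuccess == md->prepare())) {
 *       // memory is wired; safe to DMA or to copy with readBytes()/writeBytes()
 *       md->complete();
 *   }
 *   if (md) md->release();
 */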
523
524 /*! @function withAddressRanges
525 * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
526 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
527 * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
528 * @param rangeCount The member count of the ranges array.
529 * @param options
530 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
531  * kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
532  * @param task The task each of the virtual ranges is mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this API. The task argument may be NULL to specify memory by physical address.
533 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
534
535 static IOMemoryDescriptor * withAddressRanges(
536 IOAddressRange * ranges,
537 UInt32 rangeCount,
538 IOOptionBits options,
539 task_t task);
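/* A minimal sketch of withAddressRanges() with a two-entry scatter list; the
 * addresses, lengths and "clientTask" shown are illustrative only.
 *
 *   IOAddressRange ranges[2] = {
 *       { userAddr0, userLen0 },
 *       { userAddr1, userLen1 },
 *   };
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withAddressRanges(
 *       ranges, 2, kIODirectionIn, clientTask);
 */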
540
541 /*! @function withOptions
542 * @abstract Master initialiser for all variants of memory descriptors.
543  * @discussion This method creates and initializes an IOMemoryDescriptor for memory. It has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
544 *
545 *
546  * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64, or on a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h APIs, primarily used internally by the UBC. IOVirtualRange or IOPhysicalRange are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
547 *
548  * @param count For options:type = Virtual or Physical, count contains the number of entries in the buffers array. For options:type = UPL this field contains a total length.
549 *
550 * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
551 *
552  * @param task Only used when options:type = Virtual; the task each of the virtual ranges is mapped into.
553 *
554 * @param options
555 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
556  * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates what type of basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
557  * kIOMemoryAsReference For options:type = Virtual or Physical this indicates that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
558 * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
559 *
560 * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
561 *
562 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
563
564 static IOMemoryDescriptor *withOptions(void * buffers,
565 UInt32 count,
566 UInt32 offset,
567 task_t task,
568 IOOptionBits options,
569 IOMapper * mapper = kIOMapperSystem);
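/* A hypothetical sketch of the general withOptions() factory for an array of
 * 64-bit virtual ranges; "ranges", "rangeCount" and "clientTask" are
 * illustrative. This is equivalent to the withAddressRanges() convenience above.
 *
 *   IOMemoryDescriptor * md = IOMemoryDescriptor::withOptions(
 *       ranges,                                   // IOAddressRange array
 *       rangeCount,                               // number of array entries
 *       0,                                        // offset (UPL type only)
 *       clientTask,                               // task owning the ranges
 *       kIOMemoryTypeVirtual64 | kIODirectionOutIn,
 *       kIOMapperSystem);
 */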
570
571 #ifndef __LP64__
572 static IOMemoryDescriptor * withPhysicalRanges(
573 IOPhysicalRange * ranges,
574 UInt32 withCount,
575 IODirection withDirection,
576 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
577 #endif /* !__LP64__ */
578
579 #ifndef __LP64__
580 static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of,
581 IOByteCount offset,
582 IOByteCount length,
583 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
584 #endif /* !__LP64__ */
585
586 /*! @function withPersistentMemoryDescriptor
587 * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
588  * @discussion If the original memory descriptor's address and length are still backed by the same real memory, i.e. the user hasn't deallocated and then reallocated memory at the same address, then the original memory descriptor is returned with an additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note it is not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
589 * @param originalMD The memory descriptor to be duplicated.
590  * @result Either the original memory descriptor with an additional retain or a new memory descriptor, or 0 for a bad original memory descriptor or some other resource shortage. */
591 static IOMemoryDescriptor *
592 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
593
594 #ifndef __LP64__
595 // obsolete initializers
596 // - initWithOptions is the designated initializer
597 virtual bool initWithAddress(void * address,
598 IOByteCount withLength,
599 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
600 virtual bool initWithAddress(IOVirtualAddress address,
601 IOByteCount withLength,
602 IODirection withDirection,
603 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
604 virtual bool initWithPhysicalAddress(
605 IOPhysicalAddress address,
606 IOByteCount withLength,
607 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
608 virtual bool initWithRanges(IOVirtualRange * ranges,
609 UInt32 withCount,
610 IODirection withDirection,
611 task_t withTask,
612 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
613 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
614 UInt32 withCount,
615 IODirection withDirection,
616 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
617 #endif /* __LP64__ */
618
619 /*! @function getDirection
620 * @abstract Accessor to get the direction the memory descriptor was created with.
621 * @discussion This method returns the direction the memory descriptor was created with.
622 * @result The direction. */
623
624 virtual IODirection getDirection() const;
625
626 /*! @function getLength
627 * @abstract Accessor to get the length of the memory descriptor (over all its ranges).
628 * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
629 * @result The byte count. */
630
631 virtual IOByteCount getLength() const;
632
633 /*! @function setTag
634 * @abstract Set the tag for the memory descriptor.
635 * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
636 * @param tag The tag. */
637
638 virtual void setTag( IOOptionBits tag );
639
640 /*! @function getTag
641  * @abstract Accessor to retrieve the tag for the memory descriptor.
642 * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
643 * @result The tag. */
644
645 virtual IOOptionBits getTag( void );
646
647 /*! @function getFlags
648  * @abstract Accessor to retrieve the options the memory descriptor was created with.
649  * @discussion Accessor to retrieve the options the memory descriptor was created with, and flags reflecting its state. These bits are defined by the kIOMemory* enum.
650 * @result The flags bitfield. */
651
652 uint64_t getFlags(void);
653
654 /*! @function readBytes
655 * @abstract Copy data from the memory descriptor's buffer to the specified buffer.
656  * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
657 * @param offset A byte offset into the memory descriptor's memory.
658 * @param bytes The caller supplied buffer to copy the data to.
659 * @param withLength The length of the data to copy.
660 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
661
662 virtual IOByteCount readBytes(IOByteCount offset,
663 void * bytes, IOByteCount withLength);
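/* A minimal sketch of readBytes(); "md" is an illustrative descriptor created
 * with the kIODirectionOut bit and already prepared.
 *
 *   uint8_t header[64];
 *   IOByteCount copied = md->readBytes(0, header, sizeof(header));
 *   if (copied != sizeof(header)) {
 *       // short copy: the offset/length exceeded the descriptor's extent
 *   }
 */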
664
665 /*! @function writeBytes
666 * @abstract Copy data to the memory descriptor's buffer from the specified buffer.
667  * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
668 * @param offset A byte offset into the memory descriptor's memory.
669 * @param bytes The caller supplied buffer to copy the data from.
670 * @param withLength The length of the data to copy.
671 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
672
673 virtual IOByteCount writeBytes(IOByteCount offset,
674 const void * bytes, IOByteCount withLength);
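/* A matching sketch for writeBytes(); "md" and "status" are illustrative, and
 * the descriptor is assumed to carry the kIODirectionIn bit and be prepared.
 *
 *   IOByteCount written = md->writeBytes(0, &status, sizeof(status));
 */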
675
676 #ifndef __LP64__
677 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
678 IOByteCount * length);
679 #endif /* !__LP64__ */
680
681 /*! @function getPhysicalAddress
682 * @abstract Return the physical address of the first byte in the memory.
683 * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
684 * @result A physical address. */
685
686 IOPhysicalAddress getPhysicalAddress();
687
688 #ifndef __LP64__
689 virtual void * getVirtualSegment(IOByteCount offset,
690 IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
691 #endif /* !__LP64__ */
692
693 /*! @function prepare
694 * @abstract Prepare the memory for an I/O transfer.
695  * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe; it is expected that the client can more easily guarantee single-threaded use of a particular memory descriptor.
696  * @param forDirection The direction of the I/O about to be performed, or kIODirectionNone for the direction specified by the memory descriptor.
697 * @result An IOReturn code. */
698
699 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;
700
701 /*! @function complete
702 * @abstract Complete processing of the memory after an I/O transfer finishes.
703  * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
704 * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
705 * @result An IOReturn code. */
706
707 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
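/* A minimal sketch of the required prepare()/complete() pairing around an I/O;
 * "md" is illustrative. Every successful prepare() should be balanced by a
 * complete() once the transfer has finished.
 *
 *   if (kIOReturnSuccess == md->prepare()) {
 *       // pages are wired down; program the DMA engine / issue the transfer
 *       md->complete();
 *   }
 */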
708
709 /*
710 * Mapping functions.
711 */
712
713 /*! @function createMappingInTask
714  * @abstract Maps an IOMemoryDescriptor into a task.
715 * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
716 * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
717  * @param atAddress If a placed mapping is requested, atAddress specifies its address, and kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
718 * @param options Mapping options are defined in IOTypes.h,<br>
719 * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
720 * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
721 * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
722  * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
723 * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
724 * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
725 * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
726 * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
727 * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
728 * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */
729
730 IOMemoryMap * createMappingInTask(
731 task_t intoTask,
732 mach_vm_address_t atAddress,
733 IOOptionBits options,
734 mach_vm_size_t offset = 0,
735 mach_vm_size_t length = 0 );
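/* A hypothetical sketch mapping a descriptor read-only into a user task;
 * "md" and "clientTask" are illustrative names.
 *
 *   IOMemoryMap * map = md->createMappingInTask(
 *       clientTask, 0, kIOMapAnywhere | kIOMapReadOnly);
 *   if (map) {
 *       mach_vm_address_t userVA = map->getAddress();
 *       // ... hand userVA to the client; keep "map" retained while it is in use ...
 *       map->release();    // releasing the map destroys the mapping
 *   }
 */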
736
737 #ifndef __LP64__
738 virtual IOMemoryMap * map(
739 task_t intoTask,
740 IOVirtualAddress atAddress,
741 IOOptionBits options,
742 IOByteCount offset = 0,
743 IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */
744 #endif /* !__LP64__ */
745
746 /*! @function map
747  * @abstract Maps an IOMemoryDescriptor into the kernel map.
748 * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
749 * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
750 * @result See the full version of the createMappingInTask method. */
751
752 virtual IOMemoryMap * map(
753 IOOptionBits options = 0 );
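/* A minimal sketch of the kernel-map shortcut; "md" is illustrative.
 *
 *   IOMemoryMap * map = md->map();
 *   if (map) {
 *       void * kva = (void *) map->getVirtualAddress();
 *       // ... access the memory through kva ...
 *       map->release();
 *   }
 */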
754
755 /*! @function setMapping
756 * @abstract Establishes an already existing mapping.
757 * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
758 * @param task Address space in which the mapping exists.
759 * @param mapAddress Virtual address of the mapping.
760 * @param options Caching and read-only attributes of the mapping.
761  * @result An IOMemoryMap object created to represent the mapping. */
762
763 virtual IOMemoryMap * setMapping(
764 task_t task,
765 IOVirtualAddress mapAddress,
766 IOOptionBits options = 0 );
767
768 // Following methods are private implementation
769
770 #ifdef __LP64__
771 virtual
772 #endif /* __LP64__ */
773 IOReturn redirect( task_t safeTask, bool redirect );
774
775 IOReturn handleFault(
776 void * _pager,
777 mach_vm_size_t sourceOffset,
778 mach_vm_size_t length);
779
780 IOReturn populateDevicePager(
781 void * pager,
782 vm_map_t addressMap,
783 mach_vm_address_t address,
784 mach_vm_size_t sourceOffset,
785 mach_vm_size_t length,
786 IOOptionBits options );
787
788 virtual IOMemoryMap * makeMapping(
789 IOMemoryDescriptor * owner,
790 task_t intoTask,
791 IOVirtualAddress atAddress,
792 IOOptionBits options,
793 IOByteCount offset,
794 IOByteCount length );
795
796 protected:
797 virtual void addMapping(
798 IOMemoryMap * mapping );
799
800 virtual void removeMapping(
801 IOMemoryMap * mapping );
802
803 virtual IOReturn doMap(
804 vm_map_t addressMap,
805 IOVirtualAddress * atAddress,
806 IOOptionBits options,
807 IOByteCount sourceOffset = 0,
808 IOByteCount length = 0 );
809
810 virtual IOReturn doUnmap(
811 vm_map_t addressMap,
812 IOVirtualAddress logical,
813 IOByteCount length );
814 };
815
816 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
817
818 /*! @class IOMemoryMap : public OSObject
819 * @abstract A class defining common methods for describing a memory mapping.
820  * @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */
821
822 class IOMemoryMap : public OSObject
823 {
824 OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
825 #ifdef XNU_KERNEL_PRIVATE
826 public:
827 IOMemoryDescriptor * fMemory;
828 IOMemoryMap * fSuperMap;
829 mach_vm_size_t fOffset;
830 mach_vm_address_t fAddress;
831 mach_vm_size_t fLength;
832 task_t fAddressTask;
833 vm_map_t fAddressMap;
834 IOOptionBits fOptions;
835 upl_t fRedirUPL;
836 ipc_port_t fRedirEntry;
837 IOMemoryDescriptor * fOwner;
838 uint8_t fUserClientUnmap;
839 #if IOTRACKING
840 IOTrackingUser fTracking;
841 #endif
842 #endif /* XNU_KERNEL_PRIVATE */
843
844 protected:
845 virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
846 virtual void free(void) APPLE_KEXT_OVERRIDE;
847
848 public:
849 /*! @function getVirtualAddress
850 * @abstract Accessor to the virtual address of the first byte in the mapping.
851 * @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
852 * @result A virtual address. */
853
854 virtual IOVirtualAddress getVirtualAddress(void);
855
856 /*! @function getPhysicalSegment
857 * @abstract Break a mapping into its physically contiguous segments.
858 * @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
859 * @param offset A byte offset into the mapping whose physical address to return.
860  * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
861 * @result A physical address, or zero if the offset is beyond the length of the mapping. */
862
863 #ifdef __LP64__
864 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
865 IOByteCount * length,
866 IOOptionBits options = 0);
867 #else /* !__LP64__ */
868 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
869 IOByteCount * length);
870 #endif /* !__LP64__ */
871
872 /*! @function getPhysicalAddress
873 * @abstract Return the physical address of the first byte in the mapping.
874 * @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
875 * @result A physical address. */
876
877 IOPhysicalAddress getPhysicalAddress(void);
878
879 /*! @function getLength
880 * @abstract Accessor to the length of the mapping.
881 * @discussion This method returns the length of the mapping.
882 * @result A byte count. */
883
884 virtual IOByteCount getLength(void);
885
886 /*! @function getAddressTask
887 * @abstract Accessor to the task of the mapping.
888 * @discussion This method returns the mach task the mapping exists in.
889 * @result A mach task_t. */
890
891 virtual task_t getAddressTask();
892
893 /*! @function getMemoryDescriptor
894 * @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
895 * @discussion This method returns the IOMemoryDescriptor the mapping was created from.
896 * @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */
897
898 virtual IOMemoryDescriptor * getMemoryDescriptor();
899
900 /*! @function getMapOptions
901 * @abstract Accessor to the options the mapping was created with.
902 * @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
903 * @result Options for the mapping, including cache settings. */
904
905 virtual IOOptionBits getMapOptions();
906
907 /*! @function unmap
908 * @abstract Force the IOMemoryMap to unmap, without destroying the object.
909 * @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
910 * @result An IOReturn code. */
911
912 virtual IOReturn unmap();
913
914 virtual void taskDied();
915
916 /*! @function redirect
917 * @abstract Replace the memory mapped in a process with new backing memory.
918  * @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
919 * @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
920 * @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
921 * @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
922 * @result An IOReturn code. */
923
924 #ifndef __LP64__
925 // For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
926 // for 64 bit, these fall together on the 64 bit one.
927 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
928 IOOptionBits options,
929 IOByteCount offset = 0);
930 #endif
931 virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
932 IOOptionBits options,
933 mach_vm_size_t offset = 0);
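/* A hypothetical sketch of redirect() on a mapping created with kIOMapUnique;
 * "uniqueMap" and "newMD" are illustrative names.
 *
 *   uniqueMap->redirect(NULL, 0);      // block client access to the mapping
 *   // ... take an atomic snapshot of the previous backing memory ...
 *   uniqueMap->redirect(newMD, 0);     // restore access, now backed by newMD
 */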
934
935 #ifdef __LP64__
936 /*! @function getAddress
937 * @abstract Accessor to the virtual address of the first byte in the mapping.
938 * @discussion This method returns the virtual address of the first byte in the mapping.
939 * @result A virtual address. */
940 inline mach_vm_address_t getAddress() __attribute__((always_inline));
941 /*! @function getSize
942 * @abstract Accessor to the length of the mapping.
943 * @discussion This method returns the length of the mapping.
944 * @result A byte count. */
945 inline mach_vm_size_t getSize() __attribute__((always_inline));
946 #else /* !__LP64__ */
947 /*! @function getAddress
948 * @abstract Accessor to the virtual address of the first byte in the mapping.
949 * @discussion This method returns the virtual address of the first byte in the mapping.
950 * @result A virtual address. */
951 virtual mach_vm_address_t getAddress();
952 /*! @function getSize
953 * @abstract Accessor to the length of the mapping.
954 * @discussion This method returns the length of the mapping.
955 * @result A byte count. */
956 virtual mach_vm_size_t getSize();
957 #endif /* !__LP64__ */
958
959 #ifdef XNU_KERNEL_PRIVATE
960 // for IOMemoryDescriptor use
961 IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );
962
963 bool init(
964 task_t intoTask,
965 mach_vm_address_t toAddress,
966 IOOptionBits options,
967 mach_vm_size_t offset,
968 mach_vm_size_t length );
969
970 bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);
971
972 IOReturn redirect(
973 task_t intoTask, bool redirect );
974
975 IOReturn userClientUnmap();
976 #endif /* XNU_KERNEL_PRIVATE */
977
978 IOReturn wireRange(
979 uint32_t options,
980 mach_vm_size_t offset,
981 mach_vm_size_t length);
982
983 OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
984 OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
985 OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
986 OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
987 OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
988 OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
989 OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
990 OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
991 };
992
993 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
994 #ifdef XNU_KERNEL_PRIVATE
995 // Also these flags should not overlap with the options to
996 // IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
997 enum {
998 _kIOMemorySourceSegment = 0x00002000
999 };
1000 #endif /* XNU_KERNEL_PRIVATE */
1001
1002 // The following classes are private implementation of IOMemoryDescriptor - they
1003 // should not be referenced directly, just through the public APIs in the
1004 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1005 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
1006 // no need to reference it as anything but a generic IOMemoryDescriptor *.
1007
1008 class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
1009 {
1010 OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);
1011
1012 public:
1013 union Ranges {
1014 IOVirtualRange *v;
1015 IOAddressRange *v64;
1016 IOPhysicalRange *p;
1017 void *uio;
1018 };
1019 protected:
1020 Ranges _ranges;
1021 unsigned _rangesCount; /* number of address ranges in list */
1022 #ifndef __LP64__
1023 bool _rangesIsAllocated;/* is list allocated by us? */
1024 #endif /* !__LP64__ */
1025
1026 task_t _task; /* task where all ranges are mapped to */
1027
1028 union {
1029 IOVirtualRange v;
1030 IOPhysicalRange p;
1031 } _singleRange; /* storage space for a single range */
1032
1033 unsigned _wireCount; /* number of outstanding wires */
1034
1035 #ifndef __LP64__
1036 uintptr_t _cachedVirtualAddress;
1037
1038 IOPhysicalAddress _cachedPhysicalAddress;
1039 #endif /* !__LP64__ */
1040
1041 bool _initialized; /* has superclass been initialized? */
1042
1043 public:
1044 virtual void free() APPLE_KEXT_OVERRIDE;
1045
1046 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;
1047
1048 virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;
1049
1050 #ifdef XNU_KERNEL_PRIVATE
1051 // Internal APIs may be made virtual at some time in the future.
1052 IOReturn wireVirtual(IODirection forDirection);
1053 IOReturn dmaMap(
1054 IOMapper * mapper,
1055 IODMACommand * command,
1056 const IODMAMapSpecification * mapSpec,
1057 uint64_t offset,
1058 uint64_t length,
1059 uint64_t * mapAddress,
1060 uint64_t * mapLength);
1061 bool initMemoryEntries(size_t size, IOMapper * mapper);
1062
1063 IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
1064 IOMemoryReference * realloc);
1065 void memoryReferenceFree(IOMemoryReference * ref);
1066 void memoryReferenceRelease(IOMemoryReference * ref);
1067
1068 IOReturn memoryReferenceCreate(
1069 IOOptionBits options,
1070 IOMemoryReference ** reference);
1071
1072 IOReturn memoryReferenceMap(IOMemoryReference * ref,
1073 vm_map_t map,
1074 mach_vm_size_t inoffset,
1075 mach_vm_size_t size,
1076 IOOptionBits options,
1077 mach_vm_address_t * inaddr);
1078
1079 static IOReturn memoryReferenceSetPurgeable(
1080 IOMemoryReference * ref,
1081 IOOptionBits newState,
1082 IOOptionBits * oldState);
1083 static IOReturn memoryReferenceSetOwnership(
1084 IOMemoryReference * ref,
1085 task_t newOwner,
1086 int newLedgerTag,
1087 IOOptionBits newLedgerOptions);
1088 static IOReturn memoryReferenceGetPageCounts(
1089 IOMemoryReference * ref,
1090 IOByteCount * residentPageCount,
1091 IOByteCount * dirtyPageCount);
1092 #endif
1093
1094 private:
1095
1096 #ifndef __LP64__
1097 virtual void setPosition(IOByteCount position);
1098 virtual void mapIntoKernel(unsigned rangeIndex);
1099 virtual void unmapFromKernel();
1100 #endif /* !__LP64__ */
1101
1102 // Internal
1103 OSData * _memoryEntries;
1104 unsigned int _pages;
1105 ppnum_t _highestPage;
1106 uint32_t __iomd_reservedA;
1107 uint32_t __iomd_reservedB;
1108
1109 IOLock * _prepareLock;
1110
1111 public:
1112 /*
1113 * IOMemoryDescriptor required methods
1114 */
1115
1116 // Master initialiser
1117 virtual bool initWithOptions(void * buffers,
1118 UInt32 count,
1119 UInt32 offset,
1120 task_t task,
1121 IOOptionBits options,
1122 IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;
1123
1124 #ifndef __LP64__
1125 // Secondary initialisers
1126 virtual bool initWithAddress(void * address,
1127 IOByteCount withLength,
1128 IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1129
1130 virtual bool initWithAddress(IOVirtualAddress address,
1131 IOByteCount withLength,
1132 IODirection withDirection,
1133 task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1134
1135 virtual bool initWithPhysicalAddress(
1136 IOPhysicalAddress address,
1137 IOByteCount withLength,
1138 IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1139
1140 virtual bool initWithRanges( IOVirtualRange * ranges,
1141 UInt32 withCount,
1142 IODirection withDirection,
1143 task_t withTask,
1144 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1145
1146 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
1147 UInt32 withCount,
1148 IODirection withDirection,
1149 bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1150
1151 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
1152 IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1153
1154 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
1155 IOByteCount * length) APPLE_KEXT_OVERRIDE;
1156
1157 virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
1158 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1159
1160 virtual void * getVirtualSegment(IOByteCount offset,
1161 IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
1162 #endif /* !__LP64__ */
1163
1164 virtual IOReturn setPurgeable( IOOptionBits newState,
1165 IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;
1166
1167 IOReturn setOwnership( task_t newOwner,
1168 int newLedgerTag,
1169 IOOptionBits newLedgerOptions );
1170
1171 virtual addr64_t getPhysicalSegment( IOByteCount offset,
1172 IOByteCount * length,
1173 #ifdef __LP64__
1174 IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
1175 #else /* !__LP64__ */
1176 IOOptionBits options)APPLE_KEXT_OVERRIDE;
1177 #endif /* !__LP64__ */
1178
1179 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1180
1181 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;
1182
1183 virtual IOReturn doMap(
1184 vm_map_t addressMap,
1185 IOVirtualAddress * atAddress,
1186 IOOptionBits options,
1187 IOByteCount sourceOffset = 0,
1188 IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;
1189
1190 virtual IOReturn doUnmap(
1191 vm_map_t addressMap,
1192 IOVirtualAddress logical,
1193 IOByteCount length ) APPLE_KEXT_OVERRIDE;
1194
1195 virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;
1196
1197 // Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
1198 static IOMemoryDescriptor *
1199 withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
1200 };
1201
1202 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1203
1204 #ifdef __LP64__
1205 mach_vm_address_t
1206 IOMemoryMap::getAddress()
1207 {
1208 return getVirtualAddress();
1209 }
1210
1211 mach_vm_size_t
1212 IOMemoryMap::getSize()
1213 {
1214 return getLength();
1215 }
1216 #else /* !__LP64__ */
1217 #include <IOKit/IOSubMemoryDescriptor.h>
1218 #endif /* !__LP64__ */
1219
1220 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1221
1222 extern boolean_t iokit_iomd_setownership_enabled;
1223
1224 #endif /* !_IOMEMORYDESCRIPTOR_H */