// iokit/IOKit/IOMemoryDescriptor.h — Apple XNU (xnu-6153.121.1)
1 /*
2 * Copyright (c) 1998-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _IOMEMORYDESCRIPTOR_H
29 #define _IOMEMORYDESCRIPTOR_H
30
31 #include <sys/cdefs.h>
32
33 #include <IOKit/IOTypes.h>
34 #include <IOKit/IOLocks.h>
35 #include <libkern/c++/OSContainers.h>
36 #include <DriverKit/IOMemoryDescriptor.h>
37 #include <DriverKit/IOMemoryMap.h>
38 #ifdef XNU_KERNEL_PRIVATE
39 #include <IOKit/IOKitDebug.h>
40 #endif
41
42 #include <mach/memory_object_types.h>
43
44 class IOMemoryMap;
45 class IOMapper;
46 class IOService;
47 class IODMACommand;
48
49 /*
50 * Direction of transfer, with respect to the described memory.
51 */
/*
 * On 32-bit kernels IODirection is a real enum type; on LP64 the enum is
 * anonymous and IODirection is typedef'd to IOOptionBits (below), so the
 * values combine freely with other IOOptionBits flags.
 */
#ifdef __LP64__
enum
#else /* !__LP64__ */
enum IODirection
#endif /* !__LP64__ */
{
    kIODirectionNone  = 0x0,        // same as VM_PROT_NONE
    kIODirectionIn    = 0x1,        // User land 'read', same as VM_PROT_READ
    kIODirectionOut   = 0x2,        // User land 'write', same as VM_PROT_WRITE
    kIODirectionOutIn = kIODirectionOut | kIODirectionIn,
    kIODirectionInOut = kIODirectionIn | kIODirectionOut,

    // these flags are valid for the prepare() method only
    kIODirectionPrepareToPhys32    = 0x00000004,
    kIODirectionPrepareNoFault     = 0x00000008,
    kIODirectionPrepareReserved1   = 0x00000010,
#define IODIRECTIONPREPARENONCOHERENTDEFINED 1
    kIODirectionPrepareNonCoherent = 0x00000020,
#if KERNEL_PRIVATE
#define IODIRECTIONPREPAREAVOIDTHROTTLING 1
    kIODirectionPrepareAvoidThrottling = 0x00000100,
#endif

    // these flags are valid for the complete() method only
#define IODIRECTIONCOMPLETEWITHERRORDEFINED 1
    kIODirectionCompleteWithError     = 0x00000040,
#define IODIRECTIONCOMPLETEWITHDATAVALIDDEFINED 1
    kIODirectionCompleteWithDataValid = 0x00000080,
};

#ifdef __LP64__
typedef IOOptionBits IODirection;
#endif /* __LP64__ */
85
86 /*
87 * IOOptionBits used in the withOptions variant
88 */
enum {
    // low 3 bits carry the IODirection
    kIOMemoryDirectionMask      = 0x00000007,
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryAutoPrepare        = 0x00000008,// Shared with Buffer MD
#endif

    // options:type sub-field — selects the flavor of backing memory descriptor,
    // and controls how the buffers/count/offset/task args of withOptions() are read
    kIOMemoryTypeVirtual        = 0x00000010,
    kIOMemoryTypePhysical       = 0x00000020,
    kIOMemoryTypeUPL            = 0x00000030,
    kIOMemoryTypePersistentMD   = 0x00000040,// Persistent Memory Descriptor
    kIOMemoryTypeUIO            = 0x00000050,
#ifdef __LP64__
    // on LP64, the 64-bit representations are the only ones, so these alias
    kIOMemoryTypeVirtual64      = kIOMemoryTypeVirtual,
    kIOMemoryTypePhysical64     = kIOMemoryTypePhysical,
#else /* !__LP64__ */
    kIOMemoryTypeVirtual64      = 0x00000060,
    kIOMemoryTypePhysical64     = 0x00000070,
#endif /* !__LP64__ */
    kIOMemoryTypeMask           = 0x000000f0,

    // the descriptor keeps a reference to the caller's ranges array
    // instead of copying it (see withOptions() headerdoc)
    kIOMemoryAsReference        = 0x00000100,
    kIOMemoryBufferPageable     = 0x00000400,
    kIOMemoryMapperNone         = 0x00000800,// Shared with Buffer MD
    kIOMemoryHostOnly           = 0x00001000,// Never DMA accessible
#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryRedirected         = 0x00004000,
    kIOMemoryPreparedReadOnly   = 0x00008000,
#endif
    kIOMemoryPersistent         = 0x00010000,
    kIOMemoryMapCopyOnWrite     = 0x00020000,
    kIOMemoryRemote             = 0x00040000,
    kIOMemoryThreadSafe         = 0x00100000,// Shared with Buffer MD
    kIOMemoryClearEncrypt       = 0x00200000,// Shared with Buffer MD
    kIOMemoryUseReserve         = 0x00800000,// Shared with Buffer MD
#define IOMEMORYUSERESERVEDEFINED 1

#ifdef XNU_KERNEL_PRIVATE
    kIOMemoryBufferPurgeable    = 0x00400000,
    // cache mode for buffer MDs, stored in the top nibble (see shift below)
    kIOMemoryBufferCacheMask    = 0x70000000,
    kIOMemoryBufferCacheShift   = 28,
#endif
};
131
// Passing NULL as a mapper argument selects the system (default) IOMapper.
#define kIOMapperSystem ((IOMapper *) NULL)
133
134 enum{
135 kIOMemoryLedgerTagDefault = VM_LEDGER_TAG_DEFAULT,
136 kIOmemoryLedgerTagNetwork = VM_LEDGER_TAG_NETWORK,
137 kIOMemoryLedgerTagMedia = VM_LEDGER_TAG_MEDIA,
138 kIOMemoryLedgerTagGraphics = VM_LEDGER_TAG_GRAPHICS,
139 kIOMemoryLedgerTagNeural = VM_LEDGER_TAG_NEURAL,
140 };
141 enum{
142 kIOMemoryLedgerFlagNoFootprint = VM_LEDGER_FLAG_NO_FOOTPRINT,
143 };
144
// States for setPurgeable(); see that method's headerdoc for full semantics.
enum{
    // query only — leave the state unchanged, return the current state
    kIOMemoryPurgeableKeepCurrent = 1,

    kIOMemoryPurgeableNonVolatile = 2,
    kIOMemoryPurgeableVolatile    = 3,
    kIOMemoryPurgeableEmpty       = 4,

    // modifiers for kIOMemoryPurgeableVolatile behavior
    // (values mirror the mach VM_VOLATILE_* / VM_PURGABLE_* constants)
    kIOMemoryPurgeableVolatileGroup0           = VM_VOLATILE_GROUP_0,
    kIOMemoryPurgeableVolatileGroup1           = VM_VOLATILE_GROUP_1,
    kIOMemoryPurgeableVolatileGroup2           = VM_VOLATILE_GROUP_2,
    kIOMemoryPurgeableVolatileGroup3           = VM_VOLATILE_GROUP_3,
    kIOMemoryPurgeableVolatileGroup4           = VM_VOLATILE_GROUP_4,
    kIOMemoryPurgeableVolatileGroup5           = VM_VOLATILE_GROUP_5,
    kIOMemoryPurgeableVolatileGroup6           = VM_VOLATILE_GROUP_6,
    kIOMemoryPurgeableVolatileGroup7           = VM_VOLATILE_GROUP_7,
    kIOMemoryPurgeableVolatileBehaviorFifo     = VM_PURGABLE_BEHAVIOR_FIFO,
    kIOMemoryPurgeableVolatileBehaviorLifo     = VM_PURGABLE_BEHAVIOR_LIFO,
    kIOMemoryPurgeableVolatileOrderingObsolete = VM_PURGABLE_ORDERING_OBSOLETE,
    kIOMemoryPurgeableVolatileOrderingNormal   = VM_PURGABLE_ORDERING_NORMAL,
    kIOMemoryPurgeableFaultOnAccess            = VM_PURGABLE_DEBUG_FAULT,
};
// Operation codes for performOperation(); see that method's headerdoc.
enum{
    // store to memory and flush processor caches for the range
    kIOMemoryIncoherentIOFlush = 1,
    // store any processor-cached data for the range to memory
    kIOMemoryIncoherentIOStore = 2,

    kIOMemoryClearEncrypted    = 50,
    kIOMemorySetEncrypted      = 51,
};

// Feature test macro: this IOMemoryDescriptor supports IODMACommand.
#define IOMEMORYDESCRIPTOR_SUPPORTS_DMACOMMAND 1
176
// Constraints for a DMA mapping request (see dmaMap() / IOMDDMAMapArgs).
struct IODMAMapSpecification {
    uint64_t    alignment;          // required alignment of the allocation — TODO confirm byte vs. page units
    IOService * device;             // device the mapping is created for
    uint32_t    options;            // kIODMAMap* option bits
    uint8_t     numAddressBits;     // address bits the device can drive; presumably 0 means unrestricted — verify
    uint8_t     resvA[3];           // reserved / padding
    uint32_t    resvB[4];           // reserved for future use
};
185
// Describes the physical pages backing (part of) a mapping request.
struct IODMAMapPageList {
    uint32_t                pageOffset;     // byte offset into the first page — TODO confirm
    uint32_t                pageListCount;  // number of entries in pageList
    const upl_page_info_t * pageList;       // page info array (upl_page_info_t from mach/memory_object_types.h)
};
191
// mapOptions for iovmMapMemory
enum{
    // access protections for the IOVM mapping
    kIODMAMapReadAccess           = 0x00000001,
    kIODMAMapWriteAccess          = 0x00000002,
    kIODMAMapPhysicallyContiguous = 0x00000010,
    kIODMAMapDeviceMemory         = 0x00000020,
    kIODMAMapPagingPath           = 0x00000040,
    kIODMAMapIdentityMap          = 0x00000080,

    kIODMAMapPageListFullyOccupied = 0x00000100,
    kIODMAMapFixedAddress          = 0x00000200,
};
204
205 #ifdef KERNEL_PRIVATE
206
207 // Used for dmaCommandOperation communications for IODMACommand and mappers
208
// Operation codes for dmaCommandOperation().  The top byte selects the
// operation group (see kIOMDDMACommandOperationMask); low bits select a
// variant within the group.
enum {
    kIOMDWalkSegments             = 0x01000000,
    kIOMDFirstSegment             = 1 | kIOMDWalkSegments,
    kIOMDGetCharacteristics       = 0x02000000,
    kIOMDGetCharacteristicsMapped = 1 | kIOMDGetCharacteristics,
    kIOMDDMAActive                = 0x03000000,
    kIOMDSetDMAActive             = 1 | kIOMDDMAActive,
    // NOTE(review): same value as kIOMDDMAActive — the low bit appears to
    // distinguish set (1) from clear (0) within the 0x03 group; confirm
    // against the implementation before relying on the distinction.
    kIOMDSetDMAInactive           = kIOMDDMAActive,
    kIOMDAddDMAMapSpec            = 0x04000000,
    kIOMDDMAMap                   = 0x05000000,
    kIOMDDMAUnmap                 = 0x06000000,
    kIOMDDMACommandOperationMask  = 0xFF000000,
};
// Result structure for the kIOMDGetCharacteristics operation.
struct IOMDDMACharacteristics {
    UInt64      fLength;        // total byte length of the descriptor
    UInt32      fSGCount;       // scatter/gather segment count — presumably; verify against implementation
    UInt32      fPages;         // number of pages spanned — TODO confirm
    UInt32      fPageAlign;     // page alignment characteristic — TODO confirm exact semantics
    ppnum_t     fHighestPage;   // highest physical page number in the memory
    IODirection fDirection;     // transfer direction the descriptor was created with
    UInt8       fIsPrepared;    // non-zero when the descriptor is prepared
};
231
// In/out argument block for the kIOMDDMAMap / kIOMDDMAUnmap operations.
struct IOMDDMAMapArgs {
    IOMapper *            fMapper;      // mapper to create/destroy the IOVM mapping with
    IODMACommand *        fCommand;     // issuing DMA command
    IODMAMapSpecification fMapSpec;     // constraints for the mapping
    uint64_t              fOffset;      // byte offset into the descriptor
    uint64_t              fLength;      // byte length to map
    uint64_t              fAlloc;       // IOVM allocation address — presumably output; verify
    uint64_t              fAllocLength; // IOVM allocation length — presumably output; verify
    uint8_t               fMapContig;   // flags a physically contiguous mapping — TODO confirm
};
242
// Argument block for the kIOMDWalkSegments operation.
struct IOMDDMAWalkSegmentArgs {
    UInt64 fOffset;             // Input/Output offset
    UInt64 fIOVMAddr, fLength;  // Output variables
    UInt8  fMapped;             // Input Variable, Require mapped IOVMA
    UInt64 fMappedBase;         // Input base of mapping
};
// Opaque scratch state carried between successive walk calls.
typedef UInt8 IOMDDMAWalkSegmentState[128];
// fMapped:
enum{
    // beyond plain true/false — presumably means "use fMappedBase"; verify
    kIOMDDMAWalkMappedLocal = 2
};
254
255 #endif /* KERNEL_PRIVATE */
256
// Distinguished values returned by getPreparationID().
enum{
    kIOPreparationIDUnprepared     = 0, // descriptor has not been prepared
    kIOPreparationIDUnsupported    = 1, // descriptor class does not support preparation IDs
    kIOPreparationIDAlwaysPrepared = 2, // memory is permanently prepared — TODO confirm
};
262
263 #ifdef XNU_KERNEL_PRIVATE
264 struct IOMemoryReference;
265 #endif
266
267
268 /*! @class IOMemoryDescriptor : public OSObject
269 * @abstract An abstract base class defining common methods for describing physical or virtual memory.
270 * @discussion The IOMemoryDescriptor object represents a buffer or range of memory, specified as one or more physical or virtual address ranges. It contains methods to return the memory's physically contiguous segments (fragments), for use with the IOMemoryCursor, and methods to map the memory into any address space with caching and placed mapping options. */
271
272 class IOMemoryDescriptor : public OSObject
273 {
274 friend class IOMemoryMap;
275 friend class IOMultiMemoryDescriptor;
276
277 OSDeclareDefaultStructorsWithDispatch(IOMemoryDescriptor);
278
protected:

/*! @var reserved
 *       Reserved for future use.  (Internal use only) */
	struct IOMemoryDescriptorReserved * reserved;

protected:
	OSSet *      _mappings;   // IOMemoryMap instances created on this descriptor
	IOOptionBits _flags;      // creation options + state bits (kIOMemory* enum)


#ifdef XNU_KERNEL_PRIVATE
public:
	struct IOMemoryReference * _memRef;        // backing-memory reference — presumably shared with clones; verify
	vm_tag_t                   _kernelTag;     // VM tag for kernel accounting (see setVMTags)
	vm_tag_t                   _userTag;       // VM tag for user accounting (see setVMTags)
	int16_t                    _dmaReferences; // presumably outstanding DMA map count — verify
	uint16_t                   _internalFlags;
	kern_allocation_name_t     _mapName;
protected:
#else /* XNU_KERNEL_PRIVATE */
	// ABI padding mirroring the XNU_KERNEL_PRIVATE members above
	void *    __iomd_reserved5;
	uint16_t  __iomd_reserved1[4];
	uintptr_t __iomd_reserved2;
#endif /* XNU_KERNEL_PRIVATE */

	uintptr_t __iomd_reserved3;
	uintptr_t __iomd_reserved4;

#ifndef __LP64__
	IODirection _direction;   /* use _flags instead */
#endif /* !__LP64__ */
	IOByteCount _length;      /* length of all ranges */
	IOOptionBits _tag;        // caller-settable tag (see setTag/getTag); not interpreted here
313
314 public:
315 typedef IOOptionBits DMACommandOps;
316 #ifndef __LP64__
317 virtual IOPhysicalAddress getSourceSegment( IOByteCount offset,
318 IOByteCount * length ) APPLE_KEXT_DEPRECATED;
319 #endif /* !__LP64__ */
320
321 /*! @function initWithOptions
322 * @abstract Master initialiser for all variants of memory descriptors. For a more complete description see IOMemoryDescriptor::withOptions.
323 * @discussion Note this function can be used to re-init a previously created memory descriptor.
324 * @result true on success, false on failure. */
325 virtual bool initWithOptions(void * buffers,
326 UInt32 count,
327 UInt32 offset,
328 task_t task,
329 IOOptionBits options,
330 IOMapper * mapper = kIOMapperSystem);
331
332 #ifndef __LP64__
333 virtual addr64_t getPhysicalSegment64( IOByteCount offset,
334 IOByteCount * length ) APPLE_KEXT_DEPRECATED; /* use getPhysicalSegment() and kIOMemoryMapperNone instead */
335 #endif /* !__LP64__ */
336
337 /*! @function setPurgeable
338 * @abstract Control the purgeable status of a memory descriptors memory.
339 * @discussion Buffers may be allocated with the ability to have their purgeable status changed - IOBufferMemoryDescriptor with the kIOMemoryPurgeable option, VM_FLAGS_PURGEABLE may be passed to vm_allocate() in user space to allocate such buffers. The purgeable status of such a buffer may be controlled with setPurgeable(). The process of making a purgeable memory descriptor non-volatile and determining its previous state is atomic - if a purgeable memory descriptor is made nonvolatile and the old state is returned as kIOMemoryPurgeableVolatile, then the memory's previous contents are completely intact and will remain so until the memory is made volatile again. If the old state is returned as kIOMemoryPurgeableEmpty then the memory was reclaimed while it was in a volatile state and its previous contents have been lost.
340 * @param newState - the desired new purgeable state of the memory:<br>
341 * kIOMemoryPurgeableKeepCurrent - make no changes to the memory's purgeable state.<br>
342 * kIOMemoryPurgeableVolatile - make the memory volatile - the memory may be reclaimed by the VM system without saving its contents to backing store.<br>
343 * kIOMemoryPurgeableNonVolatile - make the memory nonvolatile - the memory is treated as with usual allocations and must be saved to backing store if paged.<br>
344 * kIOMemoryPurgeableEmpty - make the memory volatile, and discard any pages allocated to it.
345 * @param oldState - if non-NULL, the previous purgeable state of the memory is returned here:<br>
346 * kIOMemoryPurgeableNonVolatile - the memory was nonvolatile.<br>
347 * kIOMemoryPurgeableVolatile - the memory was volatile but its content has not been discarded by the VM system.<br>
348 * kIOMemoryPurgeableEmpty - the memory was volatile and has been discarded by the VM system.<br>
349 * @result An IOReturn code. */
350
351 virtual IOReturn setPurgeable( IOOptionBits newState,
352 IOOptionBits * oldState );
353
354 /*! @function setOwnership
355 * @abstract Control the ownership of a memory descriptors memory.
356 * @discussion IOBufferMemoryDescriptor are owned by a specific task. The ownership of such a buffer may be controlled with setOwnership().
357 * @param newOwner - the task to be the new owner of the memory.
358 * @param newLedgerTag - the ledger this memory should be accounted in.
359 * @param newLedgerOptions - accounting options
360 * @result An IOReturn code. */
361
362 IOReturn setOwnership( task_t newOwner,
363 int newLedgerTag,
364 IOOptionBits newLedgerOptions );
365
366 /*! @function getPageCounts
367 * @abstract Retrieve the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
368 * @discussion This method returns the number of resident and/or dirty pages encompassed by an IOMemoryDescriptor.
369 * @param residentPageCount - If non-null, a pointer to a byte count that will return the number of resident pages encompassed by this IOMemoryDescriptor.
370 * @param dirtyPageCount - If non-null, a pointer to a byte count that will return the number of dirty pages encompassed by this IOMemoryDescriptor.
371 * @result An IOReturn code. */
372
373 IOReturn getPageCounts( IOByteCount * residentPageCount,
374 IOByteCount * dirtyPageCount);
375
376 /*! @function performOperation
377 * @abstract Perform an operation on the memory descriptor's memory.
378 * @discussion This method performs some operation on a range of the memory descriptor's memory. When a memory descriptor's memory is not mapped, it should be more efficient to use this method than mapping the memory to perform the operation virtually.
379 * @param options The operation to perform on the memory:<br>
380 * kIOMemoryIncoherentIOFlush - pass this option to store to memory and flush any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.<br>
381 * kIOMemoryIncoherentIOStore - pass this option to store to memory any data in the processor cache for the memory range, with synchronization to ensure the data has passed through all levels of processor cache. It may not be supported on all architectures. This type of flush may be used for non-coherent I/O such as AGP - it is NOT required for PCI coherent operations. The memory descriptor must have been previously prepared.
382 * @param offset A byte offset into the memory descriptor's memory.
383 * @param length The length of the data range.
384 * @result An IOReturn code. */
385
386 virtual IOReturn performOperation( IOOptionBits options,
387 IOByteCount offset, IOByteCount length );
388
389 // Used for dedicated communications for IODMACommand
390 virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const;
391
392 /*! @function getPhysicalSegment
393 * @abstract Break a memory descriptor into its physically contiguous segments.
394 * @discussion This method returns the physical address of the byte at the given offset into the memory, and optionally the length of the physically contiguous segment from that offset.
395 * @param offset A byte offset into the memory whose physical address to return.
396 * @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segement at the given offset.
397 * @result A physical address, or zero if the offset is beyond the length of the memory. */
398
399 #ifdef __LP64__
400 virtual addr64_t getPhysicalSegment( IOByteCount offset,
401 IOByteCount * length,
402 IOOptionBits options = 0 ) = 0;
403 #else /* !__LP64__ */
404 virtual addr64_t getPhysicalSegment( IOByteCount offset,
405 IOByteCount * length,
406 IOOptionBits options );
407 #endif /* !__LP64__ */
408
	// Returns an ID for the descriptor's prepared state; may be one of the
	// distinguished kIOPreparationID* values (see enum above).
	virtual uint64_t getPreparationID( void );
	// Assigns a preparation ID to this descriptor.  (Internal use)
	void setPreparationID( void );

	// Sets the VM tags recorded in _kernelTag/_userTag, used for memory
	// accounting — presumably against kernel vs. user ledgers; verify.
	void setVMTags(uint32_t kernelTag, uint32_t userTag);
	// Returns the VM tag appropriate for the given map.
	uint32_t getVMTag(vm_map_t map);
414
415 #ifdef XNU_KERNEL_PRIVATE
416 IOMemoryDescriptorReserved * getKernelReserved( void );
417 void cleanKernelReserved(IOMemoryDescriptorReserved * reserved);
418 IOReturn dmaMap(
419 IOMapper * mapper,
420 IODMACommand * command,
421 const IODMAMapSpecification * mapSpec,
422 uint64_t offset,
423 uint64_t length,
424 uint64_t * mapAddress,
425 uint64_t * mapLength);
426 IOReturn dmaUnmap(
427 IOMapper * mapper,
428 IODMACommand * command,
429 uint64_t offset,
430 uint64_t mapAddress,
431 uint64_t mapLength);
432 void dmaMapRecord(
433 IOMapper * mapper,
434 IODMACommand * command,
435 uint64_t mapLength);
436 #endif
437
private:
	// Reserved vtable slots: padding that allows new virtual methods to be
	// added in updates without breaking kext binary compatibility.  Slot 0
	// is in use everywhere; on 32-bit, slots 1-7 carry the legacy
	// (pre-LP64) virtual overloads and so are marked used there.
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 0);
#ifdef __LP64__
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 1);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 2);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 3);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 4);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 5);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 6);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 7);
#else /* !__LP64__ */
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 1);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 2);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 3);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 4);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 5);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 6);
	OSMetaClassDeclareReservedUsed(IOMemoryDescriptor, 7);
#endif /* !__LP64__ */
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 8);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 9);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 10);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 11);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 12);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 13);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 14);
	OSMetaClassDeclareReservedUnused(IOMemoryDescriptor, 15);
465
466 protected:
467 virtual void free(void) APPLE_KEXT_OVERRIDE;
468 public:
469 static void initialize( void );
470
471 public:
472 /*! @function withAddress
473 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the kernel task.
474 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the kernel map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
475 * @param address The virtual address of the first byte in the memory.
476 * @param withLength The length of memory.
477 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
478 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
479
480 static IOMemoryDescriptor * withAddress(void * address,
481 IOByteCount withLength,
482 IODirection withDirection);
483
484 #ifndef __LP64__
485 static IOMemoryDescriptor * withAddress(IOVirtualAddress address,
486 IOByteCount withLength,
487 IODirection withDirection,
488 task_t withTask) APPLE_KEXT_DEPRECATED; /* use withAddressRange() and prepare() instead */
489 #endif /* !__LP64__ */
490
491 /*! @function withPhysicalAddress
492 * @abstract Create an IOMemoryDescriptor to describe one physical range.
493 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single physical memory range.
494 * @param address The physical address of the first byte in the memory.
495 * @param withLength The length of memory.
496 * @param withDirection An I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
497 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
498
499 static IOMemoryDescriptor * withPhysicalAddress(
500 IOPhysicalAddress address,
501 IOByteCount withLength,
502 IODirection withDirection );
503
504 #ifndef __LP64__
505 static IOMemoryDescriptor * withRanges(IOVirtualRange * ranges,
506 UInt32 withCount,
507 IODirection withDirection,
508 task_t withTask,
509 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withAddressRanges() instead */
510 #endif /* !__LP64__ */
511
512 /*! @function withAddressRange
513 * @abstract Create an IOMemoryDescriptor to describe one virtual range of the specified map.
514 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of a single virtual memory range mapped into the specified map. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
515 * @param address The virtual address of the first byte in the memory.
516 * @param length The length of memory.
517 * @param options
518 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
519 * @param task The task the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
520 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
521
522 static IOMemoryDescriptor * withAddressRange(
523 mach_vm_address_t address,
524 mach_vm_size_t length,
525 IOOptionBits options,
526 task_t task);
527
528 /*! @function withAddressRanges
529 * @abstract Create an IOMemoryDescriptor to describe one or more virtual ranges.
530 * @discussion This method creates and initializes an IOMemoryDescriptor for memory consisting of an array of virtual memory ranges each mapped into a specified source task. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
531 * @param ranges An array of IOAddressRange structures which specify the virtual ranges in the specified map which make up the memory to be described. IOAddressRange is the 64bit version of IOVirtualRange.
532 * @param rangeCount The member count of the ranges array.
533 * @param options
534 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
535 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
536 * @param task The task each of the virtual ranges are mapped into. Note that unlike IOMemoryDescriptor::withAddress(), kernel_task memory must be explicitly prepared when passed to this api. The task argument may be NULL to specify memory by physical address.
537 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
538
539 static IOMemoryDescriptor * withAddressRanges(
540 IOAddressRange * ranges,
541 UInt32 rangeCount,
542 IOOptionBits options,
543 task_t task);
544
545 /*! @function withOptions
546 * @abstract Master initialiser for all variants of memory descriptors.
547 * @discussion This method creates and initializes an IOMemoryDescriptor for memory it has three main variants: Virtual, Physical & mach UPL. These variants are selected with the options parameter, see below. This memory descriptor needs to be prepared before it can be used to extract data from the memory described.
548 *
549 *
550 * @param buffers A pointer to an array of IOAddressRange when options:type is kIOMemoryTypeVirtual64 or kIOMemoryTypePhysical64 or a 64bit kernel. For type UPL it is a upl_t returned by the mach/memory_object_types.h apis, primarily used internally by the UBC. IOVirtualRanges or IOPhysicalRanges are 32 bit only types for use when options:type is kIOMemoryTypeVirtual or kIOMemoryTypePhysical on 32bit kernels.
551 *
552 * @param count options:type = Virtual or Physical count contains a count of the number of entires in the buffers array. For options:type = UPL this field contains a total length.
553 *
554 * @param offset Only used when options:type = UPL, in which case this field contains an offset for the memory within the buffers upl.
555 *
556 * @param task Only used options:type = Virtual, The task each of the virtual ranges are mapped into.
557 *
558 * @param options
559 * kIOMemoryDirectionMask (options:direction) This nibble indicates the I/O direction to be associated with the descriptor, which may affect the operation of the prepare and complete methods on some architectures.
560 * kIOMemoryTypeMask (options:type) kIOMemoryTypeVirtual64, kIOMemoryTypeVirtual, kIOMemoryTypePhysical64, kIOMemoryTypePhysical, kIOMemoryTypeUPL Indicates that what type of memory basic memory descriptor to use. This sub-field also controls the interpretation of the buffers, count, offset & task parameters.
561 * kIOMemoryAsReference For options:type = Virtual or Physical this indicate that the memory descriptor need not copy the ranges array into local memory. This is an optimisation to try to minimise unnecessary allocations.
562 * kIOMemoryBufferPageable Only used by the IOBufferMemoryDescriptor as an indication that the kernel virtual memory is in fact pageable and we need to use the kernel pageable submap rather than the default map.
563 *
564 * @param mapper Which IOMapper should be used to map the in-memory physical addresses into I/O space addresses. Defaults to 0 which indicates that the system mapper is to be used, if present.
565 *
566 * @result The created IOMemoryDescriptor on success, to be released by the caller, or zero on failure. */
567
568 static IOMemoryDescriptor *withOptions(void * buffers,
569 UInt32 count,
570 UInt32 offset,
571 task_t task,
572 IOOptionBits options,
573 IOMapper * mapper = kIOMapperSystem);
574
575 #ifndef __LP64__
576 static IOMemoryDescriptor * withPhysicalRanges(
577 IOPhysicalRange * ranges,
578 UInt32 withCount,
579 IODirection withDirection,
580 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use withOptions() and kIOMemoryTypePhysical instead */
581 #endif /* !__LP64__ */
582
583 #ifndef __LP64__
584 static IOMemoryDescriptor * withSubRange(IOMemoryDescriptor *of,
585 IOByteCount offset,
586 IOByteCount length,
587 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use IOSubMemoryDescriptor::withSubRange() and kIOMemoryThreadSafe instead */
588 #endif /* !__LP64__ */
589
590 /*! @function withPersistentMemoryDescriptor
591 * @abstract Copy constructor that generates a new memory descriptor if the backing memory for the same task's virtual address and length has changed.
592 * @discussion If the original memory descriptor's address and length is still backed by the same real memory, i.e. the user hasn't deallocated and the reallocated memory at the same address then the original memory descriptor is returned with a additional reference. Otherwise we build a totally new memory descriptor with the same characteristics as the previous one but with a new view of the vm. Note not legal to call this function with anything except an IOGeneralMemoryDescriptor that was created with the kIOMemoryPersistent option.
593 * @param originalMD The memory descriptor to be duplicated.
594 * @result Either the original memory descriptor with an additional retain or a new memory descriptor, 0 for a bad original memory descriptor or some other resource shortage. */
595 static IOMemoryDescriptor *
596 withPersistentMemoryDescriptor(IOMemoryDescriptor *originalMD);
597
598 #ifndef __LP64__
599 // obsolete initializers
600 // - initWithOptions is the designated initializer
601 virtual bool initWithAddress(void * address,
602 IOByteCount withLength,
603 IODirection withDirection) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
604 virtual bool initWithAddress(IOVirtualAddress address,
605 IOByteCount withLength,
606 IODirection withDirection,
607 task_t withTask) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
608 virtual bool initWithPhysicalAddress(
609 IOPhysicalAddress address,
610 IOByteCount withLength,
611 IODirection withDirection ) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
612 virtual bool initWithRanges(IOVirtualRange * ranges,
613 UInt32 withCount,
614 IODirection withDirection,
615 task_t withTask,
616 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
617 virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
618 UInt32 withCount,
619 IODirection withDirection,
620 bool asReference = false) APPLE_KEXT_DEPRECATED; /* use initWithOptions() instead */
621 #endif /* __LP64__ */
622
623 /*! @function getDirection
624 * @abstract Accessor to get the direction the memory descriptor was created with.
625 * @discussion This method returns the direction the memory descriptor was created with.
626 * @result The direction. */
627
628 virtual IODirection getDirection() const;
629
630 /*! @function getLength
631 * @abstract Accessor to get the length of the memory descriptor (over all its ranges).
632 * @discussion This method returns the total length of the memory described by the descriptor, ie. the sum of its ranges' lengths.
633 * @result The byte count. */
634
635 virtual IOByteCount getLength() const;
636
637 /*! @function setTag
638 * @abstract Set the tag for the memory descriptor.
639 * @discussion This method sets the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
640 * @param tag The tag. */
641
642 virtual void setTag( IOOptionBits tag );
643
644 /*! @function getTag
645 * @abstract Accessor to the retrieve the tag for the memory descriptor.
646 * @discussion This method returns the tag for the memory descriptor. Tag bits are not interpreted by IOMemoryDescriptor.
647 * @result The tag. */
648
649 virtual IOOptionBits getTag( void );
650
651 /*! @function getFlags
 * @abstract Accessor to retrieve the options the memory descriptor was created with.
 * @discussion Accessor to retrieve the options the memory descriptor was created with, and flags with its state. These bits are defined by the kIOMemory* enum.
654 * @result The flags bitfield. */
655
656 uint64_t getFlags(void);
657
658 /*! @function readBytes
659 * @abstract Copy data from the memory descriptor's buffer to the specified buffer.
 * @discussion This method copies data from the memory descriptor's memory at the given offset, to the caller's buffer. The memory descriptor MUST have the kIODirectionOut direction bit set and be prepared. kIODirectionOut means that this memory descriptor will be output to an external device, so readBytes is used to get memory into a local buffer for a PIO transfer to the device.
661 * @param offset A byte offset into the memory descriptor's memory.
662 * @param bytes The caller supplied buffer to copy the data to.
663 * @param withLength The length of the data to copy.
664 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
665
666 virtual IOByteCount readBytes(IOByteCount offset,
667 void * bytes, IOByteCount withLength);
668
669 /*! @function writeBytes
670 * @abstract Copy data to the memory descriptor's buffer from the specified buffer.
 * @discussion This method copies data to the memory descriptor's memory at the given offset, from the caller's buffer. The memory descriptor MUST have the kIODirectionIn direction bit set and be prepared. kIODirectionIn means that this memory descriptor will be input from an external device, so writeBytes is used to write memory into the descriptor for PIO drivers.
672 * @param offset A byte offset into the memory descriptor's memory.
673 * @param bytes The caller supplied buffer to copy the data from.
674 * @param withLength The length of the data to copy.
675 * @result The number of bytes copied, zero will be returned if the specified offset is beyond the length of the descriptor. Development/debug kernel builds will assert if the offset is beyond the length of the descriptor. */
676
677 virtual IOByteCount writeBytes(IOByteCount offset,
678 const void * bytes, IOByteCount withLength);
679
680 #ifndef __LP64__
681 virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
682 IOByteCount * length);
683 #endif /* !__LP64__ */
684
685 /*! @function getPhysicalAddress
686 * @abstract Return the physical address of the first byte in the memory.
687 * @discussion This method returns the physical address of the first byte in the memory. It is most useful on memory known to be physically contiguous.
688 * @result A physical address. */
689
690 IOPhysicalAddress getPhysicalAddress();
691
692 #ifndef __LP64__
693 virtual void * getVirtualSegment(IOByteCount offset,
694 IOByteCount * length) APPLE_KEXT_DEPRECATED; /* use map() and getVirtualAddress() instead */
695 #endif /* !__LP64__ */
696
697 /*! @function prepare
698 * @abstract Prepare the memory for an I/O transfer.
699 * @discussion This involves paging in the memory, if necessary, and wiring it down for the duration of the transfer. The complete() method completes the processing of the memory after the I/O transfer finishes. Note that the prepare call is not thread safe and it is expected that the client will more easily be able to guarantee single threading a particular memory descriptor.
700 * @param forDirection The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
701 * @result An IOReturn code. */
702
703 virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) = 0;
704
705 /*! @function complete
706 * @abstract Complete processing of the memory after an I/O transfer finishes.
 * @discussion This method should not be called unless a prepare was previously issued; the prepare() and complete() must occur in pairs, before and after an I/O transfer involving pageable memory. In 10.3 or greater systems the direction argument to complete is no longer respected. The direction is totally determined at prepare() time.
708 * @param forDirection DEPRECATED The direction of the I/O just completed, or kIODirectionNone for the direction specified by the memory descriptor.
709 * @result An IOReturn code. */
710
711 virtual IOReturn complete(IODirection forDirection = kIODirectionNone) = 0;
712
713 /*
714 * Mapping functions.
715 */
716
717 /*! @function createMappingInTask
718 * @abstract Maps a IOMemoryDescriptor into a task.
719 * @discussion This is the general purpose method to map all or part of the memory described by a memory descriptor into a task at any available address, or at a fixed address if possible. Caching & read-only options may be set for the mapping. The mapping is represented as a returned reference to a IOMemoryMap object, which may be shared if the mapping is compatible with an existing mapping of the IOMemoryDescriptor. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping.
720 * @param intoTask Sets the target task for the mapping. Pass kernel_task for the kernel address space.
721 * @param atAddress If a placed mapping is requested, atAddress specifies its address, and the kIOMapAnywhere should not be set. Otherwise, atAddress is ignored.
722 * @param options Mapping options are defined in IOTypes.h,<br>
723 * kIOMapAnywhere should be passed if the mapping can be created anywhere. If not set, the atAddress parameter sets the location of the mapping, if it is available in the target map.<br>
724 * kIOMapDefaultCache to inhibit the cache in I/O areas, kIOMapCopybackCache in general purpose RAM.<br>
725 * kIOMapInhibitCache, kIOMapWriteThruCache, kIOMapCopybackCache to set the appropriate caching.<br>
 * kIOMapReadOnly to allow only read only accesses to the memory - writes will cause an access fault.<br>
727 * kIOMapReference will only succeed if the mapping already exists, and the IOMemoryMap object is just an extra reference, ie. no new mapping will be created.<br>
728 * kIOMapUnique allows a special kind of mapping to be created that may be used with the IOMemoryMap::redirect() API. These mappings will not be shared as is the default - there will always be a unique mapping created for the caller, not an existing mapping with an extra reference.<br>
729 * kIOMapPrefault will try to prefault the pages corresponding to the mapping. This must not be done on the kernel task, and the memory must have been wired via prepare(). Otherwise, the function will fail.<br>
730 * @param offset Is a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default to map all the memory.
731 * @param length Is the length of the mapping requested for a subset of the IOMemoryDescriptor. Zero is the default to map all the memory.
732 * @result A reference to an IOMemoryMap object representing the mapping, which can supply the virtual address of the mapping and other information. The mapping may be shared with multiple callers - multiple maps are avoided if a compatible one exists. The IOMemoryMap object returned should be released only when the caller has finished accessing the mapping, as freeing the object destroys the mapping. The IOMemoryMap instance also retains the IOMemoryDescriptor it maps while it exists. */
733
734 IOMemoryMap * createMappingInTask(
735 task_t intoTask,
736 mach_vm_address_t atAddress,
737 IOOptionBits options,
738 mach_vm_size_t offset = 0,
739 mach_vm_size_t length = 0 );
740
741 #ifndef __LP64__
742 virtual IOMemoryMap * map(
743 task_t intoTask,
744 IOVirtualAddress atAddress,
745 IOOptionBits options,
746 IOByteCount offset = 0,
747 IOByteCount length = 0 ) APPLE_KEXT_DEPRECATED;/* use createMappingInTask() instead */
748 #endif /* !__LP64__ */
749
750 /*! @function map
751 * @abstract Maps a IOMemoryDescriptor into the kernel map.
752 * @discussion This is a shortcut method to map all the memory described by a memory descriptor into the kernel map at any available address. See the full version of the createMappingInTask method for further details.
753 * @param options Mapping options as in the full version of the createMappingInTask method, with kIOMapAnywhere assumed.
754 * @result See the full version of the createMappingInTask method. */
755
756 virtual IOMemoryMap * map(
757 IOOptionBits options = 0 );
758
759 /*! @function setMapping
760 * @abstract Establishes an already existing mapping.
761 * @discussion This method tells the IOMemoryDescriptor about a mapping that exists, but was created elsewhere. It allows later callers of the map method to share this externally created mapping. The IOMemoryMap object returned is created to represent it. This method is not commonly needed.
762 * @param task Address space in which the mapping exists.
763 * @param mapAddress Virtual address of the mapping.
764 * @param options Caching and read-only attributes of the mapping.
765 * @result A IOMemoryMap object created to represent the mapping. */
766
767 virtual IOMemoryMap * setMapping(
768 task_t task,
769 IOVirtualAddress mapAddress,
770 IOOptionBits options = 0 );
771
772 // Following methods are private implementation
773
774 #ifdef __LP64__
775 virtual
776 #endif /* __LP64__ */
777 IOReturn redirect( task_t safeTask, bool redirect );
778
779 IOReturn handleFault(
780 void * _pager,
781 mach_vm_size_t sourceOffset,
782 mach_vm_size_t length);
783
784 IOReturn populateDevicePager(
785 void * pager,
786 vm_map_t addressMap,
787 mach_vm_address_t address,
788 mach_vm_size_t sourceOffset,
789 mach_vm_size_t length,
790 IOOptionBits options );
791
792 virtual IOMemoryMap * makeMapping(
793 IOMemoryDescriptor * owner,
794 task_t intoTask,
795 IOVirtualAddress atAddress,
796 IOOptionBits options,
797 IOByteCount offset,
798 IOByteCount length );
799
800 protected:
801 virtual void addMapping(
802 IOMemoryMap * mapping );
803
804 virtual void removeMapping(
805 IOMemoryMap * mapping );
806
807 virtual IOReturn doMap(
808 vm_map_t addressMap,
809 IOVirtualAddress * atAddress,
810 IOOptionBits options,
811 IOByteCount sourceOffset = 0,
812 IOByteCount length = 0 );
813
814 virtual IOReturn doUnmap(
815 vm_map_t addressMap,
816 IOVirtualAddress logical,
817 IOByteCount length );
818 };
819
820 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
821
/*! @class IOMemoryMap : public OSObject
 *   @abstract A class defining common methods for describing a memory mapping.
 *   @discussion The IOMemoryMap object represents a mapped range of memory, described by an IOMemoryDescriptor. The mapping may be in the kernel or a non-kernel task and has processor cache mode attributes. IOMemoryMap instances are created by IOMemoryDescriptor when it creates mappings in its map method, and returned to the caller. */

class IOMemoryMap : public OSObject
{
	OSDeclareDefaultStructorsWithDispatch(IOMemoryMap);
#ifdef XNU_KERNEL_PRIVATE
public:
	IOMemoryDescriptor * fMemory;          // descriptor this map was created from (see getMemoryDescriptor())
	IOMemoryMap *        fSuperMap;        // NOTE(review): appears to be an enclosing mapping this one shares - confirm against implementation
	mach_vm_size_t       fOffset;          // byte offset into fMemory at which the mapping starts (see setMemoryDescriptor())
	mach_vm_address_t    fAddress;         // virtual address of the first byte of the mapping
	mach_vm_size_t       fLength;          // length of the mapping in bytes
	task_t               fAddressTask;     // task the mapping exists in (see getAddressTask())
	vm_map_t             fAddressMap;      // vm_map holding the mapping
	IOOptionBits         fOptions;         // kIOMap* options the mapping was created with (see getMapOptions())
	upl_t                fRedirUPL;        // redirect() bookkeeping
	ipc_port_t           fRedirEntry;      // redirect() bookkeeping
	IOMemoryDescriptor * fOwner;
	uint8_t              fUserClientUnmap; // see userClientUnmap()
#if IOTRACKING
	IOTrackingUser       fTracking;
#endif
#endif /* XNU_KERNEL_PRIVATE */

protected:
	virtual void taggedRelease(const void *tag = NULL) const APPLE_KEXT_OVERRIDE;
	virtual void free(void) APPLE_KEXT_OVERRIDE;

public:
/*! @function getVirtualAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping. Since the IOVirtualAddress is only 32bit in 32bit kernels, the getAddress() method should be used for compatibility with 64bit task mappings.
 *   @result A virtual address. */

	virtual IOVirtualAddress getVirtualAddress(void);

/*! @function getPhysicalSegment
 *   @abstract Break a mapping into its physically contiguous segments.
 *   @discussion This method returns the physical address of the byte at the given offset into the mapping, and optionally the length of the physically contiguous segment from that offset. It functions similarly to IOMemoryDescriptor::getPhysicalSegment.
 *   @param offset A byte offset into the mapping whose physical address to return.
 *   @param length If non-zero, getPhysicalSegment will store here the length of the physically contiguous segment at the given offset.
 *   @result A physical address, or zero if the offset is beyond the length of the mapping. */

#ifdef __LP64__
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length,
	    IOOptionBits options = 0);
#else /* !__LP64__ */
	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length);
#endif /* !__LP64__ */

/*! @function getPhysicalAddress
 *   @abstract Return the physical address of the first byte in the mapping.
 *   @discussion This method returns the physical address of the first byte in the mapping. It is most useful on mappings known to be physically contiguous.
 *   @result A physical address. */

	IOPhysicalAddress getPhysicalAddress(void);

/*! @function getLength
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */

	virtual IOByteCount getLength(void);

/*! @function getAddressTask
 *   @abstract Accessor to the task of the mapping.
 *   @discussion This method returns the mach task the mapping exists in.
 *   @result A mach task_t. */

	virtual task_t getAddressTask();

/*! @function getMemoryDescriptor
 *   @abstract Accessor to the IOMemoryDescriptor the mapping was created from.
 *   @discussion This method returns the IOMemoryDescriptor the mapping was created from.
 *   @result An IOMemoryDescriptor reference, which is valid while the IOMemoryMap object is retained. It should not be released by the caller. */

	virtual IOMemoryDescriptor * getMemoryDescriptor();

/*! @function getMapOptions
 *   @abstract Accessor to the options the mapping was created with.
 *   @discussion This method returns the options to IOMemoryDescriptor::map the mapping was created with.
 *   @result Options for the mapping, including cache settings. */

	virtual IOOptionBits getMapOptions();

/*! @function unmap
 *   @abstract Force the IOMemoryMap to unmap, without destroying the object.
 *   @discussion IOMemoryMap instances will unmap themselves upon free, ie. when the last client with a reference calls release. This method forces the IOMemoryMap to destroy the mapping it represents, regardless of the number of clients. It is not generally used.
 *   @result An IOReturn code. */

	virtual IOReturn unmap();

	// NOTE(review): presumably invoked when the task owning the mapping terminates - confirm against implementation.
	virtual void taskDied();

/*! @function redirect
 *   @abstract Replace the memory mapped in a process with new backing memory.
 *   @discussion An IOMemoryMap created with the kIOMapUnique option to IOMemoryDescriptor::map() can be remapped to a new IOMemoryDescriptor backing object. If the new IOMemoryDescriptor is specified as NULL, client access to the memory map is blocked until a new backing object has been set. By blocking access and copying data, the caller can create atomic copies of the memory while the client is potentially reading or writing the memory.
 *   @param newBackingMemory The IOMemoryDescriptor that represents the physical memory that is to be now mapped in the virtual range the IOMemoryMap represents. If newBackingMemory is NULL, any access to the mapping will hang (in vm_fault()) until access has been restored by a new call to redirect() with non-NULL newBackingMemory argument.
 *   @param options Mapping options are defined in IOTypes.h, and are documented in IOMemoryDescriptor::map()
 *   @param offset As with IOMemoryDescriptor::map(), a beginning offset into the IOMemoryDescriptor's memory where the mapping starts. Zero is the default.
 *   @result An IOReturn code. */

#ifndef __LP64__
// For 32 bit XNU, there is a 32 bit (IOByteCount) and a 64 bit (mach_vm_size_t) interface;
// for 64 bit, these fall together on the 64 bit one.
	virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits options,
	    IOByteCount offset = 0);
#endif
	virtual IOReturn redirect(IOMemoryDescriptor * newBackingMemory,
	    IOOptionBits options,
	    mach_vm_size_t offset = 0);

#ifdef __LP64__
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	inline mach_vm_address_t getAddress() __attribute__((always_inline));
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	inline mach_vm_size_t getSize() __attribute__((always_inline));
#else /* !__LP64__ */
/*! @function getAddress
 *   @abstract Accessor to the virtual address of the first byte in the mapping.
 *   @discussion This method returns the virtual address of the first byte in the mapping.
 *   @result A virtual address. */
	virtual mach_vm_address_t getAddress();
/*! @function getSize
 *   @abstract Accessor to the length of the mapping.
 *   @discussion This method returns the length of the mapping.
 *   @result A byte count. */
	virtual mach_vm_size_t getSize();
#endif /* !__LP64__ */

#ifdef XNU_KERNEL_PRIVATE
	// for IOMemoryDescriptor use
	IOMemoryMap * copyCompatible( IOMemoryMap * newMapping );

	bool init(
		task_t intoTask,
		mach_vm_address_t toAddress,
		IOOptionBits options,
		mach_vm_size_t offset,
		mach_vm_size_t length );

	bool setMemoryDescriptor(IOMemoryDescriptor * _memory, mach_vm_size_t _offset);

	IOReturn redirect(
		task_t intoTask, bool redirect );

	IOReturn userClientUnmap();
#endif /* XNU_KERNEL_PRIVATE */

	IOReturn wireRange(
		uint32_t options,
		mach_vm_size_t offset,
		mach_vm_size_t length);

	OSMetaClassDeclareReservedUnused(IOMemoryMap, 0);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 1);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 2);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 3);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 4);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 5);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 6);
	OSMetaClassDeclareReservedUnused(IOMemoryMap, 7);
};
996
997 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
998 #ifdef XNU_KERNEL_PRIVATE
// Also these flags should not overlap with the options to
// IOMemoryDescriptor::initWithRanges(... IOOptionBits options);
enum {
	_kIOMemorySourceSegment = 0x00002000
};
1004 #endif /* XNU_KERNEL_PRIVATE */
1005
1006 // The following classes are private implementation of IOMemoryDescriptor - they
1007 // should not be referenced directly, just through the public API's in the
1008 // IOMemoryDescriptor class. For example, an IOGeneralMemoryDescriptor instance
1009 // might be created by IOMemoryDescriptor::withAddressRange(), but there should be
1010 // no need to reference as anything but a generic IOMemoryDescriptor *.
1011
/*! @class IOGeneralMemoryDescriptor
 *   @abstract Private, general-purpose implementation of IOMemoryDescriptor backed by one or more virtual or physical address ranges.
 *   @discussion This class is private implementation; do not reference it directly. Instances are created through the public IOMemoryDescriptor factory methods (e.g. withAddressRange()) and should be used only as a generic IOMemoryDescriptor *. */
class IOGeneralMemoryDescriptor : public IOMemoryDescriptor
{
	OSDeclareDefaultStructors(IOGeneralMemoryDescriptor);

public:
	// Discriminated by the descriptor's options; holds the list of ranges in
	// whichever representation (virtual, 64-bit virtual, physical, or uio)
	// the descriptor was created with.
	union Ranges {
		IOVirtualRange  *v;
		IOAddressRange  *v64;
		IOPhysicalRange *p;
		void            *uio;
	};
protected:
	Ranges   _ranges;
	unsigned _rangesCount;       /* number of address ranges in list */
#ifndef __LP64__
	bool     _rangesIsAllocated; /* is list allocated by us? */
#endif /* !__LP64__ */

	task_t _task;                /* task where all ranges are mapped to */

	union {
		IOVirtualRange  v;
		IOPhysicalRange p;
	} _singleRange;              /* storage space for a single range */

	unsigned _wireCount;         /* number of outstanding wires */

#ifndef __LP64__
	uintptr_t _cachedVirtualAddress;

	IOPhysicalAddress _cachedPhysicalAddress;
#endif /* !__LP64__ */

	bool _initialized;           /* has superclass been initialized? */

public:
	virtual void free() APPLE_KEXT_OVERRIDE;

	virtual IOReturn dmaCommandOperation(DMACommandOps op, void *vData, UInt dataSize) const APPLE_KEXT_OVERRIDE;

	virtual uint64_t getPreparationID( void ) APPLE_KEXT_OVERRIDE;

#ifdef XNU_KERNEL_PRIVATE
	// Internal APIs may be made virtual at some time in the future.
	IOReturn wireVirtual(IODirection forDirection);
	IOReturn dmaMap(
		IOMapper * mapper,
		IODMACommand * command,
		const IODMAMapSpecification * mapSpec,
		uint64_t offset,
		uint64_t length,
		uint64_t * mapAddress,
		uint64_t * mapLength);
	bool initMemoryEntries(size_t size, IOMapper * mapper);

	IOMemoryReference * memoryReferenceAlloc(uint32_t capacity,
	    IOMemoryReference * realloc);
	void memoryReferenceFree(IOMemoryReference * ref);
	void memoryReferenceRelease(IOMemoryReference * ref);

	IOReturn memoryReferenceCreate(
		IOOptionBits options,
		IOMemoryReference ** reference);

	IOReturn memoryReferenceMap(IOMemoryReference * ref,
	    vm_map_t map,
	    mach_vm_size_t inoffset,
	    mach_vm_size_t size,
	    IOOptionBits options,
	    mach_vm_address_t * inaddr);

	static IOReturn memoryReferenceSetPurgeable(
		IOMemoryReference * ref,
		IOOptionBits newState,
		IOOptionBits * oldState);
	static IOReturn memoryReferenceSetOwnership(
		IOMemoryReference * ref,
		task_t newOwner,
		int newLedgerTag,
		IOOptionBits newLedgerOptions);
	static IOReturn memoryReferenceGetPageCounts(
		IOMemoryReference * ref,
		IOByteCount * residentPageCount,
		IOByteCount * dirtyPageCount);
#endif

private:

#ifndef __LP64__
	virtual void setPosition(IOByteCount position);
	virtual void mapIntoKernel(unsigned rangeIndex);
	virtual void unmapFromKernel();
#endif /* !__LP64__ */

	// Internal
	OSData *     _memoryEntries;
	unsigned int _pages;
	ppnum_t      _highestPage;
	uint32_t     __iomd_reservedA;   // reserved for future use
	uint32_t     __iomd_reservedB;   // reserved for future use

	IOLock *     _prepareLock;       // serializes prepare()/complete()

public:
	/*
	 * IOMemoryDescriptor required methods
	 */

	// Master initialiser
	virtual bool initWithOptions(void * buffers,
	    UInt32 count,
	    UInt32 offset,
	    task_t task,
	    IOOptionBits options,
	    IOMapper * mapper = kIOMapperSystem) APPLE_KEXT_OVERRIDE;

#ifndef __LP64__
	// Secondary initialisers
	virtual bool initWithAddress(void * address,
	    IOByteCount withLength,
	    IODirection withDirection) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithAddress(IOVirtualAddress address,
	    IOByteCount withLength,
	    IODirection withDirection,
	    task_t withTask) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalAddress(
		IOPhysicalAddress address,
		IOByteCount withLength,
		IODirection withDirection ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithRanges( IOVirtualRange * ranges,
	    UInt32 withCount,
	    IODirection withDirection,
	    task_t withTask,
	    bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual bool initWithPhysicalRanges(IOPhysicalRange * ranges,
	    UInt32 withCount,
	    IODirection withDirection,
	    bool asReference = false) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual addr64_t getPhysicalSegment64( IOByteCount offset,
	    IOByteCount * length ) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual IOPhysicalAddress getPhysicalSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE;

	virtual IOPhysicalAddress getSourceSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;

	virtual void * getVirtualSegment(IOByteCount offset,
	    IOByteCount * length) APPLE_KEXT_OVERRIDE APPLE_KEXT_DEPRECATED;
#endif /* !__LP64__ */

	virtual IOReturn setPurgeable( IOOptionBits newState,
	    IOOptionBits * oldState ) APPLE_KEXT_OVERRIDE;

	IOReturn setOwnership( task_t newOwner,
	    int newLedgerTag,
	    IOOptionBits newLedgerOptions );

	virtual addr64_t getPhysicalSegment( IOByteCount offset,
	    IOByteCount * length,
#ifdef __LP64__
	    IOOptionBits options = 0 ) APPLE_KEXT_OVERRIDE;
#else /* !__LP64__ */
	    IOOptionBits options)APPLE_KEXT_OVERRIDE;
#endif /* !__LP64__ */

	virtual IOReturn prepare(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	virtual IOReturn complete(IODirection forDirection = kIODirectionNone) APPLE_KEXT_OVERRIDE;

	virtual IOReturn doMap(
		vm_map_t addressMap,
		IOVirtualAddress * atAddress,
		IOOptionBits options,
		IOByteCount sourceOffset = 0,
		IOByteCount length = 0 ) APPLE_KEXT_OVERRIDE;

	virtual IOReturn doUnmap(
		vm_map_t addressMap,
		IOVirtualAddress logical,
		IOByteCount length ) APPLE_KEXT_OVERRIDE;

	virtual bool serialize(OSSerialize *s) const APPLE_KEXT_OVERRIDE;

	// Factory method for cloning a persistent IOMD, see IOMemoryDescriptor
	static IOMemoryDescriptor *
	withPersistentMemoryDescriptor(IOGeneralMemoryDescriptor *originalMD);
};
1205
1206 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1207
1208 #ifdef __LP64__
// LP64 inline bodies for the accessors declared always_inline in the class:
// getAddress() is simply the (64-bit wide) virtual address of the mapping.
mach_vm_address_t
IOMemoryMap::getAddress()
{
	return getVirtualAddress();
}
1214
// LP64 inline body: getSize() is synonymous with getLength().
mach_vm_size_t
IOMemoryMap::getSize()
{
	return getLength();
}
1220 #else /* !__LP64__ */
1221 #include <IOKit/IOSubMemoryDescriptor.h>
1222 #endif /* !__LP64__ */
1223
1224 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
1225
1226 extern boolean_t iokit_iomd_setownership_enabled;
1227
1228 #endif /* !_IOMEMORYDESCRIPTOR_H */