/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    //
    // Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are to be chained end-to-end to make up a single
    // memory descriptor.
    //
    // Passing the ranges as a reference will avoid an extra allocation.
    //

    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

    if ( me && me->initWithDescriptors(
                          /* descriptors   */ descriptors,
                          /* withCount     */ withCount,
                          /* withDirection */ withDirection,
                          /* asReference   */ asReference ) == false )
    {
        me->release();
        me = 0;
    }

    return me;
}
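
// Usage sketch (illustrative only, not part of the original file): a driver that
// already holds two descriptors -- "mdA" and "mdB" are hypothetical names -- could
// present them to hardware as one logical buffer roughly like this:
//
//     IOMemoryDescriptor *      parts[2] = { mdA, mdB };
//     IOMultiMemoryDescriptor * multi    =
//         IOMultiMemoryDescriptor::withDescriptors(parts, 2, kIODirectionOut, false);
//     if (multi) { /* ... use it ... */ multi->release(); }
//
// Each sub-descriptor is retained by initWithDescriptors(), so the caller keeps and
// eventually releases its own references independently of the multi-descriptor.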

bool IOMultiMemoryDescriptor::initWithDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    unsigned     index;
    IOOptionBits copyFlags;
    //
    // Initialize an IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are to be chained end-to-end to make up a single
    // memory descriptor.
    //
    // Passing the ranges as a reference will avoid an extra allocation.
    //

    // Release existing descriptors, if any

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
        _descriptors[index]->release();

    if ( _descriptorsIsAllocated )
        IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);

    // Ask our superclass' opinion.

    if ( super::init() == false ) return false;

    // Initialize our minimal state.

    _descriptors            = 0;
    _descriptorsCount       = withCount;
    _descriptorsIsAllocated = asReference ? false : true;
    _flags                  = withDirection;
#ifndef __LP64__
    _direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    _length                 = 0;
    _tag                    = 0;

    if ( asReference )
    {
        _descriptors = descriptors;
    }
    else
    {
        _descriptors = IONew(IOMemoryDescriptor *, withCount);
        if ( _descriptors == 0 ) return false;

        bcopy( /* from  */ descriptors,
               /* to    */ _descriptors,
               /* bytes */ withCount * sizeof(IOMemoryDescriptor *) );
    }

    for ( index = 0; index < withCount; index++ )
    {
        descriptors[index]->retain();
        _length += descriptors[index]->getLength();
        if ( _tag == 0 ) _tag = descriptors[index]->getTag();
        assert(descriptors[index]->getDirection() ==
               (withDirection & kIOMemoryDirectionMask));
    }

    // All sub-descriptors must agree on pageability; refuse mixed sets.

    enum { kCopyFlags = kIOMemoryBufferPageable };
    copyFlags = 0;
    for ( index = 0; index < withCount; index++ )
    {
        if (!index) copyFlags = (kCopyFlags & descriptors[index]->_flags);
        else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) break;
    }
    if (index < withCount) return (false);
    _flags |= copyFlags;

    return true;
}
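
// Note on asReference (illustrative sketch, not part of the original file): when the
// caller passes true, the object keeps the caller's pointer array instead of copying
// it, so that array must outlive the descriptor. For example:
//
//     static IOMemoryDescriptor * gParts[2];   // hypothetical, caller-owned storage
//     gParts[0] = mdA; gParts[1] = mdB;        // hypothetical descriptors
//     IOMultiMemoryDescriptor * multi =
//         IOMultiMemoryDescriptor::withDescriptors(gParts, 2, kIODirectionIn, true);
//
// With asReference == false the array is copied with IONew()/bcopy() as above and
// released again in free() via IODelete().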

void IOMultiMemoryDescriptor::free()
{
    //
    // Free all of this object's outstanding resources.
    //

    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }

    super::free();
}

IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
    //
    // Prepare the memory for an I/O transfer.
    //
    // This involves paging in the memory and wiring it down for the duration
    // of the transfer.  The complete() method finishes the processing of the
    // memory after the I/O transfer finishes.
    //

    unsigned index;
    IOReturn status = kIOReturnInternalError;
    IOReturn statusUndo;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->prepare(forDirection);
        if ( status != kIOReturnSuccess ) break;
    }

    if ( status != kIOReturnSuccess )
    {
        // Undo the sub-descriptors that were already prepared before the failure.
        for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
        {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    return status;
}
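
// Usage sketch (illustrative only): prepare() and complete() bracket an I/O transfer
// on the combined buffer; "multi" is the hypothetical descriptor from the sketch
// after withDescriptors() above.
//
//     if (multi->prepare() == kIOReturnSuccess)   // wires down every sub-descriptor
//     {
//         /* ... program the hardware, wait for the transfer to finish ... */
//         multi->complete();                      // unwires them again
//     }
//
// A failed prepare() leaves nothing wired: the undo loop above completes whatever
// sub-descriptors had already been prepared.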

IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
    //
    // Complete processing of the memory after an I/O transfer finishes.
    //
    // This method shouldn't be called unless a prepare() was previously issued;
    // the prepare() and complete() must occur in pairs, before and after an I/O
    // transfer.
    //

    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->complete(forDirection);
        if ( status != kIOReturnSuccess ) statusFinal = status;
        assert(status == kIOReturnSuccess);
    }

    return statusFinal;
}

addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                                     IOByteCount * length,
                                                     IOOptionBits  options)
{
    //
    // This method returns the physical address of the byte at the given offset
    // into the memory, and optionally the length of the physically contiguous
    // segment from that offset.
    //

    assert(offset <= _length);

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        if ( offset < _descriptors[index]->getLength() )
        {
            return _descriptors[index]->getPhysicalSegment(offset, length, options);
        }
        offset -= _descriptors[index]->getLength();
    }

    if ( length ) *length = 0;

    return 0;
}
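
// Usage sketch (illustrative only): a caller can walk the combined buffer one
// physically contiguous segment at a time, e.g. to build a scatter/gather list;
// "multi" is hypothetical, as above.
//
//     IOByteCount position = 0, segLength = 0;
//     while (position < multi->getLength())
//     {
//         addr64_t physAddr = multi->getPhysicalSegment(position, &segLength, 0);
//         if (!physAddr || !segLength) break;
//         /* ... record (physAddr, segLength) ... */
//         position += segLength;
//     }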

#include "IOKitKernelInternal.h"

IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
                                        IOVirtualAddress * __address,
                                        IOOptionBits       options,
                                        IOByteCount        __offset,
                                        IOByteCount        __length)
{
    // Map the combined buffer by reserving one contiguous range of virtual
    // addresses, then overlaying each sub-descriptor onto consecutive chunks of
    // that range with createMappingInTask() and kIOMapOverwrite.

    IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
    vm_map_t          map     = mapping->fAddressMap;
    mach_vm_size_t    offset  = mapping->fOffset;
    mach_vm_size_t    length  = mapping->fLength;
    mach_vm_address_t address = mapping->fAddress;

    IOReturn          err;
    IOOptionBits      subOptions;
    mach_vm_size_t    mapOffset;
    mach_vm_size_t    bytesRemaining, chunk;
    mach_vm_address_t nextAddress;
    IOMemoryDescriptorMapAllocRef ref;
    vm_prot_t                     prot;

    do
    {
        prot = VM_PROT_READ;
        if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;

        if (kIOMapOverwrite & options)
        {
            if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            {
                map = IOPageableMapForAddress(address);
            }
            err = kIOReturnSuccess;
        }
        else
        {
            ref.map     = map;
            ref.tag     = IOMemoryTag(map);
            ref.options = options;
            ref.size    = length;
            ref.prot    = prot;
            if (options & kIOMapAnywhere)
                // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
                ref.mapped = 0;
            else
                ref.mapped = mapping->fAddress;

            if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
                err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
            else
                err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

            if (KERN_SUCCESS != err) break;

            address = ref.mapped;
            mapping->fAddress = address;
        }

        mapOffset      = offset;
        bytesRemaining = length;
        nextAddress    = address;
        assert(mapOffset <= _length);
        subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

        for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
        {
            chunk = _descriptors[index]->getLength();
            if (mapOffset >= chunk)
            {
                // The requested offset starts beyond this sub-descriptor.
                mapOffset -= chunk;
                continue;
            }
            chunk -= mapOffset;
            if (chunk > bytesRemaining) chunk = bytesRemaining;
            IOMemoryMap * subMap;
            subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask,
                                            nextAddress, subOptions, mapOffset, chunk);
            if (!subMap) break;
            subMap->release();          // kIOMapOverwrite means it will not deallocate

            bytesRemaining -= chunk;
            nextAddress    += chunk;
            mapOffset       = 0;
        }
        if (bytesRemaining) err = kIOReturnUnderrun;
    }
    while (false);

    if (kIOReturnSuccess == err)
    {
#if IOTRACKING
        IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
#endif /* IOTRACKING */
    }

    return err;
}

IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits   newState,
                                                IOOptionBits * oldState )
{
    IOReturn     err;
    IOOptionBits totalState, state;

    // Apply the new state to every sub-descriptor and aggregate the previous
    // states: Empty dominates Volatile, which dominates NonVolatile.

    totalState = kIOMemoryPurgeableNonVolatile;
    err        = kIOReturnSuccess;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->setPurgeable(newState, &state);
        if (kIOReturnSuccess != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }
    if (oldState) *oldState = totalState;

    return err;
}

IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
                                                IOByteCount * pDirtyPageCount)
{
    IOReturn    err;
    IOByteCount totalResidentPageCount, totalDirtyPageCount;
    IOByteCount residentPageCount, dirtyPageCount;

    // Sum the resident and dirty page counts across all sub-descriptors.

    err = kIOReturnSuccess;
    totalResidentPageCount = totalDirtyPageCount = 0;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
        if (kIOReturnSuccess != err) break;
        totalResidentPageCount += residentPageCount;
        totalDirtyPageCount    += dirtyPageCount;
    }

    if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
    if (pDirtyPageCount)    *pDirtyPageCount    = totalDirtyPageCount;

    return err;
}