/*
 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/IOLib.h>
#include <IOKit/IOMultiMemoryDescriptor.h>

#define super IOMemoryDescriptor
OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)

IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    //
    // Create a new IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to make up a single
    // memory descriptor.
    //
    // Passing the descriptors as a reference will avoid an extra allocation.
    //

    IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;

    if ( me && me->initWithDescriptors(
                                  /* descriptors   */ descriptors,
                                  /* withCount     */ withCount,
                                  /* withDirection */ withDirection,
                                  /* asReference   */ asReference ) == false )
    {
        me->release();
        me = 0;
    }

    return me;
}
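
// Usage sketch (illustrative, not part of the original file): combining two
// existing descriptors into one logical buffer.  "mdFirst" and "mdSecond" are
// hypothetical IOMemoryDescriptor objects created elsewhere.
//
//     IOMemoryDescriptor *      parts[2] = { mdFirst, mdSecond };
//     IOMultiMemoryDescriptor * multi    = IOMultiMemoryDescriptor::withDescriptors(
//                                              parts, 2, kIODirectionOutIn,
//                                              /* asReference */ false);
//     if ( multi )
//     {
//         // The combined length is the sum of the sub-descriptor lengths.
//         assert(multi->getLength() == mdFirst->getLength() + mdSecond->getLength());
//         multi->release();   // also drops the retains taken on mdFirst/mdSecond
//     }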

bool IOMultiMemoryDescriptor::initWithDescriptors(
                                  IOMemoryDescriptor ** descriptors,
                                  UInt32                withCount,
                                  IODirection           withDirection,
                                  bool                  asReference )
{
    unsigned     index;
    IOOptionBits copyFlags;

    //
    // Initialize an IOMultiMemoryDescriptor.  The "buffer" is made up of several
    // memory descriptors that are chained end-to-end to make up a single
    // memory descriptor.
    //
    // Passing the descriptors as a reference will avoid an extra allocation.
    //

    assert(descriptors);

    // Release existing descriptors, if any.
    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }
    else
    {
        // Ask our superclass' opinion.
        if ( super::init() == false )  return false;
    }

    // Initialize our minimal state.

    _descriptors            = 0;
    _descriptorsCount       = withCount;
    _descriptorsIsAllocated = asReference ? false : true;
    _flags                  = withDirection;
#ifndef __LP64__
    _direction              = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
    _length                 = 0;
    _mappings               = 0;
    _tag                    = 0;

    if ( asReference )
    {
        _descriptors = descriptors;
    }
    else
    {
        _descriptors = IONew(IOMemoryDescriptor *, withCount);
        if ( _descriptors == 0 )  return false;

        bcopy( /* from  */ descriptors,
               /* to    */ _descriptors,
               /* bytes */ withCount * sizeof(IOMemoryDescriptor *) );
    }

    for ( index = 0; index < withCount; index++ )
    {
        descriptors[index]->retain();
        _length += descriptors[index]->getLength();
        if ( _tag == 0 )  _tag = descriptors[index]->getTag();
        assert(descriptors[index]->getDirection() ==
               (withDirection & kIOMemoryDirectionMask));
    }

    // Every sub-descriptor must agree on pageability, or the init fails.
    enum { kCopyFlags = kIOMemoryBufferPageable };
    copyFlags = 0;
    for ( index = 0; index < withCount; index++ )
    {
        if (!index)
            copyFlags = (kCopyFlags & descriptors[index]->_flags);
        else if (copyFlags != (kCopyFlags & descriptors[index]->_flags))
            break;
    }
    if ( index < withCount )  return false;
    _flags |= copyFlags;

    return true;
}

void IOMultiMemoryDescriptor::free()
{
    //
    // Free all of this object's outstanding resources.
    //

    if ( _descriptors )
    {
        for ( unsigned index = 0; index < _descriptorsCount; index++ )
            _descriptors[index]->release();

        if ( _descriptorsIsAllocated )
            IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
    }

    super::free();
}

IOReturn IOMultiMemoryDescriptor::prepare(IODirection forDirection)
{
    //
    // Prepare the memory for an I/O transfer.
    //
    // This involves paging in the memory and wiring it down for the duration
    // of the transfer.  The complete() method finishes the processing of the
    // memory after the I/O transfer finishes.
    //

    unsigned index;
    IOReturn status = kIOReturnInternalError;
    IOReturn statusUndo;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->prepare(forDirection);
        if ( status != kIOReturnSuccess )  break;
    }

    // If any sub-descriptor failed to prepare, unwind the ones that succeeded.
    if ( status != kIOReturnSuccess )
    {
        for ( unsigned indexUndo = 0; indexUndo < index; indexUndo++ )
        {
            statusUndo = _descriptors[indexUndo]->complete(forDirection);
            assert(statusUndo == kIOReturnSuccess);
        }
    }

    return status;
}

IOReturn IOMultiMemoryDescriptor::complete(IODirection forDirection)
{
    //
    // Complete processing of the memory after an I/O transfer finishes.
    //
    // This method shouldn't be called unless a prepare() was previously issued;
    // the prepare() and complete() must occur in pairs, before and after an I/O
    // transfer.
    //

    IOReturn status;
    IOReturn statusFinal = kIOReturnSuccess;

    if ( forDirection == kIODirectionNone )
    {
        forDirection = getDirection();
    }

    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        status = _descriptors[index]->complete(forDirection);
        if ( status != kIOReturnSuccess )  statusFinal = status;
        assert(status == kIOReturnSuccess);
    }

    return statusFinal;
}
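
// Pairing sketch (illustrative, not part of the original file): every
// successful prepare() must be balanced by a complete() once the I/O transfer
// is done.  "multi" is the hypothetical descriptor from the earlier example.
//
//     if ( multi->prepare() == kIOReturnSuccess )
//     {
//         // ... perform the I/O transfer against the wired memory ...
//         multi->complete();
//     }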

addr64_t IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount   offset,
                                                     IOByteCount * length,
                                                     IOOptionBits  options)
{
    //
    // This method returns the physical address of the byte at the given offset
    // into the memory, and optionally the length of the physically contiguous
    // segment from that offset.
    //

    assert(offset <= _length);

    // Walk the sub-descriptors until the one containing the offset is found.
    for ( unsigned index = 0; index < _descriptorsCount; index++ )
    {
        if ( offset < _descriptors[index]->getLength() )
        {
            return _descriptors[index]->getPhysicalSegment(offset, length, options);
        }
        offset -= _descriptors[index]->getLength();
    }

    if ( length )  *length = 0;

    return 0;
}
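
// Iteration sketch (illustrative, not part of the original file): walking
// every physically contiguous segment of a prepared descriptor by advancing
// the offset by each segment's reported length.
//
//     IOByteCount offset = 0, segLength;
//     addr64_t    segAddress;
//     while ((segAddress = multi->getPhysicalSegment(offset, &segLength, 0)))
//     {
//         // ... program segAddress/segLength into DMA hardware, or similar ...
//         offset += segLength;
//     }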

#include "IOKitKernelInternal.h"

IOReturn IOMultiMemoryDescriptor::doMap(vm_map_t           __addressMap,
                                        IOVirtualAddress * __address,
                                        IOOptionBits       options,
                                        IOByteCount        __offset,
                                        IOByteCount        __length)
{
    IOMemoryMap *     mapping = (IOMemoryMap *) *__address;
    vm_map_t          map     = mapping->fAddressMap;
    mach_vm_size_t    offset  = mapping->fOffset;
    mach_vm_size_t    length  = mapping->fLength;
    mach_vm_address_t address = mapping->fAddress;

    kern_return_t     err;
    IOOptionBits      subOptions;
    mach_vm_size_t    mapOffset;
    mach_vm_size_t    bytesRemaining, chunk;
    mach_vm_address_t nextAddress;
    IOMemoryDescriptorMapAllocRef ref;
    vm_prot_t         prot;

    do
    {
        prot = VM_PROT_READ;
        if (!(kIOMapReadOnly & options)) prot |= VM_PROT_WRITE;
        ref.map     = map;
        ref.tag     = IOMemoryTag(map);
        ref.options = options;
        ref.size    = length;
        ref.prot    = prot;
        if (options & kIOMapAnywhere)
            // vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
            ref.mapped = 0;
        else
            ref.mapped = mapping->fAddress;

        // Reserve one contiguous virtual range large enough for the whole
        // multi-descriptor, then map each sub-descriptor into it piecewise.
        if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags))
            err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
        else
            err = IOMemoryDescriptorMapAlloc(ref.map, &ref);

        if (KERN_SUCCESS != err) break;

        address = ref.mapped;
        mapping->fAddress = address;

        mapOffset      = offset;
        bytesRemaining = length;
        nextAddress    = address;
        assert(mapOffset <= _length);
        subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

        for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++)
        {
            chunk = _descriptors[index]->getLength();
            if (mapOffset >= chunk)
            {
                // This sub-descriptor lies entirely before the mapping offset.
                mapOffset -= chunk;
                continue;
            }
            chunk -= mapOffset;
            if (chunk > bytesRemaining) chunk = bytesRemaining;
            IOMemoryMap * subMap;
            subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk);
            if (!subMap) break;
            subMap->release(); // kIOMapOverwrite means it will not deallocate

            bytesRemaining -= chunk;
            nextAddress    += chunk;
            mapOffset       = 0;
        }
        if (bytesRemaining) err = kIOReturnUnderrun;
    }
    while (false);

    if (kIOReturnSuccess == err)
    {
#if IOTRACKING
        IOTrackingAdd(gIOMapTracking, &mapping->fTracking, length, false);
#endif
    }
    else
    {
        mapping->release();
        mapping = 0;
    }

    return (err);
}
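
// Mapping sketch (illustrative, not part of the original file): clients do
// not call doMap() directly; they create a mapping through the public
// IOMemoryDescriptor interface, which invokes doMap() to stitch the
// sub-descriptors into one contiguous virtual range.
//
//     IOMemoryMap * map = multi->createMappingInTask(kernel_task, 0,
//                                                    kIOMapAnywhere);
//     if ( map )
//     {
//         IOVirtualAddress va = map->getVirtualAddress();
//         // ... access [va, va + map->getLength()) as one contiguous range ...
//         map->release();
//     }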

IOReturn IOMultiMemoryDescriptor::setPurgeable( IOOptionBits   newState,
                                                IOOptionBits * oldState )
{
    IOReturn     err = kIOReturnSuccess;   // covers the zero-descriptor case
    IOOptionBits totalState, state;

    // Apply the new state to every sub-descriptor, and summarize the old
    // states into one value: empty beats volatile beats nonvolatile.
    totalState = kIOMemoryPurgeableNonVolatile;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->setPurgeable(newState, &state);
        if (kIOReturnSuccess != err) break;

        if (kIOMemoryPurgeableEmpty == state)              totalState = kIOMemoryPurgeableEmpty;
        else if (kIOMemoryPurgeableEmpty == totalState)    continue;
        else if (kIOMemoryPurgeableVolatile == totalState) continue;
        else if (kIOMemoryPurgeableVolatile == state)      totalState = kIOMemoryPurgeableVolatile;
        else totalState = kIOMemoryPurgeableNonVolatile;
    }
    if (oldState) *oldState = totalState;

    return (err);
}
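
// Purgeability sketch (illustrative, not part of the original file): marking
// the whole buffer volatile, then reclaiming it and checking whether its
// contents survived.
//
//     IOOptionBits oldState;
//     multi->setPurgeable(kIOMemoryPurgeableVolatile, &oldState);
//     // ... later ...
//     multi->setPurgeable(kIOMemoryPurgeableNonVolatile, &oldState);
//     if ( oldState == kIOMemoryPurgeableEmpty )
//     {
//         // The pager discarded the contents while the buffer was volatile.
//     }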

IOReturn IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
                                                IOByteCount * pDirtyPageCount)
{
    IOReturn    err;
    IOByteCount totalResidentPageCount, totalDirtyPageCount;
    IOByteCount residentPageCount, dirtyPageCount;

    // Sum the resident and dirty page counts across all sub-descriptors.
    err = kIOReturnSuccess;
    totalResidentPageCount = totalDirtyPageCount = 0;
    for (unsigned index = 0; index < _descriptorsCount; index++)
    {
        err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
        if (kIOReturnSuccess != err) break;
        totalResidentPageCount += residentPageCount;
        totalDirtyPageCount    += dirtyPageCount;
    }

    if (pResidentPageCount) *pResidentPageCount = totalResidentPageCount;
    if (pDirtyPageCount)    *pDirtyPageCount    = totalDirtyPageCount;

    return (err);
}
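
// Accounting sketch (illustrative, not part of the original file): querying
// how many pages of the combined buffer are resident and dirty at a given
// moment.
//
//     IOByteCount resident, dirty;
//     if ( multi->getPageCounts(&resident, &dirty) == kIOReturnSuccess )
//     {
//         IOLog("resident pages: %lu, dirty pages: %lu\n",
//               (unsigned long) resident, (unsigned long) dirty);
//     }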