]> git.saurik.com Git - apple/xnu.git/blame - iokit/Kernel/IOMultiMemoryDescriptor.cpp
xnu-6153.11.26.tar.gz
[apple/xnu.git] / iokit / Kernel / IOMultiMemoryDescriptor.cpp
CommitLineData
1c79356b
A
1/*
2 * Copyright (c) 1998-2000 Apple Computer, Inc. All rights reserved.
3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
2d21ac55
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28
29#include <IOKit/IOLib.h>
30#include <IOKit/IOMultiMemoryDescriptor.h>
31
32#define super IOMemoryDescriptor
33OSDefineMetaClassAndStructors(IOMultiMemoryDescriptor, IOMemoryDescriptor)
34
1c79356b 35IOMultiMemoryDescriptor * IOMultiMemoryDescriptor::withDescriptors(
0a7de745
A
36 IOMemoryDescriptor * *descriptors,
37 UInt32 withCount,
38 IODirection withDirection,
39 bool asReference )
1c79356b 40{
0a7de745
A
41 //
42 // Create a new IOMultiMemoryDescriptor. The "buffer" is made up of several
43 // memory descriptors, that are to be chained end-to-end to make up a single
44 // memory descriptor.
45 //
46 // Passing the ranges as a reference will avoid an extra allocation.
47 //
48
49 IOMultiMemoryDescriptor * me = new IOMultiMemoryDescriptor;
50
51 if (me && me->initWithDescriptors(
52 /* descriptors */ descriptors,
53 /* withCount */ withCount,
54 /* withDirection */ withDirection,
55 /* asReference */ asReference ) == false) {
56 me->release();
cb323159 57 me = NULL;
0a7de745
A
58 }
59
60 return me;
1c79356b
A
61}
62
0a7de745
A
bool
IOMultiMemoryDescriptor::initWithDescriptors(
	IOMemoryDescriptor ** descriptors,
	UInt32                withCount,
	IODirection           withDirection,
	bool                  asReference )
{
	unsigned index;
	IOOptionBits copyFlags;
	//
	// Initialize an IOMultiMemoryDescriptor. The "buffer" is made up of several
	// memory descriptors, that are to be chained end-to-end to make up a single
	// memory descriptor.
	//
	// Passing the ranges as a reference will avoid an extra allocation.
	//
	// May be invoked on an already-initialized instance (re-init): the
	// previously held child descriptors are released first and super::init()
	// is skipped in that case.
	//

	assert(descriptors);

	// Release existing descriptors, if any
	if (_descriptors) {
		for (unsigned index = 0; index < _descriptorsCount; index++) {
			_descriptors[index]->release();
		}

		if (_descriptorsIsAllocated) {
			IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
		}
	} else {
		// Ask our superclass' opinion.
		if (super::init() == false) {
			return false;
		}
	}

	// Initialize our minimal state.

	_descriptors = NULL;
	_descriptorsCount = withCount;
	_descriptorsIsAllocated = asReference ? false : true;
	_flags = withDirection;
#ifndef __LP64__
	_direction = (IODirection) (_flags & kIOMemoryDirectionMask);
#endif /* !__LP64__ */
	_length = 0;
	_mappings = NULL;
	_tag = 0;

	if (asReference) {
		// Caller guarantees the table outlives us; just borrow the pointer.
		_descriptors = descriptors;
	} else {
		// Copy the caller's table so we own our own array of pointers.
		_descriptors = IONew(IOMemoryDescriptor *, withCount);
		if (_descriptors == NULL) {
			return false;
		}

		bcopy( /* from  */ descriptors,
		    /* to    */ _descriptors,
		    /* bytes */ withCount * sizeof(IOMemoryDescriptor *));
	}

	// Retain each child, accumulate the total length, and adopt the first
	// non-zero tag seen. Every child must share our transfer direction.
	for (index = 0; index < withCount; index++) {
		descriptors[index]->retain();
		_length += descriptors[index]->getLength();
		if (_tag == 0) {
			_tag = descriptors[index]->getTag();
		}
		assert(descriptors[index]->getDirection() ==
		    (withDirection & kIOMemoryDirectionMask));
	}

	// Only kIOMemoryBufferPageable may propagate into our own flags, and only
	// when all children agree on it; a mismatch fails the init.
	enum { kCopyFlags = kIOMemoryBufferPageable };
	copyFlags = 0;
	for (index = 0; index < withCount; index++) {
		if (!index) {
			copyFlags = (kCopyFlags & descriptors[index]->_flags);
		} else if (copyFlags != (kCopyFlags & descriptors[index]->_flags)) {
			break;
		}
	}
	if (index < withCount) {
		return false;
	}
	_flags |= copyFlags;

	return true;
}
150
0a7de745
A
151void
152IOMultiMemoryDescriptor::free()
1c79356b 153{
0a7de745
A
154 //
155 // Free all of this object's outstanding resources.
156 //
157
158 if (_descriptors) {
159 for (unsigned index = 0; index < _descriptorsCount; index++) {
160 _descriptors[index]->release();
161 }
162
163 if (_descriptorsIsAllocated) {
164 IODelete(_descriptors, IOMemoryDescriptor *, _descriptorsCount);
165 }
166 }
167
168 super::free();
1c79356b
A
169}
170
0a7de745
A
171IOReturn
172IOMultiMemoryDescriptor::prepare(IODirection forDirection)
1c79356b 173{
0a7de745
A
174 //
175 // Prepare the memory for an I/O transfer.
176 //
177 // This involves paging in the memory and wiring it down for the duration
178 // of the transfer. The complete() method finishes the processing of the
179 // memory after the I/O transfer finishes.
180 //
181
182 unsigned index;
183 IOReturn status = kIOReturnInternalError;
184 IOReturn statusUndo;
185
186 if (forDirection == kIODirectionNone) {
187 forDirection = getDirection();
188 }
189
190 for (index = 0; index < _descriptorsCount; index++) {
191 status = _descriptors[index]->prepare(forDirection);
192 if (status != kIOReturnSuccess) {
193 break;
194 }
195 }
196
197 if (status != kIOReturnSuccess) {
198 for (unsigned indexUndo = 0; indexUndo < index; indexUndo++) {
199 statusUndo = _descriptors[indexUndo]->complete(forDirection);
200 assert(statusUndo == kIOReturnSuccess);
201 }
202 }
203
204 return status;
1c79356b
A
205}
206
0a7de745
A
207IOReturn
208IOMultiMemoryDescriptor::complete(IODirection forDirection)
0b4e3aa0 209{
0a7de745
A
210 //
211 // Complete processing of the memory after an I/O transfer finishes.
212 //
213 // This method shouldn't be called unless a prepare() was previously issued;
214 // the prepare() and complete() must occur in pairs, before and after an I/O
215 // transfer.
216 //
217
218 IOReturn status;
219 IOReturn statusFinal = kIOReturnSuccess;
220
221 if (forDirection == kIODirectionNone) {
222 forDirection = getDirection();
223 }
3e170ce0 224
0a7de745
A
225 for (unsigned index = 0; index < _descriptorsCount; index++) {
226 status = _descriptors[index]->complete(forDirection);
227 if (status != kIOReturnSuccess) {
228 statusFinal = status;
229 }
230 assert(status == kIOReturnSuccess);
231 }
232
233 return statusFinal;
234}
3e170ce0 235
0a7de745
A
236addr64_t
237IOMultiMemoryDescriptor::getPhysicalSegment(IOByteCount offset,
238 IOByteCount * length,
239 IOOptionBits options)
3e170ce0 240{
0a7de745
A
241 //
242 // This method returns the physical address of the byte at the given offset
243 // into the memory, and optionally the length of the physically contiguous
244 // segment from that offset.
245 //
246
247 assert(offset <= _length);
248
249 for (unsigned index = 0; index < _descriptorsCount; index++) {
250 if (offset < _descriptors[index]->getLength()) {
251 return _descriptors[index]->getPhysicalSegment(offset, length, options);
252 }
253 offset -= _descriptors[index]->getLength();
39037602 254 }
0a7de745
A
255
256 if (length) {
257 *length = 0;
39037602 258 }
3e170ce0 259
0a7de745
A
260 return 0;
261}
262
263#include "IOKitKernelInternal.h"
264
IOReturn
IOMultiMemoryDescriptor::doMap(vm_map_t __addressMap,
    IOVirtualAddress * __address,
    IOOptionBits options,
    IOByteCount __offset,
    IOByteCount __length)
{
	// Map the concatenated "buffer" into a task: first reserve (or reuse,
	// with kIOMapOverwrite) one contiguous VM range, then map each child
	// descriptor into successive chunks of that range.
	//
	// NOTE(review): *__address carries an IOMemoryMap * describing the
	// requested mapping rather than a raw address, matching the internal
	// IOMemoryDescriptor::doMap convention; the __addressMap/__offset/
	// __length parameters are unused here — confirm against the superclass.
	IOMemoryMap * mapping = (IOMemoryMap *) *__address;
	vm_map_t map = mapping->fAddressMap;
	mach_vm_size_t offset = mapping->fOffset;
	mach_vm_size_t length = mapping->fLength;
	mach_vm_address_t address = mapping->fAddress;

	kern_return_t err;
	IOOptionBits subOptions;
	mach_vm_size_t mapOffset;
	mach_vm_size_t bytesRemaining, chunk;
	mach_vm_address_t nextAddress;
	IOMemoryDescriptorMapAllocRef ref;
	vm_prot_t prot;

	do{
		prot = VM_PROT_READ;
		if (!(kIOMapReadOnly & options)) {
			prot |= VM_PROT_WRITE;
		}

		if (kIOMapOverwrite & options) {
			// Caller supplied the target range; nothing to allocate.
			if ((map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				map = IOPageableMapForAddress(address);
			}
			err = KERN_SUCCESS;
		} else {
			// Reserve one contiguous range covering the whole mapping.
			ref.map = map;
			ref.tag = IOMemoryTag(map);
			ref.options = options;
			ref.size = length;
			ref.prot = prot;
			if (options & kIOMapAnywhere) {
				// vm_map looks for addresses above here, even when VM_FLAGS_ANYWHERE
				ref.mapped = 0;
			} else {
				ref.mapped = mapping->fAddress;
			}

			if ((ref.map == kernel_map) && (kIOMemoryBufferPageable & _flags)) {
				err = IOIteratePageableMaps(ref.size, &IOMemoryDescriptorMapAlloc, &ref);
			} else {
				err = IOMemoryDescriptorMapAlloc(ref.map, &ref);
			}

			if (KERN_SUCCESS != err) {
				break;
			}

			address = ref.mapped;
			mapping->fAddress = address;
		}

		// Walk the children: skip those entirely before the requested
		// offset, then map each overlapping chunk. kIOMapOverwrite makes
		// the sub-maps land inside the range reserved above instead of
		// allocating their own.
		mapOffset = offset;
		bytesRemaining = length;
		nextAddress = address;
		assert(mapOffset <= _length);
		subOptions = (options & ~kIOMapAnywhere) | kIOMapOverwrite;

		for (unsigned index = 0; bytesRemaining && (index < _descriptorsCount); index++) {
			chunk = _descriptors[index]->getLength();
			if (mapOffset >= chunk) {
				// This child lies entirely before the starting offset.
				mapOffset -= chunk;
				continue;
			}
			chunk -= mapOffset;
			if (chunk > bytesRemaining) {
				chunk = bytesRemaining;
			}
			IOMemoryMap * subMap;
			subMap = _descriptors[index]->createMappingInTask(mapping->fAddressTask, nextAddress, subOptions, mapOffset, chunk );
			if (!subMap) {
				break;
			}
			subMap->release(); // kIOMapOverwrite means it will not deallocate

			bytesRemaining -= chunk;
			nextAddress += chunk;
			mapOffset = 0; // only the first mapped child has an interior offset
		}
		if (bytesRemaining) {
			// A sub-mapping failed (or children ran out) before the whole
			// requested length was covered.
			err = kIOReturnUnderrun;
		}
	}while (false);

	if (kIOReturnSuccess == err) {
#if IOTRACKING
		IOTrackingAddUser(gIOMapTracking, &mapping->fTracking, mapping->fLength);
#endif
	}

	return err;
}
364
0a7de745
A
365IOReturn
366IOMultiMemoryDescriptor::setPurgeable( IOOptionBits newState,
367 IOOptionBits * oldState )
3e170ce0 368{
0a7de745
A
369 IOReturn err;
370 IOOptionBits totalState, state;
371
372 totalState = kIOMemoryPurgeableNonVolatile;
373 err = kIOReturnSuccess;
374 for (unsigned index = 0; index < _descriptorsCount; index++) {
375 err = _descriptors[index]->setPurgeable(newState, &state);
376 if (kIOReturnSuccess != err) {
377 break;
378 }
379
380 if (kIOMemoryPurgeableEmpty == state) {
381 totalState = kIOMemoryPurgeableEmpty;
382 } else if (kIOMemoryPurgeableEmpty == totalState) {
383 continue;
384 } else if (kIOMemoryPurgeableVolatile == totalState) {
385 continue;
386 } else if (kIOMemoryPurgeableVolatile == state) {
387 totalState = kIOMemoryPurgeableVolatile;
388 } else {
389 totalState = kIOMemoryPurgeableNonVolatile;
390 }
391 }
392 if (oldState) {
393 *oldState = totalState;
394 }
395
396 return err;
3e170ce0
A
397}
398
cb323159
A
399IOReturn
400IOMultiMemoryDescriptor::setOwnership( task_t newOwner,
401 int newLedgerTag,
402 IOOptionBits newLedgerOptions )
403{
404 IOReturn err;
405
406 if (iokit_iomd_setownership_enabled == FALSE) {
407 return kIOReturnUnsupported;
408 }
409
410 err = kIOReturnSuccess;
411 for (unsigned index = 0; index < _descriptorsCount; index++) {
412 err = _descriptors[index]->setOwnership(newOwner, newLedgerTag, newLedgerOptions);
413 if (kIOReturnSuccess != err) {
414 break;
415 }
416 }
417
418 return err;
419}
420
0a7de745
A
421IOReturn
422IOMultiMemoryDescriptor::getPageCounts(IOByteCount * pResidentPageCount,
423 IOByteCount * pDirtyPageCount)
3e170ce0 424{
0a7de745
A
425 IOReturn err;
426 IOByteCount totalResidentPageCount, totalDirtyPageCount;
427 IOByteCount residentPageCount, dirtyPageCount;
428
429 err = kIOReturnSuccess;
430 totalResidentPageCount = totalDirtyPageCount = 0;
431 for (unsigned index = 0; index < _descriptorsCount; index++) {
432 err = _descriptors[index]->getPageCounts(&residentPageCount, &dirtyPageCount);
433 if (kIOReturnSuccess != err) {
434 break;
435 }
436 totalResidentPageCount += residentPageCount;
437 totalDirtyPageCount += dirtyPageCount;
438 }
439
440 if (pResidentPageCount) {
441 *pResidentPageCount = totalResidentPageCount;
442 }
443 if (pDirtyPageCount) {
444 *pDirtyPageCount = totalDirtyPageCount;
445 }
446
447 return err;
3e170ce0 448}
d9a64523 449
0a7de745
A
450uint64_t
451IOMultiMemoryDescriptor::getPreparationID( void )
d9a64523 452{
0a7de745
A
453 if (!super::getKernelReserved()) {
454 return kIOPreparationIDUnsupported;
455 }
d9a64523 456
0a7de745
A
457 for (unsigned index = 0; index < _descriptorsCount; index++) {
458 uint64_t preparationID = _descriptors[index]->getPreparationID();
d9a64523 459
0a7de745
A
460 if (preparationID == kIOPreparationIDUnsupported) {
461 return kIOPreparationIDUnsupported;
462 }
d9a64523 463
0a7de745
A
464 if (preparationID == kIOPreparationIDUnprepared) {
465 return kIOPreparationIDUnprepared;
466 }
467 }
d9a64523 468
0a7de745 469 super::setPreparationID();
d9a64523 470
0a7de745 471 return super::getPreparationID();
d9a64523 472}