/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)         (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)       (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};
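
// These walk flags parameterize walkAll(): prepare() passes
// kWalkPrepare | kWalkPreflight (plus kWalkSyncOut when synchronizing),
// complete() passes kWalkComplete (plus kWalkSyncIn), and synchronize()
// adds kWalkSyncAlways so that the bounce-buffer copies run regardless of
// the transfer direction.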

#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                              \
    ((kIODirectionNone == (direction))                              \
     || (kWalkSyncAlways & (op))                                    \
     || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)  \
         & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand, 0);
OSMetaClassDefineReservedUsed(IODMACommand, 1);
OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUnused(IODMACommand, 3);
OSMetaClassDefineReservedUnused(IODMACommand, 4);
OSMetaClassDefineReservedUnused(IODMACommand, 5);
OSMetaClassDefineReservedUnused(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
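
/*
 * Illustrative sketch (not from the original source): a driver for a
 * hypothetical 32-bit DMA engine might create a command like this; the
 * limits shown are made-up device capabilities.
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *       IODMACommand::OutputHost32,    // 32-bit, host-endian segment output
 *       32,                            // device drives 32 address bits
 *       65536,                         // max bytes per segment
 *       IODMACommand::kMapped,         // use the system mapper if present
 *       0,                             // no transfer limit (becomes ~0)
 *       1,                             // byte alignment
 *       0,                             // 0 => IOMapper::gSystem
 *       0);                            // refCon
 */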

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;       // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    if (fMapper)
        fMapper->retain();

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
        fMapper->release();

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
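
/*
 * Illustrative sketch (not from the original source): the usual client
 * lifecycle around the accessors above; `md` is a hypothetical prepared
 * IOMemoryDescriptor for the I/O buffer.
 *
 *   cmd->setMemoryDescriptor(md, true);  // retains md and prepares the command
 *   ... generate segments and run the I/O ...
 *   cmd->clearMemoryDescriptor();        // completes any active prepare, releases md
 */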


IOReturn
IODMACommand::segmentOp(
        void         *reference,
        IODMACommand *target,
        Segment64     segment,
        void         *segments,
        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t remapAddr;
                    uint64_t chunk;

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                            (kWalkSyncIn & op) ? "->" : "<-",
                            address, chunk, op);

                    if (kWalkSyncIn & op)
                    {   // cppvNoModSnk
                        copypv(remapAddr, address, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
                        copypv(address, remapAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMapContig = false;
        state->fMisaligned = false;
        state->fDoubleBuffer = false;
        state->fPrepared = false;
        state->fCopyNext = NULL;
        state->fCopyPageAlloc = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;
                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
                state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                    fMDSummary.fDirection, state->fPreparedLength, mask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
            }
        }

        if (state->fLocalMapper)
        {
            state->fLocalMapperPageCount = atop_64(round_page(
                    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
            state->fMapContig = true;
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fLocalMapperPageAlloc)
        {
            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;    // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;                   // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;                  // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}


IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig = false;
        state->fMisaligned = false;
        state->fDoubleBuffer = false;
        state->fPrepared = false;
        state->fCopyNext = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (state->fLocalMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
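
// prepare()/complete() nest: fActive counts outstanding prepares, a nested
// prepare() must cover the same (offset, length) range or it fails with
// kIOReturnNotReady, and only the final complete() tears down the walk
// state and performs the non-coherent cache invalidate.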

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext
{
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
        case kIODMACommandTransferOpReadBytes:
            copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
            break;
        case kIODMACommandTransferOpWriteBytes:
            copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
            break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
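
/*
 * Illustrative sketch (not from the original source): the typical client
 * loop over the genIOVMSegments() entry point above, for a command whose
 * output function is OutputHost64 (so segments come back as Segment64).
 * The batch size of 32 is arbitrary.
 *
 *   Segment64 segs[32];
 *   UInt64    offset = 0;
 *   while (offset < preparedLength)    // length passed to prepare()
 *   {
 *       UInt32 numSegs = 32;
 *       if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs)
 *           || !numSegs)
 *           break;
 *       // hand segs[0..numSegs-1] to the hardware
 *   }
 */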

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset = 0;
        state->fIOVMAddr = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD = false;
        state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && (kWalkClient & op))
            {
                ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
                state->fIOVMAddr = ptoa_64(pageNum)
                                    + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length    = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                              remap && (addrPage != vm_page_get_offset(remap));
                              remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                        + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}

bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
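
/*
 * Illustrative sketch (not from the original source): a custom
 * SegmentFunction has the same shape as the Output* helpers above. This
 * hypothetical example packs each segment into a made-up little-endian
 * hardware descriptor; returning false makes clientOutputSegment() report
 * kIOReturnMessageTooLarge.
 *
 *   struct MyHWDesc { UInt32 addr; UInt32 len; };   // hypothetical layout
 *
 *   static bool
 *   myOutputSegment(IODMACommand *, IODMACommand::Segment64 segment,
 *                   void *vSegList, UInt32 outSegIndex)
 *   {
 *       MyHWDesc *descs = (MyHWDesc *) vSegList;
 *       descs[outSegIndex].addr = OSSwapHostToLittleInt32((UInt32) segment.fIOVMAddr);
 *       descs[outSegIndex].len  = OSSwapHostToLittleInt32((UInt32) segment.fLength);
 *       return true;
 *   }
 */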