/* apple/xnu.git: iokit/Kernel/IODMACommand.cpp (xnu-1504.7.4) */
/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)       (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)     (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                  \
    ((kIODirectionNone == (direction))                                  \
     || (kWalkSyncAlways & (op))                                        \
     || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)      \
         & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif


/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
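
/*
 * A minimal usage sketch (illustrative, not part of the original source):
 * create a command for a hypothetical 32-bit DMA engine that emits
 * host-order 64-bit segments. Passing 0 for maxSegmentSize and
 * maxTransferSize means "no limit", and a NULL mapper selects the system
 * mapper (see initWithSpecification below).
 *
 *     IODMACommand * dmaCmd = IODMACommand::withSpecification(
 *         IODMACommand::OutputHost64,  // outSegFunc: native Segment64 output
 *         32,                          // numAddressBits: device reaches 4GB
 *         0,                           // maxSegmentSize: 0 -> unlimited
 *         IODMACommand::kMapped,       // mappingOptions: mapped (IOMMU) path
 *         0,                           // maxTransferSize: 0 -> unlimited
 *         1,                           // alignment: byte-aligned segments
 *         0,                           // mapper: 0 -> IOMapper::gSystem
 *         0);                          // refCon
 */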

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;               // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;               // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;              // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    if (fMapper)
        fMapper->retain();

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}
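
/*
 * Worked example of the defaulting above (illustrative numbers): because
 * maxSegmentSize and maxTransferSize are unsigned, decrementing a zero
 * value wraps to all-ones, so 0 becomes 0xFFFFFFFFFFFFFFFF ("no limit").
 * Likewise alignment = 4096 yields fAlignMask = 0xFFF, and any address
 * with (address & fAlignMask) != 0 is treated as misaligned.
 */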

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
        fMapper->release();

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    };

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        IOReturn rtn = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (rtn)
            return rtn;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare)
            return prepare();
    };

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
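
/*
 * Lifecycle sketch (illustrative): a typical client pairs these calls as
 * shown below. setMemoryDescriptor() retains the descriptor and, with
 * autoPrepare true (the default in the header declaration), prepares the
 * whole transfer immediately.
 *
 *     dmaCmd->setMemoryDescriptor(md);     // retains md, auto-prepares
 *     // ... generate segments, program the hardware, run the I/O ...
 *     dmaCmd->complete();                  // balance the auto-prepare
 *     dmaCmd->clearMemoryDescriptor();     // releases md
 */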


IOReturn
IODMACommand::segmentOp(
            void         *reference,
            IODMACommand *target,
            Segment64     segment,
            void         *segments,
            UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && !state->fLocalMapper)
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        if (kWalkPrepare & op)
        {
            for (IOItemCount idx = 0; idx < numPages; idx++)
                gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
        }
        if (state->fDoubleBuffer)
            state->fCopyNext += length;
        else
        {
            state->fCopyNext += round_page(length);
            remapAddr += (address & PAGE_MASK);
        }

        if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                    (kWalkSyncIn & op) ? "->" : "<-",
                    address, length, op);
            if (kWalkSyncIn & op)
            { // cppvNoModSnk
                copypv(remapAddr, address, length,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
            else
            {
                copypv(address, remapAddr, length,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
        }
    }

    return kIOReturnSuccess;
}
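
/*
 * Worked example of the boundary split above (illustrative numbers): with
 * numAddressBits = 32, maxPhys becomes 0xFFFFFFFF. A segment at address
 * 0xFFFFF000 with length 0x2000 straddles that limit, so the reachable
 * head (0xFFFFF000..0xFFFFFFFF) is dropped from consideration, length is
 * reduced to 0x1000 and address advanced to 0x100000000: only the tail
 * beyond the device's reach is counted (preflight) or remapped and copied
 * (prepare/sync).
 */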

IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMapContig = false;
        state->fMisaligned = false;
        state->fDoubleBuffer = false;
        state->fPrepared = false;
        state->fCopyNext = 0;
        state->fCopyMapperPageAlloc = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapIndex = 0;
        state->fCopyMD = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            IOMapper * mapper;
            ppnum_t    mapBase = 0;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            mapper = gIOCopyMapper;
            if (mapper)
                mapBase = mapper->iovmAlloc(state->fCopyPageCount);
            if (mapBase)
            {
                state->fCopyMapperPageAlloc = mapBase;
                if (state->fCopyMapperPageAlloc && state->fDoubleBuffer)
                {
                    DEBG("contig copy map\n");
                    state->fMapContig = true;
                }

                state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
                        fMDSummary.fDirection, state->fPreparedLength, state->fSourceAlignMask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
            }
        }

        if (state->fLocalMapper)
        {
            state->fLocalMapperPageCount = atop_64(round_page(
                    state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
            state->fLocalMapperPageAlloc = fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount);
            state->fMapContig = true;
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyMapperPageAlloc)
            {
                state->fCopyNext = ptoa_64(state->fCopyMapperPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fLocalMapperPageAlloc)
        {
            fMapper->iovmFreeDMACommand(this, state->fLocalMapperPageAlloc, state->fLocalMapperPageCount);
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }
        if (state->fCopyMapperPageAlloc)
        {
            gIOCopyMapper->iovmFree(state->fCopyMapperPageAlloc, state->fCopyPageCount);
            state->fCopyMapperPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}
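
/*
 * How walkAll() is driven (summary of the call sites in this file):
 * prepare() passes kWalkPrepare | kWalkPreflight (plus kWalkSyncOut when
 * synchronizing), complete() passes kWalkComplete (plus kWalkSyncIn), and
 * synchronize() adds kWalkSyncAlways so SHOULD_COPY_DIR() copies
 * regardless of the transfer direction recorded in fMDSummary.fDirection.
 */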

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc || !numAddressBits)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        mapper->retain();
        fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}


IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig = false;
        state->fMisaligned = false;
        state->fDoubleBuffer = false;
        state->fPrepared = false;
        state->fCopyNext = 0;
        state->fCopyMapperPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapIndex = 0;
        state->fCopyMD = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (state->fLocalMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && !state->fLocalMapper
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
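
/*
 * Usage sketch (illustrative): preparing a sub-range of the descriptor.
 * A zero length defaults to the full descriptor length; prepare() nests
 * via fActive, so each call must be balanced by a complete().
 *
 *     ret = dmaCmd->prepare(0, 4096);      // set up DMA for the first page
 *     if (kIOReturnSuccess == ret) {
 *         // ... run the I/O ...
 *         dmaCmd->complete();
 *     }
 */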

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
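
/*
 * Usage sketch (illustrative): when a bounce buffer is in play, the client
 * synchronizes in exactly one direction per call (passing both directions
 * returns kIOReturnBadArgument).
 *
 *     // After the device has written into the bounce buffer:
 *     dmaCmd->synchronize(kIODirectionIn);     // copy bounce -> descriptor
 *     // Before handing freshly written data to the device:
 *     dmaCmd->synchronize(kIODirectionOut);    // copy descriptor -> bounce
 */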

struct IODMACommandTransferContext
{
    void *   buffer;
    UInt64   bufferOffset;
    UInt64   remaining;
    UInt32   op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                        cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                        cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
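
/*
 * Usage sketch (illustrative): readBytes/writeBytes copy between a kernel
 * buffer and the prepared range, returning the number of bytes actually
 * moved. Offsets are relative to the prepared offset.
 *
 *     UInt8  header[64];
 *     UInt64 got = dmaCmd->readBytes(0, header, sizeof(header));
 *     if (got != sizeof(header))
 *         ; // short transfer: the prepared range was smaller than asked
 */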

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}
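
/*
 * Usage sketch (illustrative): walking the prepared range and emitting
 * hardware segments. The offset cursor advances on each call, so the loop
 * ends once the whole prepared length is consumed or no segments come back.
 * (preparedLength here stands for the value reported by
 * getPreparedOffsetAndLength().)
 *
 *     IODMACommand::Segment64 segs[16];
 *     UInt64 offset = 0;
 *     while (offset < preparedLength) {
 *         UInt32 numSegs = 16;
 *         if ((kIOReturnSuccess != dmaCmd->genIOVMSegments(&offset, segs, &numSegs))
 *             || !numSegs)
 *             break;
 *         for (UInt32 i = 0; i < numSegs; i++)
 *             ; // program segs[i].fIOVMAddr / segs[i].fLength into the device
 *     }
 */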

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                 = 0;
        state->fIOVMAddr               = 0;
        internalState->fNextRemapIndex = 0;
        internalState->fNewMD          = false;
        state->fMapped                 = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                           = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && (kWalkClient & op))
            {
                ppnum_t pageNum = internalState->fLocalMapperPageAlloc;
                if (!pageNum)
                    pageNum = internalState->fCopyMapperPageAlloc;
                state->fIOVMAddr = ptoa_64(pageNum)
                                    + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess) {
                assert(state->fIOVMAddr);
                assert(state->fLength);
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0; // At end
            else
                return rtn;
        };

        if (!curSeg.fIOVMAddr) {
            UInt64 length = state->fLength;

            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }
        else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fLength  += length;
            state->fIOVMAddr = 0;
        };


        if (!state->fIOVMAddr)
        {
            if (kWalkClient & op)
            {
                if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
                {
                    if (internalState->fCursor)
                    {
                        curSeg.fIOVMAddr = 0;
                        ret = kIOReturnMessageTooLarge;
                        break;
                    }
                    else if (curSeg.fIOVMAddr <= maxPhys)
                    {
                        UInt64 remain, newLength;

                        newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                        DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    else if (gIOCopyMapper)
                    {
                        DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
                        if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                    ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)))
                        {

                            curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + internalState->fNextRemapIndex)
                                                + (curSeg.fIOVMAddr & PAGE_MASK);
                            internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
                        }
                        else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
                        {
                            if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                        ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)))
                            {
                                curSeg.fIOVMAddr = ptoa_64(internalState->fCopyMapperPageAlloc + checkRemapIndex)
                                                    + (curSeg.fIOVMAddr & PAGE_MASK);
                                internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
                                break;
                            }
                        }
                        DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    }
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->reserved->fLocalMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}

bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
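
/*
 * Layout example for the output functions above (illustrative numbers):
 * with Segment32 (two UInt32 fields, so sizeof(Segment32) == 8) and
 * outSegIndex = 1, offAddr = 8 and offLen = 12: the second segment's
 * address lands at byte 8 of the list and its length at byte 12,
 * byte-swapped as the Big/Little variants require.
 */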