]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IODMACommand.cpp
xnu-1228.0.2.tar.gz
[apple/xnu.git] / iokit / Kernel / IODMACommand.cpp
1 /*
2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <IOKit/assert.h>
30
31 #include <libkern/OSTypes.h>
32 #include <libkern/OSByteOrder.h>
33
34 #include <IOKit/IOReturn.h>
35 #include <IOKit/IOLib.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOMapper.h>
38 #include <IOKit/IOMemoryDescriptor.h>
39 #include <IOKit/IOBufferMemoryDescriptor.h>
40
41 #include "IOKitKernelInternal.h"
42 #include "IOCopyMapper.h"
43
44 #define MAPTYPE(type) ((UInt) (type) & kTypeMask)
45 #define IS_MAPPED(type) (MAPTYPE(type) == kMapped)
46 #define IS_BYPASSED(type) (MAPTYPE(type) == kBypassed)
47 #define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)
48
49
// Master switch for the copy-mapper / bounce-buffer path taken by walkAll().
static bool gIOEnableCopyMapper  = true;

enum
{
    // Operation bits threaded through walkAll()/segmentOp() (passed as the
    // opaque 'reference' argument of genIOVMSegments()).
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,	// copy regardless of md direction
    kWalkPreflight    = 0x08,	// count bounce pages needed (no data moved)
    kWalkDoubleBuffer = 0x10,	// force full double buffering
    kWalkPrepare      = 0x20,	// enter pages into the copy mapper
    kWalkComplete     = 0x40,	// tear down bounce resources
    kWalkClient       = 0x80	// walk on behalf of an external caller
};


// Accessors for the expansion ('reserved') data allocated in
// initWithSpecification().
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
// True when a sync pass with op's kWalkSync* bits must actually copy for a
// memory descriptor transferring in 'direction'.
#define SHOULD_COPY_DIR(op, direction)	\
    ((kIODirectionNone == (direction))	\
	|| (kWalkSyncAlways & (op))	\
	|| (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)	\
	 & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

// Debug tracing; compiled out by default.
#if 0
#define DEBG(fmt, args...)	{ kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)	{}
#endif
87
88
/**************************** class IODMACommand ***************************/

#undef super
#define super OSObject
// NOTE(review): 'super' expands to OSObject while the metaclass below names
// IOCommand as the parent class, so free() reaches OSObject::free() via
// super::free() — confirm this is the intended parent for the super calls.
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

// Reserved vtable padding: the first two slots are published, the rest are
// held for future binary-compatible expansion.
OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);
111
112 IODMACommand *
113 IODMACommand::withSpecification(SegmentFunction outSegFunc,
114 UInt8 numAddressBits,
115 UInt64 maxSegmentSize,
116 MappingOptions mappingOptions,
117 UInt64 maxTransferSize,
118 UInt32 alignment,
119 IOMapper *mapper,
120 void *refCon)
121 {
122 IODMACommand * me = new IODMACommand;
123
124 if (me && !me->initWithSpecification(outSegFunc,
125 numAddressBits, maxSegmentSize,
126 mappingOptions, maxTransferSize,
127 alignment, mapper, refCon))
128 {
129 me->release();
130 return 0;
131 };
132
133 return me;
134 }
135
/*
 * Create a new, independent command carrying the same DMA specification
 * as this one.  fAlignMask is stored as (alignment - 1), hence the
 * fAlignMask + 1 below to recover the original alignment value.
 */
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
142
143 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
144
145 bool
146 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
147 UInt8 numAddressBits,
148 UInt64 maxSegmentSize,
149 MappingOptions mappingOptions,
150 UInt64 maxTransferSize,
151 UInt32 alignment,
152 IOMapper *mapper,
153 void *refCon)
154 {
155 if (!super::init() || !outSegFunc || !numAddressBits)
156 return false;
157
158 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
159 || OutputLittle32 == outSegFunc);
160 if (is32Bit)
161 {
162 if (!numAddressBits)
163 numAddressBits = 32;
164 else if (numAddressBits > 32)
165 return false; // Wrong output function for bits
166 }
167
168 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
169 return false;
170
171 if (!maxSegmentSize)
172 maxSegmentSize--; // Set Max segment to -1
173 if (!maxTransferSize)
174 maxTransferSize--; // Set Max transfer to -1
175
176 if (!mapper)
177 {
178 IOMapper::checkForSystemMapper();
179 mapper = IOMapper::gSystem;
180 }
181
182 fNumSegments = 0;
183 fBypassMask = 0;
184 fOutSeg = outSegFunc;
185 fNumAddressBits = numAddressBits;
186 fMaxSegmentSize = maxSegmentSize;
187 fMappingOptions = mappingOptions;
188 fMaxTransferSize = maxTransferSize;
189 if (!alignment)
190 alignment = 1;
191 fAlignMask = alignment - 1;
192 fMapper = mapper;
193 fRefCon = refCon;
194
195 switch (MAPTYPE(mappingOptions))
196 {
197 case kMapped: break;
198 case kNonCoherent: fMapper = 0; break;
199 case kBypassed:
200 if (mapper && !mapper->getBypassMask(&fBypassMask))
201 return false;
202 break;
203 default:
204 return false;
205 };
206
207 reserved = IONew(IODMACommandInternal, 1);
208 if (!reserved)
209 return false;
210 bzero(reserved, sizeof(IODMACommandInternal));
211
212 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
213
214 return true;
215 }
216
217 void
218 IODMACommand::free()
219 {
220 if (reserved)
221 IODelete(reserved, IODMACommandInternal, 1);
222
223 super::free();
224 }
225
/*
 * Target this command at a memory descriptor, optionally preparing it.
 *
 * Re-setting the current descriptor with autoPrepare false unwinds any
 * outstanding prepare() nesting.  Switching descriptors while active
 * fails with kIOReturnBusy (see inline comment).  A new descriptor has
 * its characteristics cached into fMDSummary and is retained; with
 * autoPrepare true it is prepared immediately.
 */
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	if (!autoPrepare)
	{
	    // Same descriptor, no prepare wanted: undo any nesting.
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    };

    if (mem) {
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	// Fall back to the system-wide last page when the md reports none.
	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    // Per-segment reachability checks are needed if any page can sit
	    // above what fNumAddressBits can address.
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    };

    return kIOReturnSuccess;
}
275
276 IOReturn
277 IODMACommand::clearMemoryDescriptor(bool autoComplete)
278 {
279 if (fActive && !autoComplete)
280 return (kIOReturnNotReady);
281
282 if (fMemory) {
283 while (fActive)
284 complete();
285 fMemory->release();
286 fMemory = 0;
287 }
288
289 return (kIOReturnSuccess);
290 }
291
// Accessor: the memory descriptor currently targeted by this command
// (NULL if none has been set).
const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}
297
298
/*
 * genIOVMSegments() callback used by walkAll(); 'reference' carries the
 * kWalk* op bits.  For each segment the memory descriptor yields, works
 * out how much lies beyond maxPhys (the highest address the client can
 * reach) and then either:
 *  - preflight (kWalkPreflight): accumulates the bounce pages that will
 *    be needed in state->fCopyPageCount; or
 *  - prepare/sync: inserts the pages into gIOCopyMapper (kWalkPrepare)
 *    and copies data between the original pages and the bounce mapping
 *    with copypv() when the direction requires it.
 */
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64    segment,
			void         *segments,
			UInt32       segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    // Highest reachable physical byte; the 0 - 1 wrap yields all-ones
    // (no limit) when the address width is 0 or >= 64.
    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length = segment.fLength;

    assert(address);
    assert(length);

    // Latch misalignment against the client's alignment mask once.
    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
	// Only the portion above maxPhys needs bouncing.
	if ((address + length - 1) <= maxPhys)
	{
	    length = 0;		// fully reachable: nothing to bounce
	}
	else if (address <= maxPhys)
	{
	    // Segment straddles the limit: keep just the unreachable tail.
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    // Map each original page at its pre-allocated bounce slot.
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);	// preserve sub-page offset
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		    (kWalkSyncIn & op) ? "->" : "<-",
		    address, length, op);
	    if (kWalkSyncIn & op)
	    { // cppvNoModSnk
		copypv(remapAddr, address, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	    else
	    {
		copypv(address, remapAddr, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	}
    }

    return kIOReturnSuccess;
}
396
/*
 * Drive the bounce-buffer state machine for prepare / synchronize /
 * complete, according to the kWalk* bits in 'op':
 *
 *  kWalkPreflight   - reset bounce state, count pages needing remap (via
 *                     segmentOp), then allocate copy-mapper space or fall
 *                     back to an IOBufferMemoryDescriptor double buffer.
 *  kWalkSyncIn/Out  - copy between the original md and the bounce pages
 *                     or buffer in the requested direction.
 *  kWalkComplete    - free the copy-mapper allocation and/or buffer md.
 */
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	// Start from a clean bounce state for this prepare.
	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;
	state->fCopyMD         = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    // Counting pass: segmentOp tallies fCopyPageCount.
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    // Double buffering bounces the whole prepared range.
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		// Got copy-mapper space: run the prepare pass to insert
		// mappings (and sync, if those bits are still set).
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		// The prepare pass already performed any needed copy.
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		// No copy-mapper space: fall back to a kernel buffer md.
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		// Copy-mapper case: segmentOp does per-segment copypv().
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		// Buffer-md case: bulk copy the whole prepared range.
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						state->fCopyMD->getBytesNoCopy(),
						state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    if (kWalkComplete & op)
    {
	// Tear down whichever bounce resource was created above.
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
535
536 IOReturn
537 IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
538 UInt8 numAddressBits,
539 UInt64 maxSegmentSize,
540 MappingOptions mappingOptions,
541 UInt64 maxTransferSize,
542 UInt32 alignment,
543 IOMapper *mapper,
544 UInt64 offset,
545 UInt64 length,
546 bool flushCache,
547 bool synchronize)
548 {
549 if (fActive)
550 return kIOReturnNotPermitted;
551
552 if (!outSegFunc || !numAddressBits)
553 return kIOReturnBadArgument;
554
555 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
556 || OutputLittle32 == outSegFunc);
557 if (is32Bit)
558 {
559 if (!numAddressBits)
560 numAddressBits = 32;
561 else if (numAddressBits > 32)
562 return kIOReturnBadArgument; // Wrong output function for bits
563 }
564
565 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
566 return kIOReturnBadArgument;
567
568 if (!maxSegmentSize)
569 maxSegmentSize--; // Set Max segment to -1
570 if (!maxTransferSize)
571 maxTransferSize--; // Set Max transfer to -1
572
573 if (!mapper)
574 {
575 IOMapper::checkForSystemMapper();
576 mapper = IOMapper::gSystem;
577 }
578
579 switch (MAPTYPE(mappingOptions))
580 {
581 case kMapped: break;
582 case kNonCoherent: fMapper = 0; break;
583 case kBypassed:
584 if (mapper && !mapper->getBypassMask(&fBypassMask))
585 return kIOReturnBadArgument;
586 break;
587 default:
588 return kIOReturnBadArgument;
589 };
590
591 fNumSegments = 0;
592 fBypassMask = 0;
593 fOutSeg = outSegFunc;
594 fNumAddressBits = numAddressBits;
595 fMaxSegmentSize = maxSegmentSize;
596 fMappingOptions = mappingOptions;
597 fMaxTransferSize = maxTransferSize;
598 if (!alignment)
599 alignment = 1;
600 fAlignMask = alignment - 1;
601 fMapper = mapper;
602
603 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
604
605 return prepare(offset, length, flushCache, synchronize);
606 }
607
608
/*
 * Prepare (a sub-range of) the target memory for DMA.
 *
 * prepare() nests: a call while already active must use the same offset
 * and length or kIOReturnNotReady is returned.  On the first prepare the
 * bounce state is reset and, unless the fast 'cursor' path applies
 * (iterate-only, or no addressing and no alignment concerns), walkAll()
 * runs a preflight+prepare pass (adding a sync-out when 'synchronize' is
 * true).  Noncoherent mappings with flushCache set first store the md's
 * cache back via kIOMemoryIncoherentIOStore.
 */
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    // Zero length means "the whole descriptor".
    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }
    if (fActive++)
    {
	// Nested prepare: only legal over the identical range.
	if ((state->fPreparedOffset != offset)
	  || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fNextRemapIndex = 0;
	state->fCopyMD         = 0;

	// 'Cursor' mode skips the bounce machinery entirely: iterate-only
	// commands, or memory that needs neither reachability checks nor
	// realignment (page-aligned md satisfying fAlignMask).
	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && (!fAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
664
665 IOReturn
666 IODMACommand::complete(bool invalidateCache, bool synchronize)
667 {
668 IODMACommandInternal * state = fInternalState;
669 IOReturn ret = kIOReturnSuccess;
670
671 if (fActive < 1)
672 return kIOReturnNotReady;
673
674 if (!--fActive)
675 {
676 if (!state->fCursor)
677 {
678 IOOptionBits op = kWalkComplete;
679 if (synchronize)
680 op |= kWalkSyncIn;
681 ret = walkAll(op);
682 }
683 state->fPrepared = false;
684
685 if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
686 {
687 IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
688
689 poMD->performOperation(kIOMemoryIncoherentIOFlush, 0, fMDSummary.fLength);
690 }
691 }
692
693 return ret;
694 }
695
/*
 * Copy data between the client's memory and the DMA (bounce) buffers
 * while the command remains prepared.  Exactly one of kIODirectionIn /
 * kIODirectionOut must be supplied.  kForceDoubleBuffer first rebuilds
 * the command in double-buffered form; note that the status of that
 * preparatory walkAll(kWalkComplete) is overwritten by the final
 * walkAll(op) below.
 */
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    // Both directions at once is not supported.
    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;	// already double buffered
	if (state->fCursor)
	    state->fCursor = false;
	else
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	return kIOReturnSuccess;	// cursor path: nothing to sync

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
733
// Context threaded (as the 'segments' pointer) through genIOVMSegments()
// to transferSegment() by transfer() / readBytes() / writeBytes().
struct IODMACommandTransferContext
{
    void *   buffer;		// client buffer to copy to or from
    UInt64   bufferOffset;	// running offset into 'buffer'
    UInt64   remaining;		// bytes still to transfer
    UInt32   op;		// kIODMACommandTransferOp*
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};
746
/*
 * genIOVMSegments() callback for transfer(): copies between the client
 * buffer held in the context and each generated segment using copypv().
 * For mapped commands the segment address is an IOVM address and is
 * translated page-by-page through the mapper's mapAddr().
 *
 * Returns kIOReturnOverrun once context->remaining reaches zero purely
 * to stop segment generation; transfer() ignores the status and reports
 * the byte count moved instead.
 */
IOReturn
IODMACommand::transferSegment(void   *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) segments;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
	UInt64 copyLen = length;
	if ((kMapped == MAPTYPE(target->fMappingOptions))
	    && target->fMapper)
	{
	    // Translate one page at a time; never cross a page boundary.
	    cpuAddr = target->fMapper->mapAddr(ioAddr);
	    copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
	    ioAddr += copyLen;
	}

	switch (context->op)
	{
	    case kIODMACommandTransferOpReadBytes:
		copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
				cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
		break;
	    case kIODMACommandTransferOpWriteBytes:
		copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
				cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
		break;
	}
	length                -= copyLen;
	context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}
789
790 UInt64
791 IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
792 {
793 IODMACommandInternal * state = fInternalState;
794 IODMACommandTransferContext context;
795 UInt32 numSegments = 0-1;
796
797 if (fActive < 1)
798 return (0);
799
800 if (offset >= state->fPreparedLength)
801 return (0);
802 length = min(length, state->fPreparedLength - offset);
803
804 context.buffer = buffer;
805 context.bufferOffset = 0;
806 context.remaining = length;
807 context.op = transferOp;
808 (void) genIOVMSegments(transferSegment, (void *) kWalkClient, &offset, &context, &numSegments);
809
810 return (length - context.remaining);
811 }
812
813 UInt64
814 IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
815 {
816 return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
817 }
818
819 UInt64
820 IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
821 {
822 return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
823 }
824
825 IOReturn
826 IODMACommand::genIOVMSegments(UInt64 *offsetP,
827 void *segmentsP,
828 UInt32 *numSegmentsP)
829 {
830 return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
831 }
832
/*
 * Core segment generator.  Walks the target memory (or its bounce copy)
 * from *offsetP, coalescing physically contiguous ranges, splitting at
 * fMaxSegmentSize, and — for client walks — enforcing fNumAddressBits
 * reachability (truncating or redirecting through the copy mapper).
 * Each finished segment is handed to outSegFunc.  On success *offsetP
 * and *numSegmentsP are updated to reflect what was produced.
 */
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits op = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits mdOp = kIOMDWalkSegments;
    IOReturn ret = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    // All offsets below are absolute within the md, not prepared-relative.
    UInt64 offset = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    // Restart the walk from scratch when beginning at the prepared offset
    // or when the caller's offset doesn't match where we left off.
    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset)) {
	state->fOffset = 0;
	state->fIOVMAddr = 0;
	internalState->fNextRemapIndex = 0;
	state->fMapped = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    // Highest client-reachable physical byte (all-ones when unlimited).
    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	if (!state->fIOVMAddr) {

	    // Fetch the next raw extent from the md (or use the contiguous
	    // bounce mapping directly for client walks).
	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
				    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	};

	// Start a new output segment, or coalesce when the new extent is
	// physically contiguous with the one being built.
	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset          += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;
	    offset          += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	};


	if (!state->fIOVMAddr)
	{
	    if (kWalkClient & op)
	    {
		// Enforce client addressability on the built segment.
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			// Cursor mode has no bounce buffers to fall back on.
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			// Head is reachable: emit it now, push the tail back.
			UInt64 remain, newLength;

			newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain           = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			// Entirely unreachable: substitute the bounce page(s)
			// set up by segmentOp().  fNextRemapIndex tracks the
			// expected slot; fall back to a linear search.
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					    ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)))
			{

			    curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + internalState->fNextRemapIndex)
						+ (curSeg.fIOVMAddr & PAGE_MASK);
			    internalState->fNextRemapIndex += atop_64(round_page(curSeg.fLength));
			}
			else for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)
						    + (curSeg.fIOVMAddr & PAGE_MASK);
				internalState->fNextRemapIndex = checkRemapIndex + atop_64(round_page(curSeg.fLength));
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    // Split segments longer than the client's maximum.
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength   = fMaxSegmentSize;

		state->fLength   = remain;
		offset          -= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    // Clip the final segment to the prepared range.
	    if (offset >= memLength)
	    {
		curSeg.fLength -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	if (state->fIOVMAddr) {
	    // A leftover extent means curSeg is finished: emit it, unless
	    // that would consume the caller's last array slot.
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    // Flush the segment still being built when the loop ended.
    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP = offset - internalState->fPreparedOffset;
	*numSegmentsP = segIndex;
    }
    return ret;
}
1031
1032 IOReturn
1033 IODMACommand::clientOutputSegment(
1034 void *reference, IODMACommand *target,
1035 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1036 {
1037 IOReturn ret = kIOReturnSuccess;
1038
1039 if ((target->fNumAddressBits < 64)
1040 && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
1041 {
1042 DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1043 ret = kIOReturnMessageTooLarge;
1044 }
1045
1046 if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
1047 {
1048 DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1049 ret = kIOReturnMessageTooLarge;
1050 }
1051
1052 return (ret);
1053 }
1054
1055 bool
1056 IODMACommand::OutputHost32(IODMACommand *,
1057 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1058 {
1059 Segment32 *base = (Segment32 *) vSegList;
1060 base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1061 base[outSegIndex].fLength = (UInt32) segment.fLength;
1062 return true;
1063 }
1064
1065 bool
1066 IODMACommand::OutputBig32(IODMACommand *,
1067 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1068 {
1069 const UInt offAddr = outSegIndex * sizeof(Segment32);
1070 const UInt offLen = offAddr + sizeof(UInt32);
1071 OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1072 OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1073 return true;
1074 }
1075
1076 bool
1077 IODMACommand::OutputLittle32(IODMACommand *,
1078 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1079 {
1080 const UInt offAddr = outSegIndex * sizeof(Segment32);
1081 const UInt offLen = offAddr + sizeof(UInt32);
1082 OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1083 OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1084 return true;
1085 }
1086
1087 bool
1088 IODMACommand::OutputHost64(IODMACommand *,
1089 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1090 {
1091 Segment64 *base = (Segment64 *) vSegList;
1092 base[outSegIndex] = segment;
1093 return true;
1094 }
1095
1096 bool
1097 IODMACommand::OutputBig64(IODMACommand *,
1098 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1099 {
1100 const UInt offAddr = outSegIndex * sizeof(Segment64);
1101 const UInt offLen = offAddr + sizeof(UInt64);
1102 OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1103 OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1104 return true;
1105 }
1106
1107 bool
1108 IODMACommand::OutputLittle64(IODMACommand *,
1109 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1110 {
1111 const UInt offAddr = outSegIndex * sizeof(Segment64);
1112 const UInt offLen = offAddr + sizeof(UInt64);
1113 OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1114 OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1115 return true;
1116 }
1117
1118