/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)      (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)    (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)


static bool gIOEnableCopyMapper = true;

enum
{
    kWalkSyncIn       = 0x01, // bounce -> md
    kWalkSyncOut      = 0x02, // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};

struct ExpansionData
{
    IOMDDMAWalkSegmentState fState;
    IOMDDMACharacteristics  fMDSummary;

    UInt64 fPreparedOffset;
    UInt64 fPreparedLength;

    UInt8  fCursor;
    UInt8  fCheckAddressing;
    UInt8  fIterateOnly;
    UInt8  fMisaligned;
    UInt8  fCopyContig;
    UInt8  fPrepared;
    UInt8  fDoubleBuffer;
    UInt8  __pad[1];

    ppnum_t  fCopyPageAlloc;
    ppnum_t  fCopyPageCount;
    addr64_t fCopyNext;

    class IOBufferMemoryDescriptor * fCopyMD;
};
typedef ExpansionData IODMACommandInternal;

#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
    ((kIODirectionNone == (direction))                                      \
        || (kWalkSyncAlways & (op))                                         \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)       \
                                                   & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
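
// Worked example of the predicate above: with op = kWalkSyncOut and a
// descriptor prepared with direction kIODirectionOut, the third clause
// tests (kIODirectionOut & kIODirectionOut), which is nonzero, so the
// bounce buffer is copied from the md before the transfer. With the same
// op but direction kIODirectionIn no clause matches and the copy is
// skipped. A direction of kIODirectionNone, or the kWalkSyncAlways bit in
// op, forces the copy either way.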

#if 0
#define DEBG(fmt, args...) { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif


/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUnused(IODMACommand,  0);
OSMetaClassDefineReservedUnused(IODMACommand,  1);
OSMetaClassDefineReservedUnused(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}
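
/*
 * A minimal creation sketch (illustrative only; the parameter values are
 * assumptions, not API requirements). This would describe a device that
 * emits 32-bit host-endian segments, addresses 32 bits of physical
 * memory, and accepts segments of up to 64K with no alignment
 * restriction. The returned command carries one reference; release() it
 * when done.
 */
#if 0
    IODMACommand * cmd = IODMACommand::withSpecification(
                            IODMACommand::OutputHost32, // outSegFunc
                            32,                         // numAddressBits
                            65536,                      // maxSegmentSize
                            IODMACommand::kMapped,      // mappingOptions
                            0,                          // maxTransferSize, 0 == no limit
                            1,                          // alignment
                            0,                          // mapper, 0 == system mapper
                            0);                         // refCon
#endif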

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

// Note: deliberately self-referential; the preprocessor does not re-expand
// a macro's own name, so a use of kLastOutputFunction becomes a cast of the
// non-macro declaration of that name to SegmentFunction.
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc || !numAddressBits)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;  // unreachable: zero was rejected above
        else if (numAddressBits > 32)
            return false;         // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;   // Set Max segment to -1, i.e. no limit
    if (!maxTransferSize)
        maxTransferSize--;  // Set Max transfer to -1, i.e. no limit

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    }

    reserved = IONew(ExpansionData, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(ExpansionData));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, ExpansionData, 1);

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block; just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        IOReturn rtn = mem->dmaCommandOperation(
                kIOMDGetCharacteristics,
                &fMDSummary, sizeof(fMDSummary));
        if (rtn)
            return rtn;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        mem->retain();
        fMemory = mem;

        if (autoPrepare)
            return prepare();
    }

    return kIOReturnSuccess;
}
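
/*
 * Usage sketch (illustrative): "md" stands for any IOMemoryDescriptor the
 * driver already owns. With autoPrepare true the command is prepared
 * immediately; pass false to defer until an explicit prepare() call.
 */
#if 0
    IOReturn err = cmd->setMemoryDescriptor(md, true /* autoPrepare */);
    if (kIOReturnSuccess != err)
        IOLog("setMemoryDescriptor failed: 0x%x\n", err);
#endif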

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}


IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (target->fAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages  = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        if (kWalkPrepare & op)
        {
            for (IOItemCount idx = 0; idx < numPages; idx++)
                gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
        }
        if (state->fDoubleBuffer)
            state->fCopyNext += length;
        else
        {
            state->fCopyNext += round_page(length);
            remapAddr += (address & PAGE_MASK);
        }

        if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                    (kWalkSyncIn & op) ? "->" : "<-",
                    address, length, op);
            if (kWalkSyncIn & op)
            { // cppvNoModSnk
                copypv(remapAddr, address, length,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
            else
            {
                copypv(address, remapAddr, length,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
        }
    }

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
        state->fCopyContig    = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = 0;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            IOMapper * mapper;
            ppnum_t    mapBase = 0;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            mapper = gIOCopyMapper;
            if (mapper)
                mapBase = mapper->iovmAlloc(state->fCopyPageCount);
            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                if (state->fCopyPageAlloc && state->fDoubleBuffer)
                {
                    DEBG("contig copy map\n");
                    state->fCopyContig = true;
                }

                state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
                                    fMDSummary.fDirection, state->fPreparedLength, page_size);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc\n");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                    state->fCopyMD->getBytesNoCopy(),
                                                    state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

#if 0
    if (IS_NONCOHERENT(fMappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }
#endif
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
          || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fCopyContig    = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = 0;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fCopyMD        = 0;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && (!fAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
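
/*
 * Lifecycle sketch (illustrative; the offset and length values are
 * assumptions for the example): prepare() may cover a sub-range of the
 * descriptor, and nested prepares succeed only when they name the same
 * offset/length pair; a mismatch returns kIOReturnNotReady.
 */
#if 0
    // prepare only the second 4K page of the transfer
    IOReturn err = cmd->prepare(4096, 4096);
    if (kIOReturnSuccess == err)
    {
        // ... generate segments, program the hardware, wait for I/O ...
        cmd->complete();
    }
#endif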

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

#if 0
        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            // XXX gvdl: need invalidate before Chardonnay ships
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
        }
#endif
    }

    return ret;
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
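
/*
 * Sync sketch (illustrative): after the device has written into the DMA
 * buffer, pull any bounce-buffered pages back into the client's memory
 * before reading it. Asking for both directions at once (kIODirectionOutIn)
 * is rejected above as ambiguous.
 */
#if 0
    IOReturn err = cmd->synchronize(kIODirectionIn);
#endif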

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
}
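
/*
 * Segment-generation sketch (illustrative, assuming the command was
 * created with a 64-bit output function such as OutputHost64): the offset
 * is carried across calls, so the loop drains the whole prepared range
 * even when it takes more than 8 segments per pass.
 */
#if 0
    IODMACommand::Segment64 segs[8];
    UInt64 offset = 0;
    for (;;)
    {
        UInt32 numSegs = 8;
        if (kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs))
            break;  // kIOReturnOverrun marks the end of the prepared range
        for (UInt32 i = 0; i < numSegs; i++)
        {
            // program segs[i].fIOVMAddr / segs[i].fLength into the device
        }
        if (numSegs < 8)
            break;  // short batch: the range is exhausted
    }
#endif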

IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IOOptionBits           op            = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp          = kIOMDWalkSegments;
    IOReturn               ret           = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if (!offset || offset != state->fOffset) {
        state->fOffset   = 0;
        state->fIOVMAddr = 0;
        state->fMapped   = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp             = kIOMDFirstSegment;
    }

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fCopyContig && (kWalkClient & op))
            {
                state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
                                    + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess) {
                assert(state->fIOVMAddr);
                assert(state->fLength);
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        if (!curSeg.fIOVMAddr) {
            UInt64 length = state->fLength;

            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }
        else if (curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr) {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fLength  += length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if (kWalkClient & op)
            {
                if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
                {
                    if (internalState->fCursor)
                    {
                        curSeg.fIOVMAddr = 0;
                        ret = kIOReturnMessageTooLarge;
                        break;
                    }
                    else if (curSeg.fIOVMAddr <= maxPhys)
                    {
                        UInt64 remain, newLength;

                        newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                        DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    else if (gIOCopyMapper)
                    {
                        DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
                        // Cache this!
                        for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
                        {
                            if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                        ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
                            {
                                curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
                                break;
                            }
                        }
                        DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    }
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (fAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if (segIndex + 1 == numSegments)
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}
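
/*
 * Custom output-function sketch (illustrative): the built-ins above cover
 * flat Segment32/Segment64 arrays, but a SegmentFunction can also write
 * straight into a device-defined scatter/gather format. "MyHWDesc" and
 * its fields are hypothetical, not part of IOKit; the function would be
 * passed to withSpecification() as outSegFunc.
 */
#if 0
struct MyHWDesc     // hypothetical hardware scatter/gather element
{
    UInt32 addrLo;
    UInt32 addrHi;
    UInt32 byteCount;
    UInt32 flags;
};

static bool
OutputToMyHWDesc(IODMACommand *,
        IODMACommand::Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    MyHWDesc *desc = &((MyHWDesc *) vSegList)[outSegIndex];
    desc->addrLo    = (UInt32)  segment.fIOVMAddr;
    desc->addrHi    = (UInt32) (segment.fIOVMAddr >> 32);
    desc->byteCount = (UInt32)  segment.fLength;
    desc->flags     = 0;
    return true;    // false makes clientOutputSegment report kIOReturnMessageTooLarge
}
#endif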