]> git.saurik.com Git - apple/xnu.git/blob - iokit/Kernel/IODMACommand.cpp
xnu-792.13.8.tar.gz
[apple/xnu.git] / iokit / Kernel / IODMACommand.cpp
1 /*
2 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30
31 #include <IOKit/assert.h>
32
33 #include <libkern/OSTypes.h>
34 #include <libkern/OSByteOrder.h>
35
36 #include <IOKit/IOReturn.h>
37 #include <IOKit/IOLib.h>
38 #include <IOKit/IODMACommand.h>
39 #include <IOKit/IOMapper.h>
40 #include <IOKit/IOMemoryDescriptor.h>
41 #include <IOKit/IOBufferMemoryDescriptor.h>
42
43 #include "IOKitKernelInternal.h"
44 #include "IOCopyMapper.h"
45
// Extract and test the mapping-type bits of a MappingOptions word.
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)		(MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)	(MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)


// Master switch for the copy-mapper bounce-buffer paths in walkAll()/prepare().
static bool gIOEnableCopyMapper  = true;
53
// Operation flags passed through walkAll()/segmentOp()/genIOVMSegments()
// to select which phase of the copy/bounce machinery runs.
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,	// copy regardless of the md's direction
    kWalkPreflight    = 0x08,	// first pass: count pages needing remap
    kWalkDoubleBuffer = 0x10,	// force a full bounce of the prepared range
    kWalkPrepare      = 0x20,	// install copy-mapper translations
    kWalkComplete     = 0x40,	// tear down copy state
    kWalkClient       = 0x80	// walk driven by the public genIOVMSegments()
};
65
// Private per-command state, hung off the reserved pointer (layout is
// ABI: do not reorder or resize fields).
struct ExpansionData
{
    IOMDDMAWalkSegmentState fState;	// opaque walk state for dmaCommandOperation()
    IOMDDMACharacteristics  fMDSummary;	// cached characteristics of fMemory

    UInt64 fPreparedOffset;		// offset passed to prepare()
    UInt64 fPreparedLength;		// length passed to prepare()

    UInt8  fCursor;			// true: walk straight through, no copy machinery
    UInt8  fCheckAddressing;		// true: md may contain pages above the address limit
    UInt8  fIterateOnly;		// kIterateOnly was set in MappingOptions
    UInt8  fMisaligned;			// a segment violated fAlignMask during preflight
    UInt8  fCopyContig;			// copy map is one contiguous IOVM run
    UInt8  fPrepared;			// copy/bounce state is set up
    UInt8  fDoubleBuffer;		// whole range is bounced, not just offending pages
    UInt8  __pad[1];

    ppnum_t  fCopyPageAlloc;		// base page of the copy-mapper allocation
    ppnum_t  fCopyPageCount;		// pages needed/allocated for remapping
    addr64_t fCopyNext;			// next free address in the copy window

    class IOBufferMemoryDescriptor * fCopyMD;	// fallback bounce buffer
};
typedef ExpansionData IODMACommandInternal;
90
// Shorthand accessors for the expansion data.
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// Decide whether a sync walk (op) must copy for a transfer of the given
// direction.  no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)					    \
	((kIODirectionNone == (direction))				    \
	    || (kWalkSyncAlways & (op))					    \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
						    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

// Debug logging; compiled out by default.
#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
114
115 /**************************** class IODMACommand ***************************/
116
117 #undef super
118 #define super OSObject
119 OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
120
121 OSMetaClassDefineReservedUnused(IODMACommand, 0);
122 OSMetaClassDefineReservedUnused(IODMACommand, 1);
123 OSMetaClassDefineReservedUnused(IODMACommand, 2);
124 OSMetaClassDefineReservedUnused(IODMACommand, 3);
125 OSMetaClassDefineReservedUnused(IODMACommand, 4);
126 OSMetaClassDefineReservedUnused(IODMACommand, 5);
127 OSMetaClassDefineReservedUnused(IODMACommand, 6);
128 OSMetaClassDefineReservedUnused(IODMACommand, 7);
129 OSMetaClassDefineReservedUnused(IODMACommand, 8);
130 OSMetaClassDefineReservedUnused(IODMACommand, 9);
131 OSMetaClassDefineReservedUnused(IODMACommand, 10);
132 OSMetaClassDefineReservedUnused(IODMACommand, 11);
133 OSMetaClassDefineReservedUnused(IODMACommand, 12);
134 OSMetaClassDefineReservedUnused(IODMACommand, 13);
135 OSMetaClassDefineReservedUnused(IODMACommand, 14);
136 OSMetaClassDefineReservedUnused(IODMACommand, 15);
137
138 IODMACommand *
139 IODMACommand::withSpecification(SegmentFunction outSegFunc,
140 UInt8 numAddressBits,
141 UInt64 maxSegmentSize,
142 MappingOptions mappingOptions,
143 UInt64 maxTransferSize,
144 UInt32 alignment,
145 IOMapper *mapper,
146 void *refCon)
147 {
148 IODMACommand * me = new IODMACommand;
149
150 if (me && !me->initWithSpecification(outSegFunc,
151 numAddressBits, maxSegmentSize,
152 mappingOptions, maxTransferSize,
153 alignment, mapper, refCon))
154 {
155 me->release();
156 return 0;
157 };
158
159 return me;
160 }
161
// Create a new command with this command's DMA specification; only the
// refCon differs.  fAlignMask holds (alignment - 1), hence the +1.
IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
	    fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}
168
169 #define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
170
171 bool
172 IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
173 UInt8 numAddressBits,
174 UInt64 maxSegmentSize,
175 MappingOptions mappingOptions,
176 UInt64 maxTransferSize,
177 UInt32 alignment,
178 IOMapper *mapper,
179 void *refCon)
180 {
181 if (!super::init() || !outSegFunc || !numAddressBits)
182 return false;
183
184 bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
185 || OutputLittle32 == outSegFunc);
186 if (is32Bit)
187 {
188 if (!numAddressBits)
189 numAddressBits = 32;
190 else if (numAddressBits > 32)
191 return false; // Wrong output function for bits
192 }
193
194 if (numAddressBits && (numAddressBits < PAGE_SHIFT))
195 return false;
196
197 if (!maxSegmentSize)
198 maxSegmentSize--; // Set Max segment to -1
199 if (!maxTransferSize)
200 maxTransferSize--; // Set Max transfer to -1
201
202 if (!mapper)
203 {
204 IOMapper::checkForSystemMapper();
205 mapper = IOMapper::gSystem;
206 }
207
208 fNumSegments = 0;
209 fBypassMask = 0;
210 fOutSeg = outSegFunc;
211 fNumAddressBits = numAddressBits;
212 fMaxSegmentSize = maxSegmentSize;
213 fMappingOptions = mappingOptions;
214 fMaxTransferSize = maxTransferSize;
215 if (!alignment)
216 alignment = 1;
217 fAlignMask = alignment - 1;
218 fMapper = mapper;
219 fRefCon = refCon;
220
221 switch (MAPTYPE(mappingOptions))
222 {
223 case kMapped: break;
224 case kNonCoherent: fMapper = 0; break;
225 case kBypassed:
226 if (mapper && !mapper->getBypassMask(&fBypassMask))
227 return false;
228 break;
229 default:
230 return false;
231 };
232
233 reserved = IONew(ExpansionData, 1);
234 if (!reserved)
235 return false;
236 bzero(reserved, sizeof(ExpansionData));
237
238 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
239
240 return true;
241 }
242
243 void
244 IODMACommand::free()
245 {
246 if (reserved)
247 IODelete(reserved, ExpansionData, 1);
248
249 super::free();
250 }
251
// Attach a memory descriptor to this command, optionally preparing it.
// Re-attaching the current descriptor with autoPrepare false completes any
// outstanding prepares.  Returns kIOReturnBusy if a different descriptor is
// attached and still active.
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
	// Same descriptor: just unwind outstanding prepares if requested.
	if (!autoPrepare)
	{
	    while (fActive)
		complete();
	}
	return kIOReturnSuccess;
    }

    if (fMemory) {
	// As we are almost certainly being called from a work loop thread
	// if fActive is true it is probably not a good time to potentially
	// block.  Just test for it and return an error
	if (fActive)
	    return kIOReturnBusy;
	clearMemoryDescriptor();
    };

    if (mem) {
	// Cache the descriptor's characteristics (length, direction,
	// highest page, alignment) for later walks.
	bzero(&fMDSummary, sizeof(fMDSummary));
	IOReturn rtn = mem->dmaCommandOperation(
		kIOMDGetCharacteristics,
		&fMDSummary, sizeof(fMDSummary));
	if (rtn)
	    return rtn;

	ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

	// Decide whether walks must check each page against the client's
	// address limit: not needed when an IOMMU maps into low space.
	if ((kMapped == MAPTYPE(fMappingOptions))
	    && fMapper
	    && (!fNumAddressBits || (fNumAddressBits >= 31)))
	    // assuming mapped space is 2G
	    fInternalState->fCheckAddressing = false;
	else
	    fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

	mem->retain();
	fMemory = mem;

	if (autoPrepare)
	    return prepare();
    };

    return kIOReturnSuccess;
}
301
302 IOReturn
303 IODMACommand::clearMemoryDescriptor(bool autoComplete)
304 {
305 if (fActive && !autoComplete)
306 return (kIOReturnNotReady);
307
308 if (fMemory) {
309 while (fActive)
310 complete();
311 fMemory->release();
312 fMemory = 0;
313 }
314
315 return (kIOReturnSuccess);
316 }
317
318 const IOMemoryDescriptor *
319 IODMACommand::getMemoryDescriptor() const
320 {
321 return fMemory;
322 }
323
324
// Internal per-segment callback used by walkAll()'s walks.  In the
// preflight pass it counts pages that lie above the client's physical
// address limit; in prepare/sync passes it installs copy-mapper
// translations and/or copies data between the original pages and the
// bounce window.  `reference` carries the walk-op flags, not a pointer.
IOReturn
IODMACommand::segmentOp(
			void         *reference,
			IODMACommand *target,
			Segment64     segment,
			void         *segments,
			UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    // maxPhys = highest address the client can express (all ones if 64-bit).
    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
	maxPhys = (1ULL << target->fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length = segment.fLength;

    assert(address);
    assert(length);

    // Latch (sticky) any alignment violation; it forces double-buffering.
    if (!state->fMisaligned)
    {
	state->fMisaligned |= (0 != (target->fAlignMask & address));
	if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
	return (kIOReturnNotAligned);

    // Unless double-buffering everything, trim the segment down to just
    // the portion above maxPhys — only that part needs remapping.
    if (!state->fDoubleBuffer)
    {
	if ((address + length - 1) <= maxPhys)
	{
	    // Entirely reachable: nothing to do for this segment.
	    length = 0;
	}
	else if (address <= maxPhys)
	{
	    // Straddles the limit: keep only the unreachable tail.
	    DEBG("tail %qx, %qx", address, length);
	    length = (address + length - maxPhys - 1);
	    address = maxPhys + 1;
	    DEBG("-> %qx, %qx\n", address, length);
	}
    }

    if (!length)
	return (kIOReturnSuccess);

    numPages = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
	// First pass: just tally how many copy-mapper pages are needed.
	state->fCopyPageCount += numPages;
    }
    else
    {
	if (kWalkPrepare & op)
	{
	    // Point the copy window's IOVM pages at the original physical pages.
	    for (IOItemCount idx = 0; idx < numPages; idx++)
		gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
	}
	if (state->fDoubleBuffer)
	    state->fCopyNext += length;
	else
	{
	    // Page-granular window; carry the sub-page offset into remapAddr.
	    state->fCopyNext += round_page(length);
	    remapAddr += (address & PAGE_MASK);
	}

	if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
	{
	    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
		    (kWalkSyncIn & op) ? "->" : "<-",
		address, length, op);
	    // Physical-to-physical copy between bounce pages and the md's pages.
	    if (kWalkSyncIn & op)
	    { // cppvNoModSnk
		copypv(remapAddr, address, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	    else
	    {
		copypv(address, remapAddr, length,
				cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
	    }
	}
    }

    return kIOReturnSuccess;
}
422
// Drive the copy/bounce state machine over the prepared range.  Depending
// on the op flags this: preflights (counts pages needing remap), allocates
// the copy-mapper window or a fallback bounce buffer, installs mappings,
// copies data in/out, and tears everything down on complete.
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn     ret = kIOReturnSuccess;
    UInt32       numSegments;
    UInt64       offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
	// Reset all copy state before the preflight walk.
	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fCopyMD         = 0;

	if (!(kWalkDoubleBuffer & op))
	{
	    // Count pages above the client's address limit (numSegments is
	    // 0-1 == "unlimited").
	    offset = 0;
	    numSegments = 0-1;
	    ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	}

	op &= ~kWalkPreflight;

	// Misalignment (or an explicit request) forces bouncing the whole
	// prepared range rather than just the unreachable pages.
	state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
	if (state->fDoubleBuffer)
	    state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

	if (state->fCopyPageCount)
	{
	    IOMapper * mapper;
	    ppnum_t    mapBase = 0;

	    DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

	    mapper = gIOCopyMapper;
	    if (mapper)
		mapBase = mapper->iovmAlloc(state->fCopyPageCount);
	    if (mapBase)
	    {
		// Got a copy-mapper window; a double-buffered window is one
		// contiguous IOVM run.
		state->fCopyPageAlloc = mapBase;
		if (state->fCopyPageAlloc && state->fDoubleBuffer)
		{
		    DEBG("contig copy map\n");
		    state->fCopyContig = true;
		}

		// Second walk installs the translations (kWalkPrepare).
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
		state->fPrepared = true;
		// Mapper remap needs no data copy at sync time.
		op &= ~(kWalkSyncIn | kWalkSyncOut);
	    }
	    else
	    {
		// No copy mapper available: fall back to a real bounce buffer.
		DEBG("alloc IOBMD\n");
		state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
				    fMDSummary.fDirection, state->fPreparedLength, page_size);

		if (state->fCopyMD)
		{
		    ret = kIOReturnSuccess;
		    state->fPrepared = true;
		}
		else
		{
		    DEBG("IODMACommand !iovmAlloc");
		    return (kIOReturnNoResources);
		}
	    }
	}
    }

    // Sync phase: move data between the md and the bounce storage.
    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
	if (state->fCopyPageCount)
	{
	    DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

	    if (state->fCopyPageAlloc)
	    {
		// Copy-mapper case: segmentOp does the physical copies.
		state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
		offset = 0;
		numSegments = 0-1;
		ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
	    }
	    else if (state->fCopyMD)
	    {
		// Bounce-buffer case: bulk read/write through the md.
		DEBG("sync IOBMD\n");

		if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
		{
		    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

		    IOByteCount bytes;

		    if (kWalkSyncIn & op)
			bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    else
			bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
		    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
		    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
		}
		else
		    ret = kIOReturnSuccess;
	    }
	}
    }

    // Teardown phase: release the copy window / bounce buffer.
    if (kWalkComplete & op)
    {
	if (state->fCopyPageAlloc)
	{
	    gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
	    state->fCopyPageAlloc = 0;
	    state->fCopyPageCount = 0;
	}
	if (state->fCopyMD)
	{
	    state->fCopyMD->release();
	    state->fCopyMD = 0;
	}

	state->fPrepared = false;
    }
    return (ret);
}
560
// Prepare a (sub)range of the attached descriptor for DMA.  Nested calls
// are allowed only with the same offset/length.  On the first prepare,
// decides whether the fast cursor path suffices or a preflight walk must
// set up copy/bounce machinery.
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    // length == 0 means "the whole descriptor".
    if (!length)
	length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
	return kIOReturnNoSpace;

#if 0
    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
	IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }
#endif
    if (fActive++)
    {
	// Nested prepare: must match the outstanding range exactly.
	if ((state->fPreparedOffset != offset)
	  || (state->fPreparedLength != length))
	    ret = kIOReturnNotReady;
    }
    else
    {
	state->fPreparedOffset = offset;
	state->fPreparedLength = length;

	state->fCopyContig     = false;
	state->fMisaligned     = false;
	state->fDoubleBuffer   = false;
	state->fPrepared       = false;
	state->fCopyNext       = 0;
	state->fCopyPageAlloc  = 0;
	state->fCopyPageCount  = 0;
	state->fCopyMD         = 0;

	// Cursor (no-walk) path is usable when iterate-only was requested,
	// or when addressing never needs checking and alignment is already
	// satisfied (bit 31 of fPageAlign flags a uniform page alignment).
	state->fCursor = state->fIterateOnly
			|| (!state->fCheckAddressing
			    && (!fAlignMask
				|| ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
	if (!state->fCursor)
	{
	    IOOptionBits op = kWalkPrepare | kWalkPreflight;
	    if (synchronize)
		op |= kWalkSyncOut;
	    ret = walkAll(op);
	}
	if (kIOReturnSuccess == ret)
	    state->fPrepared = true;
    }
    return ret;
}
616
// Balance a prepare().  Only the last complete() of a nested set tears
// down the walk state (optionally syncing bounced data back to the md).
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
	return kIOReturnNotReady;

    if (!--fActive)
    {
	if (!state->fCursor)
	{
	    // Tear down copy machinery; sync bounce data in first if asked.
	    IOOptionBits op = kWalkComplete;
	    if (synchronize)
		op |= kWalkSyncIn;
	    ret = walkAll(op);
	}
	state->fPrepared = false;

#if 0
	if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
	{
	    // XXX gvdl: need invalidate before Chardonnay ships
	    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

	    poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
	}
#endif
    }

    return ret;
}
650
// Synchronize bounced data with the memory descriptor in the direction(s)
// given by options; kForceDoubleBuffer rebuilds the walk state as a full
// double buffer first.
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    // Exactly one direction may be requested.
    if (kIODirectionOutIn == (kIODirectionOutIn & options))
	return kIOReturnBadArgument;

    if (fActive < 1)
	return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
	if (state->fDoubleBuffer)
	    return kIOReturnSuccess;
	if (state->fCursor)
	    state->fCursor = false;
	else
	    // NOTE(review): any error from this teardown walk is
	    // overwritten by the final walkAll() below — confirm intended.
	    ret = walkAll(kWalkComplete);

	op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
	// Cursor path has no bounce storage, so nothing to synchronize.
	return kIOReturnSuccess;

    if (kIODirectionIn & options)
	op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
	op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
688
689 IOReturn
690 IODMACommand::genIOVMSegments(UInt64 *offsetP,
691 void *segmentsP,
692 UInt32 *numSegmentsP)
693 {
694 return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
695 }
696
// Core segment walker.  Pulls segments from the memory descriptor (or the
// copy/bounce substitute), coalesces physically contiguous runs, enforces
// the client's address-width, max-segment-size and alignment constraints,
// and emits each finished segment through outSegFunc.  `reference` carries
// the walk-op flags.  On success *offsetP and *numSegmentsP are advanced.
IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
			      void   *reference,
			      UInt64 *offsetP,
			      void   *segmentsP,
			      UInt32 *numSegmentsP)
{
    IOOptionBits           op = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    // Completion walks may run after the command is no longer active.
    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    // fState holds the md's walk cursor between calls.
    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    // A rewind or a non-sequential offset restarts the md walk.
    if (!offset || offset != state->fOffset) {
	state->fOffset  = 0;
	state->fIOVMAddr = 0;
	state->fMapped  = (IS_MAPPED(fMappingOptions) && fMapper);
	mdOp = kIOMDFirstSegment;
    };

    UInt64    bypassMask = fBypassMask;
    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    // Highest address the client can express (all ones when 64-bit).
    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    // state->fIOVMAddr non-zero means a segment (or remainder of one) is
    // pending from a previous iteration.
    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fCopyContig && (kWalkClient & op))
	    {
		// Contiguous copy window: compute the address directly.
		state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
				    + offset - internalState->fPreparedOffset;
		rtn = kIOReturnSuccess;
	    }
	    else
	    {
		// Ask the md (or the bounce buffer md) for the next segment.
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess) {
		assert(state->fIOVMAddr);
		assert(state->fLength);
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	};

	// Start a new output segment, or coalesce with the current one when
	// physically contiguous.
	if (!curSeg.fIOVMAddr) {
	    UInt64 length = state->fLength;

	    offset	    += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}
	else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
	    UInt64 length = state->fLength;
	    offset	    += length;
	    curSeg.fLength  += length;
	    state->fIOVMAddr = 0;
	};


	if (!state->fIOVMAddr)
	{
	    // Enforce client constraints on the coalesced segment.
	    if (kWalkClient & op)
	    {
		if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
		{
		    if (internalState->fCursor)
		    {
			// Cursor path has no remap machinery: hard failure.
			curSeg.fIOVMAddr = 0;
			ret = kIOReturnMessageTooLarge;
			break;
		    }
		    else if (curSeg.fIOVMAddr <= maxPhys)
		    {
			// Straddles the limit: emit the reachable head now,
			// push the tail back for the next iteration.
			UInt64 remain, newLength;

			newLength	 = (maxPhys + 1 - curSeg.fIOVMAddr);
			DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
			remain		 = curSeg.fLength - newLength;
			state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
			curSeg.fLength	 = newLength;
			state->fLength	 = remain;
			offset		-= remain;
		    }
		    else if (gIOCopyMapper)
		    {
			// Entirely above the limit: substitute the copy-mapper
			// page that prepare() remapped this physical page to.
			DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
			// Cache this!
			for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
			{
			    if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
					ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
			    {
				curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
				break;
			    }
			}
			DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    }
		}
	    }

	    // Split segments longer than the client's maximum.
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		UInt64 remain = curSeg.fLength - fMaxSegmentSize;

		state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
		curSeg.fLength	 = fMaxSegmentSize;

		state->fLength	 = remain;
		offset		-= remain;
	    }

	    if (internalState->fCursor
		&& (0 != (fAlignMask & curSeg.fIOVMAddr)))
	    {
		curSeg.fIOVMAddr = 0;
		ret = kIOReturnNotAligned;
		break;
	    }

	    // Clamp the final segment to the prepared range.
	    if (offset >= memLength)
	    {
		curSeg.fLength	 -= (offset - memLength);
		offset		  = memLength;
		state->fIOVMAddr  = state->fLength = 0;	// At end
		break;
	    }
	}

	// A pending remainder means curSeg is complete: emit it (unless the
	// caller's array is full).
	if (state->fIOVMAddr) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    // Emit the last, still-open segment.
    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }
    return ret;
}
885
886 IOReturn
887 IODMACommand::clientOutputSegment(
888 void *reference, IODMACommand *target,
889 Segment64 segment, void *vSegList, UInt32 outSegIndex)
890 {
891 IOReturn ret = kIOReturnSuccess;
892
893 if ((target->fNumAddressBits < 64)
894 && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
895 {
896 DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
897 ret = kIOReturnMessageTooLarge;
898 }
899
900 if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
901 {
902 DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
903 ret = kIOReturnMessageTooLarge;
904 }
905
906 return (ret);
907 }
908
909 bool
910 IODMACommand::OutputHost32(IODMACommand *,
911 Segment64 segment, void *vSegList, UInt32 outSegIndex)
912 {
913 Segment32 *base = (Segment32 *) vSegList;
914 base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
915 base[outSegIndex].fLength = (UInt32) segment.fLength;
916 return true;
917 }
918
919 bool
920 IODMACommand::OutputBig32(IODMACommand *,
921 Segment64 segment, void *vSegList, UInt32 outSegIndex)
922 {
923 const UInt offAddr = outSegIndex * sizeof(Segment32);
924 const UInt offLen = offAddr + sizeof(UInt32);
925 OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
926 OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
927 return true;
928 }
929
930 bool
931 IODMACommand::OutputLittle32(IODMACommand *,
932 Segment64 segment, void *vSegList, UInt32 outSegIndex)
933 {
934 const UInt offAddr = outSegIndex * sizeof(Segment32);
935 const UInt offLen = offAddr + sizeof(UInt32);
936 OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
937 OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
938 return true;
939 }
940
941 bool
942 IODMACommand::OutputHost64(IODMACommand *,
943 Segment64 segment, void *vSegList, UInt32 outSegIndex)
944 {
945 Segment64 *base = (Segment64 *) vSegList;
946 base[outSegIndex] = segment;
947 return true;
948 }
949
950 bool
951 IODMACommand::OutputBig64(IODMACommand *,
952 Segment64 segment, void *vSegList, UInt32 outSegIndex)
953 {
954 const UInt offAddr = outSegIndex * sizeof(Segment64);
955 const UInt offLen = offAddr + sizeof(UInt64);
956 OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
957 OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
958 return true;
959 }
960
961 bool
962 IODMACommand::OutputLittle64(IODMACommand *,
963 Segment64 segment, void *vSegList, UInt32 outSegIndex)
964 {
965 const UInt offAddr = outSegIndex * sizeof(Segment64);
966 const UInt offLen = offAddr + sizeof(UInt64);
967 OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
968 OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
969 return true;
970 }
971
972