/*
 * Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"
#include "IOCopyMapper.h"

#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)       (MAPTYPE(type) == kMapped)
#define IS_BYPASSED(type)     (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)

static bool gIOEnableCopyMapper = true;

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};

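/*
 * The kWalk* flags select phases of walkAll(): prepare() passes
 * kWalkPrepare | kWalkPreflight (plus kWalkSyncOut when synchronizing),
 * complete() passes kWalkComplete (plus kWalkSyncIn), and client segment
 * generation tags its walks with kWalkClient.
 *
 * ExpansionData is the per-command private state behind the 'reserved'
 * pointer: the offset and length of the current prepare(), the bounce
 * ("copy") page allocation or buffer used when memory is misaligned or
 * outside the device-addressable range, and flags recording how the walk
 * was set up.
 */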
struct ExpansionData
{
    IOMDDMAWalkSegmentState fState;
    IOMDDMACharacteristics  fMDSummary;

    UInt64  fPreparedOffset;
    UInt64  fPreparedLength;

    UInt8   fCursor;
    UInt8   fCheckAddressing;
    UInt8   fIterateOnly;
    UInt8   fMisaligned;
    UInt8   fCopyContig;
    UInt8   fPrepared;
    UInt8   fDoubleBuffer;
    UInt8   __pad[1];

    ppnum_t  fCopyPageAlloc;
    ppnum_t  fCopyPageCount;
    addr64_t fCopyNext;

    class IOBufferMemoryDescriptor * fCopyMD;
};
typedef ExpansionData IODMACommandInternal;

#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
    ((kIODirectionNone == (direction))                                      \
        || (kWalkSyncAlways & (op))                                         \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)       \
                & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif
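/*
 * SHOULD_COPY_DIR() decides whether a walk op must copy data between the
 * original memory and the bounce buffer: always for kIODirectionNone
 * (treated as OutIn) or kWalkSyncAlways, otherwise only when the sync
 * direction of the op (kWalkSyncIn -> kIODirectionIn, else kIODirectionOut)
 * overlaps the transfer direction of the memory descriptor.
 */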
#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif


/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUnused(IODMACommand, 0);
OSMetaClassDefineReservedUnused(IODMACommand, 1);
OSMetaClassDefineReservedUnused(IODMACommand, 2);
OSMetaClassDefineReservedUnused(IODMACommand, 3);
OSMetaClassDefineReservedUnused(IODMACommand, 4);
OSMetaClassDefineReservedUnused(IODMACommand, 5);
OSMetaClassDefineReservedUnused(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
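/*
 * A minimal usage sketch (illustrative only, not from this file; the
 * memory descriptor 'md' and the segment array size are hypothetical):
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *       IODMACommand::OutputHost64,  // segment format handed to the caller
 *       32,                          // device emits 32-bit addresses
 *       0,                           // 0 => no segment size limit
 *       IODMACommand::kMapped,       // use the system mapper if present
 *       0,                           // 0 => no transfer size limit
 *       1,                           // byte alignment
 *       0, 0);                       // default mapper, no refCon
 *   if (cmd && (kIOReturnSuccess == cmd->setMemoryDescriptor(md, true))) {
 *       UInt64 offset = 0;
 *       IODMACommand::Segment64 segs[8];
 *       UInt32 numSegs = 8;
 *       cmd->genIOVMSegments(&offset, segs, &numSegs);
 *       // ... program the hardware from segs[0..numSegs-1]; repeat the
 *       // call until offset reaches the transfer length ...
 *       cmd->clearMemoryDescriptor(true);   // completes and detaches
 *   }
 *   if (cmd) cmd->release();
 */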

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    if (!super::init() || !outSegFunc)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                 || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;           // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;           // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;          // Set Max transfer to -1

    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                   break;
    case kNonCoherent: fMapper = 0; break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    reserved = IONew(ExpansionData, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(ExpansionData));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, ExpansionData, 1);

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    };

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        IOReturn rtn = mem->dmaCommandOperation(
            kIOMDGetCharacteristics,
            &fMDSummary, sizeof(fMDSummary));
        if (rtn)
            return rtn;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper
            && (!fNumAddressBits || (fNumAddressBits >= 31)))
            // assuming mapped space is 2G
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        mem->retain();
        fMemory = mem;

        if (autoPrepare)
            return prepare();
    };

    return kIOReturnSuccess;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

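/*
 * segmentOp() is the internal walker callback used by walkAll(). For each
 * generated segment it determines how much of the segment lies beyond the
 * device-addressable range (maxPhys, derived from fNumAddressBits) and, for
 * that portion: in the preflight phase it just counts bounce pages; in the
 * prepare phase it inserts remap entries into gIOCopyMapper and, when the
 * direction requires it, copies data between the original pages and the
 * bounce pages with copypv(). It also flags addresses that violate the
 * command's alignment mask, failing the preflight with kIOReturnNotAligned.
 */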
IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (IOOptionBits) reference;
    addr64_t     maxPhys, address;
    addr64_t     remapAddr = 0;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (target->fAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, target->fAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages  = atop_64(round_page_64(length));
    remapAddr = state->fCopyNext;

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        if (kWalkPrepare & op)
        {
            for (IOItemCount idx = 0; idx < numPages; idx++)
                gIOCopyMapper->iovmInsert(atop_64(remapAddr), idx, atop_64(address) + idx);
        }
        if (state->fDoubleBuffer)
            state->fCopyNext += length;
        else
        {
            state->fCopyNext += round_page(length);
            remapAddr += (address & PAGE_MASK);
        }

        if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                    (kWalkSyncIn & op) ? "->" : "<-",
                    address, length, op);
            if (kWalkSyncIn & op)
            {                           // cppvNoModSnk
                copypv(remapAddr, address, length,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
            else
            {
                copypv(address, remapAddr, length,
                        cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
            }
        }
    }

    return kIOReturnSuccess;
}

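/*
 * walkAll() drives the bounce-buffer machinery. With kWalkPreflight it runs
 * segmentOp() over the prepared range to count pages that need remapping
 * (or, when double buffering, simply covers the whole prepared length),
 * then either allocates that many pages from gIOCopyMapper or, if that
 * fails, falls back to a page-aligned IOBufferMemoryDescriptor. With
 * kWalkSyncIn/kWalkSyncOut it copies data between the client's memory and
 * the bounce copy. With kWalkComplete it releases whichever bounce
 * resources were allocated.
 */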
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (gIOEnableCopyMapper && (kWalkPreflight & op))
    {
        state->fCopyContig    = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = 0;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            IOMapper * mapper;
            ppnum_t    mapBase = 0;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            mapper = gIOCopyMapper;
            if (mapper)
                mapBase = mapper->iovmAlloc(state->fCopyPageCount);
            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                if (state->fCopyPageAlloc && state->fDoubleBuffer)
                {
                    DEBG("contig copy map\n");
                    state->fCopyContig = true;
                }

                state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = IOBufferMemoryDescriptor::withOptions(
                                    fMDSummary.fDirection, state->fPreparedLength, page_size);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (gIOEnableCopyMapper && state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = ptoa_64(state->fCopyPageAlloc);
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(segmentOp, (void *) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            gIOCopyMapper->iovmFree(state->fCopyPageAlloc, state->fCopyPageCount);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

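/*
 * prepare() readies [offset, offset + length) of the current memory
 * descriptor for DMA. Nested calls are counted via fActive and must cover
 * the same range. When the range needs no bounce buffering (addressing and
 * alignment already satisfy the spec) fCursor is set and segments can be
 * generated directly; otherwise a preflight walk sets up remapping, syncing
 * data out to the bounce copy when 'synchronize' is true. The flushCache
 * path for noncoherent mappings is currently compiled out.
 */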
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

#if 0
    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, 0, fMDSummary.fLength);
    }
#endif
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fCopyContig    = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = 0;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fCopyMD        = 0;

        state->fCursor = state->fIterateOnly
                    || (!state->fCheckAddressing
                        && (!fAlignMask
                            || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & fAlignMask)))));
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }
        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}

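/*
 * complete() balances a prepare(). Only the call that drops fActive to zero
 * does real work: it syncs bounce-buffered data back into the client's
 * memory (kWalkSyncIn, when 'synchronize' is true) and frees the bounce
 * resources via kWalkComplete. The invalidateCache path for noncoherent
 * mappings is currently compiled out.
 */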
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        state->fPrepared = false;

#if 0
        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            // XXX gvdl: need invalidate before Chardonnay ships
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOInvalidate, 0, fMDSummary.fLength);
        }
#endif
    }

    return ret;
}

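/*
 * synchronize() copies data between the client's memory and the bounce
 * buffer while a transfer is prepared. Exactly one of kIODirectionIn or
 * kIODirectionOut must be passed. kForceDoubleBuffer tears down any direct
 * (cursor) mapping and rebuilds the walk with full double buffering.
 */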
IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

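/*
 * genIOVMSegments() is the client-visible segment generator: starting at
 * *offsetP within the prepared range it emits up to *numSegmentsP coalesced
 * segments through the command's output function, updating both parameters
 * for the next call. The internal variant below does the actual walk: it
 * merges physically contiguous runs, splits segments at the maxPhys and
 * fMaxSegmentSize boundaries, substitutes bounce-buffer addresses where
 * pages were remapped, and enforces alignment when running in direct
 * (cursor) mode.
 */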
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(clientOutputSegment, (void *) kWalkClient, offsetP, segmentsP, numSegmentsP));
}

IOReturn
IODMACommand::genIOVMSegments(InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IOOptionBits           op   = (IOOptionBits) reference;
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if (!offset || offset != state->fOffset) {
        state->fOffset   = 0;
        state->fIOVMAddr = 0;
        state->fMapped   = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp = kIOMDFirstSegment;
    };

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while ((state->fIOVMAddr) || state->fOffset < memLength)
    {
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fCopyContig && (kWalkClient & op))
            {
                state->fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc)
                                    + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess) {
                assert(state->fIOVMAddr);
                assert(state->fLength);
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        };

        if (!curSeg.fIOVMAddr) {
            UInt64 length = state->fLength;

            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }
        else if ((curSeg.fIOVMAddr + curSeg.fLength == state->fIOVMAddr)) {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fLength  += length;
            state->fIOVMAddr = 0;
        };


        if (!state->fIOVMAddr)
        {
            if (kWalkClient & op)
            {
                if ((curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
                {
                    if (internalState->fCursor)
                    {
                        curSeg.fIOVMAddr = 0;
                        ret = kIOReturnMessageTooLarge;
                        break;
                    }
                    else if (curSeg.fIOVMAddr <= maxPhys)
                    {
                        UInt64 remain, newLength;

                        newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                        DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    else if (gIOCopyMapper)
                    {
                        DEBG("sparse switch %qx, %qx ", curSeg.fIOVMAddr, curSeg.fLength);
                        // Cache this!
                        for (UInt checkRemapIndex = 0; checkRemapIndex < internalState->fCopyPageCount; checkRemapIndex++)
                        {
                            if (trunc_page_64(curSeg.fIOVMAddr) == gIOCopyMapper->mapAddr(
                                    ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex)))
                            {
                                curSeg.fIOVMAddr = ptoa_64(internalState->fCopyPageAlloc + checkRemapIndex) + (curSeg.fIOVMAddr & PAGE_MASK);
                                break;
                            }
                        }
                        DEBG("-> %qx, %qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    }
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (fAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

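/*
 * clientOutputSegment() wraps the command's output function: it rejects any
 * segment whose end would not fit in fNumAddressBits, then forwards the
 * segment, mapping a false return from the output function to
 * kIOReturnMessageTooLarge.
 */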
IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    IOReturn ret = kIOReturnSuccess;

    if ((target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*target->fOutSeg)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

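/*
 * The canned output functions below store each generated segment into the
 * caller's array in one of six layouts: 32- or 64-bit address/length pairs
 * in host, big-endian, or little-endian byte order. The 32-bit variants
 * truncate; clientOutputSegment() has already verified that the segment
 * fits the command's address width.
 */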
bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}