/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)        ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type) (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                  \
    ((kIODirectionNone == (direction))                                  \
     || (kWalkSyncAlways & (op))                                        \
     || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)      \
         & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/**************************** class IODMACommand ***************************/

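//
// Typical client usage, shown as an illustrative sketch only (the 32-bit
// addressing, the 8-entry segment list and the variable names are
// assumptions for the example, not requirements of this class):
//
//    IODMACommand * cmd = IODMACommand::withSpecification(
//        IODMACommand::OutputHost64,         // segment format
//        32,                                 // device address bits
//        0,                                  // max segment size: 0 => unlimited
//        IODMACommand::kMapped,              // mapping type
//        0,                                  // max transfer size: 0 => unlimited
//        1,                                  // alignment
//        0, 0);                              // system mapper, no refcon
//    cmd->setMemoryDescriptor(md, false);    // retain md, mark it DMA active
//    cmd->prepare(0, 0, false, true);        // wire, bounce and/or map as needed
//
//    UInt64    offset  = 0;
//    Segment64 segs[8];
//    UInt32    numSegs = 8;
//    cmd->genIOVMSegments(&offset, &segs[0], &numSegs);
//    // ... program the hardware with segs[0..numSegs-1], run the I/O ...
//
//    cmd->complete();                        // sync back, release walk resources
//    cmd->clearMemoryDescriptor();
//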
#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUsed(IODMACommand,  3);
OSMetaClassDefineReservedUsed(IODMACommand,  4);
OSMetaClassDefineReservedUsed(IODMACommand,  5);
OSMetaClassDefineReservedUsed(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithRefCon(refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction        outSegFunc,
                                const SegmentOptions * segmentOptions,
                                uint32_t               mappingOptions,
                                IOMapper             * mapper,
                                void                 * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
                                         mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment,      mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = fNumAddressBits,
        .fMaxSegmentSize            = fMaxSegmentSize,
        .fMaxTransferSize           = fMaxTransferSize,
        .fAlignment                 = fAlignMask + 1,
        .fAlignmentLength           = fAlignMaskLength + 1,
        .fAlignmentInternalSegments = fAlignMaskInternalSegments + 1
    };

    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
                                            fMappingOptions, fMapper, refCon));
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) return (false);

    if (!reserved)
    {
        reserved = IONew(IODMACommandInternal, 1);
        if (!reserved) return false;
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
                                    const SegmentOptions * segmentOptions,
                                    uint32_t               mappingOptions,
                                    IOMapper             * mapper,
                                    void                 * refCon)
{
    if (!initWithRefCon(refCon)) return false;

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
                                             mappingOptions, mapper)) return false;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
}

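//
// Record and validate the DMA specification. The 32-bit output functions
// are cross-checked against the requested address width, zero sizes
// default to "unlimited", the three alignments are converted to masks,
// and for mapped types with no explicit mapper the system mapper is
// substituted. A non-IOMapper object passed as the mapper is treated as
// the client device.
//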
IOReturn
IODMACommand::setSpecification(SegmentFunction        outSegFunc,
                               const SegmentOptions * segmentOptions,
                               uint32_t               mappingOptions,
                               IOMapper             * mapper)
{
    IOService * device = 0;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;

    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);

    is32Bit = ((OutputHost32   == outSegFunc)
            || (OutputBig32    == outSegFunc)
            || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return (kIOReturnBadArgument);      // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);

    if (!maxSegmentSize)  maxSegmentSize--;     // Set Max segment to -1
    if (!maxTransferSize) maxTransferSize--;    // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        device = mapper;
        mapper = 0;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) alignment = 1;
    fAlignMask = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) alignment = 1;
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) alignment = (fAlignMask + 1);
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions))
    {
        case kMapped:      break;
        case kUnmapped:    break;
        case kNonCoherent: break;

        case kBypassed:
            if (!mapper) break;
            return (kIOReturnBadArgument);

        default:
            return (kIOReturnBadArgument);
    }

    if (mapper != fMapper)
    {
        if (mapper)  mapper->retain();
        if (fMapper) fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return (kIOReturnSuccess);
}

void
IODMACommand::free()
{
    if (reserved) IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper) fMapper->release();

    super::free();
}

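//
// Attach a memory descriptor to the command: the descriptor's
// characteristics are cached in fMDSummary, the descriptor is retained
// and marked DMA active, and fCheckAddressing records whether any of its
// pages can lie beyond the device's addressing limit.
//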
IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block. Just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
                                       &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    IOMemoryDescriptor * mem;

    mem = reserved->fCopyMD;
    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);

    return (mem);
}

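//
// Per-segment worker invoked through genIOVMSegments() by walkAll().
// On the preflight pass it flags misalignment and counts the bounce
// pages needed for ranges beyond maxPhys; on prepare/sync passes it
// tags the bounce pages with their source offsets and copies data
// between the original memory and the bounce pages via copypv().
//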
IOReturn
IODMACommand::segmentOp(
    void         *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint32_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAlloc || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
        state->fMisaligned |= (0 != (mask & address));
        if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
    }
    if (!state->fMisaligned)
    {
        mask = target->fAlignMaskLength;
        state->fMisaligned |= (0 != (mask & length));
        if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper)
                    {
                        cpuAddr = target->fMapper->mapToPhysicalAddress(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                         (kWalkSyncIn & op) ? "->" : "<-",
                         address, chunk, op);

                    if (kWalkSyncIn & op)
                    { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
                        copypv(cpuAddr, remapAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000;        // state->fSourceAlignMask
    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                                             direction, length, mask));
}

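//
// Drive a full walk over the prepared range. Preflight counts bounce
// pages (or forces a full double buffer on misalignment) and allocates
// either a low-memory page list or an IOBufferMemoryDescriptor copy;
// sync passes copy data in or out of the bounce storage; complete frees
// whatever the walk allocated.
//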
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!fMapper && !state->fDoubleBuffer)
            {
                kern_return_t kr;

                if (fMapper) panic("fMapper copying");

                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return (fAlignMaskLength + 1);
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return (fAlignMaskInternalSegments + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
                                       const SegmentOptions * segmentOptions,
                                       uint32_t               mappingOptions,
                                       IOMapper             * mapper,
                                       UInt64                 offset,
                                       UInt64                 length,
                                       bool                   flushCache,
                                       bool                   synchronize)
{
    IOReturn ret;

    if (fActive) return kIOReturnNotPermitted;

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) return (ret);

    ret = prepare(offset, length, flushCache, synchronize);

    return (ret);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
                                     offset, length, flushCache, synchronize));
}

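//
// First prepare() decides between the fast "cursor" path (segments can be
// generated straight from the descriptor) and a preflight walk that sets
// up bounce buffering, then asks the mapper, if any, for an IOVM
// allocation. Nested prepares on the same range only bump fActive.
//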
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) return (kIOReturnNotReady);

    if (!length) length = fMDSummary.fLength;

    if (length > fMaxTransferSize) return kIOReturnNoSpace;

    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        if (fAlignMaskLength & length) return (kIOReturnNotAligned);

        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig     = false;
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;
        state->fLocalMapperAlloc       = 0;
        state->fLocalMapperAllocLength = 0;

        state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (fMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                      || (!state->fCheckAddressing
                          && (!state->fSourceAlignMask
                              || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }

        if (IS_NONCOHERENT(mappingOptions) && flushCache)
        {
            if (state->fCopyMD)
            {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
            }
        }

        if (fMapper)
        {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fMapSpec.device         = state->fDevice;
            mapArgs.fMapSpec.alignment      = fAlignMask + 1;
            mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
            mapArgs.fLength = state->fPreparedLength;
            const IOMemoryDescriptor * md = state->fCopyMD;
            if (md) { mapArgs.fOffset = 0; }
            else
            {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }
            ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
            //IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

            if (kIOReturnSuccess == ret)
            {
                state->fLocalMapperAlloc       = mapArgs.fAlloc;
                state->fLocalMapperAllocLength = mapArgs.fAllocLength;
                state->fMapContig              = mapArgs.fMapContig;
            }
            if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
        }
        if (kIOReturnSuccess == ret) state->fPrepared = true;
    }
    return ret;
}

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            if (state->fCopyMD)
            {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
            }
        }

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        if (state->fLocalMapperAlloc)
        {
            if (state->fLocalMapperAllocLength)
            {
                fMapper->iovmUnmapMemory(getIOMemoryDescriptor(), this,
                                         state->fLocalMapperAlloc, state->fLocalMapperAllocLength);
            }
            state->fLocalMapperAlloc       = 0;
            state->fLocalMapperAllocLength = 0;
        }

        state->fPrepared = false;
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext
{
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

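//
// transferSegment() copies between a client buffer and one generated
// segment, translating mapped addresses back to physical pages a page at
// a time; it returns kIOReturnOverrun once the requested byte count is
// satisfied, to stop the walk. transfer() packages this for readBytes()
// and writeBytes().
//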
IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                       cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                       cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}

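//
// Core segment generator. Walks the copy descriptor when one exists,
// otherwise the client's descriptor, coalescing physically contiguous
// ranges; out-of-reach ranges are redirected to their bounce pages; each
// candidate segment is then trimmed to fMaxSegmentSize and to the length
// and inter-segment alignment masks before being emitted through the
// output function.
//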
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        state->fIOVMAddr              = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        state->fMapped                = (0 != fMapper);
        mdOp = kIOMDFirstSegment;
    }

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperAlloc)
            {
                state->fIOVMAddr = internalState->fLocalMapperAlloc + offset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength; )
                    {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
                        {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                  phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            // maxPhys
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain           = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    // find the bounce page for this address: try the cached
                    // next page first, else scan the allocated page list
                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                              remap && (addrPage != vm_page_get_offset(remap));
                              remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                       + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            // reduce size of output segment
            uint64_t reduce, leftover = 0;

            // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize)
            {
                leftover        += curSeg.fLength - fMaxSegmentSize;
                curSeg.fLength   = fMaxSegmentSize;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            // alignment current length

            reduce = (curSeg.fLength & fAlignMaskLength);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover        += reduce;
                curSeg.fLength  -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            // alignment next address

            reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover        += reduce;
                curSeg.fLength  -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            if (leftover)
            {
                DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
                     leftover, offset,
                     curSeg.fIOVMAddr, curSeg.fLength);
                state->fLength = leftover;
                offset        -= leftover;
            }

            if (internalState->fCursor)
            {
                bool     misaligned;
                uint32_t mask;

                mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
                misaligned = (0 != (mask & curSeg.fIOVMAddr));
                if (!misaligned)
                {
                    mask = fAlignMaskLength;
                    misaligned |= (0 != (mask & curSeg.fLength));
                }
                if (misaligned)
                {
                    DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnNotAligned;
                    break;
                }
            }

            if (offset >= memLength)
            {
                curSeg.fLength -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if (segIndex + 1 == numSegments)
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
    void *reference, IODMACommand *target,
    Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperAlloc || !target->fMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}

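//
// Built-in output segment functions: each packs a Segment64 into a 32-
// or 64-bit address/length pair in host, big-endian or little-endian
// byte order at index outSegIndex of the caller's segment list.
//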
bool
IODMACommand::OutputHost32(IODMACommand *,
                           Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
                          Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
                             Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
                           Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
                          Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
                             Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}