// iokit/Kernel/IODMACommand.cpp
/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_MAPPED(type)       (MAPTYPE(type) != kBypassed)
#define IS_BYPASSED(type)     (MAPTYPE(type) == kBypassed)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,  // bounce -> md
    kWalkSyncOut      = 0x02,  // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                    \
    ((kIODirectionNone == (direction))                                    \
        || (kWalkSyncAlways & (op))                                       \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)     \
                & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...) { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...) {}
#endif

/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUnused(IODMACommand,  3);
OSMetaClassDefineReservedUnused(IODMACommand,  4);
OSMetaClassDefineReservedUnused(IODMACommand,  5);
OSMetaClassDefineReservedUnused(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    };

    return me;
}
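#if 0
// Usage sketch (not compiled, illustrative only): create a command for a
// hypothetical device that drives 32 address lines and accepts scatter/gather
// elements of at most 64KB, 4-byte aligned. The output function comes from
// this file; the numeric limits are assumptions for the example.
IODMACommand * cmd = IODMACommand::withSpecification(
                        IODMACommand::OutputHost32,  // Segment32, host endian
                        32,                          // numAddressBits
                        65536,                       // maxSegmentSize
                        IODMACommand::kMapped,       // mappingOptions
                        0,                           // maxTransferSize (0 -> unlimited)
                        4);                          // alignment
#endif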

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    return withSpecification(fOutSeg, fNumAddressBits, fMaxSegmentSize,
            fMappingOptions, fMaxTransferSize, fAlignMask + 1, fMapper, refCon);
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    IOService * device = 0;

    if (!super::init() || !outSegFunc)
        return false;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return false;               // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return false;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        device = mapper;
        mapper = 0;
    }
    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fBypassMask      = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    fMapper          = mapper;
    fRefCon          = refCon;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:                       break;
    case kNonCoherent: /*fMapper = 0;*/ break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return false;
        break;
    default:
        return false;
    };

    if (fMapper)
        fMapper->retain();

    reserved = IONew(IODMACommandInternal, 1);
    if (!reserved)
        return false;
    bzero(reserved, sizeof(IODMACommandInternal));

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return true;
}

void
IODMACommand::free()
{
    if (reserved)
        IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper)
        fMapper->release();

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread,
        // if fActive is true it is probably not a good time to potentially
        // block, so just test for it and return an error.
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
                                       &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;

        mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}
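#if 0
// Usage sketch (not compiled): typical attach/detach cycle. 'md' stands in
// for a caller-owned IOMemoryDescriptor; the names are illustrative.
IOReturn err = cmd->setMemoryDescriptor(md);    // autoPrepare defaults to true
if (kIOReturnSuccess == err)
{
    // ... generate segments and run the I/O ...
    cmd->clearMemoryDescriptor();               // autoComplete defaults to true
}
#endif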

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete)
        return (kIOReturnNotReady);

    if (fMemory) {
        while (fActive)
            complete();
        fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

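// segmentOp is the internal callback that walkAll() feeds to genIOVMSegments().
// It trims each generated segment down to the portion the device cannot reach
// (above maxPhys), then, depending on the op bits: counts the pages that will
// need bouncing (kWalkPreflight), tags the pre-allocated copy pages with their
// source page offsets (kWalkPrepare), and copies data between the source pages
// and the bounce pages with copypv() on kWalkSyncIn/kWalkSyncOut.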
IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperPageAlloc || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        state->fMisaligned |= (0 != (state->fSourceAlignMask & address));
        if (state->fMisaligned) DEBG("misaligned %qx:%qx, %lx\n", address, length, state->fSourceAlignMask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length  = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper)
                    {
                        cpuAddr = target->fMapper->mapAddr(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                            (kWalkSyncIn & op) ? "->" : "<-",
                            address, chunk, op);

                    if (kWalkSyncIn & op)
                    {   // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
                        copypv(cpuAddr, remapAddr, chunk,
                                cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

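// walkAll() drives the bounce-buffer state machine. With kWalkPreflight it
// discovers misaligned or out-of-reach segments and allocates copy pages
// (falling back to a contiguous IOBufferMemoryDescriptor double buffer when a
// page list cannot be allocated); kWalkSyncIn/kWalkSyncOut move data between
// the client's memory and the bounce copy; kWalkComplete frees the resources.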
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!state->fDoubleBuffer)
            {
                kern_return_t kr;

                if (fMapper) panic("fMapper copying");

                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
                state->fCopyMD = IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                                    fMDSummary.fDirection, state->fPreparedLength, mask);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    if (fActive)
        return kIOReturnNotPermitted;

    if (!outSegFunc)
        return kIOReturnBadArgument;

    bool is32Bit = (OutputHost32 == outSegFunc || OutputBig32 == outSegFunc
                    || OutputLittle32 == outSegFunc);
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return kIOReturnBadArgument;        // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT))
        return kIOReturnBadArgument;

    if (!maxSegmentSize)
        maxSegmentSize--;       // Set Max segment to -1
    if (!maxTransferSize)
        maxTransferSize--;      // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        fInternalState->fDevice = mapper;
        mapper = 0;
    }
    if (!mapper)
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    // Clear the bypass mask before the switch below may refill it;
    // zeroing it afterwards would discard the mask just fetched.
    fNumSegments = 0;
    fBypassMask  = 0;

    switch (MAPTYPE(mappingOptions))
    {
    case kMapped:      break;
    case kNonCoherent: break;
    case kBypassed:
        if (mapper && !mapper->getBypassMask(&fBypassMask))
            return kIOReturnBadArgument;
        break;
    default:
        return kIOReturnBadArgument;
    };

    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment)
        alignment = 1;
    fAlignMask       = alignment - 1;
    if (mapper != fMapper)
    {
        // Either mapper may legitimately be NULL (e.g. no system mapper).
        if (mapper)  mapper->retain();
        if (fMapper) fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));

    return prepare(offset, length, flushCache, synchronize);
}
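#if 0
// Usage sketch (not compiled): respecify and prepare in one call, useful when
// one command object is reused against hardware with different limits. The
// numeric arguments are illustrative assumptions.
IOReturn err = cmd->prepareWithSpecification(
                        IODMACommand::OutputHost64,
                        64,             // numAddressBits: no restriction
                        0,              // maxSegmentSize: 0 -> unlimited
                        IODMACommand::kMapped,
                        0,              // maxTransferSize: 0 -> unlimited
                        1,              // alignment
                        0,              // mapper: 0 -> system mapper
                        0, 0,           // offset, length (0 -> whole descriptor)
                        true, true);    // flushCache, synchronize
#endif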

IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    MappingOptions mappingOptions = fMappingOptions;

    if (!length)
        length = fMDSummary.fLength;

    if (length > fMaxTransferSize)
        return kIOReturnNoSpace;

    if (IS_NONCOHERENT(mappingOptions) && flushCache) {
        IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

        poMD->performOperation(kIOMemoryIncoherentIOStore, offset, length);
    }
    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
        state->fLocalMapperPageAlloc = 0;
        state->fLocalMapperPageCount = 0;

        state->fLocalMapper    = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (fMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }

        if (fMapper)
        {
            if (state->fLocalMapper)
            {
                state->fLocalMapperPageCount = atop_64(round_page(
                        state->fPreparedLength + ((state->fPreparedOffset + fMDSummary.fPageAlign) & page_mask)));
                state->fLocalMapperPageAlloc = ptoa_64(fMapper->iovmAllocDMACommand(this, state->fLocalMapperPageCount));
                if (!state->fLocalMapperPageAlloc)
                {
                    DEBG("IODMACommand !iovmAlloc");
                    return (kIOReturnNoResources);
                }
                state->fMapContig = true;
            }
            else
            {
                IOMDDMAMapArgs mapArgs;
                bzero(&mapArgs, sizeof(mapArgs));
                mapArgs.fMapper = fMapper;
                mapArgs.fMapSpec.device         = state->fDevice;
                mapArgs.fMapSpec.alignment      = fAlignMask + 1;
                mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
                mapArgs.fOffset = state->fPreparedOffset;
                mapArgs.fLength = state->fPreparedLength;
                const IOMemoryDescriptor * md = state->fCopyMD;
                if (!md) md = fMemory;
                ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
                if (kIOReturnSuccess == ret)
                {
                    state->fLocalMapperPageAlloc = mapArgs.fAlloc;
                    state->fLocalMapperPageCount = mapArgs.fAllocCount;
                    state->fMapContig = mapArgs.fMapContig;
                }
                ret = kIOReturnSuccess;
            }
        }


        if (kIOReturnSuccess == ret)
            state->fPrepared = true;
    }
    return ret;
}
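#if 0
// Usage sketch (not compiled): the prepare()/complete() bracket around a
// single I/O, assuming the descriptor was attached earlier with
// setMemoryDescriptor(md, false).
if (kIOReturnSuccess == cmd->prepare())         // whole attached descriptor
{
    // ... genIOVMSegments(), program the hardware, wait for completion ...
    cmd->complete();
}
#endif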

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        if (state->fLocalMapperPageAlloc)
        {
            if (state->fLocalMapper)
            {
                fMapper->iovmFreeDMACommand(this, atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
            }
            else if (state->fLocalMapperPageCount)
            {
                fMapper->iovmFree(atop_64(state->fLocalMapperPageAlloc), state->fLocalMapperPageCount);
            }
            state->fLocalMapperPageAlloc = 0;
            state->fLocalMapperPageCount = 0;
        }

        state->fPrepared = false;

        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

            poMD->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
        }
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}
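#if 0
// Usage sketch (not compiled): after a device-to-memory transfer that may
// have landed in a bounce buffer, pull the data back into the client's
// memory before the driver inspects it.
cmd->synchronize(kIODirectionIn);
#endif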

struct IODMACommandTransferContext
{
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapAddr(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                       cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                       cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    // Returning a non-success code once the request is satisfied stops the
    // genIOVMSegments() iteration early.
    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}
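#if 0
// Usage sketch (not compiled): CPU access to the prepared range without
// mapping it into the kernel. 'statusWord' is an illustrative local.
uint32_t statusWord;
if (sizeof(statusWord) != cmd->readBytes(0, &statusWord, sizeof(statusWord)))
{
    // short transfer: offset/length fell outside the prepared range
}
#endif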

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        state->fIOVMAddr              = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        state->fMapped                = (IS_MAPPED(fMappingOptions) && fMapper);
        mdOp                          = kIOMDFirstSegment;
    };

    UInt64    bypassMask  = fBypassMask;
    UInt32    segIndex    = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg      = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperPageAlloc)
            {
                state->fIOVMAddr = internalState->fLocalMapperPageAlloc + offset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength; )
                    {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
                        {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                  phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0;  // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length    = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr | bypassMask;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength        = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain           = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                              remap && (addrPage != vm_page_get_offset(remap));
                              remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                        + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain           = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            if (curSeg.fLength > fMaxSegmentSize)
            {
                UInt64 remain = curSeg.fLength - fMaxSegmentSize;

                state->fIOVMAddr = fMaxSegmentSize + curSeg.fIOVMAddr;
                curSeg.fLength   = fMaxSegmentSize;

                state->fLength   = remain;
                offset          -= remain;
            }

            if (internalState->fCursor
                && (0 != (internalState->fSourceAlignMask & curSeg.fIOVMAddr)))
            {
                curSeg.fIOVMAddr = 0;
                ret = kIOReturnNotAligned;
                break;
            }

            if (offset >= memLength)
            {
                curSeg.fLength  -= (offset - memLength);
                offset           = memLength;
                state->fIOVMAddr = state->fLength = 0;  // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if (segIndex + 1 == numSegments)
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperPageAlloc || !target->fMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}
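#if 0
// Usage sketch (not compiled): the canonical scatter/gather loop, assuming
// the command was created with OutputHost64 so segmentsP is a Segment64
// array. 'preparedLength' would come from getPreparedOffsetAndLength().
Segment64 segs[32];
UInt64    offset = 0;                   // relative to the prepared range
while (offset < preparedLength)
{
    UInt32 numSegs = 32;
    if ((kIOReturnSuccess != cmd->genIOVMSegments(&offset, segs, &numSegs)) || !numSegs)
        break;
    for (UInt32 i = 0; i < numSegs; i++)
    {
        // program segs[i].fIOVMAddr / segs[i].fLength into the hardware
    }
}
#endif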

bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

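#if 0
// Usage sketch (not compiled): a custom SegmentFunction for hardware whose
// descriptor layout none of the stock Output* routines match. The ring entry
// type and its field names are hypothetical.
struct MyHWDescriptor { uint64_t addr; uint32_t bytes; uint32_t flags; };

static bool
emitHWDescriptor(IODMACommand *, IODMACommand::Segment64 segment,
                 void *vSegList, UInt32 outSegIndex)
{
    MyHWDescriptor * ring = (MyHWDescriptor *) vSegList;
    ring[outSegIndex].addr  = OSSwapHostToLittleInt64(segment.fIOVMAddr);
    ring[outSegIndex].bytes = OSSwapHostToLittleInt32((uint32_t) segment.fLength);
    ring[outSegIndex].flags = 0;
    // Returning false makes genIOVMSegments() fail with kIOReturnMessageTooLarge.
    return true;
}
#endif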