]> git.saurik.com Git - apple/xnu.git/blame - iokit/Kernel/IODMACommand.cpp
xnu-3248.60.10.tar.gz
[apple/xnu.git] / iokit / Kernel / IODMACommand.cpp
CommitLineData
0c530ab8 1/*
2d21ac55 2 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
0c530ab8 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0c530ab8 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0c530ab8 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
0c530ab8
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0c530ab8 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
0c530ab8
A
27 */
28
29#include <IOKit/assert.h>
30
31#include <libkern/OSTypes.h>
32#include <libkern/OSByteOrder.h>
99c3a104 33#include <libkern/OSDebug.h>
0c530ab8
A
34
35#include <IOKit/IOReturn.h>
36#include <IOKit/IOLib.h>
37#include <IOKit/IODMACommand.h>
38#include <IOKit/IOMapper.h>
39#include <IOKit/IOMemoryDescriptor.h>
40#include <IOKit/IOBufferMemoryDescriptor.h>
41
42#include "IOKitKernelInternal.h"
0c530ab8
A
43
// Extract the map-type bits from a MappingOptions value.
#define MAPTYPE(type)		((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type)	(MAPTYPE(type) == kNonCoherent)

// Internal operation flags passed through walkAll()/segmentOp().
enum
{
    kWalkSyncIn       = 0x01,	// bounce -> md
    kWalkSyncOut      = 0x02,	// bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};

// Shorthands into the expansion-data structure hanging off "reserved".
#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary

#if 1
// Decide whether a bounce-buffer copy is needed for this op/direction.
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction) \
    ((kIODirectionNone == (direction)) \
	|| (kWalkSyncAlways & (op)) \
	|| (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut) \
	    & (direction)))
#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)  { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
82
0c530ab8
A
83/**************************** class IODMACommand ***************************/
84
85#undef super
6d2010ae 86#define super IOCommand
0c530ab8
A
87OSDefineMetaClassAndStructors(IODMACommand, IOCommand);
88
2d21ac55
A
89OSMetaClassDefineReservedUsed(IODMACommand, 0);
90OSMetaClassDefineReservedUsed(IODMACommand, 1);
b0d623f7 91OSMetaClassDefineReservedUsed(IODMACommand, 2);
3e170ce0
A
92OSMetaClassDefineReservedUsed(IODMACommand, 3);
93OSMetaClassDefineReservedUsed(IODMACommand, 4);
94OSMetaClassDefineReservedUsed(IODMACommand, 5);
95OSMetaClassDefineReservedUsed(IODMACommand, 6);
0c530ab8
A
96OSMetaClassDefineReservedUnused(IODMACommand, 7);
97OSMetaClassDefineReservedUnused(IODMACommand, 8);
98OSMetaClassDefineReservedUnused(IODMACommand, 9);
99OSMetaClassDefineReservedUnused(IODMACommand, 10);
100OSMetaClassDefineReservedUnused(IODMACommand, 11);
101OSMetaClassDefineReservedUnused(IODMACommand, 12);
102OSMetaClassDefineReservedUnused(IODMACommand, 13);
103OSMetaClassDefineReservedUnused(IODMACommand, 14);
104OSMetaClassDefineReservedUnused(IODMACommand, 15);
105
3e170ce0
A
106IODMACommand *
107IODMACommand::withRefCon(void * refCon)
108{
109 IODMACommand * me = new IODMACommand;
110
111 if (me && !me->initWithRefCon(refCon))
112 {
113 me->release();
114 return 0;
115 }
116
117 return me;
118}
119
120IODMACommand *
121IODMACommand::withSpecification(SegmentFunction outSegFunc,
122 const SegmentOptions * segmentOptions,
123 uint32_t mappingOptions,
124 IOMapper * mapper,
125 void * refCon)
126{
127 IODMACommand * me = new IODMACommand;
128
129 if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
130 mapper, refCon))
131 {
132 me->release();
133 return 0;
134 }
135
136 return me;
137}
138
0c530ab8
A
139IODMACommand *
140IODMACommand::withSpecification(SegmentFunction outSegFunc,
141 UInt8 numAddressBits,
142 UInt64 maxSegmentSize,
143 MappingOptions mappingOptions,
144 UInt64 maxTransferSize,
145 UInt32 alignment,
146 IOMapper *mapper,
147 void *refCon)
148{
149 IODMACommand * me = new IODMACommand;
150
151 if (me && !me->initWithSpecification(outSegFunc,
152 numAddressBits, maxSegmentSize,
153 mappingOptions, maxTransferSize,
154 alignment, mapper, refCon))
155 {
156 me->release();
157 return 0;
3e170ce0 158 }
0c530ab8
A
159
160 return me;
161}
162
163IODMACommand *
164IODMACommand::cloneCommand(void *refCon)
165{
3e170ce0
A
166 SegmentOptions segmentOptions =
167 {
168 .fStructSize = sizeof(segmentOptions),
169 .fNumAddressBits = fNumAddressBits,
170 .fMaxSegmentSize = fMaxSegmentSize,
171 .fMaxTransferSize = fMaxTransferSize,
172 .fAlignment = fAlignMask + 1,
173 .fAlignmentLength = fAlignMaskInternalSegments + 1,
174 .fAlignmentInternalSegments = fAlignMaskLength + 1
175 };
176
177 return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
178 fMappingOptions, fMapper, refCon));
0c530ab8
A
179}
180
181#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)
182
3e170ce0
A
183bool
184IODMACommand::initWithRefCon(void * refCon)
185{
186 if (!super::init()) return (false);
187
188 if (!reserved)
189 {
190 reserved = IONew(IODMACommandInternal, 1);
191 if (!reserved) return false;
192 }
193 bzero(reserved, sizeof(IODMACommandInternal));
194 fRefCon = refCon;
195
196 return (true);
197}
198
199bool
200IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
201 const SegmentOptions * segmentOptions,
202 uint32_t mappingOptions,
203 IOMapper * mapper,
204 void * refCon)
205{
206 if (!initWithRefCon(refCon)) return false;
207
208 if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
209 mappingOptions, mapper)) return false;
210
211 return (true);
212}
213
0c530ab8
A
214bool
215IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
216 UInt8 numAddressBits,
217 UInt64 maxSegmentSize,
218 MappingOptions mappingOptions,
219 UInt64 maxTransferSize,
220 UInt32 alignment,
221 IOMapper *mapper,
222 void *refCon)
3e170ce0
A
223{
224 SegmentOptions segmentOptions =
225 {
226 .fStructSize = sizeof(segmentOptions),
227 .fNumAddressBits = numAddressBits,
228 .fMaxSegmentSize = maxSegmentSize,
229 .fMaxTransferSize = maxTransferSize,
230 .fAlignment = alignment,
231 .fAlignmentLength = 1,
232 .fAlignmentInternalSegments = alignment
233 };
234
235 return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
236}
237
238IOReturn
239IODMACommand::setSpecification(SegmentFunction outSegFunc,
240 const SegmentOptions * segmentOptions,
241 uint32_t mappingOptions,
242 IOMapper * mapper)
0c530ab8 243{
99c3a104 244 IOService * device = 0;
3e170ce0
A
245 UInt8 numAddressBits;
246 UInt64 maxSegmentSize;
247 UInt64 maxTransferSize;
248 UInt32 alignment;
99c3a104 249
3e170ce0 250 bool is32Bit;
0c530ab8 251
3e170ce0
A
252 if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);
253
254 is32Bit = ((OutputHost32 == outSegFunc)
255 || (OutputBig32 == outSegFunc)
256 || (OutputLittle32 == outSegFunc));
257
258 numAddressBits = segmentOptions->fNumAddressBits;
259 maxSegmentSize = segmentOptions->fMaxSegmentSize;
260 maxTransferSize = segmentOptions->fMaxTransferSize;
261 alignment = segmentOptions->fAlignment;
0c530ab8
A
262 if (is32Bit)
263 {
264 if (!numAddressBits)
265 numAddressBits = 32;
266 else if (numAddressBits > 32)
3e170ce0 267 return (kIOReturnBadArgument); // Wrong output function for bits
0c530ab8
A
268 }
269
3e170ce0 270 if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);
0c530ab8 271
3e170ce0
A
272 if (!maxSegmentSize) maxSegmentSize--; // Set Max segment to -1
273 if (!maxTransferSize) maxTransferSize--; // Set Max transfer to -1
99c3a104
A
274
275 if (mapper && !OSDynamicCast(IOMapper, mapper))
276 {
277 device = mapper;
278 mapper = 0;
279 }
3e170ce0 280 if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
0c530ab8
A
281 {
282 IOMapper::checkForSystemMapper();
283 mapper = IOMapper::gSystem;
284 }
285
286 fNumSegments = 0;
0c530ab8
A
287 fOutSeg = outSegFunc;
288 fNumAddressBits = numAddressBits;
289 fMaxSegmentSize = maxSegmentSize;
290 fMappingOptions = mappingOptions;
291 fMaxTransferSize = maxTransferSize;
3e170ce0 292 if (!alignment) alignment = 1;
0c530ab8 293 fAlignMask = alignment - 1;
3e170ce0
A
294
295 alignment = segmentOptions->fAlignmentLength;
296 if (!alignment) alignment = 1;
297 fAlignMaskLength = alignment - 1;
298
299 alignment = segmentOptions->fAlignmentInternalSegments;
300 if (!alignment) alignment = (fAlignMask + 1);
301 fAlignMaskInternalSegments = alignment - 1;
0c530ab8
A
302
303 switch (MAPTYPE(mappingOptions))
304 {
3e170ce0
A
305 case kMapped: break;
306 case kUnmapped: break;
307 case kNonCoherent: break;
308
0c530ab8 309 case kBypassed:
3e170ce0
A
310 if (!mapper) break;
311 return (kIOReturnBadArgument);
312
0c530ab8 313 default:
3e170ce0 314 return (kIOReturnBadArgument);
0c530ab8
A
315 };
316
3e170ce0
A
317 if (mapper != fMapper)
318 {
319 if (mapper) mapper->retain();
320 if (fMapper) fMapper->release();
321 fMapper = mapper;
322 }
0c530ab8
A
323
324 fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
99c3a104
A
325 fInternalState->fDevice = device;
326
3e170ce0 327 return (kIOReturnSuccess);
0c530ab8
A
328}
329
330void
331IODMACommand::free()
332{
3e170ce0 333 if (reserved) IODelete(reserved, IODMACommandInternal, 1);
0c530ab8 334
3e170ce0 335 if (fMapper) fMapper->release();
b0d623f7 336
0c530ab8
A
337 super::free();
338}
339
340IOReturn
341IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
342{
3e170ce0 343 IOReturn err = kIOReturnSuccess;
6d2010ae 344
0c530ab8
A
345 if (mem == fMemory)
346 {
347 if (!autoPrepare)
348 {
349 while (fActive)
350 complete();
351 }
352 return kIOReturnSuccess;
353 }
354
355 if (fMemory) {
356 // As we are almost certainly being called from a work loop thread
357 // if fActive is true it is probably not a good time to potentially
358 // block. Just test for it and return an error
359 if (fActive)
360 return kIOReturnBusy;
361 clearMemoryDescriptor();
6d2010ae 362 }
0c530ab8
A
363
364 if (mem) {
365 bzero(&fMDSummary, sizeof(fMDSummary));
99c3a104
A
366 err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
367 &fMDSummary, sizeof(fMDSummary));
6d2010ae
A
368 if (err)
369 return err;
0c530ab8
A
370
371 ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;
372
373 if ((kMapped == MAPTYPE(fMappingOptions))
99c3a104 374 && fMapper)
0c530ab8
A
375 fInternalState->fCheckAddressing = false;
376 else
377 fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
378
4a3eedf9 379 fInternalState->fNewMD = true;
0c530ab8
A
380 mem->retain();
381 fMemory = mem;
382
b0d623f7 383 mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
6d2010ae 384 if (autoPrepare) {
99c3a104
A
385 err = prepare();
386 if (err) {
387 clearMemoryDescriptor();
388 }
6d2010ae
A
389 }
390 }
391
392 return err;
0c530ab8
A
393}
394
395IOReturn
396IODMACommand::clearMemoryDescriptor(bool autoComplete)
397{
398 if (fActive && !autoComplete)
399 return (kIOReturnNotReady);
400
401 if (fMemory) {
402 while (fActive)
403 complete();
b0d623f7 404 fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
0c530ab8
A
405 fMemory->release();
406 fMemory = 0;
407 }
408
409 return (kIOReturnSuccess);
410}
411
412const IOMemoryDescriptor *
413IODMACommand::getMemoryDescriptor() const
414{
415 return fMemory;
416}
417
3e170ce0
A
418IOMemoryDescriptor *
419IODMACommand::getIOMemoryDescriptor() const
420{
421 IOMemoryDescriptor * mem;
422
423 mem = reserved->fCopyMD;
424 if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);
425
426 return (mem);
427}
0c530ab8
A
428
429IOReturn
430IODMACommand::segmentOp(
431 void *reference,
432 IODMACommand *target,
433 Segment64 segment,
434 void *segments,
435 UInt32 segmentIndex)
436{
b0d623f7 437 IOOptionBits op = (uintptr_t) reference;
0c530ab8 438 addr64_t maxPhys, address;
0c530ab8
A
439 uint64_t length;
440 uint32_t numPages;
3e170ce0 441 uint32_t mask;
0c530ab8
A
442
443 IODMACommandInternal * state = target->reserved;
444
3e170ce0 445 if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAlloc || !target->fMapper))
0c530ab8
A
446 maxPhys = (1ULL << target->fNumAddressBits);
447 else
448 maxPhys = 0;
449 maxPhys--;
450
451 address = segment.fIOVMAddr;
452 length = segment.fLength;
453
454 assert(address);
455 assert(length);
456
457 if (!state->fMisaligned)
458 {
3e170ce0
A
459 mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
460 state->fMisaligned |= (0 != (mask & address));
461 if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
462 }
463 if (!state->fMisaligned)
464 {
465 mask = target->fAlignMaskLength;
466 state->fMisaligned |= (0 != (mask & length));
467 if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
0c530ab8
A
468 }
469
470 if (state->fMisaligned && (kWalkPreflight & op))
471 return (kIOReturnNotAligned);
472
473 if (!state->fDoubleBuffer)
474 {
475 if ((address + length - 1) <= maxPhys)
476 {
477 length = 0;
478 }
479 else if (address <= maxPhys)
480 {
481 DEBG("tail %qx, %qx", address, length);
482 length = (address + length - maxPhys - 1);
483 address = maxPhys + 1;
484 DEBG("-> %qx, %qx\n", address, length);
485 }
486 }
487
488 if (!length)
489 return (kIOReturnSuccess);
490
0b4c1975 491 numPages = atop_64(round_page_64((address & PAGE_MASK) + length));
0c530ab8
A
492
493 if (kWalkPreflight & op)
494 {
495 state->fCopyPageCount += numPages;
496 }
497 else
498 {
0b4c1975
A
499 vm_page_t lastPage;
500 lastPage = NULL;
0c530ab8
A
501 if (kWalkPrepare & op)
502 {
0b4c1975 503 lastPage = state->fCopyNext;
0c530ab8 504 for (IOItemCount idx = 0; idx < numPages; idx++)
0b4c1975
A
505 {
506 vm_page_set_offset(lastPage, atop_64(address) + idx);
507 lastPage = vm_page_get_next(lastPage);
508 }
0c530ab8
A
509 }
510
0b4c1975 511 if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
0c530ab8 512 {
0b4c1975
A
513 lastPage = state->fCopyNext;
514 for (IOItemCount idx = 0; idx < numPages; idx++)
0c530ab8 515 {
0b4c1975
A
516 if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
517 {
99c3a104 518 addr64_t cpuAddr = address;
0b4c1975
A
519 addr64_t remapAddr;
520 uint64_t chunk;
521
99c3a104
A
522 if ((kMapped == MAPTYPE(target->fMappingOptions))
523 && target->fMapper)
524 {
3e170ce0 525 cpuAddr = target->fMapper->mapToPhysicalAddress(address);
99c3a104
A
526 }
527
0b4c1975
A
528 remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
529 if (!state->fDoubleBuffer)
530 {
531 remapAddr += (address & PAGE_MASK);
532 }
533 chunk = PAGE_SIZE - (address & PAGE_MASK);
534 if (chunk > length)
535 chunk = length;
536
537 DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
538 (kWalkSyncIn & op) ? "->" : "<-",
539 address, chunk, op);
540
541 if (kWalkSyncIn & op)
542 { // cppvNoModSnk
99c3a104 543 copypv(remapAddr, cpuAddr, chunk,
0b4c1975
A
544 cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
545 }
546 else
547 {
99c3a104 548 copypv(cpuAddr, remapAddr, chunk,
0b4c1975
A
549 cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
550 }
551 address += chunk;
552 length -= chunk;
553 }
554 lastPage = vm_page_get_next(lastPage);
0c530ab8
A
555 }
556 }
0b4c1975 557 state->fCopyNext = lastPage;
0c530ab8
A
558 }
559
560 return kIOReturnSuccess;
561}
562
3e170ce0
A
563IOBufferMemoryDescriptor *
564IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
565{
566 mach_vm_address_t mask = 0xFFFFF000; //state->fSourceAlignMask
567 return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
568 direction, length, mask));
569}
570
0c530ab8
A
571IOReturn
572IODMACommand::walkAll(UInt8 op)
573{
574 IODMACommandInternal * state = fInternalState;
575
576 IOReturn ret = kIOReturnSuccess;
577 UInt32 numSegments;
578 UInt64 offset;
579
b0d623f7 580 if (kWalkPreflight & op)
0c530ab8 581 {
0c530ab8
A
582 state->fMisaligned = false;
583 state->fDoubleBuffer = false;
584 state->fPrepared = false;
0b4c1975
A
585 state->fCopyNext = NULL;
586 state->fCopyPageAlloc = 0;
0c530ab8 587 state->fCopyPageCount = 0;
0b4c1975
A
588 state->fNextRemapPage = NULL;
589 state->fCopyMD = 0;
0c530ab8
A
590
591 if (!(kWalkDoubleBuffer & op))
592 {
593 offset = 0;
594 numSegments = 0-1;
39236c6e 595 ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
0c530ab8
A
596 }
597
598 op &= ~kWalkPreflight;
599
600 state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
601 if (state->fDoubleBuffer)
602 state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));
603
604 if (state->fCopyPageCount)
605 {
0b4c1975 606 vm_page_t mapBase = NULL;
0c530ab8
A
607
608 DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);
609
3e170ce0 610 if (!fMapper && !state->fDoubleBuffer)
0c530ab8 611 {
0b4c1975 612 kern_return_t kr;
99c3a104
A
613
614 if (fMapper) panic("fMapper copying");
615
0b4c1975
A
616 kr = vm_page_alloc_list(state->fCopyPageCount,
617 KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
618 if (KERN_SUCCESS != kr)
0c530ab8 619 {
0b4c1975
A
620 DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
621 mapBase = NULL;
0c530ab8 622 }
0b4c1975 623 }
0c530ab8 624
0b4c1975
A
625 if (mapBase)
626 {
627 state->fCopyPageAlloc = mapBase;
628 state->fCopyNext = state->fCopyPageAlloc;
0c530ab8
A
629 offset = 0;
630 numSegments = 0-1;
39236c6e 631 ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
0c530ab8
A
632 state->fPrepared = true;
633 op &= ~(kWalkSyncIn | kWalkSyncOut);
634 }
635 else
636 {
637 DEBG("alloc IOBMD\n");
3e170ce0 638 state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);
0c530ab8
A
639
640 if (state->fCopyMD)
641 {
642 ret = kIOReturnSuccess;
643 state->fPrepared = true;
644 }
645 else
646 {
316670eb 647 DEBG("IODMACommand !alloc IOBMD");
0c530ab8
A
648 return (kIOReturnNoResources);
649 }
650 }
651 }
652 }
653
b0d623f7 654 if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
0c530ab8
A
655 {
656 if (state->fCopyPageCount)
657 {
658 DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);
659
0b4c1975 660 if (state->fCopyPageAlloc)
0c530ab8 661 {
0b4c1975 662 state->fCopyNext = state->fCopyPageAlloc;
0c530ab8
A
663 offset = 0;
664 numSegments = 0-1;
39236c6e 665 ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
0c530ab8
A
666 }
667 else if (state->fCopyMD)
668 {
669 DEBG("sync IOBMD\n");
670
671 if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
672 {
673 IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);
674
675 IOByteCount bytes;
676
677 if (kWalkSyncIn & op)
678 bytes = poMD->writeBytes(state->fPreparedOffset,
679 state->fCopyMD->getBytesNoCopy(),
680 state->fPreparedLength);
681 else
682 bytes = poMD->readBytes(state->fPreparedOffset,
683 state->fCopyMD->getBytesNoCopy(),
684 state->fPreparedLength);
685 DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
686 ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
687 }
688 else
689 ret = kIOReturnSuccess;
690 }
691 }
692 }
693
694 if (kWalkComplete & op)
695 {
0b4c1975 696 if (state->fCopyPageAlloc)
0c530ab8 697 {
0b4c1975
A
698 vm_page_free_list(state->fCopyPageAlloc, FALSE);
699 state->fCopyPageAlloc = 0;
0c530ab8
A
700 state->fCopyPageCount = 0;
701 }
702 if (state->fCopyMD)
703 {
704 state->fCopyMD->release();
705 state->fCopyMD = 0;
706 }
707
708 state->fPrepared = false;
709 }
710 return (ret);
711}
712
b0d623f7
A
713UInt8
714IODMACommand::getNumAddressBits(void)
715{
716 return (fNumAddressBits);
717}
718
719UInt32
720IODMACommand::getAlignment(void)
721{
722 return (fAlignMask + 1);
723}
724
3e170ce0
A
725uint32_t
726IODMACommand::getAlignmentLength(void)
727{
728 return (fAlignMaskLength + 1);
729}
730
731uint32_t
732IODMACommand::getAlignmentInternalSegments(void)
733{
734 return (fAlignMaskInternalSegments + 1);
735}
736
737IOReturn
738IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
739 const SegmentOptions * segmentOptions,
740 uint32_t mappingOptions,
741 IOMapper * mapper,
742 UInt64 offset,
743 UInt64 length,
744 bool flushCache,
745 bool synchronize)
746{
747 IOReturn ret;
748
749 if (fActive) return kIOReturnNotPermitted;
750
751 ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
752 if (kIOReturnSuccess != ret) return (ret);
753
754 ret = prepare(offset, length, flushCache, synchronize);
755
756 return (ret);
757}
758
2d21ac55
A
759IOReturn
760IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
761 UInt8 numAddressBits,
762 UInt64 maxSegmentSize,
763 MappingOptions mappingOptions,
764 UInt64 maxTransferSize,
765 UInt32 alignment,
766 IOMapper *mapper,
767 UInt64 offset,
768 UInt64 length,
769 bool flushCache,
770 bool synchronize)
771{
3e170ce0 772 SegmentOptions segmentOptions =
2d21ac55 773 {
3e170ce0
A
774 .fStructSize = sizeof(segmentOptions),
775 .fNumAddressBits = numAddressBits,
776 .fMaxSegmentSize = maxSegmentSize,
777 .fMaxTransferSize = maxTransferSize,
778 .fAlignment = alignment,
779 .fAlignmentLength = 1,
780 .fAlignmentInternalSegments = alignment
2d21ac55
A
781 };
782
3e170ce0
A
783 return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
784 offset, length, flushCache, synchronize));
2d21ac55
A
785}
786
787
0c530ab8
A
788IOReturn
789IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
790{
3e170ce0
A
791 IODMACommandInternal * state = fInternalState;
792 IOReturn ret = kIOReturnSuccess;
793 uint32_t mappingOptions = fMappingOptions;
0c530ab8 794
3e170ce0
A
795 // check specification has been set
796 if (!fOutSeg) return (kIOReturnNotReady);
0c530ab8 797
3e170ce0 798 if (!length) length = fMDSummary.fLength;
0c530ab8 799
3e170ce0 800 if (length > fMaxTransferSize) return kIOReturnNoSpace;
0c530ab8 801
0c530ab8
A
802 if (fActive++)
803 {
804 if ((state->fPreparedOffset != offset)
805 || (state->fPreparedLength != length))
806 ret = kIOReturnNotReady;
807 }
808 else
809 {
3e170ce0
A
810 if (fAlignMaskLength & length) return (kIOReturnNotAligned);
811
0c530ab8
A
812 state->fPreparedOffset = offset;
813 state->fPreparedLength = length;
814
b0d623f7 815 state->fMapContig = false;
0c530ab8
A
816 state->fMisaligned = false;
817 state->fDoubleBuffer = false;
818 state->fPrepared = false;
0b4c1975
A
819 state->fCopyNext = NULL;
820 state->fCopyPageAlloc = 0;
0c530ab8 821 state->fCopyPageCount = 0;
0b4c1975 822 state->fNextRemapPage = NULL;
0c530ab8 823 state->fCopyMD = 0;
3e170ce0
A
824 state->fLocalMapperAlloc = 0;
825 state->fLocalMapperAllocLength = 0;
0c530ab8 826
b0d623f7
A
827 state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));
828
829 state->fSourceAlignMask = fAlignMask;
99c3a104 830 if (fMapper)
b0d623f7
A
831 state->fSourceAlignMask &= page_mask;
832
0c530ab8
A
833 state->fCursor = state->fIterateOnly
834 || (!state->fCheckAddressing
b0d623f7
A
835 && (!state->fSourceAlignMask
836 || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));
99c3a104 837
0c530ab8
A
838 if (!state->fCursor)
839 {
840 IOOptionBits op = kWalkPrepare | kWalkPreflight;
841 if (synchronize)
842 op |= kWalkSyncOut;
843 ret = walkAll(op);
844 }
99c3a104 845
3e170ce0 846 if (IS_NONCOHERENT(mappingOptions) && flushCache)
99c3a104 847 {
3e170ce0 848 if (state->fCopyMD)
99c3a104 849 {
3e170ce0 850 state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
99c3a104
A
851 }
852 else
853 {
3e170ce0
A
854 IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
855 md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
99c3a104
A
856 }
857 }
858
3e170ce0
A
859 if (fMapper)
860 {
861 IOMDDMAMapArgs mapArgs;
862 bzero(&mapArgs, sizeof(mapArgs));
863 mapArgs.fMapper = fMapper;
864 mapArgs.fCommand = this;
865 mapArgs.fMapSpec.device = state->fDevice;
866 mapArgs.fMapSpec.alignment = fAlignMask + 1;
867 mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
868 mapArgs.fLength = state->fPreparedLength;
869 const IOMemoryDescriptor * md = state->fCopyMD;
870 if (md) { mapArgs.fOffset = 0; }
871 else
872 {
873 md = fMemory;
874 mapArgs.fOffset = state->fPreparedOffset;
875 }
876 ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
877//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);
99c3a104 878
3e170ce0
A
879 if (kIOReturnSuccess == ret)
880 {
881 state->fLocalMapperAlloc = mapArgs.fAlloc;
882 state->fLocalMapperAllocLength = mapArgs.fAllocLength;
883 state->fMapContig = mapArgs.fMapContig;
884 }
885 if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
886 }
887 if (kIOReturnSuccess == ret) state->fPrepared = true;
0c530ab8
A
888 }
889 return ret;
890}
891
892IOReturn
893IODMACommand::complete(bool invalidateCache, bool synchronize)
894{
895 IODMACommandInternal * state = fInternalState;
896 IOReturn ret = kIOReturnSuccess;
897
898 if (fActive < 1)
899 return kIOReturnNotReady;
900
901 if (!--fActive)
902 {
3e170ce0
A
903 if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
904 {
905 if (state->fCopyMD)
906 {
907 state->fCopyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
908 }
909 else
910 {
911 IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
912 md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
913 }
914 }
915
0c530ab8
A
916 if (!state->fCursor)
917 {
2d21ac55
A
918 IOOptionBits op = kWalkComplete;
919 if (synchronize)
920 op |= kWalkSyncIn;
921 ret = walkAll(op);
0c530ab8 922 }
3e170ce0 923 if (state->fLocalMapperAlloc)
99c3a104 924 {
3e170ce0 925 if (state->fLocalMapperAllocLength)
99c3a104 926 {
3e170ce0
A
927 fMapper->iovmUnmapMemory(getIOMemoryDescriptor(), this,
928 state->fLocalMapperAlloc, state->fLocalMapperAllocLength);
99c3a104 929 }
3e170ce0
A
930 state->fLocalMapperAlloc = 0;
931 state->fLocalMapperAllocLength = 0;
99c3a104
A
932 }
933
0c530ab8 934 state->fPrepared = false;
0c530ab8
A
935 }
936
937 return ret;
938}
939
b0d623f7
A
940IOReturn
941IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
942{
943 IODMACommandInternal * state = fInternalState;
944 if (fActive < 1)
945 return (kIOReturnNotReady);
946
947 if (offset)
948 *offset = state->fPreparedOffset;
949 if (length)
950 *length = state->fPreparedLength;
951
952 return (kIOReturnSuccess);
953}
954
0c530ab8
A
955IOReturn
956IODMACommand::synchronize(IOOptionBits options)
957{
958 IODMACommandInternal * state = fInternalState;
959 IOReturn ret = kIOReturnSuccess;
960 IOOptionBits op;
961
962 if (kIODirectionOutIn == (kIODirectionOutIn & options))
963 return kIOReturnBadArgument;
964
965 if (fActive < 1)
966 return kIOReturnNotReady;
967
968 op = 0;
969 if (kForceDoubleBuffer & options)
970 {
971 if (state->fDoubleBuffer)
972 return kIOReturnSuccess;
973 if (state->fCursor)
974 state->fCursor = false;
975 else
976 ret = walkAll(kWalkComplete);
977
978 op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
979 }
980 else if (state->fCursor)
981 return kIOReturnSuccess;
982
983 if (kIODirectionIn & options)
984 op |= kWalkSyncIn | kWalkSyncAlways;
985 else if (kIODirectionOut & options)
986 op |= kWalkSyncOut | kWalkSyncAlways;
987
988 ret = walkAll(op);
989
990 return ret;
991}
992
2d21ac55
A
993struct IODMACommandTransferContext
994{
995 void * buffer;
996 UInt64 bufferOffset;
997 UInt64 remaining;
998 UInt32 op;
999};
1000enum
1001{
1002 kIODMACommandTransferOpReadBytes = 1,
1003 kIODMACommandTransferOpWriteBytes = 2
1004};
1005
1006IOReturn
1007IODMACommand::transferSegment(void *reference,
1008 IODMACommand *target,
1009 Segment64 segment,
1010 void *segments,
1011 UInt32 segmentIndex)
1012{
b0d623f7 1013 IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
2d21ac55
A
1014 UInt64 length = min(segment.fLength, context->remaining);
1015 addr64_t ioAddr = segment.fIOVMAddr;
1016 addr64_t cpuAddr = ioAddr;
1017
1018 context->remaining -= length;
1019
1020 while (length)
1021 {
1022 UInt64 copyLen = length;
1023 if ((kMapped == MAPTYPE(target->fMappingOptions))
1024 && target->fMapper)
1025 {
3e170ce0 1026 cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
2d21ac55
A
1027 copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
1028 ioAddr += copyLen;
1029 }
1030
1031 switch (context->op)
1032 {
1033 case kIODMACommandTransferOpReadBytes:
1034 copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
1035 cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
1036 break;
1037 case kIODMACommandTransferOpWriteBytes:
1038 copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
1039 cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
1040 break;
1041 }
1042 length -= copyLen;
1043 context->bufferOffset += copyLen;
1044 }
1045
1046 return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
1047}
1048
1049UInt64
1050IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
1051{
1052 IODMACommandInternal * state = fInternalState;
1053 IODMACommandTransferContext context;
b0d623f7 1054 Segment64 segments[1];
2d21ac55
A
1055 UInt32 numSegments = 0-1;
1056
1057 if (fActive < 1)
1058 return (0);
1059
1060 if (offset >= state->fPreparedLength)
1061 return (0);
1062 length = min(length, state->fPreparedLength - offset);
1063
1064 context.buffer = buffer;
1065 context.bufferOffset = 0;
1066 context.remaining = length;
1067 context.op = transferOp;
b0d623f7 1068 (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);
2d21ac55
A
1069
1070 return (length - context.remaining);
1071}
1072
1073UInt64
1074IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
1075{
1076 return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
1077}
1078
1079UInt64
1080IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
1081{
1082 return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
1083}
1084
0c530ab8
A
1085IOReturn
1086IODMACommand::genIOVMSegments(UInt64 *offsetP,
1087 void *segmentsP,
1088 UInt32 *numSegmentsP)
1089{
b0d623f7
A
1090 return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
1091 offsetP, segmentsP, numSegmentsP));
0c530ab8
A
1092}
1093
/*
 * Core segment generator.  Walks the prepared memory descriptor starting at
 * *offsetP (relative to the prepared range) and emits DMA segments through
 * outSegFunc, honoring the command's address-width (fNumAddressBits),
 * maximum segment size (fMaxSegmentSize) and alignment constraints.
 *
 * op            - walk flags (kWalkClient for client-visible walks).
 * outSegFunc    - callback invoked once per produced segment.
 * reference     - opaque context passed through to outSegFunc.
 * offsetP       - in: starting offset; out: offset reached by this call.
 * segmentsP     - caller's segment array, interpreted by outSegFunc.
 * numSegmentsP  - in: capacity of segmentsP; out: segments produced.
 *
 * Returns kIOReturnSuccess, or kIOReturnOverrun at end of the prepared
 * range, kIOReturnMessageTooLarge / kIOReturnNotAligned when a constraint
 * cannot be met, or an error from the memory descriptor walk.
 */
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
			InternalSegmentFunction outSegFunc,
			void   *reference,
			UInt64 *offsetP,
			void   *segmentsP,
			UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    // Must be prepared (fActive) unless this is a completion-time walk.
    if (!(kWalkComplete & op) && !fActive)
	return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
	return kIOReturnBadArgument;

    // fState is the persistent walk cursor shared with the memory descriptor.
    IOMDDMAWalkSegmentArgs *state =
	(IOMDDMAWalkSegmentArgs *)(void *) fState;

    // Convert the caller-relative offset to an absolute descriptor offset.
    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
	return kIOReturnOverrun;

    // Restart the walk from scratch when beginning, when the caller seeks to
    // a different offset than the cursor holds, or after a new descriptor.
    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
	state->fOffset                 = 0;
	state->fIOVMAddr               = 0;
	internalState->fNextRemapPage  = NULL;
	internalState->fNewMD          = false;
	state->fMapped                 = (0 != fMapper);
	mdOp                           = kIOMDFirstSegment;
    };

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    // Highest address representable in fNumAddressBits (all-ones when the
    // command is not address-limited: 0 - 1 wraps to ~0).
    if (fNumAddressBits && (fNumAddressBits < 64))
	maxPhys = (1ULL << fNumAddressBits);
    else
	maxPhys = 0;
    maxPhys--;

    // Loop while there is a pending (carried-over) address or more memory.
    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
	// state = next seg: fetch the next raw extent from the descriptor.
	if (!state->fIOVMAddr) {

	    IOReturn rtn;

	    state->fOffset = offset;
	    state->fLength = memLength - offset;

	    if (internalState->fMapContig && internalState->fLocalMapperAlloc)
	    {
		// Contiguously mapped by the local mapper: the IOVM address
		// is a simple linear offset into the mapper allocation.
		state->fIOVMAddr = internalState->fLocalMapperAlloc + offset;
		rtn = kIOReturnSuccess;
#if 0
		{
		    uint64_t checkOffset;
		    IOPhysicalLength segLen;
		    for (checkOffset = 0; checkOffset < state->fLength; )
		    {
			addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
			if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
			{
			    panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
				  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
				  phys, checkOffset);
			}
			checkOffset += page_size - (phys & page_mask);
		    }
		}
#endif
	    }
	    else
	    {
		// Ask the (possibly bounce-copied) descriptor for the next
		// physical extent; subsequent iterations continue the walk.
		const IOMemoryDescriptor * memory =
		    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
		rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
		mdOp = kIOMDWalkSegments;
	    }

	    if (rtn == kIOReturnSuccess)
	    {
		assert(state->fIOVMAddr);
		assert(state->fLength);
		// Coalesce: if the new extent is physically contiguous with
		// the segment being built, just extend it.
		if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
		    UInt64 length = state->fLength;
		    offset          += length;
		    curSeg.fLength  += length;
		    state->fIOVMAddr = 0;
		}
	    }
	    else if (rtn == kIOReturnOverrun)
		state->fIOVMAddr = state->fLength = 0;	// At end
	    else
		return rtn;
	}

	// seg = state, offset = end of seg: start a new output segment from
	// the pending extent when none is being built.
	if (!curSeg.fIOVMAddr)
	{
	    UInt64 length = state->fLength;
	    offset	    += length;
	    curSeg.fIOVMAddr = state->fIOVMAddr;
	    curSeg.fLength   = length;
	    state->fIOVMAddr = 0;
	}

	if (!state->fIOVMAddr)
	{
	    // maxPhys: enforce the address-width limit on client walks.
	    if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
	    {
		if (internalState->fCursor)
		{
		    // Cursor mode cannot remap or split; fail the walk.
		    curSeg.fIOVMAddr = 0;
		    ret = kIOReturnMessageTooLarge;
		    break;
		}
		else if (curSeg.fIOVMAddr <= maxPhys)
		{
		    // Segment straddles the limit: truncate it at maxPhys and
		    // push the remainder back into the walk state.
		    UInt64 remain, newLength;

		    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
		    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
		    remain = curSeg.fLength - newLength;
		    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
		    curSeg.fLength   = newLength;
		    state->fLength   = remain;
		    offset          -= remain;
		}
		else
		{
		    // Segment lies entirely above the limit: redirect it to a
		    // pre-allocated bounce ("remap") page below the limit.
		    UInt64    addr = curSeg.fIOVMAddr;
		    ppnum_t   addrPage = atop_64(addr);
		    vm_page_t remap = NULL;
		    UInt64    remain, newLength;

		    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

		    // Fast path: the next remap page in the list usually
		    // corresponds to this source page; otherwise search.
		    remap = internalState->fNextRemapPage;
		    if (remap && (addrPage == vm_page_get_offset(remap)))
		    {
		    }
		    else for (remap = internalState->fCopyPageAlloc;
				remap && (addrPage != vm_page_get_offset(remap));
				remap = vm_page_get_next(remap))
		    {
		    }

		    if (!remap) panic("no remap page found");

		    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
					+ (addr & PAGE_MASK);
		    internalState->fNextRemapPage = vm_page_get_next(remap);

		    // A remap page covers at most one page; split off any tail.
		    newLength = PAGE_SIZE - (addr & PAGE_MASK);
		    if (newLength < curSeg.fLength)
		    {
			remain = curSeg.fLength - newLength;
			state->fIOVMAddr = addr + newLength;
			curSeg.fLength   = newLength;
			state->fLength   = remain;
			offset          -= remain;
		    }
		    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
		}
	    }

	    // reduce size of output segment
	    uint64_t reduce, leftover = 0;

	    // fMaxSegmentSize
	    if (curSeg.fLength > fMaxSegmentSize)
	    {
		leftover       += curSeg.fLength - fMaxSegmentSize;
		curSeg.fLength  = fMaxSegmentSize;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
	    }

	    // alignment current length: trim so the length meets the
	    // length-alignment mask, carrying the trimmed bytes forward.

	    reduce = (curSeg.fLength & fAlignMaskLength);
	    if (reduce && (curSeg.fLength > reduce))
	    {
		leftover        += reduce;
		curSeg.fLength  -= reduce;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
	    }

	    // alignment next address: trim so the following segment starts
	    // on an inter-segment-aligned address.

	    reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
	    if (reduce && (curSeg.fLength > reduce))
	    {
		leftover        += reduce;
		curSeg.fLength  -= reduce;
		state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
	    }

	    if (leftover)
	    {
		// Anything trimmed above becomes the next pending extent.
		DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
		      leftover, offset,
		      curSeg.fIOVMAddr, curSeg.fLength);
		state->fLength   = leftover;
		offset          -= leftover;
	    }

	    //

	    if (internalState->fCursor)
	    {
		// Cursor mode: verify alignment rather than fixing it up.
		// The first segment uses the source alignment mask; later
		// ones use the inter-segment mask.
		bool     misaligned;
		uint32_t mask;

		mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
		misaligned = (0 != (mask & curSeg.fIOVMAddr));
		if (!misaligned)
		{
		    mask = fAlignMaskLength;
		    misaligned |= (0 != (mask & curSeg.fLength));
		}
		if (misaligned)
		{
		    if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
		    curSeg.fIOVMAddr = 0;
		    ret = kIOReturnNotAligned;
		    break;
		}
	    }

	    // Clamp the final segment to the prepared range and stop.
	    if (offset >= memLength)
	    {
		curSeg.fLength -= (offset - memLength);
		offset = memLength;
		state->fIOVMAddr = state->fLength = 0;	// At end
		break;
	    }
	}

	// A pending extent means curSeg is closed: emit it, unless the
	// caller's array is full (keep curSeg for the flush below).
	if (state->fIOVMAddr) {
	    if ((segIndex + 1 == numSegments))
		break;

	    ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	    curSeg.fIOVMAddr = 0;
	    if (kIOReturnSuccess != ret)
		break;
	}
    }

    // Flush the last partially-built segment.
    if (curSeg.fIOVMAddr) {
	ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
	state->fOffset = offset;
	*offsetP       = offset - internalState->fPreparedOffset;
	*numSegmentsP  = segIndex;
    }

    return ret;
}
1364
1365IOReturn
1366IODMACommand::clientOutputSegment(
1367 void *reference, IODMACommand *target,
1368 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1369{
b0d623f7 1370 SegmentFunction segmentFunction = (SegmentFunction) reference;
0c530ab8
A
1371 IOReturn ret = kIOReturnSuccess;
1372
316670eb 1373 if (target->fNumAddressBits && (target->fNumAddressBits < 64)
b0d623f7 1374 && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
3e170ce0 1375 && (target->reserved->fLocalMapperAlloc || !target->fMapper))
0c530ab8
A
1376 {
1377 DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1378 ret = kIOReturnMessageTooLarge;
1379 }
1380
b0d623f7 1381 if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
0c530ab8
A
1382 {
1383 DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
1384 ret = kIOReturnMessageTooLarge;
1385 }
1386
1387 return (ret);
1388}
1389
b0d623f7
A
1390IOReturn
1391IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
1392 UInt64 *offsetP,
1393 void *segmentsP,
1394 UInt32 *numSegmentsP)
1395{
1396 return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
1397 offsetP, segmentsP, numSegmentsP));
1398}
1399
0c530ab8
A
1400bool
1401IODMACommand::OutputHost32(IODMACommand *,
1402 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1403{
1404 Segment32 *base = (Segment32 *) vSegList;
1405 base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
1406 base[outSegIndex].fLength = (UInt32) segment.fLength;
1407 return true;
1408}
1409
1410bool
1411IODMACommand::OutputBig32(IODMACommand *,
1412 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1413{
1414 const UInt offAddr = outSegIndex * sizeof(Segment32);
1415 const UInt offLen = offAddr + sizeof(UInt32);
1416 OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1417 OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength);
1418 return true;
1419}
1420
1421bool
1422IODMACommand::OutputLittle32(IODMACommand *,
1423 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1424{
1425 const UInt offAddr = outSegIndex * sizeof(Segment32);
1426 const UInt offLen = offAddr + sizeof(UInt32);
1427 OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
1428 OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength);
1429 return true;
1430}
1431
1432bool
1433IODMACommand::OutputHost64(IODMACommand *,
1434 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1435{
1436 Segment64 *base = (Segment64 *) vSegList;
1437 base[outSegIndex] = segment;
1438 return true;
1439}
1440
1441bool
1442IODMACommand::OutputBig64(IODMACommand *,
1443 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1444{
1445 const UInt offAddr = outSegIndex * sizeof(Segment64);
1446 const UInt offLen = offAddr + sizeof(UInt64);
1447 OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1448 OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength);
1449 return true;
1450}
1451
1452bool
1453IODMACommand::OutputLittle64(IODMACommand *,
1454 Segment64 segment, void *vSegList, UInt32 outSegIndex)
1455{
1456 const UInt offAddr = outSegIndex * sizeof(Segment64);
1457 const UInt offLen = offAddr + sizeof(UInt64);
1458 OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
1459 OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength);
1460 return true;
1461}
1462
1463