/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)           ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type)    (MAPTYPE(type) == kNonCoherent)

enum{
	kWalkSyncIn       = 0x01,       // bounce -> md
	kWalkSyncOut      = 0x02,       // bounce <- md
	kWalkSyncAlways   = 0x04,
	kWalkPreflight    = 0x08,
	kWalkDoubleBuffer = 0x10,
	kWalkPrepare      = 0x20,
	kWalkComplete     = 0x40,
	kWalkClient       = 0x80
};


#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary


#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                      \
	((kIODirectionNone == (direction))                                  \
	    || (kWalkSyncAlways & (op))                                     \
	    || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)   \
	    & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)  { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

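// Note (editorial summary, not original source): the kWalk* flags above drive
// the command's internal walk of the memory descriptor. Preflight counts the
// bounce pages a transfer would need, prepare assigns them, syncIn/syncOut copy
// between the bounce pages and the client descriptor, and complete releases
// them. DEBG logging is compiled out by default (the "#if 0" above).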
/**************************** class IODMACommand ***************************/

#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand, 0);
OSMetaClassDefineReservedUsed(IODMACommand, 1);
OSMetaClassDefineReservedUsed(IODMACommand, 2);
OSMetaClassDefineReservedUsed(IODMACommand, 3);
OSMetaClassDefineReservedUsed(IODMACommand, 4);
OSMetaClassDefineReservedUsed(IODMACommand, 5);
OSMetaClassDefineReservedUsed(IODMACommand, 6);
OSMetaClassDefineReservedUnused(IODMACommand, 7);
OSMetaClassDefineReservedUnused(IODMACommand, 8);
OSMetaClassDefineReservedUnused(IODMACommand, 9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
	IODMACommand * me = new IODMACommand;

	if (me && !me->initWithRefCon(refCon)) {
		me->release();
		return NULL;
	}

	return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
	IODMACommand * me = new IODMACommand;

	if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
	    mapper, refCon)) {
		me->release();
		return NULL;
	}

	return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
	IODMACommand * me = new IODMACommand;

	if (me && !me->initWithSpecification(outSegFunc,
	    numAddressBits, maxSegmentSize,
	    mappingOptions, maxTransferSize,
	    alignment, mapper, refCon)) {
		me->release();
		return NULL;
	}

	return me;
}

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = (uint8_t)fNumAddressBits,
		.fMaxSegmentSize = fMaxSegmentSize,
		.fMaxTransferSize = fMaxTransferSize,
		.fAlignment = fAlignMask + 1,
		.fAlignmentLength = fAlignMaskInternalSegments + 1,
		.fAlignmentInternalSegments = fAlignMaskLength + 1
	};

	return IODMACommand::withSpecification(fOutSeg, &segmentOptions,
	           fMappingOptions, fMapper, refCon);
}

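/*
 * Illustrative client usage (an editorial sketch only, not part of this
 * translation unit; "md" and "length" are assumed caller-provided). A driver
 * for a 32-bit little-endian DMA engine typically does something like:
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *       IODMACommand::OutputLittle32,   // segment format for the hardware
 *       32,                             // numAddressBits
 *       0,                              // maxSegmentSize: 0 -> unlimited
 *       IODMACommand::kMapped,
 *       0,                              // maxTransferSize: 0 -> unlimited
 *       1, NULL, NULL);
 *   cmd->setMemoryDescriptor(md);       // with autoPrepare this also prepares
 *   UInt64 offset = 0;
 *   while (offset < length) {
 *       IODMACommand::Segment32 segs[32];
 *       UInt32 numSegs = 32;
 *       cmd->genIOVMSegments(&offset, segs, &numSegs);
 *       // ... program segs[0..numSegs) into the device ...
 *   }
 *   cmd->complete();
 *   cmd->clearMemoryDescriptor();
 */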
#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
	if (!super::init()) {
		return false;
	}

	if (!reserved) {
		reserved = IONew(IODMACommandInternal, 1);
		if (!reserved) {
			return false;
		}
	}
	bzero(reserved, sizeof(IODMACommandInternal));
	fRefCon = refCon;

	return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    void                 * refCon)
{
	if (!initWithRefCon(refCon)) {
		return false;
	}

	if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
	    mappingOptions, mapper)) {
		return false;
	}

	return true;
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    void           *refCon)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = numAddressBits,
		.fMaxSegmentSize = maxSegmentSize,
		.fMaxTransferSize = maxTransferSize,
		.fAlignment = alignment,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = alignment
	};

	return initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon);
}

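/*
 * Editorial note: setSpecification() validates the segment options (output
 * function bitness versus numAddressBits, minimum page-shift alignment),
 * substitutes "unlimited" defaults for zero maxSegmentSize/maxTransferSize,
 * resolves the mapper (treating a non-IOMapper object as the device and
 * falling back to the system mapper for mapped types), and captures the
 * result in the ivars later consumed by prepare() and genIOVMSegments().
 */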
IOReturn
IODMACommand::setSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper)
{
	IOService * device = NULL;
	UInt8       numAddressBits;
	UInt64      maxSegmentSize;
	UInt64      maxTransferSize;
	UInt32      alignment;

	bool        is32Bit;

	if (!outSegFunc || !segmentOptions) {
		return kIOReturnBadArgument;
	}

	is32Bit = ((OutputHost32 == outSegFunc)
	    || (OutputBig32 == outSegFunc)
	    || (OutputLittle32 == outSegFunc));

	numAddressBits  = segmentOptions->fNumAddressBits;
	maxSegmentSize  = segmentOptions->fMaxSegmentSize;
	maxTransferSize = segmentOptions->fMaxTransferSize;
	alignment       = segmentOptions->fAlignment;
	if (is32Bit) {
		if (!numAddressBits) {
			numAddressBits = 32;
		} else if (numAddressBits > 32) {
			return kIOReturnBadArgument;            // Wrong output function for bits
		}
	}

	if (numAddressBits && (numAddressBits < PAGE_SHIFT)) {
		return kIOReturnBadArgument;
	}

	if (!maxSegmentSize) {
		maxSegmentSize--;       // Set Max segment to -1
	}
	if (!maxTransferSize) {
		maxTransferSize--;      // Set Max transfer to -1
	}
	if (mapper && !OSDynamicCast(IOMapper, mapper)) {
		device = mapper;
		mapper = NULL;
	}
	if (!mapper && (kUnmapped != MAPTYPE(mappingOptions))) {
		IOMapper::checkForSystemMapper();
		mapper = IOMapper::gSystem;
	}

	fNumSegments     = 0;
	fOutSeg          = outSegFunc;
	fNumAddressBits  = numAddressBits;
	fMaxSegmentSize  = maxSegmentSize;
	fMappingOptions  = mappingOptions;
	fMaxTransferSize = maxTransferSize;
	if (!alignment) {
		alignment = 1;
	}
	fAlignMask = alignment - 1;

	alignment = segmentOptions->fAlignmentLength;
	if (!alignment) {
		alignment = 1;
	}
	fAlignMaskLength = alignment - 1;

	alignment = segmentOptions->fAlignmentInternalSegments;
	if (!alignment) {
		alignment = (fAlignMask + 1);
	}
	fAlignMaskInternalSegments = alignment - 1;

	switch (MAPTYPE(mappingOptions)) {
	case kMapped:       break;
	case kUnmapped:     break;
	case kNonCoherent:  break;

	case kBypassed:
		if (!mapper) {
			break;
		}
		return kIOReturnBadArgument;

	default:
		return kIOReturnBadArgument;
	}
	;

	if (mapper != fMapper) {
		if (mapper) {
			mapper->retain();
		}
		if (fMapper) {
			fMapper->release();
		}
		fMapper = mapper;
	}

	fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
	fInternalState->fDevice = device;

	return kIOReturnSuccess;
}

void
IODMACommand::free()
{
	if (reserved) {
		IODelete(reserved, IODMACommandInternal, 1);
	}

	if (fMapper) {
		fMapper->release();
	}

	super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
	IOReturn err = kIOReturnSuccess;

	if (mem == fMemory) {
		if (!autoPrepare) {
			while (fActive) {
				complete();
			}
		}
		return kIOReturnSuccess;
	}

	if (fMemory) {
		// As we are almost certainly being called from a work loop thread
		// if fActive is true it is probably not a good time to potentially
		// block.  Just test for it and return an error
		if (fActive) {
			return kIOReturnBusy;
		}
		clearMemoryDescriptor();
	}

	if (mem) {
		bzero(&fMDSummary, sizeof(fMDSummary));
		err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
		    &fMDSummary, sizeof(fMDSummary));
		if (err) {
			return err;
		}

		ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

		if ((kMapped == MAPTYPE(fMappingOptions))
		    && fMapper) {
			fInternalState->fCheckAddressing = false;
		} else {
			fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));
		}

		fInternalState->fNewMD = true;
		mem->retain();
		fMemory = mem;
		fInternalState->fSetActiveNoMapper = (!fMapper);
		if (fInternalState->fSetActiveNoMapper) {
			mem->dmaCommandOperation(kIOMDSetDMAActive, this, 0);
		}
		if (autoPrepare) {
			err = prepare();
			if (err) {
				clearMemoryDescriptor();
			}
		}
	}

	return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
	if (fActive && !autoComplete) {
		return kIOReturnNotReady;
	}

	if (fMemory) {
		while (fActive) {
			complete();
		}
		if (fInternalState->fSetActiveNoMapper) {
			fMemory->dmaCommandOperation(kIOMDSetDMAInactive, this, 0);
		}
		fMemory->release();
		fMemory = NULL;
	}

	return kIOReturnSuccess;
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
	return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
	IOMemoryDescriptor * mem;

	mem = reserved->fCopyMD;
	if (!mem) {
		mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);
	}

	return mem;
}

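/*
 * Editorial note: segmentOp() is the internal walker callback used by
 * walkAll(). In the preflight pass it flags misaligned or out-of-reach
 * (above maxPhys) ranges and counts the bounce pages needed; in the
 * prepare/sync passes it assigns those pages and uses copypv() to move data
 * between client memory and the bounce pages in the direction selected by
 * the kWalkSync* flags.
 */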
IOReturn
IODMACommand::segmentOp(
	void         *reference,
	IODMACommand *target,
	Segment64     segment,
	void         *segments,
	UInt32        segmentIndex)
{
	IOOptionBits op = (uintptr_t) reference;
	addr64_t     maxPhys, address;
	uint64_t     length;
	uint32_t     numPages;
	uint32_t     mask;

	IODMACommandInternal * state = target->reserved;

	if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAllocValid || !target->fMapper)) {
		maxPhys = (1ULL << target->fNumAddressBits);
	} else {
		maxPhys = 0;
	}
	maxPhys--;

	address = segment.fIOVMAddr;
	length = segment.fLength;

	assert(length);

	if (!state->fMisaligned) {
		mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
		state->fMisaligned |= (0 != (mask & address));
		if (state->fMisaligned) {
			DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
		}
	}
	if (!state->fMisaligned) {
		mask = target->fAlignMaskLength;
		state->fMisaligned |= (0 != (mask & length));
		if (state->fMisaligned) {
			DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
		}
	}

	if (state->fMisaligned && (kWalkPreflight & op)) {
		return kIOReturnNotAligned;
	}

	if (!state->fDoubleBuffer) {
		if ((address + length - 1) <= maxPhys) {
			length = 0;
		} else if (address <= maxPhys) {
			DEBG("tail %qx, %qx", address, length);
			length = (address + length - maxPhys - 1);
			address = maxPhys + 1;
			DEBG("-> %qx, %qx\n", address, length);
		}
	}

	if (!length) {
		return kIOReturnSuccess;
	}

	numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

	if (kWalkPreflight & op) {
		state->fCopyPageCount += numPages;
	} else {
		vm_page_t lastPage;
		lastPage = NULL;
		if (kWalkPrepare & op) {
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				vm_page_set_offset(lastPage, atop_64(address) + idx);
				lastPage = vm_page_get_next(lastPage);
			}
		}

		if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
			lastPage = state->fCopyNext;
			for (IOItemCount idx = 0; idx < numPages; idx++) {
				if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection)) {
					addr64_t cpuAddr = address;
					addr64_t remapAddr;
					uint64_t chunk;

					if ((kMapped == MAPTYPE(target->fMappingOptions))
					    && target->fMapper) {
						cpuAddr = target->fMapper->mapToPhysicalAddress(address);
					}

					remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
					if (!state->fDoubleBuffer) {
						remapAddr += (address & PAGE_MASK);
					}
					chunk = PAGE_SIZE - (address & PAGE_MASK);
					if (chunk > length) {
						chunk = length;
					}

					DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
					    (kWalkSyncIn & op) ? "->" : "<-",
					    address, chunk, op);

					if (kWalkSyncIn & op) { // cppvNoModSnk
						copypv(remapAddr, cpuAddr, chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					} else {
						copypv(cpuAddr, remapAddr, chunk,
						    cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
					}
					address += chunk;
					length -= chunk;
				}
				lastPage = vm_page_get_next(lastPage);
			}
		}
		state->fCopyNext = lastPage;
	}

	return kIOReturnSuccess;
}

IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
	mach_vm_address_t mask = 0xFFFFF000;       //state->fSourceAlignMask
	return IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
	           direction, length, mask);
}

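/*
 * Editorial note: walkAll() drives the passes described above: preflight
 * (count pages, then allocate either a low-memory page list or an
 * IOBufferMemoryDescriptor copy buffer), sync in/out (copy via segmentOp, or
 * via read/writeBytes on the copy buffer), and complete (free the bounce
 * pages or release the copy descriptor).
 */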
IOReturn
IODMACommand::walkAll(UInt8 op)
{
	IODMACommandInternal * state = fInternalState;

	IOReturn ret = kIOReturnSuccess;
	UInt32   numSegments;
	UInt64   offset;

	if (kWalkPreflight & op) {
		state->fMisaligned = false;
		state->fDoubleBuffer = false;
		state->fPrepared = false;
		state->fCopyNext = NULL;
		state->fCopyPageAlloc = NULL;
		state->fCopyPageCount = 0;
		state->fNextRemapPage = NULL;
		state->fCopyMD = NULL;

		if (!(kWalkDoubleBuffer & op)) {
			offset = 0;
			numSegments = 0 - 1;
			ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
		}

		op &= ~kWalkPreflight;

		state->fDoubleBuffer = (state->fMisaligned || state->fForceDoubleBuffer);
		state->fForceDoubleBuffer = false;
		if (state->fDoubleBuffer) {
			state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));
		}

		if (state->fCopyPageCount) {
			vm_page_t mapBase = NULL;

			DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

			if (!fMapper && !state->fDoubleBuffer) {
				kern_return_t kr;

				if (fMapper) {
					panic("fMapper copying");
				}

				kr = vm_page_alloc_list(state->fCopyPageCount,
				    KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
				if (KERN_SUCCESS != kr) {
					DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
					mapBase = NULL;
				}
			}

			if (mapBase) {
				state->fCopyPageAlloc = mapBase;
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
				state->fPrepared = true;
				op &= ~(kWalkSyncIn | kWalkSyncOut);
			} else {
				DEBG("alloc IOBMD\n");
				state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

				if (state->fCopyMD) {
					ret = kIOReturnSuccess;
					state->fPrepared = true;
				} else {
					DEBG("IODMACommand !alloc IOBMD");
					return kIOReturnNoResources;
				}
			}
		}
	}

	if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op)) {
		if (state->fCopyPageCount) {
			DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

			if (state->fCopyPageAlloc) {
				state->fCopyNext = state->fCopyPageAlloc;
				offset = 0;
				numSegments = 0 - 1;
				ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
			} else if (state->fCopyMD) {
				DEBG("sync IOBMD\n");

				if (SHOULD_COPY_DIR(op, fMDSummary.fDirection)) {
					IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

					IOByteCount bytes;

					if (kWalkSyncIn & op) {
						bytes = poMD->writeBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					} else {
						bytes = poMD->readBytes(state->fPreparedOffset,
						    state->fCopyMD->getBytesNoCopy(),
						    state->fPreparedLength);
					}
					DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
					ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
				} else {
					ret = kIOReturnSuccess;
				}
			}
		}
	}

	if (kWalkComplete & op) {
		if (state->fCopyPageAlloc) {
			vm_page_free_list(state->fCopyPageAlloc, FALSE);
			state->fCopyPageAlloc = NULL;
			state->fCopyPageCount = 0;
		}
		if (state->fCopyMD) {
			state->fCopyMD->release();
			state->fCopyMD = NULL;
		}

		state->fPrepared = false;
	}
	return ret;
}

UInt8
IODMACommand::getNumAddressBits(void)
{
	return fNumAddressBits;
}

UInt32
IODMACommand::getAlignment(void)
{
	return fAlignMask + 1;
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
	return fAlignMaskLength + 1;
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
	return fAlignMaskInternalSegments + 1;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    const SegmentOptions * segmentOptions,
    uint32_t               mappingOptions,
    IOMapper             * mapper,
    UInt64                 offset,
    UInt64                 length,
    bool                   flushCache,
    bool                   synchronize)
{
	IOReturn ret;

	if (fActive) {
		return kIOReturnNotPermitted;
	}

	ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
	if (kIOReturnSuccess != ret) {
		return ret;
	}

	ret = prepare(offset, length, flushCache, synchronize);

	return ret;
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
    UInt8           numAddressBits,
    UInt64          maxSegmentSize,
    MappingOptions  mappingOptions,
    UInt64          maxTransferSize,
    UInt32          alignment,
    IOMapper       *mapper,
    UInt64          offset,
    UInt64          length,
    bool            flushCache,
    bool            synchronize)
{
	SegmentOptions segmentOptions =
	{
		.fStructSize = sizeof(segmentOptions),
		.fNumAddressBits = numAddressBits,
		.fMaxSegmentSize = maxSegmentSize,
		.fMaxTransferSize = maxTransferSize,
		.fAlignment = alignment,
		.fAlignmentLength = 1,
		.fAlignmentInternalSegments = alignment
	};

	return prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
	           offset, length, flushCache, synchronize);
}


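/*
 * Editorial note: prepare() pins the (offset, length) window. It rejects
 * lengths beyond fMaxTransferSize, decides whether the fast "cursor" path can
 * be used or a preflight walk is required, performs the optional flush for
 * non-coherent mappings, and asks the memory descriptor to establish the
 * mapper (IOMMU) allocation. Nested prepares of the same window only bump
 * fActive.
 */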
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	uint32_t               mappingOptions = fMappingOptions;

	// check specification has been set
	if (!fOutSeg) {
		return kIOReturnNotReady;
	}

	if (!length) {
		length = fMDSummary.fLength;
	}

	if (length > fMaxTransferSize) {
		return kIOReturnNoSpace;
	}

	if (fActive++) {
		if ((state->fPreparedOffset != offset)
		    || (state->fPreparedLength != length)) {
			ret = kIOReturnNotReady;
		}
	} else {
		if (fAlignMaskLength & length) {
			return kIOReturnNotAligned;
		}

		state->fPreparedOffset = offset;
		state->fPreparedLength = length;

		state->fMapContig = false;
		state->fMisaligned = false;
		state->fDoubleBuffer = false;
		state->fPrepared = false;
		state->fCopyNext = NULL;
		state->fCopyPageAlloc = NULL;
		state->fCopyPageCount = 0;
		state->fNextRemapPage = NULL;
		state->fCopyMD = NULL;
		state->fLocalMapperAlloc = 0;
		state->fLocalMapperAllocValid = false;
		state->fLocalMapperAllocLength = 0;

		state->fSourceAlignMask = fAlignMask;
		if (fMapper) {
			state->fSourceAlignMask &= page_mask;
		}

		state->fCursor = state->fIterateOnly
		    || (!state->fCheckAddressing
		    && (!state->fSourceAlignMask
		    || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

		if (!state->fCursor) {
			IOOptionBits op = kWalkPrepare | kWalkPreflight;
			if (synchronize) {
				op |= kWalkSyncOut;
			}
			ret = walkAll(op);
		}

		if (IS_NONCOHERENT(mappingOptions) && flushCache) {
			if (state->fCopyMD) {
				state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
			} else {
				IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
				md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
			}
		}

		if (fMapper) {
			IOMDDMAMapArgs mapArgs;
			bzero(&mapArgs, sizeof(mapArgs));
			mapArgs.fMapper = fMapper;
			mapArgs.fCommand = this;
			mapArgs.fMapSpec.device         = state->fDevice;
			mapArgs.fMapSpec.alignment      = fAlignMask + 1;
			mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
			mapArgs.fLength = state->fPreparedLength;
			const IOMemoryDescriptor * md = state->fCopyMD;
			if (md) {
				mapArgs.fOffset = 0;
			} else {
				md = fMemory;
				mapArgs.fOffset = state->fPreparedOffset;
			}
			ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
			//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

			if (kIOReturnSuccess == ret) {
				state->fLocalMapperAlloc = mapArgs.fAlloc;
				state->fLocalMapperAllocValid = true;
				state->fLocalMapperAllocLength = mapArgs.fAllocLength;
				state->fMapContig = mapArgs.fMapContig;
			}
			if (NULL != IOMapper::gSystem) {
				ret = kIOReturnSuccess;
			}
		}
		if (kIOReturnSuccess == ret) {
			state->fPrepared = true;
		}
	}
	return ret;
}

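/*
 * Editorial note: complete() undoes prepare(). On the last nested completion
 * it optionally invalidates caches for non-coherent mappings, runs the
 * kWalkComplete walk (copying bounce data back when synchronize is true), and
 * releases the mapper allocation via kIOMDDMAUnmap.
 */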
IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	IOMemoryDescriptor   * copyMD;

	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	if (!--fActive) {
		copyMD = state->fCopyMD;
		if (copyMD) {
			copyMD->retain();
		}

		if (IS_NONCOHERENT(fMappingOptions) && invalidateCache) {
			if (copyMD) {
				copyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
			} else {
				IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
				md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
			}
		}

		if (!state->fCursor) {
			IOOptionBits op = kWalkComplete;
			if (synchronize) {
				op |= kWalkSyncIn;
			}
			ret = walkAll(op);
		}

		if (state->fLocalMapperAllocValid) {
			IOMDDMAMapArgs mapArgs;
			bzero(&mapArgs, sizeof(mapArgs));
			mapArgs.fMapper = fMapper;
			mapArgs.fCommand = this;
			mapArgs.fAlloc = state->fLocalMapperAlloc;
			mapArgs.fAllocLength = state->fLocalMapperAllocLength;
			const IOMemoryDescriptor * md = copyMD;
			if (md) {
				mapArgs.fOffset = 0;
			} else {
				md = fMemory;
				mapArgs.fOffset = state->fPreparedOffset;
			}

			ret = md->dmaCommandOperation(kIOMDDMAUnmap, &mapArgs, sizeof(mapArgs));

			state->fLocalMapperAlloc = 0;
			state->fLocalMapperAllocValid = false;
			state->fLocalMapperAllocLength = 0;
		}
		if (copyMD) {
			copyMD->release();
		}
		state->fPrepared = false;
	}

	return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
	IODMACommandInternal * state = fInternalState;
	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	if (offset) {
		*offset = state->fPreparedOffset;
	}
	if (length) {
		*length = state->fPreparedLength;
	}

	return kIOReturnSuccess;
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
	IODMACommandInternal * state = fInternalState;
	IOReturn               ret   = kIOReturnSuccess;
	IOOptionBits           op;

	if (kIODirectionOutIn == (kIODirectionOutIn & options)) {
		return kIOReturnBadArgument;
	}

	if (fActive < 1) {
		return kIOReturnNotReady;
	}

	op = 0;
	if (kForceDoubleBuffer & options) {
		if (state->fDoubleBuffer) {
			return kIOReturnSuccess;
		}
		ret = complete(false /* invalidateCache */, true /* synchronize */);
		state->fCursor = false;
		state->fForceDoubleBuffer = true;
		ret = prepare(state->fPreparedOffset, state->fPreparedLength, false /* flushCache */, true /* synchronize */);

		return ret;
	} else if (state->fCursor) {
		return kIOReturnSuccess;
	}

	if (kIODirectionIn & options) {
		op |= kWalkSyncIn | kWalkSyncAlways;
	} else if (kIODirectionOut & options) {
		op |= kWalkSyncOut | kWalkSyncAlways;
	}

	ret = walkAll(op);

	return ret;
}

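/*
 * Editorial note: readBytes()/writeBytes() below give the CPU access to the
 * prepared range. transfer() walks the generated segments with
 * transferSegment(), which copypv()s each piece between the caller's buffer
 * and the (possibly mapper-translated) physical addresses.
 */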
struct IODMACommandTransferContext {
	void *   buffer;
	UInt64   bufferOffset;
	UInt64   remaining;
	UInt32   op;
};
enum{
	kIODMACommandTransferOpReadBytes  = 1,
	kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void *reference,
    IODMACommand *target,
    Segment64     segment,
    void         *segments,
    UInt32        segmentIndex)
{
	IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
	UInt64   length  = min(segment.fLength, context->remaining);
	addr64_t ioAddr  = segment.fIOVMAddr;
	addr64_t cpuAddr = ioAddr;

	context->remaining -= length;

	while (length) {
		UInt64 copyLen = length;
		if ((kMapped == MAPTYPE(target->fMappingOptions))
		    && target->fMapper) {
			cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
			copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
			ioAddr += copyLen;
		}

		switch (context->op) {
		case kIODMACommandTransferOpReadBytes:
			copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
			    cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
			break;
		case kIODMACommandTransferOpWriteBytes:
			copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
			    cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
			break;
		}
		length                -= copyLen;
		context->bufferOffset += copyLen;
	}

	return context->remaining ? kIOReturnSuccess : kIOReturnOverrun;
}

UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
	IODMACommandInternal *      state = fInternalState;
	IODMACommandTransferContext context;
	Segment64                   segments[1];
	UInt32                      numSegments = 0 - 1;

	if (fActive < 1) {
		return 0;
	}

	if (offset >= state->fPreparedLength) {
		return 0;
	}
	length = min(length, state->fPreparedLength - offset);

	context.buffer       = buffer;
	context.bufferOffset = 0;
	context.remaining    = length;
	context.op           = transferOp;
	(void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

	return length - context.remaining;
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length);
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
	return transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length);
}

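/*
 * Editorial note: genIOVMSegments() is the core generator. It walks the
 * memory descriptor (or the copy buffer), coalesces physically contiguous
 * ranges, splits or remaps ranges that exceed maxPhys onto the preallocated
 * bounce pages, and clips segments to fMaxSegmentSize and the configured
 * alignment masks before handing them to the output function.
 */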
IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
	           offsetP, segmentsP, numSegmentsP);
}

IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
    InternalSegmentFunction outSegFunc,
    void   *reference,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
	IODMACommandInternal * internalState = fInternalState;
	IOOptionBits           mdOp = kIOMDWalkSegments;
	IOReturn               ret  = kIOReturnSuccess;

	if (!(kWalkComplete & op) && !fActive) {
		return kIOReturnNotReady;
	}

	if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP) {
		return kIOReturnBadArgument;
	}

	IOMDDMAWalkSegmentArgs *state =
	    (IOMDDMAWalkSegmentArgs *)(void *) fState;

	UInt64 offset    = *offsetP + internalState->fPreparedOffset;
	UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

	if (offset >= memLength) {
		return kIOReturnOverrun;
	}

	if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
		state->fOffset                                   = 0;
		internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
		internalState->fNextRemapPage                    = NULL;
		internalState->fNewMD                            = false;
		mdOp                                             = kIOMDFirstSegment;
		if (fMapper) {
			if (internalState->fLocalMapperAllocValid) {
				state->fMapped = kIOMDDMAWalkMappedLocal;
				state->fMappedBase = internalState->fLocalMapperAlloc;
			} else {
				state->fMapped = true;
			}
		}
	}
	;

	UInt32    segIndex = 0;
	UInt32    numSegments = *numSegmentsP;
	Segment64 curSeg = { 0, 0 };
	bool      curSegValid = false;
	addr64_t  maxPhys;

	if (fNumAddressBits && (fNumAddressBits < 64)) {
		maxPhys = (1ULL << fNumAddressBits);
	} else {
		maxPhys = 0;
	}
	maxPhys--;

	while (internalState->fIOVMAddrValid || (state->fOffset < memLength)) {
		// state = next seg
		if (!internalState->fIOVMAddrValid) {
			IOReturn rtn;

			state->fOffset = offset;
			state->fLength = memLength - offset;

			if (internalState->fMapContig && internalState->fLocalMapperAllocValid) {
				state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
				rtn = kIOReturnSuccess;
#if 0
				{
					uint64_t checkOffset;
					IOPhysicalLength segLen;
					for (checkOffset = 0; checkOffset < state->fLength;) {
						addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
						if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys) {
							panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
							    state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
							    phys, checkOffset);
						}
						checkOffset += page_size - (phys & page_mask);
					}
				}
#endif
			} else {
				const IOMemoryDescriptor * memory =
				    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
				rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
				mdOp = kIOMDWalkSegments;
			}

			if (rtn == kIOReturnSuccess) {
				internalState->fIOVMAddrValid = true;
				assert(state->fLength);
				if (curSegValid && ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr)) {
					UInt64 length = state->fLength;
					offset          += length;
					curSeg.fLength  += length;
					internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
				}
			} else if (rtn == kIOReturnOverrun) {
				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;         // At end
			} else {
				return rtn;
			}
		}

		// seg = state, offset = end of seg
		if (!curSegValid) {
			UInt64 length = state->fLength;
			offset          += length;
			curSeg.fIOVMAddr = state->fIOVMAddr;
			curSeg.fLength   = length;
			curSegValid      = true;
			internalState->fIOVMAddrValid = state->fIOVMAddr = 0;
		}

		if (!internalState->fIOVMAddrValid) {
			// maxPhys
			if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys) {
				if (internalState->fCursor) {
					curSegValid = curSeg.fIOVMAddr = 0;
					ret = kIOReturnMessageTooLarge;
					break;
				} else if (curSeg.fIOVMAddr <= maxPhys) {
					UInt64 remain, newLength;

					newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
					DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
					remain = curSeg.fLength - newLength;
					state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
					internalState->fIOVMAddrValid = true;
					curSeg.fLength   = newLength;
					state->fLength   = remain;
					offset          -= remain;
				} else {
					UInt64    addr = curSeg.fIOVMAddr;
					ppnum_t   addrPage = atop_64(addr);
					vm_page_t remap = NULL;
					UInt64    remain, newLength;

					DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

					remap = internalState->fNextRemapPage;
					if (remap && (addrPage == vm_page_get_offset(remap))) {
					} else {
						for (remap = internalState->fCopyPageAlloc;
						    remap && (addrPage != vm_page_get_offset(remap));
						    remap = vm_page_get_next(remap)) {
						}
					}

					if (!remap) {
						panic("no remap page found");
					}

					curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
					    + (addr & PAGE_MASK);
					curSegValid = true;
					internalState->fNextRemapPage = vm_page_get_next(remap);

					newLength = PAGE_SIZE - (addr & PAGE_MASK);
					if (newLength < curSeg.fLength) {
						remain = curSeg.fLength - newLength;
						state->fIOVMAddr = addr + newLength;
						internalState->fIOVMAddrValid = true;
						curSeg.fLength   = newLength;
						state->fLength   = remain;
						offset          -= remain;
					}
					DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
				}
			}

			// reduce size of output segment
			uint64_t reduce, leftover = 0;

			// fMaxSegmentSize
			if (curSeg.fLength > fMaxSegmentSize) {
				leftover        += curSeg.fLength - fMaxSegmentSize;
				curSeg.fLength   = fMaxSegmentSize;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			// alignment current length

			reduce = (curSeg.fLength & fAlignMaskLength);
			if (reduce && (curSeg.fLength > reduce)) {
				leftover        += reduce;
				curSeg.fLength  -= reduce;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			// alignment next address

			reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
			if (reduce && (curSeg.fLength > reduce)) {
				leftover        += reduce;
				curSeg.fLength  -= reduce;
				state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
				internalState->fIOVMAddrValid = true;
			}

			if (leftover) {
				DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
				    leftover, offset,
				    curSeg.fIOVMAddr, curSeg.fLength);
				state->fLength = leftover;
				offset        -= leftover;
			}

			//

			if (internalState->fCursor) {
				bool     misaligned;
				uint32_t mask;

				mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
				misaligned = (0 != (mask & curSeg.fIOVMAddr));
				if (!misaligned) {
					mask = fAlignMaskLength;
					misaligned |= (0 != (mask & curSeg.fLength));
				}
				if (misaligned) {
					if (misaligned) {
						DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
					}
					curSegValid = curSeg.fIOVMAddr = 0;
					ret = kIOReturnNotAligned;
					break;
				}
			}

			if (offset >= memLength) {
				curSeg.fLength -= (offset - memLength);
				offset = memLength;
				internalState->fIOVMAddrValid = state->fIOVMAddr = state->fLength = 0;         // At end
				break;
			}
		}

		if (internalState->fIOVMAddrValid) {
			if ((segIndex + 1 == numSegments)) {
				break;
			}

			ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
			curSegValid = curSeg.fIOVMAddr = 0;
			if (kIOReturnSuccess != ret) {
				break;
			}
		}
	}

	if (curSegValid) {
		ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
	}

	if (kIOReturnSuccess == ret) {
		state->fOffset = offset;
		*offsetP       = offset - internalState->fPreparedOffset;
		*numSegmentsP  = segIndex;
	}
	return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
	void *reference, IODMACommand *target,
	Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
	SegmentFunction segmentFunction = (SegmentFunction) reference;
	IOReturn        ret = kIOReturnSuccess;

	if (target->fNumAddressBits && (target->fNumAddressBits < 64)
	    && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
	    && (target->reserved->fLocalMapperAllocValid || !target->fMapper)) {
		DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
		ret = kIOReturnMessageTooLarge;
	}

	if (!(*segmentFunction)(target, segment, vSegList, outSegIndex)) {
		DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
		ret = kIOReturnMessageTooLarge;
	}

	return ret;
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
    UInt64 *offsetP,
    void   *segmentsP,
    UInt32 *numSegmentsP)
{
	return genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
	           offsetP, segmentsP, numSegmentsP);
}

0c530ab8 | 1432 | IODMACommand::OutputHost32(IODMACommand *, |
0a7de745 | 1433 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1434 | { |
0a7de745 A |
1435 | Segment32 *base = (Segment32 *) vSegList; |
1436 | base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr; | |
1437 | base[outSegIndex].fLength = (UInt32) segment.fLength; | |
1438 | return true; | |
0c530ab8 A |
1439 | } |
1440 | ||
0a7de745 | 1441 | bool |
0c530ab8 | 1442 | IODMACommand::OutputBig32(IODMACommand *, |
0a7de745 | 1443 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1444 | { |
0a7de745 A |
1445 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1446 | const UInt offLen = offAddr + sizeof(UInt32); | |
1447 | OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); | |
1448 | OSWriteBigInt32(vSegList, offLen, (UInt32) segment.fLength); | |
1449 | return true; | |
0c530ab8 A |
1450 | } |
1451 | ||
1452 | bool | |
1453 | IODMACommand::OutputLittle32(IODMACommand *, | |
0a7de745 | 1454 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1455 | { |
0a7de745 A |
1456 | const UInt offAddr = outSegIndex * sizeof(Segment32); |
1457 | const UInt offLen = offAddr + sizeof(UInt32); | |
1458 | OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr); | |
1459 | OSWriteLittleInt32(vSegList, offLen, (UInt32) segment.fLength); | |
1460 | return true; | |
0c530ab8 A |
1461 | } |
1462 | ||
1463 | bool | |
1464 | IODMACommand::OutputHost64(IODMACommand *, | |
0a7de745 | 1465 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1466 | { |
0a7de745 A |
1467 | Segment64 *base = (Segment64 *) vSegList; |
1468 | base[outSegIndex] = segment; | |
1469 | return true; | |
0c530ab8 A |
1470 | } |
1471 | ||
1472 | bool | |
1473 | IODMACommand::OutputBig64(IODMACommand *, | |
0a7de745 | 1474 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1475 | { |
0a7de745 A |
1476 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1477 | const UInt offLen = offAddr + sizeof(UInt64); | |
1478 | OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); | |
1479 | OSWriteBigInt64(vSegList, offLen, (UInt64) segment.fLength); | |
1480 | return true; | |
0c530ab8 A |
1481 | } |
1482 | ||
1483 | bool | |
1484 | IODMACommand::OutputLittle64(IODMACommand *, | |
0a7de745 | 1485 | Segment64 segment, void *vSegList, UInt32 outSegIndex) |
0c530ab8 | 1486 | { |
0a7de745 A |
1487 | const UInt offAddr = outSegIndex * sizeof(Segment64); |
1488 | const UInt offLen = offAddr + sizeof(UInt64); | |
1489 | OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr); | |
1490 | OSWriteLittleInt64(vSegList, offLen, (UInt64) segment.fLength); | |
1491 | return true; | |
0c530ab8 | 1492 | } |