/*
 * Copyright (c) 2005-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <IOKit/assert.h>

#include <libkern/OSTypes.h>
#include <libkern/OSByteOrder.h>
#include <libkern/OSDebug.h>

#include <IOKit/IOReturn.h>
#include <IOKit/IOLib.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include "IOKitKernelInternal.h"

#define MAPTYPE(type)         ((UInt) (type) & kTypeMask)
#define IS_NONCOHERENT(type)  (MAPTYPE(type) == kNonCoherent)

enum
{
    kWalkSyncIn       = 0x01,   // bounce -> md
    kWalkSyncOut      = 0x02,   // bounce <- md
    kWalkSyncAlways   = 0x04,
    kWalkPreflight    = 0x08,
    kWalkDoubleBuffer = 0x10,
    kWalkPrepare      = 0x20,
    kWalkComplete     = 0x40,
    kWalkClient       = 0x80
};

#define fInternalState reserved
#define fState         reserved->fState
#define fMDSummary     reserved->fMDSummary

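// SHOULD_COPY_DIR(op, direction) decides whether a bounce copy is needed for
// a given walk op and transfer direction: copy when the direction is
// unspecified (kIODirectionNone is treated as out+in), when the op forces a
// copy (kWalkSyncAlways), or when the op's sync direction (kWalkSyncIn maps
// to kIODirectionIn, otherwise kIODirectionOut) overlaps the transfer
// direction.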
#if 1
// no direction => OutIn
#define SHOULD_COPY_DIR(op, direction)                                        \
    ((kIODirectionNone == (direction))                                        \
        || (kWalkSyncAlways & (op))                                           \
        || (((kWalkSyncIn & (op)) ? kIODirectionIn : kIODirectionOut)         \
            & (direction)))

#else
#define SHOULD_COPY_DIR(state, direction) (true)
#endif

#if 0
#define DEBG(fmt, args...)  { IOLog(fmt, ## args); kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

/**************************** class IODMACommand ***************************/

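/*
 * Typical client usage, sketched for orientation only (md, segs and numSegs
 * belong to the caller; the argument values are illustrative):
 *
 *   IODMACommand * cmd = IODMACommand::withSpecification(
 *                            IODMACommand::OutputHost64, // outSegFunc
 *                            64,                         // numAddressBits
 *                            0,                          // maxSegmentSize (0 = unlimited)
 *                            IODMACommand::kMapped,      // mappingOptions
 *                            0,                          // maxTransferSize (0 = unlimited)
 *                            1,                          // alignment
 *                            0,                          // mapper (0 selects the system mapper)
 *                            0);                         // refCon
 *   cmd->setMemoryDescriptor(md, false);    // retains md, no auto-prepare
 *   cmd->prepare();                         // wire/map; may bounce-buffer
 *   UInt64 offset = 0;
 *   cmd->genIOVMSegments(&offset, &segs[0], &numSegs);  // Segment64 list for OutputHost64
 *   // ... program the hardware from segs[0..numSegs-1] and run the I/O ...
 *   cmd->complete();                        // copy bounce data back, release mapping
 *   cmd->clearMemoryDescriptor();
 */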
#undef super
#define super IOCommand
OSDefineMetaClassAndStructors(IODMACommand, IOCommand);

OSMetaClassDefineReservedUsed(IODMACommand,  0);
OSMetaClassDefineReservedUsed(IODMACommand,  1);
OSMetaClassDefineReservedUsed(IODMACommand,  2);
OSMetaClassDefineReservedUsed(IODMACommand,  3);
OSMetaClassDefineReservedUsed(IODMACommand,  4);
OSMetaClassDefineReservedUsed(IODMACommand,  5);
OSMetaClassDefineReservedUsed(IODMACommand,  6);
OSMetaClassDefineReservedUnused(IODMACommand,  7);
OSMetaClassDefineReservedUnused(IODMACommand,  8);
OSMetaClassDefineReservedUnused(IODMACommand,  9);
OSMetaClassDefineReservedUnused(IODMACommand, 10);
OSMetaClassDefineReservedUnused(IODMACommand, 11);
OSMetaClassDefineReservedUnused(IODMACommand, 12);
OSMetaClassDefineReservedUnused(IODMACommand, 13);
OSMetaClassDefineReservedUnused(IODMACommand, 14);
OSMetaClassDefineReservedUnused(IODMACommand, 15);

IODMACommand *
IODMACommand::withRefCon(void * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithRefCon(refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction        outSegFunc,
                                const SegmentOptions * segmentOptions,
                                uint32_t               mappingOptions,
                                IOMapper             * mapper,
                                void                 * refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc, segmentOptions, mappingOptions,
                                         mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::withSpecification(SegmentFunction outSegFunc,
                                UInt8           numAddressBits,
                                UInt64          maxSegmentSize,
                                MappingOptions  mappingOptions,
                                UInt64          maxTransferSize,
                                UInt32          alignment,
                                IOMapper       *mapper,
                                void           *refCon)
{
    IODMACommand * me = new IODMACommand;

    if (me && !me->initWithSpecification(outSegFunc,
                                         numAddressBits, maxSegmentSize,
                                         mappingOptions, maxTransferSize,
                                         alignment, mapper, refCon))
    {
        me->release();
        return 0;
    }

    return me;
}

IODMACommand *
IODMACommand::cloneCommand(void *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = (uint8_t) fNumAddressBits,
        .fMaxSegmentSize            = fMaxSegmentSize,
        .fMaxTransferSize           = fMaxTransferSize,
        .fAlignment                 = fAlignMask + 1,
        .fAlignmentLength           = fAlignMaskInternalSegments + 1,
        .fAlignmentInternalSegments = fAlignMaskLength + 1
    };

    return (IODMACommand::withSpecification(fOutSeg, &segmentOptions,
                                            fMappingOptions, fMapper, refCon));
}

#define kLastOutputFunction ((SegmentFunction) kLastOutputFunction)

bool
IODMACommand::initWithRefCon(void * refCon)
{
    if (!super::init()) return (false);

    if (!reserved)
    {
        reserved = IONew(IODMACommandInternal, 1);
        if (!reserved) return false;
    }
    bzero(reserved, sizeof(IODMACommandInternal));
    fRefCon = refCon;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction        outSegFunc,
                                    const SegmentOptions * segmentOptions,
                                    uint32_t               mappingOptions,
                                    IOMapper             * mapper,
                                    void                 * refCon)
{
    if (!initWithRefCon(refCon)) return false;

    if (kIOReturnSuccess != setSpecification(outSegFunc, segmentOptions,
                                             mappingOptions, mapper)) return false;

    return (true);
}

bool
IODMACommand::initWithSpecification(SegmentFunction outSegFunc,
                                    UInt8           numAddressBits,
                                    UInt64          maxSegmentSize,
                                    MappingOptions  mappingOptions,
                                    UInt64          maxTransferSize,
                                    UInt32          alignment,
                                    IOMapper       *mapper,
                                    void           *refCon)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (initWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper, refCon));
}

IOReturn
IODMACommand::setSpecification(SegmentFunction        outSegFunc,
                               const SegmentOptions * segmentOptions,
                               uint32_t               mappingOptions,
                               IOMapper             * mapper)
{
    IOService * device = 0;
    UInt8       numAddressBits;
    UInt64      maxSegmentSize;
    UInt64      maxTransferSize;
    UInt32      alignment;

    bool        is32Bit;

    if (!outSegFunc || !segmentOptions) return (kIOReturnBadArgument);

    is32Bit = ((OutputHost32 == outSegFunc)
            || (OutputBig32 == outSegFunc)
            || (OutputLittle32 == outSegFunc));

    numAddressBits  = segmentOptions->fNumAddressBits;
    maxSegmentSize  = segmentOptions->fMaxSegmentSize;
    maxTransferSize = segmentOptions->fMaxTransferSize;
    alignment       = segmentOptions->fAlignment;
    if (is32Bit)
    {
        if (!numAddressBits)
            numAddressBits = 32;
        else if (numAddressBits > 32)
            return (kIOReturnBadArgument);              // Wrong output function for bits
    }

    if (numAddressBits && (numAddressBits < PAGE_SHIFT)) return (kIOReturnBadArgument);

    if (!maxSegmentSize)  maxSegmentSize--;             // Set Max segment to -1
    if (!maxTransferSize) maxTransferSize--;            // Set Max transfer to -1

    if (mapper && !OSDynamicCast(IOMapper, mapper))
    {
        device = mapper;
        mapper = 0;
    }
    if (!mapper && (kUnmapped != MAPTYPE(mappingOptions)))
    {
        IOMapper::checkForSystemMapper();
        mapper = IOMapper::gSystem;
    }

    fNumSegments     = 0;
    fOutSeg          = outSegFunc;
    fNumAddressBits  = numAddressBits;
    fMaxSegmentSize  = maxSegmentSize;
    fMappingOptions  = mappingOptions;
    fMaxTransferSize = maxTransferSize;
    if (!alignment) alignment = 1;
    fAlignMask = alignment - 1;

    alignment = segmentOptions->fAlignmentLength;
    if (!alignment) alignment = 1;
    fAlignMaskLength = alignment - 1;

    alignment = segmentOptions->fAlignmentInternalSegments;
    if (!alignment) alignment = (fAlignMask + 1);
    fAlignMaskInternalSegments = alignment - 1;

    switch (MAPTYPE(mappingOptions))
    {
        case kMapped:      break;
        case kUnmapped:    break;
        case kNonCoherent: break;

        case kBypassed:
            if (!mapper) break;
            return (kIOReturnBadArgument);

        default:
            return (kIOReturnBadArgument);
    };

    if (mapper != fMapper)
    {
        if (mapper)  mapper->retain();
        if (fMapper) fMapper->release();
        fMapper = mapper;
    }

    fInternalState->fIterateOnly = (0 != (kIterateOnly & mappingOptions));
    fInternalState->fDevice = device;

    return (kIOReturnSuccess);
}

void
IODMACommand::free()
{
    if (reserved) IODelete(reserved, IODMACommandInternal, 1);

    if (fMapper) fMapper->release();

    super::free();
}

IOReturn
IODMACommand::setMemoryDescriptor(const IOMemoryDescriptor *mem, bool autoPrepare)
{
    IOReturn err = kIOReturnSuccess;

    if (mem == fMemory)
    {
        if (!autoPrepare)
        {
            while (fActive)
                complete();
        }
        return kIOReturnSuccess;
    }

    if (fMemory) {
        // As we are almost certainly being called from a work loop thread
        // if fActive is true it is probably not a good time to potentially
        // block.  Just test for it and return an error
        if (fActive)
            return kIOReturnBusy;
        clearMemoryDescriptor();
    }

    if (mem) {
        bzero(&fMDSummary, sizeof(fMDSummary));
        err = mem->dmaCommandOperation(kIOMDGetCharacteristics | (kMapped == MAPTYPE(fMappingOptions)),
                                       &fMDSummary, sizeof(fMDSummary));
        if (err)
            return err;

        ppnum_t highPage = fMDSummary.fHighestPage ? fMDSummary.fHighestPage : gIOLastPage;

        if ((kMapped == MAPTYPE(fMappingOptions))
            && fMapper)
            fInternalState->fCheckAddressing = false;
        else
            fInternalState->fCheckAddressing = (fNumAddressBits && (highPage >= (1UL << (fNumAddressBits - PAGE_SHIFT))));

        fInternalState->fNewMD = true;
        mem->retain();
        fMemory = mem;
        if (fMapper)
        {
#if IOTRACKING
            fInternalState->fTag = IOMemoryTag(kernel_map);
            __IODEQUALIFY(IOMemoryDescriptor *, mem)->prepare((IODirection)
                (kIODirectionDMACommand | (fInternalState->fTag << kIODirectionDMACommandShift)));
            IOTrackingAdd(gIOWireTracking, &fInternalState->fWireTracking, fMemory->getLength(), false);
#endif /* IOTRACKING */
        }
        if (autoPrepare) {
            err = prepare();
            if (err) {
                clearMemoryDescriptor();
            }
        }
    }

    return err;
}

IOReturn
IODMACommand::clearMemoryDescriptor(bool autoComplete)
{
    if (fActive && !autoComplete) return (kIOReturnNotReady);

    if (fMemory)
    {
        while (fActive) complete();
        if (fMapper)
        {
#if IOTRACKING
            __IODEQUALIFY(IOMemoryDescriptor *, fMemory)->complete((IODirection)
                (kIODirectionDMACommand | (fInternalState->fTag << kIODirectionDMACommandShift)));
            IOTrackingRemove(gIOWireTracking, &fInternalState->fWireTracking, fMemory->getLength());
#endif /* IOTRACKING */
        }
        fMemory->release();
        fMemory = 0;
    }

    return (kIOReturnSuccess);
}

const IOMemoryDescriptor *
IODMACommand::getMemoryDescriptor() const
{
    return fMemory;
}

IOMemoryDescriptor *
IODMACommand::getIOMemoryDescriptor() const
{
    IOMemoryDescriptor * mem;

    mem = reserved->fCopyMD;
    if (!mem) mem = __IODEQUALIFY(IOMemoryDescriptor *, fMemory);

    return (mem);
}

IOReturn
IODMACommand::segmentOp(
                        void         *reference,
                        IODMACommand *target,
                        Segment64     segment,
                        void         *segments,
                        UInt32        segmentIndex)
{
    IOOptionBits op = (uintptr_t) reference;
    addr64_t     maxPhys, address;
    uint64_t     length;
    uint32_t     numPages;
    uint32_t     mask;

    IODMACommandInternal * state = target->reserved;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64) && (state->fLocalMapperAlloc || !target->fMapper))
        maxPhys = (1ULL << target->fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    address = segment.fIOVMAddr;
    length  = segment.fLength;

    assert(address);
    assert(length);

    if (!state->fMisaligned)
    {
        mask = (segmentIndex ? target->fAlignMaskInternalSegments : state->fSourceAlignMask);
        state->fMisaligned |= (0 != (mask & address));
        if (state->fMisaligned) DEBG("misaligned address %qx:%qx, %x\n", address, length, mask);
    }
    if (!state->fMisaligned)
    {
        mask = target->fAlignMaskLength;
        state->fMisaligned |= (0 != (mask & length));
        if (state->fMisaligned) DEBG("misaligned length %qx:%qx, %x\n", address, length, mask);
    }

    if (state->fMisaligned && (kWalkPreflight & op))
        return (kIOReturnNotAligned);

    if (!state->fDoubleBuffer)
    {
        if ((address + length - 1) <= maxPhys)
        {
            length = 0;
        }
        else if (address <= maxPhys)
        {
            DEBG("tail %qx, %qx", address, length);
            length = (address + length - maxPhys - 1);
            address = maxPhys + 1;
            DEBG("-> %qx, %qx\n", address, length);
        }
    }

    if (!length)
        return (kIOReturnSuccess);

    numPages = atop_64(round_page_64((address & PAGE_MASK) + length));

    if (kWalkPreflight & op)
    {
        state->fCopyPageCount += numPages;
    }
    else
    {
        vm_page_t lastPage;
        lastPage = NULL;
        if (kWalkPrepare & op)
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                vm_page_set_offset(lastPage, atop_64(address) + idx);
                lastPage = vm_page_get_next(lastPage);
            }
        }

        if (!lastPage || SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
        {
            lastPage = state->fCopyNext;
            for (IOItemCount idx = 0; idx < numPages; idx++)
            {
                if (SHOULD_COPY_DIR(op, target->fMDSummary.fDirection))
                {
                    addr64_t cpuAddr = address;
                    addr64_t remapAddr;
                    uint64_t chunk;

                    if ((kMapped == MAPTYPE(target->fMappingOptions))
                        && target->fMapper)
                    {
                        cpuAddr = target->fMapper->mapToPhysicalAddress(address);
                    }

                    remapAddr = ptoa_64(vm_page_get_phys_page(lastPage));
                    if (!state->fDoubleBuffer)
                    {
                        remapAddr += (address & PAGE_MASK);
                    }
                    chunk = PAGE_SIZE - (address & PAGE_MASK);
                    if (chunk > length)
                        chunk = length;

                    DEBG("cpv: 0x%qx %s 0x%qx, 0x%qx, 0x%02lx\n", remapAddr,
                         (kWalkSyncIn & op) ? "->" : "<-",
                         address, chunk, op);

                    if (kWalkSyncIn & op)
                    { // cppvNoModSnk
                        copypv(remapAddr, cpuAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    else
                    {
                        copypv(cpuAddr, remapAddr, chunk,
                               cppvPsnk | cppvFsnk | cppvPsrc | cppvNoRefSrc );
                    }
                    address += chunk;
                    length  -= chunk;
                }
                lastPage = vm_page_get_next(lastPage);
            }
        }
        state->fCopyNext = lastPage;
    }

    return kIOReturnSuccess;
}

IOBufferMemoryDescriptor *
IODMACommand::createCopyBuffer(IODirection direction, UInt64 length)
{
    mach_vm_address_t mask = 0xFFFFF000;        //state->fSourceAlignMask
    return (IOBufferMemoryDescriptor::inTaskWithPhysicalMask(kernel_task,
                    direction, length, mask));
}

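/*
 * walkAll() drives the bounce-buffer machinery for one prepare/sync/complete
 * step.  Broadly: kWalkPreflight runs segmentOp() to count pages that need
 * copying and then allocates either a low-memory page list or a copy
 * IOBufferMemoryDescriptor; kWalkSyncIn/kWalkSyncOut copy data between the
 * original memory and those bounce pages or the copy buffer; kWalkComplete
 * frees whatever was allocated.
 */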
IOReturn
IODMACommand::walkAll(UInt8 op)
{
    IODMACommandInternal * state = fInternalState;

    IOReturn ret = kIOReturnSuccess;
    UInt32   numSegments;
    UInt64   offset;

    if (kWalkPreflight & op)
    {
        state->fMisaligned    = false;
        state->fDoubleBuffer  = false;
        state->fPrepared      = false;
        state->fCopyNext      = NULL;
        state->fCopyPageAlloc = 0;
        state->fCopyPageCount = 0;
        state->fNextRemapPage = NULL;
        state->fCopyMD        = 0;

        if (!(kWalkDoubleBuffer & op))
        {
            offset = 0;
            numSegments = 0-1;
            ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
        }

        op &= ~kWalkPreflight;

        state->fDoubleBuffer = (state->fMisaligned || (kWalkDoubleBuffer & op));
        if (state->fDoubleBuffer)
            state->fCopyPageCount = atop_64(round_page(state->fPreparedLength));

        if (state->fCopyPageCount)
        {
            vm_page_t mapBase = NULL;

            DEBG("preflight fCopyPageCount %d\n", state->fCopyPageCount);

            if (!fMapper && !state->fDoubleBuffer)
            {
                kern_return_t kr;

                if (fMapper) panic("fMapper copying");

                kr = vm_page_alloc_list(state->fCopyPageCount,
                                        KMA_LOMEM | KMA_NOPAGEWAIT, &mapBase);
                if (KERN_SUCCESS != kr)
                {
                    DEBG("vm_page_alloc_list(%d) failed (%d)\n", state->fCopyPageCount, kr);
                    mapBase = NULL;
                }
            }

            if (mapBase)
            {
                state->fCopyPageAlloc = mapBase;
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
                state->fPrepared = true;
                op &= ~(kWalkSyncIn | kWalkSyncOut);
            }
            else
            {
                DEBG("alloc IOBMD\n");
                state->fCopyMD = createCopyBuffer(fMDSummary.fDirection, state->fPreparedLength);

                if (state->fCopyMD)
                {
                    ret = kIOReturnSuccess;
                    state->fPrepared = true;
                }
                else
                {
                    DEBG("IODMACommand !alloc IOBMD");
                    return (kIOReturnNoResources);
                }
            }
        }
    }

    if (state->fPrepared && ((kWalkSyncIn | kWalkSyncOut) & op))
    {
        if (state->fCopyPageCount)
        {
            DEBG("sync fCopyPageCount %d\n", state->fCopyPageCount);

            if (state->fCopyPageAlloc)
            {
                state->fCopyNext = state->fCopyPageAlloc;
                offset = 0;
                numSegments = 0-1;
                ret = genIOVMSegments(op, segmentOp, (void *)(uintptr_t) op, &offset, state, &numSegments);
            }
            else if (state->fCopyMD)
            {
                DEBG("sync IOBMD\n");

                if (SHOULD_COPY_DIR(op, fMDSummary.fDirection))
                {
                    IOMemoryDescriptor *poMD = const_cast<IOMemoryDescriptor *>(fMemory);

                    IOByteCount bytes;

                    if (kWalkSyncIn & op)
                        bytes = poMD->writeBytes(state->fPreparedOffset,
                                                 state->fCopyMD->getBytesNoCopy(),
                                                 state->fPreparedLength);
                    else
                        bytes = poMD->readBytes(state->fPreparedOffset,
                                                state->fCopyMD->getBytesNoCopy(),
                                                state->fPreparedLength);
                    DEBG("fCopyMD %s %lx bytes\n", (kWalkSyncIn & op) ? "wrote" : "read", bytes);
                    ret = (bytes == state->fPreparedLength) ? kIOReturnSuccess : kIOReturnUnderrun;
                }
                else
                    ret = kIOReturnSuccess;
            }
        }
    }

    if (kWalkComplete & op)
    {
        if (state->fCopyPageAlloc)
        {
            vm_page_free_list(state->fCopyPageAlloc, FALSE);
            state->fCopyPageAlloc = 0;
            state->fCopyPageCount = 0;
        }
        if (state->fCopyMD)
        {
            state->fCopyMD->release();
            state->fCopyMD = 0;
        }

        state->fPrepared = false;
    }
    return (ret);
}

UInt8
IODMACommand::getNumAddressBits(void)
{
    return (fNumAddressBits);
}

UInt32
IODMACommand::getAlignment(void)
{
    return (fAlignMask + 1);
}

uint32_t
IODMACommand::getAlignmentLength(void)
{
    return (fAlignMaskLength + 1);
}

uint32_t
IODMACommand::getAlignmentInternalSegments(void)
{
    return (fAlignMaskInternalSegments + 1);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction        outSegFunc,
                                       const SegmentOptions * segmentOptions,
                                       uint32_t               mappingOptions,
                                       IOMapper             * mapper,
                                       UInt64                 offset,
                                       UInt64                 length,
                                       bool                   flushCache,
                                       bool                   synchronize)
{
    IOReturn ret;

    if (fActive) return kIOReturnNotPermitted;

    ret = setSpecification(outSegFunc, segmentOptions, mappingOptions, mapper);
    if (kIOReturnSuccess != ret) return (ret);

    ret = prepare(offset, length, flushCache, synchronize);

    return (ret);
}

IOReturn
IODMACommand::prepareWithSpecification(SegmentFunction outSegFunc,
                                       UInt8           numAddressBits,
                                       UInt64          maxSegmentSize,
                                       MappingOptions  mappingOptions,
                                       UInt64          maxTransferSize,
                                       UInt32          alignment,
                                       IOMapper       *mapper,
                                       UInt64          offset,
                                       UInt64          length,
                                       bool            flushCache,
                                       bool            synchronize)
{
    SegmentOptions segmentOptions =
    {
        .fStructSize                = sizeof(segmentOptions),
        .fNumAddressBits            = numAddressBits,
        .fMaxSegmentSize            = maxSegmentSize,
        .fMaxTransferSize           = maxTransferSize,
        .fAlignment                 = alignment,
        .fAlignmentLength           = 1,
        .fAlignmentInternalSegments = alignment
    };

    return (prepareWithSpecification(outSegFunc, &segmentOptions, mappingOptions, mapper,
                                     offset, length, flushCache, synchronize));
}

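/*
 * prepare() readies the current memory descriptor for DMA over [offset,
 * offset + length).  Nested calls are allowed only for the same range.  On
 * the first call it decides whether segments can be generated straight from
 * the descriptor (fCursor) or whether a preflight walk is needed, flushes the
 * CPU cache for noncoherent mappings when asked, and, if a mapper is set,
 * asks it (via kIOMDDMAMap) to allocate I/O virtual space for the transfer.
 */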
IOReturn
IODMACommand::prepare(UInt64 offset, UInt64 length, bool flushCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    uint32_t               mappingOptions = fMappingOptions;

    // check specification has been set
    if (!fOutSeg) return (kIOReturnNotReady);

    if (!length) length = fMDSummary.fLength;

    if (length > fMaxTransferSize) return kIOReturnNoSpace;

    if (fActive++)
    {
        if ((state->fPreparedOffset != offset)
            || (state->fPreparedLength != length))
            ret = kIOReturnNotReady;
    }
    else
    {
        if (fAlignMaskLength & length) return (kIOReturnNotAligned);

        state->fPreparedOffset = offset;
        state->fPreparedLength = length;

        state->fMapContig      = false;
        state->fMisaligned     = false;
        state->fDoubleBuffer   = false;
        state->fPrepared       = false;
        state->fCopyNext       = NULL;
        state->fCopyPageAlloc  = 0;
        state->fCopyPageCount  = 0;
        state->fNextRemapPage  = NULL;
        state->fCopyMD         = 0;
        state->fLocalMapperAlloc       = 0;
        state->fLocalMapperAllocLength = 0;

        state->fLocalMapper = (fMapper && (fMapper != IOMapper::gSystem));

        state->fSourceAlignMask = fAlignMask;
        if (fMapper)
            state->fSourceAlignMask &= page_mask;

        state->fCursor = state->fIterateOnly
                        || (!state->fCheckAddressing
                            && (!state->fSourceAlignMask
                                || ((fMDSummary.fPageAlign & (1 << 31)) && (0 == (fMDSummary.fPageAlign & state->fSourceAlignMask)))));

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkPrepare | kWalkPreflight;
            if (synchronize)
                op |= kWalkSyncOut;
            ret = walkAll(op);
        }

        if (IS_NONCOHERENT(mappingOptions) && flushCache)
        {
            if (state->fCopyMD)
            {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOStore, 0, length);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOStore, offset, length);
            }
        }

        if (fMapper)
        {
            IOMDDMAMapArgs mapArgs;
            bzero(&mapArgs, sizeof(mapArgs));
            mapArgs.fMapper  = fMapper;
            mapArgs.fCommand = this;
            mapArgs.fMapSpec.device         = state->fDevice;
            mapArgs.fMapSpec.alignment      = fAlignMask + 1;
            mapArgs.fMapSpec.numAddressBits = fNumAddressBits ? fNumAddressBits : 64;
            mapArgs.fLength = state->fPreparedLength;
            const IOMemoryDescriptor * md = state->fCopyMD;
            if (md) { mapArgs.fOffset = 0; }
            else
            {
                md = fMemory;
                mapArgs.fOffset = state->fPreparedOffset;
            }
            ret = md->dmaCommandOperation(kIOMDDMAMap | state->fIterateOnly, &mapArgs, sizeof(mapArgs));
//IOLog("dma %p 0x%x 0x%qx-0x%qx 0x%qx-0x%qx\n", this, ret, state->fPreparedOffset, state->fPreparedLength, mapArgs.fAlloc, mapArgs.fAllocLength);

            if (kIOReturnSuccess == ret)
            {
                state->fLocalMapperAlloc       = mapArgs.fAlloc;
                state->fLocalMapperAllocLength = mapArgs.fAllocLength;
                state->fMapContig              = mapArgs.fMapContig;
            }
            if (NULL != IOMapper::gSystem) ret = kIOReturnSuccess;
        }
        if (kIOReturnSuccess == ret) state->fPrepared = true;
    }
    return ret;
}

IOReturn
IODMACommand::complete(bool invalidateCache, bool synchronize)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;

    if (fActive < 1)
        return kIOReturnNotReady;

    if (!--fActive)
    {
        if (IS_NONCOHERENT(fMappingOptions) && invalidateCache)
        {
            if (state->fCopyMD)
            {
                state->fCopyMD->performOperation(kIOMemoryIncoherentIOFlush, 0, state->fPreparedLength);
            }
            else
            {
                IOMemoryDescriptor * md = const_cast<IOMemoryDescriptor *>(fMemory);
                md->performOperation(kIOMemoryIncoherentIOFlush, state->fPreparedOffset, state->fPreparedLength);
            }
        }

        if (!state->fCursor)
        {
            IOOptionBits op = kWalkComplete;
            if (synchronize)
                op |= kWalkSyncIn;
            ret = walkAll(op);
        }
        if (state->fLocalMapperAlloc)
        {
            if (state->fLocalMapperAllocLength)
            {
                fMapper->iovmUnmapMemory(getIOMemoryDescriptor(), this,
                                         state->fLocalMapperAlloc, state->fLocalMapperAllocLength);
            }
            state->fLocalMapperAlloc       = 0;
            state->fLocalMapperAllocLength = 0;
        }

        state->fPrepared = false;
    }

    return ret;
}

IOReturn
IODMACommand::getPreparedOffsetAndLength(UInt64 * offset, UInt64 * length)
{
    IODMACommandInternal * state = fInternalState;
    if (fActive < 1)
        return (kIOReturnNotReady);

    if (offset)
        *offset = state->fPreparedOffset;
    if (length)
        *length = state->fPreparedLength;

    return (kIOReturnSuccess);
}

IOReturn
IODMACommand::synchronize(IOOptionBits options)
{
    IODMACommandInternal * state = fInternalState;
    IOReturn               ret   = kIOReturnSuccess;
    IOOptionBits           op;

    if (kIODirectionOutIn == (kIODirectionOutIn & options))
        return kIOReturnBadArgument;

    if (fActive < 1)
        return kIOReturnNotReady;

    op = 0;
    if (kForceDoubleBuffer & options)
    {
        if (state->fDoubleBuffer)
            return kIOReturnSuccess;
        if (state->fCursor)
            state->fCursor = false;
        else
            ret = walkAll(kWalkComplete);

        op |= kWalkPrepare | kWalkPreflight | kWalkDoubleBuffer;
    }
    else if (state->fCursor)
        return kIOReturnSuccess;

    if (kIODirectionIn & options)
        op |= kWalkSyncIn | kWalkSyncAlways;
    else if (kIODirectionOut & options)
        op |= kWalkSyncOut | kWalkSyncAlways;

    ret = walkAll(op);

    return ret;
}

struct IODMACommandTransferContext
{
    void * buffer;
    UInt64 bufferOffset;
    UInt64 remaining;
    UInt32 op;
};
enum
{
    kIODMACommandTransferOpReadBytes  = 1,
    kIODMACommandTransferOpWriteBytes = 2
};

IOReturn
IODMACommand::transferSegment(void         *reference,
                              IODMACommand *target,
                              Segment64     segment,
                              void         *segments,
                              UInt32        segmentIndex)
{
    IODMACommandTransferContext * context = (IODMACommandTransferContext *) reference;
    UInt64   length  = min(segment.fLength, context->remaining);
    addr64_t ioAddr  = segment.fIOVMAddr;
    addr64_t cpuAddr = ioAddr;

    context->remaining -= length;

    while (length)
    {
        UInt64 copyLen = length;
        if ((kMapped == MAPTYPE(target->fMappingOptions))
            && target->fMapper)
        {
            cpuAddr = target->fMapper->mapToPhysicalAddress(ioAddr);
            copyLen = min(copyLen, page_size - (ioAddr & (page_size - 1)));
            ioAddr += copyLen;
        }

        switch (context->op)
        {
            case kIODMACommandTransferOpReadBytes:
                copypv(cpuAddr, context->bufferOffset + (addr64_t) context->buffer, copyLen,
                       cppvPsrc | cppvNoRefSrc | cppvFsnk | cppvKmap);
                break;
            case kIODMACommandTransferOpWriteBytes:
                copypv(context->bufferOffset + (addr64_t) context->buffer, cpuAddr, copyLen,
                       cppvPsnk | cppvFsnk | cppvNoRefSrc | cppvNoModSnk | cppvKmap);
                break;
        }
        length                -= copyLen;
        context->bufferOffset += copyLen;
    }

    return (context->remaining ? kIOReturnSuccess : kIOReturnOverrun);
}

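/*
 * transfer() copies bytes between a kernel buffer and the prepared range by
 * walking the generated segments with transferSegment(); readBytes() and
 * writeBytes() are thin wrappers that select the copy direction.  The return
 * value is the number of bytes actually copied.
 */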
UInt64
IODMACommand::transfer(IOOptionBits transferOp, UInt64 offset, void * buffer, UInt64 length)
{
    IODMACommandInternal *      state = fInternalState;
    IODMACommandTransferContext context;
    Segment64                   segments[1];
    UInt32                      numSegments = 0-1;

    if (fActive < 1)
        return (0);

    if (offset >= state->fPreparedLength)
        return (0);
    length = min(length, state->fPreparedLength - offset);

    context.buffer       = buffer;
    context.bufferOffset = 0;
    context.remaining    = length;
    context.op           = transferOp;
    (void) genIOVMSegments(kWalkClient, transferSegment, &context, &offset, &segments[0], &numSegments);

    return (length - context.remaining);
}

UInt64
IODMACommand::readBytes(UInt64 offset, void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpReadBytes, offset, bytes, length));
}

UInt64
IODMACommand::writeBytes(UInt64 offset, const void *bytes, UInt64 length)
{
    return (transfer(kIODMACommandTransferOpWriteBytes, offset, const_cast<void *>(bytes), length));
}

IOReturn
IODMACommand::genIOVMSegments(UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) fOutSeg,
                            offsetP, segmentsP, numSegmentsP));
}

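/*
 * The internal genIOVMSegments() walk: pull ranges from the memory descriptor
 * (or the copy buffer), coalesce physically contiguous ones, then trim each
 * candidate segment against maxPhys (remapping through the bounce pages when
 * a client walk meets an unreachable address), fMaxSegmentSize and the
 * length / internal-segment alignment masks, handing each finished segment to
 * outSegFunc until the prepared range or the caller's segment array runs out.
 */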
IOReturn
IODMACommand::genIOVMSegments(uint32_t op,
                              InternalSegmentFunction outSegFunc,
                              void   *reference,
                              UInt64 *offsetP,
                              void   *segmentsP,
                              UInt32 *numSegmentsP)
{
    IODMACommandInternal * internalState = fInternalState;
    IOOptionBits           mdOp = kIOMDWalkSegments;
    IOReturn               ret  = kIOReturnSuccess;

    if (!(kWalkComplete & op) && !fActive)
        return kIOReturnNotReady;

    if (!offsetP || !segmentsP || !numSegmentsP || !*numSegmentsP)
        return kIOReturnBadArgument;

    IOMDDMAWalkSegmentArgs *state =
        (IOMDDMAWalkSegmentArgs *)(void *) fState;

    UInt64 offset    = *offsetP + internalState->fPreparedOffset;
    UInt64 memLength = internalState->fPreparedOffset + internalState->fPreparedLength;

    if (offset >= memLength)
        return kIOReturnOverrun;

    if ((offset == internalState->fPreparedOffset) || (offset != state->fOffset) || internalState->fNewMD) {
        state->fOffset                = 0;
        state->fIOVMAddr              = 0;
        internalState->fNextRemapPage = NULL;
        internalState->fNewMD         = false;
        state->fMapped                = (0 != fMapper);
        mdOp                          = kIOMDFirstSegment;
    };

    UInt32    segIndex = 0;
    UInt32    numSegments = *numSegmentsP;
    Segment64 curSeg = { 0, 0 };
    addr64_t  maxPhys;

    if (fNumAddressBits && (fNumAddressBits < 64))
        maxPhys = (1ULL << fNumAddressBits);
    else
        maxPhys = 0;
    maxPhys--;

    while (state->fIOVMAddr || (state->fOffset < memLength))
    {
        // state = next seg
        if (!state->fIOVMAddr) {

            IOReturn rtn;

            state->fOffset = offset;
            state->fLength = memLength - offset;

            if (internalState->fMapContig && internalState->fLocalMapperAlloc)
            {
                state->fIOVMAddr = internalState->fLocalMapperAlloc + offset - internalState->fPreparedOffset;
                rtn = kIOReturnSuccess;
#if 0
                {
                    uint64_t checkOffset;
                    IOPhysicalLength segLen;
                    for (checkOffset = 0; checkOffset < state->fLength; )
                    {
                        addr64_t phys = const_cast<IOMemoryDescriptor *>(fMemory)->getPhysicalSegment(checkOffset + offset, &segLen, kIOMemoryMapperNone);
                        if (fMapper->mapAddr(state->fIOVMAddr + checkOffset) != phys)
                        {
                            panic("%llx != %llx:%llx, %llx phys: %llx %llx\n", offset,
                                  state->fIOVMAddr + checkOffset, fMapper->mapAddr(state->fIOVMAddr + checkOffset), state->fLength,
                                  phys, checkOffset);
                        }
                        checkOffset += page_size - (phys & page_mask);
                    }
                }
#endif
            }
            else
            {
                const IOMemoryDescriptor * memory =
                    internalState->fCopyMD ? internalState->fCopyMD : fMemory;
                rtn = memory->dmaCommandOperation(mdOp, fState, sizeof(fState));
                mdOp = kIOMDWalkSegments;
            }

            if (rtn == kIOReturnSuccess)
            {
                assert(state->fIOVMAddr);
                assert(state->fLength);
                if ((curSeg.fIOVMAddr + curSeg.fLength) == state->fIOVMAddr) {
                    UInt64 length = state->fLength;
                    offset          += length;
                    curSeg.fLength  += length;
                    state->fIOVMAddr = 0;
                }
            }
            else if (rtn == kIOReturnOverrun)
                state->fIOVMAddr = state->fLength = 0; // At end
            else
                return rtn;
        }

        // seg = state, offset = end of seg
        if (!curSeg.fIOVMAddr)
        {
            UInt64 length = state->fLength;
            offset          += length;
            curSeg.fIOVMAddr = state->fIOVMAddr;
            curSeg.fLength   = length;
            state->fIOVMAddr = 0;
        }

        if (!state->fIOVMAddr)
        {
            // maxPhys
            if ((kWalkClient & op) && (curSeg.fIOVMAddr + curSeg.fLength - 1) > maxPhys)
            {
                if (internalState->fCursor)
                {
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnMessageTooLarge;
                    break;
                }
                else if (curSeg.fIOVMAddr <= maxPhys)
                {
                    UInt64 remain, newLength;

                    newLength = (maxPhys + 1 - curSeg.fIOVMAddr);
                    DEBG("trunc %qx, %qx-> %qx\n", curSeg.fIOVMAddr, curSeg.fLength, newLength);
                    remain = curSeg.fLength - newLength;
                    state->fIOVMAddr = newLength + curSeg.fIOVMAddr;
                    curSeg.fLength   = newLength;
                    state->fLength   = remain;
                    offset          -= remain;
                }
                else
                {
                    UInt64    addr = curSeg.fIOVMAddr;
                    ppnum_t   addrPage = atop_64(addr);
                    vm_page_t remap = NULL;
                    UInt64    remain, newLength;

                    DEBG("sparse switch %qx, %qx ", addr, curSeg.fLength);

                    remap = internalState->fNextRemapPage;
                    if (remap && (addrPage == vm_page_get_offset(remap)))
                    {
                    }
                    else for (remap = internalState->fCopyPageAlloc;
                              remap && (addrPage != vm_page_get_offset(remap));
                              remap = vm_page_get_next(remap))
                    {
                    }

                    if (!remap) panic("no remap page found");

                    curSeg.fIOVMAddr = ptoa_64(vm_page_get_phys_page(remap))
                                        + (addr & PAGE_MASK);
                    internalState->fNextRemapPage = vm_page_get_next(remap);

                    newLength = PAGE_SIZE - (addr & PAGE_MASK);
                    if (newLength < curSeg.fLength)
                    {
                        remain = curSeg.fLength - newLength;
                        state->fIOVMAddr = addr + newLength;
                        curSeg.fLength   = newLength;
                        state->fLength   = remain;
                        offset          -= remain;
                    }
                    DEBG("-> %qx, %qx offset %qx\n", curSeg.fIOVMAddr, curSeg.fLength, offset);
                }
            }

            // reduce size of output segment
            uint64_t reduce, leftover = 0;

            // fMaxSegmentSize
            if (curSeg.fLength > fMaxSegmentSize)
            {
                leftover       += curSeg.fLength - fMaxSegmentSize;
                curSeg.fLength  = fMaxSegmentSize;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            // alignment current length

            reduce = (curSeg.fLength & fAlignMaskLength);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover       += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            // alignment next address

            reduce = (state->fIOVMAddr & fAlignMaskInternalSegments);
            if (reduce && (curSeg.fLength > reduce))
            {
                leftover       += reduce;
                curSeg.fLength -= reduce;
                state->fIOVMAddr = curSeg.fLength + curSeg.fIOVMAddr;
            }

            if (leftover)
            {
                DEBG("reduce seg by 0x%llx @ 0x%llx [0x%llx, 0x%llx]\n",
                     leftover, offset,
                     curSeg.fIOVMAddr, curSeg.fLength);
                state->fLength = leftover;
                offset -= leftover;
            }

            //

            if (internalState->fCursor)
            {
                bool     misaligned;
                uint32_t mask;

                mask = (segIndex ? fAlignMaskInternalSegments : internalState->fSourceAlignMask);
                misaligned = (0 != (mask & curSeg.fIOVMAddr));
                if (!misaligned)
                {
                    mask = fAlignMaskLength;
                    misaligned |= (0 != (mask & curSeg.fLength));
                }
                if (misaligned)
                {
                    if (misaligned) DEBG("cursor misaligned %qx:%qx\n", curSeg.fIOVMAddr, curSeg.fLength);
                    curSeg.fIOVMAddr = 0;
                    ret = kIOReturnNotAligned;
                    break;
                }
            }

            if (offset >= memLength)
            {
                curSeg.fLength -= (offset - memLength);
                offset = memLength;
                state->fIOVMAddr = state->fLength = 0; // At end
                break;
            }
        }

        if (state->fIOVMAddr) {
            if ((segIndex + 1 == numSegments))
                break;

            ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
            curSeg.fIOVMAddr = 0;
            if (kIOReturnSuccess != ret)
                break;
        }
    }

    if (curSeg.fIOVMAddr) {
        ret = (*outSegFunc)(reference, this, curSeg, segmentsP, segIndex++);
    }

    if (kIOReturnSuccess == ret)
    {
        state->fOffset = offset;
        *offsetP       = offset - internalState->fPreparedOffset;
        *numSegmentsP  = segIndex;
    }
    return ret;
}

IOReturn
IODMACommand::clientOutputSegment(
        void *reference, IODMACommand *target,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    SegmentFunction segmentFunction = (SegmentFunction) reference;
    IOReturn        ret = kIOReturnSuccess;

    if (target->fNumAddressBits && (target->fNumAddressBits < 64)
        && ((segment.fIOVMAddr + segment.fLength - 1) >> target->fNumAddressBits)
        && (target->reserved->fLocalMapperAlloc || !target->fMapper))
    {
        DEBG("kIOReturnMessageTooLarge(fNumAddressBits) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    if (!(*segmentFunction)(target, segment, vSegList, outSegIndex))
    {
        DEBG("kIOReturnMessageTooLarge(fOutSeg) %qx, %qx\n", segment.fIOVMAddr, segment.fLength);
        ret = kIOReturnMessageTooLarge;
    }

    return (ret);
}

IOReturn
IODMACommand::genIOVMSegments(SegmentFunction segmentFunction,
                              UInt64   *offsetP,
                              void     *segmentsP,
                              UInt32   *numSegmentsP)
{
    return (genIOVMSegments(kWalkClient, clientOutputSegment, (void *) segmentFunction,
                            offsetP, segmentsP, numSegmentsP));
}

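// Built-in segment output functions: each one packs a Segment64 into the
// caller's list as host, big-endian or little-endian 32-bit or 64-bit
// address/length pairs.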
bool
IODMACommand::OutputHost32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment32 *base = (Segment32 *) vSegList;
    base[outSegIndex].fIOVMAddr = (UInt32) segment.fIOVMAddr;
    base[outSegIndex].fLength   = (UInt32) segment.fLength;
    return true;
}

bool
IODMACommand::OutputBig32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteBigInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteBigInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle32(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment32);
    const UInt offLen  = offAddr + sizeof(UInt32);
    OSWriteLittleInt32(vSegList, offAddr, (UInt32) segment.fIOVMAddr);
    OSWriteLittleInt32(vSegList, offLen,  (UInt32) segment.fLength);
    return true;
}

bool
IODMACommand::OutputHost64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    Segment64 *base = (Segment64 *) vSegList;
    base[outSegIndex] = segment;
    return true;
}

bool
IODMACommand::OutputBig64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteBigInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteBigInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}

bool
IODMACommand::OutputLittle64(IODMACommand *,
        Segment64 segment, void *vSegList, UInt32 outSegIndex)
{
    const UInt offAddr = outSegIndex * sizeof(Segment64);
    const UInt offLen  = offAddr + sizeof(UInt64);
    OSWriteLittleInt64(vSegList, offAddr, (UInt64) segment.fIOVMAddr);
    OSWriteLittleInt64(vSegList, offLen,  (UInt64) segment.fLength);
    return true;
}