// Source: apple/xnu.git (git.saurik.com mirror), tag xnu-3789.31.2
// File:   iokit/Tests/TestIOMemoryDescriptor.cpp
1 /*
2 * Copyright (c) 2014-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38 #include "Tests.h"
39
40 #ifndef __LP64__
41 #include <IOKit/IOSubMemoryDescriptor.h>
42 #endif /* !__LP64__ */
43 #include <IOKit/IOSubMemoryDescriptor.h>
44 #include <IOKit/IOMultiMemoryDescriptor.h>
45 #include <IOKit/IOBufferMemoryDescriptor.h>
46
47 #include <IOKit/IOKitDebug.h>
48 #include <libkern/OSDebug.h>
49 #include <sys/uio.h>
50
51 __BEGIN_DECLS
52 #include <vm/pmap.h>
53 #include <vm/vm_pageout.h>
54 #include <mach/memory_object_types.h>
55 #include <device/device_port.h>
56
57 #include <mach/vm_prot.h>
58 #include <mach/mach_vm.h>
59 #include <vm/vm_fault.h>
60 #include <vm/vm_protos.h>
61 __END_DECLS
62
63
64 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
65
66 #if DEVELOPMENT || DEBUG
67
68 extern SInt32 gIOMemoryReferenceCount;
69
70 static int IOMultMemoryDescriptorTest(int newValue)
71 {
72 IOMemoryDescriptor * mds[3];
73 IOMultiMemoryDescriptor * mmd;
74 IOMemoryMap * map;
75 void * addr;
76 uint8_t * data;
77 uint32_t i;
78 IOAddressRange ranges[2];
79
80 data = (typeof(data)) IOMallocAligned(ptoa(8), page_size);
81 for (i = 0; i < ptoa(8); i++) data[i] = atop(i) | 0xD0;
82
83 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
84 ranges[0].length = ptoa(4);
85 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
86 ranges[1].length = ptoa(4);
87
88 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
89
90 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
91 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
92
93 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
94 mds[2]->release();
95 mds[1]->release();
96 mds[0]->release();
97 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
98 mmd->release();
99 assert(map);
100
101 addr = (void *) map->getVirtualAddress();
102 assert(ptoa(4) == map->getLength());
103 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
104 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
105 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
106 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
107 map->release();
108 IOFreeAligned(data, ptoa(8));
109
110 return (0);
111 }
112
113 // <rdar://problem/26375234>
114 static IOReturn
115 ZeroLengthTest(int newValue)
116 {
117 IOMemoryDescriptor * md;
118
119 md = IOMemoryDescriptor::withAddressRange(
120 0, 0, kIODirectionNone, current_task());
121 assert(md);
122 md->prepare();
123 md->complete();
124 md->release();
125 return (0);
126 }
127
128 // <rdar://problem/27002624>
129 static IOReturn
130 BadFixedAllocTest(int newValue)
131 {
132 IOBufferMemoryDescriptor * bmd;
133 IOMemoryMap * map;
134
135 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
136 kIODirectionIn | kIOMemoryPageable, ptoa(1));
137 assert(bmd);
138 map = bmd->createMappingInTask(kernel_task, 0x2000, 0);
139 assert(!map);
140
141 bmd->release();
142 return (0);
143 }
144
145 // <rdar://problem/26466423>
146 static IOReturn
147 IODirectionPrepareNoZeroFillTest(int newValue)
148 {
149 IOBufferMemoryDescriptor * bmd;
150
151 bmd = IOBufferMemoryDescriptor::inTaskWithOptions(NULL,
152 kIODirectionIn | kIOMemoryPageable, ptoa(24));
153 assert(bmd);
154 bmd->prepare((IODirection)(kIODirectionIn | kIODirectionPrepareNoZeroFill));
155 bmd->prepare(kIODirectionIn);
156 bmd->complete((IODirection)(kIODirectionIn | kIODirectionCompleteWithDataValid));
157 bmd->complete(kIODirectionIn);
158 bmd->release();
159 return (0);
160 }
161
162
// <rdar://problem/28190483>
//
// Creates a read-only kernel mapping of a pageable user-task buffer,
// optionally with kIOMemoryMapCopyOnWrite, then checks whether a later
// write through the user address is (or is not) visible through the
// read-only mapping, and logs how long map() took.
static IOReturn
IOMemoryMapTest(uint32_t options)
{
    IOBufferMemoryDescriptor * bmd;
    IOMemoryDescriptor * md;
    IOMemoryMap * map;
    uint32_t data;
    user_addr_t p;
    uint8_t * p2;
    int r;
    uint64_t time, nano;

    // Pageable buffer owned by the current (user) task; the +0x800 slack lets
    // the test range start deliberately misaligned inside the buffer.
    bmd = IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
            kIODirectionOutIn | kIOMemoryPageable, 0x4018+0x800);
    assert(bmd);
    p = (typeof(p)) bmd->getBytesNoCopy();
    p += 0x800;
    // Seed one marker word per page via copyout (p is a user-space address).
    data = 0x11111111;
    r = copyout(&data, p, sizeof(data));
    assert(r == 0);
    data = 0x22222222;
    r = copyout(&data, p + 0x1000, sizeof(data));
    assert(r == 0);
    data = 0x33333333;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);
    data = 0x44444444;
    r = copyout(&data, p + 0x3000, sizeof(data));
    assert(r == 0);

    // Wrap the (misaligned, 0x4018-byte) user range; options may add
    // kIOMemoryMapCopyOnWrite.
    md = IOMemoryDescriptor::withAddressRange(p, 0x4018,
            kIODirectionOut | options,
            current_task());
    assert(md);
    // Time just the map() call for the log line below.
    time = mach_absolute_time();
    map = md->map(kIOMapReadOnly);
    time = mach_absolute_time() - time;
    assert(map);
    absolutetime_to_nanoseconds(time, &nano);

    // The read-only mapping must initially show all four seeded markers.
    p2 = (typeof(p2)) map->getVirtualAddress();
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x33 == p2[0x2000]);
    assert(0x44 == p2[0x3000]);

    // Overwrite the third page through the original user address.
    data = 0x99999999;
    r = copyout(&data, p + 0x2000, sizeof(data));
    assert(r == 0);

    // Untouched pages are unchanged either way; the overwritten page stays at
    // its old value only when the mapping is copy-on-write.
    assert(0x11 == p2[0]);
    assert(0x22 == p2[0x1000]);
    assert(0x44 == p2[0x3000]);
    if (kIOMemoryMapCopyOnWrite & options) assert(0x33 == p2[0x2000]);
    else assert(0x99 == p2[0x2000]);

    IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
          kIOMemoryMapCopyOnWrite & options ? "kIOMemoryMapCopyOnWrite" : "",
          nano);

    map->release();
    md->release();
    bmd->release();

    return (kIOReturnSuccess);
}
230
231 static int
232 IOMemoryMapCopyOnWriteTest(int newValue)
233 {
234 IOMemoryMapTest(0);
235 IOMemoryMapTest(kIOMemoryMapCopyOnWrite);
236 return (0);
237 }
238
// Entry point for the sysctl-driven IOMemoryDescriptor tests (DEVELOPMENT /
// DEBUG kernels only).  Runs the focused sub-tests above, then an exhaustive
// sweep that builds descriptors over many (offset, size, range-split)
// combinations of a 16MB buffer and cross-checks mapped contents against
// readBytes().  The #if 0 block holds older, manually selected sub-tests
// keyed off newValue.
int IOMemoryDescriptorTest(int newValue)
{
    int result;

    IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount);

#if 0
    if (6 == newValue)
    {
        // Nested multi descriptor: an MMD whose second member is itself an
        // MMD of three kernel buffer descriptors.
        IOMemoryDescriptor * sbmds[3];
        IOMultiMemoryDescriptor * smmd;
        IOMemoryDescriptor * mds[2];
        IOMultiMemoryDescriptor * mmd;
        IOMemoryMap * map;

        sbmds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        sbmds[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(2));
        sbmds[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(3));
        smmd = IOMultiMemoryDescriptor::withDescriptors(&sbmds[0], sizeof(sbmds)/sizeof(sbmds[0]), kIODirectionOutIn, false);

        mds[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task, kIODirectionOutIn | kIOMemoryKernelUserShared, ptoa(1));
        mds[1] = smmd;
        mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
        map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere);
        assert(map);
        map->release();
        mmd->release();
        mds[0]->release();
        mds[1]->release();
        sbmds[0]->release();
        sbmds[1]->release();
        sbmds[2]->release();

        return (0);
    }
    else if (5 == newValue)
    {
        // IODMACommand segment generation with alignment constraints over
        // three range configurations (single, split, overlapping).
        IOReturn             ret;
        IOMemoryDescriptor * md;
        IODMACommand       * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize = sizeof(segOptions),
            .fNumAddressBits = 64,
            .fMaxSegmentSize = 4096,
            .fMaxTransferSize = 128*1024,
            .fAlignment = 4,
            .fAlignmentLength = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        // Uses this function's own (wired kernel text) address as a
        // convenient source; later rows reference the first row's address.
        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };
        uint32_t test;

        for (test = 0; test < 3; test++)
        {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                    ranges[test][0].address, ranges[test][0].length,
                    ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            assert(md);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);
            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                                                  IODMACommand::kMapped, NULL, NULL);
            assert(dma);
            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret)
            {
                IODMACommand::Segment64 segments[1];
                UInt32 numSegments;
                UInt64 offset;

                // Pull segments one at a time until the whole length is covered.
                offset = 0;
                do
                {
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                }
                while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
                dma->release();
            }
            md->release();
        }

        return (kIOReturnSuccess);
    }
    else if (4 == newValue)
    {
        // Timing test: dmaMap() of a large (256MB) buffer through the "isp"
        // device's mapper, then IODMACommand setMemoryDescriptor timing.
        IOService * isp;
        IOMapper * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand * dma;
        IOReturn ret;
        size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                bufSize, page_size);

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        assert(md1);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
                32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);

        assert(dma);

        // Time setMemoryDescriptor (which maps the whole descriptor).
        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);


        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset;

        offset = 0;
        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        md1->release();

        return (kIOReturnSuccess);
    }

    if (3 == newValue)
    {
        // IOMemoryMap::redirect(): point map1 at md2's pages and back,
        // checking the contents swap follows the redirect.
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap * map1;
        IOMemoryMap * map2;
        uint32_t * buf1;
        uint32_t * buf2;
        IOReturn err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                64*1024, page_size);
        assert(md1);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map1);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                64*1024, page_size);
        assert(md2);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map2);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64*1024L);
        memset(buf2, 0x22, 64*1024L);

        kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);
        map1->release();
        map2->release();
        md1->release();
        md2->release();
    }
#endif

    // Always-run sub-tests; any nonzero result aborts immediately.
    result = IOMemoryMapCopyOnWriteTest(newValue);
    if (result) return (result);

    result = IOMultMemoryDescriptorTest(newValue);
    if (result) return (result);

    result = ZeroLengthTest(newValue);
    if (result) return (result);

    result = IODirectionPrepareNoZeroFillTest(newValue);
    if (result) return (result);

    result = BadFixedAllocTest(newValue);
    if (result) return (result);

    // Exhaustive sweep: build descriptors over a 16MB buffer whose words hold
    // their own index, map sub-ranges, and verify both the mapping and
    // readBytes() return the expected indices.
    IOGeneralMemoryDescriptor * md;
    vm_offset_t data[2];
    vm_size_t bsize = 16*1024*1024;
    vm_size_t srcsize, srcoffset, mapoffset, size;
    kern_return_t kr;

    data[0] = data[1] = 0;
    kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
    assert(KERN_SUCCESS == kr);

    // Mark a couple of sub-ranges VM_INHERIT_NONE so the buffer's VM entries
    // are not one uniform region.
    vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    // Pattern: word i contains i, so any offset maps back to an expected value.
    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
    {
        ((uint32_t*)data[0])[idx] = idx;
    }

    // Sweep source offsets and sizes with non-power-of-two strides.
    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c))
    {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc))
        {
            IOAddressRange ranges[3];
            uint32_t rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length = srcsize;
            ranges[1].address = ranges[2].address = data[0];

            if (srcsize > ptoa(5))
            {
                // Large sizes: split into three contiguous, oddly sized ranges.
                ranges[0].length = 7634;
                ranges[1].length = 9870;
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            }
            else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset))
            {
                // Page-aligned medium sizes: swap the first two pages so the
                // descriptor order differs from the memory order (the offidx
                // XOR below undoes this when checking).
                ranges[0].length = ptoa(1);
                ranges[1].length = ptoa(1);
                ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                    IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                  (long) srcsize, (long) srcoffset,
                  (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                  (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                  (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            // kr carries failure state across all the nested loops below.
            if (kIOReturnSuccess == kr)
            {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
                {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200))
                    {
                        IOMemoryMap * map;
                        mach_vm_address_t addr = 0;
                        // NOTE(review): this local shadows the outer
                        // vm_offset_t data[2]; only the buffer base data[0]
                        // captured above is used inside this scope.
                        uint32_t data;

                        // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) addr = map->getAddress();
                        else kr = kIOReturnError;

                        // IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) break;
                        kr = md->prepare();
                        if (kIOReturnSuccess != kr)
                        {
                            panic("prepare() fail 0x%x\n", kr);
                            break;
                        }
                        // Verify every word through both the mapping and readBytes().
                        for (idx = 0; idx < size; idx += sizeof(uint32_t))
                        {
                            offidx = (idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset))
                            {
                                // Undo the first-two-pages swap from the
                                // range construction above.
                                if (offidx < ptoa(2)) offidx ^= ptoa(1);
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
                            {
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            }
                            else
                            {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
                                if (offidx != data)
                                {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }
                        md->complete();
                        map->release();
                        // IOLog("unmapRef %llx\n", addr);
                    }
                    if (kIOReturnSuccess != kr) break;
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) break;
        }
        if (kIOReturnSuccess != kr) break;
    }

    if (kIOReturnSuccess != kr) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
                                      (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);

    assert(kr == kIOReturnSuccess);

    vm_deallocate(kernel_map, data[0], bsize);
    // vm_deallocate(kernel_map, data[1], size);

    IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount);

    return (0);
}
621
622 #endif /* DEVELOPMENT || DEBUG */