/*
 * Copyright (c) 2014-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEVELOPMENT || DEBUG

extern SInt32 gIOMemoryReferenceCount;
70 static int IOMultMemoryDescriptorTest(int newValue
)
72 IOMemoryDescriptor
* mds
[3];
73 IOMultiMemoryDescriptor
* mmd
;
78 IOAddressRange ranges
[2];
80 data
= (typeof(data
)) IOMallocAligned(ptoa(8), page_size
);
81 for (i
= 0; i
< ptoa(8); i
++) data
[i
] = atop(i
) | 0xD0;
83 ranges
[0].address
= (IOVirtualAddress
)(data
+ ptoa(4));
84 ranges
[0].length
= ptoa(4);
85 ranges
[1].address
= (IOVirtualAddress
)(data
+ ptoa(0));
86 ranges
[1].length
= ptoa(4);
88 mds
[0] = IOMemoryDescriptor::withAddressRanges(&ranges
[0], 2, kIODirectionOutIn
, kernel_task
);
90 mds
[1] = IOSubMemoryDescriptor::withSubRange(mds
[0], ptoa(3), ptoa(2), kIODirectionOutIn
);
91 mds
[2] = IOSubMemoryDescriptor::withSubRange(mds
[0], ptoa(7), ptoa(1), kIODirectionOutIn
);
93 mmd
= IOMultiMemoryDescriptor::withDescriptors(&mds
[0], sizeof(mds
)/sizeof(mds
[0]), kIODirectionOutIn
, false);
97 map
= mmd
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
, ptoa(7), mmd
->getLength() - ptoa(7));
101 addr
= (void *) map
->getVirtualAddress();
102 assert(ptoa(4) == map
->getLength());
103 assert(0xd3d3d3d3 == ((uint32_t *)addr
)[ptoa(0) / sizeof(uint32_t)]);
104 assert(0xd7d7d7d7 == ((uint32_t *)addr
)[ptoa(1) / sizeof(uint32_t)]);
105 assert(0xd0d0d0d0 == ((uint32_t *)addr
)[ptoa(2) / sizeof(uint32_t)]);
106 assert(0xd3d3d3d3 == ((uint32_t *)addr
)[ptoa(3) / sizeof(uint32_t)]);
108 IOFreeAligned(data
, ptoa(8));
115 // <rdar://problem/30102458>
117 IODMACommandForceDoubleBufferTest(int newValue
)
120 IOBufferMemoryDescriptor
* bmd
;
123 IODMACommand::SegmentOptions segOptions
=
125 .fStructSize
= sizeof(segOptions
),
126 .fNumAddressBits
= 64,
127 .fMaxSegmentSize
= 0x2000,
128 .fMaxTransferSize
= 128*1024,
130 .fAlignmentLength
= 1,
131 .fAlignmentInternalSegments
= 1
133 IODMACommand::Segment64 segments
[1];
138 for (dir
= kIODirectionIn
; ; dir
++)
140 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
,
141 dir
| kIOMemoryPageable
, ptoa(8));
144 ((uint32_t*) bmd
->getBytesNoCopy())[0] = 0x53535300 | dir
;
146 ret
= bmd
->prepare((IODirection
) dir
);
147 assert(kIOReturnSuccess
== ret
);
149 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost64
, &segOptions
,
150 kIODMAMapOptionMapped
,
153 ret
= dma
->setMemoryDescriptor(bmd
, true);
154 assert(kIOReturnSuccess
== ret
);
156 ret
= dma
->synchronize(IODMACommand::kForceDoubleBuffer
| kIODirectionOut
);
157 assert(kIOReturnSuccess
== ret
);
161 ret
= dma
->gen64IOVMSegments(&dmaOffset
, &segments
[0], &numSegments
);
162 assert(kIOReturnSuccess
== ret
);
163 assert(1 == numSegments
);
165 if (kIODirectionOut
& dir
)
167 data
= ((uint32_t*) bmd
->getBytesNoCopy())[0];
168 assertf((0x53535300 | dir
) == data
, "mismatch 0x%x", data
);
170 if (kIODirectionIn
& dir
)
172 IOMappedWrite32(segments
[0].fIOVMAddr
, 0x11223300 | dir
);
175 ret
= dma
->clearMemoryDescriptor(true);
176 assert(kIOReturnSuccess
== ret
);
179 bmd
->complete((IODirection
) dir
);
181 if (kIODirectionIn
& dir
)
183 data
= ((uint32_t*) bmd
->getBytesNoCopy())[0];
184 assertf((0x11223300 | dir
) == data
, "mismatch 0x%x", data
);
189 if (dir
== kIODirectionInOut
) break;
195 // <rdar://problem/34322778>
197 IODMACommandLocalMappedNonContig(int newValue
)
200 IOMemoryDescriptor
* md
;
202 OSDictionary
* matching
;
205 IODMACommand::SegmentOptions segOptions
=
207 .fStructSize
= sizeof(segOptions
),
208 .fNumAddressBits
= 64,
209 .fMaxSegmentSize
= 128*1024,
210 .fMaxTransferSize
= 128*1024,
212 .fAlignmentLength
= 1,
213 .fAlignmentInternalSegments
= 1
215 IODMACommand::Segment64 segments
[1];
220 vm_size_t bufSize
= ptoa(4);
222 if (!IOMapper::gSystem
) return (0);
225 kr
= vm_allocate_kernel(kernel_map
, &buffer
, bufSize
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IOKIT
);
226 assert(KERN_SUCCESS
== kr
);
228 // fragment the vmentries
229 kr
= vm_inherit(kernel_map
, buffer
+ ptoa(1), ptoa(1), VM_INHERIT_NONE
);
230 assert(KERN_SUCCESS
== kr
);
232 md
= IOMemoryDescriptor::withAddressRange(
233 buffer
+ 0xa00, 0x2000, kIODirectionOutIn
, kernel_task
);
235 kr
= md
->prepare(kIODirectionOutIn
);
236 assert(kIOReturnSuccess
== kr
);
238 segPhys
= md
->getPhysicalSegment(0, NULL
, 0);
240 matching
= IOService::nameMatching("XHC1");
242 device
= IOService::copyMatchingService(matching
);
244 mapper
= device
? IOMapper::copyMapperForDeviceWithIndex(device
, 0) : NULL
;
246 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost64
, &segOptions
,
247 kIODMAMapOptionMapped
,
250 kr
= dma
->setMemoryDescriptor(md
, true);
251 assert(kIOReturnSuccess
== kr
);
255 kr
= dma
->gen64IOVMSegments(&dmaOffset
, &segments
[0], &numSegments
);
256 assert(kIOReturnSuccess
== kr
);
257 assert(1 == numSegments
);
259 if (mapper
) assertf(segments
[0].fIOVMAddr
!= segPhys
, "phys !local 0x%qx, 0x%qx, %p", segments
[0].fIOVMAddr
, segPhys
, dma
);
261 kr
= dma
->clearMemoryDescriptor(true);
262 assert(kIOReturnSuccess
== kr
);
265 kr
= md
->complete(kIODirectionOutIn
);
266 assert(kIOReturnSuccess
== kr
);
269 kr
= vm_deallocate(kernel_map
, buffer
, bufSize
);
270 assert(KERN_SUCCESS
== kr
);
271 OSSafeReleaseNULL(mapper
);
276 // <rdar://problem/30102458>
278 IOMemoryRemoteTest(int newValue
)
281 IOMemoryDescriptor
* md
;
282 IOByteCount offset
, length
;
287 IODMACommand::SegmentOptions segOptions
=
289 .fStructSize
= sizeof(segOptions
),
290 .fNumAddressBits
= 64,
291 .fMaxSegmentSize
= 0x2000,
292 .fMaxTransferSize
= 128*1024,
294 .fAlignmentLength
= 1,
295 .fAlignmentInternalSegments
= 1
297 IODMACommand::Segment64 segments
[1];
301 IOAddressRange ranges
[2] = {
302 { 0x1234567890123456ULL
, 0x1000 }, { 0x5432109876543210, 0x2000 },
305 md
= IOMemoryDescriptor::withAddressRanges(&ranges
[0], 2, kIODirectionOutIn
|kIOMemoryRemote
, TASK_NULL
);
309 // md->readBytes(0, &idx, sizeof(idx));
311 ret
= md
->prepare(kIODirectionOutIn
);
312 assert(kIOReturnSuccess
== ret
);
314 printf("remote md flags 0x%qx, r %d\n",
315 md
->getFlags(), (0 != (kIOMemoryRemote
& md
->getFlags())));
317 for (offset
= 0, idx
= 0; true; offset
+= length
, idx
++)
319 addr
= md
->getPhysicalSegment(offset
, &length
, 0);
322 assert(addr
== ranges
[idx
].address
);
323 assert(length
== ranges
[idx
].length
);
325 assert(offset
== md
->getLength());
327 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost64
, &segOptions
,
328 kIODMAMapOptionUnmapped
| kIODMAMapOptionIterateOnly
,
331 ret
= dma
->setMemoryDescriptor(md
, true);
332 assert(kIOReturnSuccess
== ret
);
334 for (dmaOffset
= 0, idx
= 0; dmaOffset
< md
->getLength(); idx
++)
337 ret
= dma
->gen64IOVMSegments(&dmaOffset
, &segments
[0], &numSegments
);
338 assert(kIOReturnSuccess
== ret
);
339 assert(1 == numSegments
);
341 assert(segments
[0].fIOVMAddr
== ranges
[idx
].address
);
342 assert(segments
[0].fLength
== ranges
[idx
].length
);
344 assert(dmaOffset
== md
->getLength());
346 ret
= dma
->clearMemoryDescriptor(true);
347 assert(kIOReturnSuccess
== ret
);
349 md
->complete(kIODirectionOutIn
);
356 IOMemoryPrefaultTest(uint32_t options
)
358 IOBufferMemoryDescriptor
* bmd
;
365 lock
= IOSimpleLockAlloc();
368 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
369 kIODirectionOutIn
| kIOMemoryPageable
, ptoa(8));
372 assert(KERN_SUCCESS
== kr
);
374 map
= bmd
->map(kIOMapPrefault
);
377 p
= (typeof(p
)) map
->getVirtualAddress();
378 IOSimpleLockLock(lock
);
380 IOSimpleLockUnlock(lock
);
382 IOLog("IOMemoryPrefaultTest %d\n", data
);
386 IOSimpleLockFree(lock
);
388 return (kIOReturnSuccess
);
392 // <rdar://problem/26375234>
394 ZeroLengthTest(int newValue
)
396 IOMemoryDescriptor
* md
;
398 md
= IOMemoryDescriptor::withAddressRange(
399 0, 0, kIODirectionNone
, current_task());
407 // <rdar://problem/27002624>
409 BadFixedAllocTest(int newValue
)
411 IOBufferMemoryDescriptor
* bmd
;
414 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(NULL
,
415 kIODirectionIn
| kIOMemoryPageable
, ptoa(1));
417 map
= bmd
->createMappingInTask(kernel_task
, 0x2000, 0);
424 // <rdar://problem/26466423>
426 IODirectionPrepareNoZeroFillTest(int newValue
)
428 IOBufferMemoryDescriptor
* bmd
;
430 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(NULL
,
431 kIODirectionIn
| kIOMemoryPageable
, ptoa(24));
433 bmd
->prepare((IODirection
)(kIODirectionIn
| kIODirectionPrepareNoZeroFill
));
434 bmd
->prepare(kIODirectionIn
);
435 bmd
->complete((IODirection
)(kIODirectionIn
| kIODirectionCompleteWithDataValid
));
436 bmd
->complete(kIODirectionIn
);
441 // <rdar://problem/28190483>
443 IOMemoryMapTest(uint32_t options
)
445 IOBufferMemoryDescriptor
* bmd
;
446 IOMemoryDescriptor
* md
;
454 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(current_task(),
455 kIODirectionOutIn
| kIOMemoryPageable
, 0x4018+0x800);
457 p
= (typeof(p
)) bmd
->getBytesNoCopy();
460 r
= copyout(&data
, p
, sizeof(data
));
463 r
= copyout(&data
, p
+ 0x1000, sizeof(data
));
466 r
= copyout(&data
, p
+ 0x2000, sizeof(data
));
469 r
= copyout(&data
, p
+ 0x3000, sizeof(data
));
472 md
= IOMemoryDescriptor::withAddressRange(p
, 0x4018,
473 kIODirectionOut
| options
,
476 time
= mach_absolute_time();
477 map
= md
->map(kIOMapReadOnly
);
478 time
= mach_absolute_time() - time
;
480 absolutetime_to_nanoseconds(time
, &nano
);
482 p2
= (typeof(p2
)) map
->getVirtualAddress();
483 assert(0x11 == p2
[0]);
484 assert(0x22 == p2
[0x1000]);
485 assert(0x33 == p2
[0x2000]);
486 assert(0x44 == p2
[0x3000]);
489 r
= copyout(&data
, p
+ 0x2000, sizeof(data
));
492 assert(0x11 == p2
[0]);
493 assert(0x22 == p2
[0x1000]);
494 assert(0x44 == p2
[0x3000]);
495 if (kIOMemoryMapCopyOnWrite
& options
) assert(0x33 == p2
[0x2000]);
496 else assert(0x99 == p2
[0x2000]);
498 IOLog("IOMemoryMapCopyOnWriteTest map(%s) %lld ns\n",
499 kIOMemoryMapCopyOnWrite
& options
? "kIOMemoryMapCopyOnWrite" : "",
506 return (kIOReturnSuccess
);
510 IOMemoryMapCopyOnWriteTest(int newValue
)
513 IOMemoryMapTest(kIOMemoryMapCopyOnWrite
);
518 AllocationNameTest(int newValue
)
520 IOMemoryDescriptor
* bmd
;
521 kern_allocation_name_t name
, prior
;
523 name
= kern_allocation_name_allocate("com.apple.iokit.test", 0);
526 prior
= thread_set_allocation_name(name
);
528 bmd
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
529 kIODirectionOutIn
| kIOMemoryPageable
| kIOMemoryKernelUserShared
,
534 thread_set_allocation_name(prior
);
535 kern_allocation_name_release(name
);
537 if (newValue
!= 7) bmd
->release();
542 int IOMemoryDescriptorTest(int newValue
)
546 IOLog("/IOMemoryDescriptorTest %d\n", (int) gIOMemoryReferenceCount
);
551 IOMemoryDescriptor
* sbmds
[3];
552 IOMultiMemoryDescriptor
* smmd
;
553 IOMemoryDescriptor
* mds
[2];
554 IOMultiMemoryDescriptor
* mmd
;
557 sbmds
[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(1));
558 sbmds
[1] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(2));
559 sbmds
[2] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(3));
560 smmd
= IOMultiMemoryDescriptor::withDescriptors(&sbmds
[0], sizeof(sbmds
)/sizeof(sbmds
[0]), kIODirectionOutIn
, false);
562 mds
[0] = IOBufferMemoryDescriptor::inTaskWithOptions(kernel_task
, kIODirectionOutIn
| kIOMemoryKernelUserShared
, ptoa(1));
564 mmd
= IOMultiMemoryDescriptor::withDescriptors(&mds
[0], sizeof(mds
)/sizeof(mds
[0]), kIODirectionOutIn
, false);
565 map
= mmd
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
);
577 else if (5 == newValue
)
580 IOMemoryDescriptor
* md
;
582 IODMACommand::SegmentOptions segOptions
=
584 .fStructSize
= sizeof(segOptions
),
585 .fNumAddressBits
= 64,
586 .fMaxSegmentSize
= 4096,
587 .fMaxTransferSize
= 128*1024,
589 .fAlignmentLength
= 4,
590 .fAlignmentInternalSegments
= 0x1000
593 IOAddressRange ranges
[3][2] =
596 { (uintptr_t) &IOMemoryDescriptorTest
, 0x2ffc },
600 { ranges
[0][0].address
, 0x10 },
601 { 0x3000 + ranges
[0][0].address
, 0xff0 },
604 { ranges
[0][0].address
, 0x2ffc },
605 { trunc_page(ranges
[0][0].address
), 0x800 },
608 static const uint32_t rangesCount
[3] = { 1, 2, 2 };
611 for (test
= 0; test
< 3; test
++)
613 kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test
,
614 ranges
[test
][0].address
, ranges
[test
][0].length
,
615 ranges
[test
][1].address
, ranges
[test
][1].length
);
617 md
= IOMemoryDescriptor::withAddressRanges((IOAddressRange
*)&ranges
[test
][0], rangesCount
[test
], kIODirectionOut
, kernel_task
);
620 assert(kIOReturnSuccess
== ret
);
621 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost64
, &segOptions
,
622 IODMACommand::kMapped
, NULL
, NULL
);
624 ret
= dma
->setMemoryDescriptor(md
, true);
625 if (kIOReturnSuccess
== ret
)
627 IODMACommand::Segment64 segments
[1];
635 ret
= dma
->gen64IOVMSegments(&offset
, &segments
[0], &numSegments
);
636 assert(kIOReturnSuccess
== ret
);
637 assert(1 == numSegments
);
638 kprintf("seg 0x%qx, 0x%qx\n", segments
[0].fIOVMAddr
, segments
[0].fLength
);
640 while (offset
< md
->getLength());
642 ret
= dma
->clearMemoryDescriptor(true);
643 assert(kIOReturnSuccess
== ret
);
649 return (kIOReturnSuccess
);
651 else if (4 == newValue
)
655 IOBufferMemoryDescriptor
* md1
;
658 size_t bufSize
= 8192 * 8192 * sizeof(uint32_t);
659 uint64_t start
, time
, nano
;
661 isp
= IOService::copyMatchingService(IOService::nameMatching("isp"));
663 mapper
= IOMapper::copyMapperForDeviceWithIndex(isp
, 0);
666 md1
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
667 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
670 ret
= md1
->prepare();
671 assert(kIOReturnSuccess
== ret
);
673 IODMAMapSpecification mapSpec
;
674 bzero(&mapSpec
, sizeof(mapSpec
));
676 uint64_t mappedLength
;
678 start
= mach_absolute_time();
680 ret
= md1
->dmaMap(mapper
, NULL
, &mapSpec
, 0, bufSize
, &mapped
, &mappedLength
);
681 assert(kIOReturnSuccess
== ret
);
683 time
= mach_absolute_time() - start
;
685 absolutetime_to_nanoseconds(time
, &nano
);
686 kprintf("time %lld us\n", nano
/ 1000ULL);
687 kprintf("seg0 0x%qx, 0x%qx\n", mapped
, mappedLength
);
691 dma
= IODMACommand::withSpecification(kIODMACommandOutputHost32
,
692 32, 0, IODMACommand::kMapped
, 0, 1, mapper
, NULL
);
696 start
= mach_absolute_time();
697 ret
= dma
->setMemoryDescriptor(md1
, true);
698 assert(kIOReturnSuccess
== ret
);
699 time
= mach_absolute_time() - start
;
701 absolutetime_to_nanoseconds(time
, &nano
);
702 kprintf("time %lld us\n", nano
/ 1000ULL);
705 IODMACommand::Segment32 segments
[1];
706 UInt32 numSegments
= 1;
710 ret
= dma
->gen32IOVMSegments(&offset
, &segments
[0], &numSegments
);
711 assert(kIOReturnSuccess
== ret
);
712 assert(1 == numSegments
);
713 kprintf("seg0 0x%x, 0x%x\n", (int)segments
[0].fIOVMAddr
, (int)segments
[0].fLength
);
715 ret
= dma
->clearMemoryDescriptor(true);
716 assert(kIOReturnSuccess
== ret
);
720 return (kIOReturnSuccess
);
725 IOBufferMemoryDescriptor
* md1
;
726 IOBufferMemoryDescriptor
* md2
;
733 md1
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
734 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
737 map1
= md1
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
| kIOMapUnique
);
739 buf1
= (uint32_t *) map1
->getVirtualAddress();
741 md2
= IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL
,
742 kIODirectionOutIn
| kIOMemoryPersistent
| kIOMemoryPageable
,
745 map2
= md2
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
| kIOMapUnique
);
747 buf2
= (uint32_t *) map2
->getVirtualAddress();
749 memset(buf1
, 0x11, 64*1024L);
750 memset(buf2
, 0x22, 64*1024L);
752 kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1
, map1
, buf1
, md2
, map2
, buf2
);
754 kprintf("no redir 0x%08x, 0x%08x\n", buf1
[0], buf2
[0]);
755 assert(0x11111111 == buf1
[0]);
756 assert(0x22222222 == buf2
[0]);
757 err
= map1
->redirect(md2
, 0, 0ULL);
758 kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err
, buf1
[0], buf2
[0]);
759 assert(0x11111111 == buf2
[0]);
760 assert(0x22222222 == buf1
[0]);
761 err
= map1
->redirect(md1
, 0, 0ULL);
762 kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err
, buf1
[0], buf2
[0]);
763 assert(0x11111111 == buf1
[0]);
764 assert(0x22222222 == buf2
[0]);
772 // result = IODMACommandLocalMappedNonContig(newValue);
773 // if (result) return (result);
775 result
= IODMACommandForceDoubleBufferTest(newValue
);
776 if (result
) return (result
);
778 result
= AllocationNameTest(newValue
);
779 if (result
) return (result
);
781 result
= IOMemoryMapCopyOnWriteTest(newValue
);
782 if (result
) return (result
);
784 result
= IOMultMemoryDescriptorTest(newValue
);
785 if (result
) return (result
);
787 result
= ZeroLengthTest(newValue
);
788 if (result
) return (result
);
790 result
= IODirectionPrepareNoZeroFillTest(newValue
);
791 if (result
) return (result
);
793 result
= BadFixedAllocTest(newValue
);
794 if (result
) return (result
);
796 result
= IOMemoryRemoteTest(newValue
);
797 if (result
) return (result
);
799 result
= IOMemoryPrefaultTest(newValue
);
800 if (result
) return (result
);
802 IOGeneralMemoryDescriptor
* md
;
804 vm_size_t bsize
= 16*1024*1024;
805 vm_size_t srcsize
, srcoffset
, mapoffset
, size
;
808 data
[0] = data
[1] = 0;
809 kr
= vm_allocate_kernel(kernel_map
, &data
[0], bsize
, VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IOKIT
);
810 assert(KERN_SUCCESS
== kr
);
812 vm_inherit(kernel_map
, data
[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE
);
813 vm_inherit(kernel_map
, data
[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE
);
815 IOLog("data 0x%lx, 0x%lx\n", (long)data
[0], (long)data
[1]);
817 uint32_t idx
, offidx
;
818 for (idx
= 0; idx
< (bsize
/ sizeof(uint32_t)); idx
++)
820 ((uint32_t*)data
[0])[idx
] = idx
;
823 for (srcoffset
= 0; srcoffset
< bsize
; srcoffset
= ((srcoffset
<< 2) + 0x40c))
825 for (srcsize
= 4; srcsize
< (bsize
- srcoffset
- 1); srcsize
= ((srcsize
<< 2) + 0x3fc))
827 IOAddressRange ranges
[3];
828 uint32_t rangeCount
= 1;
830 bzero(&ranges
[0], sizeof(ranges
));
831 ranges
[0].address
= data
[0] + srcoffset
;
832 ranges
[0].length
= srcsize
;
833 ranges
[1].address
= ranges
[2].address
= data
[0];
835 if (srcsize
> ptoa(5))
837 ranges
[0].length
= 7634;
838 ranges
[1].length
= 9870;
839 ranges
[2].length
= srcsize
- ranges
[0].length
- ranges
[1].length
;
840 ranges
[1].address
= ranges
[0].address
+ ranges
[0].length
;
841 ranges
[2].address
= ranges
[1].address
+ ranges
[1].length
;
844 else if ((srcsize
> ptoa(2)) && !(page_mask
& srcoffset
))
846 ranges
[0].length
= ptoa(1);
847 ranges
[1].length
= ptoa(1);
848 ranges
[2].length
= srcsize
- ranges
[0].length
- ranges
[1].length
;
849 ranges
[0].address
= data
[0] + srcoffset
+ ptoa(1);
850 ranges
[1].address
= data
[0] + srcoffset
;
851 ranges
[2].address
= ranges
[0].address
+ ranges
[0].length
;
855 md
= OSDynamicCast(IOGeneralMemoryDescriptor
,
856 IOMemoryDescriptor::withAddressRanges(&ranges
[0], rangeCount
, kIODirectionInOut
, kernel_task
));
859 IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
860 (long) srcsize
, (long) srcoffset
,
861 (long long) ranges
[0].address
- data
[0], (long long) ranges
[0].length
,
862 (long long) ranges
[1].address
- data
[0], (long long) ranges
[1].length
,
863 (long long) ranges
[2].address
- data
[0], (long long) ranges
[2].length
);
865 if (kIOReturnSuccess
== kr
)
867 for (mapoffset
= 0; mapoffset
< srcsize
; mapoffset
= ((mapoffset
<< 1) + 0xf00))
869 for (size
= 4; size
< (srcsize
- mapoffset
- 1); size
= ((size
<< 2) + 0x200))
872 mach_vm_address_t addr
= 0;
875 // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
877 map
= md
->createMappingInTask(kernel_task
, 0, kIOMapAnywhere
, mapoffset
, size
);
878 if (map
) addr
= map
->getAddress();
879 else kr
= kIOReturnError
;
881 // IOLog(">mapRef 0x%x %llx\n", kr, addr);
883 if (kIOReturnSuccess
!= kr
) break;
885 if (kIOReturnSuccess
!= kr
)
887 panic("prepare() fail 0x%x\n", kr
);
890 for (idx
= 0; idx
< size
; idx
+= sizeof(uint32_t))
892 offidx
= (idx
+ mapoffset
+ srcoffset
);
893 if ((srcsize
<= ptoa(5)) && (srcsize
> ptoa(2)) && !(page_mask
& srcoffset
))
895 if (offidx
< ptoa(2)) offidx
^= ptoa(1);
897 offidx
/= sizeof(uint32_t);
899 if (offidx
!= ((uint32_t*)addr
)[idx
/sizeof(uint32_t)])
901 panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md
, map
, idx
, (long) srcoffset
, (long) mapoffset
);
902 kr
= kIOReturnBadMedia
;
906 if (sizeof(data
) != md
->readBytes(mapoffset
+ idx
, &data
, sizeof(data
))) data
= 0;
909 panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md
, map
, idx
, (long) srcoffset
, (long) mapoffset
);
910 kr
= kIOReturnBadMedia
;
916 // IOLog("unmapRef %llx\n", addr);
918 if (kIOReturnSuccess
!= kr
) break;
922 if (kIOReturnSuccess
!= kr
) break;
924 if (kIOReturnSuccess
!= kr
) break;
927 if (kIOReturnSuccess
!= kr
) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
928 (long) srcsize
, (long) srcoffset
, (long) size
, (long) mapoffset
);
930 assert(kr
== kIOReturnSuccess
);
932 vm_deallocate(kernel_map
, data
[0], bsize
);
933 // vm_deallocate(kernel_map, data[1], size);
935 IOLog("IOMemoryDescriptorTest/ %d\n", (int) gIOMemoryReferenceCount
);
#endif /* DEVELOPMENT || DEBUG */