/*
 * Copyright (c) 2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/cdefs.h>

#include <IOKit/assert.h>
#include <IOKit/system.h>
#include <IOKit/IOLib.h>
#include <IOKit/IOMemoryDescriptor.h>
#include <IOKit/IOMapper.h>
#include <IOKit/IODMACommand.h>
#include <IOKit/IOKitKeysPrivate.h>

#ifndef __LP64__
#include <IOKit/IOSubMemoryDescriptor.h>
#endif /* !__LP64__ */
#include <IOKit/IOSubMemoryDescriptor.h>
#include <IOKit/IOMultiMemoryDescriptor.h>
#include <IOKit/IOBufferMemoryDescriptor.h>

#include <IOKit/IOKitDebug.h>
#include <libkern/OSDebug.h>

#include <vm/vm_pageout.h>
#include <mach/memory_object_types.h>
#include <device/device_port.h>

#include <mach/vm_prot.h>
#include <mach/mach_vm.h>
#include <vm/vm_fault.h>
#include <vm/vm_protos.h>

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#if DEVELOPMENT || DEBUG

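/*
 * IOMultMemoryDescriptorTest
 *
 * Builds a two-range descriptor over an eight-page buffer whose pages are
 * tagged 0xD0..0xD7, wraps two IOSubMemoryDescriptors and an
 * IOMultiMemoryDescriptor around it, maps the last four pages of the
 * concatenation into the kernel task, and asserts that the mapped words
 * carry the expected page tags.
 */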
static int IOMultMemoryDescriptorTest(int newValue)
{
    IOMemoryDescriptor      * mds[3];
    IOMultiMemoryDescriptor * mmd;
    IOMemoryMap             * map;
    void                    * addr;
    uint8_t                 * data;
    uint32_t                  i;
    IOAddressRange            ranges[2];

    // Fill an eight-page buffer so that every byte of page N reads (N | 0xD0).
    data = (typeof(data)) IOMallocAligned(ptoa(8), page_size);
    for (i = 0; i < ptoa(8); i++) data[i] = atop(i) | 0xD0;

    // The parent descriptor presents the pages out of order: pages 4-7 first, then 0-3.
    ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
    ranges[0].length  = ptoa(4);
    ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
    ranges[1].length  = ptoa(4);

    mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);

    mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
    mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);

    mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
    mds[2]->release();
    mds[1]->release();
    mds[0]->release();

    // Map the last four pages of the concatenation and check the expected page tags.
    map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
    mmd->release();
    assert(map);

    addr = (void *) map->getVirtualAddress();
    assert(ptoa(4) == map->getLength());
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
    assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
    assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
    assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
    map->release();

    IOFreeAligned(data, ptoa(8));

    return (0);
}

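/*
 * IOMemoryDescriptorTest
 *
 * Entry point for the IOMemoryDescriptor exercises. newValue selects a
 * sub-test: an IODMACommand 64-bit segment walk, a dmaMap() timing test for a
 * large pageable buffer against the "isp" device mapper, or an
 * IOMemoryMap::redirect() check (the selector values 5 and 3 used below are
 * assumed; only the 4 case survives in the source). Any other value runs
 * IOMultMemoryDescriptorTest and then an exhaustive map/readBytes consistency
 * sweep over a 16 MB allocation.
 */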
int IOMemoryDescriptorTest(int newValue)
{
    int result;

    if (5 == newValue)      /* selector value assumed for this sub-test */
    {
        IOReturn             ret;
        IOMemoryDescriptor * md;
        IODMACommand       * dma;
        IODMACommand::SegmentOptions segOptions =
        {
            .fStructSize                = sizeof(segOptions),
            .fNumAddressBits            = 64,
            .fMaxSegmentSize            = 4096,
            .fMaxTransferSize           = 128*1024,
            .fAlignment                 = 4,            // assumed; this field is missing from the source
            .fAlignmentLength           = 4,
            .fAlignmentInternalSegments = 0x1000
        };

        // Three range layouts: single range, two small ranges, and two overlapping ranges.
        IOAddressRange ranges[3][2] =
        {
            {
                { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
                { 0, 0 },
            },
            {
                { ranges[0][0].address, 0x10 },
                { 0x3000 + ranges[0][0].address, 0xff0 },
            },
            {
                { ranges[0][0].address, 0x2ffc },
                { trunc_page(ranges[0][0].address), 0x800 },
            },
        };
        static const uint32_t rangesCount[3] = { 1, 2, 2 };

        uint32_t test;
        for (test = 0; test < 3; test++)
        {
            kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
                    ranges[test][0].address, ranges[test][0].length,
                    ranges[test][1].address, ranges[test][1].length);

            md = IOMemoryDescriptor::withAddressRanges((IOAddressRange *)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
            ret = md->prepare();
            assert(kIOReturnSuccess == ret);

            dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
                                                  IODMACommand::kMapped, NULL, NULL);
            assert(dma);

            ret = dma->setMemoryDescriptor(md, true);
            if (kIOReturnSuccess == ret)
            {
                IODMACommand::Segment64 segments[1];
                UInt32                  numSegments;
                UInt64                  offset;

                // Walk all generated segments one at a time.
                offset = 0;
                do
                {
                    numSegments = 1;
                    ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
                    assert(kIOReturnSuccess == ret);
                    assert(1 == numSegments);
                    kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
                }
                while (offset < md->getLength());

                ret = dma->clearMemoryDescriptor(true);
                assert(kIOReturnSuccess == ret);
            }
            dma->release();
            md->complete();
            md->release();
        }

        return (kIOReturnSuccess);
    }
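    /*
     * newValue == 4: time IOBufferMemoryDescriptor::dmaMap() and
     * IODMACommand::setMemoryDescriptor() for a large pageable buffer against
     * the mapper of the "isp" service, then emit the first 32-bit segment.
     */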
    else if (4 == newValue)
    {
        IOService                * isp;
        IOMapper                 * mapper;
        IOBufferMemoryDescriptor * md1;
        IODMACommand             * dma;
        IOReturn                   ret;
        size_t                     bufSize = 8192 * 8192 * sizeof(uint32_t);
        uint64_t                   start, time, nano;

        isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
        assert(isp);
        mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
        assert(mapper);

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        bufSize, page_size);            // capacity/alignment arguments assumed

        ret = md1->prepare();
        assert(kIOReturnSuccess == ret);

        IODMAMapSpecification mapSpec;
        bzero(&mapSpec, sizeof(mapSpec));
        uint64_t mapped;
        uint64_t mappedLength;

        // Time a direct dmaMap() of the whole buffer.
        start = mach_absolute_time();

        ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
        assert(kIOReturnSuccess == ret);

        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);
        kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);

        dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
                        32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);
        assert(dma);

        // Time setMemoryDescriptor(), which maps the buffer through the IODMACommand path.
        start = mach_absolute_time();
        ret = dma->setMemoryDescriptor(md1, true);
        assert(kIOReturnSuccess == ret);
        time = mach_absolute_time() - start;

        absolutetime_to_nanoseconds(time, &nano);
        kprintf("time %lld us\n", nano / 1000ULL);

        IODMACommand::Segment32 segments[1];
        UInt32 numSegments = 1;
        UInt64 offset = 0;

        ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
        assert(kIOReturnSuccess == ret);
        assert(1 == numSegments);
        kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);

        ret = dma->clearMemoryDescriptor(true);
        assert(kIOReturnSuccess == ret);

        dma->release();
        md1->complete();
        md1->release();

        return (kIOReturnSuccess);
    }
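    /*
     * Redirect test: two 64 KB pageable buffers are mapped with kIOMapUnique
     * and filled with distinct patterns; IOMemoryMap::redirect() repoints
     * map1 at md2 and back, asserting which pattern each mapping observes.
     */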
    if (3 == newValue)      /* selector value assumed for this sub-test */
    {
        IOBufferMemoryDescriptor * md1;
        IOBufferMemoryDescriptor * md2;
        IOMemoryMap              * map1;
        IOMemoryMap              * map2;
        uint32_t                 * buf1;
        uint32_t                 * buf2;
        IOReturn                   err;

        md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        64*1024, page_size);            // capacity/alignment arguments assumed
        assert(md1);
        map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map1);
        buf1 = (uint32_t *) map1->getVirtualAddress();

        md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
                        kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
                        64*1024, page_size);            // capacity/alignment arguments assumed
        assert(md2);
        map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
        assert(map2);
        buf2 = (uint32_t *) map2->getVirtualAddress();

        memset(buf1, 0x11, 64*1024L);
        memset(buf2, 0x22, 64*1024L);

        kprintf("md1 %p, map1 %p, buf1 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);

        kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);

        // Redirect map1 onto md2 and verify the patterns seen through the two mappings swap.
        err = map1->redirect(md2, 0, 0ULL);
        kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf2[0]);
        assert(0x22222222 == buf1[0]);

        // Redirect map1 back onto md1 and verify the original patterns return.
        err = map1->redirect(md1, 0, 0ULL);
        kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
        assert(0x11111111 == buf1[0]);
        assert(0x22222222 == buf2[0]);

        map1->release();
        map2->release();
        md1->release();
        md2->release();
    }
    result = IOMultMemoryDescriptorTest(newValue);
    if (result) return (result);
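    /*
     * Exhaustive consistency sweep: allocate 16 MB, tag each 32-bit word with
     * its own index, then for many (source offset, source size) combinations
     * build one- or three-range descriptors, map sub-ranges into the kernel
     * task, and verify every word both through the mapping and through
     * IOMemoryDescriptor::readBytes().
     */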
    IOGeneralMemoryDescriptor * md;
    vm_address_t                data[2] = { 0, 0 };
    vm_size_t                   bsize = 16*1024*1024;
    vm_size_t                   srcsize, srcoffset, mapoffset, size;
    kern_return_t               kr;

    kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
    vm_inherit(kernel_map, data[0] + ptoa(1),  ptoa(1), VM_INHERIT_NONE);
    vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);

    IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);

    // Tag every 32-bit word of the buffer with its own index so mappings can be verified.
    uint32_t idx, offidx;
    for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
    {
        ((uint32_t*)data[0])[idx] = idx;
    }
    for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c))
    {
        for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc))
        {
            IOAddressRange ranges[3];
            uint32_t       rangeCount = 1;

            bzero(&ranges[0], sizeof(ranges));
            ranges[0].address = data[0] + srcoffset;
            ranges[0].length  = srcsize;

            if (srcsize > ptoa(5))
            {
                // Large source: split into three unequal, contiguous ranges.
                ranges[0].length  = 7634;
                ranges[1].length  = 9870;
                ranges[2].length  = srcsize - ranges[0].length - ranges[1].length;
                ranges[1].address = ranges[0].address + ranges[0].length;
                ranges[2].address = ranges[1].address + ranges[1].length;
                rangeCount = 3;
            }
            else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset))
            {
                // Page-aligned medium source: present the first two pages swapped.
                ranges[0].length  = ptoa(1);
                ranges[1].length  = ptoa(1);
                ranges[2].length  = srcsize - ranges[0].length - ranges[1].length;
                ranges[0].address = data[0] + srcoffset + ptoa(1);
                ranges[1].address = data[0] + srcoffset;
                ranges[2].address = ranges[0].address + ranges[0].length;
                rangeCount = 3;
            }

            md = OSDynamicCast(IOGeneralMemoryDescriptor,
                    IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
            assert(md);

            IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
                    (long) srcsize, (long) srcoffset,
                    (long long) ranges[0].address - data[0], (long long) ranges[0].length,
                    (long long) ranges[1].address - data[0], (long long) ranges[1].length,
                    (long long) ranges[2].address - data[0], (long long) ranges[2].length);

            if (kIOReturnSuccess == kr)
            {
                for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
                {
                    for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200))
                    {
                        IOMemoryMap     * map;
                        mach_vm_address_t addr = 0;
                        uint32_t          data;       // shadows the outer allocation; holds one word read back

//                      IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);

                        map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
                        if (map) addr = map->getAddress();
                        else     kr = kIOReturnError;

//                      IOLog(">mapRef 0x%x %llx\n", kr, addr);

                        if (kIOReturnSuccess != kr) break;

                        kr = md->prepare();
                        if (kIOReturnSuccess != kr)
                        {
                            panic("prepare() fail 0x%x\n", kr);
                            break;
                        }

                        for (idx = 0; idx < size; idx += sizeof(uint32_t))
                        {
                            offidx = (idx + mapoffset + srcoffset);
                            if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset))
                            {
                                // Account for the swapped first two pages set up above.
                                if (offidx < ptoa(2)) offidx ^= ptoa(1);
                            }
                            offidx /= sizeof(uint32_t);

                            if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
                            {
                                panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                kr = kIOReturnBadMedia;
                            }
                            else
                            {
                                if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
                                if (offidx != data)
                                {
                                    panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
                                    kr = kIOReturnBadMedia;
                                }
                            }
                        }

                        md->complete();
                        map->release();
//                      IOLog("unmapRef %llx\n", addr);

                        if (kIOReturnSuccess != kr) break;
                    }
                    if (kIOReturnSuccess != kr) break;
                }
            }
            md->release();
            if (kIOReturnSuccess != kr) break;
        }
    }

    if (kIOReturnSuccess != kr) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
                                      (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);

    assert(kr == kIOReturnSuccess);

    vm_deallocate(kernel_map, data[0], bsize);
//  vm_deallocate(kernel_map, data[1], size);

    return (0);
}

#endif  /* DEVELOPMENT || DEBUG */