/*
 * Source provenance: apple/xnu.git — iokit/Tests/TestIOMemoryDescriptor.cpp
 * (blob 926681a7ede6f878229b28dc6ca066e031651bf3, via git.saurik.com mirror)
 */
1 /*
2 * Copyright (c) 2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/cdefs.h>
30
31 #include <IOKit/assert.h>
32 #include <IOKit/system.h>
33 #include <IOKit/IOLib.h>
34 #include <IOKit/IOMemoryDescriptor.h>
35 #include <IOKit/IOMapper.h>
36 #include <IOKit/IODMACommand.h>
37 #include <IOKit/IOKitKeysPrivate.h>
38
39 #ifndef __LP64__
40 #include <IOKit/IOSubMemoryDescriptor.h>
41 #endif /* !__LP64__ */
42 #include <IOKit/IOSubMemoryDescriptor.h>
43 #include <IOKit/IOMultiMemoryDescriptor.h>
44 #include <IOKit/IOBufferMemoryDescriptor.h>
45
46 #include <IOKit/IOKitDebug.h>
47 #include <libkern/OSDebug.h>
48 #include <sys/uio.h>
49
50 __BEGIN_DECLS
51 #include <vm/pmap.h>
52 #include <vm/vm_pageout.h>
53 #include <mach/memory_object_types.h>
54 #include <device/device_port.h>
55
56 #include <mach/vm_prot.h>
57 #include <mach/mach_vm.h>
58 #include <vm/vm_fault.h>
59 #include <vm/vm_protos.h>
60 __END_DECLS
61
62
63 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
64
65 #if DEVELOPMENT || DEBUG
66
67 static int IOMultMemoryDescriptorTest(int newValue)
68 {
69 IOMemoryDescriptor * mds[3];
70 IOMultiMemoryDescriptor * mmd;
71 IOMemoryMap * map;
72 void * addr;
73 uint8_t * data;
74 uint32_t i;
75 IOAddressRange ranges[2];
76
77 data = (typeof(data)) IOMallocAligned(ptoa(8), page_size);
78 for (i = 0; i < ptoa(8); i++) data[i] = atop(i) | 0xD0;
79
80 ranges[0].address = (IOVirtualAddress)(data + ptoa(4));
81 ranges[0].length = ptoa(4);
82 ranges[1].address = (IOVirtualAddress)(data + ptoa(0));
83 ranges[1].length = ptoa(4);
84
85 mds[0] = IOMemoryDescriptor::withAddressRanges(&ranges[0], 2, kIODirectionOutIn, kernel_task);
86
87 mds[1] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(3), ptoa(2), kIODirectionOutIn);
88 mds[2] = IOSubMemoryDescriptor::withSubRange(mds[0], ptoa(7), ptoa(1), kIODirectionOutIn);
89
90 mmd = IOMultiMemoryDescriptor::withDescriptors(&mds[0], sizeof(mds)/sizeof(mds[0]), kIODirectionOutIn, false);
91 mds[2]->release();
92 mds[1]->release();
93 mds[0]->release();
94 map = mmd->createMappingInTask(kernel_task, 0, kIOMapAnywhere, ptoa(7), mmd->getLength() - ptoa(7));
95 mmd->release();
96 assert(map);
97
98 addr = (void *) map->getVirtualAddress();
99 assert(ptoa(4) == map->getLength());
100 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(0) / sizeof(uint32_t)]);
101 assert(0xd7d7d7d7 == ((uint32_t *)addr)[ptoa(1) / sizeof(uint32_t)]);
102 assert(0xd0d0d0d0 == ((uint32_t *)addr)[ptoa(2) / sizeof(uint32_t)]);
103 assert(0xd3d3d3d3 == ((uint32_t *)addr)[ptoa(3) / sizeof(uint32_t)]);
104 map->release();
105 IOFreeAligned(data, ptoa(8));
106
107 return (0);
108 }
109
110
111 int IOMemoryDescriptorTest(int newValue)
112 {
113 int result;
114
115 #if 0
116 if (5 == newValue)
117 {
118 IOReturn ret;
119 IOMemoryDescriptor * md;
120 IODMACommand * dma;
121 IODMACommand::SegmentOptions segOptions =
122 {
123 .fStructSize = sizeof(segOptions),
124 .fNumAddressBits = 64,
125 .fMaxSegmentSize = 4096,
126 .fMaxTransferSize = 128*1024,
127 .fAlignment = 4,
128 .fAlignmentLength = 4,
129 .fAlignmentInternalSegments = 0x1000
130 };
131
132 IOAddressRange ranges[3][2] =
133 {
134 {
135 { (uintptr_t) &IOMemoryDescriptorTest, 0x2ffc },
136 { 0, 0 },
137 },
138 {
139 { ranges[0][0].address, 0x10 },
140 { 0x3000 + ranges[0][0].address, 0xff0 },
141 },
142 {
143 { ranges[0][0].address, 0x2ffc },
144 { trunc_page(ranges[0][0].address), 0x800 },
145 },
146 };
147 static const uint32_t rangesCount[3] = { 1, 2, 2 };
148 uint32_t test;
149
150 for (test = 0; test < 3; test++)
151 {
152 kprintf("---[%d] address 0x%qx-0x%qx, 0x%qx-0x%qx\n", test,
153 ranges[test][0].address, ranges[test][0].length,
154 ranges[test][1].address, ranges[test][1].length);
155
156 md = IOMemoryDescriptor::withAddressRanges((IOAddressRange*)&ranges[test][0], rangesCount[test], kIODirectionOut, kernel_task);
157 assert(md);
158 ret = md->prepare();
159 assert(kIOReturnSuccess == ret);
160 dma = IODMACommand::withSpecification(kIODMACommandOutputHost64, &segOptions,
161 IODMACommand::kMapped, NULL, NULL);
162 assert(dma);
163 ret = dma->setMemoryDescriptor(md, true);
164 if (kIOReturnSuccess == ret)
165 {
166 IODMACommand::Segment64 segments[1];
167 UInt32 numSegments;
168 UInt64 offset;
169
170 offset = 0;
171 do
172 {
173 numSegments = 1;
174 ret = dma->gen64IOVMSegments(&offset, &segments[0], &numSegments);
175 assert(kIOReturnSuccess == ret);
176 assert(1 == numSegments);
177 kprintf("seg 0x%qx, 0x%qx\n", segments[0].fIOVMAddr, segments[0].fLength);
178 }
179 while (offset < md->getLength());
180
181 ret = dma->clearMemoryDescriptor(true);
182 assert(kIOReturnSuccess == ret);
183 dma->release();
184 }
185 md->release();
186 }
187
188 return (kIOReturnSuccess);
189 }
190 else if (4 == newValue)
191 {
192 IOService * isp;
193 IOMapper * mapper;
194 IOBufferMemoryDescriptor * md1;
195 IODMACommand * dma;
196 IOReturn ret;
197 size_t bufSize = 8192 * 8192 * sizeof(uint32_t);
198 uint64_t start, time, nano;
199
200 isp = IOService::copyMatchingService(IOService::nameMatching("isp"));
201 assert(isp);
202 mapper = IOMapper::copyMapperForDeviceWithIndex(isp, 0);
203 assert(mapper);
204
205 md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
206 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
207 bufSize, page_size);
208
209 ret = md1->prepare();
210 assert(kIOReturnSuccess == ret);
211
212 IODMAMapSpecification mapSpec;
213 bzero(&mapSpec, sizeof(mapSpec));
214 uint64_t mapped;
215 uint64_t mappedLength;
216
217 start = mach_absolute_time();
218
219 ret = md1->dmaMap(mapper, NULL, &mapSpec, 0, bufSize, &mapped, &mappedLength);
220 assert(kIOReturnSuccess == ret);
221
222 time = mach_absolute_time() - start;
223
224 absolutetime_to_nanoseconds(time, &nano);
225 kprintf("time %lld us\n", nano / 1000ULL);
226 kprintf("seg0 0x%qx, 0x%qx\n", mapped, mappedLength);
227
228 assert(md1);
229
230 dma = IODMACommand::withSpecification(kIODMACommandOutputHost32,
231 32, 0, IODMACommand::kMapped, 0, 1, mapper, NULL);
232
233 assert(dma);
234
235 start = mach_absolute_time();
236 ret = dma->setMemoryDescriptor(md1, true);
237 assert(kIOReturnSuccess == ret);
238 time = mach_absolute_time() - start;
239
240 absolutetime_to_nanoseconds(time, &nano);
241 kprintf("time %lld us\n", nano / 1000ULL);
242
243
244 IODMACommand::Segment32 segments[1];
245 UInt32 numSegments = 1;
246 UInt64 offset;
247
248 offset = 0;
249 ret = dma->gen32IOVMSegments(&offset, &segments[0], &numSegments);
250 assert(kIOReturnSuccess == ret);
251 assert(1 == numSegments);
252 kprintf("seg0 0x%x, 0x%x\n", (int)segments[0].fIOVMAddr, (int)segments[0].fLength);
253
254 ret = dma->clearMemoryDescriptor(true);
255 assert(kIOReturnSuccess == ret);
256
257 md1->release();
258
259 return (kIOReturnSuccess);
260 }
261
262 if (3 == newValue)
263 {
264 IOBufferMemoryDescriptor * md1;
265 IOBufferMemoryDescriptor * md2;
266 IOMemoryMap * map1;
267 IOMemoryMap * map2;
268 uint32_t * buf1;
269 uint32_t * buf2;
270 IOReturn err;
271
272 md1 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
273 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
274 64*1024, page_size);
275 assert(md1);
276 map1 = md1->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
277 assert(map1);
278 buf1 = (uint32_t *) map1->getVirtualAddress();
279
280 md2 = IOBufferMemoryDescriptor::inTaskWithOptions(TASK_NULL,
281 kIODirectionOutIn | kIOMemoryPersistent | kIOMemoryPageable,
282 64*1024, page_size);
283 assert(md2);
284 map2 = md2->createMappingInTask(kernel_task, 0, kIOMapAnywhere | kIOMapUnique);
285 assert(map2);
286 buf2 = (uint32_t *) map2->getVirtualAddress();
287
288 memset(buf1, 0x11, 64*1024L);
289 memset(buf2, 0x22, 64*1024L);
290
291 kprintf("md1 %p, map1 %p, buf2 %p; md2 %p, map2 %p, buf2 %p\n", md1, map1, buf1, md2, map2, buf2);
292
293 kprintf("no redir 0x%08x, 0x%08x\n", buf1[0], buf2[0]);
294 assert(0x11111111 == buf1[0]);
295 assert(0x22222222 == buf2[0]);
296 err = map1->redirect(md2, 0, 0ULL);
297 kprintf("redir md2(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
298 assert(0x11111111 == buf2[0]);
299 assert(0x22222222 == buf1[0]);
300 err = map1->redirect(md1, 0, 0ULL);
301 kprintf("redir md1(0x%x) 0x%08x, 0x%08x\n", err, buf1[0], buf2[0]);
302 assert(0x11111111 == buf1[0]);
303 assert(0x22222222 == buf2[0]);
304 map1->release();
305 map2->release();
306 md1->release();
307 md2->release();
308 }
309 #endif
310
311 result = IOMultMemoryDescriptorTest(newValue);
312 if (result) return (result);
313
314 IOGeneralMemoryDescriptor * md;
315 vm_offset_t data[2];
316 vm_size_t bsize = 16*1024*1024;
317 vm_size_t srcsize, srcoffset, mapoffset, size;
318 kern_return_t kr;
319
320 kr = vm_allocate(kernel_map, &data[0], bsize, VM_FLAGS_ANYWHERE);
321 vm_inherit(kernel_map, data[0] + ptoa(1), ptoa(1), VM_INHERIT_NONE);
322 vm_inherit(kernel_map, data[0] + ptoa(16), ptoa(4), VM_INHERIT_NONE);
323
324 IOLog("data 0x%lx, 0x%lx\n", (long)data[0], (long)data[1]);
325
326 uint32_t idx, offidx;
327 for (idx = 0; idx < (bsize / sizeof(uint32_t)); idx++)
328 {
329 ((uint32_t*)data[0])[idx] = idx;
330 }
331
332 for (srcoffset = 0; srcoffset < bsize; srcoffset = ((srcoffset << 2) + 0x40c))
333 {
334 for (srcsize = 4; srcsize < (bsize - srcoffset - 1); srcsize = ((srcsize << 2) + 0x3fc))
335 {
336 IOAddressRange ranges[3];
337 uint32_t rangeCount = 1;
338
339 bzero(&ranges[0], sizeof(ranges));
340 ranges[0].address = data[0] + srcoffset;
341 ranges[0].length = srcsize;
342
343 if (srcsize > ptoa(5))
344 {
345 ranges[0].length = 7634;
346 ranges[1].length = 9870;
347 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
348 ranges[1].address = ranges[0].address + ranges[0].length;
349 ranges[2].address = ranges[1].address + ranges[1].length;
350 rangeCount = 3;
351 }
352 else if ((srcsize > ptoa(2)) && !(page_mask & srcoffset))
353 {
354 ranges[0].length = ptoa(1);
355 ranges[1].length = ptoa(1);
356 ranges[2].length = srcsize - ranges[0].length - ranges[1].length;
357 ranges[0].address = data[0] + srcoffset + ptoa(1);
358 ranges[1].address = data[0] + srcoffset;
359 ranges[2].address = ranges[0].address + ranges[0].length;
360 rangeCount = 3;
361 }
362
363 md = OSDynamicCast(IOGeneralMemoryDescriptor,
364 IOMemoryDescriptor::withAddressRanges(&ranges[0], rangeCount, kIODirectionInOut, kernel_task));
365 assert(md);
366
367 IOLog("IOMemoryDescriptor::withAddressRanges [0x%lx @ 0x%lx]\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx],\n[0x%llx, 0x%llx]\n",
368 (long) srcsize, (long) srcoffset,
369 (long long) ranges[0].address - data[0], (long long) ranges[0].length,
370 (long long) ranges[1].address - data[0], (long long) ranges[1].length,
371 (long long) ranges[2].address - data[0], (long long) ranges[2].length);
372
373 if (kIOReturnSuccess == kr)
374 {
375 for (mapoffset = 0; mapoffset < srcsize; mapoffset = ((mapoffset << 1) + 0xf00))
376 {
377 for (size = 4; size < (srcsize - mapoffset - 1); size = ((size << 2) + 0x200))
378 {
379 IOMemoryMap * map;
380 mach_vm_address_t addr = 0;
381 uint32_t data;
382
383 // IOLog("<mapRef [0x%lx @ 0x%lx]\n", (long) size, (long) mapoffset);
384
385 map = md->createMappingInTask(kernel_task, 0, kIOMapAnywhere, mapoffset, size);
386 if (map) addr = map->getAddress();
387 else kr = kIOReturnError;
388
389 // IOLog(">mapRef 0x%x %llx\n", kr, addr);
390
391 if (kIOReturnSuccess != kr) break;
392 kr = md->prepare();
393 if (kIOReturnSuccess != kr)
394 {
395 panic("prepare() fail 0x%x\n", kr);
396 break;
397 }
398 for (idx = 0; idx < size; idx += sizeof(uint32_t))
399 {
400 offidx = (idx + mapoffset + srcoffset);
401 if ((srcsize <= ptoa(5)) && (srcsize > ptoa(2)) && !(page_mask & srcoffset))
402 {
403 if (offidx < ptoa(2)) offidx ^= ptoa(1);
404 }
405 offidx /= sizeof(uint32_t);
406
407 if (offidx != ((uint32_t*)addr)[idx/sizeof(uint32_t)])
408 {
409 panic("vm mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
410 kr = kIOReturnBadMedia;
411 }
412 else
413 {
414 if (sizeof(data) != md->readBytes(mapoffset + idx, &data, sizeof(data))) data = 0;
415 if (offidx != data)
416 {
417 panic("phys mismatch md %p map %p, @ 0x%x, 0x%lx, 0x%lx, \n", md, map, idx, (long) srcoffset, (long) mapoffset);
418 kr = kIOReturnBadMedia;
419 }
420 }
421 }
422 md->complete();
423 map->release();
424 // IOLog("unmapRef %llx\n", addr);
425 }
426 if (kIOReturnSuccess != kr) break;
427 }
428 }
429 md->release();
430 if (kIOReturnSuccess != kr) break;
431 }
432 if (kIOReturnSuccess != kr) break;
433 }
434
435 if (kIOReturnSuccess != kr) IOLog("FAIL: src 0x%lx @ 0x%lx, map 0x%lx @ 0x%lx\n",
436 (long) srcsize, (long) srcoffset, (long) size, (long) mapoffset);
437
438 assert(kr == kIOReturnSuccess);
439
440 vm_deallocate(kernel_map, data[0], bsize);
441 // vm_deallocate(kernel_map, data[1], size);
442
443 return (0);
444 }
445
446 #endif /* DEVELOPMENT || DEBUG */