/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include "IOCopyMapper.h"
#include <sys/sysctl.h>

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

extern "C" {
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t   *size);
extern unsigned int vm_lopage_max_count;
extern unsigned int vm_himemory_mode;
}

#define super IOMapper

OSDefineMetaClassAndStructors(IOCopyMapper, IOMapper);

// Note that no field in this structure may reach the top bit: to the hardware,
// a set sign bit marks the entry as valid, so a free-list entry must never
// set it accidentally.
typedef struct FreeDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,
    /* bool */ fInUse : 1,  // Allocated but not inserted yet
    /* bool */        : 5,  // Align size on nibble boundary for debugging
    /* uint */ fSize  : 5,
    /* uint */        : 2,
    /* uint */ fNext  :18;  // offset, in FreeDARTEntry units

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fNext  :18,  // offset, in FreeDARTEntry units
    /* uint */        : 2,
    /* uint */ fSize  : 5,
    /* bool */        : 5,  // Align size on nibble boundary for debugging
    /* bool */ fInUse : 1,  // Allocated but not inserted yet
    /* bool */ fValid : 1;
#endif
#if __BIG_ENDIAN__
    unsigned int
    /* uint */        :14,
    /* uint */ fPrev  :18;  // offset, in FreeDARTEntry units

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPrev  :18,  // offset, in FreeDARTEntry units
    /* uint */        :14;
#endif
} FreeDARTEntry;

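/*
 * Free entries overlay the same storage as the hardware DART table: each
 * FreeDARTEntry covers two 32-bit translation slots and doubles as a node
 * on a doubly linked buddy free list, with fSize recording the zone
 * (power-of-two block size) the block belongs to. A rough sketch of the
 * overlay, given the 4-byte ActiveDARTEntry defined below:
 *
 *     word 0:  fValid | fInUse | fSize | fNext   (flags, zone, forward link)
 *     word 1:                            fPrev   (back link)
 *
 * Because fValid stays clear while a block sits on a free list, the
 * hardware sees both words as invalid translations.
 */
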
typedef struct ActiveDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,  // Must be set to one if valid
    /* uint */ fPPNum :31;  // ppnum_t page of translation
#define ACTIVEDARTENTRY(page) { true, page }

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPPNum :31,  // ppnum_t page of translation
    /* bool */ fValid : 1;  // Must be set to one if valid
#define ACTIVEDARTENTRY(page) { page, true }

#endif
} ActiveDARTEntry;

#define kActivePerFree (sizeof(freeDART[0]) / sizeof(ActiveDARTEntry))
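
/*
 * A worked example of the ratio above (assuming the structures as declared
 * in this file): FreeDARTEntry is two unsigned ints (8 bytes) and
 * ActiveDARTEntry is one (4 bytes), so kActivePerFree evaluates to 2.
 * Free-list indices therefore count pairs of translation slots, which is
 * why iovmAlloc() multiplies by kActivePerFree and iovmFree() divides by it.
 */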

static SYSCTL_UINT(_kern, OID_AUTO, copyregionmax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        NULL, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, lowpagemax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_lopage_max_count, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, himemorymode,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_himemory_mode, 0, "");

bool IOCopyMapper::initHardware(IOService * provider)
{
    UInt32 dartSizePages = 0;

    vm_offset_t phys_addr;
    vm_size_t   size;
    ml_get_bouncepool_info(&phys_addr, &size);

    if (!size)
        return (false);

    fBufferPage = atop_32(phys_addr);
    dartSizePages = (atop_32(size) + kTransPerPage - 1) / kTransPerPage;

    fTableLock = IOLockAlloc();

    if (!fTableLock)
        return false;

    if (!allocTable(dartSizePages * kMapperPage))
        return false;

    UInt32 canMapPages = dartSizePages * kTransPerPage;
    fMapperRegionSize = canMapPages;
    for (fNumZones = 0; canMapPages; fNumZones++)
        canMapPages >>= 1;
    fNumZones -= 3; // correct for overshoot and the minimum 16K allocation

    invalidateDART(0, fMapperRegionSize);

    breakUp(0, fNumZones, 0);
    ((FreeDARTEntry *) fTable)->fInUse = true;

    fMapperRegionUsed    = kMinZoneSize;
    fMapperRegionMaxUsed = fMapperRegionUsed;

    sysctl__kern_copyregionmax.oid_arg1 = &fMapperRegionMaxUsed;

    sysctl_register_oid(&sysctl__kern_copyregionmax);
    sysctl_register_oid(&sysctl__kern_lowpagemax);
    sysctl_register_oid(&sysctl__kern_himemorymode);

    fDummyPage = IOMallocAligned(0x1000, 0x1000);
    fDummyPageNumber =
        pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) fDummyPage);

    return true;
}

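/*
 * Sizing sketch (hypothetical numbers; assumes 4K pages, 4-byte DART
 * entries so kTransPerPage == 1024, and kMinZoneSize == 4 pages, i.e. the
 * 16K minimum noted above): a 64MB bounce pool is 16384 pages, giving
 * dartSizePages == 16 and room for 16 * 1024 translations. The loop counts
 * the bits of canMapPages (15 for 16384) and the -3 correction leaves
 * fNumZones == 12, so the largest buddy block is 4 << 11 == 8192 pages,
 * half the region, which matches the 1/2-table limit enforced in
 * iovmAlloc().
 */
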
void IOCopyMapper::free()
{
    if (fDummyPage) {
        IOFreeAligned(fDummyPage, 0x1000);
        fDummyPage = 0;
        fDummyPageNumber = 0;
    }

    if (fTableLock) {
        IOLockFree(fTableLock);
        fTableLock = 0;
    }

    super::free();
}

// Must be called while locked
void IOCopyMapper::breakUp(unsigned startIndex, unsigned endIndex, unsigned freeInd)
{
    unsigned int   zoneSize;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    do {
        // Need to break up bigger blocks of memory till we get one in our
        // desired zone.
        endIndex--;
        zoneSize = (kMinZoneSize/2 << endIndex);
        ppnum_t tail = freeInd + zoneSize;

        DEBG("breakup z %d start %x tail %x\n", endIndex, freeInd, tail);

        // By definition free lists must be empty
        fFreeLists[endIndex] = tail;
        freeDART[tail].fSize = endIndex;
        freeDART[tail].fNext = freeDART[tail].fPrev = 0;
    } while (endIndex != startIndex);
    freeDART[freeInd].fSize = endIndex;
}

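/*
 * Break-up sketch (hypothetical call, under the same kMinZoneSize == 4
 * assumption): breakUp(0, 2, 0) splits a zone-2 block (16 pages, 8
 * FreeDARTEntry units) at offset 0. The first pass puts its upper half
 * (offset 4, zone 1) on fFreeLists[1]; the second puts offset 2 (zone 0) on
 * fFreeLists[0], leaving the zone-0 block at offset 0 for the caller.
 * Offsets are in FreeDARTEntry units, hence the kMinZoneSize/2 scaling.
 */
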
// Zero is never a valid page to return
ppnum_t IOCopyMapper::iovmAlloc(IOItemCount pages)
{
    unsigned int zone, zoneSize, z, cnt;
    ppnum_t next, ret = 0;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    // Can't alloc anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't alloc anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
    {
        panic("iovmAlloc 0x%x", pages);
        return 0;
    }

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    {
        IOLockLock(fTableLock);

        for (;;) {
            for (z = zone; z < fNumZones; z++) {
                if ( (ret = fFreeLists[z]) )
                    break;
            }
            if (ret)
                break;

            fFreeSleepers++;
            IOLockSleep(fTableLock, fFreeLists, THREAD_UNINT);
            fFreeSleepers--;
        }

        // If we didn't find an entry of our size then break up the free
        // block that we did find.
        if (zone != z)
        {
            DEBG("breakup %d, %d, 0x%x\n", zone, z, ret);
            breakUp(zone, z, ret);
        }

        freeDART[ret].fInUse = true;    // Mark entry as In Use
        next = freeDART[ret].fNext;
        DEBG("va: 0x%x, %d, ret %x next %x\n", (ret * kActivePerFree) + fBufferPage, pages, ret, next);

        fFreeLists[z] = next;
        if (next)
            freeDART[next].fPrev = 0;

        // ret is a free-list offset, not a page offset
        ret *= kActivePerFree;

        ActiveDARTEntry pageEntry = ACTIVEDARTENTRY(fDummyPageNumber);
        for (cnt = 0; cnt < pages; cnt++) {
            ActiveDARTEntry *activeDART = &fMappings[ret + cnt];
            *activeDART = pageEntry;
        }

        fMapperRegionUsed += pages;
        if (fMapperRegionUsed > fMapperRegionMaxUsed)
            fMapperRegionMaxUsed = fMapperRegionUsed;

        IOLockUnlock(fTableLock);
    }

    if (ret)
        ret += fBufferPage;

    return ret;
}

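/*
 * Allocation sketch (hypothetical request, same kMinZoneSize == 4
 * assumption): iovmAlloc(5) rounds up to zone 1 (8 pages). If fFreeLists[1]
 * is empty but fFreeLists[3] holds a 32-page block, breakUp(1, 3, ret)
 * carves it down to zone 1, the shrunken block is unlinked from the zone-3
 * list head, each requested slot is pointed at fDummyPageNumber so the
 * hardware never sees a stale translation, and the caller receives
 * ret + fBufferPage, ready for iovmInsert() to install real pages.
 */
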
void IOCopyMapper::invalidateDART(ppnum_t pnum, IOItemCount size)
{
    // Zeroing clears fValid, so the hardware sees these entries as unmapped.
    bzero((void *) &fMappings[pnum], size * sizeof(fMappings[0]));
}

void IOCopyMapper::iovmFree(ppnum_t addr, IOItemCount pages)
{
    unsigned int zone, zoneSize, z;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    if (addr < fBufferPage)
        IOPanic("addr < fBufferPage");
    addr -= fBufferPage;

    // Can't free anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't free anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        return;

    // Find the appropriate zone for this free
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    // Grab the lock that protects the DART
    IOLockLock(fTableLock);

    invalidateDART(addr, pages);

    addr /= kActivePerFree;

    // We are freeing a block; check to see if pairs are available for
    // coalescing. We will walk up the entire chain if we can.
    for (z = zone; z < fNumZones; z++) {
        ppnum_t pair = addr ^ (kMinZoneSize/2 << z);    // Find pair address
        if (freeDART[pair].fValid || freeDART[pair].fInUse || (freeDART[pair].fSize != z))
            break;

        // The paired alloc entry is free if we are here
        ppnum_t next = freeDART[pair].fNext;
        ppnum_t prev = freeDART[pair].fPrev;

        // Remove the pair from its free list
        if (prev)
            freeDART[prev].fNext = next;
        else
            fFreeLists[z] = next;

        if (next)
            freeDART[next].fPrev = prev;

        // Sort the addr and the pair
        if (addr > pair)
            addr = pair;
    }

    DEBG("vf: 0x%x, %d, z %d, head %x, new %x\n", addr * kActivePerFree + fBufferPage, pages, z, fFreeLists[z], addr);

    // Add the allocation entry onto its free list and re-init it
    freeDART[addr].fSize = z;
    freeDART[addr].fNext = fFreeLists[z];
    if (fFreeLists[z])
        freeDART[fFreeLists[z]].fPrev = addr;
    freeDART[addr].fPrev = 0;
    fFreeLists[z] = addr;

    fMapperRegionUsed -= pages;

    if (fFreeSleepers)
        IOLockWakeup(fTableLock, fFreeLists, /* oneThread */ false);

    IOLockUnlock(fTableLock);
}

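/*
 * Coalescing sketch (hypothetical offsets, same kMinZoneSize == 4
 * assumption): freeing a zone-0 block at free-list offset 6 probes its
 * buddy at 6 ^ 2 == 4. If that entry is neither valid nor in use and is
 * also zone 0, the pair merges into a zone-1 block at offset 4, whose buddy
 * is 4 ^ 4 == 0, and so on up the chain until a buddy is busy or of a
 * different size. The XOR works because buddies differ only in the single
 * bit that names their half of the parent block.
 */
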
addr64_t IOCopyMapper::mapAddr(IOPhysicalAddress addr)
{
    if (addr < ptoa_32(fBufferPage))
    {
        return (addr64_t) addr;     // Not mapped by us anyway
    }

    addr -= ptoa_32(fBufferPage);
    if (addr >= ptoa_32(fMapperRegionSize))
    {
        return (addr64_t) addr;     // Not mapped by us anyway
    }
    else
    {
        ActiveDARTEntry *activeDART = (ActiveDARTEntry *) fTable;
        UInt offset = addr & PAGE_MASK;

        ActiveDARTEntry mappedPage = activeDART[atop_32(addr)];
        if (mappedPage.fValid)
        {
            return (ptoa_64(mappedPage.fPPNum) | offset);
        }

        panic("%s::mapAddr(0x%08lx) not mapped for I/O\n", getName(), addr);
        return 0;
    }
}

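/*
 * Translation sketch (hypothetical addresses, 4K pages assumed): with
 * fBufferPage == 0x80000, mapAddr(0x80001234) lands in the copy region, so
 * the entry for relative page 1 is consulted; if it maps ppnum 0x1f00, the
 * result is ptoa_64(0x1f00) | 0x234. Addresses outside the region fall
 * through untranslated, since this mapper never remapped them.
 */
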
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page)
{
    addr -= fBufferPage;
    addr += offset;     // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];
    ActiveDARTEntry entry = ACTIVEDARTENTRY(page);
    *activeDART = entry;
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              ppnum_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;     // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i]);
        activeDART[i] = entry;
    }
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              upl_page_info_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;     // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i].phys_addr);
        activeDART[i] = entry;
    }
}

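/*
 * Typical caller sequence (a sketch of how the IOMapper interface above is
 * exercised; the surrounding driver code is hypothetical):
 *
 *     ppnum_t ioPage = mapper->iovmAlloc(pageCount);       // reserve I/O space
 *     mapper->iovmInsert(ioPage, 0, pageList, pageCount);  // install real pages
 *     // ... program the DMA engine with ptoa_64(ioPage) ...
 *     mapper->iovmFree(ioPage, pageCount);                 // invalidate and coalesce
 */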