/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include "IOCopyMapper.h"
#include <sys/sysctl.h>

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

extern "C" {
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ml_get_bouncepool_info(
        vm_offset_t *phys_addr,
        vm_size_t   *size);
extern unsigned int vm_lopage_max_count;
extern unsigned int vm_himemory_mode;
}

#define super IOMapper

OSDefineMetaClassAndStructors(IOCopyMapper, IOMapper);

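/*
 * Rough picture of how a client drives this mapper (illustrative sketch only;
 * the actual call sites live in IOKit's DMA machinery, e.g. IOMemoryDescriptor
 * and IODMACommand, not in this file):
 *
 *   ppnum_t dartPage = mapper->iovmAlloc(pageCount);       // reserve a run of DART entries
 *   mapper->iovmInsert(dartPage, 0, pageList, pageCount);  // install the real translations
 *   // ... device DMAs to ptoa_64(dartPage) ...
 *   mapper->iovmFree(dartPage, pageCount);                 // invalidate and coalesce
 */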
// Remember no value can be bigger than 31 bits as the sign bit indicates
// that this entry is valid to the hardware and that would be bad if it wasn't
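//
// Free space in the table is tracked by overlaying FreeDARTEntry nodes on the
// (invalid) translations themselves: fSize holds the zone index (a zone-z block
// covers kMinZoneSize << z pages), and fNext/fPrev chain the node into the
// doubly-linked free list for that zone (fFreeLists[zone]).  The link fields
// are offsets counted in FreeDARTEntry units, not page numbers; zero terminates
// a list, which works because entry 0 is permanently marked fInUse in
// initHardware().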
typedef struct FreeDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,
    /* bool */ fInUse : 1,    // Allocated but not inserted yet
    /* bool */        : 5,    // Align size on nibble boundary for debugging
    /* uint */ fSize  : 5,
    /* uint */        : 2,
    /* uint */ fNext  :18;    // offset in FreeDARTEntry units

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fNext  :18,    // offset in FreeDARTEntry units
    /* uint */        : 2,
    /* uint */ fSize  : 5,
    /* bool */        : 5,    // Align size on nibble boundary for debugging
    /* bool */ fInUse : 1,    // Allocated but not inserted yet
    /* bool */ fValid : 1;
#endif
#if __BIG_ENDIAN__
    unsigned int
    /* uint */        :14,
    /* uint */ fPrev  :18;    // offset in FreeDARTEntry units

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPrev  :18,    // offset in FreeDARTEntry units
    /* uint */        :14;
#endif
} FreeDARTEntry;

typedef struct ActiveDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,    // Must be set to one if valid
    /* uint */ fPPNum :31;    // ppnum_t page of translation
#define ACTIVEDARTENTRY(page)    { true, page }

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPPNum :31,    // ppnum_t page of translation
    /* bool */ fValid : 1;    // Must be set to one if valid
#define ACTIVEDARTENTRY(page)    { page, true }

#endif
} ActiveDARTEntry;

#define kActivePerFree (sizeof(freeDART[0]) / sizeof(ActiveDARTEntry))
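// With the bit-field layouts above a FreeDARTEntry is two 32-bit words while an
// ActiveDARTEntry is one, so kActivePerFree evaluates to 2: every free-list node
// sits on top of (and accounts for) two DART translation entries.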

static SYSCTL_UINT(_kern, OID_AUTO, copyregionmax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        NULL, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, lowpagemax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_lopage_max_count, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, himemorymode,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_himemory_mode, 0, "");

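// initHardware() carves the bounce pool reported by ml_get_bouncepool_info()
// into the DART translation table: it sizes the table, computes the number of
// buddy zones from the total mappable page count, invalidates every entry,
// seeds the free lists with breakUp(), reserves entry 0, and publishes the
// kern.copyregionmax / kern.lowpagemax / kern.himemorymode sysctls.  The dummy
// page allocated at the end backs freshly allocated but not-yet-inserted
// translations.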
bool IOCopyMapper::initHardware(IOService * provider)
{
    UInt32 dartSizePages = 0;

    vm_offset_t phys_addr;
    vm_size_t   size;
    ml_get_bouncepool_info(&phys_addr, &size);

    if (!size)
        return (false);

    fBufferPage = atop_32(phys_addr);
    dartSizePages = (atop_32(size) + kTransPerPage - 1) / kTransPerPage;

    fTableLock = IOLockAlloc();

    if (!fTableLock)
        return false;

    if (!allocTable(dartSizePages * kMapperPage))
        return false;

    UInt32 canMapPages = dartSizePages * kTransPerPage;
    fMapperRegionSize = canMapPages;
    for (fNumZones = 0; canMapPages; fNumZones++)
        canMapPages >>= 1;
    fNumZones -= 3;    // correct for overshoot and minimum 16K pages allocation

    invalidateDART(0, fMapperRegionSize);

    breakUp(0, fNumZones, 0);
    ((FreeDARTEntry *) fTable)->fInUse = true;

    fMapperRegionUsed    = kMinZoneSize;
    fMapperRegionMaxUsed = fMapperRegionUsed;

    sysctl__kern_copyregionmax.oid_arg1 = &fMapperRegionMaxUsed;

    sysctl_register_oid(&sysctl__kern_copyregionmax);
    sysctl_register_oid(&sysctl__kern_lowpagemax);
    sysctl_register_oid(&sysctl__kern_himemorymode);

    fDummyPage = IOMallocAligned(0x1000, 0x1000);
    fDummyPageNumber =
        pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) fDummyPage);

    return true;
}

void IOCopyMapper::free()
{
    if (fDummyPage) {
        IOFreeAligned(fDummyPage, 0x1000);
        fDummyPage = 0;
        fDummyPageNumber = 0;
    }

    if (fTableLock) {
        IOLockFree(fTableLock);
        fTableLock = 0;
    }

    super::free();
}

// Must be called while locked
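//
// breakUp() is the split half of the buddy allocator: starting with the free
// block at offset freeInd (currently of zone endIndex), it repeatedly halves
// the block, pushing the upper half onto the free list of the next smaller
// zone (those lists are empty by definition when this is called), until the
// lower half reaches zone startIndex, which is what the caller keeps.  The
// offsets and zoneSize here are in FreeDARTEntry units, hence the
// kMinZoneSize/2 scaling.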
void IOCopyMapper::breakUp(unsigned startIndex, unsigned endIndex, unsigned freeInd)
{
    unsigned int zoneSize;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    do {
        // Need to break up bigger blocks of memory till we get one in our
        // desired zone.
        endIndex--;
        zoneSize = (kMinZoneSize/2 << endIndex);
        ppnum_t tail = freeInd + zoneSize;

        DEBG("breakup z %d start %x tail %x\n", endIndex, freeInd, tail);

        // By definition free lists must be empty
        fFreeLists[endIndex] = tail;
        freeDART[tail].fSize = endIndex;
        freeDART[tail].fNext = freeDART[tail].fPrev = 0;
    } while (endIndex != startIndex);
    freeDART[freeInd].fSize = endIndex;
}

// Zero is never a valid page to return
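//
// iovmAlloc() rounds the request up to a whole zone, then takes a block from
// the smallest non-empty free list at or above that zone, sleeping on
// fTableLock if every list is empty until iovmFree() wakes it.  A block taken
// from a larger zone is split back down with breakUp().  The covered
// translations are pointed at the dummy page until iovmInsert() supplies real
// physical pages, and the returned value is a DART page number offset by
// fBufferPage.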
ppnum_t IOCopyMapper::iovmAlloc(IOItemCount pages)
{
    unsigned int zone, zoneSize, z, cnt;
    ppnum_t next, ret = 0;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    // Can't alloc anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't alloc anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
    {
        panic("iovmAlloc 0x%x", pages);
        return 0;
    }

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    {
        IOLockLock(fTableLock);

        for (;;) {
            for (z = zone; z < fNumZones; z++) {
                if ( (ret = fFreeLists[z]) )
                    break;
            }
            if (ret)
                break;

            fFreeSleepers++;
            IOLockSleep(fTableLock, fFreeLists, THREAD_UNINT);
            fFreeSleepers--;
        }

        // If we didn't find an entry in our size then break up the free block
        // that we did find.
        if (zone != z)
        {
            DEBG("breakup %d, %d, 0x%x\n", zone, z, ret);
            breakUp(zone, z, ret);
        }

        freeDART[ret].fInUse = true;    // Mark entry as In Use
        next = freeDART[ret].fNext;
        DEBG("va: 0x%x, %d, ret %x next %x\n", (ret * kActivePerFree) + fBufferPage, pages, ret, next);

        fFreeLists[z] = next;
        if (next)
            freeDART[next].fPrev = 0;

        // ret is a free list offset, not a page offset
        ret *= kActivePerFree;

        ActiveDARTEntry pageEntry = ACTIVEDARTENTRY(fDummyPageNumber);
        for (cnt = 0; cnt < pages; cnt++) {
            ActiveDARTEntry *activeDART = &fMappings[ret + cnt];
            *activeDART = pageEntry;
        }

        fMapperRegionUsed += pages;
        if (fMapperRegionUsed > fMapperRegionMaxUsed)
            fMapperRegionMaxUsed = fMapperRegionUsed;

        IOLockUnlock(fTableLock);
    }

    if (ret)
        ret += fBufferPage;

    return ret;
}


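// Clear 'size' translations starting at table index 'pnum' so the hardware
// sees them as invalid (fValid == 0).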
void IOCopyMapper::invalidateDART(ppnum_t pnum, IOItemCount size)
{
    bzero((void *) &fMappings[pnum], size * sizeof(fMappings[0]));
}

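// iovmFree() is the coalescing half of the buddy allocator: after invalidating
// the freed translations it walks up the zones, finding each block's buddy by
// XORing the block offset with the zone size.  While the buddy is itself free
// (not valid, not in use) and of the same zone, it is unlinked from its free
// list and merged; the merged block then goes on the head of the final zone's
// free list and any threads sleeping in iovmAlloc() are woken.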
void IOCopyMapper::iovmFree(ppnum_t addr, IOItemCount pages)
{
    unsigned int zone, zoneSize, z;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    if (addr < fBufferPage)
        IOPanic("addr < fBufferPage");
    addr -= fBufferPage;

    // Can't free anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't free anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        return;

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    // Grab the lock that protects the DART
    IOLockLock(fTableLock);

    invalidateDART(addr, pages);

    addr /= kActivePerFree;

    // We are freeing a block, check to see if pairs are available for
    // coalescing. We will walk up the entire chain if we can.
    for (z = zone; z < fNumZones; z++) {
        ppnum_t pair = addr ^ (kMinZoneSize/2 << z);    // Find pair address
        if (freeDART[pair].fValid || freeDART[pair].fInUse || (freeDART[pair].fSize != z))
            break;

        // The paired alloc entry is free if we are here
        ppnum_t next = freeDART[pair].fNext;
        ppnum_t prev = freeDART[pair].fPrev;

        // Remove the pair from its freeList
        if (prev)
            freeDART[prev].fNext = next;
        else
            fFreeLists[z] = next;

        if (next)
            freeDART[next].fPrev = prev;

        // Sort the addr and the pair
        if (addr > pair)
            addr = pair;
    }

    DEBG("vf: 0x%x, %d, z %d, head %x, new %x\n", addr * kActivePerFree + fBufferPage, pages, z, fFreeLists[z], addr);

    // Add the allocation entry into its free list and re-init it
    freeDART[addr].fSize = z;
    freeDART[addr].fNext = fFreeLists[z];
    if (fFreeLists[z])
        freeDART[fFreeLists[z]].fPrev = addr;
    freeDART[addr].fPrev = 0;
    fFreeLists[z] = addr;

    fMapperRegionUsed -= pages;

    if (fFreeSleepers)
        IOLockWakeup(fTableLock, fFreeLists, /* oneThread */ false);

    IOLockUnlock(fTableLock);
}

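// mapAddr() translates an address in the mapper's window back to the system
// physical address held in the corresponding active entry; addresses outside
// the window are not remapped, and a window address whose entry is invalid
// panics.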
addr64_t IOCopyMapper::mapAddr(IOPhysicalAddress addr)
{
    if (addr < ptoa_32(fBufferPage))
    {
        return (addr64_t) addr;    // Not mapped by us anyway
    }

    addr -= ptoa_32(fBufferPage);
    if (addr >= ptoa_32(fMapperRegionSize))
    {
        return (addr64_t) addr;    // Not mapped by us anyway
    }
    else
    {
        ActiveDARTEntry *activeDART = (ActiveDARTEntry *) fTable;
        UInt offset = addr & PAGE_MASK;

        ActiveDARTEntry mappedPage = activeDART[atop_32(addr)];
        if (mappedPage.fValid)
        {
            return (ptoa_64(mappedPage.fPPNum) | offset);
        }

        panic("%s::mapAddr(0x%08lx) not mapped for I/O\n", getName(), addr);
        return 0;
    }
}

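// The three iovmInsert() overloads install the real translations for a block
// previously returned by iovmAlloc(): the DART page number is converted back
// to a table index (subtract fBufferPage, add the caller's page offset) and an
// ActiveDARTEntry is written for a single page, an array of ppnum_t, or an
// array of upl_page_info_t respectively.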
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];
    ActiveDARTEntry entry = ACTIVEDARTENTRY(page);
    *activeDART = entry;
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              ppnum_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i]);
        activeDART[i] = entry;
    }
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              upl_page_info_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i].phys_addr);
        activeDART[i] = entry;
    }
}
