/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890

#include "IOCopyMapper.h"
#include <sys/sysctl.h>

#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif

extern "C" {
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ml_get_bouncepool_info(
    vm_offset_t *phys_addr,
    vm_size_t   *size);
extern unsigned int vm_lopage_max_count;
extern unsigned int vm_himemory_mode;
}

#define super IOMapper

OSDefineMetaClassAndStructors(IOCopyMapper, IOMapper);

// Remember no value can be bigger than 31 bits as the sign bit indicates
// that this entry is valid to the hardware and that would be bad if it wasn't
typedef struct FreeDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,
    /* bool */ fInUse : 1,    // Allocated but not inserted yet
    /* bool */        : 5,    // Align size on nibble boundary for debugging
    /* uint */ fSize  : 5,
    /* uint */        : 2,
    /* uint */ fNext  :18;    // offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fNext  :18,    // offset of FreeDARTEntry's
    /* uint */        : 2,
    /* uint */ fSize  : 5,
    /* bool */        : 5,    // Align size on nibble boundary for debugging
    /* bool */ fInUse : 1,    // Allocated but not inserted yet
    /* bool */ fValid : 1;
#endif
#if __BIG_ENDIAN__
    unsigned int
    /* uint */        :14,
    /* uint */ fPrev  :18;    // offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPrev  :18,    // offset of FreeDARTEntry's
    /* uint */        :14;
#endif
} FreeDARTEntry;

typedef struct ActiveDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,    // Must be set to one if valid
    /* uint */ fPPNum :31;    // ppnum_t page of translation
#define ACTIVEDARTENTRY(page) { true, page }

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPPNum :31,    // ppnum_t page of translation
    /* bool */ fValid : 1;    // Must be set to one if valid
#define ACTIVEDARTENTRY(page) { page, true }

#endif
} ActiveDARTEntry;
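
// A FreeDARTEntry occupies two 32-bit words and therefore overlays a pair of
// ActiveDARTEntry slots in the table, so kActivePerFree works out to 2; free
// list offsets are counted in these entry pairs, which is why iovmAlloc() and
// iovmFree() scale by kActivePerFree when converting to and from page offsets.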
#define kActivePerFree (sizeof(freeDART[0]) / sizeof(ActiveDARTEntry))

static SYSCTL_UINT(_kern, OID_AUTO, copyregionmax,
    CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
    (unsigned int *)NULL, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, lowpagemax,
    CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
    &vm_lopage_max_count, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, himemorymode,
    CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
    &vm_himemory_mode, 0, "");

bool IOCopyMapper::initHardware(IOService * provider)
{
    UInt32 dartSizePages = 0;

    vm_offset_t phys_addr;
    vm_size_t   size;
    ml_get_bouncepool_info(&phys_addr, &size);

    if (!size)
        return (false);

    fBufferPage = atop_32(phys_addr);
    dartSizePages = (atop_32(size) + kTransPerPage - 1) / kTransPerPage;

    fTableLock = IOLockAlloc();

    if (!fTableLock)
        return false;

    if (!allocTable(dartSizePages * kMapperPage))
        return false;

    UInt32 canMapPages = dartSizePages * kTransPerPage;
    fMapperRegionSize = canMapPages;
    for (fNumZones = 0; canMapPages; fNumZones++)
        canMapPages >>= 1;
    fNumZones -= 3;    // correct for overshoot and minimum 16K pages allocation
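    // Zone z tracks free blocks of (kMinZoneSize << z) pages.  Illustrative
    // numbers (assuming 4K pages and kMinZoneSize == 4, the 16K minimum noted
    // above): a 16 MB bounce pool gives canMapPages == 4096, the loop above
    // leaves fNumZones == 13, and the -3 correction yields 10 zones with block
    // sizes of 4, 8, ..., 2048 pages; the largest is half the region, matching
    // the limit enforced in iovmAlloc().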

    invalidateDART(0, fMapperRegionSize);

    breakUp(0, fNumZones, 0);
    ((FreeDARTEntry *) fTable)->fInUse = true;

    fMapperRegionUsed    = kMinZoneSize;
    fMapperRegionMaxUsed = fMapperRegionUsed;

    sysctl__kern_copyregionmax.oid_arg1 = &fMapperRegionMaxUsed;

    sysctl_register_oid(&sysctl__kern_copyregionmax);
    sysctl_register_oid(&sysctl__kern_lowpagemax);
    sysctl_register_oid(&sysctl__kern_himemorymode);

    fDummyPage = IOMallocAligned(0x1000, 0x1000);
    fDummyPageNumber =
        pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) fDummyPage);

    return true;
}

void IOCopyMapper::free()
{
    if (fDummyPage) {
        IOFreeAligned(fDummyPage, 0x1000);
        fDummyPage = 0;
        fDummyPageNumber = 0;
    }

    if (fTableLock) {
        IOLockFree(fTableLock);
        fTableLock = 0;
    }

    super::free();
}

// Must be called while locked
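// breakUp() splits a free block taken from zone endIndex until a block of
// zone startIndex remains at offset freeInd (offsets are free-list units):
// each pass pushes the upper half of the current block onto the next-smaller
// free list and keeps halving the lower half.  For example, breakUp(0, 2, 0)
// leaves the upper half of the zone-2 block on fFreeLists[1], the next
// quarter on fFreeLists[0], and hands back the first quarter at offset 0,
// now marked as a zone-0 block.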
void IOCopyMapper::breakUp(unsigned startIndex, unsigned endIndex, unsigned freeInd)
{
    unsigned int zoneSize;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    do {
        // Need to break up bigger blocks of memory till we get one in our
        // desired zone.
        endIndex--;
        zoneSize = (kMinZoneSize/2 << endIndex);
        ppnum_t tail = freeInd + zoneSize;

        DEBG("breakup z %d start %x tail %x\n", endIndex, freeInd, tail);

        // By definition free lists must be empty
        fFreeLists[endIndex] = tail;
        freeDART[tail].fSize = endIndex;
        freeDART[tail].fNext = freeDART[tail].fPrev = 0;
    } while (endIndex != startIndex);
    freeDART[freeInd].fSize = endIndex;
}

// Zero is never a valid page to return
ppnum_t IOCopyMapper::iovmAlloc(IOItemCount pages)
{
    unsigned int zone, zoneSize, z, cnt;
    ppnum_t next, ret = 0;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    // Can't alloc anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't alloc anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
    {
        panic("iovmAlloc 0x%lx", (long) pages);
        return 0;
    }

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    {
        IOLockLock(fTableLock);

        for (;;) {
            for (z = zone; z < fNumZones; z++) {
                if ( (ret = fFreeLists[z]) )
                    break;
            }
            if (ret)
                break;

            fFreeSleepers++;
            IOLockSleep(fTableLock, fFreeLists, THREAD_UNINT);
            fFreeSleepers--;
        }

        // If we didn't find an entry in our size then break up the free block
        // that we did find.
        if (zone != z)
        {
            DEBG("breakup %d, %d, 0x%x\n", zone, z, ret);
            breakUp(zone, z, ret);
        }

        freeDART[ret].fInUse = true;    // Mark entry as In Use
        next = freeDART[ret].fNext;
        DEBG("va: 0x%lx, %ld, ret %x next %x\n", (ret * kActivePerFree) + fBufferPage, pages, ret, next);

        fFreeLists[z] = next;
        if (next)
            freeDART[next].fPrev = 0;

        // ret is a free-list offset, not a page offset
        ret *= kActivePerFree;

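        // Point every page of the new allocation at the dummy page for now:
        // the entries are valid to the hardware but harmless until
        // iovmInsert() installs the real target pages.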
        ActiveDARTEntry pageEntry = ACTIVEDARTENTRY(fDummyPageNumber);
        for (cnt = 0; cnt < pages; cnt++) {
            ActiveDARTEntry *activeDART = &fMappings[ret + cnt];
            *activeDART = pageEntry;
        }

        fMapperRegionUsed += pages;
        if (fMapperRegionUsed > fMapperRegionMaxUsed)
            fMapperRegionMaxUsed = fMapperRegionUsed;

        IOLockUnlock(fTableLock);
    }

    if (ret)
        ret += fBufferPage;

    return ret;
}

void IOCopyMapper::invalidateDART(ppnum_t pnum, IOItemCount size)
{
    bzero((void *) &fMappings[pnum], size * sizeof(fMappings[0]));
}

void IOCopyMapper::iovmFree(ppnum_t addr, IOItemCount pages)
{
    unsigned int zone, zoneSize, z;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    if (addr < fBufferPage)
        panic("addr < fBufferPage");
    addr -= fBufferPage;

    // Can't free anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't free anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        return;

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    // Grab lock that protects the dart
    IOLockLock(fTableLock);

    invalidateDART(addr, pages);

    addr /= kActivePerFree;

    // We are freeing a block, check to see if pairs are available for
    // coalescing. We will walk up the entire chain if we can.
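    // A block's buddy ("pair") sits at the block's own offset with the bit
    // for the current block size flipped, hence the XOR below (offsets here
    // are free-list units, so a zone-z block spans kMinZoneSize/2 << z of
    // them).  Coalescing stops at the first buddy that is still mapped
    // (fValid), allocated but not yet inserted (fInUse), or free at a
    // different size.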
    for (z = zone; z < fNumZones; z++) {
        ppnum_t pair = addr ^ (kMinZoneSize/2 << z);    // Find pair address
        if (freeDART[pair].fValid || freeDART[pair].fInUse || (freeDART[pair].fSize != z))
            break;

        // The paired alloc entry is free if we are here
        ppnum_t next = freeDART[pair].fNext;
        ppnum_t prev = freeDART[pair].fPrev;

        // Remove the pair from its freeList
        if (prev)
            freeDART[prev].fNext = next;
        else
            fFreeLists[z] = next;

        if (next)
            freeDART[next].fPrev = prev;

        // Sort the addr and the pair
        if (addr > pair)
            addr = pair;
    }

    DEBG("vf: 0x%lx, %ld, z %d, head %lx, new %x\n", addr * kActivePerFree + fBufferPage, pages, z, fFreeLists[z], addr);

    // Add the allocation entry into its free list and re-init it
    freeDART[addr].fSize = z;
    freeDART[addr].fNext = fFreeLists[z];
    if (fFreeLists[z])
        freeDART[fFreeLists[z]].fPrev = addr;
    freeDART[addr].fPrev = 0;
    fFreeLists[z] = addr;

    fMapperRegionUsed -= pages;

    if (fFreeSleepers)
        IOLockWakeup(fTableLock, fFreeLists, /* oneThread */ false);

    IOLockUnlock(fTableLock);
}

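// mapAddr() converts an I/O-space address inside the copy (bounce) window
// back to the real physical address recorded in the active DART entry;
// addresses that do not fall within the window are returned without
// translation.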
addr64_t IOCopyMapper::mapAddr(IOPhysicalAddress addr)
{
    if (addr < ptoa_32(fBufferPage))
    {
        return (addr64_t) addr;    // Not mapped by us anyway
    }

    addr -= ptoa_32(fBufferPage);
    if (addr >= ptoa_32(fMapperRegionSize))
    {
        return (addr64_t) addr;    // Not mapped by us anyway
    }
    else
    {
        ActiveDARTEntry *activeDART = (ActiveDARTEntry *) fTable;
        UInt offset = addr & PAGE_MASK;

        ActiveDARTEntry mappedPage = activeDART[atop_32(addr)];
        if (mappedPage.fValid)
        {
            return (ptoa_64(mappedPage.fPPNum) | offset);
        }

        panic("%s::mapAddr(0x%08lx) not mapped for I/O\n", getName(), (long) addr);
        return 0;
    }
}

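// The iovmInsert() variants install the real translations for an allocation
// returned by iovmAlloc(): addr is the I/O-space page the allocator handed
// back (hence the fBufferPage adjustment), offset is the page index within
// that allocation, and each target page is written as a valid ActiveDARTEntry.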
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];
    ActiveDARTEntry entry = ACTIVEDARTENTRY(page);
    *activeDART = entry;
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              ppnum_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i]);
        activeDART[i] = entry;
    }
}

void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              upl_page_info_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i].phys_addr);
        activeDART[i] = entry;
    }
}