/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include "IOCopyMapper.h"
#include <sys/sysctl.h>
#if 0
#define DEBG(fmt, args...)  { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)  {}
#endif
extern "C" {
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size);
extern unsigned int vm_lopage_max_count;
extern unsigned int vm_himemory_mode;
}
#define super IOMapper

OSDefineMetaClassAndStructors(IOCopyMapper, IOMapper);
// Remember no value can be bigger than 31 bits as the sign bit indicates
// that this entry is valid to the hardware and that would be bad if it wasn't
typedef struct FreeDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,
    /* bool */ fInUse : 1,	// Allocated but not inserted yet
    /* bool */        : 5,	// Align size on nibble boundary for debugging
    /* uint */ fSize  : 5,
    /* uint */        : 2,
    /* uint */ fNext  :18;	// offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fNext  :18,	// offset of FreeDARTEntry's
    /* uint */        : 2,
    /* uint */ fSize  : 5,
    /* bool */        : 5,	// Align size on nibble boundary for debugging
    /* bool */ fInUse : 1,	// Allocated but not inserted yet
    /* bool */ fValid : 1;
#endif

#if __BIG_ENDIAN__
    unsigned int
    /* uint */        :14,
    /* uint */ fPrev  :18;	// offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPrev  :18,	// offset of FreeDARTEntry's
    /* uint */        :14;
#endif
} FreeDARTEntry;
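
/*
 * Free blocks are chained on per-zone doubly-linked lists through
 * fNext/fPrev, which hold offsets in FreeDARTEntry units rather than
 * pointers. Offset 0 doubles as the list terminator; that is safe
 * because entry 0 is permanently marked fInUse by initHardware().
 */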
typedef struct ActiveDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,	// Must be set to one if valid
    /* uint */ fPPNum :31;	// ppnum_t page of translation
#define ACTIVEDARTENTRY(page) { true, page }

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPPNum :31,	// ppnum_t page of translation
    /* bool */ fValid : 1;	// Must be set to one if valid
#define ACTIVEDARTENTRY(page) { page, true }

#endif
} ActiveDARTEntry;
#define kActivePerFree (sizeof(freeDART[0]) / sizeof(ActiveDARTEntry))
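
/*
 * A FreeDARTEntry is two 32-bit words against the ActiveDARTEntry's one,
 * so kActivePerFree works out to 2: each free-list slot spans two
 * hardware translation entries, and iovmAlloc()/iovmFree() convert
 * between the two units with "ret *= kActivePerFree" and
 * "addr /= kActivePerFree" respectively.
 */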
static SYSCTL_UINT(_kern, OID_AUTO, copyregionmax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        NULL, 0, "");
static SYSCTL_UINT(_kern, OID_AUTO, lowpagemax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_lopage_max_count, 0, "");
static SYSCTL_UINT(_kern, OID_AUTO, himemorymode,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_himemory_mode, 0, "");
bool IOCopyMapper::initHardware(IOService * provider)
{
    UInt32 dartSizePages = 0;

    vm_offset_t phys_addr;
    vm_size_t   size;
    ml_get_bouncepool_info(&phys_addr, &size);

    fBufferPage = atop_32(phys_addr);
    dartSizePages = (atop_32(size) + kTransPerPage - 1) / kTransPerPage;

    fTableLock = IOLockAlloc();
    if (!fTableLock)
        return false;

    if (!allocTable(dartSizePages * kMapperPage))
        return false;

    UInt32 canMapPages = dartSizePages * kTransPerPage;
    fMapperRegionSize = canMapPages;
    for (fNumZones = 0; canMapPages; fNumZones++)
        canMapPages >>= 1;
    fNumZones -= 3;	// correct for overshoot and minimum 16K pages allocation

    invalidateDART(0, fMapperRegionSize);

    breakUp(0, fNumZones, 0);
    ((FreeDARTEntry *) fTable)->fInUse = true;

    fMapperRegionUsed    = kMinZoneSize;
    fMapperRegionMaxUsed = fMapperRegionUsed;

    sysctl__kern_copyregionmax.oid_arg1 = &fMapperRegionMaxUsed;

    sysctl_register_oid(&sysctl__kern_copyregionmax);
    sysctl_register_oid(&sysctl__kern_lowpagemax);
    sysctl_register_oid(&sysctl__kern_himemorymode);

    fDummyPage = IOMallocAligned(0x1000, 0x1000);
    fDummyPageNumber =
        pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) fDummyPage);

    return true;
}
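
/*
 * Worked example of the zone sizing above, assuming kMinZoneSize is
 * 4 pages (the 16K minimum with 4K pages): a 512-page region runs the
 * bit-counting loop 10 times, so fNumZones ends up as 7, covering
 * block sizes 4, 8, ..., 256 pages; the largest zone is half the
 * region, matching the 1/2-table limit enforced in iovmAlloc().
 */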
void IOCopyMapper::free()
{
    if (fDummyPage) {
        IOFreeAligned(fDummyPage, 0x1000);
        fDummyPage = 0;
        fDummyPageNumber = 0;
    }

    if (fTableLock) {
        IOLockFree(fTableLock);
        fTableLock = 0;
    }

    super::free();
}
// Must be called while locked
void IOCopyMapper::breakUp(unsigned startIndex, unsigned endIndex, unsigned freeInd)
{
    unsigned int zoneSize;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    do {
        // Need to break up bigger blocks of memory till we get one in our
        // desired zone
        endIndex--;
        zoneSize = (kMinZoneSize/2 << endIndex);
        ppnum_t tail = freeInd + zoneSize;

        DEBG("breakup z %d start %x tail %x\n", endIndex, freeInd, tail);

        // By definition free lists must be empty
        fFreeLists[endIndex] = tail;
        freeDART[tail].fSize = endIndex;
        freeDART[tail].fNext = freeDART[tail].fPrev = 0;
    } while (endIndex != startIndex);
    freeDART[freeInd].fSize = endIndex;
}
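
/*
 * Illustrative trace, again assuming kMinZoneSize == 4 so a zone-z
 * block covers (kMinZoneSize/2 << z) free-list slots: breakUp(0, 3, 0)
 * halves the zone-3 block at slot 0 three times, parking each tail on
 * its (empty) free list: slot 8 on fFreeLists[2], slot 4 on
 * fFreeLists[1], slot 2 on fFreeLists[0], leaving a zone-0 block at
 * slot 0 for the caller.
 */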
// Zero is never a valid page to return
ppnum_t IOCopyMapper::iovmAlloc(IOItemCount pages)
{
    unsigned int zone, zoneSize, z, cnt;
    ppnum_t next, ret = 0;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    // Can't alloc anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't alloc anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        panic("iovmAlloc 0x%x", pages);

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    IOLockLock(fTableLock);

    do {
        for (z = zone; z < fNumZones; z++) {
            if ( (ret = fFreeLists[z]) )
                break;
        }

        if (!ret) {
            // Nothing suitable is free yet; wait for an iovmFree() wakeup
            IOLockSleep(fTableLock, fFreeLists, THREAD_UNINT);
        }
    } while (!ret);

    // If we didn't find an entry in our size then break up the free block
    if (zone != z) {
        DEBG("breakup %d, %d, 0x%x\n", zone, z, ret);
        breakUp(zone, z, ret);
    }

    freeDART[ret].fInUse = true;	// Mark entry as In Use
    next = freeDART[ret].fNext;
    DEBG("va: 0x%x, %d, ret %x next %x\n",
         (ret * kActivePerFree) + fBufferPage, pages, ret, next);

    fFreeLists[z] = next;
    if (next)
        freeDART[next].fPrev = 0;

    // ret is free list offset not page offset;
    ret *= kActivePerFree;

    ActiveDARTEntry pageEntry = ACTIVEDARTENTRY(fDummyPageNumber);
    for (cnt = 0; cnt < pages; cnt++) {
        ActiveDARTEntry *activeDART = &fMappings[ret + cnt];
        *activeDART = pageEntry;
    }

    fMapperRegionUsed += pages;
    if (fMapperRegionUsed > fMapperRegionMaxUsed)
        fMapperRegionMaxUsed = fMapperRegionUsed;

    IOLockUnlock(fTableLock);

    ret += fBufferPage;

    return ret;
}
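
/*
 * A successful allocation is returned as a page number inside the
 * bounce-buffer region (free-list slot * kActivePerFree + fBufferPage),
 * with every page initially pointed at the dummy page; the real
 * translations arrive later via iovmInsert().
 */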
void IOCopyMapper::invalidateDART(ppnum_t pnum, IOItemCount size)
{
    bzero((void *) &fMappings[pnum], size * sizeof(fMappings[0]));
}
void IOCopyMapper::iovmFree(ppnum_t addr, IOItemCount pages)
{
    unsigned int zone, zoneSize, z;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    if (addr < fBufferPage)
        IOPanic("addr < fBufferPage");
    addr -= fBufferPage;

    // Can't free anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't free anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        return;

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    // Grab lock that protects the dart
    IOLockLock(fTableLock);

    invalidateDART(addr, pages);

    addr /= kActivePerFree;

    // We are freeing a block, check to see if pairs are available for
    // coalescing. We will walk up the entire chain if we can.
    for (z = zone; z < fNumZones; z++) {
        ppnum_t pair = addr ^ (kMinZoneSize/2 << z);	// Find pair address
        if (freeDART[pair].fValid || freeDART[pair].fInUse || (freeDART[pair].fSize != z))
            break;

        // The paired alloc entry is free if we are here
        ppnum_t next = freeDART[pair].fNext;
        ppnum_t prev = freeDART[pair].fPrev;

        // Remove the pair from its freeList
        if (prev)
            freeDART[prev].fNext = next;
        else
            fFreeLists[z] = next;

        if (next)
            freeDART[next].fPrev = prev;

        // Sort the addr and the pair
        if (addr > pair)
            addr = pair;
    }

    DEBG("vf: 0x%x, %d, z %d, head %x, new %x\n",
         addr * kActivePerFree + fBufferPage, pages, z, fFreeLists[z], addr);

    // Add the allocation entry into its free list and re-init it
    freeDART[addr].fSize = z;
    freeDART[addr].fNext = fFreeLists[z];
    if (fFreeLists[z])
        freeDART[fFreeLists[z]].fPrev = addr;
    freeDART[addr].fPrev = 0;
    fFreeLists[z] = addr;

    fMapperRegionUsed -= pages;

    IOLockWakeup(fTableLock, fFreeLists, /* oneThread */ false);

    IOLockUnlock(fTableLock);
}
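
/*
 * Coalescing example (illustrative, kMinZoneSize == 4): freeing the
 * zone-0 block at slot 2 computes pair = 2 ^ 2 = 0; if slot 0 is free
 * and also size 0, the pair is unlinked and the merged zone-1 block
 * sits at slot 0, so the walk retries with pair = 0 ^ 4 = 4 and keeps
 * climbing until a buddy is valid, in use, or of a different size.
 */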
addr64_t IOCopyMapper::mapAddr(IOPhysicalAddress addr)
{
    if (addr < ptoa_32(fBufferPage))
    {
        return (addr64_t) addr;	// Not mapped by us anyway
    }

    addr -= ptoa_32(fBufferPage);
    if (addr >= ptoa_32(fMapperRegionSize))
    {
        return (addr64_t) addr;	// Not mapped by us anyway
    }
    else
    {
        ActiveDARTEntry *activeDART = (ActiveDARTEntry *) fTable;
        UInt offset = addr & PAGE_MASK;

        ActiveDARTEntry mappedPage = activeDART[atop_32(addr)];
        if (mappedPage.fValid)
        {
            return (ptoa_64(mappedPage.fPPNum) | offset);
        }

        panic("%s::mapAddr(0x%08lx) not mapped for I/O\n", getName(), addr);
        return 0;
    }
}
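
/*
 * For example (with 4K pages), addr == ptoa_32(fBufferPage) + 0x3010
 * looks up DART slot atop_32(0x3010) == 3 and returns
 * ptoa_64(fPPNum) | 0x010, so the byte offset within the page passes
 * through the translation unchanged.
 */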
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page)
{
    addr -= fBufferPage;
    addr += offset;	// Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];
    ActiveDARTEntry entry = ACTIVEDARTENTRY(page);
    *activeDART = entry;
}
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              ppnum_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;	// Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i]);
        activeDART[i] = entry;
    }
}
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              upl_page_info_t *pageList, IOItemCount pageCount)
{
    addr -= fBufferPage;
    addr += offset;	// Add the offset page to the base address

    IOItemCount i;
    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i].phys_addr);
        activeDART[i] = entry;
    }
}
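
/*
 * Typical client sequence, sketched from the interfaces above: a DMA
 * setup calls iovmAlloc(n) for n contiguous I/O-space pages, then
 * iovmInsert() (single page or page-list forms) to install the real
 * physical pages, and finally iovmFree(addr, n) when the transfer
 * completes; mapAddr() translates a mapped I/O address back to its
 * physical address using the same table.
 */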