/*
 * Copyright (c) 2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
// 45678901234567890123456789012345678901234567890123456789012345678901234567890
#include "IOCopyMapper.h"
#include <sys/sysctl.h>

#if 0
#define DEBG(fmt, args...)      { kprintf(fmt, ## args); }
#else
#define DEBG(fmt, args...)      {}
#endif
extern "C" {
extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
extern void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size);
extern unsigned int vm_lopage_max_count;
extern unsigned int vm_himemory_mode;
}
#define super IOMapper

OSDefineMetaClassAndStructors(IOCopyMapper, IOMapper);
// Remember no value can be bigger than 31 bits as the sign bit indicates
// that this entry is valid to the hardware and that would be bad if it wasn't
typedef struct FreeDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1,
    /* bool */ fInUse : 1, // Allocated but not inserted yet
    /* bool */        : 5, // Align size on nibble boundary for debugging
    /* uint */ fSize  : 5,
    /* uint */        : 2,
    /* uint */ fNext  :18; // offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fNext  :18, // offset of FreeDARTEntry's
    /* uint */        : 2,
    /* uint */ fSize  : 5,
    /* bool */        : 5, // Align size on nibble boundary for debugging
    /* bool */ fInUse : 1, // Allocated but not inserted yet
    /* bool */ fValid : 1;
#endif

    unsigned int
#if __BIG_ENDIAN__
    /* uint */        :14,
    /* uint */ fPrev  :18; // offset of FreeDARTEntry's

#elif __LITTLE_ENDIAN__
    /* uint */ fPrev  :18, // offset of FreeDARTEntry's
    /* uint */        :14;
#endif
} FreeDARTEntry;
typedef struct ActiveDARTEntry {
#if __BIG_ENDIAN__
    unsigned int
    /* bool */ fValid : 1, // Must be set to one if valid
    /* uint */ fPPNum :31; // ppnum_t page of translation
#define ACTIVEDARTENTRY(page) { true, page }

#elif __LITTLE_ENDIAN__
    unsigned int
    /* uint */ fPPNum :31, // ppnum_t page of translation
    /* bool */ fValid : 1; // Must be set to one if valid
#define ACTIVEDARTENTRY(page) { page, true }

#endif
} ActiveDARTEntry;

#define kActivePerFree (sizeof(freeDART[0]) / sizeof(ActiveDARTEntry))
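
// The DART table is viewed two ways: while a block is free, its leading slots
// are overlaid with a FreeDARTEntry (a doubly-linked free-list node carrying
// the block's zone in fSize); once allocated, every slot is an ActiveDARTEntry
// holding one page translation. With the 32-bit words above, sizeof(FreeDARTEntry)
// is 8 and sizeof(ActiveDARTEntry) is 4, so kActivePerFree evaluates to 2:
// free-list indices count FreeDARTEntry-sized units and are multiplied by
// kActivePerFree to get a page index into fMappings.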
static SYSCTL_UINT(_kern, OID_AUTO, copyregionmax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        NULL, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, lowpagemax,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_lopage_max_count, 0, "");

static SYSCTL_UINT(_kern, OID_AUTO, himemorymode,
        CTLFLAG_RD | CTLFLAG_NOAUTO | CTLFLAG_KERN,
        &vm_himemory_mode, 0, "");
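
// initHardware() below sizes the mapper from the bounce pool returned by
// ml_get_bouncepool_info() and derives the number of buddy zones from it.
// A purely illustrative example, assuming a 16 MB pool, 4 KB pages and
// kTransPerPage == PAGE_SIZE / sizeof(ActiveDARTEntry) == 1024 (the real
// constants live in IOCopyMapper.h):
//
//     atop_32(size)  = 4096 translations
//     dartSizePages  = (4096 + 1023) / 1024 = 4 table pages
//     canMapPages    = 4 * 1024 = 4096
//     fNumZones      = 13 shifts to exhaust 4096, minus 3 = 10
//
// so the largest free block spans the whole region and the smallest zone
// holds blocks of kMinZoneSize pages.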
bool IOCopyMapper::initHardware(IOService * provider)
{
    UInt32 dartSizePages = 0;

    vm_offset_t phys_addr;
    vm_size_t   size;
    ml_get_bouncepool_info(&phys_addr, &size);

    if (!size)
        return false;

    fBufferPage = atop_32(phys_addr);
    dartSizePages = (atop_32(size) + kTransPerPage - 1) / kTransPerPage;

    fTableLock = IOLockAlloc();

    if (!fTableLock)
        return false;

    if (!allocTable(dartSizePages * kMapperPage))
        return false;

    UInt32 canMapPages = dartSizePages * kTransPerPage;
    fMapperRegionSize = canMapPages;
    for (fNumZones = 0; canMapPages; fNumZones++)
        canMapPages >>= 1;
    fNumZones -= 3; // correct for overshoot and minimum 16K pages allocation

    invalidateDART(0, fMapperRegionSize);

    breakUp(0, fNumZones, 0);
    ((FreeDARTEntry *) fTable)->fInUse = true;

    fMapperRegionUsed    = kMinZoneSize;
    fMapperRegionMaxUsed = fMapperRegionUsed;

    sysctl__kern_copyregionmax.oid_arg1 = &fMapperRegionMaxUsed;

    sysctl_register_oid(&sysctl__kern_copyregionmax);
    sysctl_register_oid(&sysctl__kern_lowpagemax);
    sysctl_register_oid(&sysctl__kern_himemorymode);

    fDummyPage = IOMallocAligned(0x1000, 0x1000);
    fDummyPageNumber =
        pmap_find_phys(kernel_pmap, (addr64_t) (uintptr_t) fDummyPage);

    return true;
}
void IOCopyMapper::free()
{
    if (fDummyPage) {
        IOFreeAligned(fDummyPage, 0x1000);
        fDummyPage = 0;
        fDummyPageNumber = 0;
    }

    if (fTableLock) {
        IOLockFree(fTableLock);
        fTableLock = 0;
    }

    super::free();
}
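
// breakUp() seeds the buddy free lists. Starting from a block in zone
// endIndex, each pass halves the block: the upper half (the "tail") becomes
// the sole member of the next smaller zone's free list, and the lower half is
// halved again until zone startIndex is reached. initHardware() calls
// breakUp(0, fNumZones, 0), which leaves exactly one free block on every
// zone's list plus the smallest block at offset 0, which is then marked
// fInUse so that page 0 is never handed out.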
// Must be called while locked
void IOCopyMapper::breakUp(unsigned startIndex, unsigned endIndex, unsigned freeInd)
{
    unsigned int zoneSize;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    do {
        // Need to break up bigger blocks of memory till we get one in our
        // desired zone.
        endIndex--;
        zoneSize = (kMinZoneSize/2 << endIndex);
        ppnum_t tail = freeInd + zoneSize;

        DEBG("breakup z %d start %x tail %x\n", endIndex, freeInd, tail);

        // By definition free lists must be empty
        fFreeLists[endIndex] = tail;
        freeDART[tail].fSize = endIndex;
        freeDART[tail].fNext = freeDART[tail].fPrev = 0;
    } while (endIndex != startIndex);

    freeDART[freeInd].fSize = endIndex;
}
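
// iovmAlloc() hands out DART space in whole zones: the request is rounded up
// to kMinZoneSize, the smallest zone that fits is located, and the first
// non-empty free list at or above that zone supplies a block (sleeping on
// fFreeLists until iovmFree() posts a wakeup if every list is empty). An
// oversized block is split back down with breakUp(). Every page of the new
// block initially maps the dummy page; iovmInsert() later replaces those
// placeholders with the client's real physical pages. The value returned is
// a page number offset by fBufferPage, i.e. an address inside the bounce
// region that mapAddr() knows how to translate.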
// Zero is never a valid page to return
ppnum_t IOCopyMapper::iovmAlloc(IOItemCount pages)
{
    unsigned int zone, zoneSize, z, cnt;
    ppnum_t next, ret = 0;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    // Can't alloc anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't alloc anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        panic("iovmAlloc 0x%lx", pages);

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    {
        IOLockLock(fTableLock);

        for (;;) {
            for (z = zone; z < fNumZones; z++) {
                if ( (ret = fFreeLists[z]) )
                    break;
            }
            if (ret)
                break;

            // Nothing free in any usable zone, wait for an iovmFree()
            IOLockSleep(fTableLock, fFreeLists, THREAD_UNINT);
        }

        // If we didn't find an entry of our size then break up the free block
        // that we did find.
        if (zone != z)
        {
            DEBG("breakup %d, %d, 0x%x\n", zone, z, ret);
            breakUp(zone, z, ret);
        }

        freeDART[ret].fInUse = true;    // Mark entry as In Use
        next = freeDART[ret].fNext;
        DEBG("va: 0x%lx, %ld, ret %x next %x\n",
             (ret * kActivePerFree) + fBufferPage, pages, ret, next);

        fFreeLists[z] = next;
        if (next)
            freeDART[next].fPrev = 0;

        // ret is free list offset not page offset;
        ret *= kActivePerFree;

        ActiveDARTEntry pageEntry = ACTIVEDARTENTRY(fDummyPageNumber);
        for (cnt = 0; cnt < pages; cnt++) {
            ActiveDARTEntry *activeDART = &fMappings[ret + cnt];
            *activeDART = pageEntry;
        }

        fMapperRegionUsed += pages;
        if (fMapperRegionUsed > fMapperRegionMaxUsed)
            fMapperRegionMaxUsed = fMapperRegionUsed;

        IOLockUnlock(fTableLock);
    }

    if (ret)
        ret += fBufferPage;

    return ret;
}
void IOCopyMapper::invalidateDART(ppnum_t pnum, IOItemCount size)
{
    bzero((void *) &fMappings[pnum], size * sizeof(fMappings[0]));
}
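
// iovmFree() is the buddy-coalescing half of the allocator. The block's
// translations are zeroed (clearing the hardware valid bits), the address is
// converted back to a free-list index, and the block is merged upward: at
// each zone z the candidate buddy sits at addr ^ (kMinZoneSize/2 << z), i.e.
// buddies differ only in the single bit corresponding to the block size, and
// merging continues only while that buddy is itself free, not reserved
// (fInUse), and exactly the same size (fSize == z). The merged block keeps
// the lower of the two indices and is finally pushed on the head of its
// zone's free list before waking any iovmAlloc() waiters.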
void IOCopyMapper::iovmFree(ppnum_t addr, IOItemCount pages)
{
    unsigned int zone, zoneSize, z;
    FreeDARTEntry *freeDART = (FreeDARTEntry *) fTable;

    if (addr < fBufferPage)
        IOPanic("addr < fBufferPage");
    addr -= fBufferPage;

    // Can't free anything of less than minimum
    if (pages < kMinZoneSize)
        pages = kMinZoneSize;

    // Can't free anything bigger than 1/2 table
    if (pages >= fMapperRegionSize/2)
        return;

    // Find the appropriate zone for this allocation
    for (zone = 0, zoneSize = kMinZoneSize; pages > zoneSize; zone++)
        zoneSize <<= 1;

    // Grab lock that protects the dart
    IOLockLock(fTableLock);

    invalidateDART(addr, pages);

    addr /= kActivePerFree;

    // We are freeing a block, check to see if pairs are available for
    // coalescing. We will walk up the entire chain if we can.
    for (z = zone; z < fNumZones; z++) {
        ppnum_t pair = addr ^ (kMinZoneSize/2 << z);    // Find pair address
        if (freeDART[pair].fValid || freeDART[pair].fInUse || (freeDART[pair].fSize != z))
            break;

        // The paired alloc entry is free if we are here
        ppnum_t next = freeDART[pair].fNext;
        ppnum_t prev = freeDART[pair].fPrev;

        // Remove the pair from its freeList
        if (prev)
            freeDART[prev].fNext = next;
        else
            fFreeLists[z] = next;

        if (next)
            freeDART[next].fPrev = prev;

        // Sort the addr and the pair, keeping the lower address
        if (pair < addr)
            addr = pair;
    }

    DEBG("vf: 0x%lx, %ld, z %d, head %lx, new %x\n",
         addr * kActivePerFree + fBufferPage, pages, z, fFreeLists[z], addr);

    // Add the allocation entry into its free list and re-init it
    freeDART[addr].fSize = z;
    freeDART[addr].fNext = fFreeLists[z];
    if (fFreeLists[z])
        freeDART[fFreeLists[z]].fPrev = addr;
    freeDART[addr].fPrev = 0;
    fFreeLists[z] = addr;

    fMapperRegionUsed -= pages;

    IOLockWakeup(fTableLock, fFreeLists, /* oneThread */ false);

    IOLockUnlock(fTableLock);
}
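
// mapAddr() translates a bounce-region I/O address back to the real physical
// address: addresses below the region, or beyond it once rebased, pass
// through untouched; otherwise the DART entry for the page supplies the
// physical page number and the low PAGE_MASK bits are carried across.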
addr64_t IOCopyMapper::mapAddr(IOPhysicalAddress addr)
{
    if (addr < ptoa_32(fBufferPage))
        return (addr64_t) addr;        // Not mapped by us anyway

    addr -= ptoa_32(fBufferPage);
    if (addr >= ptoa_32(fMapperRegionSize))
        return (addr64_t) addr;        // Not mapped by us anyway

    ActiveDARTEntry *activeDART = (ActiveDARTEntry *) fTable;
    UInt offset = addr & PAGE_MASK;

    ActiveDARTEntry mappedPage = activeDART[atop_32(addr)];
    if (mappedPage.fValid)
        return (ptoa_64(mappedPage.fPPNum) | offset);

    panic("%s::mapAddr(0x%08lx) not mapped for I/O\n", getName(), addr);
    return 0;
}
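
// The iovmInsert() overloads fill in the real translations for a range that
// iovmAlloc() reserved (and temporarily pointed at the dummy page). All three
// rebase the I/O address to a table index and then write one ActiveDARTEntry
// per page; they differ only in whether the source is a single page, a plain
// ppnum_t array, or a upl_page_info_t array.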
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset, ppnum_t page)
{
    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];
    ActiveDARTEntry entry = ACTIVEDARTENTRY(page);

    *activeDART = entry;
}
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              ppnum_t *pageList, IOItemCount pageCount)
{
    IOItemCount i;

    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i]);
        activeDART[i] = entry;
    }
}
void IOCopyMapper::iovmInsert(ppnum_t addr, IOItemCount offset,
                              upl_page_info_t *pageList, IOItemCount pageCount)
{
    IOItemCount i;

    addr -= fBufferPage;
    addr += offset;    // Add the offset page to the base address

    ActiveDARTEntry *activeDART = &fMappings[addr];

    for (i = 0; i < pageCount; i++)
    {
        ActiveDARTEntry entry = ACTIVEDARTENTRY(pageList[i].phys_addr);
        activeDART[i] = entry;
    }
}
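
// A minimal sketch of the expected call sequence from a client of this
// mapper (illustrative only; real callers reach these methods through the
// IOMapper interface and the DMA/memory-descriptor machinery):
//
//     ppnum_t iova = mapper->iovmAlloc(pageCount);         // reserve DART space
//     mapper->iovmInsert(iova, 0, pageList, pageCount);    // install real pages
//     addr64_t phys = mapper->mapAddr(ptoa_32(iova));      // translate if needed
//     /* ... perform the I/O ... */
//     mapper->iovmFree(iova, pageCount);                   // release and coalesce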