2 * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 * These are the subroutines that manage the skip-list data structures used for the
32 * resident mappings for each pmap. We used to use a much simpler hash-based scheme,
33 * but it didn't scale well for 64-bit address spaces and multi-GB real memories.
34 * Here's a brief tutorial on skip-lists:
36 * The basic idea is that each mapping is on one or more singly-linked lists, sorted
37 * in increasing order by virtual address. The number of lists a mapping is on is an
38 * invariant property determined when the mapping is created, using an exponentially-
39 * distributed random number. Every mapping is on the first list. Ideally, each
40 * successive list has only 1/F as many nodes on it as the previous, where F is the
41 * "fanout." With a max of n lists, up to F**n nodes can be handled optimally.
43 * Searching, adding, and deleting from a skip-list can all be done in O(ln(n)) time.
44 * Because the first skip-list is just a sorted list of all mappings, it is also
45 * efficient to purge a sparsely populated pmap of all the mappings in a large range,
46 * for example when tearing down an address space. Large-range deletes are the
47 * primary advantage of skip-lists over a hash, btw.
49 * We currently use a fanout of 4 and a maximum of 12 lists (cf kSkipListFanoutShift
50 * and kSkipListMaxLists.) Thus, we can optimally handle pmaps with as many as 4**12
51 * pages, which is 64GB of resident physical memory per pmap. Pmaps can be larger than
52 * this, albeit with diminishing efficiency.
54 * The major problem with skip-lists is that we could waste a lot of space with 12
55 * 64-bit link fields in every mapping. So we currently have two sizes of mappings:
56 * 64-byte nodes with 4 list links, and 128-byte nodes with 12. Only one in every
57 * (4**4)==256 mappings requires the larger node, so the average size is 64.25 bytes.
58 * In practice, the additional complexity of the variable node size is entirely
59 * contained in the allocate and free routines.
61 * The other, mostly theoretic problem with skip-lists is that they have worst cases
62 * where performance becomes nearly linear. These worst-cases are quite rare but there
63 * is no practical way to prevent them.
67 ; set nonzero to accumulate skip-list stats on a per-map basis:
68 #define SKIPLISTSTATS 1
70 ; cr7 bit set when mapSearchFull() finds a match on a high list:
76 #include <ppc/proc_reg.h>
77 #include <ppc/exception.h>
81 * *********************
82 * * m a p S e a r c h *
83 * *********************
85 * Given a pmap and a virtual address (VA), find the mapping for that address.
86 * This is the fast call, that does not set up the previous-ptr vector or make
87 * consistency checks. When called:
88 * the pmap is locked (shared or exclusive)
89 * translation is off, interrupts masked
90 * 64-bit mode is enabled (if on a 64-bit machine)
91 * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
93 * r4 = high 32 bits of key to search for (0 if a 32-bit processor)
94 * r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
95 * r7 = mpFlags field if found. Undefined if not
97 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
98 * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit
99 * machines, though we quickly branch into parallel code paths.
; ---------------------------------------------------------------------------
; mapSearch -- fast skip-list lookup: given a pmap (r3) and a VA key
; (r4/r5), return the covering mapping in r3 (or 0), its mpFlags in r7,
; and the next VA in r4/r5 (0 if none).  Does NOT build prev-ptr vectors.
; NOTE(review): every line of this extraction carries a stray decimal line
; number from the original file, and numerous lines were dropped (blank
; lines, labels, #if/#endif, blr) -- restore from the original source
; before assembling.  Dropped spots are flagged below.
; ---------------------------------------------------------------------------
103 .globl EXT(mapSearch)
; NOTE(review): dropped line here -- the entry label (LEXT(mapSearch)) is
; missing; r3 = pmap ptr on entry.
105 lbz r7,pmapCurLists(r3) ; get largest #lists any mapping is on
106 la r8,pmapSkipLists+4(r3) ; point to lists in pmap, assuming 32-bit machine
107 rlwinm r5,r5,0,0,19 ; zero low 12 bits of key
108 mr r6,r3 ; save pmap ptr here so we can accumulate statistics
109 li r9,0 ; initialize prev ptr
110 addic. r7,r7,-1 ; get base-0 number of last list, and test for 0
111 li r2,0 ; initialize count of mappings visited
112 slwi r7,r7,3 ; get offset of last list in use
113 blt-- mapSrchPmapEmpty ; pmapCurLists==0 (ie, no mappings)
114 lwzx r3,r8,r7 ; get 32-bit ptr to 1st mapping in highest list
115 bf-- pf64Bitb,mapSrch32c ; skip if 32-bit processor
116 subi r8,r8,4 ; we use all 64 bits of ptrs
117 rldimi r5,r4,32,0 ; r5 <- 64-bit va
118 ldx r3,r8,r7 ; get 64-bit ptr to 1st mapping in highest list
119 b mapSrch64c ; enter 64-bit search loop
122 ; 64-bit processors. Check next mapping.
123 ; r2 = count of mappings visited so far
124 ; r3 = current mapping ptr
125 ; r4 = va of current mapping (ie, of r3)
126 ; r5 = va to search for (the "key") (low 12 bits are 0)
128 ; r7 = current skip list number * 8
129 ; r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
130 ; r9 = prev ptr, or 0 if none
133 mapSrch64a: ; loop over each mapping
134 ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits)
135 addi r2,r2,1 ; count mappings visited
136 rldicr r4,r4,0,51 ; zero low 12 bits of mapping va
137 cmpld cr1,r5,r4 ; compare the vas
138 blt cr1,mapSrch64d ; key is less, try next list
139 la r8,mpList0(r3) ; point to skip list vector in this mapping
140 mr r9,r3 ; remember prev ptr
141 beq-- cr1,mapSrch64Found ; this is the correct mapping
142 ldx r3,r7,r8 ; get ptr to next mapping in current list
; NOTE(review): dropped line here -- original defines mapSrch64c: (branch
; target of L85 and the bge++ below); restore before assembling.
144 mr. r3,r3 ; was there another mapping on current list?
145 bne++ mapSrch64a ; was another, so loop
; NOTE(review): dropped line here -- original defines mapSrch64d:.
147 subic. r7,r7,8 ; move on to next list offset
148 ldx r3,r7,r8 ; get next mapping on next list (if any)
149 bge++ mapSrch64c ; loop to try next list
151 ; Mapping not found, check to see if prev node was a block mapping or nested pmap.
152 ; If not, or if our address is not covered by the block or nested map, return 0.
153 ; Note the advantage of keeping the check for block mappings (and nested pmaps)
154 ; out of the inner loop; we do the special case work at most once per search, and
155 ; never for the most-common case of finding a scalar mapping. The full searches
156 ; must check _in_ the inner loop, to get the prev ptrs right.
158 mr. r9,r9 ; was there a prev ptr?
159 li r3,0 ; assume we are going to return null
160 ld r4,pmapSkipLists(r6) ; assume prev ptr null... so next is first
161 beq-- mapSrch64Exit ; prev ptr was null, search failed
162 lwz r0,mpFlags(r9) ; get flag bits from prev mapping
163 lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
165 rlwinm r0,r0,mpBSub+1,31,31 ; 0 if 4K bsu or 1 if 32MB bsu
166 ld r10,mpVAddr(r9) ; re-fetch base address of prev ptr
167 ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
168 addi r11,r11,1 ; Convert 0-based to 1-based
169 rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
170 ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any
171 sld r11,r11,r0 ; Get the length in bytes
172 rldicr r10,r10,0,51 ; zero low 12 bits of mapping va
173 subi r0,r11,4096 ; get offset last page in mapping
174 add r10,r10,r0 ; r10 <- last page in this mapping
175 cmpld r5,r10 ; does this mapping cover our page?
176 bgt mapSrch64Exit ; no, search failed
177 mr r3,r9 ; yes, we found it
180 ; r2 = count of nodes visited
184 mapSrch64Found: ; WARNING: can drop down to here
185 ld r4,mpList0(r3) ; get ptr to next mapping
186 lwz r7,mpFlags(r3) ; Get the flags for our caller
188 ; r2 = count of nodes visited
189 ; r3 = return value (ie, found mapping or 0)
190 ; r4 = next mapping (or 0 if none)
194 mapSrch64Exit: ; WARNING: can drop down to here
195 mr. r5,r4 ; next ptr null?
; NOTE(review): dropped line here -- original has "#if SKIPLISTSTATS"
; guarding the statistics block below.
197 lwz r10,pmapSearchCnt(r6) ; prepare to accumulate statistics
198 ld r8,pmapSearchVisits(r6)
199 addi r10,r10,1 ; count searches
200 add r8,r8,r2 ; count nodes visited
201 stw r10,pmapSearchCnt(r6)
202 std r8,pmapSearchVisits(r6)
; NOTE(review): dropped line here -- closing "#endif" for SKIPLISTSTATS.
204 beqlr- ; next ptr was null, so return 0 in r4 and r5
205 lwz r5,mpVAddr+4(r4) ; get VA of next node
; NOTE(review): dropped line here -- original ends this path with "blr";
; without it execution falls into the 32-bit path below.
; ---------------------------------------------------------------------------
; mapSearch, 32-bit processor path.  Same contract as the 64-bit path above,
; but list links and VAs are 32 bits and the visit counter is kept as a
; two-word (64-bit) value in pmapSearchVisits.
; ---------------------------------------------------------------------------
210 ; 32-bit processors. Check next mapping.
211 ; r2 = count of mappings visited so far
212 ; r3 = current mapping ptr
213 ; r4 = va of current mapping (ie, of r3)
214 ; r5 = va to search for (the "key") (low 12 bits are 0)
216 ; r7 = current skip list number * 8
217 ; r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
218 ; r9 = prev ptr, or 0 if none
221 mapSrch32a: ; loop over each mapping
222 lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits)
223 addi r2,r2,1 ; count mappings visited
224 rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va
225 cmplw cr1,r5,r4 ; compare the vas
226 blt cr1,mapSrch32d ; key is less, try next list
227 la r8,mpList0+4(r3) ; point to skip list vector in this mapping
228 mr r9,r3 ; remember prev ptr
229 beq- cr1,mapSrch32Found ; this is the correct mapping
230 lwzx r3,r7,r8 ; get ptr to next mapping in current list
; NOTE(review): dropped line here -- original defines mapSrch32c: (branch
; target of L81 and the bge+ below).
232 mr. r3,r3 ; was there another mapping on current list?
233 bne+ mapSrch32a ; was another, so loop
; NOTE(review): dropped line here -- original defines mapSrch32d:.
235 subic. r7,r7,8 ; move on to next list offset
236 lwzx r3,r7,r8 ; get next mapping on next list (if any)
237 bge+ mapSrch32c ; loop to try next list
239 ; Mapping not found, check to see if prev node was a block mapping or nested pmap.
240 ; If not, or if our address is not covered by the block or nested map, return 0.
241 ; Note the advantage of keeping the check for block mappings (and nested pmaps)
242 ; out of the inner loop; we do the special case work at most once per search, and
243 ; never for the most-common case of finding a scalar mapping. The full searches
244 ; must check _in_ the inner loop, to get the prev ptrs right.
246 mr. r9,r9 ; was there a prev ptr?
247 li r3,0 ; assume we are going to return null
248 lwz r4,pmapSkipLists+4(r6) ; assume prev ptr null... so next is first
249 beq- mapSrch32Exit ; prev ptr was null, search failed
250 lwz r0,mpFlags(r9) ; get flag bits from prev mapping
251 lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
252 lwz r10,mpVAddr+4(r9) ; re-fetch base address of prev ptr
254 rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
255 addi r11,r11,1 ; Convert 0-based to 1-based
256 ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
257 rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
258 lwz r4,mpList0+4(r9) ; get ptr to next mapping, if any
259 slw r11,r11,r0 ; Get length in bytes
260 rlwinm r10,r10,0,0,19 ; zero low 12 bits of block mapping va
261 subi r0,r11,4096 ; get address of last page in submap
262 add r10,r10,r0 ; r10 <- last page in this mapping
263 cmplw r5,r10 ; does this mapping cover our page?
264 bgt mapSrch32Exit ; no, search failed
265 mr r3,r9 ; yes, we found it
268 ; r2 = count of nodes visited
272 mapSrch32Found: ; WARNING: can drop down to here
273 lwz r4,mpList0+4(r3) ; get ptr to next mapping
274 lwz r7,mpFlags(r3) ; Get mpFlags for our caller
275 ; r2 = count of nodes visited
276 ; r3 = return value (ie, found mapping or 0)
277 ; r4 = next mapping (or 0 if none)
; NOTE(review): dropped line here -- original defines mapSrch32Exit:
; (branch target of the beq-/bgt above).
282 mr. r5,r4 ; next ptr null?
; NOTE(review): dropped line here -- original has "#if SKIPLISTSTATS".
284 lwz r10,pmapSearchCnt(r6) ; prepare to accumulate statistics
285 lwz r8,pmapSearchVisits(r6)
286 lwz r9,pmapSearchVisits+4(r6)
287 addi r10,r10,1 ; count searches
288 addc r9,r9,r2 ; count nodes visited
; NOTE(review): dropped INSTRUCTION here -- original has "addze r8,r8" to
; propagate the carry into the high word of the 64-bit visit counter;
; without it the addc above is pointless and the counter is wrong.
290 stw r10,pmapSearchCnt(r6)
291 stw r8,pmapSearchVisits(r6)
292 stw r9,pmapSearchVisits+4(r6)
; NOTE(review): dropped line here -- closing "#endif" for SKIPLISTSTATS.
294 beqlr- ; next ptr was null, so return 0 in r4 and r5
295 lwz r5,mpVAddr+4(r4) ; get VA of next node
; NOTE(review): dropped line here -- original ends with "blr".
299 ; Here when the pmap is empty (ie, pmapCurLists==0), both in 32 and 64-bit mode,
300 ; and from both mapSearch and mapSearchFull.
; NOTE(review): dropped lines here -- original defines the label
; mapSrchPmapEmpty: (branch target of the blt-- in both searches), and
; likely a third "li r5,0" to zero the low half of the next-VA return.
304 li r3,0 ; return null
305 li r4,0 ; return 0 as virtual address of next node
; NOTE(review): dropped line here -- original has "#if SKIPLISTSTATS".
308 lwz r7,pmapSearchCnt(r6) ; prepare to accumulate statistics
309 addi r7,r7,1 ; count searches
310 stw r7,pmapSearchCnt(r6)
; NOTE(review): dropped lines here -- closing "#endif" and the "blr" return.
316 * *****************************
317 * * m a p S e a r c h F u l l *
318 * *****************************
320 * Given a pmap and a virtual address (VA), find the mapping for that address.
321 * This is the "full" call, that sets up a vector of ptrs to the previous node
322 * (or to the pmap, if there is no previous node) for each list that the mapping
323 * is on. We also make consistency checks on the skip-lists. When called:
324 * the pmap is locked (shared or exclusive)
325 * translation is off, interrupts masked
326 * 64-bit mode is enabled (if on a 64-bit machine)
327 * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
329 * r4 = high 32 bits of key to search for (0 if a 32-bit processor)
330 * r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
332 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
333 * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit
334 * machines, though we quickly branch into parallel code paths.
; ---------------------------------------------------------------------------
; mapSearchFull -- full skip-list lookup: same inputs/outputs as mapSearch,
; but additionally fills the per-proc skipListPrev vector with the previous
; node on EVERY list (for use by mapInsert/mapRemove), verifies that VAs
; appear in strictly ascending order (panics otherwise), and uses the
; bFullFound cr bit to detect corrupt lists.
; ---------------------------------------------------------------------------
338 .globl EXT(mapSearchFull)
; NOTE(review): dropped line here -- the entry label (LEXT(mapSearchFull))
; is missing.
340 lbz r7,pmapCurLists(r3) ; get largest #lists any mapping is on
341 la r8,pmapSkipLists+4(r3) ; point to lists in pmap, assuming 32-bit machine
342 rlwinm r5,r5,0,0,19 ; zero low 12 bits of key
343 mr r6,r3 ; save pmap ptr here so we can accumulate statistics
344 li r2,0 ; initialize count of mappings visited
345 mfsprg r12,0 ; get the per-proc data ptr
346 crclr bFullFound ; we have not found the mapping yet
347 addic. r7,r7,-1 ; get base-0 number of last list, and test for 0
348 subi r9,r8,mpList0+4 ; initialize prev ptr to be a fake mapping
349 slwi r7,r7,3 ; get (offset*8) of last list
350 la r12,skipListPrev+4(r12) ; point to vector of prev ptrs, assuming 32-bit machine
351 blt-- mapSrchPmapEmpty ; pmapCurLists==0 (ie, no mappings)
352 lwzx r3,r8,r7 ; get 32-bit ptr to 1st mapping in highest list
353 li r10,0 ; initialize prev ptrs VA to 0 too
354 bf-- pf64Bitb,mapSrchFull32c ; skip if 32-bit processor
355 subi r8,r8,4 ; we use all 64 bits of ptrs
357 rldimi r5,r4,32,0 ; r5 <- 64-bit va
358 ldx r3,r8,r7 ; get 64-bit ptr to 1st mapping in highest list
359 b mapSrchFull64c ; enter 64-bit search loop
362 ; 64-bit processors. Check next mapping.
363 ; r2 = count of mappings visited so far
364 ; r3 = current mapping ptr
365 ; r4 = va of current mapping (ie, of r3)
366 ; r5 = va to search for (the "key") (low 12 bits are 0)
368 ; r7 = current skip list number * 8
369 ; r8 = ptr to skip list vector of mapping pointed to by r9
370 ; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
371 ; r10 = lowest expected next va, 0 at the beginning of the search
372 ; r12 = ptr to the skipListPrev vector in the per-proc
375 mapSrchFull64a: ; loop over each mapping
376 addi r2,r2,1 ; count mappings visited
377 lwz r0,mpFlags(r3) ; get mapping flag bits
378 lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping
379 ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits)
381 rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
382 addi r11,r11,1 ; Convert 0-based to 1-based
383 ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
384 rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
385 sld r11,r11,r0 ; Get the length in bytes
386 rldicr r4,r4,0,51 ; zero low 12 bits of mapping va
387 addic. r0,r11,-4096 ; get offset last page in mapping (set cr0_eq if 1 page)
389 cmpld cr5,r10,r4 ; make sure VAs come in strictly ascending order
390 cmpld cr1,r5,r4 ; compare the vas
391 bgt-- cr5,mapSkipListPanic ; die if keys are out of order
393 blt cr1,mapSrchFull64d ; key is less, try next list
394 beq cr1,mapSrchFull64Found ; this is the correct mapping
395 bne-- cr0,mapSrchFull64e ; handle mapping larger than one page
; NOTE(review): dropped line here -- original defines mapSrchFull64b:
; (branch target of the bgt after the block-mapping end-check below).
397 la r8,mpList0(r3) ; point to skip list vector in this mapping
398 mr r9,r3 ; current becomes previous
399 ldx r3,r7,r8 ; get ptr to next mapping in current list
400 addi r10,r4,0x1000 ; Get the lowest VA we can get next
; NOTE(review): dropped line here -- original defines mapSrchFull64c:.
402 mr. r3,r3 ; was there another mapping on current list?
403 bne++ mapSrchFull64a ; was another, so loop
; NOTE(review): dropped line here -- original defines mapSrchFull64d:.
405 stdx r9,r7,r12 ; save prev ptr in per-proc vector
406 subic. r7,r7,8 ; move on to next list offset
407 ldx r3,r7,r8 ; get next mapping on next list (if any)
408 bge++ mapSrchFull64c ; loop to try next list
410 ; Mapping not found, return 0 and next higher key
412 li r3,0 ; return null
413 bt-- bFullFound,mapSkipListPanic ; panic if it was on earlier list
414 ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any
; NOTE(review): dropped line here -- original branches to the common exit
; ("b mapSrch64Exit"); without it execution falls into the code below.
417 ; Block mapping or nested pmap, and key > base. We must compute the va of
418 ; the end of the block to see if key fits within it.
; NOTE(review): dropped line here -- original defines mapSrchFull64e:
; (branch target of the bne-- cr0 above); r0 = size-in-bytes minus 4096.
421 add r4,r4,r0 ; r4 <- last page in this mapping
422 cmpld r5,r4 ; does this mapping cover our page?
423 bgt mapSrchFull64b ; no, try next mapping (r4 is advanced to end of range)
427 ; r2 = count of nodes visited
430 ; r7 = current skip list number * 8
431 ; r8 = ptr to prev mappings (ie, r9) skip-list vector
432 ; r9 = prev ptr, ie highest mapping that comes before search target
433 ; r10 = prev mappings va
434 ; r12 = ptr to the skipListPrev vector in the per-proc
436 mapSrchFull64Found: ; WARNING: can drop down to here
437 cmpwi r7,0 ; are we in the last skip-list?
438 crset bFullFound ; remember that we found the mapping
439 bne mapSrchFull64d ; mapSearchFull must search all lists to get prev ptrs
440 ld r4,mpList0(r3) ; get ptr to next mapping
441 stdx r9,r7,r12 ; save prev ptr in last list
442 lwz r7,mpFlags(r3) ; Get the flags for our caller
; NOTE(review): dropped line here -- original joins the common exit with
; "b mapSrch64Exit".
; ---------------------------------------------------------------------------
; mapSearchFull, 32-bit processor path.  Same contract as the 64-bit path:
; finds the mapping for the key in r5, fills the per-proc skipListPrev
; vector for every list, and sanity-checks list ordering.
;
; FIX(review): the ascending-order check below used cr0
; ("cmplw cr0,r10,r4" / "bgt- cr0,mapSkipListPanic"), which clobbers cr0
; as set by "addic. r0,r11,-4096" (whose comment explicitly says
; "set cr0_eq if 1 page") before "bne- cr0,mapSrchFull32e" consumes it.
; That made the one-page/block-mapping dispatch test the wrong condition.
; The parallel 64-bit path uses cr5 for the order check; changed to cr5
; here to match.
; ---------------------------------------------------------------------------
446 ; 32-bit processors. Check next mapping.
447 ; r2 = count of nodes visited
448 ; r3 = ptr to next mapping in current list
449 ; r5 = va to search for (the "key") (low 12 bits are 0)
451 ; r7 = current skip list number * 8
452 ; r8 = ptr to skip list vector of mapping pointed to by r9
453 ; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
454 ; r10 = lowest expected next va, 0 at the beginning of the search
455 ; r12 = ptr to the skipListPrev vector in the per-proc
458 mapSrchFull32a: ; loop over each mapping
459 addi r2,r2,1 ; count mappings visited
460 lwz r0,mpFlags(r3) ; get mapping flag bits
461 lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping
462 lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits)
464 rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
465 addi r11,r11,1 ; Convert 0-based to 1-based
466 ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
467 rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
468 slw r11,r11,r0 ; Get the length in bytes
469 rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va
470 addic. r0,r11,-4096 ; get offset last page in mapping (set cr0_eq if 1 page)
472 cmplw cr5,r10,r4 ; make sure VAs come in strictly ascending order (cr5: keep cr0 for 1-page test)
473 cmplw cr1,r5,r4 ; compare the vas
474 bgt- cr5,mapSkipListPanic ; die if keys are out of order
476 blt cr1,mapSrchFull32d ; key is less than this va, try next list
477 beq cr1,mapSrchFull32Found ; this is the correct mapping
478 bne- cr0,mapSrchFull32e ; handle mapping larger than one page
; NOTE(review): dropped line here -- original defines mapSrchFull32b:
; (branch target of the bgt after the block-mapping end-check below).
480 la r8,mpList0+4(r3) ; point to skip list vector in this mapping
481 mr r9,r3 ; current becomes previous
482 lwzx r3,r7,r8 ; get ptr to next mapping in current list
483 addi r10,r4,0x1000 ; Get the lowest VA we can get next
; NOTE(review): dropped line here -- original defines mapSrchFull32c:.
485 mr. r3,r3 ; next becomes current
486 bne+ mapSrchFull32a ; was another, so loop
; NOTE(review): dropped line here -- original defines mapSrchFull32d:.
488 stwx r9,r7,r12 ; save prev ptr in per-proc vector
489 subic. r7,r7,8 ; move on to next list offset
490 lwzx r3,r7,r8 ; get next mapping on lower list (if any)
491 bge+ mapSrchFull32c ; loop to try next list
493 ; mapping not found, return 0 and next-key
495 li r3,0 ; return null
496 bt- bFullFound,mapSkipListPanic ; panic if it was on an earlier list
497 lwz r4,mpList0+4(r9) ; get ptr to next mapping
; NOTE(review): dropped line here -- original branches to the common exit
; ("b mapSrch32Exit").
500 ; Block mapping or nested pmap, and key > base. We must compute the va of
501 ; the end of the block to see if our key fits within it.
; NOTE(review): dropped line here -- original defines mapSrchFull32e:
; (branch target of the bne- cr0 above).
504 add r4,r4,r0 ; r4 <- last page in this mapping
505 cmplw r5,r4 ; does this mapping cover our page?
506 bgt mapSrchFull32b ; no, try next mapping
510 ; r2 = count of nodes visited
513 ; r7 = current skip list number * 8
514 ; r9 = prev ptr, ie highest mapping that comes before search target, or 0
515 ; r10 = prev mappings va
516 ; r12 = ptr to the skipListPrev vector in the per-proc
518 mapSrchFull32Found: ; WARNING: can drop down to here
519 cmpwi r7,0 ; are we in the last skip-list?
520 crset bFullFound ; remember that we found the mapping
521 bne mapSrchFull32d ; mapSearchFull must search all lists to get prev ptrs
522 lwz r4,mpList0+4(r3) ; get ptr to next mapping
523 stwx r9,r7,r12 ; save prev ptr in last list
524 lwz r7,mpFlags(r3) ; Get mpFlags for our caller
; NOTE(review): dropped line here -- original joins the common exit with
; "b mapSrch32Exit".
529 * *********************
530 * * m a p I n s e r t *
531 * *********************
533 * Insert a mapping into pmap skip-lists. The caller has already called mapSearchFull to
534 * determine that this mapping does not overlap other mappings in the pmap. As a side effect
535 * of calling mapSearchFull, the per-proc skipListPrev array is set up with a vector of the
536 * previous ptrs for each skip list. When called:
537 * the pmap is locked (exclusive)
538 * translation is off, interrupts masked
539 * 64-bit mode is enabled (if on a 64-bit machine)
540 * mapSearchFull has just been called for this mappings key
541 * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
545 * There is no return value. Except for cr6 (which is global), we trash nonvolatile regs.
; ---------------------------------------------------------------------------
; mapInsert -- link a new mapping (r4) into the skip-lists of pmap (r3),
; using the prev-ptr vector that mapSearchFull just left in the per-proc
; skipListPrev array.  Grows pmapCurLists (and sanity-checks the unused
; list headers) if the new mapping is on more lists than any existing one.
; ---------------------------------------------------------------------------
549 .globl EXT(mapInsert)
; NOTE(review): dropped line here -- the entry label (LEXT(mapInsert)) is
; missing.
551 lwz r8,mpFlags(r4) ; get this mappings flags
552 lbz r7,pmapCurLists(r3) ; get current max# lists any mapping is on
553 la r10,pmapSkipLists+4(r3) ; r10 <-- base of pmap list headers, assuming 32-bit machine
554 la r11,mpList0+4(r4) ; r11 <-- base of this mappings list vector
555 mfsprg r12,0 ; get ptr to our per-proc
556 andi. r9,r8,mpLists ; get #lists this mapping is on (1<=n<=27)
557 la r12,skipListPrev+4(r12) ; r12 <-- base of prev ptr vector
558 sub. r6,r9,r7 ; is this mapping on more lists than any other?
559 slwi r8,r9,3 ; get #lists * 8
560 subi r8,r8,8 ; get offset to topmost (last) list in use
561 bf-- pf64Bitb,mapIns32 ; handle 32-bit processor
562 subi r10,r10,4 ; we use all 8 bytes of the ptr fields
565 ble++ mapIns64a ; not new max #lists
567 ; 64-bit processor: We must increase pmapCurLists. Since mapSearchFull() only
568 ; sets up the first pmapCurLists prev ptrs, we must initialize the new ones to
569 ; point to the pmap. While we are at it, we verify that the unused list hdrs in
572 cmpwi r9,kSkipListMaxLists ; in range?
573 stb r9,pmapCurLists(r3) ; remember new max
574 mtctr r6 ; set up count of new lists
575 mr r5,r8 ; copy offset to last list
576 subi r0,r10,mpList0 ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
577 bgt-- mapSkipListPanic ; choke if this mapping is on too many lists
; NOTE(review): dropped line here -- original defines mapIns64NewList:
; (target of the bdnzt below).
579 ldx r6,r5,r10 ; get pmap list head
580 stdx r0,r5,r12 ; initialize prev ptr
581 subi r5,r5,8 ; get next list offset
582 cmpdi r6,0 ; was list hdr null?
583 bdnzt cr0_eq,mapIns64NewList ; loop if more lists to initialize and list hdr was 0
584 bne-- mapSkipListPanic ; die if pmap list hdr was not null
587 ; 64-bit processor: loop over each list this mapping is on
589 ; r8 = next list offset
590 ; r10 = ptr to base of pmap list header vector
591 ; r11 = ptr to base of new mappings list vector
592 ; r12 = ptr to base of prev ptr vector in per-proc
; NOTE(review): dropped line here -- original defines mapIns64a:.
596 ldx r5,r8,r12 ; get prev ptr from per-proc vector
597 cmpwi cr1,r8,0 ; more to go?
598 la r7,mpList0(r5) ; get base of prev mappings list vector
; NOTE(review): dropped INSTRUCTION here -- original loads the next ptr
; from the prev mapping's list vector (ldx of r8,r7) before the store
; below, so it can be stored into the new mapping's own list vector.
600 stdx r4,r8,r7 ; * insert new mapping in middle of this list
; NOTE(review): dropped INSTRUCTION here -- original stores that next ptr
; into the new mapping's list vector (stdx via r11); without these two
; lines the new mapping's forward links are never initialized.
602 subi r8,r8,8 ; get next list offset
603 bne++ cr1,mapIns64a ; more lists to go
; NOTE(review): dropped line here -- original returns with "blr".
606 ; Handle 32-bit processor. First, increase pmapCurLists if necessary; cr0 is bgt
607 ; iff the new mapping has more lists. Since mapSearchFull() only sets up the first
608 ; pmapCurLists prev ptrs, we must initialize any new ones to point to the pmap.
609 ; While we are at it, we verify that the unused list hdrs in the pmap are 0.
; NOTE(review): dropped line here -- original defines mapIns32: (branch
; target of the bf-- above).
612 ble+ mapIns32a ; skip if new mapping does not use extra lists
613 cmpwi r9,kSkipListMaxLists ; in range?
614 stb r9,pmapCurLists(r3) ; remember new max
615 mtctr r6 ; set up count of new lists
616 mr r5,r8 ; copy offset to last list
617 subi r0,r10,mpList0+4 ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
618 bgt- mapSkipListPanic ; choke if this mapping is on too many lists
; NOTE(review): dropped line here -- original defines mapIns32NewList:.
620 lwzx r6,r5,r10 ; get pmap list head
621 stwx r0,r5,r12 ; initialize prev ptr
622 subi r5,r5,8 ; get next list offset
623 cmpwi r6,0 ; was list hdr null?
624 bdnzt cr0_eq,mapIns32NewList ; loop if more lists to initialize and list hdr was 0
625 bne- mapSkipListPanic ; die if pmap list hdr was not null
628 ; 32-bit processor: loop over each list this mapping is on
630 ; r8 = next list offset
631 ; r10 = ptr to base of pmap list header vector
632 ; r11 = ptr to base of new mappings list vector
633 ; r12 = ptr to base of prev ptr vector
; NOTE(review): dropped line here -- original defines mapIns32a:.
637 lwzx r5,r8,r12 ; get prev ptr from per-proc vector
638 cmpwi cr1,r8,0 ; more to go?
639 la r7,mpList0+4(r5) ; get base of prev mappings list vector
; NOTE(review): dropped INSTRUCTIONS around the store below, as in the
; 64-bit path -- the load of the next ptr and the store of it into the
; new mapping's list vector are missing; restore from original source.
641 stwx r4,r8,r7 ; * insert new mapping in middle of this list
643 subi r8,r8,8 ; get next list offset
644 bne+ cr1,mapIns32a ; more lists to go
; NOTE(review): dropped line here -- original returns with "blr".
649 * *********************
650 * * m a p R e m o v e *
651 * *********************
653 * Remove a mapping from pmap skip-lists. The caller has already called mapSearchFull to
654 * find the mapping, which sets up the skipListPrev array with a vector of the previous
655 * ptrs for each skip list. When called:
656 * the pmap is locked (exclusive)
657 * translation is off, interrupts masked
658 * 64-bit mode is enabled (if on a 64-bit machine)
659 * mapSearchFull has just been called for this mappings key
660 * cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
664 * There is no return value. Except for cr6 (which is global), we trash nonvolatile regs.
; ---------------------------------------------------------------------------
; mapRemove -- unlink mapping (r4) from the skip-lists of pmap (r3), using
; the prev-ptr vector left in the per-proc skipListPrev array by the
; mapSearchFull call that located the mapping.  If the removed mapping was
; the only one on the highest list(s), pmapCurLists is shrunk to match.
; ---------------------------------------------------------------------------
668 .globl EXT(mapRemove)
; NOTE(review): dropped line here -- the entry label (LEXT(mapRemove)) is
; missing.
670 lwz r8,mpFlags(r4) ; get this mappings flags
671 lbz r10,pmapCurLists(r3) ; get current #lists in use
672 la r11,mpList0+4(r4) ; r11 <-- base of this mappings list vector
673 mfsprg r12,0 ; get ptr to our per-proc
674 andi. r9,r8,mpLists ; get #lists this mapping is on (1<=n<=27)
675 slwi r8,r9,3 ; get #lists * 8
676 cmpw cr5,r9,r10 ; compare mpLists to pmapCurLists
677 la r12,skipListPrev+4(r12) ; r12 <-- base of prev ptr vector
678 bgt-- cr5,mapSkipListPanic ; die if mpLists > pmapCurLists
679 subi r8,r8,8 ; get offset to topmost (last) list this mapping is in
680 bf-- pf64Bitb,mapRem32a ; skip if 32-bit processor
681 subi r11,r11,4 ; we use all 64 bits of list links on 64-bit machines
685 ; 64-bit processor: loop over each list this mapping is on
688 ; r8 = offset to next list
690 ; r11 = ptr to base of mapping list vector
691 ; r12 = ptr to base of prev ptr vector in per-proc
692 ; cr5 = beq if (mpLists == pmapCurLists)
; NOTE(review): dropped line here -- original defines mapRem64a:.
696 ldx r5,r8,r12 ; get prev ptr from per-proc vector
697 ldx r9,r8,r11 ; get next ptr from mapping
698 cmpwi cr1,r8,0 ; more to go?
699 la r7,mpList0(r5) ; get base of prev mappings list vector
700 stdx r9,r8,r7 ; point to next from prev
701 subi r8,r8,8 ; get next list offset
702 bne++ cr1,mapRem64a ; loop if another list to unlink from
704 ; Did we reduce #lists in use by removing last mapping in last list?
706 bnelr++ cr5 ; if (mpLists!=pmapCurLists) cannot have removed last map
707 la r5,pmapSkipLists(r3) ; point to vector of list hdrs
; NOTE(review): dropped line here -- original defines mapRem64b: (target
; of the bgt below).
709 subic. r10,r10,1 ; get base-0 list#
710 slwi r8,r10,3 ; get offset to last list
711 ldx r0,r8,r5 ; get last list ptr
712 cmpdi cr1,r0,0 ; null?
713 bnelr cr1 ; not null, so we are done
714 stb r10,pmapCurLists(r3) ; was null, so decrement pmapCurLists
715 bgt mapRem64b ; loop to see if more than one list was emptied
; NOTE(review): dropped line here -- original returns with "blr".
719 ; 32-bit processor: loop over each list this mapping is on
722 ; r8 = offset to next list
724 ; r11 = ptr to base of mapping list vector
725 ; r12 = ptr to base of prev ptr vector in per-proc
726 ; cr5 = beq if (mpLists == pmapCurLists)
; NOTE(review): dropped line here -- original defines mapRem32a: (branch
; target of the bf-- above).
730 lwzx r5,r8,r12 ; get prev ptr from per-proc vector
731 lwzx r9,r8,r11 ; get next ptr from mapping
732 cmpwi cr1,r8,0 ; more to go?
733 la r7,mpList0+4(r5) ; get base of prev mappings list vector
734 stwx r9,r8,r7 ; point to next from prev
735 subi r8,r8,8 ; get next list offset
736 bne+ cr1,mapRem32a ; loop if another list to unlink from
738 ; Did we reduce #lists in use by removing last mapping in last list?
740 bnelr+ cr5 ; if (mpLists!=pmapCurLists) cannot have removed last map
741 la r5,pmapSkipLists+4(r3) ; point to vector of list hdrs
; NOTE(review): dropped line here -- original defines mapRem32b:.
743 subic. r10,r10,1 ; get base-0 list#
744 slwi r8,r10,3 ; get offset to last list
745 lwzx r0,r8,r5 ; get last list ptr
746 cmpwi cr1,r0,0 ; null?
747 bnelr cr1 ; not null, so we are done
748 stb r10,pmapCurLists(r3) ; was null, so decrement pmapCurLists
749 bgt mapRem32b ; loop to see if more than one list was emptied
; NOTE(review): dropped line here -- original returns with "blr".
754 * *************************
755 * * m a p S e t L i s t s *
756 * *************************
758 * Called to decide how many skip-lists the next mapping will be on. For each pmap,
759 * we maintain a pseudo-random sequence based on a linear feedback shift register. The
760 * next number is generated by rotating the old value left by 1 and XORing with a
761 * polynomial (actually 4 8-bit polynomials concatenated) and adding 1.
762 * The simple (unclamped) number of lists a mapping is on is the number of trailing 0s
763 * in the pseudo-random sequence, shifted by the (log2-1) of the fanout F, plus one.
764 * This seems to give us a near perfect distribution, in the sense that about F times more nodes
765 * are allocated on n lists, as are on (n+1) lists.
767 * At one point we used a simple counter to assign lists. While this gave perfect
768 * distribution, there were certain access patterns that would drive a worst case
769 * distribution (e.g., insert low, then high, then low, etc.). Unfortunately,
770 * these patterns were not too uncommon. We changed to a less-than-perfect assignment,
771 * but one that works consistently across all known access patterns.
773 * Also, we modify the "simple" trailing-0-based list count, to account for an important
774 * observation: because VM does a lot of removing and restoring of mappings in the process of
775 * doing copy-on-write etc, it is common to have the pmap's "random number" (ie, the
776 * count of created mappings) be much larger than the number of mappings currently in the
777 * pmap. This means the simple list count will often be larger than justified by the number of
778 * mappings in the pmap. To avoid this common situation, we clamp the list count to be no more
779 * than ceil(logBaseF(pmapResidentCnt)).
781 * Finally, we also clamp the list count to kSkipListMaxLists.
783 * We are passed the pmap ptr in r3. Called with translation on, interrupts enabled,
784 * and in 32-bit mode.
; ---------------------------------------------------------------------------
; mapSetLists -- decide how many skip-lists the NEXT mapping in pmap (r3)
; will be on, per the algorithm described in the header comment above:
; advance the pmap's LFSR-style pseudo-random sequence, count its trailing
; zeroes, clamp by the magnitude of pmapResidentCnt and by
; kSkipListMaxLists, and return the list count (>=1) in r3.
; Runs with translation on, interrupts enabled, 32-bit mode; the
; pmapRandNum update is explicitly non-atomic (benign race).
; ---------------------------------------------------------------------------
787 .globl EXT(mapSetLists)
; NOTE(review): dropped line here -- the entry label (LEXT(mapSetLists))
; is missing.
789 lwz r5,pmapRandNum(r3) ; get the per-pmap counter of mapping creates
790 lwz r4,pmapResidentCnt(r3) ; get number of mappings in this pmap
791 lis r11,hi16(0xA7CBF5B9) ; Get polynomial (I just made this up...)
792 li r0,-1 ; get a mask of 1s
793 ori r11,r11,lo16(0xA7CBF5B9) ; Get polynomial (I just made this up...)
794 rlwinm r5,r5,1,0,31 ; Rotate
795 cntlzw r7,r4 ; get magnitude of pmapResidentCnt
796 xor r5,r5,r11 ; Munge with poly
797 srw r7,r0,r7 ; r7 <- mask for magnitude of pmapResidentCnt
798 addi r6,r5,1 ; increment pmapRandNum non-atomically
799 andc r8,r5,r6 ; get a mask for trailing zeroes in pmapRandNum
800 stw r6,pmapRandNum(r3) ; update "random number"
801 and r8,r8,r7 ; clamp trailing 0s to magnitude of pmapResidentCnt
802 rlwinm r8,r8,0,32-(kSkipListMaxLists*(kSkipListFanoutShift+1))+1,31 ; clamp to kSkipListMaxLists
803 cntlzw r9,r8 ; count leading 0s in the mask
804 subfic r10,r9,32 ; r10 <- trailing zero count
805 srwi r11,r10,kSkipListFanoutShift ; shift by 1 if fanout is 4, 2 if 8, etc
806 addi r3,r11,1 ; every mapping is on at least one list
; NOTE(review): dropped line here -- original returns with "blr".
811 * *************************************
812 * * m a p S k i p L i s t V e r i f y *
813 * *************************************
815 * This does a fairly thorough sweep through a pmap's skip-list data structure, doing
816 * consistency checks. It is typically called (from hw_exceptions.s) from debug or
817 * instrumented builds. It is probably not a good idea to call this in production builds,
818 * as it must run with exceptions disabled and can take a long time to verify a big pmap.
819 * It runs in O(n*ln(n)).
821 * Called on a bl, with the pmap ptr in r20. We assume the pmap is locked (shared) and
822 * that EE and DR are off. We check all 64 bits of ptrs even on 32-bit machines.
823 * We use r20-r31, cr0, cr1, and cr7. If we return, no inconsistencies were found.
825 * You will notice we make little attempt to schedule the code; clarity is deemed more
826 * important than speed.
831 * mapSkipListVerifyC is a version that is callable from C.
832 * This should be called only from the debugger, IT DOES NOT LOCK THE PMAP!!!!
; In:   r3 = pmap ptr, r4 = register dump area (saved in r19).
; Makes a stack frame, saves r13-r31, switches to real/disabled mode via
; mapSetUp, then calls mapSkipListVerify with the translated pmap ptr in r20.
; NOTE(review): this excerpt elides original lines 868-935 and 939-975
; (the register-dump stores and the mslvcreturn label) — the visible text
; is not the complete routine; do not edit the code without the full file.
835 .globl EXT(mapSkipListVerifyC)
836 LEXT(mapSkipListVerifyC)
838 stwu r1,-(FM_ALIGN((31-13+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
839 mflr r0 ; Save the link register
840 stmw r13,FM_ARG0(r1) ; Save all registers
841 stw r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return
843 lwz r15,pmapvr(r3) ; Get the V to R translation
844 lwz r16,pmapvr+4(r3) ; Get the V to R translation
845 mr r19,r4 ; Save register dump area
847 bl EXT(mapSetUp) ; Get set up
850 xor r20,r3,r16 ; Translate 32-bit portion
851 bf-- pf64Bitb,mslvc32a ; Skip if 32-bit...
853 rldimi r20,r15,32,0 ; Shift the fixed upper part of the physical over and cram in top
855 mslvc32a: lis r18,hi16(EXT(DebugWork))
856 ori r18,r18,lo16(EXT(DebugWork))
858 stw r0,4(r18) ; Make sure the test knows to run
860 bl EXT(mapSkipListVerify) ; Run the test
863 stw r0,4(r18) ; Remove explicit call flag
865 bt++ pf64Bitb,mslvc64a ; This is 64-bit...
867 mtmsr r17 ; Restore enables/translation/etc.
936 b mslvcreturn ; Join common...
938 mslvc64a: mtmsrd r17 ; Restore enables/translation/etc.
976 lwz r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
977 lmw r13,FM_ARG0(r1) ; Get the registers
978 mtlr r0 ; Restore the return
979 lwz r1,0(r1) ; Pop the stack
; mapSkipListVerify: entry and initialization.
; In:   r20 = pmap ptr (locked shared), EE and DR off.  Called via bl; LR
;       saved in r31.  Uses r20-r31, cr0, cr1, cr7.  Returns iff consistent.
983 .globl EXT(mapSkipListVerify)
984 LEXT(mapSkipListVerify)
985 mflr r31 ; save LR so we can bl to mapVerifyDie
987 ; If we have already found an inconsistency and died, do not do so again, to
990 lis r27,hi16(EXT(DebugWork))
991 ori r27,r27,lo16(EXT(DebugWork))
992 lwz r0,4(r27) ; Get the explicit entry flag
993 lwz r27,0(r27) ; Get lockout
994 cmplwi r0,0x4262 ; Should we run anyway?
995 beq-- mslvAnyway ; Yes...
996 cmpwi r27,0 ; have we already found an error?
997 bnelr-- ; yes, just return without checking again
1000 ; Not recursive call, so initialize.
1002 mfsprg r23,2 ; get the feature flags
1003 mtcrf 0x02,r23 ; put pf64Bit where we can test it
1004 lbz r26,pmapCurLists(r20) ; get #lists that are in use
1005 lwz r21,pmapResidentCnt(r20); get #mappings in this pmap
1006 cmpwi r26,kSkipListMaxLists ; in range?
1007 bgtl-- mapVerifyDie ; pmapCurLists is too big
1009 ; To prevent infinite loops, set limit of (pmapCurLists*pmapResidentCnt) iterations.
1010 ; Since we walk each list this is the max number of mappings we could visit.
1012 li r23,0 ; initialize count
1014 subic. r26,r26,1 ; loop pmapCurLists times (but at least once)
1015 add r23,r23,r21 ; compute (pmapCurLists*pmapResidentCnt)
1016 bgt mapVer0 ; this will be a 64-bit qty on 64-bit machines
1018 li r22,kSkipListMaxLists ; initialize list#
1019 bf-- pf64Bitb,mapVer32 ; go handle a 32-bit processor
; 64-bit machine: verify every list, node by node, in descending list order.
; NOTE(review): the label lines (mapVer64:, mapVer64a:, mapVer64c:, mapVer64d:,
; mapVer64f:) are elided from this excerpt; code below is otherwise contiguous.
1023 ; Loop over each list, counting mappings in each. We first check whether or not
1024 ; the list is empty (ie, if the pmapSkipLists ptr is null.) All lists above
1025 ; pmapCurLists should be empty, and no list at or below pmapCurLists should be.
1027 ; r21 = decrementing counter of mappings in this pmap
1028 ; r22 = next list# (1...kSkipListMaxLists)
1029 ; r23 = decrementing counter for infinite loop check
1032 slwi r25,r22,3 ; get offset to next skiplist
1033 la r26,pmapSkipLists(r20) ; get ptr to base of skiplist vector
1035 ldx r26,r25,r26 ; get 1st mapping on this list, if any
1036 lbz r28,pmapCurLists(r20) ; get #lists in use
1037 cmpdi cr6,r26,0 ; set cr6_eq if this list is null ("null")
1038 cmpw cr7,r22,r28 ; set cr7_gt if this list is > pmapCurLists ("high")
1039 crxor cr0_eq,cr6_eq,cr7_gt ; cr0_eq <-- (null & !high) | (!null & high)
1040 beql-- mapVerifyDie ; die if this list is null when it should not be, etc
1043 ; Loop over each node in the list.
1045 ; r21 = decrementing counter of mappings in this pmap
1046 ; r22 = this list# (1...kSkipListMaxLists)
1047 ; r23 = decrementing counter for infinite loop check
1048 ; r25 = offset to this skiplist (ie, ((r22<<3)-8))
1052 lwz r29,mpFlags(r26) ; get bits for this mapping
1053 ld r28,mpVAddr(r26) ; get key
1054 subic. r23,r23,1 ; check for loops
1055 bltl-- mapVerifyDie ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
1056 andi. r30,r26,mpBasicSize-1 ; test address for alignment
1057 bnel-- mapVerifyDie ; not aligned
1058 andi. r27,r29,mpLists ; get #lists this mapping is supposed to be on
1059 cmpw cr1,r27,r22 ; is it supposed to be on this list?
1060 bltl-- cr1,mapVerifyDie ; mapping's mpLists is too low
1061 cmpwi r27,kSkipListMaxLists ; too big?
1062 bgtl-- mapVerifyDie ; mapping's mpLists > max
1063 rldicr r28,r28,0,51 ; clear low 12 bits of va
1064 bne++ cr1,mapVer64f ; jump if this is not highest list for this node
1066 ; This is the "highest" (last) list this mapping is on.
1067 ; Do some additional checks (so we only do them once per mapping.)
1068 ; First, if a block mapping or nested pmap, compute block end.
1070 lhz r27,mpBSize(r26) ; get #pages or #segments
1071 rlwinm r29,r29,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
1072 addi r27,r27,1 ; units of nested pmap are (#segs-1)
1073 ori r29,r29,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
1074 rlwnm r29,r29,r29,27,31 ; Rotate to get 12 or 25
1075 subi r21,r21,1 ; count mappings in this pmap
1076 sld r29,r27,r29 ; Get the length in bytes
1077 subi r29,r29,4096 ; get offset to last byte in nested pmap
1079 ; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.
1081 add r24,r28,r29 ; r24 <- address of last valid page in this mapping
1082 la r28,mpList0(r26) ; get base of this mapping's vector
1083 lwz r27,mpFlags(r26) ; Get the number of lists
1084 andi. r27,r27,mpLists ; get #lists this mapping is on (1<=n<=27)
1085 cmplwi r27,mpBasicLists ; Into bigger mapping?
1086 li r27,mpBasicLists*8-8 ; Assume normal
1087 ble+ mapVer64c ; It is...
1088 li r27,kSkipListMaxLists*8-8 ; initialize list offset for inner loop
1090 ; Inner loop over each list link in this mapping's mpList vector.
1091 ; r24 = address of last valid page in this mapping
1092 ; r27 = offset for next list in inner loop
1093 ; r28 = base of this mapping's list links
1096 cmpw cr1,r27,r25 ; higher, lower, or same?
1097 ldx r29,r27,r28 ; get link to next mapping at this level
1099 beq mapVer64d ; link null, which is always OK
1100 bgtl-- cr1,mapVerifyDie ; a mapping has a non-null list higher than its mpLists
1101 ld r30,mpVAddr(r29) ; get next mapping's va
1102 rldicr r30,r30,0,51 ; zero low 12 bits
1103 cmpld r30,r24 ; compare next key with ours
1104 blel-- mapVerifyDie ; a next node has key <= to ours
1106 subic. r27,r27,8 ; move on to next list
1107 bne++ mapVer64c ; loop if more to go
1109 ; Next node on current list, or next list if current done, or return if no more lists.
1112 la r28,mpList0(r26) ; get base of this mapping's vector
1113 ldx r26,r25,r28 ; get next mapping on this list
1115 mr. r26,r26 ; is there one?
1116 bne++ mapVer64a ; yes, handle
1117 subic. r22,r22,1 ; is there another list?
1118 bgt++ mapVer64 ; loop if so
1120 cmpwi r21,0 ; did we find all the mappings in the pmap?
1121 bnel-- mapVerifyDie ; no
1122 mtlr r31 ; restore return address
; 32-bit machine: same verification as the 64-bit path, but using word loads
; and additionally checking that all upper words of the 64-bit list ptrs are 0.
; NOTE(review): the label lines (mapVer32:, mapVer32NextList:, mapVer32a:,
; mapVer32c:, mapVer32d:, mapVer32f:) are elided from this excerpt.
1127 ; Handle 32-bit machine.
1130 lwz r24,mpFlags(r20) ; Get number of lists
1131 la r30,pmapSkipLists(r20) ; first, check the pmap list hdrs
1132 andi. r24,r24,mpLists ; Clean the number of lists
1133 bl mapVerUpperWordsAre0 ; are the upper words of each list all 0?
1135 ; Loop over each list, counting mappings in each. We first check whether or not
1136 ; the list is empty. All lists above pmapCurLists should be empty, and no list
1137 ; at or below pmapCurLists should be.
1140 ; r21 = decrementing counter of mappings in this pmap
1141 ; r22 = next list# (1...kSkipListMaxLists)
1142 ; r23 = decrementing counter for infinite loop check
1145 lbz r28,pmapCurLists(r20) ; get #lists in use
1146 slwi r25,r22,3 ; get offset to next skiplist
1147 la r26,pmapSkipLists+4(r20) ; get ptr to base of skiplist vector
1149 lwzx r26,r25,r26 ; get the 1st mapping on this list, or 0
1150 cmpw cr7,r22,r28 ; set cr7_gt if this list is > pmapCurLists ("high")
1151 cmpwi cr6,r26,0 ; set cr6_eq if this list is null ("null")
1152 crxor cr0_eq,cr6_eq,cr7_gt ; cr0_eq <-- (null & !high) | (!null & high)
1153 beql- mapVerifyDie ; die if this list is null when it should not be, etc
1156 ; Loop over each node in the list.
1158 ; r21 = decrementing counter of mappings in this pmap
1159 ; r22 = this list# (1...kSkipListMaxLists)
1160 ; r23 = decrementing counter for infinite loop check
1161 ; r25 = offset to this skiplist (ie, ((r22<<3)-8))
1165 lwz r29,mpFlags(r26) ; get bits for this mapping
1166 andi. r30,r26,mpBasicSize-1 ; test address for alignment
1167 lwz r24,mpVAddr+0(r26) ; get upper word of key
1168 bnel- mapVerifyDie ; mapping address not 64-byte aligned
1169 lwz r28,mpVAddr+4(r26) ; get lower word of key
1170 subic. r23,r23,1 ; check for loops
1171 bltl- mapVerifyDie ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
1172 cmpwi r24,0 ; upper word of key (ie, va) should be 0
1173 bnel- mapVerifyDie ; was not
1174 andi. r27,r29,mpLists ; get #lists this mapping is supposed to be on
1175 cmpw cr1,r27,r22 ; is it supposed to be on this list?
1176 bltl- cr1,mapVerifyDie ; mapping's mpLists is too low
1177 cmpwi r27,kSkipListMaxLists ; too big?
1178 bgtl- mapVerifyDie ; mapping's mpLists > max
1179 rlwinm r28,r28,0,0,19 ; clear low 12 bits of va
1180 bne+ cr1,mapVer32f ; jump if this is not highest list for this node
1182 ; This is the "highest" (last) list this mapping is on.
1183 ; Do some additional checks (so we only do them once per mapping.)
1184 ; First, make sure upper words of the mpList vector are 0.
1186 lhz r27,mpBSize(r26) ; get #blocks
1187 rlwinm r29,r29,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
1188 addi r27,r27,1 ; units of nested pmap are (#segs-1)
1189 ori r29,r29,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
1190 rlwnm r29,r29,r29,27,31 ; Rotate to get 12 or 25
1191 subi r21,r21,1 ; count mappings in this pmap
1192 slw r29,r27,r29 ; Get the length in bytes
1193 subi r29,r29,4096 ; get offset to last byte in nested pmap
1195 lwz r24,mpFlags(r26) ; Get number of lists
1196 la r30,mpList0(r26) ; point to base of skiplist vector
1197 andi. r24,r24,mpLists ; Clean the number of lists
1198 bl mapVerUpperWordsAre0 ; make sure upper words are all 0 (uses r24 and r27)
1200 ; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.
1202 add r24,r28,r29 ; r24 <- address of last valid page in this mapping
1203 la r28,mpList0+4(r26) ; get base of this mapping's vector
1204 lwz r27,mpFlags(r26) ; Get the number of lists
1205 andi. r27,r27,mpLists ; get #lists this mapping is on (1<=n<=27)
1206 cmplwi r27,mpBasicLists ; Into bigger mapping?
1207 li r27,mpBasicLists*8-8 ; Assume normal
1208 ble+ mapVer32c ; It is...
1209 li r27,kSkipListMaxLists*8-8 ; initialize list offset for inner loop
1211 ; Inner loop over each list in this mapping's mpList vector.
1212 ; r24 = address of last valid page in this mapping
1213 ; r27 = offset for next list in inner loop
1214 ; r28 = base of this mapping's list links
1217 cmpw cr1,r27,r25 ; higher, lower, or same?
1218 lwzx r29,r27,r28 ; get link to next mapping at this level
1220 beq mapVer32d ; link null, which is always OK
1223 bgtl- cr1,mapVerifyDie ; a mapping has a non-null list higher than its mpLists
1224 lwz r30,mpVAddr+4(r29) ; get next mapping's va
1225 rlwinm r30,r30,0,0,19 ; zero low 12 bits
1226 cmplw r30,r24 ; compare next key with ours
1227 blel- mapVerifyDie ; a next node has key <= to ours
1229 subic. r27,r27,8 ; move on to next list
1230 bne+ mapVer32c ; loop if more to go
1232 ; Next node on current list, or next list if current done, or return if no more lists.
1235 la r28,mpList0+4(r26) ; get base of this mapping's vector again
1236 lwzx r26,r25,r28 ; get next mapping on this list
1238 mr. r26,r26 ; is there one?
1239 bne+ mapVer32a ; yes, handle
1240 subic. r22,r22,1 ; is there another list?
1241 bgt+ mapVer32NextList ; loop if so
1243 cmpwi r21,0 ; did we find all the mappings in the pmap?
1244 bnel- mapVerifyDie ; no
1245 mtlr r31 ; restore return address
1249 ; Subroutine to verify that the upper words of a vector of kSkipListMaxLists
1250 ; doublewords are 0.
1251 ; r30 = ptr to base of vector
; In:  r24 = list count (used to pick basic vs max vector size).
; Clobbers r24 and r27; branches to mapVerifyDie (with the caller's LR) on failure.
; NOTE(review): the final blr (orig line ~1266) is elided from this excerpt.
1254 mapVerUpperWordsAre0:
1255 cmplwi r24,mpBasicLists ; Do we have more than basic?
1256 li r24,mpBasicLists*8 ; Assume basic
1257 ble++ mapVerUpper1 ; We have the basic size
1258 li r24,kSkipListMaxLists*8 ; Use max size
1261 subic. r24,r24,8 ; get offset to next doubleword
1262 lwzx r27,r24,r30 ; get upper word
1263 cmpwi cr1,r27,0 ; 0 ?
1264 bne- cr1,mapVerifyDie ; die if not, passing caller's LR
1265 bgt+ mapVerUpper1 ; loop if more to go
1268 ; bl here if mapSkipListVerify detects an inconsistency.
; Returns silently if the 0x4262 "explicit call" flag is set in DebugWork;
; otherwise locks out further verification and traps into the debugger.
; NOTE(review): the mapVerifyDie: label line (orig ~1270) and the instruction
; loading r0 before the lockout store (orig 1278-1279) are elided from this
; excerpt — r0's stored value is established there, not visible here.
1272 mtlr r31 ; Restore return
1273 lis r31,hi16(EXT(DebugWork))
1274 ori r31,r31,lo16(EXT(DebugWork))
1275 lwz r0,4(r31) ; Get the explicit entry flag
1276 cmplwi r0,0x4262 ; Should we run anyway?
1277 beqlr-- ; Explicit call, return...
1280 stw r0,0(r31) ; Lock out further calls
1281 BREAKPOINT_TRAP ; hopefully, enter debugger
1286 * Panic (choke, to be exact) because of messed up skip lists. The LR points back
1287 * to the original caller of the skip-list function.
1290 mapSkipListPanic: ; skip-lists are screwed up
1292 ori r0,r0,lo16(Choke)
1293 li r3,failSkipLists ; get choke code