/*
 * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

/*
 * These are the subroutines that manage the skip-list data structures used for the
 * resident mappings for each pmap.  We used to use a much simpler hash-based scheme,
 * but it didn't scale well for 64-bit address spaces and multi-GB real memories.
 * Here's a brief tutorial on skip-lists:
 *
 * The basic idea is that each mapping is on one or more singly-linked lists, sorted
 * in increasing order by virtual address.  The number of lists a mapping is on is an
 * invariant property determined when the mapping is created, using an exponentially-
 * distributed random number.  Every mapping is on the first list.  Ideally, each
 * successive list has only 1/F as many nodes on it as the previous, where F is the
 * "fanout."  With a max of n lists, up to F**n nodes can be handled optimally.
 *
 * Searching, adding, and deleting from a skip-list can all be done in O(ln(n)) time.
 * Because the first skip-list is just a sorted list of all mappings, it is also
 * efficient to purge a sparsely populated pmap of all the mappings in a large range,
 * for example when tearing down an address space.  Large-range deletes are the
 * primary advantage of skip-lists over a hash, btw.
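 *
 * As an illustration only (this is not the kernel code, which is the assembly
 * below; the node type and field names here are invented for the example),
 * a minimal C sketch of the search idea:
 *
 *	typedef struct mapping mapping;
 *	struct mapping {
 *	    uint64_t  va;           // virtual address key
 *	    int       nlists;       // number of lists this node is on
 *	    mapping  *next[12];     // next[i] = next node on list i
 *	};
 *
 *	// Start on the sparsest list in use; drop down one list whenever the
 *	// next node would overshoot the key.
 *	static mapping *skiplist_search(mapping **heads, int curlists, uint64_t key)
 *	{
 *	    mapping **links = heads;            // link vector we are standing on
 *	    for (int i = curlists - 1; i >= 0; i--) {
 *	        mapping *m;
 *	        while ((m = links[i]) != NULL && m->va < key)
 *	            links = m->next;            // advance along list i
 *	        if (m != NULL && m->va == key)
 *	            return m;                   // exact match
 *	    }
 *	    return NULL;                        // not found
 *	}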
 *
 * We currently use a fanout of 4 and a maximum of 12 lists (cf kSkipListFanoutShift
 * and kSkipListMaxLists.)  Thus, we can optimally handle pmaps with as many as 4**12
 * pages, which is 64GB of resident physical memory per pmap.  Pmaps can be larger than
 * this, albeit with diminishing efficiency.
 *
 * The major problem with skip-lists is that we could waste a lot of space with 12
 * 64-bit link fields in every mapping.  So we currently have two sizes of mappings:
 * 64-byte nodes with 4 list links, and 128-byte nodes with 12.  Only one in every
 * (4**4)==256 mappings requires the larger node, so the average size is 64.25 bytes.
 * In practice, the additional complexity of the variable node size is entirely
 * contained in the allocate and free routines.
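 *
 * A sketch of the two node shapes (again illustrative only; the real mapping
 * layout is defined elsewhere in the pmap code):
 *
 *	struct mapping_basic {      // 64 bytes; 255 of every 256 mappings
 *	    uint64_t  va;
 *	    mapping  *next[4];      // links for lists 0..3
 *	    // ... other mapping fields ...
 *	};
 *	struct mapping_big {        // 128 bytes; needed when on 5..12 lists
 *	    uint64_t  va;
 *	    mapping  *next[12];     // links for lists 0..11
 *	    // ... other mapping fields ...
 *	};
 *
 * which gives the average of (255*64 + 1*128)/256 == 64.25 bytes cited above.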
 *
 * The other, mostly theoretical problem with skip-lists is that they have worst cases
 * where performance becomes nearly linear.  These worst cases are quite rare, but there
 * is no practical way to prevent them.
 */

; set nonzero to accumulate skip-list stats on a per-map basis:
#define	SKIPLISTSTATS	1

; cr7 bit set when mapSearchFull() finds a match on a high list:

#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * *********************
 * * m a p S e a r c h *
 * *********************
 *
 * Given a pmap and a virtual address (VA), find the mapping for that address.
 * This is the fast call, which does not set up the previous-ptr vector or make
 * consistency checks.  When called:
 *	the pmap is locked (shared or exclusive)
 *	translation is off, interrupts masked
 *	64-bit mode is enabled (if on a 64-bit machine)
 *	cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *	r3 = pmap ptr
 *	r4 = high 32 bits of key to search for (0 if a 32-bit processor)
 *	r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
 *	r7 = mpFlags field if found; undefined if not
 *
 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
 * Except for cr6 (which is global), we trash nonvolatile regs.  Called both on 32- and 64-bit
 * machines, though we quickly branch into parallel code paths.
 */
        .globl  EXT(mapSearch)
LEXT(mapSearch)
        lbz     r7,pmapCurLists(r3)         ; get largest #lists any mapping is on
        la      r8,pmapSkipLists+4(r3)      ; point to lists in pmap, assuming 32-bit machine
        rlwinm  r5,r5,0,0,19                ; zero low 12 bits of key
        mr      r6,r3                       ; save pmap ptr here so we can accumulate statistics
        li      r9,0                        ; initialize prev ptr
        addic.  r7,r7,-1                    ; get base-0 number of last list, and test for 0
        li      r2,0                        ; initialize count of mappings visited
        slwi    r7,r7,3                     ; get offset of last list in use
        blt--   mapSrchPmapEmpty            ; pmapCurLists==0 (ie, no mappings)
        lwzx    r3,r8,r7                    ; get 32-bit ptr to 1st mapping in highest list
        bf--    pf64Bitb,mapSrch32c         ; skip if 32-bit processor
        subi    r8,r8,4                     ; we use all 64 bits of ptrs
        rldimi  r5,r4,32,0                  ; r5 <- 64-bit va
        ldx     r3,r8,r7                    ; get 64-bit ptr to 1st mapping in highest list
        b       mapSrch64c                  ; enter 64-bit search loop

; 64-bit processors.  Check next mapping.
;	r2 = count of mappings visited so far
;	r3 = current mapping ptr
;	r4 = va of current mapping (ie, of r3)
;	r5 = va to search for (the "key") (low 12 bits are 0)
;	r6 = pmap ptr
;	r7 = current skip list number * 8
;	r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
;	r9 = prev ptr, or 0 if none

mapSrch64a:                                 ; loop over each mapping
        ld      r4,mpVAddr(r3)              ; get va for this mapping (plus flags in low 12 bits)
        addi    r2,r2,1                     ; count mappings visited
        rldicr  r4,r4,0,51                  ; zero low 12 bits of mapping va
        cmpld   cr1,r5,r4                   ; compare the vas
        blt     cr1,mapSrch64d              ; key is less, try next list
        la      r8,mpList0(r3)              ; point to skip list vector in this mapping
        mr      r9,r3                       ; remember prev ptr
        beq--   cr1,mapSrch64Found          ; this is the correct mapping
        ldx     r3,r7,r8                    ; get ptr to next mapping in current list
mapSrch64c:
        mr.     r3,r3                       ; was there another mapping on current list?
        bne++   mapSrch64a                  ; was another, so loop
mapSrch64d:
        subic.  r7,r7,8                     ; move on to next list offset
        ldx     r3,r7,r8                    ; get next mapping on next list (if any)
        bge++   mapSrch64c                  ; loop to try next list

; Mapping not found, check to see if prev node was a block mapping or nested pmap.
; If not, or if our address is not covered by the block or nested map, return 0.
; Note the advantage of keeping the check for block mappings (and nested pmaps)
; out of the inner loop; we do the special case work at most once per search, and
; never for the most-common case of finding a scalar mapping.  The full searches
; must check _in_ the inner loop, to get the prev ptrs right.
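
; For reference, the coverage test below as a C sketch (illustrative only; per
; the comments in this file, mpBSize holds #pages-1 for a block mapping and
; #segments-1 for a nested pmap):
;
;	static uint64_t block_last_page(uint64_t base, uint16_t mpBSize, int is_block) {
;	    uint64_t size_minus_4k = is_block
;	        ? ((uint64_t)mpBSize << 12)                  // block: 4KB pages
;	        : (((uint64_t)(mpBSize + 1) << 28) - 4096);  // nested: 256MB segments
;	    return base + size_minus_4k;                     // va of last page covered
;	}
;
; The search succeeds iff key <= block_last_page(base, mpBSize, is_block).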

        mr.     r9,r9                       ; was there a prev ptr?
        li      r3,0                        ; assume we are going to return null
        ld      r4,pmapSkipLists(r6)        ; assume prev ptr null... so next is first
        beq--   mapSrch64Exit               ; prev ptr was null, search failed
        lwz     r0,mpFlags(r9)              ; get flag bits from prev mapping
        ld      r10,mpVAddr(r9)             ; re-fetch base address of prev ptr
        ld      r4,mpList0(r9)              ; get 64-bit ptr to next mapping, if any
        andi.   r0,r0,mpBlock+mpNest        ; block mapping or nested pmap?
        lhz     r11,mpBSize(r9)             ; get #pages/#segments in block/submap mapping
        rldicr  r10,r10,0,51                ; zero low 12 bits of mapping va
        beq     mapSrch64Exit               ; prev mapping was just a scalar page, search failed
        cmpwi   r0,mpBlock                  ; block mapping or nested pmap?
        sldi    r0,r11,12                   ; assume block mapping, get size in bytes - 4k
        beq     mapSrch64f                  ; we guessed right, it was a block mapping
        addi    r11,r11,1                   ; mpBSize is 1 too low
        sldi    r11,r11,28                  ; in a nested pmap, mpBSize is in units of segments
        subi    r0,r11,4096                 ; get address of last page in submap
mapSrch64f:
        add     r10,r10,r0                  ; r10 <- last page in this mapping
        cmpld   r5,r10                      ; does this mapping cover our page?
        bgt     mapSrch64Exit               ; no, search failed
        mr      r3,r9                       ; yes, we found it

;	r2 = count of nodes visited

mapSrch64Found:                             ; WARNING: can drop down to here
        ld      r4,mpList0(r3)              ; get ptr to next mapping
        lwz     r7,mpFlags(r3)              ; Get the flags for our caller

;	r2 = count of nodes visited
;	r3 = return value (ie, found mapping or 0)
;	r4 = next mapping (or 0 if none)

mapSrch64Exit:                              ; WARNING: can drop down to here
        mr.     r5,r4                       ; next ptr null?
#if	SKIPLISTSTATS
        lwz     r10,pmapSearchCnt(r6)       ; prepare to accumulate statistics
        ld      r8,pmapSearchVisits(r6)
        addi    r10,r10,1                   ; count searches
        add     r8,r8,r2                    ; count nodes visited
        stw     r10,pmapSearchCnt(r6)
        std     r8,pmapSearchVisits(r6)
#endif
        beqlr-                              ; next ptr was null, so return 0 in r4 and r5
        lwz     r5,mpVAddr+4(r4)            ; get VA of next node
        lwz     r4,mpVAddr(r4)              ; get high half of next node's VA
        blr

; 32-bit processors.  Check next mapping.
;	r2 = count of mappings visited so far
;	r3 = current mapping ptr
;	r4 = va of current mapping (ie, of r3)
;	r5 = va to search for (the "key") (low 12 bits are 0)
;	r6 = pmap ptr
;	r7 = current skip list number * 8
;	r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
;	r9 = prev ptr, or 0 if none

mapSrch32a:                                 ; loop over each mapping
        lwz     r4,mpVAddr+4(r3)            ; get va for this mapping (plus flags in low 12 bits)
        addi    r2,r2,1                     ; count mappings visited
        rlwinm  r4,r4,0,0,19                ; zero low 12 bits of mapping va
        cmplw   cr1,r5,r4                   ; compare the vas
        blt     cr1,mapSrch32d              ; key is less, try next list
        la      r8,mpList0+4(r3)            ; point to skip list vector in this mapping
        mr      r9,r3                       ; remember prev ptr
        beq-    cr1,mapSrch32Found          ; this is the correct mapping
        lwzx    r3,r7,r8                    ; get ptr to next mapping in current list
mapSrch32c:
        mr.     r3,r3                       ; was there another mapping on current list?
        bne+    mapSrch32a                  ; was another, so loop
mapSrch32d:
        subic.  r7,r7,8                     ; move on to next list offset
        lwzx    r3,r7,r8                    ; get next mapping on next list (if any)
        bge+    mapSrch32c                  ; loop to try next list

; Mapping not found, check to see if prev node was a block mapping or nested pmap.
; If not, or if our address is not covered by the block or nested map, return 0.
; Note the advantage of keeping the check for block mappings (and nested pmaps)
; out of the inner loop; we do the special case work at most once per search, and
; never for the most-common case of finding a scalar mapping.  The full searches
; must check _in_ the inner loop, to get the prev ptrs right.

        mr.     r9,r9                       ; was there a prev ptr?
        li      r3,0                        ; assume we are going to return null
        lwz     r4,pmapSkipLists+4(r6)      ; assume prev ptr null... so next is first
        beq-    mapSrch32Exit               ; prev ptr was null, search failed
        lwz     r0,mpFlags(r9)              ; get flag bits from prev mapping
        lwz     r10,mpVAddr+4(r9)           ; re-fetch base address of prev ptr
        andi.   r0,r0,mpBlock+mpNest        ; block mapping or nested pmap?
        lwz     r4,mpList0+4(r9)            ; get ptr to next mapping, if any
        beq     mapSrch32Exit               ; prev mapping was just a scalar page, search failed
        lhz     r11,mpBSize(r9)             ; get #pages/#segments in block/submap mapping
        cmpwi   r0,mpBlock                  ; block mapping or nested pmap?
        rlwinm  r10,r10,0,0,19              ; zero low 12 bits of block mapping va
        slwi    r0,r11,12                   ; assume block mapping, get size in bytes - 4k
        beq     mapSrch32f                  ; we guessed right, it was a block mapping
        addi    r11,r11,1                   ; mpBSize is 1 too low
        slwi    r11,r11,28                  ; in a nested pmap, mpBSize is in units of segments
        subi    r0,r11,4096                 ; get address of last page in submap
mapSrch32f:
        add     r10,r10,r0                  ; r10 <- last page in this mapping
        cmplw   r5,r10                      ; does this mapping cover our page?
        bgt     mapSrch32Exit               ; no, search failed
        mr      r3,r9                       ; yes, we found it

;	r2 = count of nodes visited

mapSrch32Found:                             ; WARNING: can drop down to here
        lwz     r4,mpList0+4(r3)            ; get ptr to next mapping
        lwz     r7,mpFlags(r3)              ; Get mpFlags for our caller

;	r2 = count of nodes visited
;	r3 = return value (ie, found mapping or 0)
;	r4 = next mapping (or 0 if none)

mapSrch32Exit:
        mr.     r5,r4                       ; next ptr null?
#if	SKIPLISTSTATS
        lwz     r10,pmapSearchCnt(r6)       ; prepare to accumulate statistics
        lwz     r8,pmapSearchVisits(r6)
        lwz     r9,pmapSearchVisits+4(r6)
        addi    r10,r10,1                   ; count searches
        addc    r9,r9,r2                    ; count nodes visited
        addze   r8,r8                       ; propagate carry into high word
        stw     r10,pmapSearchCnt(r6)
        stw     r8,pmapSearchVisits(r6)
        stw     r9,pmapSearchVisits+4(r6)
#endif
        beqlr-                              ; next ptr was null, so return 0 in r4 and r5
        lwz     r5,mpVAddr+4(r4)            ; get VA of next node
        lwz     r4,mpVAddr(r4)              ; get high half of next node's VA
        blr

; Here when the pmap is empty (ie, pmapCurLists==0), both in 32 and 64-bit mode,
; and from both mapSearch and mapSearchFull.

mapSrchPmapEmpty:
        li      r3,0                        ; return null
        li      r4,0                        ; return 0 as virtual address of next node
        li      r5,0                        ; return 0 as virtual address of next node
#if	SKIPLISTSTATS
        lwz     r7,pmapSearchCnt(r6)        ; prepare to accumulate statistics
        addi    r7,r7,1                     ; count searches
        stw     r7,pmapSearchCnt(r6)
#endif
        blr

/*
 * *****************************
 * * m a p S e a r c h F u l l *
 * *****************************
 *
 * Given a pmap and a virtual address (VA), find the mapping for that address.
 * This is the "full" call, which sets up a vector of ptrs to the previous node
 * (or to the pmap, if there is no previous node) for each list that the mapping
 * is on.  We also make consistency checks on the skip-lists.  When called:
 *	the pmap is locked (shared or exclusive)
 *	translation is off, interrupts masked
 *	64-bit mode is enabled (if on a 64-bit machine)
 *	cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *	r3 = pmap ptr
 *	r4 = high 32 bits of key to search for (0 if a 32-bit processor)
 *	r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
 *
 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
 * Except for cr6 (which is global), we trash nonvolatile regs.  Called both on 32- and 64-bit
 * machines, though we quickly branch into parallel code paths.
 */
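
; For illustration, a C sketch of the "full" search, using the same invented
; node type as the sketch in the header comment: it is the fast search, except
; that at every level we record the last link vector whose entry fell below the
; key (the pmap's own headers if none), so insert/remove can relink later.
;
;	static mapping *skiplist_search_full(mapping **heads, int curlists,
;	                                     uint64_t key, mapping ***prev /* out */)
;	{
;	    mapping **links = heads, *found = NULL;
;	    for (int i = curlists - 1; i >= 0; i--) {
;	        mapping *m;
;	        while ((m = links[i]) != NULL && m->va < key)
;	            links = m->next;            // advance along list i
;	        if (m != NULL && m->va == key)
;	            found = m;                  // remember, but keep descending
;	        prev[i] = links;                // prev-link vector for this level
;	    }
;	    return found;
;	}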
        .globl  EXT(mapSearchFull)
LEXT(mapSearchFull)
        lbz     r7,pmapCurLists(r3)         ; get largest #lists any mapping is on
        la      r8,pmapSkipLists+4(r3)      ; point to lists in pmap, assuming 32-bit machine
        rlwinm  r5,r5,0,0,19                ; zero low 12 bits of key
        mr      r6,r3                       ; save pmap ptr here so we can accumulate statistics
        li      r2,0                        ; initialize count of mappings visited
        mfsprg  r12,0                       ; get the per-proc data ptr
        crclr   bFullFound                  ; we have not found the mapping yet
        addic.  r7,r7,-1                    ; get base-0 number of last list, and test for 0
        subi    r9,r8,mpList0+4             ; initialize prev ptr to be a fake mapping
        slwi    r7,r7,3                     ; get (offset*8) of last list
        la      r12,skipListPrev+4(r12)     ; point to vector of prev ptrs, assuming 32-bit machine
        blt--   mapSrchPmapEmpty            ; pmapCurLists==0 (ie, no mappings)
        lwzx    r3,r8,r7                    ; get 32-bit ptr to 1st mapping in highest list
        li      r10,0                       ; initialize prev ptr's VA to 0 too
        bf--    pf64Bitb,mapSrchFull32c     ; skip if 32-bit processor
        subi    r8,r8,4                     ; we use all 64 bits of ptrs
        subi    r12,r12,4                   ; we use all 64 bits of the prev-ptr vector too
        rldimi  r5,r4,32,0                  ; r5 <- 64-bit va
        ldx     r3,r8,r7                    ; get 64-bit ptr to 1st mapping in highest list
        b       mapSrchFull64c              ; enter 64-bit search loop

; 64-bit processors.  Check next mapping.
;	r2 = count of mappings visited so far
;	r3 = current mapping ptr
;	r4 = va of current mapping (ie, of r3)
;	r5 = va to search for (the "key") (low 12 bits are 0)
;	r6 = pmap ptr
;	r7 = current skip list number * 8
;	r8 = ptr to skip list vector of mapping pointed to by r9
;	r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
;	r10 = prev mapping's va, or 0 if r9==pmap
;	r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull64a:                             ; loop over each mapping
        ld      r4,mpVAddr(r3)              ; get va for this mapping (plus flags in low 12 bits)
        addi    r2,r2,1                     ; count mappings visited
        lwz     r0,mpFlags(r3)              ; get mapping flag bits
        cmpld   cr0,r10,r4                  ; make sure VAs come in strictly ascending order
        rldicr  r4,r4,0,51                  ; zero low 12 bits of mapping va
        cmpld   cr1,r5,r4                   ; compare the vas
        bge--   cr0,mapSkipListPanic        ; die if keys are out of order
        andi.   r0,r0,mpBlock+mpNest        ; is it a scalar mapping? (ie, of a single page)
        blt     cr1,mapSrchFull64d          ; key is less, try next list
        beq     cr1,mapSrchFull64Found      ; this is the correct mapping
        bne--   cr0,mapSrchFull64e          ; handle block mapping or nested pmap
mapSrchFull64b:
        la      r8,mpList0(r3)              ; point to skip list vector in this mapping
        mr      r9,r3                       ; current becomes previous
        ldx     r3,r7,r8                    ; get ptr to next mapping in current list
        mr      r10,r4                      ; remember prev ptr's VA
mapSrchFull64c:
        mr.     r3,r3                       ; was there another mapping on current list?
        bne++   mapSrchFull64a              ; was another, so loop
mapSrchFull64d:
        stdx    r9,r7,r12                   ; save prev ptr in per-proc vector
        subic.  r7,r7,8                     ; move on to next list offset
        ldx     r3,r7,r8                    ; get next mapping on next list (if any)
        bge++   mapSrchFull64c              ; loop to try next list

; Mapping not found, return 0 and next higher key

        li      r3,0                        ; return null
        bt--    bFullFound,mapSkipListPanic ; panic if it was on earlier list
        ld      r4,mpList0(r9)              ; get 64-bit ptr to next mapping, if any
        b       mapSrch64Exit               ; join common exit code

; Block mapping or nested pmap, and key > base.  We must compute the va of
; the end of the block to see if key fits within it.

mapSrchFull64e:
        lhz     r11,mpBSize(r3)             ; get #pages/#segments in block/submap mapping (if nonscalar)
        cmpwi   r0,mpBlock                  ; distinguish between block mapping and nested pmaps
        sldi    r0,r11,12                   ; assume block mapping, get size in bytes - 4k
        beq     mapSrchFull64f              ; we guessed right, it was a block mapping
        addi    r11,r11,1                   ; mpBSize is 1 too low
        sldi    r11,r11,28                  ; in a nested pmap, mpBSize is in units of segments
        subi    r0,r11,4096                 ; get address of last page in submap
mapSrchFull64f:
        add     r4,r4,r0                    ; r4 <- last page in this mapping
        cmpld   r5,r4                       ; does this mapping cover our page?
        bgt     mapSrchFull64b              ; no, try next mapping (r4 is advanced to end of range)

;	r2 = count of nodes visited
;	r7 = current skip list number * 8
;	r8 = ptr to prev mapping's (ie, r9's) skip-list vector
;	r9 = prev ptr, ie highest mapping that comes before search target
;	r10 = prev mapping's va
;	r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull64Found:                         ; WARNING: can drop down to here
        cmpwi   r7,0                        ; are we in the last skip-list?
        crset   bFullFound                  ; remember that we found the mapping
        bne     mapSrchFull64d              ; mapSearchFull must search all lists to get prev ptrs
        ld      r4,mpList0(r3)              ; get ptr to next mapping
        stdx    r9,r7,r12                   ; save prev ptr in last list
        lwz     r7,mpFlags(r3)              ; Get the flags for our caller
        b       mapSrch64Exit               ; join common exit code

; 32-bit processors.  Check next mapping.
;	r2 = count of nodes visited
;	r3 = ptr to next mapping in current list
;	r5 = va to search for (the "key") (low 12 bits are 0)
;	r6 = pmap ptr
;	r7 = current skip list number * 8
;	r8 = ptr to skip list vector of mapping pointed to by r9
;	r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
;	r10 = prev mapping's va, or 0 if r9==pmap
;	r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull32a:                             ; loop over each mapping
        lwz     r4,mpVAddr+4(r3)            ; get va for this mapping (plus flags in low 12 bits)
        addi    r2,r2,1                     ; count mappings visited
        lwz     r0,mpFlags(r3)              ; get mapping flag bits
        cmplw   cr0,r10,r4                  ; make sure VAs come in strictly ascending order
        rlwinm  r4,r4,0,0,19                ; zero low 12 bits of mapping va
        cmplw   cr1,r5,r4                   ; compare the vas
        bge-    cr0,mapSkipListPanic        ; die if keys are out of order
        andi.   r0,r0,mpBlock+mpNest        ; is it a scalar mapping? (ie, of a single page)
        blt     cr1,mapSrchFull32d          ; key is less than this va, try next list
        beq-    cr1,mapSrchFull32Found      ; this is the correct mapping
        bne-    cr0,mapSrchFull32e          ; handle block mapping or nested pmap
mapSrchFull32b:
        la      r8,mpList0+4(r3)            ; point to skip list vector in this mapping
        mr      r9,r3                       ; current becomes previous
        lwzx    r3,r7,r8                    ; get ptr to next mapping in current list
        mr      r10,r4                      ; remember prev ptr's VA
mapSrchFull32c:
        mr.     r3,r3                       ; next becomes current
        bne+    mapSrchFull32a              ; was another, so loop
mapSrchFull32d:
        stwx    r9,r7,r12                   ; save prev ptr in per-proc vector
        subic.  r7,r7,8                     ; move on to next list offset
        lwzx    r3,r7,r8                    ; get next mapping on lower list (if any)
        bge+    mapSrchFull32c              ; loop to try next list

; Mapping not found, return 0 and next-key

        li      r3,0                        ; return null
        bt-     bFullFound,mapSkipListPanic ; panic if it was on an earlier list
        lwz     r4,mpList0+4(r9)            ; get ptr to next mapping
        b       mapSrch32Exit               ; join common exit code

; Block mapping or nested pmap, and key > base.  We must compute the va of
; the end of the block to see if our key fits within it.

mapSrchFull32e:
        lhz     r11,mpBSize(r3)             ; get #pages/#segments in block/submap mapping (if nonscalar)
        cmpwi   r0,mpBlock                  ; distinguish between block mapping and nested pmaps
        slwi    r0,r11,12                   ; assume block mapping, get size in bytes - 4k
        beq     mapSrchFull32f              ; we guessed right, it was a block mapping
        addi    r11,r11,1                   ; mpBSize is 1 too low
        slwi    r11,r11,28                  ; in a nested pmap, mpBSize is in units of segments
        subi    r0,r11,4096                 ; get address of last page in submap
mapSrchFull32f:
        add     r4,r4,r0                    ; r4 <- last page in this mapping
        cmplw   r5,r4                       ; does this mapping cover our page?
        bgt     mapSrchFull32b              ; no, try next mapping

;	r2 = count of nodes visited
;	r7 = current skip list number * 8
;	r9 = prev ptr, ie highest mapping that comes before search target, or 0
;	r10 = prev mapping's va
;	r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull32Found:                         ; WARNING: can drop down to here
        cmpwi   r7,0                        ; are we in the last skip-list?
        crset   bFullFound                  ; remember that we found the mapping
        bne     mapSrchFull32d              ; mapSearchFull must search all lists to get prev ptrs
        lwz     r4,mpList0+4(r3)            ; get ptr to next mapping
        stwx    r9,r7,r12                   ; save prev ptr in last list
        lwz     r7,mpFlags(r3)              ; Get mpFlags for our caller
        b       mapSrch32Exit               ; join common exit code

/*
 * *********************
 * * m a p I n s e r t *
 * *********************
 *
 * Insert a mapping into pmap skip-lists.  The caller has already called mapSearchFull to
 * determine that this mapping does not overlap other mappings in the pmap.  As a side effect
 * of calling mapSearchFull, the per-proc skipListPrev array is set up with a vector of the
 * previous ptrs for each skip list.  When called:
 *	the pmap is locked (exclusive)
 *	translation is off, interrupts masked
 *	64-bit mode is enabled (if on a 64-bit machine)
 *	mapSearchFull has just been called for this mapping's key
 *	cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *	r3 = pmap ptr
 *	r4 = mapping ptr
 *
 * There is no return value.  Except for cr6 (which is global), we trash nonvolatile regs.
 */
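
; A C sketch of the splice performed below (illustrative; prev[] stands for the
; per-proc skipListPrev vector that mapSearchFull just filled in):
;
;	static void skiplist_insert(mapping *newmap, mapping ***prev) {
;	    for (int i = newmap->nlists - 1; i >= 0; i--) {
;	        newmap->next[i] = prev[i][i];   // new mapping points at old successor
;	        prev[i][i]      = newmap;       // predecessor now points at new mapping
;	    }
;	}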
        .globl  EXT(mapInsert)
LEXT(mapInsert)
        lwz     r8,mpFlags(r4)              ; get this mapping's flags
        lbz     r7,pmapCurLists(r3)         ; get current max# lists any mapping is on
        la      r10,pmapSkipLists+4(r3)     ; r10 <-- base of pmap list headers, assuming 32-bit machine
        la      r11,mpList0+4(r4)           ; r11 <-- base of this mapping's list vector
        mfsprg  r12,0                       ; get ptr to our per-proc
        andi.   r9,r8,mpLists               ; get #lists this mapping is on (1<=n<=27)
        la      r12,skipListPrev+4(r12)     ; r12 <-- base of prev ptr vector
        sub.    r6,r9,r7                    ; is this mapping on more lists than any other?
        slwi    r8,r9,3                     ; get #lists * 8
        subi    r8,r8,8                     ; get offset to topmost (last) list in use
        bf--    pf64Bitb,mapIns32           ; handle 32-bit processor
        subi    r10,r10,4                   ; we use all 8 bytes of the ptr fields
        subi    r11,r11,4
        subi    r12,r12,4
        ble++   mapIns64a                   ; not new max #lists

; 64-bit processor: We must increase pmapCurLists.  Since mapSearchFull() only
; sets up the first pmapCurLists prev ptrs, we must initialize the new ones to
; point to the pmap.  While we are at it, we verify that the unused list hdrs
; in the pmap are 0.

        cmpwi   r9,kSkipListMaxLists        ; in range?
        stb     r9,pmapCurLists(r3)         ; remember new max
        mtctr   r6                          ; set up count of new lists
        mr      r5,r8                       ; copy offset to last list
        subi    r0,r10,mpList0              ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
        bgt--   mapSkipListPanic            ; choke if this mapping is on too many lists
mapIns64NewList:
        ldx     r6,r5,r10                   ; get pmap list head
        stdx    r0,r5,r12                   ; initialize prev ptr
        subi    r5,r5,8                     ; get next list offset
        cmpdi   r6,0                        ; was list hdr null?
        bdnzt   cr0_eq,mapIns64NewList      ; loop if more lists to initialize and list hdr was 0
        bne--   mapSkipListPanic            ; die if pmap list hdr was not null

; 64-bit processor: loop over each list this mapping is on
;	r4 = mapping ptr
;	r8 = next list offset
;	r10 = ptr to base of pmap list header vector
;	r11 = ptr to base of new mapping's list vector
;	r12 = ptr to base of prev ptr vector in per-proc

mapIns64a:
        ldx     r5,r8,r12                   ; get prev ptr from per-proc vector
        cmpwi   cr1,r8,0                    ; more to go?
        la      r7,mpList0(r5)              ; get base of prev mapping's list vector
        ldx     r6,r8,r7                    ; get old successor on this list
        stdx    r4,r8,r7                    ; * insert new mapping in middle of this list
        stdx    r6,r8,r11                   ; * new mapping points at old successor
        subi    r8,r8,8                     ; get next list offset
        bne++   cr1,mapIns64a               ; more lists to go
        blr                                 ; done with 64-bit insertion

; Handle 32-bit processor.  First, increase pmapCurLists if necessary; cr0 is bgt
; iff the new mapping has more lists.  Since mapSearchFull() only sets up the first
; pmapCurLists prev ptrs, we must initialize any new ones to point to the pmap.
; While we are at it, we verify that the unused list hdrs in the pmap are 0.

mapIns32:
        ble+    mapIns32a                   ; skip if new mapping does not use extra lists
        cmpwi   r9,kSkipListMaxLists        ; in range?
        stb     r9,pmapCurLists(r3)         ; remember new max
        mtctr   r6                          ; set up count of new lists
        mr      r5,r8                       ; copy offset to last list
        subi    r0,r10,mpList0+4            ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
        bgt-    mapSkipListPanic            ; choke if this mapping is on too many lists
mapIns32NewList:
        lwzx    r6,r5,r10                   ; get pmap list head
        stwx    r0,r5,r12                   ; initialize prev ptr
        subi    r5,r5,8                     ; get next list offset
        cmpwi   r6,0                        ; was list hdr null?
        bdnzt   cr0_eq,mapIns32NewList      ; loop if more lists to initialize and list hdr was 0
        bne-    mapSkipListPanic            ; die if pmap list hdr was not null

; 32-bit processor: loop over each list this mapping is on
;	r4 = mapping ptr
;	r8 = next list offset
;	r10 = ptr to base of pmap list header vector
;	r11 = ptr to base of new mapping's list vector
;	r12 = ptr to base of prev ptr vector

mapIns32a:
        lwzx    r5,r8,r12                   ; get prev ptr from per-proc vector
        cmpwi   cr1,r8,0                    ; more to go?
        la      r7,mpList0+4(r5)            ; get base of prev mapping's list vector
        lwzx    r6,r8,r7                    ; get old successor on this list
        stwx    r4,r8,r7                    ; * insert new mapping in middle of this list
        stwx    r6,r8,r11                   ; * new mapping points at old successor
        subi    r8,r8,8                     ; get next list offset
        bne+    cr1,mapIns32a               ; more lists to go
        blr                                 ; done with 32-bit insertion

/*
 * *********************
 * * m a p R e m o v e *
 * *********************
 *
 * Remove a mapping from pmap skip-lists.  The caller has already called mapSearchFull to
 * find the mapping, which sets up the skipListPrev array with a vector of the previous
 * ptrs for each skip list.  When called:
 *	the pmap is locked (exclusive)
 *	translation is off, interrupts masked
 *	64-bit mode is enabled (if on a 64-bit machine)
 *	mapSearchFull has just been called for this mapping's key
 *	cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *	r3 = pmap ptr
 *	r4 = mapping ptr
 *
 * There is no return value.  Except for cr6 (which is global), we trash nonvolatile regs.
 */
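
; A C sketch of the unlink performed below (illustrative; prev[] is again the
; per-proc vector filled in by the preceding mapSearchFull call, and the pmap
; field names are invented for the example):
;
;	static void skiplist_remove(pmap_t *pmap, mapping *m, mapping ***prev) {
;	    for (int i = m->nlists - 1; i >= 0; i--)
;	        prev[i][i] = m->next[i];        // predecessor skips over m on list i
;	    // If m was on as many lists as any mapping in the pmap, the top
;	    // list(s) may now be empty; shrink the list count accordingly.
;	    while (pmap->curlists > 0 && pmap->heads[pmap->curlists - 1] == NULL)
;	        pmap->curlists--;
;	}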
        .globl  EXT(mapRemove)
LEXT(mapRemove)
        lwz     r8,mpFlags(r4)              ; get this mapping's flags
        lbz     r10,pmapCurLists(r3)        ; get current #lists in use
        la      r11,mpList0+4(r4)           ; r11 <-- base of this mapping's list vector
        mfsprg  r12,0                       ; get ptr to our per-proc
        andi.   r9,r8,mpLists               ; get #lists this mapping is on (1<=n<=27)
        slwi    r8,r9,3                     ; get #lists * 8
        cmpw    cr5,r9,r10                  ; compare mpLists to pmapCurLists
        la      r12,skipListPrev+4(r12)     ; r12 <-- base of prev ptr vector
        bgt--   cr5,mapSkipListPanic        ; die if mpLists > pmapCurLists
        subi    r8,r8,8                     ; get offset to topmost (last) list this mapping is in
        bf--    pf64Bitb,mapRem32a          ; skip if 32-bit processor
        subi    r11,r11,4                   ; we use all 64 bits of list links on 64-bit machines
        subi    r12,r12,4

; 64-bit processor: loop over each list this mapping is on
;	r4 = mapping ptr
;	r8 = offset to next list
;	r10 = pmapCurLists
;	r11 = ptr to base of mapping list vector
;	r12 = ptr to base of prev ptr vector in per-proc
;	cr5 = beq if (mpLists == pmapCurLists)

mapRem64a:
        ldx     r5,r8,r12                   ; get prev ptr from per-proc vector
        ldx     r9,r8,r11                   ; get next ptr from mapping
        cmpwi   cr1,r8,0                    ; more to go?
        la      r7,mpList0(r5)              ; get base of prev mapping's list vector
        stdx    r9,r8,r7                    ; point to next from prev
        subi    r8,r8,8                     ; get next list offset
        bne++   cr1,mapRem64a               ; loop if another list to unlink from

; Did we reduce #lists in use by removing last mapping in last list?

        bnelr++ cr5                         ; if (mpLists!=pmapCurLists) cannot have removed last map
        la      r5,pmapSkipLists(r3)        ; point to vector of list hdrs
mapRem64b:
        subic.  r10,r10,1                   ; get base-0 list#
        slwi    r8,r10,3                    ; get offset to last list
        ldx     r0,r8,r5                    ; get last list ptr
        cmpdi   cr1,r0,0                    ; null?
        bnelr   cr1                         ; not null, so we are done
        stb     r10,pmapCurLists(r3)        ; was null, so decrement pmapCurLists
        bgt     mapRem64b                   ; loop to see if more than one list was emptied
        blr

; 32-bit processor: loop over each list this mapping is on
;	r4 = mapping ptr
;	r8 = offset to next list
;	r10 = pmapCurLists
;	r11 = ptr to base of mapping list vector
;	r12 = ptr to base of prev ptr vector in per-proc
;	cr5 = beq if (mpLists == pmapCurLists)

mapRem32a:
        lwzx    r5,r8,r12                   ; get prev ptr from per-proc vector
        lwzx    r9,r8,r11                   ; get next ptr from mapping
        cmpwi   cr1,r8,0                    ; more to go?
        la      r7,mpList0+4(r5)            ; get base of prev mapping's list vector
        stwx    r9,r8,r7                    ; point to next from prev
        subi    r8,r8,8                     ; get next list offset
        bne+    cr1,mapRem32a               ; loop if another list to unlink from

; Did we reduce #lists in use by removing last mapping in last list?

        bnelr+  cr5                         ; if (mpLists!=pmapCurLists) cannot have removed last map
        la      r5,pmapSkipLists+4(r3)      ; point to vector of list hdrs
mapRem32b:
        subic.  r10,r10,1                   ; get base-0 list#
        slwi    r8,r10,3                    ; get offset to last list
        lwzx    r0,r8,r5                    ; get last list ptr
        cmpwi   cr1,r0,0                    ; null?
        bnelr   cr1                         ; not null, so we are done
        stb     r10,pmapCurLists(r3)        ; was null, so decrement pmapCurLists
        bgt     mapRem32b                   ; loop to see if more than one list was emptied
        blr

/*
 * *************************
 * * m a p S e t L i s t s *
 * *************************
 *
 * Called to decide how many skip-lists the next mapping will be on.  For each pmap,
 * we maintain a pseudo-random sequence based on a linear feedback shift register.  The
 * next number is generated by rotating the old value left by 1, XORing with a
 * polynomial (actually 4 8-bit polynomials concatenated), and adding 1.
 * The simple (unclamped) number of lists a mapping is on is the number of trailing 0s
 * in the pseudo-random sequence, shifted right by (log2(F) - 1) where F is the fanout,
 * plus one.  This seems to give us a near-perfect distribution, in the sense that about
 * F times more nodes are allocated on n lists as are on (n+1) lists.
 *
 * At one point we used a simple counter to assign lists.  While this gave perfect
 * distribution, there were certain access patterns that would drive a worst case
 * distribution (e.g., insert low, then high, then low, etc.).  Unfortunately,
 * these patterns were not too uncommon.  We changed to a less-than-perfect assignment,
 * but one that works consistently across all known access patterns.
 *
 * Also, we modify the "simple" trailing-0-based list count to account for an important
 * observation: because VM does a lot of removing and restoring of mappings in the process of
 * doing copy-on-write etc, it is common to have the pmap's "random number" (ie, the
 * count of created mappings) be much larger than the number of mappings currently in the
 * pmap.  This means the simple list count will often be larger than justified by the number of
 * mappings in the pmap.  To avoid this common situation, we clamp the list count to be no more
 * than ceil(logBaseF(pmapResidentCnt)).
 *
 * Finally, we also clamp the list count to kSkipListMaxLists.
 *
 * We are passed the pmap ptr in r3.  Called with translation on, interrupts enabled,
 * and in 32-bit mode.  We return the list count in r3.
 */
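
; A C sketch of the computation below (illustrative; ctz/clz builtins stand in
; for the cntlzw-based mask tricks the assembly uses, and the pmap field names
; mirror the assembly symbols):
;
;	uint32_t mapSetLists(pmap_t *pmap) {
;	    uint32_t r = pmap->pmapRandNum;
;	    r = (r << 1) | (r >> 31);               // rotate left by 1
;	    r = (r ^ 0xA7CBF5B9) + 1;               // xor with polynomial, add 1
;	    pmap->pmapRandNum = r;                  // update (non-atomically)
;	    uint32_t t = r ? __builtin_ctz(r) : 32; // trailing 0s, exponentially distributed
;	    uint32_t cnt = pmap->pmapResidentCnt;
;	    uint32_t mag = cnt ? 32 - __builtin_clz(cnt) : 0;
;	    if (t > mag) t = mag;                   // clamp to magnitude of resident count
;	    uint32_t max = kSkipListMaxLists * (kSkipListFanoutShift + 1) - 1;
;	    if (t > max) t = max;                   // so result never exceeds kSkipListMaxLists
;	    return (t >> kSkipListFanoutShift) + 1; // every mapping is on at least one list
;	}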
        .globl  EXT(mapSetLists)
LEXT(mapSetLists)
        lwz     r5,pmapRandNum(r3)          ; get the per-pmap counter of mapping creates
        lwz     r4,pmapResidentCnt(r3)      ; get number of mappings in this pmap
        lis     r11,hi16(0xA7CBF5B9)        ; Get polynomial (I just made this up...)
        li      r0,-1                       ; get a mask of 1s
        ori     r11,r11,lo16(0xA7CBF5B9)    ; Get polynomial (I just made this up...)
        rlwinm  r5,r5,1,0,31                ; Rotate
        cntlzw  r7,r4                       ; get magnitude of pmapResidentCnt
        xor     r5,r5,r11                   ; Munge with poly
        srw     r7,r0,r7                    ; r7 <- mask for magnitude of pmapResidentCnt
        addi    r6,r5,1                     ; increment pmapRandNum non-atomically
        andc    r8,r5,r6                    ; get a mask for trailing zeroes in pmapRandNum
        stw     r6,pmapRandNum(r3)          ; update "random number"
        and     r8,r8,r7                    ; clamp trailing 0s to magnitude of pmapResidentCnt
        rlwinm  r8,r8,0,32-(kSkipListMaxLists*(kSkipListFanoutShift+1))+1,31	; clamp to kSkipListMaxLists
        cntlzw  r9,r8                       ; count leading 0s in the mask
        subfic  r10,r9,32                   ; r10 <- trailing zero count
        srwi    r11,r10,kSkipListFanoutShift ; shift by 1 if fanout is 4, 2 if 8, etc
        addi    r3,r11,1                    ; every mapping is on at least one list
        blr

/*
 * *************************************
 * * m a p S k i p L i s t V e r i f y *
 * *************************************
 *
 * This does a fairly thorough sweep through a pmap's skip-list data structure, doing
 * consistency checks.  It is typically called (from hw_exceptions.s) from debug or
 * instrumented builds.  It is probably not a good idea to call this in production builds,
 * as it must run with exceptions disabled and can take a long time to verify a big pmap.
 * It runs in O(n*ln(n)).
 *
 * Called on a bl, with the pmap ptr in r20.  We assume the pmap is locked (shared) and
 * that EE and DR are off.  We check all 64 bits of ptrs even on 32-bit machines.
 * We use r20-r31, cr0, cr1, and cr7.  If we return, no inconsistencies were found.
 *
 * You will notice we make little attempt to schedule the code; clarity is deemed more
 * important than speed.
 *
 * mapSkipListVerifyC is a version that is callable from C.
 * This should be called only from the debugger, IT DOES NOT LOCK THE PMAP!!!!
 */
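
; For reference, the invariants verified below, as illustrative pseudo-C
; (names follow the sketches earlier in this file, not the real structures):
;
;	// list i must be empty iff i >= pmapCurLists; then, per node:
;	for (int i = curlists - 1; i >= 0; i--)
;	    for (mapping *m = heads[i]; m != NULL; m = m->next[i]) {
;	        assert(((uintptr_t)m & (mpBasicSize - 1)) == 0);       // aligned node
;	        assert(m->nlists > i && m->nlists <= kSkipListMaxLists);
;	        assert(m->next[i] == NULL || m->next[i]->va > m->va);  // ascending keys
;	        if (i == m->nlists - 1) visited++;     // count each mapping once
;	    }
;	assert(visited == pmapResidentCnt);            // no lost or extra mappings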

        .globl  EXT(mapSkipListVerifyC)
LEXT(mapSkipListVerifyC)
        stwu    r1,-(FM_ALIGN((31-13+1)*4)+FM_SIZE)(r1)	; Make some space on the stack
        mflr    r0                          ; Save the link register
        stmw    r13,FM_ARG0(r1)             ; Save all registers
        stw     r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Save the return

        lwz     r15,pmapvr(r3)              ; Get the V to R translation
        lwz     r16,pmapvr+4(r3)            ; Get the V to R translation
        mr      r19,r4                      ; Save register dump area

        bl      EXT(mapSetUp)               ; Get set up
        mr      r17,r11                     ; Save the MSR

        xor     r20,r3,r16                  ; Translate 32-bit portion
        bf--    pf64Bitb,mslvc32a           ; Skip if 32-bit...

        rldimi  r20,r15,32,0                ; Shift the fixed upper part of the physical over and cram in top

mslvc32a:   lis     r18,hi16(EXT(DebugWork))
        ori     r18,r18,lo16(EXT(DebugWork))
        li      r0,0x4262                   ; Get the magic "explicit call" flag value
        stw     r0,4(r18)                   ; Make sure the test knows to run

        bl      EXT(mapSkipListVerify)      ; Run the test

        li      r0,0                        ; Get a zero
        stw     r0,4(r18)                   ; Remove explicit call flag

        bt++    pf64Bitb,mslvc64a           ; This is 64-bit...

        mtmsr   r17                         ; Restore enables/translation/etc.
        isync
        b       mslvcreturn                 ; Join common...

mslvc64a:   mtmsrd  r17                     ; Restore enables/translation/etc.
        isync

mslvcreturn:
        lwz     r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1)	; Get the return
        lmw     r13,FM_ARG0(r1)             ; Get the registers
        mtlr    r0                          ; Restore the return
        lwz     r1,0(r1)                    ; Pop the stack
        blr

        .globl  EXT(mapSkipListVerify)
LEXT(mapSkipListVerify)
        mflr    r31                         ; save LR so we can bl to mapVerifyDie

; If we have already found an inconsistency and died, do not do so again, to
; avoid a loop.

        lis     r27,hi16(EXT(DebugWork))
        ori     r27,r27,lo16(EXT(DebugWork))
        lwz     r0,4(r27)                   ; Get the explicit entry flag
        lwz     r27,0(r27)                  ; Get lockout
        cmplwi  r0,0x4262                   ; Should we run anyway?
        beq--   mslvAnyway                  ; Yes...
        cmpwi   r27,0                       ; have we already found an error?
        bnelr--                             ; yes, just return without checking again

; Not a recursive call, so initialize.

mslvAnyway:
        mfsprg  r23,2                       ; get the feature flags
        mtcrf   0x02,r23                    ; put pf64Bit where we can test it
        lbz     r26,pmapCurLists(r20)       ; get #lists that are in use
        lwz     r21,pmapResidentCnt(r20)    ; get #mappings in this pmap
        cmpwi   r26,kSkipListMaxLists       ; in range?
        bgtl--  mapVerifyDie                ; pmapCurLists is too big

; To prevent infinite loops, set limit of (pmapCurLists*pmapResidentCnt) iterations.
; Since we walk each list, this is the max number of mappings we could visit.

        li      r23,0                       ; initialize count
mapVer0:
        subic.  r26,r26,1                   ; loop pmapCurLists times (but at least once)
        add     r23,r23,r21                 ; compute (pmapCurLists*pmapResidentCnt)
        bgt     mapVer0                     ; this will be a 64-bit qty on 64-bit machines

        li      r22,kSkipListMaxLists       ; initialize list#
        bf--    pf64Bitb,mapVer32           ; go handle a 32-bit processor

; Loop over each list, counting mappings in each.  We first check whether or not
; the list is empty (ie, if the pmapSkipLists ptr is null.)  All lists above
; pmapCurLists should be empty, and no list at or below pmapCurLists should be.
;
;	r21 = decrementing counter of mappings in this pmap
;	r22 = next list# (1...kSkipListMaxLists)
;	r23 = decrementing counter for infinite loop check

mapVer64:
        slwi    r25,r22,3                   ; get offset to next skiplist
        la      r26,pmapSkipLists(r20)      ; get ptr to base of skiplist vector
        subi    r25,r25,8                   ; get offset to this skiplist (ie, (r22*8)-8)
        ldx     r26,r25,r26                 ; get 1st mapping on this list, if any
        lbz     r28,pmapCurLists(r20)       ; get #lists in use
        cmpdi   cr6,r26,0                   ; set cr6_eq if this list is null ("null")
        cmpw    cr7,r22,r28                 ; set cr7_gt if this list is > pmapCurLists ("high")
        crxor   cr0_eq,cr6_eq,cr7_gt        ; cr0_eq <-- (null & !high) | (!null & high)
        beql--  mapVerifyDie                ; die if this list is null when it should not be, etc

; Loop over each node in the list.
;	r21 = decrementing counter of mappings in this pmap
;	r22 = this list# (1...kSkipListMaxLists)
;	r23 = decrementing counter for infinite loop check
;	r25 = offset to this skiplist (ie, ((r22<<3)-8))

mapVer64a:
        lwz     r29,mpFlags(r26)            ; get bits for this mapping
        ld      r28,mpVAddr(r26)            ; get key
        subic.  r23,r23,1                   ; check for loops
        bltl--  mapVerifyDie                ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
        andi.   r30,r26,mpBasicSize-1       ; test address for alignment
        bnel--  mapVerifyDie                ; not aligned
        andi.   r27,r29,mpLists             ; get #lists this mapping is supposed to be on
        cmpw    cr1,r27,r22                 ; is it supposed to be on this list?
        bltl--  cr1,mapVerifyDie            ; mapping's mpLists is too low
        cmpwi   r27,kSkipListMaxLists       ; too big?
        bgtl--  mapVerifyDie                ; mapping's mpLists > max
        rldicr  r28,r28,0,51                ; clear low 12 bits of va
        bne++   cr1,mapVer64f               ; jump if this is not highest list for this node

; This is the "highest" (last) list this mapping is on.
; Do some additional checks (so we only do them once per mapping.)
; First, if a block mapping or nested pmap, compute block end.

        andi.   r29,r29,mpBlock+mpNest      ; is it a block mapping or nested pmap?
        subi    r21,r21,1                   ; count mappings in this pmap
        beq++   mapVer64b                   ; neither block nor nested pmap
        lhz     r27,mpBSize(r26)            ; get #pages or #segments
        cmpwi   r29,mpBlock                 ; which one is it?
        sldi    r29,r27,12                  ; assume block mapping, units are (pages-1)
        beq     mapVer64b                   ; guessed correctly
        addi    r27,r27,1                   ; units of nested pmap are (#segs-1)
        sldi    r29,r27,28                  ; convert to #bytes
        subi    r29,r29,4096                ; get offset to last byte in nested pmap

; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.

mapVer64b:
        add     r24,r28,r29                 ; r24 <- address of last valid page in this mapping
        la      r28,mpList0(r26)            ; get base of this mapping's vector
        lwz     r27,mpFlags(r26)            ; Get the number of lists
        andi.   r27,r27,mpLists             ; get #lists this mapping is on (1<=n<=27)
        cmplwi  r27,mpBasicLists            ; Into bigger mapping?
        li      r27,mpBasicLists*8-8        ; Assume normal
        ble+    mapVer64c                   ; It is...
        li      r27,kSkipListMaxLists*8-8   ; initialize list offset for inner loop

; Inner loop over each list link in this mapping's mpList vector.
;	r24 = address of last valid page in this mapping
;	r27 = offset for next list in inner loop
;	r28 = base of this mapping's list links

mapVer64c:
        cmpw    cr1,r27,r25                 ; higher, lower, or same?
        ldx     r29,r27,r28                 ; get link to next mapping at this level
        mr.     r29,r29                     ; null?
        beq     mapVer64d                   ; link null, which is always OK
        bgtl--  cr1,mapVerifyDie            ; a mapping has a non-null list higher than its mpLists
        ld      r30,mpVAddr(r29)            ; get next mapping's va
        rldicr  r30,r30,0,51                ; zero low 12 bits
        cmpld   r30,r24                     ; compare next key with ours
        blel--  mapVerifyDie                ; a next node has key <= to ours
mapVer64d:
        subic.  r27,r27,8                   ; move on to next list
        bne++   mapVer64c                   ; loop if more to go

; Next node on current list, or next list if current done, or return if no more lists.

mapVer64f:
        la      r28,mpList0(r26)            ; get base of this mapping's vector
        ldx     r26,r25,r28                 ; get next mapping on this list
        mr.     r26,r26                     ; is there one?
        bne++   mapVer64a                   ; yes, handle
        subic.  r22,r22,1                   ; is there another list?
        bgt++   mapVer64                    ; loop if so

        cmpwi   r21,0                       ; did we find all the mappings in the pmap?
        bnel--  mapVerifyDie                ; no
        mtlr    r31                         ; restore return address
        blr

; Handle 32-bit machine.

mapVer32:
        lwz     r24,mpFlags(r20)            ; Get number of lists
        la      r30,pmapSkipLists(r20)      ; first, check the pmap list hdrs
        andi.   r24,r24,mpLists             ; Clean the number of lists
        bl      mapVerUpperWordsAre0        ; are the upper words of each list all 0?

; Loop over each list, counting mappings in each.  We first check whether or not
; the list is empty.  All lists above pmapCurLists should be empty, and no list
; at or below pmapCurLists should be.
;
;	r21 = decrementing counter of mappings in this pmap
;	r22 = next list# (1...kSkipListMaxLists)
;	r23 = decrementing counter for infinite loop check

mapVer32NextList:
        lbz     r28,pmapCurLists(r20)       ; get #lists in use
        slwi    r25,r22,3                   ; get offset to next skiplist
        la      r26,pmapSkipLists+4(r20)    ; get ptr to base of skiplist vector
        subi    r25,r25,8                   ; get offset to this skiplist (ie, (r22*8)-8)
        lwzx    r26,r25,r26                 ; get the 1st mapping on this list, or 0
        cmpw    cr7,r22,r28                 ; set cr7_gt if this list is > pmapCurLists ("high")
        cmpwi   cr6,r26,0                   ; set cr6_eq if this list is null ("null")
        crxor   cr0_eq,cr6_eq,cr7_gt        ; cr0_eq <-- (null & !high) | (!null & high)
        beql-   mapVerifyDie                ; die if this list is null when it should not be, etc

; Loop over each node in the list.
;	r21 = decrementing counter of mappings in this pmap
;	r22 = this list# (1...kSkipListMaxLists)
;	r23 = decrementing counter for infinite loop check
;	r25 = offset to this skiplist (ie, ((r22<<3)-8))

mapVer32a:
        lwz     r29,mpFlags(r26)            ; get bits for this mapping
        andi.   r30,r26,mpBasicSize-1       ; test address for alignment
        lwz     r24,mpVAddr+0(r26)          ; get upper word of key
        bnel-   mapVerifyDie                ; mapping address not 64-byte aligned
        lwz     r28,mpVAddr+4(r26)          ; get lower word of key
        subic.  r23,r23,1                   ; check for loops
        bltl-   mapVerifyDie                ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
        cmpwi   r24,0                       ; upper word of key (ie, va) should be 0
        bnel-   mapVerifyDie                ; was not
        andi.   r27,r29,mpLists             ; get #lists this mapping is supposed to be on
        cmpw    cr1,r27,r22                 ; is it supposed to be on this list?
        bltl-   cr1,mapVerifyDie            ; mapping's mpLists is too low
        cmpwi   r27,kSkipListMaxLists       ; too big?
        bgtl-   mapVerifyDie                ; mapping's mpLists > max
        rlwinm  r28,r28,0,0,19              ; clear low 12 bits of va
        bne+    cr1,mapVer32f               ; jump if this is not highest list for this node

; This is the "highest" (last) list this mapping is on.
; Do some additional checks (so we only do them once per mapping.)
; First, make sure the upper words of the mpList vector are 0.

        subi    r21,r21,1                   ; count mappings in this pmap
        lwz     r24,mpFlags(r26)            ; Get number of lists
        la      r30,mpList0(r26)            ; point to base of skiplist vector
        andi.   r24,r24,mpLists             ; Clean the number of lists
        bl      mapVerUpperWordsAre0        ; make sure upper words are all 0 (uses r24 and r27)

; Then, if a block mapping or nested pmap, compute block end.

        andi.   r29,r29,mpBlock+mpNest      ; is it a block mapping or nested pmap?
        beq+    mapVer32b                   ; neither block nor nested pmap
        lhz     r27,mpBSize(r26)            ; get #pages or #segments
        cmpwi   r29,mpBlock                 ; which one is it?
        slwi    r29,r27,12                  ; assume block mapping, units are (pages-1)
        beq     mapVer32b                   ; guessed correctly
        addi    r27,r27,1                   ; units of nested pmap are (#segs-1)
        slwi    r29,r27,28                  ; convert to #bytes
        subi    r29,r29,4096                ; get offset to last byte in nested pmap

; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.

mapVer32b:
        add     r24,r28,r29                 ; r24 <- address of last valid page in this mapping
        la      r28,mpList0+4(r26)          ; get base of this mapping's vector
        lwz     r27,mpFlags(r26)            ; Get the number of lists
        andi.   r27,r27,mpLists             ; get #lists this mapping is on (1<=n<=27)
        cmplwi  r27,mpBasicLists            ; Into bigger mapping?
        li      r27,mpBasicLists*8-8        ; Assume normal
        ble+    mapVer32c                   ; It is...
        li      r27,kSkipListMaxLists*8-8   ; initialize list offset for inner loop

; Inner loop over each list in this mapping's mpList vector.
;	r24 = address of last valid page in this mapping
;	r27 = offset for next list in inner loop
;	r28 = base of this mapping's list links

mapVer32c:
        cmpw    cr1,r27,r25                 ; higher, lower, or same?
        lwzx    r29,r27,r28                 ; get link to next mapping at this level
        mr.     r29,r29                     ; null?
        beq     mapVer32d                   ; link null, which is always OK

        bgtl-   cr1,mapVerifyDie            ; a mapping has a non-null list higher than its mpLists
        lwz     r30,mpVAddr+4(r29)          ; get next mapping's va
        rlwinm  r30,r30,0,0,19              ; zero low 12 bits
        cmplw   r30,r24                     ; compare next key with ours
        blel-   mapVerifyDie                ; a next node has key <= to ours
mapVer32d:
        subic.  r27,r27,8                   ; move on to next list
        bne+    mapVer32c                   ; loop if more to go

; Next node on current list, or next list if current done, or return if no more lists.

mapVer32f:
        la      r28,mpList0+4(r26)          ; get base of this mapping's vector again
        lwzx    r26,r25,r28                 ; get next mapping on this list
        mr.     r26,r26                     ; is there one?
        bne+    mapVer32a                   ; yes, handle
        subic.  r22,r22,1                   ; is there another list?
        bgt+    mapVer32NextList            ; loop if so

        cmpwi   r21,0                       ; did we find all the mappings in the pmap?
        bnel-   mapVerifyDie                ; no
        mtlr    r31                         ; restore return address
        blr

; Subroutine to verify that the upper words of a vector of kSkipListMaxLists
; doublewords are 0.
;	r30 = ptr to base of vector
; Uses r24 and r27.

mapVerUpperWordsAre0:
        cmplwi  r24,mpBasicLists            ; Do we have more than basic?
        li      r24,mpBasicLists*8          ; Assume basic
        ble++   mapVerUpper1                ; We have the basic size
        li      r24,kSkipListMaxLists*8     ; Use max size
mapVerUpper1:
        subic.  r24,r24,8                   ; get offset to next doubleword
        lwzx    r27,r24,r30                 ; get upper word
        cmpwi   cr1,r27,0                   ; 0?
        bne-    cr1,mapVerifyDie            ; die if not, passing caller's LR
        bgt+    mapVerUpper1                ; loop if more to go
        blr

; bl here if mapSkipListVerify detects an inconsistency.

mapVerifyDie:
        mtlr    r31                         ; Restore return
        lis     r31,hi16(EXT(DebugWork))
        ori     r31,r31,lo16(EXT(DebugWork))
        lwz     r0,4(r31)                   ; Get the explicit entry flag
        cmplwi  r0,0x4262                   ; Should we run anyway?
        beqlr--                             ; Explicit call, return...

        li      r0,1                        ; Get a nonzero lockout value
        stw     r0,0(r31)                   ; Lock out further calls
        BREAKPOINT_TRAP                     ; hopefully, enter debugger

/*
 * Panic (choke, to be exact) because of messed-up skip-lists.  The LR points back
 * to the original caller of the skip-list function.
 */

mapSkipListPanic:                           ; skip-lists are screwed up
        lis     r0,hi16(Choke)              ; get Choke firmware call number
        ori     r0,r0,lo16(Choke)
        li      r3,failSkipLists            ; get choke code
        sc                                  ; choke!