/*
 * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

/*
 * These are the subroutines that manage the skip-list data structures used for the
 * resident mappings for each pmap. We used to use a much simpler hash-based scheme,
 * but it didn't scale well for 64-bit address spaces and multi-GB real memories.
 * Here's a brief tutorial on skip-lists:
 *
 * The basic idea is that each mapping is on one or more singly-linked lists, sorted
 * in increasing order by virtual address. The number of lists a mapping is on is an
 * invariant property determined when the mapping is created, using an exponentially-
 * distributed random number. Every mapping is on the first list. Ideally, each
 * successive list has only 1/F as many nodes on it as the previous, where F is the
 * "fanout." With a max of n lists, up to F**n nodes can be handled optimally.
 *
 * Searching, adding, and deleting from a skip-list can all be done in O(ln(n)) time.
 * Because the first skip-list is just a sorted list of all mappings, it is also
 * efficient to purge a sparsely populated pmap of all the mappings in a large range,
 * for example when tearing down an address space. Large-range deletes are the
 * primary advantage of skip-lists over a hash, btw.
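 *
 * As a rough C sketch of the search (hypothetical types and field names, not
 * the actual mapping structure used by the code below):
 *
 *      typedef struct mapping {
 *          uint64_t        vaddr;              // sort key: virtual address
 *          struct mapping  *next[12];          // one forward link per list
 *      } mapping;
 *
 *      // Start on the sparsest list in use; drop down a level on overshoot.
 *      mapping *skipListSearch(mapping **heads, int curLists, uint64_t key) {
 *          mapping *prev = NULL;               // last node known to be < key
 *          for (int list = curLists - 1; list >= 0; list--) {
 *              mapping *m = prev ? prev->next[list] : heads[list];
 *              while (m && m->vaddr < key) {   // advance along this list
 *                  prev = m;
 *                  m = m->next[list];
 *              }
 *              if (m && m->vaddr == key)
 *                  return m;                   // exact match
 *          }
 *          return NULL;                        // not on any list
 *      }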
 *
 * We currently use a fanout of 4 and a maximum of 12 lists (cf kSkipListFanoutShift
 * and kSkipListMaxLists.) Thus, we can optimally handle pmaps with as many as 4**12
 * pages, which is 64GB of resident physical memory per pmap. Pmaps can be larger than
 * this, albeit with diminishing efficiency.
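 *
 * (Checking the arithmetic: 4**12 = 16,777,216 mappings, and 16M pages at 4KB
 * each is 64GB.)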
 *
 * The major problem with skip-lists is that we could waste a lot of space with 12
 * 64-bit link fields in every mapping. So we currently have two sizes of mappings:
 * 64-byte nodes with 4 list links, and 128-byte nodes with 12. Only one in every
 * (4**4)==256 mappings requires the larger node, so the average size is 64.25 bytes.
 * In practice, the additional complexity of the variable node size is entirely
 * contained in the allocate and free routines.
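 *
 * (The 64.25-byte average follows from (255*64 + 1*128)/256 = 64.25.)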
 *
 * The other, mostly theoretical problem with skip-lists is that they have worst cases
 * where performance becomes nearly linear. These worst cases are quite rare but there
 * is no practical way to prevent them.
 */

; set nonzero to accumulate skip-list stats on a per-map basis:
#define SKIPLISTSTATS 1

; cr7 bit (bFullFound) set when mapSearchFull() finds a match on a high list

#include <ppc/proc_reg.h>
#include <ppc/exception.h>

/*
 * *********************
 * * m a p S e a r c h *
 * *********************
 *
 * Given a pmap and a virtual address (VA), find the mapping for that address.
 * This is the fast call, that does not set up the previous-ptr vector or make
 * consistency checks. When called:
 *      the pmap is locked (shared or exclusive)
 *      translation is off, interrupts masked
 *      64-bit mode is enabled (if on a 64-bit machine)
 *      cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *      r3 = pmap ptr
 *      r4 = high 32 bits of key to search for (0 if a 32-bit processor)
 *      r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
 *      r7 = mpFlags field if found. Undefined if not
 *
 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
 * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit
 * machines, though we quickly branch into parallel code paths.
 */
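
; In C terms the contract is roughly (a hypothetical prototype; the real code
; passes and returns everything in registers, as listed above):
;
;       mapping *mapSearch(pmap *map, addr64_t key,     // returns mapping or 0
;                          addr64_t *nextVA,            // out: VA of next mapping, or 0
;                          unsigned int *mpFlags);      // out: flags of found mapping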

            .globl  EXT(mapSearch)
LEXT(mapSearch)
            lbz     r7,pmapCurLists(r3)         ; get largest #lists any mapping is on
            la      r8,pmapSkipLists+4(r3)      ; point to lists in pmap, assuming 32-bit machine
            rlwinm  r5,r5,0,0,19                ; zero low 12 bits of key
            mr      r6,r3                       ; save pmap ptr here so we can accumulate statistics
            li      r9,0                        ; initialize prev ptr
            addic.  r7,r7,-1                    ; get base-0 number of last list, and test for 0
            li      r2,0                        ; initialize count of mappings visited
            slwi    r7,r7,3                     ; get offset of last list in use
            blt--   mapSrchPmapEmpty            ; pmapCurLists==0 (ie, no mappings)
            lwzx    r3,r8,r7                    ; get 32-bit ptr to 1st mapping in highest list
            bf--    pf64Bitb,mapSrch32c         ; skip if 32-bit processor
            subi    r8,r8,4                     ; we use all 64 bits of ptrs
            rldimi  r5,r4,32,0                  ; r5 <- 64-bit va
            ldx     r3,r8,r7                    ; get 64-bit ptr to 1st mapping in highest list
            b       mapSrch64c                  ; enter 64-bit search loop

; 64-bit processors. Check next mapping.
;   r2 = count of mappings visited so far
;   r3 = current mapping ptr
;   r4 = va of current mapping (ie, of r3)
;   r5 = va to search for (the "key") (low 12 bits are 0)
;   r6 = pmap ptr
;   r7 = current skip list number * 8
;   r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
;   r9 = prev ptr, or 0 if none

mapSrch64a:                                     ; loop over each mapping
            ld      r4,mpVAddr(r3)              ; get va for this mapping (plus flags in low 12 bits)
            addi    r2,r2,1                     ; count mappings visited
            rldicr  r4,r4,0,51                  ; zero low 12 bits of mapping va
            cmpld   cr1,r5,r4                   ; compare the vas
            blt     cr1,mapSrch64d              ; key is less, try next list
            la      r8,mpList0(r3)              ; point to skip list vector in this mapping
            mr      r9,r3                       ; remember prev ptr
            beq--   cr1,mapSrch64Found          ; this is the correct mapping
            ldx     r3,r7,r8                    ; get ptr to next mapping in current list
mapSrch64c:                                     ; r3 = next mapping (or 0)
            mr.     r3,r3                       ; was there another mapping on current list?
            bne++   mapSrch64a                  ; was another, so loop
mapSrch64d:                                     ; here to try next list
            subic.  r7,r7,8                     ; move on to next list offset
            ldx     r3,r7,r8                    ; get next mapping on next list (if any)
            bge++   mapSrch64c                  ; loop to try next list

; Mapping not found, check to see if prev node was a block mapping or nested pmap.
; If not, or if our address is not covered by the block or nested map, return 0.
; Note the advantage of keeping the check for block mappings (and nested pmaps)
; out of the inner loop; we do the special case work at most once per search, and
; never for the most-common case of finding a scalar mapping. The full searches
; must check _in_ the inner loop, to get the prev ptrs right.
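;
; In rough C, the post-loop check amounts to (hypothetical names; "pages"
; stands for the length decoded from mpBSize below):
;
;       if (prev == NULL)
;           return NULL;                        // nothing precedes the key
;       uint64_t lastPage = prev->vaddr + (prev->pages << 12) - 4096;
;       return (key <= lastPage) ? prev : NULL; // key inside block/nested map?
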
            mr.     r9,r9                       ; was there a prev ptr?
            li      r3,0                        ; assume we are going to return null
            ld      r4,pmapSkipLists(r6)        ; assume prev ptr null... so next is first
            beq--   mapSrch64Exit               ; prev ptr was null, search failed
            lwz     r0,mpFlags(r9)              ; get flag bits from prev mapping
            lhz     r11,mpBSize(r9)             ; get #pages/#segments in block/submap mapping
            rlwinm  r0,r0,mpBSub+1,31,31        ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            ld      r10,mpVAddr(r9)             ; re-fetch base address of prev ptr
            ori     r0,r0,0x3216                ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            addi    r11,r11,1                   ; Convert 0-based to 1-based
            rlwnm   r0,r0,r0,27,31              ; Rotate to get 12 or 25
            ld      r4,mpList0(r9)              ; get 64-bit ptr to next mapping, if any
            sld     r11,r11,r0                  ; Get the length in bytes
            rldicr  r10,r10,0,51                ; zero low 12 bits of mapping va
            subi    r0,r11,4096                 ; get offset of last page in mapping
            add     r10,r10,r0                  ; r10 <- last page in this mapping
            cmpld   r5,r10                      ; does this mapping cover our page?
            bgt     mapSrch64Exit               ; no, search failed
            mr      r3,r9                       ; yes, we found it

; r2 = count of nodes visited

mapSrch64Found:                                 ; WARNING: can drop down to here
            ld      r4,mpList0(r3)              ; get ptr to next mapping
            lwz     r7,mpFlags(r3)              ; Get the flags for our caller

; r2 = count of nodes visited
; r3 = return value (ie, found mapping or 0)
; r4 = next mapping (or 0 if none)

mapSrch64Exit:                                  ; WARNING: can drop down to here
            mr.     r5,r4                       ; next ptr null?
#if SKIPLISTSTATS
            lwz     r10,pmapSearchCnt(r6)       ; prepare to accumulate statistics
            ld      r8,pmapSearchVisits(r6)
            addi    r10,r10,1                   ; count searches
            add     r8,r8,r2                    ; count nodes visited
            stw     r10,pmapSearchCnt(r6)
            std     r8,pmapSearchVisits(r6)
#endif
            beqlr-                              ; next ptr was null, so return 0 in r4 and r5
            lwz     r5,mpVAddr+4(r4)            ; get VA of next node
            lwz     r4,mpVAddr(r4)              ; get high half of next VA
            blr

; 32-bit processors. Check next mapping.
;   r2 = count of mappings visited so far
;   r3 = current mapping ptr
;   r4 = va of current mapping (ie, of r3)
;   r5 = va to search for (the "key") (low 12 bits are 0)
;   r6 = pmap ptr
;   r7 = current skip list number * 8
;   r8 = ptr to skip list vector of mapping pointed to by r9 (or pmap, if r9==0)
;   r9 = prev ptr, or 0 if none

mapSrch32a:                                     ; loop over each mapping
            lwz     r4,mpVAddr+4(r3)            ; get va for this mapping (plus flags in low 12 bits)
            addi    r2,r2,1                     ; count mappings visited
            rlwinm  r4,r4,0,0,19                ; zero low 12 bits of mapping va
            cmplw   cr1,r5,r4                   ; compare the vas
            blt     cr1,mapSrch32d              ; key is less, try next list
            la      r8,mpList0+4(r3)            ; point to skip list vector in this mapping
            mr      r9,r3                       ; remember prev ptr
            beq-    cr1,mapSrch32Found          ; this is the correct mapping
            lwzx    r3,r7,r8                    ; get ptr to next mapping in current list
mapSrch32c:                                     ; r3 = next mapping (or 0)
            mr.     r3,r3                       ; was there another mapping on current list?
            bne+    mapSrch32a                  ; was another, so loop
mapSrch32d:                                     ; here to try next list
            subic.  r7,r7,8                     ; move on to next list offset
            lwzx    r3,r7,r8                    ; get next mapping on next list (if any)
            bge+    mapSrch32c                  ; loop to try next list

; Mapping not found, check to see if prev node was a block mapping or nested pmap.
; If not, or if our address is not covered by the block or nested map, return 0.
; Note the advantage of keeping the check for block mappings (and nested pmaps)
; out of the inner loop; we do the special case work at most once per search, and
; never for the most-common case of finding a scalar mapping. The full searches
; must check _in_ the inner loop, to get the prev ptrs right.

            mr.     r9,r9                       ; was there a prev ptr?
            li      r3,0                        ; assume we are going to return null
            lwz     r4,pmapSkipLists+4(r6)      ; assume prev ptr null... so next is first
            beq-    mapSrch32Exit               ; prev ptr was null, search failed
            lwz     r0,mpFlags(r9)              ; get flag bits from prev mapping
            lhz     r11,mpBSize(r9)             ; get #pages/#segments in block/submap mapping
            lwz     r10,mpVAddr+4(r9)           ; re-fetch base address of prev ptr

            rlwinm  r0,r0,mpBSub+1,31,31        ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r11,r11,1                   ; Convert 0-based to 1-based
            ori     r0,r0,0x3216                ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            rlwnm   r0,r0,r0,27,31              ; Rotate to get 12 or 25
            lwz     r4,mpList0+4(r9)            ; get ptr to next mapping, if any
            slw     r11,r11,r0                  ; Get length in bytes
            rlwinm  r10,r10,0,0,19              ; zero low 12 bits of block mapping va
            subi    r0,r11,4096                 ; get offset of last page in mapping
            add     r10,r10,r0                  ; r10 <- last page in this mapping
            cmplw   r5,r10                      ; does this mapping cover our page?
            bgt     mapSrch32Exit               ; no, search failed
            mr      r3,r9                       ; yes, we found it

; r2 = count of nodes visited

mapSrch32Found:                                 ; WARNING: can drop down to here
            lwz     r4,mpList0+4(r3)            ; get ptr to next mapping
            lwz     r7,mpFlags(r3)              ; Get mpFlags for our caller
; r2 = count of nodes visited
; r3 = return value (ie, found mapping or 0)
; r4 = next mapping (or 0 if none)

mapSrch32Exit:                                  ; WARNING: can drop down to here
            mr.     r5,r4                       ; next ptr null?
#if SKIPLISTSTATS
            lwz     r10,pmapSearchCnt(r6)       ; prepare to accumulate statistics
            lwz     r8,pmapSearchVisits(r6)
            lwz     r9,pmapSearchVisits+4(r6)
            addi    r10,r10,1                   ; count searches
            addc    r9,r9,r2                    ; count nodes visited
            addze   r8,r8                       ; propagate carry into the high word
            stw     r10,pmapSearchCnt(r6)
            stw     r8,pmapSearchVisits(r6)
            stw     r9,pmapSearchVisits+4(r6)
#endif
            beqlr-                              ; next ptr was null, so return 0 in r4 and r5
            lwz     r5,mpVAddr+4(r4)            ; get VA of next node
            lwz     r4,mpVAddr(r4)              ; get high half of next VA (0 on 32-bit machines)
            blr

; Here when the pmap is empty (ie, pmapCurLists==0), both in 32 and 64-bit mode,
; and from both mapSearch and mapSearchFull.

mapSrchPmapEmpty:
            li      r3,0                        ; return null
            li      r4,0                        ; return 0 as virtual address of next node
            li      r5,0
#if SKIPLISTSTATS
            lwz     r7,pmapSearchCnt(r6)        ; prepare to accumulate statistics
            addi    r7,r7,1                     ; count searches
            stw     r7,pmapSearchCnt(r6)
#endif
            blr

/*
 * *****************************
 * * m a p S e a r c h F u l l *
 * *****************************
 *
 * Given a pmap and a virtual address (VA), find the mapping for that address.
 * This is the "full" call, that sets up a vector of ptrs to the previous node
 * (or to the pmap, if there is no previous node) for each list that the mapping
 * is on. We also make consistency checks on the skip-lists. When called:
 *      the pmap is locked (shared or exclusive)
 *      translation is off, interrupts masked
 *      64-bit mode is enabled (if on a 64-bit machine)
 *      cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *      r3 = pmap ptr
 *      r4 = high 32 bits of key to search for (0 if a 32-bit processor)
 *      r5 = low 32 bits of key (low 12 bits may be nonzero garbage)
 *
 * We return the mapping ptr (or 0) in r3, and the next VA (or 0 if no more) in r4 and r5.
 * Except for cr6 (which is global), we trash nonvolatile regs. Called both on 32- and 64-bit
 * machines, though we quickly branch into parallel code paths.
 */
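
; A rough C sketch of the prev-ptr bookkeeping (hypothetical names; the real
; vector lives in the per-proc area as skipListPrev, and the code below uses a
; "fake mapping" ptr into the pmap instead of the NULL used here):
;
;       mapping *mapSearchFull(pmap *map, uint64_t key, mapping **prevVec) {
;           mapping *prev = NULL, *found = NULL;
;           for (int list = map->curLists - 1; list >= 0; list--) {
;               mapping *m = prev ? prev->next[list] : map->listHeads[list];
;               while (m && m->vaddr < key) {   // advance on this list
;                   prev = m;
;                   m = m->next[list];
;               }
;               if (m && m->vaddr == key)
;                   found = m;                  // keep going: lower lists
;               prevVec[list] = prev;           // still need their prev ptrs
;           }
;           return found;
;       }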

            .globl  EXT(mapSearchFull)
LEXT(mapSearchFull)
            lbz     r7,pmapCurLists(r3)         ; get largest #lists any mapping is on
            la      r8,pmapSkipLists+4(r3)      ; point to lists in pmap, assuming 32-bit machine
            rlwinm  r5,r5,0,0,19                ; zero low 12 bits of key
            mr      r6,r3                       ; save pmap ptr here so we can accumulate statistics
            li      r2,0                        ; initialize count of mappings visited
            mfsprg  r12,0                       ; get the per-proc data ptr
            crclr   bFullFound                  ; we have not found the mapping yet
            addic.  r7,r7,-1                    ; get base-0 number of last list, and test for 0
            subi    r9,r8,mpList0+4             ; initialize prev ptr to be a fake mapping
            slwi    r7,r7,3                     ; get (offset*8) of last list
            la      r12,skipListPrev+4(r12)     ; point to vector of prev ptrs, assuming 32-bit machine
            blt--   mapSrchPmapEmpty            ; pmapCurLists==0 (ie, no mappings)
            lwzx    r3,r8,r7                    ; get 32-bit ptr to 1st mapping in highest list
            li      r10,0                       ; initialize prev ptrs VA to 0 too
            bf--    pf64Bitb,mapSrchFull32c     ; skip if 32-bit processor
            subi    r8,r8,4                     ; we use all 64 bits of ptrs
            subi    r12,r12,4
            rldimi  r5,r4,32,0                  ; r5 <- 64-bit va
            ldx     r3,r8,r7                    ; get 64-bit ptr to 1st mapping in highest list
            b       mapSrchFull64c              ; enter 64-bit search loop

; 64-bit processors. Check next mapping.
;   r2 = count of mappings visited so far
;   r3 = current mapping ptr
;   r4 = va of current mapping (ie, of r3)
;   r5 = va to search for (the "key") (low 12 bits are 0)
;   r6 = pmap ptr
;   r7 = current skip list number * 8
;   r8 = ptr to skip list vector of mapping pointed to by r9
;   r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
;   r10 = lowest expected next va, 0 at the beginning of the search
;   r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull64a:                                 ; loop over each mapping
            addi    r2,r2,1                     ; count mappings visited
            lwz     r0,mpFlags(r3)              ; get mapping flag bits
            lhz     r11,mpBSize(r3)             ; get #pages/#segments in block/submap mapping
            ld      r4,mpVAddr(r3)              ; get va for this mapping (plus flags in low 12 bits)

            rlwinm  r0,r0,mpBSub+1,31,31        ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r11,r11,1                   ; Convert 0-based to 1-based
            ori     r0,r0,0x3216                ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            rlwnm   r0,r0,r0,27,31              ; Rotate to get 12 or 25
            sld     r11,r11,r0                  ; Get the length in bytes
            rldicr  r4,r4,0,51                  ; zero low 12 bits of mapping va
            addic.  r0,r11,-4096                ; get offset of last page in mapping (set cr0_eq if 1 page)

            cmpld   cr5,r10,r4                  ; make sure VAs come in strictly ascending order
            cmpld   cr1,r5,r4                   ; compare the vas
            bgt--   cr5,mapSkipListPanic        ; die if keys are out of order

            blt     cr1,mapSrchFull64d          ; key is less, try next list
            beq     cr1,mapSrchFull64Found      ; this is the correct mapping
            bne--   cr0,mapSrchFull64e          ; handle mapping larger than one page
mapSrchFull64b:
            la      r8,mpList0(r3)              ; point to skip list vector in this mapping
            mr      r9,r3                       ; current becomes previous
            ldx     r3,r7,r8                    ; get ptr to next mapping in current list
            addi    r10,r4,0x1000               ; Get the lowest VA we can get next
mapSrchFull64c:
            mr.     r3,r3                       ; was there another mapping on current list?
            bne++   mapSrchFull64a              ; was another, so loop
mapSrchFull64d:
            stdx    r9,r7,r12                   ; save prev ptr in per-proc vector
            subic.  r7,r7,8                     ; move on to next list offset
            ldx     r3,r7,r8                    ; get next mapping on next list (if any)
            bge++   mapSrchFull64c              ; loop to try next list

; Mapping not found, return 0 and next higher key

            li      r3,0                        ; return null
            bt--    bFullFound,mapSkipListPanic ; panic if it was on earlier list
            ld      r4,mpList0(r9)              ; get 64-bit ptr to next mapping, if any
            b       mapSrch64Exit               ; join common exit code

; Block mapping or nested pmap, and key > base. We must compute the va of
; the end of the block to see if key fits within it.

mapSrchFull64e:
            add     r4,r4,r0                    ; r4 <- last page in this mapping
            cmpld   r5,r4                       ; does this mapping cover our page?
            bgt     mapSrchFull64b              ; no, try next mapping (r4 is advanced to end of range)

; Here when we have found the mapping.
;   r2 = count of nodes visited
;   r7 = current skip list number * 8
;   r8 = ptr to prev mapping's (ie, r9's) skip-list vector
;   r9 = prev ptr, ie highest mapping that comes before search target
;   r10 = prev mapping's va
;   r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull64Found:                             ; WARNING: can drop down to here
            cmpwi   r7,0                        ; are we in the last skip-list?
            crset   bFullFound                  ; remember that we found the mapping
            bne     mapSrchFull64d              ; mapSearchFull must search all lists to get prev ptrs
            ld      r4,mpList0(r3)              ; get ptr to next mapping
            stdx    r9,r7,r12                   ; save prev ptr in last list
            lwz     r7,mpFlags(r3)              ; Get the flags for our caller
            b       mapSrch64Exit               ; join common exit code

; 32-bit processors. Check next mapping.
;   r2 = count of nodes visited
;   r3 = ptr to next mapping in current list
;   r5 = va to search for (the "key") (low 12 bits are 0)
;   r6 = pmap ptr
;   r7 = current skip list number * 8
;   r8 = ptr to skip list vector of mapping pointed to by r9
;   r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
;   r10 = lowest expected next va, 0 at the beginning of the search
;   r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull32a:                                 ; loop over each mapping
            addi    r2,r2,1                     ; count mappings visited
            lwz     r0,mpFlags(r3)              ; get mapping flag bits
            lhz     r11,mpBSize(r3)             ; get #pages/#segments in block/submap mapping
            lwz     r4,mpVAddr+4(r3)            ; get va for this mapping (plus flags in low 12 bits)

            rlwinm  r0,r0,mpBSub+1,31,31        ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r11,r11,1                   ; Convert 0-based to 1-based
            ori     r0,r0,0x3216                ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            rlwnm   r0,r0,r0,27,31              ; Rotate to get 12 or 25
            slw     r11,r11,r0                  ; Get the length in bytes
            rlwinm  r4,r4,0,0,19                ; zero low 12 bits of mapping va
            addic.  r0,r11,-4096                ; get offset of last page in mapping (set cr0_eq if 1 page)

            cmplw   cr5,r10,r4                  ; make sure VAs come in strictly ascending order
            cmplw   cr1,r5,r4                   ; compare the vas
            bgt-    cr5,mapSkipListPanic        ; die if keys are out of order

            blt     cr1,mapSrchFull32d          ; key is less than this va, try next list
            beq     cr1,mapSrchFull32Found      ; this is the correct mapping
            bne-    cr0,mapSrchFull32e          ; handle mapping larger than one page
mapSrchFull32b:
            la      r8,mpList0+4(r3)            ; point to skip list vector in this mapping
            mr      r9,r3                       ; current becomes previous
            lwzx    r3,r7,r8                    ; get ptr to next mapping in current list
            addi    r10,r4,0x1000               ; Get the lowest VA we can get next
mapSrchFull32c:
            mr.     r3,r3                       ; was there another mapping on current list?
            bne+    mapSrchFull32a              ; was another, so loop
mapSrchFull32d:
            stwx    r9,r7,r12                   ; save prev ptr in per-proc vector
            subic.  r7,r7,8                     ; move on to next list offset
            lwzx    r3,r7,r8                    ; get next mapping on lower list (if any)
            bge+    mapSrchFull32c              ; loop to try next list

; Mapping not found, return 0 and next-key

            li      r3,0                        ; return null
            bt-     bFullFound,mapSkipListPanic ; panic if it was on an earlier list
            lwz     r4,mpList0+4(r9)            ; get ptr to next mapping
            b       mapSrch32Exit               ; join common exit code

; Block mapping or nested pmap, and key > base. We must compute the va of
; the end of the block to see if our key fits within it.

mapSrchFull32e:
            add     r4,r4,r0                    ; r4 <- last page in this mapping
            cmplw   r5,r4                       ; does this mapping cover our page?
            bgt     mapSrchFull32b              ; no, try next mapping

; Here when we have found the mapping.
;   r2 = count of nodes visited
;   r7 = current skip list number * 8
;   r9 = prev ptr, ie highest mapping that comes before search target, or 0
;   r10 = prev mapping's va
;   r12 = ptr to the skipListPrev vector in the per-proc

mapSrchFull32Found:                             ; WARNING: can drop down to here
            cmpwi   r7,0                        ; are we in the last skip-list?
            crset   bFullFound                  ; remember that we found the mapping
            bne     mapSrchFull32d              ; mapSearchFull must search all lists to get prev ptrs
            lwz     r4,mpList0+4(r3)            ; get ptr to next mapping
            stwx    r9,r7,r12                   ; save prev ptr in last list
            lwz     r7,mpFlags(r3)              ; Get mpFlags for our caller
            b       mapSrch32Exit               ; join common exit code

/*
 * *********************
 * * m a p I n s e r t *
 * *********************
 *
 * Insert a mapping into pmap skip-lists. The caller has already called mapSearchFull to
 * determine that this mapping does not overlap other mappings in the pmap. As a side effect
 * of calling mapSearchFull, the per-proc skipListPrev array is set up with a vector of the
 * previous ptrs for each skip list. When called:
 *      the pmap is locked (exclusive)
 *      translation is off, interrupts masked
 *      64-bit mode is enabled (if on a 64-bit machine)
 *      mapSearchFull has just been called for this mapping's key
 *      cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *      r3 = pmap ptr
 *      r4 = mapping ptr
 *
 * There is no return value. Except for cr6 (which is global), we trash nonvolatile regs.
 */
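
; The splice itself is the standard skip-list insert; in rough C (hypothetical
; names, with prevVec as set up by mapSearchFull and NULL standing for the
; pmap's list-header vector):
;
;       for (int list = newmp->lists - 1; list >= 0; list--) {
;           mapping **link = prevVec[list] ? &prevVec[list]->next[list]
;                                          : &map->listHeads[list];
;           newmp->next[list] = *link;          // point new mapping at next
;           *link = newmp;                      // point prev (or header) at new
;       }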

            .globl  EXT(mapInsert)
LEXT(mapInsert)
            lwz     r8,mpFlags(r4)              ; get this mapping's flags
            lbz     r7,pmapCurLists(r3)         ; get current max# lists any mapping is on
            la      r10,pmapSkipLists+4(r3)     ; r10 <-- base of pmap list headers, assuming 32-bit machine
            la      r11,mpList0+4(r4)           ; r11 <-- base of this mapping's list vector
            mfsprg  r12,0                       ; get ptr to our per-proc
            andi.   r9,r8,mpLists               ; get #lists this mapping is on (1<=n<=27)
            la      r12,skipListPrev+4(r12)     ; r12 <-- base of prev ptr vector
            sub.    r6,r9,r7                    ; is this mapping on more lists than any other?
            slwi    r8,r9,3                     ; get #lists * 8
            subi    r8,r8,8                     ; get offset to topmost (last) list in use
            bf--    pf64Bitb,mapIns32           ; handle 32-bit processor
            subi    r10,r10,4                   ; we use all 8 bytes of the ptr fields
            subi    r11,r11,4
            subi    r12,r12,4
            ble++   mapIns64a                   ; not new max #lists

; 64-bit processor: We must increase pmapCurLists. Since mapSearchFull() only
; sets up the first pmapCurLists prev ptrs, we must initialize the new ones to
; point to the pmap. While we are at it, we verify that the unused list hdrs in
; the pmap are 0.

            cmpwi   r9,kSkipListMaxLists        ; in range?
            stb     r9,pmapCurLists(r3)         ; remember new max
            mtctr   r6                          ; set up count of new lists
            mr      r5,r8                       ; copy offset to last list
            subi    r0,r10,mpList0              ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
            bgt--   mapSkipListPanic            ; choke if this mapping is on too many lists
mapIns64NewList:
            ldx     r6,r5,r10                   ; get pmap list head
            stdx    r0,r5,r12                   ; initialize prev ptr
            subi    r5,r5,8                     ; get next list offset
            cmpdi   r6,0                        ; was list hdr null?
            bdnzt   cr0_eq,mapIns64NewList      ; loop if more lists to initialize and list hdr was 0
            bne--   mapSkipListPanic            ; die if pmap list hdr was not null

; 64-bit processor: loop over each list this mapping is on
;   r4 = mapping
;   r8 = next list offset
;   r10 = ptr to base of pmap list header vector
;   r11 = ptr to base of new mapping's list vector
;   r12 = ptr to base of prev ptr vector in per-proc

mapIns64a:
            ldx     r5,r8,r12                   ; get prev ptr from per-proc vector
            cmpwi   cr1,r8,0                    ; more to go?
            la      r7,mpList0(r5)              ; get base of prev mapping's list vector
            ldx     r6,r8,r7                    ; get ptr to next mapping on this list
            stdx    r4,r8,r7                    ; * insert new mapping in middle of this list
            stdx    r6,r8,r11                   ; * point new mapping at next
            subi    r8,r8,8                     ; get next list offset
            bne++   cr1,mapIns64a               ; more lists to go
            blr

; Handle 32-bit processor. First, increase pmapCurLists if necessary; cr0 is bgt
; iff the new mapping has more lists. Since mapSearchFull() only sets up the first
; pmapCurLists prev ptrs, we must initialize any new ones to point to the pmap.
; While we are at it, we verify that the unused list hdrs in the pmap are 0.
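;
; In rough C, growing the list count looks like (hypothetical names, following
; the same convention that a NULL prev ptr stands for the pmap itself):
;
;       while (map->curLists < newLists) {
;           if (map->listHeads[map->curLists] != NULL)
;               panic("skip lists");            // unused list hdr must be 0
;           prevVec[map->curLists++] = NULL;    // new lists: prev is the pmap
;       }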

mapIns32:
            ble+    mapIns32a                   ; skip if new mapping does not use extra lists
            cmpwi   r9,kSkipListMaxLists        ; in range?
            stb     r9,pmapCurLists(r3)         ; remember new max
            mtctr   r6                          ; set up count of new lists
            mr      r5,r8                       ; copy offset to last list
            subi    r0,r10,mpList0+4            ; r0 <-- fake mapping ptr (to pmap) for null prev ptrs
            bgt-    mapSkipListPanic            ; choke if this mapping is on too many lists
mapIns32NewList:
            lwzx    r6,r5,r10                   ; get pmap list head
            stwx    r0,r5,r12                   ; initialize prev ptr
            subi    r5,r5,8                     ; get next list offset
            cmpwi   r6,0                        ; was list hdr null?
            bdnzt   cr0_eq,mapIns32NewList      ; loop if more lists to initialize and list hdr was 0
            bne-    mapSkipListPanic            ; die if pmap list hdr was not null

; 32-bit processor: loop over each list this mapping is on
;   r4 = mapping
;   r8 = next list offset
;   r10 = ptr to base of pmap list header vector
;   r11 = ptr to base of new mapping's list vector
;   r12 = ptr to base of prev ptr vector

mapIns32a:
            lwzx    r5,r8,r12                   ; get prev ptr from per-proc vector
            cmpwi   cr1,r8,0                    ; more to go?
            la      r7,mpList0+4(r5)            ; get base of prev mapping's list vector
            lwzx    r6,r8,r7                    ; get ptr to next mapping on this list
            stwx    r4,r8,r7                    ; * insert new mapping in middle of this list
            stwx    r6,r8,r11                   ; * point new mapping at next
            subi    r8,r8,8                     ; get next list offset
            bne+    cr1,mapIns32a               ; more lists to go
            blr

/*
 * *********************
 * * m a p R e m o v e *
 * *********************
 *
 * Remove a mapping from pmap skip-lists. The caller has already called mapSearchFull to
 * find the mapping, which sets up the skipListPrev array with a vector of the previous
 * ptrs for each skip list. When called:
 *      the pmap is locked (exclusive)
 *      translation is off, interrupts masked
 *      64-bit mode is enabled (if on a 64-bit machine)
 *      mapSearchFull has just been called for this mapping's key
 *      cr6 is loaded with the corresponding feature flags (in particular, pf64Bit)
 *      r3 = pmap ptr
 *      r4 = mapping ptr
 *
 * There is no return value. Except for cr6 (which is global), we trash nonvolatile regs.
 */
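
; The unlink is the standard skip-list delete; in rough C (hypothetical names,
; with prevVec as set up by mapSearchFull and NULL standing for the pmap):
;
;       for (int list = mp->lists - 1; list >= 0; list--) {
;           mapping **link = prevVec[list] ? &prevVec[list]->next[list]
;                                          : &map->listHeads[list];
;           *link = mp->next[list];             // route around the node
;       }
;       while (map->curLists > 0 && map->listHeads[map->curLists - 1] == NULL)
;           map->curLists--;                    // shrink #lists if tail emptied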

            .globl  EXT(mapRemove)
LEXT(mapRemove)
            lwz     r8,mpFlags(r4)              ; get this mapping's flags
            lbz     r10,pmapCurLists(r3)        ; get current #lists in use
            la      r11,mpList0+4(r4)           ; r11 <-- base of this mapping's list vector
            mfsprg  r12,0                       ; get ptr to our per-proc
            andi.   r9,r8,mpLists               ; get #lists this mapping is on (1<=n<=27)
            slwi    r8,r9,3                     ; get #lists * 8
            cmpw    cr5,r9,r10                  ; compare mpLists to pmapCurLists
            la      r12,skipListPrev+4(r12)     ; r12 <-- base of prev ptr vector
            bgt--   cr5,mapSkipListPanic        ; die if mpLists > pmapCurLists
            subi    r8,r8,8                     ; get offset to topmost (last) list this mapping is in
            bf--    pf64Bitb,mapRem32a          ; skip if 32-bit processor
            subi    r11,r11,4                   ; we use all 64 bits of list links on 64-bit machines
            subi    r12,r12,4

; 64-bit processor: loop over each list this mapping is on
;   r8 = offset to next list
;   r11 = ptr to base of mapping list vector
;   r12 = ptr to base of prev ptr vector in per-proc
;   cr5 = beq if (mpLists == pmapCurLists)

mapRem64a:
            ldx     r5,r8,r12                   ; get prev ptr from per-proc vector
            ldx     r9,r8,r11                   ; get next ptr from mapping
            cmpwi   cr1,r8,0                    ; more to go?
            la      r7,mpList0(r5)              ; get base of prev mapping's list vector
            stdx    r9,r8,r7                    ; point to next from prev
            subi    r8,r8,8                     ; get next list offset
            bne++   cr1,mapRem64a               ; loop if another list to unlink from

; Did we reduce #lists in use by removing last mapping in last list?

            bnelr++ cr5                         ; if (mpLists!=pmapCurLists) cannot have removed last map
            la      r5,pmapSkipLists(r3)        ; point to vector of list hdrs
mapRem64b:
            subic.  r10,r10,1                   ; get base-0 list#
            slwi    r8,r10,3                    ; get offset to last list
            ldx     r0,r8,r5                    ; get last list ptr
            cmpdi   cr1,r0,0                    ; null?
            bnelr   cr1                         ; not null, so we are done
            stb     r10,pmapCurLists(r3)        ; was null, so decrement pmapCurLists
            bgt     mapRem64b                   ; loop to see if more than one list was emptied
            blr

; 32-bit processor: loop over each list this mapping is on
;   r8 = offset to next list
;   r11 = ptr to base of mapping list vector
;   r12 = ptr to base of prev ptr vector in per-proc
;   cr5 = beq if (mpLists == pmapCurLists)

mapRem32a:
            lwzx    r5,r8,r12                   ; get prev ptr from per-proc vector
            lwzx    r9,r8,r11                   ; get next ptr from mapping
            cmpwi   cr1,r8,0                    ; more to go?
            la      r7,mpList0+4(r5)            ; get base of prev mapping's list vector
            stwx    r9,r8,r7                    ; point to next from prev
            subi    r8,r8,8                     ; get next list offset
            bne+    cr1,mapRem32a               ; loop if another list to unlink from

; Did we reduce #lists in use by removing last mapping in last list?

            bnelr+  cr5                         ; if (mpLists!=pmapCurLists) cannot have removed last map
            la      r5,pmapSkipLists+4(r3)      ; point to vector of list hdrs
mapRem32b:
            subic.  r10,r10,1                   ; get base-0 list#
            slwi    r8,r10,3                    ; get offset to last list
            lwzx    r0,r8,r5                    ; get last list ptr
            cmpwi   cr1,r0,0                    ; null?
            bnelr   cr1                         ; not null, so we are done
            stb     r10,pmapCurLists(r3)        ; was null, so decrement pmapCurLists
            bgt     mapRem32b                   ; loop to see if more than one list was emptied
            blr

/*
 * *************************
 * * m a p S e t L i s t s *
 * *************************
 *
 * Called to decide how many skip-lists the next mapping will be on. For each pmap,
 * we maintain a pseudo-random sequence based on a linear feedback shift register. The
 * next number is generated by rotating the old value left by 1 and XORing with a
 * polynomial (actually 4 8-bit polynomials concatenated) and adding 1.
 * The simple (unclamped) number of lists a mapping is on is the number of trailing 0s
 * in the pseudo-random sequence, shifted by the (log2-1) of the fanout F, plus one.
 * This seems to give us a near perfect distribution, in the sense that about F times more nodes
 * are allocated on n lists than on (n+1) lists.
 *
 * At one point we used a simple counter to assign lists. While this gave perfect
 * distribution, there were certain access patterns that would drive a worst case
 * distribution (e.g., insert low, then high, then low, etc.). Unfortunately,
 * these patterns were not too uncommon. We changed to a less-than-perfect assignment,
 * but one that works consistently across all known access patterns.
 *
 * Also, we modify the "simple" trailing-0-based list count, to account for an important
 * observation: because VM does a lot of removing and restoring of mappings in the process of
 * doing copy-on-write etc, it is common to have the pmap's "random number" (ie, the
 * count of created mappings) be much larger than the number of mappings currently in the
 * pmap. This means the simple list count will often be larger than justified by the number of
 * mappings in the pmap. To avoid this common situation, we clamp the list count to be no more
 * than ceil(logBaseF(pmapResidentCnt)).
 *
 * Finally, we also clamp the list count to kSkipListMaxLists.
 *
 * We are passed the pmap ptr in r3. Called with translation on, interrupts enabled,
 * and in 32-bit mode.
 */
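
; A rough C rendering of the computation (hypothetical field names;
; __builtin_clz stands in for the cntlzw instruction used below):
;
;       unsigned int mapSetLists(pmap *map) {
;           uint32_t rn = map->randNum;
;           rn = ((rn << 1) | (rn >> 31)) ^ 0xA7CBF5B9; // rotate left 1, XOR poly
;           map->randNum = rn + 1;                      // update "random number"
;           uint32_t mask = rn & ~(rn + 1);             // mask of trailing 0s of rn+1
;           mask &= map->residentCnt ?                  // clamp to magnitude of
;               0xFFFFFFFFu >> __builtin_clz(map->residentCnt) : 0; // residentCnt
;           mask &= (1u << (kSkipListMaxLists*(kSkipListFanoutShift+1) - 1)) - 1;
;           uint32_t tzeros = mask ? 32 - __builtin_clz(mask) : 0;
;           return (tzeros >> kSkipListFanoutShift) + 1; // at least one list
;       }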

            .globl  EXT(mapSetLists)
LEXT(mapSetLists)
            lwz     r5,pmapRandNum(r3)          ; get the per-pmap counter of mapping creates
            lwz     r4,pmapResidentCnt(r3)      ; get number of mappings in this pmap
            lis     r11,hi16(0xA7CBF5B9)        ; Get polynomial (I just made this up...)
            li      r0,-1                       ; get a mask of 1s
            ori     r11,r11,lo16(0xA7CBF5B9)    ; Get polynomial (I just made this up...)
            rlwinm  r5,r5,1,0,31                ; Rotate
            cntlzw  r7,r4                       ; get magnitude of pmapResidentCnt
            xor     r5,r5,r11                   ; Munge with poly
            srw     r7,r0,r7                    ; r7 <- mask for magnitude of pmapResidentCnt
            addi    r6,r5,1                     ; increment pmapRandNum non-atomically
            andc    r8,r5,r6                    ; get a mask for trailing zeroes in pmapRandNum
            stw     r6,pmapRandNum(r3)          ; update "random number"
            and     r8,r8,r7                    ; clamp trailing 0s to magnitude of pmapResidentCnt
            rlwinm  r8,r8,0,32-(kSkipListMaxLists*(kSkipListFanoutShift+1))+1,31 ; clamp to kSkipListMaxLists
            cntlzw  r9,r8                       ; count leading 0s in the mask
            subfic  r10,r9,32                   ; r10 <- trailing zero count
            srwi    r11,r10,kSkipListFanoutShift ; shift by 1 if fanout is 4, 2 if 8, etc
            addi    r3,r11,1                    ; every mapping is on at least one list
            blr

/*
 * *************************************
 * * m a p S k i p L i s t V e r i f y *
 * *************************************
 *
 * This does a fairly thorough sweep through a pmap's skip-list data structure, doing
 * consistency checks. It is typically called (from hw_exceptions.s) from debug or
 * instrumented builds. It is probably not a good idea to call this in production builds,
 * as it must run with exceptions disabled and can take a long time to verify a big pmap.
 * It runs in O(n*ln(n)).
 *
 * Called on a bl, with the pmap ptr in r20. We assume the pmap is locked (shared) and
 * that EE and DR are off. We check all 64 bits of ptrs even on 32-bit machines.
 * We use r20-r31, cr0, cr1, and cr7. If we return, no inconsistencies were found.
 *
 * You will notice we make little attempt to schedule the code; clarity is deemed more
 * important than speed.
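 *
 * The invariants checked boil down to, in rough pseudo-C (hypothetical names):
 *
 *      for (int list = kSkipListMaxLists - 1; list >= 0; list--) {
 *          // lists above pmapCurLists must be empty; lists at or below must not
 *          assert((heads[list] == NULL) == (list >= curLists));
 *          for (mapping *m = heads[list]; m; m = m->next[list]) {
 *              assert(alignedOk(m) && listCount(m) > list);    // node sanity
 *              assert(m->next[list] == NULL ||
 *                     vaOf(m->next[list]) > lastPageOf(m));    // strictly ascending
 *          }
 *      }
 *      // and the sweep must encounter exactly pmapResidentCnt distinct mappings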
 *
 * mapSkipListVerifyC is a version that is callable from C.
 * This should be called only from the debugger, IT DOES NOT LOCK THE PMAP!!!!
 */

            .globl  EXT(mapSkipListVerifyC)
LEXT(mapSkipListVerifyC)

            stwu    r1,-(FM_ALIGN((31-13+1)*4)+FM_SIZE)(r1) ; Make some space on the stack
            mflr    r0                          ; Save the link register
            stmw    r13,FM_ARG0(r1)             ; Save all registers
            stw     r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Save the return

            lwz     r15,pmapvr(r3)              ; Get the V to R translation
            lwz     r16,pmapvr+4(r3)            ; Get the V to R translation
            mr      r19,r4                      ; Save register dump area

            bl      EXT(mapSetUp)               ; Get set up

            mr      r17,r11                     ; Save the MSR
            xor     r20,r3,r16                  ; Translate 32-bit portion
            bf--    pf64Bitb,mslvc32a           ; Skip if 32-bit...

            rldimi  r20,r15,32,0                ; Shift the fixed upper part of the physical over and cram in top

mslvc32a:   lis     r18,hi16(EXT(DebugWork))
            ori     r18,r18,lo16(EXT(DebugWork))
            li      r0,0x4262                   ; Get the explicit call flag
            stw     r0,4(r18)                   ; Make sure the test knows to run

            bl      EXT(mapSkipListVerify)      ; Run the test

            li      r0,0                        ; Get a 0
            stw     r0,4(r18)                   ; Remove explicit call flag

            bt++    pf64Bitb,mslvc64a           ; This is 64-bit...

            mtmsr   r17                         ; Restore enables/translation/etc.
            isync
            b       mslvcreturn                 ; Join common...

mslvc64a:   mtmsrd  r17                         ; Restore enables/translation/etc.
            isync

mslvcreturn:
            lwz     r0,(FM_ALIGN((31-13+1)*4)+FM_SIZE+FM_LR_SAVE)(r1) ; Get the return
            lmw     r13,FM_ARG0(r1)             ; Get the registers
            mtlr    r0                          ; Restore the return
            lwz     r1,0(r1)                    ; Pop the stack
            blr

            .globl  EXT(mapSkipListVerify)
LEXT(mapSkipListVerify)
            mflr    r31                         ; save LR so we can bl to mapVerifyDie

; If we have already found an inconsistency and died, do not do so again, to
; avoid a loop of panics.

            lis     r27,hi16(EXT(DebugWork))
            ori     r27,r27,lo16(EXT(DebugWork))
            lwz     r0,4(r27)                   ; Get the explicit entry flag
            lwz     r27,0(r27)                  ; Get lockout
            cmplwi  r0,0x4262                   ; Should we run anyway?
            beq--   mslvAnyway                  ; Yes...
            cmpwi   r27,0                       ; have we already found an error?
            bnelr--                             ; yes, just return without checking again

; Not a recursive call, so initialize.

mslvAnyway:
            mfsprg  r23,2                       ; get the feature flags
            mtcrf   0x02,r23                    ; put pf64Bit where we can test it
            lbz     r26,pmapCurLists(r20)       ; get #lists that are in use
            lwz     r21,pmapResidentCnt(r20)    ; get #mappings in this pmap
            cmpwi   r26,kSkipListMaxLists       ; in range?
            bgtl--  mapVerifyDie                ; pmapCurLists is too big

; To prevent infinite loops, set limit of (pmapCurLists*pmapResidentCnt) iterations.
; Since we walk each list this is the max number of mappings we could visit.

            li      r23,0                       ; initialize count
mapVer0:
            subic.  r26,r26,1                   ; loop pmapCurLists times (but at least once)
            add     r23,r23,r21                 ; compute (pmapCurLists*pmapResidentCnt)
            bgt     mapVer0                     ; this will be a 64-bit qty on 64-bit machines

            li      r22,kSkipListMaxLists       ; initialize list#
            bf--    pf64Bitb,mapVer32           ; go handle a 32-bit processor

; Loop over each list, counting mappings in each. We first check whether or not
; the list is empty (ie, if the pmapSkipLists ptr is null.) All lists above
; pmapCurLists should be empty, and no list at or below pmapCurLists should be.
;
;   r21 = decrementing counter of mappings in this pmap
;   r22 = next list# (1...kSkipListMaxLists)
;   r23 = decrementing counter for infinite loop check

mapVer64:
            slwi    r25,r22,3                   ; get offset to next skiplist
            la      r26,pmapSkipLists(r20)      ; get ptr to base of skiplist vector
            subi    r25,r25,8
            ldx     r26,r25,r26                 ; get 1st mapping on this list, if any
            lbz     r28,pmapCurLists(r20)       ; get #lists in use
            cmpdi   cr6,r26,0                   ; set cr6_eq if this list is null ("null")
            cmpw    cr7,r22,r28                 ; set cr7_gt if this list is > pmapCurLists ("high")
            crxor   cr0_eq,cr6_eq,cr7_gt        ; cr0_eq <-- (null & !high) | (!null & high)
            beql--  mapVerifyDie                ; die if this list is null when it should not be, etc

; Loop over each node in the list.
;   r21 = decrementing counter of mappings in this pmap
;   r22 = this list# (1...kSkipListMaxLists)
;   r23 = decrementing counter for infinite loop check
;   r25 = offset to this skiplist (ie, ((r22<<3)-8))

mapVer64a:
            lwz     r29,mpFlags(r26)            ; get bits for this mapping
            ld      r28,mpVAddr(r26)            ; get key
            subic.  r23,r23,1                   ; check for loops
            bltl--  mapVerifyDie                ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
            andi.   r30,r26,mpBasicSize-1       ; test address for alignment
            bnel--  mapVerifyDie                ; not aligned
            andi.   r27,r29,mpLists             ; get #lists this mapping is supposed to be on
            cmpw    cr1,r27,r22                 ; is it supposed to be on this list?
            bltl--  cr1,mapVerifyDie            ; mapping's mpLists is too low
            cmpwi   r27,kSkipListMaxLists       ; too big?
            bgtl--  mapVerifyDie                ; mapping's mpLists > max
            rldicr  r28,r28,0,51                ; clear low 12 bits of va
            bne++   cr1,mapVer64f               ; jump if this is not highest list for this node

; This is the "highest" (last) list this mapping is on.
; Do some additional checks (so we only do them once per mapping.)
; First, if a block mapping or nested pmap, compute block end.

            lhz     r27,mpBSize(r26)            ; get #pages or #segments
            rlwinm  r29,r29,mpBSub+1,31,31      ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r27,r27,1                   ; units of nested pmap are (#segs-1)
            ori     r29,r29,0x3216              ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            rlwnm   r29,r29,r29,27,31           ; Rotate to get 12 or 25
            subi    r21,r21,1                   ; count mappings in this pmap
            sld     r29,r27,r29                 ; Get the length in bytes
            subi    r29,r29,4096                ; get offset to last page in mapping

; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.

            add     r24,r28,r29                 ; r24 <- address of last valid page in this mapping
            la      r28,mpList0(r26)            ; get base of this mapping's vector
            lwz     r27,mpFlags(r26)            ; Get the number of lists
            andi.   r27,r27,mpLists             ; get #lists this mapping is on (1<=n<=27)
            cmplwi  r27,mpBasicLists            ; Into bigger mapping?
            li      r27,mpBasicLists*8-8        ; Assume normal
            ble+    mapVer64c                   ; It is...
            li      r27,kSkipListMaxLists*8-8   ; initialize list offset for inner loop

; Inner loop over each list link in this mapping's mpList vector.
;   r24 = address of last valid page in this mapping
;   r27 = offset for next list in inner loop
;   r28 = base of this mapping's list links

mapVer64c:
            cmpw    cr1,r27,r25                 ; higher, lower, or same?
            ldx     r29,r27,r28                 ; get link to next mapping at this level
            mr.     r29,r29                     ; null?
            beq     mapVer64d                   ; link null, which is always OK
            bgtl--  cr1,mapVerifyDie            ; a mapping has a non-null list higher than its mpLists
            ld      r30,mpVAddr(r29)            ; get next mapping's va
            rldicr  r30,r30,0,51                ; zero low 12 bits
            cmpld   r30,r24                     ; compare next key with ours
            blel--  mapVerifyDie                ; a next node has key <= to ours
mapVer64d:
            subic.  r27,r27,8                   ; move on to next list
            bne++   mapVer64c                   ; loop if more to go

; Next node on current list, or next list if current done, or return if no more lists.

mapVer64f:
            la      r28,mpList0(r26)            ; get base of this mapping's vector
            ldx     r26,r25,r28                 ; get next mapping on this list
            mr.     r26,r26                     ; is there one?
            bne++   mapVer64a                   ; yes, handle
            subic.  r22,r22,1                   ; is there another list?
            bgt++   mapVer64                    ; loop if so

            cmpwi   r21,0                       ; did we find all the mappings in the pmap?
            bnel--  mapVerifyDie                ; no
            mtlr    r31                         ; restore return address
            blr

; Handle 32-bit machine.

mapVer32:
            lwz     r24,mpFlags(r20)            ; Get number of lists
            la      r30,pmapSkipLists(r20)      ; first, check the pmap list hdrs
            andi.   r24,r24,mpLists             ; Clean the number of lists
            bl      mapVerUpperWordsAre0        ; are the upper words of each list all 0?

; Loop over each list, counting mappings in each. We first check whether or not
; the list is empty. All lists above pmapCurLists should be empty, and no list
; at or below pmapCurLists should be.
;
;   r21 = decrementing counter of mappings in this pmap
;   r22 = next list# (1...kSkipListMaxLists)
;   r23 = decrementing counter for infinite loop check

mapVer32NextList:
            lbz     r28,pmapCurLists(r20)       ; get #lists in use
            slwi    r25,r22,3                   ; get offset to next skiplist
            la      r26,pmapSkipLists+4(r20)    ; get ptr to base of skiplist vector
            subi    r25,r25,8
            lwzx    r26,r25,r26                 ; get the 1st mapping on this list, or 0
            cmpw    cr7,r22,r28                 ; set cr7_gt if this list is > pmapCurLists ("high")
            cmpwi   cr6,r26,0                   ; set cr6_eq if this list is null ("null")
            crxor   cr0_eq,cr6_eq,cr7_gt        ; cr0_eq <-- (null & !high) | (!null & high)
            beql-   mapVerifyDie                ; die if this list is null when it should not be, etc

; Loop over each node in the list.
;   r21 = decrementing counter of mappings in this pmap
;   r22 = this list# (1...kSkipListMaxLists)
;   r23 = decrementing counter for infinite loop check
;   r25 = offset to this skiplist (ie, ((r22<<3)-8))

mapVer32a:
            lwz     r29,mpFlags(r26)            ; get bits for this mapping
            andi.   r30,r26,mpBasicSize-1       ; test address for alignment
            lwz     r24,mpVAddr+0(r26)          ; get upper word of key
            bnel-   mapVerifyDie                ; mapping address not 64-byte aligned
            lwz     r28,mpVAddr+4(r26)          ; get lower word of key
            subic.  r23,r23,1                   ; check for loops
            bltl-   mapVerifyDie                ; we have visited > (pmapCurLists*pmapResidentCnt) nodes
            cmpwi   r24,0                       ; upper word of key (ie, va) should be 0
            bnel-   mapVerifyDie                ; was not
            andi.   r27,r29,mpLists             ; get #lists this mapping is supposed to be on
            cmpw    cr1,r27,r22                 ; is it supposed to be on this list?
            bltl-   cr1,mapVerifyDie            ; mapping's mpLists is too low
            cmpwi   r27,kSkipListMaxLists       ; too big?
            bgtl-   mapVerifyDie                ; mapping's mpLists > max
            rlwinm  r28,r28,0,0,19              ; clear low 12 bits of va
            bne+    cr1,mapVer32f               ; jump if this is not highest list for this node

; This is the "highest" (last) list this mapping is on.
; Do some additional checks (so we only do them once per mapping.)
; First, make sure upper words of the mpList vector are 0.

            lhz     r27,mpBSize(r26)            ; get #blocks
            rlwinm  r29,r29,mpBSub+1,31,31      ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
            addi    r27,r27,1                   ; units of nested pmap are (#segs-1)
            ori     r29,r29,0x3216              ; OR in 0x00003216 (0x3200 and a base rotate of 22)
            rlwnm   r29,r29,r29,27,31           ; Rotate to get 12 or 25
            subi    r21,r21,1                   ; count mappings in this pmap
            slw     r29,r27,r29                 ; Get the length in bytes
            subi    r29,r29,4096                ; get offset to last page in mapping

            lwz     r24,mpFlags(r26)            ; Get number of lists
            la      r30,mpList0(r26)            ; point to base of skiplist vector
            andi.   r24,r24,mpLists             ; Clean the number of lists
            bl      mapVerUpperWordsAre0        ; make sure upper words are all 0 (uses r24 and r27)

; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.

            add     r24,r28,r29                 ; r24 <- address of last valid page in this mapping
            la      r28,mpList0+4(r26)          ; get base of this mapping's vector
            lwz     r27,mpFlags(r26)            ; Get the number of lists
            andi.   r27,r27,mpLists             ; get #lists this mapping is on (1<=n<=27)
            cmplwi  r27,mpBasicLists            ; Into bigger mapping?
            li      r27,mpBasicLists*8-8        ; Assume normal
            ble+    mapVer32c                   ; It is...
            li      r27,kSkipListMaxLists*8-8   ; initialize list offset for inner loop

; Inner loop over each list in this mapping's mpList vector.
;   r24 = address of last valid page in this mapping
;   r27 = offset for next list in inner loop
;   r28 = base of this mapping's list links

mapVer32c:
            cmpw    cr1,r27,r25                 ; higher, lower, or same?
            lwzx    r29,r27,r28                 ; get link to next mapping at this level
            mr.     r29,r29                     ; null?
            beq     mapVer32d                   ; link null, which is always OK

            bgtl-   cr1,mapVerifyDie            ; a mapping has a non-null list higher than its mpLists
            lwz     r30,mpVAddr+4(r29)          ; get next mapping's va
            rlwinm  r30,r30,0,0,19              ; zero low 12 bits
            cmplw   r30,r24                     ; compare next key with ours
            blel-   mapVerifyDie                ; a next node has key <= to ours
mapVer32d:
            subic.  r27,r27,8                   ; move on to next list
            bne+    mapVer32c                   ; loop if more to go

; Next node on current list, or next list if current done, or return if no more lists.

mapVer32f:
            la      r28,mpList0+4(r26)          ; get base of this mapping's vector again
            lwzx    r26,r25,r28                 ; get next mapping on this list
            mr.     r26,r26                     ; is there one?
            bne+    mapVer32a                   ; yes, handle
            subic.  r22,r22,1                   ; is there another list?
            bgt+    mapVer32NextList            ; loop if so

            cmpwi   r21,0                       ; did we find all the mappings in the pmap?
            bnel-   mapVerifyDie                ; no
            mtlr    r31                         ; restore return address
            blr

; Subroutine to verify that the upper words of a vector of kSkipListMaxLists
; doublewords are 0.
;   r30 = ptr to base of vector
; Uses r24 and r27.

mapVerUpperWordsAre0:
            cmplwi  r24,mpBasicLists            ; Do we have more than basic?
            li      r24,mpBasicLists*8          ; Assume basic
            ble++   mapVerUpper1                ; We have the basic size
            li      r24,kSkipListMaxLists*8     ; Use max size

mapVerUpper1:
            subic.  r24,r24,8                   ; get offset to next doubleword
            lwzx    r27,r24,r30                 ; get upper word
            cmpwi   cr1,r27,0                   ; 0 ?
            bne-    cr1,mapVerifyDie            ; die if not, passing caller's LR
            bgt+    mapVerUpper1                ; loop if more to go
            blr

; bl here if mapSkipListVerify detects an inconsistency.

mapVerifyDie:
            mtlr    r31                         ; Restore return
            lis     r31,hi16(EXT(DebugWork))
            ori     r31,r31,lo16(EXT(DebugWork))
            lwz     r0,4(r31)                   ; Get the explicit entry flag
            cmplwi  r0,0x4262                   ; Should we run anyway?
            beqlr--                             ; Explicit call, return...

            li      r0,1                        ; get a nonzero lockout value
            stw     r0,0(r31)                   ; Lock out further calls
            BREAKPOINT_TRAP                     ; hopefully, enter debugger

/*
 * Panic (choke, to be exact) because of messed up skip lists. The LR points back
 * to the original caller of the skip-list function.
 */

mapSkipListPanic:                               ; skip-lists are screwed up
            lis     r0,hi16(Choke)              ; set up the choke firmware call
            ori     r0,r0,lo16(Choke)
            li      r3,failSkipLists            ; get choke code
            sc                                  ; choke