/*
- * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2002-2004 Apple Computer, Inc. All rights reserved.
*
* @APPLE_LICENSE_HEADER_START@
*
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * The contents of this file constitute Original Code as defined in and
+ * are subject to the Apple Public Source License Version 1.1 (the
+ * "License"). You may not use this file except in compliance with the
+ * License. Please obtain a copy of the License at
+ * http://www.apple.com/publicsource and read it before using this file.
*
- * This file contains Original Code and/or Modifications of Original Code
- * as defined in and that are subject to the Apple Public Source License
- * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
- *
- * The Original Code and all software distributed under the License are
- * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This Original Code and all software distributed under the License are
+ * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
- * Please see the License for the specific language governing rights and
- * limitations under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
+ * License for the specific language governing rights and limitations
+ * under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
; never for the most-common case of finding a scalar mapping. The full searches
; must check _in_ the inner loop, to get the prev ptrs right.
- mr. r9,r9 ; was there a prev ptr?
- li r3,0 ; assume we are going to return null
- ld r4,pmapSkipLists(r6) ; assume prev ptr null... so next is first
- beq-- mapSrch64Exit ; prev ptr was null, search failed
- lwz r0,mpFlags(r9) ; get flag bits from prev mapping
- ld r10,mpVAddr(r9) ; re-fetch base address of prev ptr
- ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any
- andi. r0,r0,mpBlock+mpNest ; block mapping or nested pmap?
- lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
- rldicr r10,r10,0,51 ; zero low 12 bits of mapping va
- beq mapSrch64Exit ; prev mapping was just a scalar page, search failed
- cmpwi r0,mpBlock ; block mapping or nested pmap?
- sldi r0,r11,12 ; assume block mapping, get size in bytes - 4k
- beq mapSrch64f ; we guessed right, it was a block mapping
- addi r11,r11,1 ; mpBSize is 1 too low
- sldi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments
- subi r0,r11,4096 ; get address of last page in submap
-mapSrch64f:
- add r10,r10,r0 ; r10 <- last page in this mapping
- cmpld r5,r10 ; does this mapping cover our page?
- bgt mapSrch64Exit ; no, search failed
- mr r3,r9 ; yes, we found it
+ mr. r9,r9 ; was there a prev ptr?
+ li r3,0 ; assume we are going to return null
+ ld r4,pmapSkipLists(r6) ; assume prev ptr null... so next is first
+ beq-- mapSrch64Exit ; prev ptr was null, search failed
+ lwz r0,mpFlags(r9) ; get flag bits from prev mapping
+ lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
+
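+ ; The following computes the mapping length without a branch: mpBSize is a
+ ; 0-based block count, and the bsu bit selects a 4K or 32MB granule.  ORing
+ ; the bsu bit into 0x3216 and rotating the result by its own low 5 bits
+ ; (22 or 23) leaves 12 or 25 there, ie log2 of the granule, which becomes
+ ; the shift count.  For example, bsu=1 gives 0x3217 rotated left 23, whose
+ ; low 5 bits are 25, so the length is (mpBSize+1)<<25 bytes.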
+ rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ ld r10,mpVAddr(r9) ; re-fetch base address of prev ptr
+ ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ addi r11,r11,1 ; Convert 0-based to 1-based
+ rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
+ ld r4,mpList0(r9) ; get 64-bit ptr to next mapping, if any
+ sld r11,r11,r0 ; Get the length in bytes
+ rldicr r10,r10,0,51 ; zero low 12 bits of mapping va
+ subi r0,r11,4096 ; get offset last page in mapping
+ add r10,r10,r0 ; r10 <- last page in this mapping
+ cmpld r5,r10 ; does this mapping cover our page?
+ bgt mapSrch64Exit ; no, search failed
+ mr r3,r9 ; yes, we found it
; found the mapping
; r2 = count of nodes visited
; never for the most-common case of finding a scalar mapping. The full searches
; must check _in_ the inner loop, to get the prev ptrs right.
- mr. r9,r9 ; was there a prev ptr?
- li r3,0 ; assume we are going to return null
- lwz r4,pmapSkipLists+4(r6) ; assume prev ptr null... so next is first
- beq- mapSrch32Exit ; prev ptr was null, search failed
- lwz r0,mpFlags(r9) ; get flag bits from prev mapping
- lwz r10,mpVAddr+4(r9) ; re-fetch base address of prev ptr
- andi. r0,r0,mpBlock+mpNest ; block mapping or nested pmap?
- lwz r4,mpList0+4(r9) ; get ptr to next mapping, if any
- beq mapSrch32Exit ; prev mapping was just a scalar page, search failed
- lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
- cmpwi r0,mpBlock ; block mapping or nested pmap?
- rlwinm r10,r10,0,0,19 ; zero low 12 bits of block mapping va
- slwi r0,r11,12 ; assume block mapping, get size in bytes - 4k
- beq mapSrch32f ; we guessed right, it was a block mapping
- addi r11,r11,1 ; mpBSize is 1 too low
- slwi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments
- subi r0,r11,4096 ; get address of last page in submap
-mapSrch32f:
- add r10,r10,r0 ; r10 <- last page in this mapping
- cmplw r5,r10 ; does this mapping cover our page?
- bgt mapSrch32Exit ; no, search failed
- mr r3,r9 ; yes, we found it
+ mr. r9,r9 ; was there a prev ptr?
+ li r3,0 ; assume we are going to return null
+ lwz r4,pmapSkipLists+4(r6) ; assume prev ptr null... so next is first
+ beq- mapSrch32Exit ; prev ptr was null, search failed
+ lwz r0,mpFlags(r9) ; get flag bits from prev mapping
+ lhz r11,mpBSize(r9) ; get #pages/#segments in block/submap mapping
+ lwz r10,mpVAddr+4(r9) ; re-fetch base address of prev ptr
+
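+ ; Same branch-free length computation as the 64-bit path above, using slw
+ ; and 32-bit compares in place of sld/cmpld.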
+ rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ addi r11,r11,1 ; Convert 0-based to 1-based
+ ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
+ lwz r4,mpList0+4(r9) ; get ptr to next mapping, if any
+ slw r11,r11,r0 ; Get length in bytes
+ rlwinm r10,r10,0,0,19 ; zero low 12 bits of block mapping va
+ subi r0,r11,4096 ; get offset last page in mapping
+ add r10,r10,r0 ; r10 <- last page in this mapping
+ cmplw r5,r10 ; does this mapping cover our page?
+ bgt mapSrch32Exit ; no, search failed
+ mr r3,r9 ; yes, we found it
; found the mapping
; r2 = count of nodes visited
; r7 = current skip list number * 8
; r8 = ptr to skip list vector of mapping pointed to by r9
; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
- ; r10 = prev mappings va, or 0 if r9==pmap
+ ; r10 = lowest expected next va, 0 at the beginning of the search
; r12 = ptr to the skipListPrev vector in the per-proc
.align 5
mapSrchFull64a: ; loop over each mapping
- ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits)
- addi r2,r2,1 ; count mappings visited
- lwz r0,mpFlags(r3) ; get mapping flag bits
- cmpld cr0,r10,r4 ; make sure VAs come in strictly ascending order
+ addi r2,r2,1 ; count mappings visited
+ lwz r0,mpFlags(r3) ; get mapping flag bits
+ lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping
+ ld r4,mpVAddr(r3) ; get va for this mapping (plus flags in low 12 bits)
+
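+ ; Same branch-free length computation as in the short search above; the
+ ; addic. also sets cr0_eq when the mapping is a single 4K page, and the
+ ; ascending-order check uses cr5 so that cr0 stays live for the size test
+ ; at the bottom of the loop.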
+ rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ addi r11,r11,1 ; Convert 0-based to 1-based
+ ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
+ sld r11,r11,r0 ; Get the length in bytes
rldicr r4,r4,0,51 ; zero low 12 bits of mapping va
+ addic. r0,r11,-4096 ; get offset last page in mapping (set cr0_eq if 1 page)
+
+ cmpld cr5,r10,r4 ; make sure VAs come in strictly ascending order
cmpld cr1,r5,r4 ; compare the vas
- bge-- cr0,mapSkipListPanic ; die if keys are out of order
- andi. r0,r0,mpBlock+mpNest ; is it a scalar mapping? (ie, of a single page)
+ bgt-- cr5,mapSkipListPanic ; die if keys are out of order
+
blt cr1,mapSrchFull64d ; key is less, try next list
beq cr1,mapSrchFull64Found ; this is the correct mapping
- bne-- cr0,mapSrchFull64e ; handle block mapping or nested pmap
+ bne-- cr0,mapSrchFull64e ; handle mapping larger than one page
mapSrchFull64b:
la r8,mpList0(r3) ; point to skip list vector in this mapping
mr r9,r3 ; current becomes previous
ldx r3,r7,r8 ; get ptr to next mapping in current list
- mr r10,r4 ; remember prev ptrs VA
+ addi r10,r4,0x1000 ; Get the lowest VA we can get next
mapSrchFull64c:
mr. r3,r3 ; was there another mapping on current list?
bne++ mapSrchFull64a ; was another, so loop
; the end of the block to see if key fits within it.
mapSrchFull64e:
- lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping (if nonscalar)
- cmpwi r0,mpBlock ; distinguish between block mapping and nested pmaps
- sldi r0,r11,12 ; assume block mapping, get size in bytes - 4k
- beq mapSrchFull64f ; we guessed right, it was a block mapping
- addi r11,r11,1 ; mpBSize is 1 too low
- sldi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments
- subi r0,r11,4096 ; get address of last page in submap
-mapSrchFull64f:
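+ ; r0 still holds the mapping length minus 4K from the addic. in the search
+ ; loop, so the end of the mapping is formed without re-reading mpBSize.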
add r4,r4,r0 ; r4 <- last page in this mapping
cmpld r5,r4 ; does this mapping cover our page?
bgt mapSrchFull64b ; no, try next mapping (r4 is advanced to end of range)
; r7 = current skip list number * 8
; r8 = ptr to skip list vector of mapping pointed to by r9
; r9 = prev ptr, ie highest mapping that comes before search target (initially the pmap)
- ; r10 = prev mappings va, or 0 if r9==pmap
+ ; r10 = lowest expected next va, 0 at the beginning of the search
; r12 = ptr to the skipListPrev vector in the per-proc
.align 4
mapSrchFull32a: ; loop over each mapping
- lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits)
- addi r2,r2,1 ; count mappings visited
- lwz r0,mpFlags(r3) ; get mapping flag bits
- cmplw cr0,r10,r4 ; make sure VAs come in strictly ascending order
- rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va
- cmplw cr1,r5,r4 ; compare the vas
- bge- cr0,mapSkipListPanic ; die if keys are out of order
- andi. r0,r0,mpBlock+mpNest ; is it a scalar mapping? (ie, of a single page)
- blt cr1,mapSrchFull32d ; key is less than this va, try next list
- beq- cr1,mapSrchFull32Found ; this is the correct mapping
- bne- cr0,mapSrchFull32e ; handle block mapping or nested pmap
+ addi r2,r2,1 ; count mappings visited
+ lwz r0,mpFlags(r3) ; get mapping flag bits
+ lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping
+ lwz r4,mpVAddr+4(r3) ; get va for this mapping (plus flags in low 12 bits)
+
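+ ; 32-bit analogue of the 64-bit search loop above: the same rotate trick
+ ; produces the shift count, with slw/cmplw in place of sld/cmpld.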
+ rlwinm r0,r0,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ addi r11,r11,1 ; Convert 0-based to 1-based
+ ori r0,r0,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ rlwnm r0,r0,r0,27,31 ; Rotate to get 12 or 25
+ slw r11,r11,r0 ; Get the length in bytes
+ rlwinm r4,r4,0,0,19 ; zero low 12 bits of mapping va
+ addic. r0,r11,-4096 ; get offset last page in mapping (set cr0_eq if 1 page)
+
+ cmplw cr0,r10,r4 ; make sure VAs come in strictly ascending order
+ cmplw cr1,r5,r4 ; compare the vas
+ bgt- cr0,mapSkipListPanic ; die if keys are out of order
+
+ blt cr1,mapSrchFull32d ; key is less than this va, try next list
+ beq cr1,mapSrchFull32Found ; this is the correct mapping
+ bne- cr0,mapSrchFull32e ; handle mapping larger than one page
mapSrchFull32b:
la r8,mpList0+4(r3) ; point to skip list vector in this mapping
mr r9,r3 ; current becomes previous
lwzx r3,r7,r8 ; get ptr to next mapping in current list
- mr r10,r4 ; remember prev ptrs VA
+ addi r10,r4,0x1000 ; Get the lowest VA we can get next
mapSrchFull32c:
mr. r3,r3 ; next becomes current
bne+ mapSrchFull32a ; was another, so loop
; the end of the block to see if our key fits within it.
mapSrchFull32e:
- lhz r11,mpBSize(r3) ; get #pages/#segments in block/submap mapping (if nonscalar)
- cmpwi r0,mpBlock ; distinguish between block mapping and nested pmaps
- slwi r0,r11,12 ; assume block mapping, get size in bytes - 4k
- beq mapSrchFull32f ; we guessed right, it was a block mapping
- addi r11,r11,1 ; mpBSize is 1 too low
- slwi r11,r11,28 ; in a nested pmap, mpBSize is in units of segments
- subi r0,r11,4096 ; get address of last page in submap
-mapSrchFull32f:
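+ ; As in the 64-bit case, r0 still holds the mapping length minus 4K from
+ ; the addic. in the search loop.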
add r4,r4,r0 ; r4 <- last page in this mapping
cmplw r5,r4 ; does this mapping cover our page?
bgt mapSrchFull32b ; no, try next mapping
; Do some additional checks (so we only do them once per mapping.)
; First, if a block mapping or nested pmap, compute block end.
- andi. r29,r29,mpBlock+mpNest ; is it block mapping or nested pmap?
- subi r21,r21,1 ; count mappings in this pmap
- beq++ mapVer64b ; not nested or pmap
- lhz r27,mpBSize(r26) ; get #pages or #segments
- cmpwi r29,mpBlock ; which one is it?
- sldi r29,r27,12 ; assume block mapping, units are (pages-1)
- beq mapVer64b ; guessed correctly
- addi r27,r27,1 ; units of nested pmap are (#segs-1)
- sldi r29,r27,28 ; convert to #bytes
- subi r29,r29,4096 ; get offset to last byte in nested pmap
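+ ; Compute the mapping size with the same bsu rotate trick; a single 4K
+ ; page leaves r29 = 0 after the subi below.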
+ lhz r27,mpBSize(r26) ; get #pages or #segments
+ rlwinm r29,r29,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ addi r27,r27,1 ; Convert 0-based to 1-based
+ ori r29,r29,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ rlwnm r29,r29,r29,27,31 ; Rotate to get 12 or 25
+ subi r21,r21,1 ; count mappings in this pmap
+ sld r29,r27,r29 ; Get the length in bytes
+ subi r29,r29,4096 ; get offset to last page in mapping
; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.
-mapVer64b:
add r24,r28,r29 ; r24 <- address of last valid page in this mapping
la r28,mpList0(r26) ; get base of this mappings vector
lwz r27,mpFlags(r26) ; Get the number of lists
; Do some additional checks (so we only do them once per mapping.)
; First, make sure upper words of the mpList vector are 0.
- subi r21,r21,1 ; count mappings in this pmap
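+ ; The mapping size is computed first, with the same bsu rotate trick as the
+ ; 64-bit verify path; r29 ends up holding the length minus 4K (0 for a
+ ; single scalar page).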
+ lhz r27,mpBSize(r26) ; get #pages or #segments
+ rlwinm r29,r29,mpBSub+1,31,31 ; Rotate to get 0 if 4K bsu or 1 if 32MB bsu
+ addi r27,r27,1 ; Convert 0-based to 1-based
+ ori r29,r29,0x3216 ; OR in 0x00003216 (0x3200 and a base rotate of 22)
+ rlwnm r29,r29,r29,27,31 ; Rotate to get 12 or 25
+ subi r21,r21,1 ; count mappings in this pmap
+ slw r29,r27,r29 ; Get the length in bytes
+ subi r29,r29,4096 ; get offset to last page in mapping
+
lwz r24,mpFlags(r26) ; Get number of lists
la r30,mpList0(r26) ; point to base of skiplist vector
andi. r24,r24,mpLists ; Clean the number of lists
bl mapVerUpperWordsAre0 ; make sure upper words are all 0 (uses r24 and r27)
-
- ; Then, if a block mapping or nested pmap, compute block end.
-
- andi. r29,r29,mpBlock+mpNest ; is it block mapping or nested pmap?
- beq+ mapVer32b ; no
- lhz r27,mpBSize(r26) ; get #pages or #segments
- cmpwi r29,mpBlock ; which one is it?
- slwi r29,r27,12 ; assume block mapping, units are pages
- beq mapVer32b ; guessed correctly
- addi r27,r27,1 ; units of nested pmap are (#segs-1)
- slwi r29,r27,28 ; convert to #bytes
- subi r29,r29,4096 ; get offset to last byte in nested pmap
-
+
; Here with r29 = size of block - 4k, or 0 if mapping is a scalar page.
-mapVer32b:
add r24,r28,r29 ; r24 <- address of last valid page in this mapping
la r28,mpList0+4(r26) ; get base of this mappings vector
lwz r27,mpFlags(r26) ; Get the number of lists