2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
27 #include <ppc/proc_reg.h>
28 #include <mach/ppc/vm_param.h>
30 #include <sys/errno.h>
33 * void pmap_zero_page(vm_offset_t pa)
35 * zero a page of physical memory.
/*
 * In:  r3 = physical address of the page (assumed page aligned).
 * Strategy: turn data translation (DR) and external interrupts (EE) off
 * so r3 can be used as a physical address, zero the page one cache line
 * at a time with dcbz (working backwards from the last line, two lines
 * per loop pass), invalidate the matching i-cache lines with icbi in
 * case the page is later mapped executable, then restore the caller's
 * MSR (r6 holds the original MSR with FP/VEC forced off).
 * NOTE(review): the loop-top labels .L_phys_zero_loop and
 * .L_inst_inval_loop targeted by the bgt+ branches below, and the final
 * blr, are on lines elided from this excerpt.
 */
39 /* C debug stub in pmap.c calls this */
40 ENTRY(pmap_zero_page_assembler, TAG_NO_FRAME_USED)
42 ENTRY(pmap_zero_page, TAG_NO_FRAME_USED)
45 mfmsr r6 /* Get the MSR */
46 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
47 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
48 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* Turn off DR */
49 rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions
50 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */
51 mtmsr r7 /* Set MSR to DR off */
52 isync /* Ensure data translations are off */
/* Zero loop: dcbz establishes a whole d-cache line as zeros without
 * reading memory first.  cr0 from the subic. ends the loop when the
 * line offset reaches zero. */
56 subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */
57 dcbz r4, r3 /* Clear the whole thing to 0s */
58 subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */
59 dcbz r5, r3 /* Clear the next to zeros */
60 bgt+ .L_phys_zero_loop /* Keep going until we do the page... */
62 sync /* Make sure they're all done */
63 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */
/* Same two-lines-per-pass walk, this time invalidating any stale
 * i-cache copies of the page. */
66 subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */
67 icbi r4, r3 /* Invalidate this i-cache line */
68 subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */
69 icbi r5, r3 /* Invalidate the next i-cache line */
70 bgt+ .L_inst_inval_loop /* Keep going until we do the page... */
72 sync /* Make sure they're all done */
74 mtmsr r6 /* Restore original translations */
75 isync /* Ensure data translations are on */
80 * phys_copy(src, dst, bytecount)
85 * This routine will copy bytecount bytes from physical address src to physical
/*
 * In:  r3 = src physical address, r4 = dst physical address,
 *      r5 = byte count.
 * Runs with data translation and external interrupts disabled so the
 * physical addresses can be dereferenced directly; copies word-sized
 * chunks, then any leftover bytes, then restores the caller's MSR (r6).
 * NOTE(review): many lines are elided from this excerpt — the mfmsr
 * that loads the MSR into r6, the mtmsr r7 that actually disables
 * translation, the comparison feeding the ble- below, both copy-loop
 * bodies, the labels .L_phys_copy_bytes/.L_phys_copy_loop/
 * .L_phys_copy_done, and the final blr.  Comments describe only what
 * is visible.
 */
89 ENTRY(phys_copy, TAG_NO_FRAME_USED)
91 /* Switch off data translations */
93 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
94 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
95 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1
96 rlwinm r7, r7, 0, MSR_EE_BIT+1, MSR_EE_BIT-1
98 isync /* Ensure data translations are off */
/* Too few bytes for word copies? — go straight to the byte loop
 * (the comparison setting cr0 is on an elided line). */
104 ble- .L_phys_copy_bytes
112 bgt+ .L_phys_copy_loop
114 /* If no leftover bytes, we're done now */
116 beq+ .L_phys_copy_done
/* Trailing-byte copy loop (body elided from this excerpt). */
121 .L_phys_copy_byte_loop:
128 bne+ .L_phys_copy_byte_loop
131 mtmsr r6 /* Restore original translations */
132 isync /* Ensure the translation change has completed */
137 * pmap_copy_page(src, dst)
141 * This routine will copy the physical page src to physical page dst
143 * This routine assumes that the src and dst are page aligned and that the
144 * destination is cached.
146 * We also must assume that no one will be executing within the destination
147 * page. We also assume that this will be used for paging
/*
 * In:  r3 = src physical page, r4 = dst physical page.
 * Two implementations:
 *  - a floating-point path that moves one cache line (32 bytes) per
 *    loop pass through f0-f3 with translation off, then invalidates the
 *    destination's i-cache lines; and
 *  - an AltiVec path, taken when the PVR matches
 *    PROCESSOR_VERSION_Max, that moves a line per pass through v0/v1.
 * NOTE(review): the labels wegotaltivec: and invalinst:, at least two
 * isync's, and the mtlr/blr returns are on lines elided from this
 * excerpt.
 */
152 /* if debug, we have a little piece of C around this
153 * in pmap.c that gives some trace ability
155 ENTRY(pmap_copy_page_assembler, TAG_NO_FRAME_USED)
157 ENTRY(pmap_copy_page, TAG_NO_FRAME_USED)
161 mfpvr r9 ; Get the PVR
162 rlwinm r9,r9,16,16,31 ; Isolate the PPC processor
163 cmplwi r9,PROCESSOR_VERSION_Max ; Do we have Altivec?
164 beq+ wegotaltivec ; Yeah...
; ---- Floating-point copy path ----
167 mfmsr r9 ; Get the MSR
168 rlwinm r9,r9,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off
169 rlwinm r9,r9,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off
170 stwu r1,-(FM_SIZE+32)(r1) ; Make a frame for us
171 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions
172 ori r7,r7,lo16(MASK(MSR_FP)) ; Turn on the FPU
173 mtmsr r7 ; Disable rupts and enable FPU
; FPU is now usable: spill f0-f3 into the frame so we can use them,
; and prepare the MSR value with data translation off for the copy.
176 stfd f0,FM_SIZE+0(r1) ; Save an FP register
177 rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit
178 stfd f1,FM_SIZE+8(r1) ; Save an FP register
179 addi r6,r3,PPC_PGBYTES ; Point to the start of the next page
180 stfd f2,FM_SIZE+16(r1) ; Save an FP register
181 mr r8,r4 ; Save the destination
182 stfd f3,FM_SIZE+24(r1) ; Save an FP register
184 mtmsr r7 ; Set the new MSR
185 isync ; Ensure data translations are off
187 dcbt br0, r3 /* Start in first input line */
188 li r5, CACHE_LINE_SIZE /* Get the line size */
/* Copy loop: one 32-byte cache line per pass.  dcbz pre-allocates the
 * output line (no read-for-ownership), dcbt prefetches the next input
 * line, dcbst pushes the finished output line toward memory. */
190 .L_pmap_copy_page_loop:
191 dcbz 0, r4 /* Allocate a line for the output */
192 lfd f0, 0(r3) /* Get first 8 */
193 lfd f1, 8(r3) /* Get second 8 */
194 lfd f2, 16(r3) /* Get third 8 */
195 stfd f0, 0(r4) /* Put first 8 */
196 dcbt r5, r3 /* Start next line coming in */
197 lfd f3, 24(r3) /* Get fourth 8 */
198 stfd f1, 8(r4) /* Put second 8 */
199 addi r3,r3,CACHE_LINE_SIZE /* Point to the next line in */
200 stfd f2, 16(r4) /* Put third 8 */
201 cmplw cr0,r3,r6 /* See if we're finished yet */
202 stfd f3, 24(r4) /* Put fourth 8 */
203 dcbst br0,r4 /* Force it out */
204 addi r4,r4,CACHE_LINE_SIZE /* Point to the next line out */
205 blt+ .L_pmap_copy_page_loop /* Copy the whole page */
207 sync /* Make sure they're all done */
208 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Point to the end of the page */
/* Invalidate the destination page's i-cache lines, two per pass
 * (loop-top label invalinst: is on an elided line).  r8 still holds
 * the original destination address. */
211 subic. r5,r4,CACHE_LINE_SIZE /* Point to the next one */
212 icbi r4, r8 /* Trash the i-cache */
213 subi r4,r5,CACHE_LINE_SIZE /* Point to the next one */
214 icbi r5, r8 /* Trash the i-cache */
215 bgt+ invalinst /* Keep going until we do the page... */
217 rlwimi r7,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Set DDAT if on
218 sync ; Make sure all invalidates done
220 mtmsr r7 ; Set DDAT correctly
; Translation is back to the caller's setting: restore f0-f3, pop the
; frame, then restore the full original MSR (turns the FPU back off).
223 lfd f0,FM_SIZE+0(r1) ; Restore an FP register
224 lfd f1,FM_SIZE+8(r1) ; Restore an FP register
225 lfd f2,FM_SIZE+16(r1) ; Restore an FP register
226 lfd f3,FM_SIZE+24(r1) ; Restore an FP register
228 lwz r1,0(r1) ; Pop up the stack
230 mtmsr r9 ; Turn off FPU now and maybe rupts back on
; ---- AltiVec copy path (label wegotaltivec: is on an elided line) ----
; NOTE(review): the enabling of MSR[VEC] in r7 appears to be on an
; elided line as well — verify against the original source.
236 ; This is not very optimal. We just do it here for a test of
237 ; Altivec in the kernel.
240 mfmsr r9 ; Get the MSR
241 lis r8,hi16(0xC0000000) ; Make sure we keep the first 2 vector registers
242 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions
243 lis r6,lo16(2*256+128) ; Specify 128 blocks of 2 vectors each
244 rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the DDAT bit
245 ori r6,r6,32 ; Set a 32-byte stride
246 mtsprg 256,r8 ; Set VRSave — NOTE(review): VRSAVE is SPR 256; "mtsprg" here looks like a transcription of mtspr — verify against the original source
247 mtmsr r7 ; Disable rupts and turn xlate off
250 addi r11,r3,4096 ; Point to the next page
251 li r10,16 ; Get vector size
/* One 32-byte line per pass: lvxl/stvxl move 16 bytes each; dcba
 * allocates the output line, icbi invalidates its i-cache image,
 * dcbst pushes it out. */
253 avmovepg: lvxl v0,br0,r3 ; Get first half of line
254 dcba br0,r4 ; Allocate output
255 lvxl v1,r10,r3 ; Get second half of line
256 stvxl v0,br0,r4 ; Save first half of line
257 addi r3,r3,32 ; Point to the next line
258 icbi br0,r4 ; Make the icache go away also
259 stvxl v1,r10,r4 ; Save second half of line
260 cmplw r3,r11 ; Have we reached the next page?
261 dcbst br0,r4 ; Make sure the line is on its way out
262 addi r4,r4,32 ; Point to the next line
263 blt+ avmovepg ; Move the next line...
266 sync ; Make sure all the memory stuff is done
267 mtsprg 256,r8 ; Show we are not using VRs any more
268 mtmsr r9 ; Translation and interruptions back on
278 * copyin(src, dst, count)
/*
 * Copy count (r5) bytes from user address src (r3) to kernel address
 * dst (r4).  Returns 0 on success, EFAULT if a user access faults: the
 * address of .L_copyinout_error is parked in THREAD_RECOVER so the
 * exception handler resumes there.  The user source is reached by
 * loading the user's segment value into segment register SR_COPYIN and
 * rewriting r3 into that segment's address range.  A copy that would
 * cross a 256MB segment boundary is handed to the C routine
 * copyin_multiple instead.
 * NOTE(review): several lines are elided from this excerpt — the mflr
 * r0 before the LR spill, an apparent lwz r8,ACT_VMMAP(r8) before the
 * VMMAP_PMAP load, the comparison that sets cr0 for the ble- below,
 * the bulk-copy call itself, the .L_copyinout_error: label, and the
 * mtlr/blr return sequences.  Comments describe only what is visible.
 */
285 ENTRY2(copyin, copyinmsg, TAG_NO_FRAME_USED)
287 /* Preamble allowing us to call a sub-function */
289 stw r0,FM_LR_SAVE(r1)
290 stwu r1,-(FM_SIZE+16)(r1)
/* count <= 0 is handled out of line */
293 ble- cr0,.L_copyinout_trivial
295 /* we know we have a valid copyin to do now */
296 /* Set up thread_recover in case we hit an illegal address */
298 mfsprg r8,1 /* Get the current act */
299 lwz r10,ACT_THREAD(r8)
300 lis r11,hi16(.L_copyinout_error)
302 ori r11,r11,lo16(.L_copyinout_error)
303 add r9,r3,r5 /* Get the end of the source */
304 lwz r8,VMMAP_PMAP(r8) ; Get the pmap
305 rlwinm r12,r3,6,26,29 ; Get index to the segment slot
306 subi r9,r9,1 /* Make sure we don't go too far */
307 add r8,r8,r12 ; Start indexing to the segment value
308 stw r11,THREAD_RECOVER(r10)
309 xor r9,r9,r3 /* Smoosh 'em together */
310 lwz r8,PMAP_SEGS(r8) ; Get the source SR value
311 rlwinm. r9,r9,0,1,3 /* Top nybble equal? */
312 mtsr SR_COPYIN,r8 ; Set the SR
315 lis r0,HIGH_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */
316 ori r0,r0,LOW_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */
317 sc /* (TEST/DEBUG) */
320 /* For optimization, we check if the copyin lies on a segment
321 * boundary. If it doesn't, we can use a simple copy. If it
322 * does, we split it into two separate copies in some C code.
325 bne- .L_call_copyin_multiple /* Nope, we went past the segment boundary... */
328 oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */
/* (the bulk copy happens on elided lines; r3 is presumably 0 = success
 * by the time it is stored into THREAD_RECOVER below — verify) */
332 /* Now that copyin is done, we don't need a recovery point */
334 addi r1,r1,FM_SIZE+16
335 mfsprg r6,1 /* Get the current act */
336 lwz r10,ACT_THREAD(r6)
338 lwz r0,FM_LR_SAVE(r1)
339 stw r3,THREAD_RECOVER(r10) /* Clear recovery */
/* Fault exit — reached via THREAD_RECOVER from the exception handler
 * (the .L_copyinout_error: label itself is on an elided line). */
343 /* we get here via the exception handler if an illegal
344 * user memory reference was made.
348 /* Now that copyin is done, we don't need a recovery point */
350 mfsprg r6,1 /* Get the current act */
351 addi r1,r1,FM_SIZE+16
352 lwz r10,ACT_THREAD(r6)
354 lwz r0,FM_LR_SAVE(r1)
355 stw r4,THREAD_RECOVER(r10) /* Clear recovery */
357 li r3,EFAULT ; Indicate error (EFAULT)
360 .L_copyinout_trivial:
361 /* The copyin/out was for either 0 bytes or a negative
362 * number of bytes, return an appropriate value (0 == SUCCESS).
363 * cr0 still contains result of comparison of len with 0.
366 beq+ cr0, .L_copyinout_negative
/* NOTE(review): lines between this branch and the label below are
 * elided — the zero-length success return presumably lives there. */
368 .L_copyinout_negative:
370 /* unwind the stack */
371 addi r1, r1, FM_SIZE+16
372 lwz r0, FM_LR_SAVE(r1)
/* Segment-crossing case: pop our frame and tail-jump to the C helper,
 * which sees the original caller's arguments and return address. */
377 .L_call_copyin_multiple:
379 /* unwind the stack */
380 addi r1, r1, FM_SIZE+16
381 lwz r0, FM_LR_SAVE(r1)
384 b EXT(copyin_multiple) /* not a call - a jump! */
388 * copyout(src, dst, count)
/*
 * Copy count (r5) bytes from kernel address src (r3) to user address
 * dst (r4).  Mirror image of copyin above: the user destination is
 * reached through segment register SR_COPYIN, a fault resumes at
 * .L_copyinout_error (returns EFAULT), and a copy that crosses a 256MB
 * segment boundary is handed to the C routine copyout_multiple.
 * NOTE(review): elided from this excerpt — the mflr r0 before the LR
 * spill, the mfsprg/mr feeding the TEST/DEBUG trace, the comparison
 * setting cr0 for the ble- below, the mtsr SR_COPYIN, the bulk copy
 * itself, and the mtlr/blr returns.
 */
395 ENTRY2(copyout, copyoutmsg, TAG_NO_FRAME_USED)
397 /* Preamble allowing us to call a sub-function */
400 stw r0,FM_LR_SAVE(r1)
401 stwu r1,-(FM_SIZE+16)(r1)
/* TEST/DEBUG: spill the args, trace the call, reload the args */
404 stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
405 stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
406 stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
407 mr r6,r0 /* (TEST/DEBUG) */
409 bl EXT(tracecopyout) /* (TEST/DEBUG) */
411 lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
412 lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
413 lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
417 ble- cr0,.L_copyinout_trivial
418 /* we know we have a valid copyout to do now */
419 /* Set up thread_recover in case we hit an illegal address */
422 mfsprg r8,1 /* Get the current act */
423 lwz r10,ACT_THREAD(r8)
424 lis r11,HIGH_ADDR(.L_copyinout_error)
426 rlwinm r12,r4,6,26,29 ; Get index to the segment slot
427 ori r11,r11,LOW_ADDR(.L_copyinout_error)
428 add r9,r4,r5 /* Get the end of the destination */
429 lwz r8,VMMAP_PMAP(r8)
430 subi r9,r9,1 /* Make sure we don't go too far */
431 add r8,r8,r12 ; Start indexing to the segment value
432 stw r11,THREAD_RECOVER(r10)
433 xor r9,r9,r4 /* Smoosh 'em together */
434 lwz r8,PMAP_SEGS(r8) ; Get the destination SR value
435 rlwinm. r9,r9,0,1,3 /* Top nybble equal? */
440 /* For optimization, we check if the copyout lies on a segment
441 * boundary. If it doesn't, we can use a simple copy. If it
442 * does, we split it into two separate copies in some C code.
445 bne- .L_call_copyout_multiple /* Nope, we went past the segment boundary... */
448 oris r4,r4,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the destination */
/* (the bulk copy happens on elided lines; r3 is presumably 0 = success
 * by the time it is stored into THREAD_RECOVER below — verify) */
452 /* Now that copyout is done, we don't need a recovery point */
453 mfsprg r6,1 /* Get the current act */
454 addi r1,r1,FM_SIZE+16
455 lwz r10,ACT_THREAD(r6)
457 lwz r0,FM_LR_SAVE(r1)
458 stw r3,THREAD_RECOVER(r10) /* Clear recovery */
/* Segment-crossing case: pop our frame and tail-jump to the C helper. */
462 .L_call_copyout_multiple:
463 /* unwind the stack */
464 addi r1, r1, FM_SIZE+16
465 lwz r0, FM_LR_SAVE(r1)
468 b EXT(copyout_multiple) /* not a call - a jump! */
472 * copyinstr(src, dst, count, maxcount)
475 * vm_size_t maxcount;
478 * Set *count to the number of bytes copied
480 * If dst == NULL, don't copy, just count bytes.
481 * Only currently called from klcopyinstr.
/*
 * Copy a NUL-terminated string from user space.
 * In:  r3 = user src, r4 = kernel dst (0 means just count bytes),
 *      r5 = max byte count, r6 = out-parameter for bytes copied.
 * Copies byte-by-byte through the SR_COPYIN segment window, loading the
 * next segment value from the pmap when the source address wraps past a
 * 256MB segment boundary.  A user-address fault recovers to
 * .L_copyinout_error via THREAD_RECOVER (returns EFAULT).
 * NOTE(review): elided from this excerpt — the mflr r0 before the LR
 * spill, the comparison setting cr0 for the ble- below, the labels
 * .L_copyinstr_loop, L_copyinstr_toobig and .L_copyinstr_done, and the
 * final mtlr/blr.
 */
484 ENTRY(copyinstr, TAG_NO_FRAME_USED)
486 /* Preamble allowing us to call a sub-function */
488 stw r0,FM_LR_SAVE(r1)
489 stwu r1,-(FM_SIZE+16)(r1)
/* TEST/DEBUG: spill the args, trace the call, reload the args */
492 stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
493 stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
494 stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
495 stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */
496 mr r7,r0 /* (TEST/DEBUG) */
498 bl EXT(tracecopystr) /* (TEST/DEBUG) */
500 lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
501 lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
502 lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
503 stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) — NOTE(review): a store in the reload sequence looks like a transcription of lwz r6; verify against the original source */
507 ble- cr0,.L_copyinout_trivial
509 /* we know we have a valid copyin to do now */
510 /* Set up thread_recover in case we hit an illegal address */
513 mfsprg r8,1 /* Get the current act */
514 lwz r10,ACT_THREAD(r8)
515 stw r0,0(r6) /* Clear result length — NOTE(review): r0's value here comes from elided lines; verify it is zero */
516 lis r11,HIGH_ADDR(.L_copyinout_error)
517 lwz r8,ACT_VMMAP(r8) ; Get the map for this activation
518 rlwinm r12,r3,6,26,29 ; Get index to the segment slot
519 lwz r8,VMMAP_PMAP(r8)
520 ori r11,r11,LOW_ADDR(.L_copyinout_error)
521 add r8,r8,r12 ; Start indexing to the segment value
522 stw r11,THREAD_RECOVER(r10)
524 lwz r7,PMAP_SEGS(r8) ; Get the source SR value
525 oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Set the copyin segment as the source */
527 /* Copy byte by byte for now - TODO NMGS speed this up with
528 * some clever (but fairly standard) logic for word copies.
529 * We don't use a copyinstr_multiple since copyinstr is called
530 * with INT_MAX in the linux server. Eugh.
533 li r9,0 /* Clear byte counter */
535 /* If the destination is NULL, don't do writes,
536 * just count bytes. We set CR7 outside the loop to save time
538 cmpwi cr7,r4,0 /* Is the destination null? */
540 nxtseg: mtsr SR_COPYIN,r7 /* Set the source SR */
/* Main byte loop (loop-top label .L_copyinstr_loop is on an elided
 * line).  cr0 = count exhausted, cr1 = hit the NUL terminator. */
544 lbz r0,0(r3) /* Get the source */
545 addic. r5,r5,-1 /* Have we gone far enough? */
546 addi r3,r3,1 /* Bump source pointer */
548 cmpwi cr1,r0,0 /* Did we hit a null? */
550 beq cr7,.L_copyinstr_no_store /* If we are just counting, skip the store... */
552 stb r0,0(r4) /* Move to sink */
553 addi r4,r4,1 /* Advance sink pointer */
555 .L_copyinstr_no_store:
557 addi r9,r9,1 /* Count the character */
558 beq- cr1,.L_copyinstr_done /* We're done if we did a null... */
559 beq- cr0,L_copyinstr_toobig /* Also if we maxed the count... */
561 /* Check to see if the copyin pointer has moved out of the
562 * copyin segment, if it has we must remap.
565 rlwinm. r0,r3,0,4,31 /* Did we wrap around to 0? */
566 bne+ cr0,.L_copyinstr_loop /* Nope... */
568 lwz r7,PMAP_SEGS+4(r8) ; Get the next source SR value
569 addi r8,r8,4 ; Point to the next segment
570 oris r3,r0,(SR_COPYIN_NUM << (28-16)) /* Reset the segment number */
571 b nxtseg /* Keep going... */
/* Success exit (label .L_copyinstr_done is on an elided line):
 * report the byte count, clear the recovery point, pop the frame. */
577 li r3,0 /* Normal return */
579 li r4,0 /* to clear thread_recover */
580 stw r9,0(r6) /* Set how many bytes we did */
581 stw r4,THREAD_RECOVER(r10) /* Clear recovery exit */
583 addi r1, r1, FM_SIZE+16
584 lwz r0, FM_LR_SAVE(r1)