2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
27 #include <ppc/proc_reg.h>
28 #include <mach/ppc/vm_param.h>
30 #include <sys/errno.h>
33 * void pmap_zero_page(vm_offset_t pa)
35 * zero a page of physical memory.
39 /* C debug stub in pmap.c calls this */
/*
 * void pmap_zero_page(vm_offset_t pa)
 *
 * Zero one physical page.  Runs with data translation (MSR[DR]) and
 * external interrupts (MSR[EE]) disabled so that r3 can be used as a
 * raw physical address; the page is cleared a cache line at a time
 * with dcbz, then the corresponding i-cache lines are invalidated.
 *
 * NOTE(review): the loop labels .L_phys_zero_loop and .L_inst_inval_loop
 * referenced below fall in gaps of this excerpt and are not visible.
 */
40 ENTRY(pmap_zero_page_assembler, TAG_NO_FRAME_USED)
42 ENTRY(pmap_zero_page, TAG_NO_FRAME_USED)
45 mfmsr r6 /* Save the current MSR so it can be restored on exit */
46 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* Turn off data translation (DR) */
47 rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions (EE)
48 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Offset of the last cache line in the page */
49 mtmsr r7 /* Set MSR to DR off */
50 isync /* Ensure data translations are off */
54 subic. r5,r4,CACHE_LINE_SIZE /* Offset of the line before this one (sets CR0 for loop exit) */
55 dcbz r4, r3 /* Zero this cache line */
56 subi r4,r5,CACHE_LINE_SIZE /* Offset for the next iteration */
57 dcbz r5, r3 /* Zero the next cache line (loop unrolled x2) */
58 bgt+ .L_phys_zero_loop /* Keep going until we do the page... */
60 sync /* Make sure all the zeroing stores are done */
61 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Restart at the end of the page */
64 subic. r5,r4,CACHE_LINE_SIZE /* Offset of the line before this one */
65 icbi r4, r3 /* Invalidate this i-cache line (icbi does not zero) */
66 subi r4,r5,CACHE_LINE_SIZE /* Offset for the next iteration */
67 icbi r5, r3 /* Invalidate the next i-cache line */
68 bgt+ .L_inst_inval_loop /* Keep going until we do the page... */
70 sync /* Make sure all the invalidates are done */
72 mtmsr r6 /* Restore original translation and interrupt state */
73 isync /* Ensure translations are back to their original state */
78 * phys_copy(src, dst, bytecount)
83 * This routine will copy bytecount bytes from physical address src to physical
/*
 * phys_copy(src, dst, bytecount)
 *
 * Copy bytecount bytes from physical address src to physical dst with
 * data translation and interrupts disabled.  This excerpt is heavily
 * gapped: the mfmsr that loads r6, the word-copy loop body, and the
 * labels .L_phys_copy_bytes / .L_phys_copy_loop / .L_phys_copy_done
 * are in missing lines.
 */
87 ENTRY(phys_copy, TAG_NO_FRAME_USED)
89 /* Switch off data translations */
91 rlwinm r7, r6, 0, MSR_DR_BIT+1, MSR_DR_BIT-1 /* Clear DR (r6 = saved MSR, loaded in a line not visible here) */
92 rlwinm r7, r7, 0, MSR_EE_BIT+1, MSR_EE_BIT-1 /* Clear EE as well */
94 isync /* Ensure data translations are off */
100 ble- .L_phys_copy_bytes /* Too short for word copies; go byte-by-byte */
108 bgt+ .L_phys_copy_loop /* Continue the word-copy loop */
110 /* If no leftover bytes, we're done now */
112 beq+ .L_phys_copy_done
117 .L_phys_copy_byte_loop:
124 bne+ .L_phys_copy_byte_loop /* Copy trailing bytes one at a time */
127 mtmsr r6 /* Restore original translations */
128 isync /* Ensure translation state is restored (not "off" -- r6 is the saved MSR) */
133 * pmap_copy_page(src, dst)
137 * This routine will copy the physical page src to physical page dst
139 * This routine assumes that the src and dst are page aligned and that the
140 * destination is cached.
142 * We also must assume that noone will be executing within the destination
143 * page. We also assume that this will be used for paging
148 /* if debug, we have a little piece of C around this
149 * in pmap.c that gives some trace ability
/*
 * pmap_copy_page(src, dst)
 *
 * Copy physical page src to physical page dst, assuming both are page
 * aligned.  Two paths: an FPU path that streams the page through f0-f3
 * a cache line (32 bytes) at a time, and an AltiVec path selected by
 * PVR check.  Both run with data translation and interrupts off.
 *
 * NOTE(review): the 'wegotaltivec:' label line and the final blr of the
 * FPU path fall in gaps of this excerpt and are not visible.
 */
151 ENTRY(pmap_copy_page_assembler, TAG_NO_FRAME_USED)
153 ENTRY(pmap_copy_page, TAG_NO_FRAME_USED)
157 mfpvr r9 ; Get the PVR
158 rlwinm r9,r9,16,16,31 ; Isolate the processor version field
159 cmplwi r9,PROCESSOR_VERSION_Max ; Do we have Altivec?
160 beq+ wegotaltivec ; Yeah...
163 mfmsr r9 ; Save the current MSR for restore on exit
164 stwu r1,-(FM_SIZE+32)(r1) ; Make a frame with room for 4 FP saves
165 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions
166 ori r7,r7,lo16(MASK(MSR_FP)) ; Turn on the FPU
167 mtmsr r7 ; Disable rupts and enable FPU
170 stfd f0,FM_SIZE+0(r1) ; Save an FP register
171 rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the data-translation (DR) bit
172 stfd f1,FM_SIZE+8(r1) ; Save an FP register
173 addi r6,r3,PPC_PGBYTES ; r6 = end of source page (loop limit)
174 stfd f2,FM_SIZE+16(r1) ; Save an FP register
175 mr r8,r4 ; Save the destination for the icbi pass
176 stfd f3,FM_SIZE+24(r1) ; Save an FP register
178 mtmsr r7 ; Set the new MSR (DR now off too)
179 isync ; Ensure data translations are off
181 dcbt br0, r3 /* Prefetch the first source line */
182 li r5, CACHE_LINE_SIZE /* Offset used to prefetch the next line */
184 .L_pmap_copy_page_loop:
185 dcbz 0, r4 /* Establish the output line without reading memory */
186 lfd f0, 0(r3) /* Get first 8 */
187 lfd f1, 8(r3) /* Get second 8 */
188 lfd f2, 16(r3) /* Get third 8 */
189 stfd f0, 0(r4) /* Put first 8 */
190 dcbt r5, r3 /* Start next source line coming in */
191 lfd f3, 24(r3) /* Get fourth 8 */
192 stfd f1, 8(r4) /* Put second 8 */
193 addi r3,r3,CACHE_LINE_SIZE /* Point to the next line in */
194 stfd f2, 16(r4) /* Put third 8 */
195 cmplw cr0,r3,r6 /* See if we're finished yet */
196 stfd f3, 24(r4) /* Put fourth 8 */
197 dcbst br0,r4 /* Force the completed line out */
198 addi r4,r4,CACHE_LINE_SIZE /* Point to the next line out */
199 blt+ .L_pmap_copy_page_loop /* Copy the whole page */
201 sync /* Make sure all the stores are done */
202 li r4,PPC_PGBYTES-CACHE_LINE_SIZE /* Offset of the last line of the destination page */
205 subic. r5,r4,CACHE_LINE_SIZE /* Offset of the line before this one */
206 icbi r4, r8 /* Invalidate the i-cache line for the destination */
207 subi r4,r5,CACHE_LINE_SIZE /* Offset for the next iteration */
208 icbi r5, r8 /* Invalidate the next i-cache line */
209 bgt+ invalinst /* Keep going until we do the page... */
211 rlwimi r7,r9,0,MSR_DR_BIT,MSR_DR_BIT ; Copy the original DR bit back into r7
212 sync ; Make sure all invalidates done
214 mtmsr r7 ; Set DR back to its original state (FPU still on)
217 lfd f0,FM_SIZE+0(r1) ; Restore an FP register
218 lfd f1,FM_SIZE+8(r1) ; Restore an FP register
219 lfd f2,FM_SIZE+16(r1) ; Restore an FP register
220 lfd f3,FM_SIZE+24(r1) ; Restore an FP register
222 lwz r1,0(r1) ; Pop the frame
224 mtmsr r9 ; Restore full original MSR (FPU off, rupts maybe back on)
230 ; AltiVec path: not very optimal; here as a test of
231 ; Altivec in the kernel.  (wegotaltivec: label is in a gap above.)
234 mfmsr r9 ; Save the current MSR
235 lis r8,hi16(0xC0000000) ; VRSave mask: we use only v0 and v1
236 rlwinm r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1 ; Disable interruptions
237 lis r6,lo16(2*256+128) ; Specify 128 blocks of 2 vectors each
238 rlwinm r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1 ; Clear the data-translation (DR) bit
239 ori r6,r6,32 ; Set a 32-byte stride
240 mtsprg 256,r8 ; Set VRSave -- NOTE(review): VRSAVE is SPR 256; confirm mtsprg vs mtspr here
241 mtmsr r7 ; Disable rupts and turn translation off
244 addi r11,r3,4096 ; r11 = end of source page (loop limit)
245 li r10,16 ; Vector register size, used as second-half offset
247 avmovepg: lvxl v0,br0,r3 ; Get first half of line
248 dcba br0,r4 ; Establish the output line
249 lvxl v1,r10,r3 ; Get second half of line
250 stvxl v0,br0,r4 ; Save first half of line
251 addi r3,r3,32 ; Point to the next source line
252 icbi br0,r4 ; Invalidate the i-cache for the output line too
253 stvxl v1,r10,r4 ; Save second half of line
254 cmplw r3,r11 ; Have we reached the next page?
255 dcbst br0,r4 ; Make sure the line is on its way out
256 addi r4,r4,32 ; Point to the next output line
257 blt+ avmovepg ; Move the next line...
260 sync ; Make sure all the memory stuff is done
261 mtsprg 256,r8 ; Show we are not using VRs any more -- NOTE(review): stores the same mask as above; confirm intent
262 mtmsr r9 ; Translation and interruptions back to original state
272 * copyin(src, dst, count)
/*
 * copyin(src, dst, count) / copyinmsg
 *
 * Copy count bytes from user space (src) into the kernel (dst), using
 * segment register SR_COPYIN to map the user segment.  On a fault the
 * exception handler resumes at .L_copyinout_error via thread_recover.
 * Copies that cross a 256MB segment boundary are punted to the C helper
 * copyin_multiple.
 *
 * NOTE(review): this excerpt has gaps -- the ACT_VMMAP load between
 * THREAD_TOP_ACT and VMMAP_PMAP, the actual copy loop, the
 * .L_copyinout_error label line, and the blr instructions are not
 * visible here.
 */
279 ENTRY2(copyin, copyinmsg, TAG_NO_FRAME_USED)
281 /* Preamble allowing us to call a sub-function */
283 stw r0,FM_LR_SAVE(r1)
284 stwu r1,-(FM_SIZE+16)(r1)
286 mfmsr r0 /* Save the MSR */
287 rlwinm r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
288 mtmsr r6 /* Disable 'rupts while reading per_proc */
290 mfsprg r6,0 /* Get the per_proc */
291 lwz r6,PP_CPU_DATA(r6)
293 lwz r10,CPU_ACTIVE_THREAD(r6) /* r10 = current thread */
294 mtmsr r0 /* Set 'rupts back */
295 ble- cr0,.L_copyinout_trivial /* count <= 0: trivial return */
297 /* we know we have a valid copyin to do now */
298 /* Set up thread_recover in case we hit an illegal address */
300 lwz r8,THREAD_TOP_ACT(r10)
301 lis r11,hi16(.L_copyinout_error)
303 ori r11,r11,lo16(.L_copyinout_error) /* r11 = fault recovery address */
304 add r9,r3,r5 /* Get the end of the source */
305 lwz r8,VMMAP_PMAP(r8) ; Get the pmap
306 rlwinm r12,r3,6,26,29 ; Get index to the segment slot for src
307 subi r9,r9,1 /* Make sure we don't go too far */
308 add r8,r8,r12 ; Start indexing to the segment value
309 stw r11,THREAD_RECOVER(r10) /* Arm the recovery point */
310 xor r9,r9,r3 /* Smoosh start and end together */
311 lwz r8,PMAP_SEGS(r8) ; Get the source SR value
312 rlwinm. r9,r9,0,1,3 /* Same segment? (top nybble bits 1..3 equal) */
313 mtsr SR_COPYIN,r8 ; Map the user segment at SR_COPYIN
316 lis r0,HIGH_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */
317 ori r0,r0,LOW_ADDR(EXT(dbgRegsCall)) /* (TEST/DEBUG) */
318 sc /* (TEST/DEBUG) */
321 /* For optimization, we check if the copyin lies on a segment
322 * boundary. If it doesn't, we can use a simple copy. If it
323 * does, we split it into two separate copies in some C code.
*/
326 bne- .L_call_copyin_multiple /* Nope, we went past the segment boundary... */
329 oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Rebase source into the SR_COPYIN segment */
333 /* Now that copyin is done, we don't need a recovery point */
334 mfmsr r7 /* Save the MSR */
335 rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
336 mtmsr r6 /* Disable 'rupts while reading per_proc */
338 mfsprg r6,0 /* Get the per_proc */
340 lwz r6,PP_CPU_DATA(r6)
341 addi r1,r1,FM_SIZE+16 /* Pop the frame */
342 lwz r10,CPU_ACTIVE_THREAD(r6)
343 mtmsr r7 ; Restore interrupts
345 lwz r0,FM_LR_SAVE(r1)
346 stw r3,THREAD_RECOVER(r10) /* Clear recovery -- NOTE(review): stores r3; presumably 0/success here, set in lines not visible */
350 /* we get here via the exception handler if an illegal
351 * user memory reference was made.
* (.L_copyinout_error: label line is in a gap of this excerpt.)
*/
355 /* Tear down the recovery point and return EFAULT */
357 mfmsr r7 /* Save the MSR */
358 rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
359 mtmsr r6 /* Disable 'rupts while reading per_proc */
361 mfsprg r6,0 /* Get the per_proc */
363 lwz r6,PP_CPU_DATA(r6)
364 addi r1,r1,FM_SIZE+16 /* Pop the frame */
365 lwz r10,CPU_ACTIVE_THREAD(r6)
366 mtmsr r7 ; Restore interrupts
368 lwz r0,FM_LR_SAVE(r1)
369 stw r4,THREAD_RECOVER(r10) /* Clear recovery -- NOTE(review): stores r4; presumably zeroed in lines not visible */
371 li r3,EFAULT ; Indicate error (EFAULT)
374 .L_copyinout_trivial:
375 /* The copyin/out was for either 0 bytes or a negative
376 * number of bytes, return an appropriate value (0 == SUCCESS).
377 * cr0 still contains result of comparison of len with 0.
*/
380 beq+ cr0, .L_copyinout_negative /* NOTE(review): branch target is also the fall-through label below */
382 .L_copyinout_negative:
384 /* unwind the stack */
385 addi r1, r1, FM_SIZE+16
386 lwz r0, FM_LR_SAVE(r1)
391 .L_call_copyin_multiple:
393 /* unwind the stack before tail-calling the C helper */
394 addi r1, r1, FM_SIZE+16
395 lwz r0, FM_LR_SAVE(r1)
398 b EXT(copyin_multiple) /* not a call - a jump! */
402 * copyout(src, dst, count)
/*
 * copyout(src, dst, count) / copyoutmsg
 *
 * Copy count bytes from the kernel (src) to user space (dst), mapping
 * the user *destination* segment through SR_COPYIN.  Mirrors copyin:
 * shares .L_copyinout_trivial / .L_copyinout_error, and punts
 * segment-crossing copies to the C helper copyout_multiple.
 *
 * NOTE(review): gaps in this excerpt hide the ACT_VMMAP load, the
 * mtsr for the destination segment, the copy loop, and the blr.
 */
409 ENTRY2(copyout, copyoutmsg, TAG_NO_FRAME_USED)
411 /* Preamble allowing us to call a sub-function */
414 stw r0,FM_LR_SAVE(r1)
415 stwu r1,-(FM_SIZE+16)(r1)
418 stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
419 stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
420 stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
421 mr r6,r0 /* (TEST/DEBUG) */
423 bl EXT(tracecopyout) /* (TEST/DEBUG) */
425 lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
426 lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
427 lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
430 mfmsr r7 /* Save the MSR */
431 rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
432 mtmsr r6 /* Disable 'rupts while reading per_proc */
434 mfsprg r6,0 /* Get the per_proc */
436 lwz r6,PP_CPU_DATA(r6)
438 lwz r10,CPU_ACTIVE_THREAD(r6) /* r10 = current thread */
439 mtmsr r7 /* Restore 'rupts */
440 ble- cr0,.L_copyinout_trivial /* count <= 0: trivial return */
441 /* we know we have a valid copyout to do now */
442 /* Set up thread_recover in case we hit an illegal address */
445 lwz r8,THREAD_TOP_ACT(r10)
446 lis r11,HIGH_ADDR(.L_copyinout_error)
448 rlwinm r12,r4,6,26,29 ; Get index to the segment slot for dst
449 ori r11,r11,LOW_ADDR(.L_copyinout_error) /* r11 = fault recovery address */
450 add r9,r4,r5 /* Get the end of the destination */
451 lwz r8,VMMAP_PMAP(r8) ; Get the pmap
452 subi r9,r9,1 /* Make sure we don't go too far */
453 add r8,r8,r12 ; Start indexing to the segment value
454 stw r11,THREAD_RECOVER(r10) /* Arm the recovery point */
455 xor r9,r9,r4 /* Smoosh start and end together */
456 lwz r8,PMAP_SEGS(r8) ; Get the destination SR value (not "source" -- indexed by r4/dst)
457 rlwinm. r9,r9,0,1,3 /* Same segment? (top nybble bits 1..3 equal) */
462 /* For optimisation, we check if the copyout lies on a segment
463 * boundary. If it doesn't, we can use a simple copy. If it
464 * does, we split it into two separate copies in some C code.
*/
467 bne- .L_call_copyout_multiple /* Nope, we went past the segment boundary... */
470 oris r4,r4,(SR_COPYIN_NUM << (28-16)) /* Rebase destination into the SR_COPYIN segment */
474 /* Now that copyout is done, we don't need a recovery point */
475 mfmsr r7 /* Save the MSR */
476 rlwinm r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
477 mtmsr r6 /* Disable 'rupts while reading per_proc */
479 mfsprg r6,0 /* Get the per_proc */
481 lwz r6,PP_CPU_DATA(r6)
482 addi r1,r1,FM_SIZE+16 /* Pop the frame */
483 lwz r10,CPU_ACTIVE_THREAD(r6)
484 mtmsr r7 ; Restore interrupts
486 lwz r0,FM_LR_SAVE(r1)
487 stw r3,THREAD_RECOVER(r10) /* Clear recovery -- NOTE(review): stores r3; presumably 0/success here, set in lines not visible */
491 .L_call_copyout_multiple:
492 /* unwind the stack before tail-calling the C helper */
493 addi r1, r1, FM_SIZE+16
494 lwz r0, FM_LR_SAVE(r1)
497 b EXT(copyout_multiple) /* not a call - a jump! */
501 * copyinstr(src, dst, count, maxcount)
504 * vm_size_t maxcount;
507 * Set *count to the number of bytes copied
509 * If dst == NULL, don't copy, just count bytes.
510 * Only currently called from klcopyinstr.
/*
 * copyinstr(src, dst, count, maxcount)
 *
 * Copy a NUL-terminated string from user space, byte by byte, through
 * SR_COPYIN; *count is set to the number of bytes copied.  If dst is
 * NULL, bytes are only counted, not stored.  Handles the copy crossing
 * into the next 256MB segment by reloading SR_COPYIN (nxtseg).
 *
 * NOTE(review): this routine continues past the end of this excerpt
 * (no blr visible); .L_copyinstr_done and the toobig path are not
 * fully shown here.
 */
513 ENTRY(copyinstr, TAG_NO_FRAME_USED)
515 /* Preamble allowing us to call a sub-function */
517 stw r0,FM_LR_SAVE(r1)
518 stwu r1,-(FM_SIZE+16)(r1)
521 stw r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
522 stw r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
523 stw r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
524 stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) */
525 mr r7,r0 /* (TEST/DEBUG) */
527 bl EXT(tracecopystr) /* (TEST/DEBUG) */
529 lwz r3,FM_SIZE+0(r1) /* (TEST/DEBUG) */
530 lwz r4,FM_SIZE+4(r1) /* (TEST/DEBUG) */
531 lwz r5,FM_SIZE+8(r1) /* (TEST/DEBUG) */
532 stw r6,FM_SIZE+12(r1) /* (TEST/DEBUG) -- NOTE(review): stw where the siblings use lwz; confirm intent */
535 mfmsr r0 /* Save the MSR */
536 rlwinm r7,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear 'rupts */
537 mtmsr r7 /* Disable 'rupts while reading per_proc */
539 mfsprg r7,0 /* Get the per_proc */
540 lwz r7,PP_CPU_DATA(r7)
542 lwz r10,CPU_ACTIVE_THREAD(r7) /* r10 = current thread */
543 mtmsr r0 /* Restore 'rupts */
544 ble- cr0,.L_copyinout_trivial /* count <= 0: trivial return */
546 /* we know we have a valid copyin to do now */
547 /* Set up thread_recover in case we hit an illegal address */
550 lwz r8,THREAD_TOP_ACT(r10)
551 stw r0,0(r6) /* Clear result length -- NOTE(review): r0 holds the saved MSR at the last visible write; confirm it is zeroed in a gap */
552 lis r11,HIGH_ADDR(.L_copyinout_error)
553 lwz r8,ACT_VMMAP(r8) ; Get the map for this activation
554 rlwinm r12,r3,6,26,29 ; Get index to the segment slot for src
555 lwz r8,VMMAP_PMAP(r8) ; Get the pmap
556 ori r11,r11,LOW_ADDR(.L_copyinout_error) /* r11 = fault recovery address */
557 add r8,r8,r12 ; Start indexing to the segment value
558 stw r11,THREAD_RECOVER(r10) /* Arm the recovery point */
560 lwz r7,PMAP_SEGS(r8) ; Get the source SR value
561 oris r3,r3,(SR_COPYIN_NUM << (28-16)) /* Rebase source into the SR_COPYIN segment */
563 /* Copy byte by byte for now - TODO NMGS speed this up with
564 * some clever (but fairly standard) logic for word copies.
565 * We don't use a copyinstr_multiple since copyinstr is called
566 * with INT_MAX in the linux server. Eugh.
*/
569 li r9,0 /* Clear byte counter */
571 /* If the destination is NULL, don't do writes,
572 * just count bytes. We set CR7 outside the loop to save time
*/
574 cmpwi cr7,r4,0 /* Is the destination null? */
576 nxtseg: mtsr SR_COPYIN,r7 /* Map the (next) source segment */
580 lbz r0,0(r3) /* Get the source byte */
581 addic. r5,r5,-1 /* Have we gone far enough? */
582 addi r3,r3,1 /* Bump source pointer */
584 cmpwi cr1,r0,0 /* Did we hit a null? */
586 beq cr7,.L_copyinstr_no_store /* If we are just counting, skip the store... */
588 stb r0,0(r4) /* Move to sink */
589 addi r4,r4,1 /* Advance sink pointer */
591 .L_copyinstr_no_store:
593 addi r9,r9,1 /* Count the character */
594 beq- cr1,.L_copyinstr_done /* We're done if we did a null... */
595 beq- cr0,L_copyinstr_toobig /* Also if we maxed the count... NOTE(review): label lacks the '.' prefix used by siblings; confirm */
597 /* Check to see if the copyin pointer has moved out of the
598 * copyin segment, if it has we must remap.
*/
601 rlwinm. r0,r3,0,4,31 /* Did the in-segment offset wrap to 0? */
602 bne+ cr0,.L_copyinstr_loop /* Nope... (loop label is in a gap of this excerpt) */
604 lwz r7,PMAP_SEGS+4(r8) ; Get the next source SR value
605 addi r8,r8,4 ; Point to the next segment
606 oris r3,r0,(SR_COPYIN_NUM << (28-16)) /* Reset to the start of the copyin segment */
607 b nxtseg /* Keep going... */
613 li r3,0 /* Normal return (NOTE(review): .L_copyinstr_done label is in a gap above) */
615 li r4,0 /* Zero for clearing thread_recover */
616 stw r9,0(r6) /* Set how many bytes we did */
617 stw r4,THREAD_RECOVER(r10) /* Clear recovery exit */
619 addi r1, r1, FM_SIZE+16
620 lwz r0, FM_LR_SAVE(r1)