/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
#include <debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <mach/ppc/vm_param.h>
#include <assym.s>
#include <sys/errno.h>

/*
 * void pmap_zero_page(vm_offset_t pa)
 *
 * Zero a page of physical memory.
 */
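
/*
 * Rough C equivalent of the loop below (a hedged sketch, not the shipped
 * code): with data translation off, each dcbz establishes a zeroed cache
 * line without reading memory, so the page is cleared one line at a time
 * from the end toward the start. PG and LINE are illustrative stand-ins
 * for PPC_PGBYTES and CACHE_LINE_SIZE.
 *
 *	#include <string.h>
 *	#define PG	4096		// PPC_PGBYTES (assumed)
 *	#define LINE	32		// CACHE_LINE_SIZE (assumed)
 *
 *	static void zero_page_sketch(char *pa)	// pa: physical page, DR off
 *	{
 *		for (long off = PG - LINE; off >= 0; off -= LINE)
 *			memset(pa + off, 0, LINE);	// models dcbz
 *	}
 */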

#if DEBUG
	/* C debug stub in pmap.c calls this */
ENTRY(pmap_zero_page_assembler, TAG_NO_FRAME_USED)
#else
ENTRY(pmap_zero_page, TAG_NO_FRAME_USED)
#endif /* DEBUG */

	mfmsr	r6			/* Get the MSR */
	rlwinm	r7,r6,0,MSR_DR_BIT+1,MSR_DR_BIT-1	/* Turn off DR */
	rlwinm	r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Disable interruptions
	li	r4,PPC_PGBYTES-CACHE_LINE_SIZE	/* Point to the end of the page */
	mtmsr	r7			/* Set MSR to DR off */
	isync				/* Ensure data translations are off */

.L_phys_zero_loop:
	subic.	r5,r4,CACHE_LINE_SIZE	/* Point to the next one */
	dcbz	r4,r3			/* Clear the whole line to 0s */
	subi	r4,r5,CACHE_LINE_SIZE	/* Point to the next one */
	dcbz	r5,r3			/* Clear the next line to zeros */
	bgt+	.L_phys_zero_loop	/* Keep going until we do the page... */

	sync				/* Make sure they're all done */
	li	r4,PPC_PGBYTES-CACHE_LINE_SIZE	/* Point to the end of the page */

.L_inst_inval_loop:
	subic.	r5,r4,CACHE_LINE_SIZE	/* Point to the next one */
	icbi	r4,r3			/* Invalidate the i-cache line */
	subi	r4,r5,CACHE_LINE_SIZE	/* Point to the next one */
	icbi	r5,r3			/* Invalidate the next line */
	bgt+	.L_inst_inval_loop	/* Keep going until we do the page... */

	sync				/* Make sure they're all done */

	mtmsr	r6			/* Restore original translations */
	isync				/* Ensure data translations are on */

	blr

/* void
 * phys_copy(src, dst, bytecount)
 *	vm_offset_t	src;
 *	vm_offset_t	dst;
 *	int		bytecount;
 *
 * This routine copies bytecount bytes from physical address src to physical
 * address dst.
 */
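
/*
 * Rough C equivalent (a hedged sketch; data translation is off for the
 * whole copy, so src and dst are raw physical addresses): words move
 * four bytes at a time, then any 1-3 byte tail is finished bytewise.
 *
 *	static void phys_copy_sketch(const char *src, char *dst, int n)
 *	{
 *		while (n > 3) {			// word loop
 *			*(int *)(void *)dst = *(const int *)(const void *)src;
 *			src += 4; dst += 4; n -= 4;
 *		}
 *		while (n > 0) {			// 1-3 byte tail
 *			*dst++ = *src++;
 *			n--;
 *		}
 *	}
 */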

ENTRY(phys_copy, TAG_NO_FRAME_USED)

	/* Switch off data translations */
	mfmsr	r6
	rlwinm	r7,r6,0,MSR_DR_BIT+1,MSR_DR_BIT-1
	rlwinm	r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1
	mtmsr	r7
	isync				/* Ensure data translations are off */

	subi	r3,r3,4
	subi	r4,r4,4

	cmpwi	r5,3
	ble-	.L_phys_copy_bytes
.L_phys_copy_loop:
	lwz	r0,4(r3)
	addi	r3,r3,4
	subi	r5,r5,4
	stw	r0,4(r4)
	addi	r4,r4,4
	cmpwi	r5,3
	bgt+	.L_phys_copy_loop

	/* If no leftover bytes, we're done now */
	cmpwi	r5,0
	beq+	.L_phys_copy_done

.L_phys_copy_bytes:
	addi	r3,r3,3
	addi	r4,r4,3
.L_phys_copy_byte_loop:
	lbz	r0,1(r3)
	addi	r3,r3,1
	subi	r5,r5,1
	stb	r0,1(r4)
	addi	r4,r4,1
	cmpwi	r5,0
	bne+	.L_phys_copy_byte_loop

.L_phys_copy_done:
	mtmsr	r6			/* Restore original translations */
	isync				/* Ensure the translation change has taken effect */

	blr

/* void
 * pmap_copy_page(src, dst)
 *	vm_offset_t	src;
 *	vm_offset_t	dst;
 *
 * This routine copies the physical page src to physical page dst.
 *
 * This routine assumes that src and dst are page aligned and that the
 * destination is cached.
 *
 * We must also assume that no one will be executing within the destination
 * page, and that this will be used for paging.
 *
 */
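
/*
 * Rough C equivalent of the FPU loop below (a hedged sketch): each
 * iteration claims one 32-byte destination line with dcbz, streams the
 * line through four 8-byte floating-point registers, and pushes it out
 * with dcbst; the destination page's i-cache is invalidated afterwards.
 * PG is an illustrative stand-in for PPC_PGBYTES.
 *
 *	#define PG	4096		// PPC_PGBYTES (assumed)
 *
 *	static void copy_page_sketch(const double *src, double *dst)
 *	{
 *		for (long i = 0; i < (long)(PG / sizeof(double)); i += 4) {
 *			dst[i+0] = src[i+0];	// f0
 *			dst[i+1] = src[i+1];	// f1
 *			dst[i+2] = src[i+2];	// f2
 *			dst[i+3] = src[i+3];	// f3
 *		}
 *	}
 */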

#if DEBUG
/* if debug, we have a little piece of C around this
 * in pmap.c that gives some trace ability
 */
ENTRY(pmap_copy_page_assembler, TAG_NO_FRAME_USED)
#else
ENTRY(pmap_copy_page, TAG_NO_FRAME_USED)
#endif /* DEBUG */

#if 0
	mfpvr	r9			; Get the PVR
	rlwinm	r9,r9,16,16,31		; Isolate the PPC processor
	cmplwi	r9,PROCESSOR_VERSION_Max	; Do we have Altivec?
	beq+	wegotaltivec		; Yeah...
#endif

	mfmsr	r9			; Get the MSR
	stwu	r1,-(FM_SIZE+32)(r1)	; Make a frame for us
	rlwinm	r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Disable interruptions
	ori	r7,r7,lo16(MASK(MSR_FP))	; Turn on the FPU
	mtmsr	r7			; Disable rupts and enable FPU
	isync

	stfd	f0,FM_SIZE+0(r1)	; Save an FP register
	rlwinm	r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1	; Clear the DDAT bit
	stfd	f1,FM_SIZE+8(r1)	; Save an FP register
	addi	r6,r3,PPC_PGBYTES	; Point to the start of the next page
	stfd	f2,FM_SIZE+16(r1)	; Save an FP register
	mr	r8,r4			; Save the destination
	stfd	f3,FM_SIZE+24(r1)	; Save an FP register

	mtmsr	r7			; Set the new MSR
	isync				; Ensure data translations are off

	dcbt	br0,r3			/* Start in first input line */
	li	r5,CACHE_LINE_SIZE	/* Get the line size */

.L_pmap_copy_page_loop:
	dcbz	0,r4			/* Allocate a line for the output */
	lfd	f0,0(r3)		/* Get first 8 */
	lfd	f1,8(r3)		/* Get second 8 */
	lfd	f2,16(r3)		/* Get third 8 */
	stfd	f0,0(r4)		/* Put first 8 */
	dcbt	r5,r3			/* Start next line coming in */
	lfd	f3,24(r3)		/* Get fourth 8 */
	stfd	f1,8(r4)		/* Put second 8 */
	addi	r3,r3,CACHE_LINE_SIZE	/* Point to the next line in */
	stfd	f2,16(r4)		/* Put third 8 */
	cmplw	cr0,r3,r6		/* See if we're finished yet */
	stfd	f3,24(r4)		/* Put fourth 8 */
	dcbst	br0,r4			/* Force it out */
	addi	r4,r4,CACHE_LINE_SIZE	/* Point to the next line out */
	blt+	.L_pmap_copy_page_loop	/* Copy the whole page */

	sync				/* Make sure they're all done */
	li	r4,PPC_PGBYTES-CACHE_LINE_SIZE	/* Point to the end of the page */

invalinst:
	subic.	r5,r4,CACHE_LINE_SIZE	/* Point to the next one */
	icbi	r4,r8			/* Trash the i-cache */
	subi	r4,r5,CACHE_LINE_SIZE	/* Point to the next one */
	icbi	r5,r8			/* Trash the i-cache */
	bgt+	invalinst		/* Keep going until we do the page... */

	rlwimi	r7,r9,0,MSR_DR_BIT,MSR_DR_BIT	; Set DDAT if on
	sync				; Make sure all invalidates done

	mtmsr	r7			; Set DDAT correctly
	isync

	lfd	f0,FM_SIZE+0(r1)	; Restore an FP register
	lfd	f1,FM_SIZE+8(r1)	; Restore an FP register
	lfd	f2,FM_SIZE+16(r1)	; Restore an FP register
	lfd	f3,FM_SIZE+24(r1)	; Restore an FP register

	lwz	r1,0(r1)		; Pop up the stack

	mtmsr	r9			; Turn off FPU now and maybe rupts back on
	isync
	blr

#if 0
;
;	This is not very optimal. We just do it here for a test of
;	Altivec in the kernel.
;
wegotaltivec:
	mfmsr	r9			; Get the MSR
	lis	r8,hi16(0xC0000000)	; Make sure we keep the first 2 vector registers
	rlwinm	r7,r9,0,MSR_EE_BIT+1,MSR_EE_BIT-1	; Disable interruptions
	lis	r6,lo16(2*256+128)	; Specify 128 blocks of 2 vectors each
	rlwinm	r7,r7,0,MSR_DR_BIT+1,MSR_DR_BIT-1	; Clear the DDAT bit
	ori	r6,r6,32		; Set a 32-byte stride
	mtsprg	256,r8			; Set VRSave
	mtmsr	r7			; Disable rupts and turn xlate off
	isync

	addi	r11,r3,4096		; Point to the next page
	li	r10,16			; Get vector size

avmovepg:	lvxl	v0,br0,r3	; Get first half of line
	dcba	br0,r4			; Allocate output
	lvxl	v1,r10,r3		; Get second half of line
	stvxl	v0,br0,r4		; Save first half of line
	addi	r3,r3,32		; Point to the next line
	icbi	br0,r4			; Make the icache go away also
	stvxl	v1,r10,r4		; Save second half of line
	cmplw	r3,r11			; Have we reached the next page?
	dcbst	br0,r4			; Make sure the line is on its way out
	addi	r4,r4,32		; Point to the next line
	blt+	avmovepg		; Move the next line...

	li	r8,0			; Clear this
	sync				; Make sure all the memory stuff is done
	mtsprg	256,r8			; Show we are not using VRs any more
	mtmsr	r9			; Translation and interruptions back on
	isync
	blr
#endif


/*
 * int
 * copyin(src, dst, count)
 *	vm_offset_t	src;
 *	vm_offset_t	dst;
 *	int		count;
 *
 */
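
/*
 * The copyin/copyout/copyinstr routines below all follow one pattern:
 * point thread_recover at an error exit, do the copy, then clear
 * thread_recover. If the copy touches an unmapped user address, the
 * exception handler resumes at the recovery point instead of panicking.
 * A hedged C model using setjmp (the real mechanism is the PC stored in
 * THREAD_RECOVER, not a jmp_buf):
 *
 *	#include <setjmp.h>
 *	#include <string.h>
 *	#include <errno.h>
 *
 *	static jmp_buf recover;		// stands in for THREAD_RECOVER
 *
 *	static int copyin_model(const void *usrc, void *kdst, size_t n)
 *	{
 *		if (setjmp(recover))	// fault handler longjmps here
 *			return EFAULT;
 *		memcpy(kdst, usrc, n);	// the bcopy; may fault
 *		return 0;		// success: recovery point cleared
 *	}
 */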

ENTRY2(copyin, copyinmsg, TAG_NO_FRAME_USED)

/* Preamble allowing us to call a sub-function */
	mflr	r0
	stw	r0,FM_LR_SAVE(r1)
	stwu	r1,-(FM_SIZE+16)(r1)

	mfmsr	r0			/* Get the MSR */
	rlwinm	r6,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r6			/* Disable 'rupts */

	mfsprg	r6,0			/* Get the per_proc */
	lwz	r6,PP_CPU_DATA(r6)
	cmpli	cr0,r5,0
	lwz	r10,CPU_ACTIVE_THREAD(r6)
	mtmsr	r0			/* Set 'rupts back */
	ble-	cr0,.L_copyinout_trivial

/* We know we have a valid copyin to do now.
 * Set up thread_recover in case we hit an illegal address.
 */

	lwz	r8,THREAD_TOP_ACT(r10)
	lis	r11,hi16(.L_copyinout_error)
	lwz	r8,ACT_VMMAP(r8)
	ori	r11,r11,lo16(.L_copyinout_error)
	add	r9,r3,r5		/* Get the end of the source */
	lwz	r8,VMMAP_PMAP(r8)	; Get the pmap
	rlwinm	r12,r3,6,26,29		; Get index to the segment slot
	subi	r9,r9,1			/* Make sure we don't go too far */
	add	r8,r8,r12		; Start indexing to the segment value
	stw	r11,THREAD_RECOVER(r10)
	xor	r9,r9,r3		/* Smoosh 'em together */
	lwz	r8,PMAP_SEGS(r8)	; Get the source SR value
	rlwinm.	r9,r9,0,1,3		/* Top nybble equal? */
	mtsr	SR_COPYIN,r8		; Set the SR
	isync
#if 0
	lis	r0,HIGH_ADDR(EXT(dbgRegsCall))	/* (TEST/DEBUG) */
	ori	r0,r0,LOW_ADDR(EXT(dbgRegsCall))	/* (TEST/DEBUG) */
	sc				/* (TEST/DEBUG) */
#endif

/* For optimization, we check whether the copyin crosses a segment
 * boundary. If it doesn't, we can use a simple copy. If it does,
 * we split it into two separate copies in some C code.
 */
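
/*
 * A hedged sketch of the test just performed: the addresses of the first
 * and last byte are XORed, and rlwinm. keeps bits 1-3 of the result
 * (mask 0x70000000), so the branch is taken when the copy spans more
 * than one 256MB segment.
 *
 *	static int crosses_segment(unsigned long src, unsigned long len)
 *	{
 *		unsigned long x = src ^ (src + len - 1);	// "smoosh 'em together"
 *		return (x & 0x70000000UL) != 0;	// rlwinm. r9,r9,0,1,3
 *	}
 */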

	bne-	.L_call_copyin_multiple	/* Nope, it crosses a segment boundary... */

	rlwinm	r3,r3,0,4,31
	oris	r3,r3,(SR_COPYIN_NUM << (28-16))	/* Set the copyin segment as the source */

	bl	EXT(bcopy)

/* Now that copyin is done, we don't need a recovery point */
	mfmsr	r7			/* Get the MSR */
	rlwinm	r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r6			/* Disable 'rupts */

	mfsprg	r6,0			/* Get the per_proc */

	lwz	r6,PP_CPU_DATA(r6)
	addi	r1,r1,FM_SIZE+16
	lwz	r10,CPU_ACTIVE_THREAD(r6)
	mtmsr	r7			; Restore interrupts
	li	r3,0
	lwz	r0,FM_LR_SAVE(r1)
	stw	r3,THREAD_RECOVER(r10)	/* Clear recovery */
	mtlr	r0
	blr

/* We get here via the exception handler if an illegal
 * user memory reference was made.
 */
.L_copyinout_error:

/* The copy failed, so the recovery point is no longer needed */

	mfmsr	r7			/* Get the MSR */
	rlwinm	r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r6			/* Disable 'rupts */

	mfsprg	r6,0			/* Get the per_proc */

	lwz	r6,PP_CPU_DATA(r6)
	addi	r1,r1,FM_SIZE+16
	lwz	r10,CPU_ACTIVE_THREAD(r6)
	mtmsr	r7			; Restore interrupts
	li	r4,0
	lwz	r0,FM_LR_SAVE(r1)
	stw	r4,THREAD_RECOVER(r10)	/* Clear recovery */
	mtlr	r0
	li	r3,EFAULT		; Indicate error (EFAULT)
	blr

.L_copyinout_trivial:
/* The copyin/out was for either 0 bytes or a negative
 * number of bytes; return an appropriate value (0 == SUCCESS).
 * cr0 still contains the result of comparing len with 0.
 */
	li	r3,0
	beq+	cr0,.L_copyinout_negative
	li	r3,1
.L_copyinout_negative:

/* unwind the stack */
	addi	r1,r1,FM_SIZE+16
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0

	blr

.L_call_copyin_multiple:

/* unwind the stack */
	addi	r1,r1,FM_SIZE+16
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0

	b	EXT(copyin_multiple)	/* not a call - a jump! */

/*
 * int
 * copyout(src, dst, count)
 *	vm_offset_t	src;
 *	vm_offset_t	dst;
 *	int		count;
 *
 */

ENTRY2(copyout, copyoutmsg, TAG_NO_FRAME_USED)

/* Preamble allowing us to call a sub-function */

	mflr	r0
	stw	r0,FM_LR_SAVE(r1)
	stwu	r1,-(FM_SIZE+16)(r1)

#if 0
	stw	r3,FM_SIZE+0(r1)	/* (TEST/DEBUG) */
	stw	r4,FM_SIZE+4(r1)	/* (TEST/DEBUG) */
	stw	r5,FM_SIZE+8(r1)	/* (TEST/DEBUG) */
	mr	r6,r0			/* (TEST/DEBUG) */

	bl	EXT(tracecopyout)	/* (TEST/DEBUG) */

	lwz	r3,FM_SIZE+0(r1)	/* (TEST/DEBUG) */
	lwz	r4,FM_SIZE+4(r1)	/* (TEST/DEBUG) */
	lwz	r5,FM_SIZE+8(r1)	/* (TEST/DEBUG) */
#endif

	mfmsr	r7			/* Get the MSR */
	rlwinm	r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r6			/* Disable 'rupts */

	mfsprg	r6,0			/* Get the per_proc */

	lwz	r6,PP_CPU_DATA(r6)
	cmpli	cr0,r5,0
	lwz	r10,CPU_ACTIVE_THREAD(r6)
	mtmsr	r7			/* Restore 'rupts */
	ble-	cr0,.L_copyinout_trivial
/* We know we have a valid copyout to do now.
 * Set up thread_recover in case we hit an illegal address.
 */

	lwz	r8,THREAD_TOP_ACT(r10)
	lis	r11,HIGH_ADDR(.L_copyinout_error)
	lwz	r8,ACT_VMMAP(r8)
	rlwinm	r12,r4,6,26,29		; Get index to the segment slot
	ori	r11,r11,LOW_ADDR(.L_copyinout_error)
	add	r9,r4,r5		/* Get the end of the destination */
	lwz	r8,VMMAP_PMAP(r8)
	subi	r9,r9,1			/* Make sure we don't go too far */
	add	r8,r8,r12		; Start indexing to the segment value
	stw	r11,THREAD_RECOVER(r10)
	xor	r9,r9,r4		/* Smoosh 'em together */
	lwz	r8,PMAP_SEGS(r8)	; Get the destination SR value
	rlwinm.	r9,r9,0,1,3		/* Top nybble equal? */
	mtsr	SR_COPYIN,r8
	isync

/* For optimization, we check whether the copyout crosses a segment
 * boundary (the same top-nybble test sketched above for copyin). If it
 * doesn't, we can use a simple copy. If it does, we split it into two
 * separate copies in some C code.
 */

	bne-	.L_call_copyout_multiple	/* Nope, it crosses a segment boundary... */

	rlwinm	r4,r4,0,4,31
	oris	r4,r4,(SR_COPYIN_NUM << (28-16))	/* Set the copyin segment as the destination */

	bl	EXT(bcopy)

/* Now that copyout is done, we don't need a recovery point */
	mfmsr	r7			/* Get the MSR */
	rlwinm	r6,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r6			/* Disable 'rupts */

	mfsprg	r6,0			/* Get the per_proc */

	lwz	r6,PP_CPU_DATA(r6)
	addi	r1,r1,FM_SIZE+16
	lwz	r10,CPU_ACTIVE_THREAD(r6)
	mtmsr	r7			; Restore interrupts
	li	r3,0
	lwz	r0,FM_LR_SAVE(r1)
	stw	r3,THREAD_RECOVER(r10)	/* Clear recovery */
	mtlr	r0
	blr

.L_call_copyout_multiple:
/* unwind the stack */
	addi	r1,r1,FM_SIZE+16
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0

	b	EXT(copyout_multiple)	/* not a call - a jump! */

/*
 * int
 * copyinstr(src, dst, maxcount, count)
 *	vm_offset_t	src;
 *	vm_offset_t	dst;
 *	vm_size_t	maxcount;
 *	vm_size_t	*count;
 *
 * Set *count to the number of bytes copied (including the terminating NUL).
 *
 * If dst == NULL, don't copy, just count bytes.
 * Only currently called from klcopyinstr.
 */
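
/*
 * A hedged C model of the semantics implemented below (dst == NULL means
 * count only; the terminating NUL is copied and counted):
 *
 *	#include <errno.h>
 *
 *	static int copyinstr_model(const char *src, char *dst,
 *				   long max, unsigned long *count)
 *	{
 *		unsigned long n = 0;
 *		while (max-- > 0) {
 *			char c = *src++;
 *			if (dst)
 *				*dst++ = c;
 *			n++;
 *			if (c == '\0') {
 *				*count = n;
 *				return 0;
 *			}
 *		}
 *		*count = n;
 *		return ENAMETOOLONG;
 *	}
 */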

ENTRY(copyinstr, TAG_NO_FRAME_USED)

/* Preamble allowing us to call a sub-function */
	mflr	r0
	stw	r0,FM_LR_SAVE(r1)
	stwu	r1,-(FM_SIZE+16)(r1)

#if 0
	stw	r3,FM_SIZE+0(r1)	/* (TEST/DEBUG) */
	stw	r4,FM_SIZE+4(r1)	/* (TEST/DEBUG) */
	stw	r5,FM_SIZE+8(r1)	/* (TEST/DEBUG) */
	stw	r6,FM_SIZE+12(r1)	/* (TEST/DEBUG) */
	mr	r7,r0			/* (TEST/DEBUG) */

	bl	EXT(tracecopystr)	/* (TEST/DEBUG) */

	lwz	r3,FM_SIZE+0(r1)	/* (TEST/DEBUG) */
	lwz	r4,FM_SIZE+4(r1)	/* (TEST/DEBUG) */
	lwz	r5,FM_SIZE+8(r1)	/* (TEST/DEBUG) */
	lwz	r6,FM_SIZE+12(r1)	/* (TEST/DEBUG) */
#endif

	mfmsr	r0			/* Get the MSR */
	rlwinm	r7,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1	/* Clear 'rupts */
	mtmsr	r7			/* Disable 'rupts */

	mfsprg	r7,0			/* Get the per_proc */
	lwz	r7,PP_CPU_DATA(r7)
	cmpli	cr0,r5,0
	lwz	r10,CPU_ACTIVE_THREAD(r7)
	mtmsr	r0			/* Restore 'rupts */
	ble-	cr0,.L_copyinout_trivial

/* We know we have a valid copyin to do now.
 * Set up thread_recover in case we hit an illegal address.
 */

	li	r0,0
	lwz	r8,THREAD_TOP_ACT(r10)
	stw	r0,0(r6)		/* Clear result length */
	lis	r11,HIGH_ADDR(.L_copyinout_error)
	lwz	r8,ACT_VMMAP(r8)	; Get the map for this activation
	rlwinm	r12,r3,6,26,29		; Get index to the segment slot
	lwz	r8,VMMAP_PMAP(r8)
	ori	r11,r11,LOW_ADDR(.L_copyinout_error)
	add	r8,r8,r12		; Start indexing to the segment value
	stw	r11,THREAD_RECOVER(r10)
	rlwinm	r3,r3,0,4,31
	lwz	r7,PMAP_SEGS(r8)	; Get the source SR value
	oris	r3,r3,(SR_COPYIN_NUM << (28-16))	/* Set the copyin segment as the source */

/* Copy byte by byte for now - TODO NMGS speed this up with
 * some clever (but fairly standard) logic for word copies.
 * We don't use a copyinstr_multiple since copyinstr is called
 * with INT_MAX in the linux server. Eugh.
 */
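
/*
 * The standard trick alluded to above (a hedged sketch, not code that
 * ships here): a 32-bit word contains a zero byte iff
 * (w - 0x01010101) & ~w & 0x80808080 is nonzero, so an aligned string
 * can be scanned a word at a time and only the last word rechecked
 * bytewise.
 *
 *	static int word_has_nul(unsigned long w)	// 32-bit words assumed
 *	{
 *		return ((w - 0x01010101UL) & ~w & 0x80808080UL) != 0;
 *	}
 */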

	li	r9,0			/* Clear byte counter */

/* If the destination is NULL, don't do writes,
 * just count bytes. We set CR7 outside the loop to save time.
 */
	cmpwi	cr7,r4,0		/* Is the destination null? */

nxtseg:	mtsr	SR_COPYIN,r7		/* Set the source SR */
	isync

.L_copyinstr_loop:
	lbz	r0,0(r3)		/* Get the source */
	addic.	r5,r5,-1		/* Have we gone far enough? */
	addi	r3,r3,1			/* Bump source pointer */

	cmpwi	cr1,r0,0		/* Did we hit a null? */

	beq	cr7,.L_copyinstr_no_store	/* If we are just counting, skip the store... */

	stb	r0,0(r4)		/* Move to sink */
	addi	r4,r4,1			/* Advance sink pointer */

.L_copyinstr_no_store:

	addi	r9,r9,1			/* Count the character */
	beq-	cr1,.L_copyinstr_done	/* We're done if we did a null... */
	beq-	cr0,L_copyinstr_toobig	/* Also if we maxed the count... */

/* Check to see if the copyin pointer has moved out of the
 * copyin segment; if it has, we must remap.
 */

	rlwinm.	r0,r3,0,4,31		/* Did we wrap around to 0? */
	bne+	cr0,.L_copyinstr_loop	/* Nope... */

	lwz	r7,PMAP_SEGS+4(r8)	; Get the next source SR value
	addi	r8,r8,4			; Point to the next segment
	oris	r3,r0,(SR_COPYIN_NUM << (28-16))	/* Reset the segment number */
	b	nxtseg			/* Keep going... */

L_copyinstr_toobig:
	li	r3,ENAMETOOLONG
	b	L_copyinstr_return
.L_copyinstr_done:
	li	r3,0			/* Normal return */
L_copyinstr_return:
	li	r4,0			/* to clear thread_recover */
	stw	r9,0(r6)		/* Set how many bytes we did */
	stw	r4,THREAD_RECOVER(r10)	/* Clear recovery exit */

	addi	r1,r1,FM_SIZE+16
	lwz	r0,FM_LR_SAVE(r1)
	mtlr	r0
	blr