[apple/xnu.git] / osfmk / ppc / hw_exception.s
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26
27 /* Low level routines dealing with exception entry and exit.
28 * There are various types of exception:
29 *
30 * Interrupt, trap, system call and debugger entry. Each has its own
31 * handler since the state save routine is different for each. The
32 * code is very similar (a lot of cut and paste).
33 *
34 * The code for the FPU disabled handler (lazy fpu) is in cswtch.s
35 */
36
37 #include <debug.h>
38 #include <mach_assert.h>
39 #include <mach/exception_types.h>
40 #include <mach/kern_return.h>
41 #include <mach/ppc/vm_param.h>
42
43 #include <assym.s>
44
45 #include <ppc/asm.h>
46 #include <ppc/proc_reg.h>
47 #include <ppc/trap.h>
48 #include <ppc/exception.h>
49 #include <ppc/savearea.h>
50
51
52 #define VERIFYSAVE 0
53 #define FPVECDBG 0
54 #define FPFLOOD 0
55 #define INSTRUMENT 0
56
57 /*
58 * thandler(type)
59 *
60 * ENTRY: VM switched ON
61 * Interrupts OFF
62 * R3 contains exception code
63 * R4 points to the saved context (virtual address)
64 * Everything is saved in savearea
65 */
66
67 /*
68 * If pcb.ksp == 0 then the kernel stack is already busy,
69 * we make a stack frame
70 * leaving enough space for the 'red zone' in case the
71 * trapped thread was in the middle of saving state below
72 * its stack pointer.
73 *
74 * otherwise we make a stack frame and take
75 * the kernel stack (setting pcb.ksp to 0)
76 *
77 * on return, we do the reverse, the last state is popped from the pcb
78 * and pcb.ksp is set to the top of stack
79 */
80
81 /* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
82 * another trap is taken. We need at least enough space for a saved state
83 * structure plus two small backpointer frames, and we add a few
84 * hundred bytes for the space needed by the C code (which may be less but
85 * may be much more). We're trying to catch kernel stack overflows :-)
86 */
87
88 #define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256
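/* As a rough illustration only (the real values come from assym.s and the
 * PPC ABI, so the numbers here are assumptions): with a 224-byte red zone
 * and 64-byte minimal frames this works out to 224 + (2 * 64) + 256 = 608
 * bytes assumed free on the kernel stack whenever another trap may be taken.
 */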
89
90 .text
91
92 .align 5
93 .globl EXT(thandler)
94 LEXT(thandler) ; Trap handler
95
96 mfsprg r13,1 ; Get the current activation
97 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
98
99 lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
100
101 cmpwi cr0,r1,0 ; Are we on interrupt stack?
102 mr r6,r13
103 beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
104 lwz r26,ACT_MACT_SPF(r13) ; Get special flags
105 lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used
106 rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active?
107 lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack
108 bnel- checkassist ; See if we should assist this
109 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
110 stw r8,SAVprev+4(r4) ; Queue the new save area in the front
111
112 #if VERIFYSAVE
113 bl versave ; (TEST/DEBUG)
114 #endif
115
116 lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start
117 cmpwi cr1,r1,0 ; Are we already on kernel stack?
118 stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation
119 lwz r26,saver1+4(r4) ; Get the stack at interrupt time
120
121 bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
122
123 subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
124
125 .L_kstackfree:
126 lwz r31,savesrr1+4(r4) ; Pick up the entry MSR
127 sub r9,r1,r9 ; Get displacment into the kernel stack
128 li r0,0 ; Make this 0
129 rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
130 cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
131 beq cr1,.L_state_on_kstack ; using above test for pcb/stack
132
133 stw r0,ACT_MACT_KSP(r13) ; Show that we have taken the stack
134
135 .L_state_on_kstack:
136 lwz r9,savevrsave(r4) ; Get the VRSAVE register
137 bne-- kernelStackUnaligned ; Stack is unaligned...
138 rlwinm. r6,r31,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
139 subi r1,r1,FM_SIZE ; Push a header onto the current stack
140 bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
141
142 kernelStackNotBad: ; Vector was off
143 beq++ tvecoff ; Vector off, do not save vrsave...
144 stw r9,liveVRS(r25) ; Set the live value
145
146 tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
147
148 #if DEBUG
149 /* If debugging, we need two frames, the first being a dummy
150 * which links back to the trapped routine. The second is
151 * that which the C routine below will need
152 */
153 lwz r3,savesrr0+4(r4) ; Get the point of interruption
154 stw r3,FM_LR_SAVE(r1) ; save old instr ptr as LR value
155 stwu r1, -FM_SIZE(r1) ; and make new frame
156 #endif /* DEBUG */
157
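; Charge elapsed time as of the timestamp captured in the savearea (SAVtime)
; and make the thread's system (kernel) timer the active one.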
158 mr r30,r4
159 lwz r3,SAVtime+4(r4)
160 addi r4,r13,SYSTEM_TIMER
161 bl EXT(timer_event)
162
163 /* call trap handler proper, with
164 * ARG0 = type
165 * ARG1 = saved_state ptr
166 * ARG2 = dsisr
167 * ARG3 = dar
168 */
169
170 mr r4,r30
171 lwz r3,saveexception(r30) ; Get the exception code
172 lwz r0,ACT_MACT_SPF(r13) ; Get the special flags
173
174 addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range
175 rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes)
176 cmplwi cr2,r5,T_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not)
177
178 lwz r5,savedsisr(r4) ; Get the saved DSISR
179
180 crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes)
181 rlwinm. r0,r31,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
182
183 cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
184
185 beq-- .L_check_VM
186 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
187 .L_check_VM:
188
189 crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)
190
191 lwz r6,savedar(r4) ; Get the DAR (top)
192 lwz r7,savedar+4(r4) ; Get the DAR (bottom)
193
194 beq- cr2,.L_call_trap ; Do not turn on interrupts for T_PREEMPT
195 beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM...
196
197 /* syscall exception might warp here if there's nothing left
198 * to do except generate a trap
199 */
200
201 .L_call_trap:
202
203 #if FPFLOOD
204 stfd f31,emfp31(r25) ; (TEST/DEBUG)
205 #endif
206
207 bl EXT(trap)
208
209 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
210 mfmsr r7 ; Get the MSR
211 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
212 andc r7,r7,r10 ; Turn off VEC, FP, and EE
213 mtmsr r7 ; Disable for interrupts
214 mfsprg r8,1 ; Get the current activation
215 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
216 /*
217 * This is also the point where new threads come when they are created.
218 * The new thread is set up to look like a thread that took an
219 * interrupt and went immediately into trap().
220 */
221
222 thread_return:
223 lwz r11,SAVflags(r3) ; Get the flags of the current savearea
224 lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
225 lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
226 mfsprg r8,1 ; Get the current thread
227 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
228 rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
229 mr r1,r8
230 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
231
232 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
233 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
234 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
235
236 beq-- chkfac ; We are not leaving the kernel yet...
237
238 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
239 b chkfac ; Go end it all...
240
241
242 ;
243 ; Here is where we go when we detect that the kernel stack is all messed up.
244 ; We just try to dump some info and get into the debugger.
245 ;
246
247 kernelStackBad:
248
249 lwz r3,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
250 subi r3,r3,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
251 sub r3,r1,r3 ; Get displacement into debug stack
252 cmplwi cr2,r3,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
253 blt+ cr2,kernelStackNotBad ; Yeah, that is ok too...
254
255 lis r0,hi16(Choke) ; Choke code
256 ori r0,r0,lo16(Choke) ; and the rest
257 li r3,failStack ; Bad stack code
258 sc ; System ABEND
259
260 kernelStackUnaligned:
261 lis r0,hi16(Choke) ; Choke code
262 ori r0,r0,lo16(Choke) ; and the rest
263 li r3,failUnalignedStk ; Unaligned stack code
264 sc ; System ABEND
265
266
267 /*
268 * shandler(type)
269 *
270 * ENTRY: VM switched ON
271 * Interrupts OFF
272 * R3 contains exception code
273 * R4 points to the saved context (virtual address)
274 * Everything is saved in savearea
275 */
276
277 /*
278 * If pcb.ksp == 0 then the kernel stack is already busy,
279 * this is an error - jump to the debugger entry
280 *
281 * otherwise depending upon the type of
282 * syscall, look it up in the kernel table
283 * or pass it to the server.
284 *
285 * on return, we do the reverse, the state is popped from the pcb
286 * and pcb.ksp is set to the top of stack.
287 */
288
289 /*
290 * NOTE:
291 * mach system calls are negative
292 * BSD system calls are low positive
293 * PPC-only system calls are in the range 0x6xxx
294 * PPC-only "fast" traps are in the range 0x7xxx
295 */
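/*
 * For illustration, the "fast" traps are reached with an ordinary sc, just
 * like any other call; e.g. the CthreadSetSelfNumber fast trap handled near
 * the end of this file is issued from user code as:
 *
 * li r0,0x7FF1 ; load the fast-trap number
 * sc ; trap into shandler
 * blr
 */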
296
297 .align 5
298 .globl EXT(shandler)
299 LEXT(shandler) ; System call handler
300
301 lwz r7,savesrr1+4(r4) ; Get the SRR1 value
302 mfsprg r13,1 ; Get the current activation
303 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
304 lwz r0,saver0+4(r4) ; Get the original syscall number
305 lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
306 rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check
307 mr. r17,r17 ; Are we on interrupt stack?
308 lwz r9,savevrsave(r4) ; Get the VRsave register
309 beq-- EXT(ihandler) ; On interrupt stack, not allowed...
310 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
311 mr r16,r13
312
313 beq++ svecoff ; Vector off, do not save vrsave...
314 stw r9,liveVRS(r25) ; Set the live value
315 ;
316 ; Check if SCs are being redirected for the BlueBox or to VMM
317 ;
318
319 svecoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags
320 mtcrf 0x40,r6 ; Check special flags
321 mtcrf 0x01,r6 ; Check special flags
322 crmove cr6_eq,runningVMbit ; Remember if we are in VMM
323 bne++ cr6,sVMchecked ; Not running VM
324 lwz r18,spcFlags(r25) ; Load per_proc special flags
325 rlwinm. r18,r18,0,FamVMmodebit,FamVMmodebit ; Is FamVMmodebit set?
326 beq sVMchecked ; Not in FAM
327 cmpwi r0,0x6004 ; Is it the vmm_dispatch syscall?
328 bne sVMchecked
329 lwz r26,saver3+4(r4) ; Get the original syscall number
330 cmpwi cr6,r26,kvmmExitToHost ; vmm_exit_to_host request
331 sVMchecked:
332 bf++ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected
333 lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area
334 b EXT(atomic_switch_syscall) ; Go to the assist...
335
336 noassist: cmplwi r15,0x7000 ; Do we have a fast path trap?
337 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
338 beql fastpath ; We think it is a fastpath...
339
340 lwz r1,ACT_MACT_KSP(r13) ; Get the kernel stack pointer
341 #if DEBUG
342 mr. r1,r1 ; Are we already on the kernel stack?
343 li r3,T_SYSTEM_CALL ; Yup, pretend we had an interrupt...
344 beq- EXT(ihandler) ; Bad boy, bad boy... What cha gonna do when they come for you?
345 #endif /* DEBUG */
346
347 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
348 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
349 li r0,0 ; Clear this out
350 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
351 stw r13,SAVact(r4) ; Point the savearea at its activation
352
353 #if VERIFYSAVE
354 bl versave ; (TEST/DEBUG)
355 #endif
356
357 lwz r15,saver1+4(r4) ; Grab interrupt time stack
358 mr r30,r4 ; Save pointer to the new context savearea
359 stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val
360 stw r15,FM_BACKPTR(r1) ; Link stack frame backwards
361
362 lwz r3,SAVtime+4(r30)
363 addi r4,r13,SYSTEM_TIMER
364 bl EXT(timer_event)
365
366 #if DEBUG
367 /* If debugging, we need two frames, the first being a dummy
368 * which links back to the trapped routine. The second is
369 * that which the C routine below will need
370 */
371 lwz r8,savesrr0+4(r30) ; Get the point of interruption
372 stw r8,FM_LR_SAVE(r1) ; Save old instr ptr as LR value
373 stwu r1, -FM_SIZE(r1) ; and make new frame
374 #endif /* DEBUG */
375
376 mr r4,r30
377
378 lwz r15,SAVflags(r30) ; Get the savearea flags
379 lwz r0,saver0+4(r30) ; Get R0 back
380 mfmsr r11 ; Get the MSR
381 stwu r1,-(FM_SIZE+ARG_SIZE+MUNGE_ARGS_SIZE)(r1) ; Make a stack frame
382 ori r11,r11,lo16(MASK(MSR_EE)) ; Turn on interruption enabled bit
383 rlwinm r10,r0,0,0,19 ; Keep only the top part
384 oris r15,r15,SAVsyscall >> 16 ; Mark that this is a syscall
385 cmplwi r10,0x6000 ; Is it the special ppc-only guy?
386 stw r15,SAVflags(r30) ; Save syscall marker
387 beq-- cr6,exitFromVM ; It is time to exit from alternate context...
388
389 beq-- ppcscall ; Call the ppc-only system call handler...
390
391 mr. r0,r0 ; What kind is it?
392 mtmsr r11 ; Enable interruptions
393
394 blt-- .L_kernel_syscall ; System call number is negative, so this is a mach call...
395
396 lwz r8,ACT_TASK(r13) ; Get our task
397 cmpwi cr0,r0,0x7FFA ; Special blue box call?
398 beq-- .L_notify_interrupt_syscall ; Yeah, call it...
399
400 lwz r7,TASK_SYSCALLS_UNIX(r8) ; Get the current count
401 mr r3,r30 ; Get PCB/savearea
402 mr r4,r13 ; current activation
403 addi r7,r7,1 ; Bump it
404 stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
405
406 #if FPFLOOD
407 stfd f31,emfp31(r25) ; (TEST/DEBUG)
408 #endif
409
410 bl EXT(unix_syscall) ; Check out unix...
411
412 .L_call_server_syscall_exception:
413 li r3,EXC_SYSCALL ; doexception(EXC_SYSCALL, num, 1)
414
415 .L_call_server_exception:
416 mr r4,r0 ; Set syscall selector
417 li r5,1
418 b EXT(doexception) ; Go away, never to return...
419
420 .L_notify_interrupt_syscall:
421 lwz r3,saver3+4(r30) ; Get the new PC address to pass in
422 bl EXT(syscall_notify_interrupt)
423 /*
424 * Ok, return from C function, R3 = return value
425 *
426 * saved state is still in R30 and the active thread is in R16.
427 */
428 mr r31,r16 ; Move the current thread pointer
429 stw r3,saver3+4(r30) ; Stash the return code
430 b .L_thread_syscall_ret_check_ast
431
432 ;
433 ; Handle PPC-only system call interface
434 ; These are called with interruptions disabled
435 ; and the savearea/pcb as the first parameter.
436 ; It is up to the callee to enable interruptions if
437 ; they should be. We are in a state here where
438 ; both interrupts and preemption are ok, but because we could
439 ; be calling diagnostic code we will not enable them.
440 ;
441 ; Also, the callee is responsible for finding any parameters
442 ; in the savearea/pcb. It also must set saver3 with any return
443 ; code before returning.
444 ;
445 ; There are 3 possible return codes:
446 ; 0 the call is disabled or something, we treat this like it was bogus
447 ; + the call finished ok, check for AST
448 ; - the call finished ok, do not check for AST
449 ;
450 ; Note: the last option is intended for special diagnostics calls that
451 ; want the thread to return and execute before checking for preemption.
452 ;
453 ; NOTE: Both R16 (thread) and R30 (savearea) need to be preserved over this call!!!!
454 ;
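; For illustration: the rlwinm below turns the low bits of the call number
; into a byte offset into the PPCcalls table of one-word entries, roughly
; offset = (r0 & 0xFFF) << 2. The vmm_dispatch call 0x6004 checked earlier,
; for example, lands at offset 0x10, the fifth entry of the table.
;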
455
456 .align 5
457
458 ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table
459 lis r10,hi16(EXT(PPCcalls)) ; Get PPC-only system call table
460 cmplwi r11,PPCcallmax ; See if we are too big
461 ori r10,r10,lo16(EXT(PPCcalls)) ; Merge in low half
462 bgt- .L_call_server_syscall_exception ; Bogus call...
463 lwzx r11,r10,r11 ; Get function address
464
465 ;
466 ; Note: make sure we do not change the savearea in R30 to
467 ; a different register without checking. Some of the PPCcalls
468 ; depend upon it being there.
469 ;
470
471 mr r3,r30 ; Pass the savearea
472 mr r4,r13 ; Pass the activation
473 mr. r11,r11 ; See if there is a function here
474 mtctr r11 ; Set the function address
475 beq- .L_call_server_syscall_exception ; Disabled call...
476 #if INSTRUMENT
477 mfspr r4,pmc1 ; Get stamp
478 stw r4,0x6100+(9*16)+0x0(0) ; Save it
479 mfspr r4,pmc2 ; Get stamp
480 stw r4,0x6100+(9*16)+0x4(0) ; Save it
481 mfspr r4,pmc3 ; Get stamp
482 stw r4,0x6100+(9*16)+0x8(0) ; Save it
483 mfspr r4,pmc4 ; Get stamp
484 stw r4,0x6100+(9*16)+0xC(0) ; Save it
485 #endif
486 bctrl ; Call it
487
488 .globl EXT(ppcscret)
489
490 LEXT(ppcscret)
491 mr. r3,r3 ; See what we should do
492 mr r31,r16 ; Restore the current thread pointer
493 bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return....
494 mfsprg r10,1 ; Get the current activation
495 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
496 blt+ .L_thread_syscall_return ; Return, but no ASTs....
497 lwz r0,saver0+4(r30) ; Restore the system call number
498 b .L_call_server_syscall_exception ; Go to common exit...
499
500
501
502 /*
503 * we get here for mach system calls
504 * when kdebug tracing is enabled
505 */
506
507 ksystrace:
508 mr r4,r30 ; Pass in saved state
509 bl EXT(syscall_trace)
510
511 cmplw r31,r29 ; Is this syscall in the table?
512 add r31,r27,r28 ; Point right to the syscall table entry
513
514 bge- .L_call_server_syscall_exception ; The syscall number is invalid
515
516 lwz r0,savesrr1(r30) ; Get the saved srr1
517 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
518 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
519 beq-- .L_ksystrace_munge
520 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
521
522 .L_ksystrace_munge:
523 cmplwi r0,0 ; do we have a munger to call?
524 mtctr r0 ; Set the function call address
525 addi r3,r30,saver3 ; Pointer to args from save area
526 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
527 beq-- .L_ksystrace_trapcall ; just make the trap call
528 bctrl ; Call the munge function
529
530 .L_ksystrace_trapcall:
531 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
532 mtctr r0 ; Set the function call address
533 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
534 bctrl
535
536 mr r4,r30 ; Pass in the savearea
537 bl EXT(syscall_trace_end) ; Trace the exit of the system call
538 b .L_mach_return
539
540
541
542 /* Once here, we know that the syscall was -ve
543 * we should still have r1=ksp,
544 * r16 = pointer to current thread,
545 * r13 = pointer to top activation,
546 * r0 = syscall number
547 * r30 = pointer to saved state (in pcb)
548 */
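/* For illustration of the indexing done below: the call number is negated,
 * then scaled by 16 and by 4 and the two results added, i.e. n * 20 bytes,
 * since each mach_trap_table entry is 5 words long. Mach call -3, say,
 * negates to 3 and indexes 60 bytes into the table.
 */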
549
550 .align 5
551
552 .L_kernel_syscall:
553 ;
554 ; Call a function that can print out our syscall info
555 ; Note that we don't care about any volatiles yet
556 ;
557 lwz r10,ACT_TASK(r13) ; Get our task
558 lwz r0,saver0+4(r30)
559 lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
560 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
561 ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
562 lwz r8,0(r8) ; Get kdebug_enable
563
564 lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
565 neg r31,r0 ; Make this positive
566 mr r3,r31 ; save it
567 slwi r27,r3,4 ; multiply by 16
568 slwi r3,r3,2 ; and the original by 4
569 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
570 add r27,r27,r3 ; for a total of 20x (5 words/entry)
571 addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
572 cmplwi r8,0 ; Is kdebug_enable non-zero
573 stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
574 bne-- ksystrace ; yes, tracing enabled
575
576 cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
577 add r31,r27,r28 ; Point right to the syscall table entry
578
579 bge-- .L_call_server_syscall_exception ; The syscall number is invalid
580
581 lwz r0,savesrr1(r30) ; Get the saved srr1
582 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
583 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
584 beq-- .L_kernel_syscall_munge
585 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
586
587 .L_kernel_syscall_munge:
588 cmplwi r0,0 ; test for null munger
589 mtctr r0 ; Set the function call address
590 addi r3,r30,saver3 ; Pointer to args from save area
591 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
592 beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
593 bctrl ; Call the munge function
594
595 .L_kernel_syscall_trapcall:
596 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
597 mtctr r0 ; Set the function call address
598 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
599
600 #if FPFLOOD
601 stfd f31,emfp31(r25) ; (TEST/DEBUG)
602 #endif
603
604 bctrl
605
606
607 /*
608 * Ok, return from C function, R3 = return value
609 *
610 * get the active thread's PCB pointer and thus pointer to user state
611 * saved state is still in R30 and the active thread is in R16
612 */
613
614 .L_mach_return:
615 srawi r0,r3,31 ; properly extend the return code
616 cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
617 mr r31,r16 ; Move the current thread pointer
618 stw r0, saver3(r30) ; stash the high part of the return code
619 stw r3,saver3+4(r30) ; Stash the low part of the return code
620 beq-- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
621 .L_mach_invalid_arg:
622
623
624 /* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
625 * the syscall may perform a thread_set_syscall_return
626 * followed by a thread_exception_return, ending up
627 * at thread_syscall_return below, with SS_R3 having
628 * been set up already
629 *
630 * When we are here, r31 should point to the current thread,
631 * r30 should point to the current pcb
632 * r3 contains value that we're going to return to the user
633 * which has already been stored back into the save area
634 */
635
636 .L_thread_syscall_ret_check_ast:
637 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
638 mfmsr r12 ; Get the current MSR
639 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
640 andc r12,r12,r10 ; Turn off VEC, FP, and EE
641 mtmsr r12 ; Turn interruptions off
642
643 mfsprg r10,1 ; Get the current activation
644 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
645
646 /* Check to see if there's an outstanding AST */
647
648 lwz r4,PP_PENDING_AST(r10)
649 cmpi cr0,r4, 0 ; Any pending asts?
650 beq++ cr0,.L_syscall_no_ast ; Nope...
651
652 /* Yes there is, call ast_taken
653 * pretending that the user thread took an AST exception here,
654 * ast_taken will save all state and bring us back here
655 */
656
657 #if DEBUG
658 /* debug assert - make sure that we're not returning to kernel */
659 lwz r3,savesrr1+4(r30)
660 andi. r3,r3,MASK(MSR_PR)
661 bne++ scrnotkern ; returning to user level, check
662
663 lis r0,hi16(Choke) ; Choke code
664 ori r0,r0,lo16(Choke) ; and the rest
665 li r3,failContext ; Bad state code
666 sc ; System ABEND
667
668 scrnotkern:
669 #endif /* DEBUG */
670
671 lis r3,hi16(AST_ALL) ; Set ast flags
672 li r4,1 ; Set interrupt allowed
673 ori r3,r3,lo16(AST_ALL)
674 bl EXT(ast_taken) ; Process the pending ast
675 b .L_thread_syscall_ret_check_ast ; Go see if there was another...
676
677 .L_mach_invalid_ret:
678 /*
679 * need to figure out why we got a KERN_INVALID_ARG
680 * if it was due to a non-existent system call
681 * then we want to throw an exception... otherwise
682 * we want to pass the error code back to the caller
683 */
684 lwz r0,saver0+4(r30) ; reload the original syscall number
685 neg r28,r0 ; Make this positive
686 mr r4,r28 ; save a copy
687 slwi r27,r4,4 ; multiply by 16
688 slwi r4,r4,2 ; and the copy by 4
689 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
690 add r27,r27,r4 ; for a total of 20x (5 words/entry)
691 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
692 add r28,r27,r28 ; Point right to the syscall table entry
693 lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
694 lis r28,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
695 ori r28,r28,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
696 cmpw cr0,r27,r28 ; Check if this is an invalid system call
697 beq-- .L_call_server_syscall_exception ; We have a bad system call
698 b .L_mach_invalid_arg ; a system call returned KERN_INVALID_ARG
699
700
701 /* thread_exception_return returns to here, almost all
702 * registers intact. It expects a full context restore
703 * of what it hasn't restored itself (i.e. what we use).
704 *
705 * In particular for us,
706 * r31 still points to the current thread,
707 * r30 still points to the current pcb
708 */
709
710 .align 5
711
712 .L_syscall_no_ast:
713 .L_thread_syscall_return:
714
715 mr r3,r30 ; Get savearea to the correct register for common exit
716
717 lwz r11,SAVflags(r30) ; Get the flags
718 lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
719 lwz r4,SAVprev+4(r30) ; Get the previous save area
720 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
721 mfsprg r8,1 ; Now find the current activation
722 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
723 stw r11,SAVflags(r30) ; Stick back the flags
724 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
725 stw r4,ACT_MACT_PCB(r8) ; Save previous save area
726 b chkfac ; Go end it all...
727
728 /*
729 * thread_exception_return()
730 *
731 * Return to user mode directly from within a system call.
732 */
733
734 .align 5
735 .globl EXT(thread_bootstrap_return)
736 LEXT(thread_bootstrap_return) ; NOTE: THIS IS GOING AWAY IN A FEW DAYS....
737
738 .globl EXT(thread_exception_return)
739 LEXT(thread_exception_return) ; Directly return to user mode
740
741 .L_thread_exc_ret_check_ast:
742 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
743 mfmsr r3 ; Get the MSR
744 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
745 andc r3,r3,r10 ; Turn off VEC, FP, and EE
746 mtmsr r3 ; Disable interrupts
747
748 /* Check to see if there's an outstanding AST */
749 /* We don't bother establishing a call frame even though CHECK_AST
750 can invoke ast_taken(), because it can just borrow our caller's
751 frame, given that we're not going to return.
752 */
753
754 mfsprg r10,1 ; Get the current activation
755 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
756 lwz r4,PP_PENDING_AST(r10)
757 cmpi cr0,r4, 0
758 beq+ cr0,.L_exc_ret_no_ast
759
760 /* Yes there is, call ast_taken
761 * pretending that the user thread took an AST exception here,
762 * ast_taken will save all state and bring us back here
763 */
764
765 lis r3,hi16(AST_ALL)
766 li r4,1
767 ori r3,r3,lo16(AST_ALL)
768
769 bl EXT(ast_taken)
770 b .L_thread_exc_ret_check_ast ; check for a second AST (rare)
771
772 /* arriving here, interrupts should be disabled */
773 /* Get the active thread's PCB pointer to restore regs
774 */
775 .L_exc_ret_no_ast:
776
777 mfsprg r30,1 ; Get the current activation
778 mr r31,r30
779
780 lwz r30,ACT_MACT_PCB(r30)
781 mr. r30,r30 ; Is there any context yet?
782 beq- makeDummyCtx ; No, hack one up...
783 #if DEBUG
784 /*
785 * debug assert - make sure that we're not returning to kernel
786 * get the active thread's PCB pointer and thus pointer to user state
787 */
788
789 lwz r3,savesrr1+4(r30)
790 andi. r3,r3,MASK(MSR_PR)
791 bne+ ret_user2 ; We are ok...
792
793 lis r0,hi16(Choke) ; Choke code
794 ori r0,r0,lo16(Choke) ; and the rest
795 li r3,failContext ; Bad state code
796 sc ; System ABEND
797
798 ret_user2:
799 #endif /* DEBUG */
800
801 /* If the system call flag isn't set, then we came from a trap,
802 * so warp into the return_from_trap (thread_return) routine,
803 * which takes PCB pointer in R3, not in r30!
804 */
805 lwz r0,SAVflags(r30) ; Grab the savearea flags
806 andis. r0,r0,SAVsyscall>>16 ; Are we returning from a syscall?
807 mr r3,r30 ; Copy pcb pointer into r3 in case we need it
808 beq-- cr0,thread_return ; Nope, must be a thread return...
809 b .L_thread_syscall_return ; Join up with the system call return...
810
811 ;
812 ; This is where we handle someone who did a thread_create followed
813 ; by a thread_resume with no intervening thread_set_state. Just make an
814 ; empty context, initialize it to trash and let em execute at 0...
815 ;
816
817 .align 5
818
819 makeDummyCtx:
820 bl EXT(save_get) ; Get a save_area
821 li r4,SAVgeneral ; Get the general context type
822 li r0,0 ; Get a 0
823 stb r4,SAVflags+2(r3) ; Set type
824 addi r2,r3,savefpscr+4 ; Point past what we are clearing
825 mr r4,r3 ; Save the start
826
827 cleardummy: stw r0,0(r4) ; Clear stuff
828 addi r4,r4,4 ; Next word
829 cmplw r4,r2 ; Still some more?
830 blt+ cleardummy ; Yeah...
831
832 lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR
833 ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part
834 stw r2,savesrr1+4(r3) ; Set the default user MSR
835
836 b thread_return ; Go let em try to execute, hah!
837
838 /*
839 * ihandler(type)
840 *
841 * ENTRY: VM switched ON
842 * Interrupts OFF
843 * R3 contains exception code
844 * R4 points to the saved context (virtual address)
845 * Everything is saved in savearea
846 *
847 */
848
849 .align 5
850 .globl EXT(ihandler)
851 LEXT(ihandler) ; Interrupt handler
852
853 /*
854 * get the value of istackptr, if it's zero then we're already on the
855 * interrupt stack.
856 */
857
858 lwz r10,savesrr1+4(r4) ; Get SRR1
859 lwz r7,savevrsave(r4) ; Get the VRSAVE register
860 mfsprg r13,1 ; Get the current activation
861 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
862 li r14,0 ; Zero this for now
863 rlwinm. r16,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
864 lwz r1,PP_ISTACKPTR(r25) ; Get the interrupt stack
865 li r16,0 ; Zero this for now
866
867 beq+ ivecoff ; Vector off, do not save vrsave...
868 stw r7,liveVRS(r25) ; Set the live value
869
870 ivecoff: li r0,0 ; Get a constant 0
871 rlwinm r5,r10,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
872 mr. r1,r1 ; Is it active?
873 cmplwi cr2,r5,0 ; cr2_eq == 1 if yes
874 mr r16,r13
875 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
876 lwz r9,saver1+4(r4) ; Pick up the rupt time stack
877 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
878 stw r13,SAVact(r4) ; Point the savearea at its activation
879 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
880 beq cr2,ifromk
881 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
882
883 ifromk: bne .L_istackfree ; Nope...
884
885 /* We're already on the interrupt stack, get back the old
886 * stack pointer and make room for a frame
887 */
888
889 lwz r10,PP_INTSTACK_TOP_SS(r25) ; Get the top of the interrupt stack
890 addi r5,r9,INTSTACK_SIZE-FM_SIZE ; Shift stack for bounds check
891 subi r1,r9,FM_REDZONE ; Back up beyond the red zone
892 sub r5,r5,r10 ; Get displacement into stack
893 cmplwi r5,INTSTACK_SIZE-FM_SIZE ; Is the stack actually invalid?
894 blt+ ihsetback ; The stack is ok...
895
896 lwz r5,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
897 subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
898 sub r5,r1,r5 ; Get displacement into debug stack
899 cmplwi cr2,r5,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
900 blt+ cr2,ihsetback ; Yeah, that is ok too...
901
902 lis r0,hi16(Choke) ; Choke code
903 ori r0,r0,lo16(Choke) ; and the rest
904 li r3,failStack ; Bad stack code
905 sc ; System ABEND
906
907 intUnalignedStk:
908 lis r0,hi16(Choke) ; Choke code
909 ori r0,r0,lo16(Choke) ; and the rest
910 li r3,failUnalignedStk ; Unaligned stack code
911 sc ; System ABEND
912
913 .align 5
914
915 .L_istackfree:
916 rlwinm. r0,r1,0,28,31 ; Check if stack is aligned (and get 0)
917 lwz r10,SAVflags(r4) ; Get savearea flags
918 bne-- intUnalignedStk ; Stack is unaligned...
919 stw r0,PP_ISTACKPTR(r25) ; Mark the stack in use
920 oris r10,r10,hi16(SAVrststk) ; Indicate we reset stack when we return from this one
921 stw r10,SAVflags(r4) ; Stick it back
922
923 /*
924 * To summarize, when we reach here, the state has been saved and
925 * the stack is marked as busy. We now generate a small
926 * stack frame with backpointers to follow the calling
927 * conventions. We set up the backpointers to the trapped
928 * routine allowing us to backtrace.
929 */
930
931 ihsetback: subi r1,r1,FM_SIZE ; Make a new frame
932 stw r9,FM_BACKPTR(r1) ; Point back to previous stackptr
933
934 #if VERIFYSAVE
935 beq- cr1,ihbootnover ; (TEST/DEBUG)
936 bl versave ; (TEST/DEBUG)
937 ihbootnover: ; (TEST/DEBUG)
938 #endif
939
940 #if DEBUG
941 /* If debugging, we need two frames, the first being a dummy
942 * which links back to the trapped routine. The second is
943 * that which the C routine below will need
944 */
945 lwz r5,savesrr0+4(r4) ; Get interrupt address
946 stw r5,FM_LR_SAVE(r1) ; save old instr ptr as LR value
947 stwu r1,-FM_SIZE(r1) ; Make another new frame for C routine
948 #endif /* DEBUG */
949
950 mr r31,r3
951 mr r30,r4
952
953 lwz r3,SAVtime+4(r4)
954 addi r4,r13,SYSTEM_TIMER
955 bl EXT(timer_event)
956
957 mr r3,r31
958 mr r4,r30
959 lwz r5,savedsisr(r30) ; Get the DSISR
960 lwz r6,savedar+4(r30) ; Get the DAR
961
962 #if FPFLOOD
963 stfd f31,emfp31(r25) ; (TEST/DEBUG)
964 #endif
965
966 bl EXT(interrupt)
967
968
969 /* interrupt() returns a pointer to the saved state in r3
970 *
971 * Ok, back from C. Disable interrupts while we restore things
972 */
973 .globl EXT(ihandler_ret)
974
975 LEXT(ihandler_ret) ; Marks our return point from debugger entry
976
977 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
978 mfmsr r0 ; Get our MSR
979 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
980 andc r0,r0,r10 ; Turn off VEC, FP, and EE
981 mtmsr r0 ; Make sure interrupts are disabled
982 mfsprg r8,1 ; Get the current activation
983 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
984
985 lwz r7,SAVflags(r3) ; Pick up the flags
986 lwz r9,SAVprev+4(r3) ; Get previous save area
987 cmplwi cr1,r8,0 ; Are we still initializing?
988 lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return
989 andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack?
990 stw r9,ACT_MACT_PCB(r8) ; Point to previous context savearea
991 mr r4,r3 ; Move the savearea pointer
992 beq .L_no_int_ast2 ; Get going if not the top-o-stack...
993
994
995 /* We're the last frame on the stack. Restore istackptr to empty state.
996 *
997 * Check for ASTs if one of the below is true:
998 * returning to user mode
999 * returning to a kloaded server
1000 */
1001 lwz r9,PP_INTSTACK_TOP_SS(r10) ; Get the empty stack value
1002 andc r7,r7,r11 ; Remove the stack reset bit in case we pass this one
1003 stw r9,PP_ISTACKPTR(r10) ; Save that saved state ptr
1004 lwz r3,ACT_PREEMPT_CNT(r8) ; Get preemption level
1005 stw r7,SAVflags(r4) ; Save the flags
1006 cmplwi r3, 0 ; Check for preemption
1007 bne .L_no_int_ast ; Do not preempt if level is not zero
1008 andi. r6,r12,MASK(MSR_PR) ; privilege mode
1009 lwz r11,PP_PENDING_AST(r10) ; Get the pending AST mask
1010 beq- .L_kernel_int_ast ; In kernel space, AST_URGENT check
1011 li r3,T_AST ; Assume the worst
1012 mr. r11,r11 ; Are there any pending?
1013 beq .L_no_int_ast ; Nope...
1014 b .L_call_thandler
1015
1016 .L_kernel_int_ast:
1017 andi. r11,r11,AST_URGENT ; Do we have AST_URGENT?
1018 li r3,T_PREEMPT ; Assume the worst
1019 beq .L_no_int_ast ; Nope...
1020
1021 /*
1022 * There is a pending AST. Massage things to make it look like
1023 * we took a trap and jump into the trap handler. To do this
1024 * we essentially pretend to return from the interrupt but
1025 * at the last minute jump into the trap handler with an AST
1026 * trap instead of performing an rfi.
1027 */
1028
1029 .L_call_thandler:
1030 stw r3,saveexception(r4) ; Set the exception code to T_AST/T_PREEMPT
1031 b EXT(thandler) ; We need to preempt so treat like a trap...
1032
1033 .L_no_int_ast:
1034 mr r3,r4 ; Get into the right register for common code
1035
1036 .L_no_int_ast2:
1037 rlwinm r7,r7,0,15,13 ; Clear the syscall flag
1038 li r4,0 ; Assume for a moment that we are in init
1039 stw r7,SAVflags(r3) ; Set the flags with cleared syscall flag
1040 beq-- cr1,chkfac ; Jump away if we are in init...
1041
1042 lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker
1043
1044
1045 ;
1046 ; This section is common to all exception exits. It throws away vector
1047 ; and floating point saveareas as the exception level of a thread is
1048 ; exited.
1049 ;
1050 ; It also enables the facility if its context is live
1051 ; Requires:
1052 ; R3 = Savearea to be released (virtual)
1053 ; R4 = New top of savearea stack (could be 0)
1054 ; R8 = pointer to activation
1055 ; R10 = per_proc block
1056 ;
1057 ; Note that barring unforeseen crashes, there is no escape from this point
1058 ; on. We WILL call exception_exit and launch this context. No worries
1059 ; about preemption or interruptions here.
1060 ;
1061 ; Note that we will set up R26 with whatever context we will be launching,
1062 ; so it will indicate the current, or the deferred one if it is set and we
1063 ; are going to user state. CR2_eq will be set to indicate deferred.
1064 ;
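; For illustration, the syscall return path above arrives here with R3 set to
; the savearea being released, R4 to the previous savearea popped off the PCB
; chain, R8 to the activation (from sprg1), and R10 to its per_proc block.
;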
1065
1066 chkfac: lwz r29,savesrr1+4(r3) ; Get the current MSR
1067 mr. r28,r8 ; Are we still in boot?
1068 mr r31,r10 ; Move per_proc address
1069 mr r30,r4 ; Preserve new level
1070 mr r27,r3 ; Save the old level
1071 beq-- chkenax ; Yeah, skip it all...
1072
1073 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going into user state?
1074
1075 lwz r20,curctx(r28) ; Get our current context
1076 lwz r26,deferctx(r28) ; Get any deferred context switch
1077 li r0,1 ; Get set to hold off quickfret
1078 rlwinm r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now
1079 lwz r21,FPUlevel(r20) ; Get the facility level
1080 cmplwi cr2,r26,0 ; Are we going into a deferred context later?
1081 rlwinm r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now
1082 crnor cr2_eq,cr0_eq,cr2_eq ; Set cr2_eq if going to user state and there is deferred
1083 lhz r19,PP_CPU_NUMBER(r31) ; Get our CPU number
1084 cmplw r27,r21 ; Are we returning from the active level?
1085 stw r0,holdQFret(r31) ; Make sure we hold off releasing quickfret
1086 bne++ fpuchkena ; Nope...
1087
1088 ;
1089 ; First clean up any live context we are returning from
1090 ;
1091
1092 lwz r22,FPUcpu(r20) ; Get CPU this context was last dispatched on
1093
1094 stw r19,FPUcpu(r20) ; Claim context for us
1095
1096 eieio ; Make sure this gets out before owner clear
1097
1098 #if ppeSize != 16
1099 #error per_proc_entry is not 16 bytes in size
1100 #endif
1101
1102 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1103 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1104 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1105 li r24,FPUowner ; Displacement to float owner
1106 add r22,r23,r22 ; Point to the owner per_proc_entry
1107 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1108
1109 fpuinvothr: lwarx r23,r24,r22 ; Get the owner
1110
1111 sub r0,r23,r20 ; Subtract one from the other
1112 sub r21,r20,r23 ; Subtract the other from the one
1113 or r21,r21,r0 ; Combine them
1114 srawi r21,r21,31 ; Get a 0 if equal or -1 if not
1115 and r23,r23,r21 ; Make 0 if same, unchanged if not
1116 stwcx. r23,r24,r22 ; Try to invalidate it
1117 bne-- fpuinvothr ; Try again if there was a collision...
1118
1119 isync
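; (The lwarx/stwcx. sequence above is a branch-free compare-and-clear:
; (a - b) | (b - a) has its sign bit set exactly when a != b, so the
; arithmetic shift yields a mask of all ones (owner left unchanged) or zero
; (owner cleared), clearing FPUowner only if it still names the context we
; are popping.)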
1120
1121 ;
1122 ; Now if there is a savearea associated with the popped context, release it.
1123 ; Either way, pop the level to the top stacked context.
1124 ;
1125
1126 lwz r22,FPUsave(r20) ; Get pointer to the first savearea
1127 li r21,0 ; Assume we popped all the way out
1128 mr. r22,r22 ; Is there anything there?
1129 beq++ fpusetlvl ; No, see if we need to enable...
1130
1131 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1132 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1133 bne fpusetlvl ; No, leave as is...
1134
1135 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1136 li r21,0 ; Assume we popped all the way out
1137 mr. r24,r24 ; Any more context stacked?
1138 beq-- fpuonlyone ; Nope...
1139 lwz r21,SAVlevel(r24) ; Get the level associated with save
1140
1141 fpuonlyone: stw r24,FPUsave(r20) ; Dequeue this savearea
1142
1143 rlwinm r3,r22,0,0,19 ; Find main savearea header
1144
1145 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1146 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1147 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1148 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1149 stw r8,SAVprev(r22) ; Link the old in (top)
1150 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1151 xor r3,r22,r3 ; Convert to physical
1152 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1153 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1154
1155 #if FPVECDBG
1156 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1157 li r2,0x3301 ; (TEST/DEBUG)
1158 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1159 sc ; (TEST/DEBUG)
1160 #endif
1161
1162 fpusetlvl: stw r21,FPUlevel(r20) ; Save the level
1163
1164 ;
1165 ; Here we check if we are at the right level
1166 ; We need to check the level we are entering, not the one we are exiting.
1167 ; Therefore, we will use the defer level if it is non-zero and we are
1168 ; going into user state.
1169 ;
1170
1171 fpuchkena: bt-- cr2_eq,fpuhasdfrd ; Skip if deferred, R26 already set up...
1172 mr r26,r20 ; Use the non-deferred value
1173
1174 fpuhasdfrd:
1175 #if 0
1176 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; (TEST/DEBUG) Going into user state?
1177 beq fpunusrstt ; (TEST/DEBUG) Nope...
1178 lwz r23,FPUlevel(r26) ; (TEST/DEBUG) Get the level ID
1179 lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
1180 mr. r23,r23 ; (TEST/DEBUG) Should be level 0
1181 beq++ fpulvl0 ; (TEST/DEBUG) Yes...
1182
1183 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1184 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1185 sc ; (TEST/DEBUG) System ABEND
1186
1187 fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
1188 beq fpunusrstt ; (TEST/DEBUG) No...
1189 lwz r23,SAVlevel(r24) ; (TEST/DEBUG) Get level of context
1190 lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
1191 mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
1192 beq++ fpulvl0b ; (TEST/DEBUG) Yes...
1193
1194 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1195 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1196 sc ; (TEST/DEBUG) System ABEND
1197
1198 fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
1199 beq++ fpunusrstt ; (TEST/DEBUG) Nope...
1200
1201 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1202 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1203 sc ; (TEST/DEBUG) System ABEND
1204
1205 fpunusrstt: ; (TEST/DEBUG)
1206 #endif
1207
1208 lwz r21,FPUowner(r31) ; Get the ID of the live context
1209 lwz r23,FPUlevel(r26) ; Get the level ID
1210 lwz r24,FPUcpu(r26) ; Get the CPU that the context was last dispatched on
1211 cmplw cr3,r26,r21 ; Do we have the live context?
1212 cmplw r30,r23 ; Are we about to launch the live level?
1213 bne-- cr3,chkvec ; No, can not possibly enable...
1214 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1215 bne-- chkvec ; No, not live...
1216 bne-- cr1,chkvec ; No, wrong cpu, have to enable later....
1217
1218 lwz r24,FPUsave(r26) ; Get the first savearea
1219 mr. r24,r24 ; Any savearea?
1220 beq++ fpuena ; Nope...
1221 lwz r25,SAVlevel(r24) ; Get the level of savearea
1222 lwz r0,SAVprev+4(r24) ; Get the previous
1223
1224 cmplw r30,r25 ; Is savearea for the level we are launching?
1225 bne++ fpuena ; No, just go enable...
1226
1227 stw r0,FPUsave(r26) ; Pop the chain
1228
1229 rlwinm r3,r24,0,0,19 ; Find main savearea header
1230
1231 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1232 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1233 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1234 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1235 stw r8,SAVprev(r24) ; Link the old in (top)
1236 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1237 xor r3,r24,r3 ; Convert to physical
1238 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1239 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1240
1241 #if FPVECDBG
1242 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1243 li r2,0x3302 ; (TEST/DEBUG)
1244 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1245 sc ; (TEST/DEBUG)
1246 #endif
1247
1248 fpuena: ori r29,r29,lo16(MASK(MSR_FP)) ; Enable facility
1249
1250 chkvec:
1251
1252 lwz r21,VMXlevel(r20) ; Get the facility level
1253
1254 cmplw r27,r21 ; Are we returning from the active level?
1255 bne+ vmxchkena ; Nope...
1256
1257
1258 ;
1259 ; First clean up any live context we are returning from
1260 ;
1261
1262 lwz r22,VMXcpu(r20) ; Get CPU this context was last dispatched on
1263
1264 stw r19,VMXcpu(r20) ; Claim context for us
1265
1266 eieio ; Make sure this gets out before owner clear
1267
1268 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1269 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1270 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1271 li r24,VMXowner ; Displacement to vector owner
1272 add r22,r23,r22 ; Point to the owner per_proc_entry
1273 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1274
1275 vmxinvothr: lwarx r23,r24,r22 ; Get the owner
1276
1277 sub r0,r23,r20 ; Subtract one from the other
1278 sub r21,r20,r23 ; Subtract the other from the one
1279 or r21,r21,r0 ; Combine them
1280 srawi r21,r21,31 ; Get a 0 if equal or -1 if not
1281 and r23,r23,r21 ; Make 0 if same, unchanged if not
1282 stwcx. r23,r24,r22 ; Try to invalidate it
1283 bne-- vmxinvothr ; Try again if there was a collision...
1284
1285 isync
1286
1287 ;
1288 ; Now if there is a savearea associated with the popped context, release it.
1289 ; Either way, pop the level to the top stacked context.
1290 ;
1291
1292 lwz r22,VMXsave(r20) ; Get pointer to the first savearea
1293 li r21,0 ; Assume we popped all the way out
1294 mr. r22,r22 ; Is there anything there?
1295 beq++ vmxsetlvl ; No, see if we need to enable...
1296
1297 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1298 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1299 bne vmxsetlvl ; No, leave as is...
1300
1301 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1302 li r21,0 ; Assume we popped all the way out
1303 mr. r24,r24 ; Any more context?
1304 beq-- vmxonlyone ; Nope...
1305 lwz r21,SAVlevel(r24) ; Get the level associated with save
1306
1307 vmxonlyone: stw r24,VMXsave(r20) ; Dequeue this savearea
1308
1309 rlwinm r3,r22,0,0,19 ; Find main savearea header
1310
1311 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1312 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1313 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1314 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1315 stw r8,SAVprev(r22) ; Link the old in (top)
1316 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1317 xor r3,r22,r3 ; Convert to physical
1318 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1319 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1320
1321 #if FPVECDBG
1322 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1323 li r2,0x3401 ; (TEST/DEBUG)
1324 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1325 sc ; (TEST/DEBUG)
1326 #endif
1327
1328 vmxsetlvl: stw r21,VMXlevel(r20) ; Save the level
1329
1330 ;
1331 ; Here we check if we are at the right level
1332 ;
1333
1334 vmxchkena: lwz r21,VMXowner(r31) ; Get the ID of the live context
1335 lwz r23,VMXlevel(r26) ; Get the level ID
1336 cmplw r26,r21 ; Do we have the live context?
1337 lwz r24,VMXcpu(r26) ; Get the CPU that the context was last dispatched on
1338 bne-- setena ; No, can not possibly enable...
1339 cmplw r30,r23 ; Are we about to launch the live level?
1340 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1341 bne-- setena ; No, not live...
1342 bne-- cr1,setena ; No, wrong cpu, have to enable later....
1343
1344 lwz r24,VMXsave(r26) ; Get the first savearea
1345 mr. r24,r24 ; Any savearea?
1346 beq++ vmxena ; Nope...
1347 lwz r25,SAVlevel(r24) ; Get the level of savearea
1348 lwz r0,SAVprev+4(r24) ; Get the previous
1349 cmplw r30,r25 ; Is savearea for the level we are launching?
1350 bne++ vmxena ; No, just go enable...
1351
1352 stw r0,VMXsave(r26) ; Pop the chain
1353
1354 rlwinm r3,r24,0,0,19 ; Find main savearea header
1355
1356 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1357 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1358 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1359 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1360 stw r8,SAVprev(r24) ; Link the old in (top)
1361 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1362 xor r3,r24,r3 ; Convert to physical
1363 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1364 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1365
1366 #if FPVECDBG
1367 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1368 li r2,0x3402 ; (TEST/DEBUG)
1369 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1370 sc ; (TEST/DEBUG)
1371 #endif
1372
1373 vmxena: oris r29,r29,hi16(MASK(MSR_VEC)) ; Enable facility
1374
1375 setena: lwz r18,umwSpace(r28) ; Get the space ID in case we are launching user
1376 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state?
1377 li r0,0 ; Get set to release quickfret holdoff
1378 crmove cr7_eq,cr0_eq ; Remember if we are going to user state
1379 rlwimi. r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats
1380 lwz r19,deferctx(r28) ; Get any deferred facility context switch
1381 rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector
1382 stw r29,savesrr1+4(r27) ; Turn facility on or off
1383 stw r0,holdQFret(r31) ; Release quickfret
1384 oris r18,r18,hi16(umwSwitchAway) ; Set the switch-away bit in case we go to user
1385
1386 beq setenaa ; Neither float nor vector turned on....
1387
1388 lwz r5,ACT_MACT_SPF(r28) ; Get activation copy
1389 lwz r6,spcFlags(r31) ; Get per_proc copy
1390 or r5,r5,r20 ; Set vector/float changed bits in activation
1391 or r6,r6,r20 ; Set vector/float changed bits in per_proc
1392 stw r5,ACT_MACT_SPF(r28) ; Set activation copy
1393 stw r6,spcFlags(r31) ; Set per_proc copy
1394
1395 setenaa: mfdec r24 ; Get decrementer
1396 bf+ cr2_eq,nodefer ; No deferred to switch to...
1397
1398 li r20,0 ; Clear this
1399 stw r26,curctx(r28) ; Make the facility context current
1400 stw r20,deferctx(r28) ; Clear deferred context
1401
1402 nodefer: lwz r22,qactTimer(r28) ; Get high order quick activation timer
1403 mr. r24,r24 ; See if it has popped already...
1404 lwz r23,qactTimer+4(r28) ; Get low order qact timer
1405 ble- chkifuser ; We have popped or are just about to...
1406
1407 segtb: mftbu r20 ; Get the upper time base
1408 mftb r21 ; Get the low
1409 mftbu r19 ; Get upper again
1410 or. r0,r22,r23 ; Any time set?
1411 cmplw cr1,r20,r19 ; Did they change?
1412 beq++ chkifuser ; No time set....
1413 bne-- cr1,segtb ; Timebase ticked, get them again...
1414
1415 subfc r6,r21,r23 ; Subtract current from qact time
1416 li r0,0 ; Make a 0
1417 subfe r5,r20,r22 ; Finish subtract
1418 subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise
1419 andc. r12,r5,r0 ; Set 0 if qact has passed
1420 andc r13,r6,r0 ; Set 0 if qact has passed
1421 bne chkifuser ; If high order is non-zero, this is too big for a decrementer
1422 cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on)
1423 bge++ chkifuser ; No, do not reset decrementer...
1424
1425 mtdec r13 ; Set our value
1426
1427 chkifuser: addi r4,r28,SYSTEM_TIMER
1428 mftb r3
1429 beq-- cr7,chkifuser1 ; Skip this if we are going to kernel...
1430 stw r18,umwSpace(r28) ; Half-invalidate to force MapUserAddressWindow to reload SRs
1431 addi r4,r28,USER_TIMER
1432
1433 chkifuser1: bl EXT(timer_event)
1434
1435 chkenax:
1436
1437 #if DEBUG
1438 lwz r20,SAVact(r27) ; (TEST/DEBUG) Make sure our restore
1439 mfsprg r21, 1 ; (TEST/DEBUG) with the current act.
1440 cmpwi r21,0 ; (TEST/DEBUG)
1441 beq-- yeswereok ; (TEST/DEBUG)
1442 cmplw r21,r20 ; (TEST/DEBUG)
1443 beq++ yeswereok ; (TEST/DEBUG)
1444
1445 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1446 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1447 mr r21,r27 ; (TEST/DEBUG) Save the savearea address
1448 li r3,failContext ; (TEST/DEBUG) Bad state code
1449 sc ; (TEST/DEBUG) System ABEND
1450
1451 yeswereok:
1452 #endif
1453
1454 mr r3,r27 ; Pass savearea back
1455 b EXT(exception_exit) ; We are all done now...
1456
1457
1458
1459 ;
1460 ; Null PPC call - performance testing, does absolutely nothing
1461 ;
1462
1463 .align 5
1464
1465 .globl EXT(ppcNull)
1466
1467 LEXT(ppcNull)
1468
1469 li r3,-1 ; Make sure we test no asts
1470 blr
1471
1472
1473 ;
1474 ; Instrumented null PPC call - performance testing, does absolutely nothing
1475 ; Forces various timestamps to be returned.
1476 ;
1477
1478 .align 5
1479
1480 .globl EXT(ppcNullinst)
1481
1482 LEXT(ppcNullinst)
1483
1484 li r3,-1 ; Make sure we test no asts
1485 blr
1486
1487
1488 /*
1489 * Here's where we handle the fastpath stuff
1490 * We'll do what we can here because registers are already
1491 * loaded and it will be less confusing than moving them around.
1492 * If we need to, though, we'll branch off somewhere else.
1493 *
1494 * Registers when we get here:
1495 *
1496 * r0 = syscall number
1497 * r4 = savearea/pcb
1498 * r13 = activation
1499 * r14 = previous savearea (if any)
1500 * r16 = thread
1501 * r25 = per_proc
1502 */
1503
1504 .align 5
1505
1506 fastpath: cmplwi cr3,r0,0x7FF5 ; Is this a null fastpath?
1507 beq-- cr3,fastexutl ; Yes, bail fast...
1508 cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber?
1509 bnelr-- cr3 ; Not a fast path...
1510
1511 /*
1512 * void cthread_set_self(cproc_t p)
1513 *
1514 * Sets thread state "user_value". In practice this is the thread-local-data-pointer (TLDP),
1515 * though we do not interpret it. This call is mostly used by 32-bit tasks, but we save all 64 bits
1516 * in case a 64-bit task wants to use this facility. They normally do not, because the 64-bit
1517 * ABI reserves r13 for the TLDP.
1518 *
1519 * This op is invoked as follows:
1520 * li r0, CthreadSetSelfNumber // load the fast-trap number
1521 * sc // invoke fast-trap
1522 * blr
1523 */
1524
1525 CthreadSetSelfNumber:
1526 lwz r3,saver3+0(r4) /* get the TLDP passed in r3 */
1527 lwz r5,saver3+4(r4) /* (all 64 bits, in case this is a 64-bit task) */
1528 stw r3,CTHREAD_SELF+0(r13) /* Remember it in the activation... */
1529 stw r5,CTHREAD_SELF+4(r13)
1530 stw r3,UAW+0(r25) /* ...and in the per-proc */
1531 stw r5,UAW+4(r25)
1532
1533
1534 .globl EXT(fastexit)
1535 EXT(fastexit):
1536 fastexutl: mr r3,r4 ; Pass back savearea
1537 b EXT(exception_exit) ; Go back to the caller...
1538
1539
1540 /*
1541 * Here's where we check for a hit on the Blue Box Assist
1542 * Most registers are non-volatile, so be careful here. If we don't
1543 * recognize the trap instruction we go back for regular processing.
1544 * Otherwise we transfer to the assist code.
1545 */
1546
1547 .align 5
1548
1549 checkassist:
1550 lwz r0,saveexception(r4) ; Get the exception code
1551 lwz r23,savesrr1+4(r4) ; Get the interrupted MSR
1552 lwz r26,ACT_MACT_BEDA(r13) ; Get Blue Box Descriptor Area
1553 mtcrf 0x18,r23 ; Check what SRR1 says
1554 lwz r24,ACT_MACT_BTS(r13) ; Get the table start
1555 cmplwi r0,T_AST ; Check for T_AST trap
1556 lwz r27,savesrr0+4(r4) ; Get trapped address
1557 crnand cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT ; We need both trap and user state
1558 sub r24,r27,r24 ; See how far into it we are
1559 cror cr0_eq,cr0_eq,cr1_eq ; Need to bail if AST or not trap or not user state
1560 cmplwi cr1,r24,BB_MAX_TRAP ; Do we fit in the list?
1561 cror cr0_eq,cr0_eq,cr1_gt ; Also leave if the trap is not in range
1562 btlr- cr0_eq ; No assist if AST or not trap or not user state or trap not in range
1563 b EXT(atomic_switch_trap) ; Go to the assist...
1564
1565 ;
1566 ; Virtual Machine Monitor
1567 ; Here is where we exit from the emulated context
1568 ; Note that most registers get trashed here
1569 ; R3 and R30 are preserved across the call and hold the activation
1570 ; and savearea, respectively.
1571 ;
1572
1573 .align 5
1574
1575 exitFromVM: mr r30,r4 ; Get the savearea
1576 mr r3,r13 ; Get the activation
1577
1578 b EXT(vmm_exit) ; Do it to it
1579
1580 .align 5
1581 .globl EXT(retFromVM)
1582
1583 LEXT(retFromVM)
1584 mfsprg r10,1 ; Get the current activation
1585 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1586 mr r8,r3 ; Get the activation
1587 lwz r4,SAVprev+4(r30) ; Pick up the previous savearea
1588 mr r3,r30 ; Put savearea in proper register for common code
1589 lwz r11,SAVflags(r30) ; Get the flags of the current savearea
1590 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
1591 mr r1,r8 ; Use the activation/thread to look up the kernel stack below
1592 stw r11,SAVflags(r3) ; Save back the flags (with the syscall flag cleared)
1593
1594 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
1595
1596 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
1597 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
1598 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
1599 b chkfac ; Go end it all...
1600
1601
1602 ;
1603 ; chandler (note: not a candle maker or tallow merchant)
1604 ;
1605 ; Here is the system choke handler. This is where the system goes
1606 ; to die.
1607 ;
1608 ; We get here as a result of a T_CHOKE exception which is generated
1609 ; by the Choke firmware call or by lowmem_vectors when it detects a
1610 ; fatal error. Examples of where this may be used are when we detect
1611 ; problems in low-level mapping chains, trashed savearea free chains,
1612 ; or stack guardpage violations.
1613 ;
1614 ; Note that we cannot set a back chain in the stack when we come
1615 ; here, since we are probably here because the chain was corrupt.
1616 ;
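;
; Illustrative sketch only: a fatal-error path elsewhere in the kernel raises this
; exception with the Choke firmware call, in the same style as the TEST/DEBUG
; check earlier in this file. failContext is borrowed from that check; the actual
; failure code depends on what went wrong.
;
; lis r0,hi16(Choke) ; Choke firmware call, high half
; ori r0,r0,lo16(Choke) ; and the low half
; li r3,failContext ; failure code describing the problem
; sc ; system ABEND; control ends up here in chandler
;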
1617
1618
1619 .align 5
1620 .globl EXT(chandler)
1621 LEXT(chandler) ; Choke handler
1622
1623 li r31,0 ; Get a 0
1624 mfsprg r25,1 ; Get the current activation
1625 lwz r25,ACT_PER_PROC(r25) ; Get the per_proc block
1626 stw r31,traceMask(0) ; Force tracing off right now
1627
1628
1629
1630 lwz r1,PP_DEBSTACKPTR(r25) ; Get debug stack pointer
1631 cmpwi r1,-1 ; Are we already choking?
1632 bne chokefirst ; Nope...
1633
1634 chokespin: addi r31,r31,1 ; Spin and hope for an analyzer connection...
1635 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1636 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1637 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1638 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1639 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1640 b chokespin ; Spin and hope for an analyzer connection...
1641
1642 chokefirst: li r0,-1 ; Set choke value
1643 mr. r1,r1 ; See if we are on debug stack yet
1644 lwz r10,saver1+4(r4) ; Get the stack pointer at the time of the choke
1645 stw r0,PP_DEBSTACKPTR(r25) ; Show we are choking
1646 bne chokestart ; We are not on the debug stack yet...
1647
1648 lwz r2,PP_DEBSTACK_TOP_SS(r25) ; Get debug stack top
1649 sub r11,r2,r10 ; Get stack depth
1650
1651 cmplwi r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED ; Check if stack pointer is ok
1652 bgt chokespin ; Bad stack pointer or too little left, just die...
1653
1654 subi r1,r10,FM_REDZONE ; Make a red zone
1655
1656 chokestart: li r0,0 ; Get a zero
1657 stw r0,FM_BACKPTR(r1) ; We now have terminated the back chain
1658
1659 bl EXT(SysChoked) ; Call the "C" phase of this
1660 b chokespin ; Should not be here so just go spin...
1661
1662
1663 #if VERIFYSAVE
1664 ;
1665 ; Savearea chain verification
1666 ;
1667
1668 versave:
1669 #if 0
1670 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1671 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1672 lwz r23,0(r22) ; (TEST/DEBUG)
1673 mr. r23,r23 ; (TEST/DEBUG)
1674 beqlr- ; (TEST/DEBUG)
1675 mfsprg r20,1 ; Get the current activation
1676 lwz r20,ACT_PER_PROC(r20) ; Get the per_proc block
1677 lwz r21,pfAvailable(r20) ; (TEST/DEBUG)
1678 mr. r21,r21 ; (TEST/DEBUG)
1679 bnelr+ ; (TEST/DEBUG)
1680
1681 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1682 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1683 #endif
1684
1685 #if 0
1686 ;; This code is broken and migration will make the matter even worse
1687 ;
1688 ; Make sure that all savearea chains have the right type on them
1689 ;
1690
1691 lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG)
1692 lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1693 ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG)
1694 ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1695 li r20,0 ; (TEST/DEBUG)
1696 lwz r26,0(r27) ; (TEST/DEBUG)
1697 lwz r27,psthreadcnt(r28) ; (TEST/DEBUG)
1698 mr. r26,r26 ; (TEST/DEBUG) Have we locked the test out?
1699 lwz r28,psthreads(r28) ; (TEST/DEBUG)
1700 mflr r31 ; (TEST/DEBUG) Save return
1701 bnelr- ; (TEST/DEBUG) Test already triggered, skip...
1702 b fckgo ; (TEST/DEBUG) Join up...
1703
1704 fcknext: mr. r27,r27 ; (TEST/DEBUG) Any more threads?
1705 bne+ fckxxx ; (TEST/DEBUG) Yes...
1706
1707 mtlr r31 ; (TEST/DEBUG) Restore return
1708 blr ; (TEST/DEBUG) Leave...
1709
1710 fckxxx: lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Get next thread
1711
1712 fckgo: subi r27,r27,1 ; (TEST/DEBUG) Decrement thread count
1713 lwz r24,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) Get activation for the thread
1714 lwz r20,ACT_MACT_PCB(r24) ; (TEST/DEBUG) Get the normal context
1715 li r21,SAVgeneral ; (TEST/DEBUG) Make sure this is all general context
1716 bl versavetype ; (TEST/DEBUG) Check the chain
1717
1718 lwz r20,facctx+FPUsave(r24) ; (TEST/DEBUG) Get regular floating point
1719 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1720 bl versavetype ; (TEST/DEBUG) Check the chain
1721
1722 lwz r20,facctx+VMXsave(r24) ; (TEST/DEBUG) Get regular vector context
1723 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1724 bl versavetype ; (TEST/DEBUG) Check the chain
1725
1726 lwz r29,vmmControl(r24) ; (TEST/DEBUG) Get the virtual machine control blocks
1727 mr. r29,r29 ; (TEST/DEBUG) Are there any?
1728 beq+ fcknext ; (TEST/DEBUG) Nope, next thread...
1729
1730 li r22,kVmmMaxContextsPerThread ; (TEST/DEBUG) Get the number of control blocks
1731 subi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get running start
1732
1733 fcknvmm: subi r22,r22,1 ; (TEST/DEBUG) Do all of them
1734 mr. r22,r22 ; (TEST/DEBUG) Are we all done?
1735 addi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get the next entry
1736 blt- fcknext ; (TEST/DEBUG) Yes, check next thread...
1737
1738 lwz r23,vmmFlags(r29) ; (TEST/DEBUG) Get entry flags
1739 rlwinm. r23,r23,0,0,0 ; (TEST/DEBUG) Is this in use?
1740 beq+ fcknvmm ; (TEST/DEBUG) Not in use...
1741
1742 lwz r20,vmmFacCtx+FPUsave(r29) ; (TEST/DEBUG) Get regular floating point
1743 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1744 bl versavetype ; (TEST/DEBUG) Check the chain
1745
1746 lwz r20,vmmFacCtx+VMXsave(r29) ; (TEST/DEBUG) Get regular vector context
1747 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1748 bl versavetype ; (TEST/DEBUG) Check the chain
1749 b fcknvmm ; (TEST/DEBUG) Get the next vmm block...
1750
1751 versavetype:
1752 mr. r20,r20 ; (TEST/DEBUG) Chain done?
1753 beqlr- ; (TEST/DEBUG) Yes...
1754
1755 lwz r23,SAVflags(r20) ; (TEST/DEBUG) Get the flags
1756 rlwinm r23,r23,24,24,31 ; (TEST/DEBUG) Position it
1757 cmplw r23,r21 ; (TEST/DEBUG) Are we the correct type?
1758 beq+ versvok ; (TEST/DEBUG) This one is ok...
1759
1760 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1761 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1762 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1763 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1764
1765 versvok: lwz r20,SAVprev+4(r20) ; (TEST/DEBUG) Get the previous one
1766 b versavetype ; (TEST/DEBUG) Go check its type...
1767 #endif
1768
1769
1770 #endif