[apple/xnu.git] / osfmk / ppc / hw_exception.s (xnu-792.25.20)
1 /*
2 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25
26 /* Low level routines dealing with exception entry and exit.
27 * There are various types of exception:
28 *
29  * Interrupt, trap, system call and debugger entry. Each has its own
30 * handler since the state save routine is different for each. The
31 * code is very similar (a lot of cut and paste).
32 *
33 * The code for the FPU disabled handler (lazy fpu) is in cswtch.s
34 */
35
36 #include <debug.h>
37 #include <mach_assert.h>
38 #include <mach/exception_types.h>
39 #include <mach/kern_return.h>
40 #include <mach/ppc/vm_param.h>
41
42 #include <assym.s>
43
44 #include <ppc/asm.h>
45 #include <ppc/proc_reg.h>
46 #include <ppc/trap.h>
47 #include <ppc/exception.h>
48 #include <ppc/savearea.h>
49
50
51 #define VERIFYSAVE 0
52 #define FPVECDBG 0
53 #define FPFLOOD 0
54 #define INSTRUMENT 0
55
56 /*
57 * thandler(type)
58 *
59 * ENTRY: VM switched ON
60 * Interrupts OFF
61 * R3 contains exception code
62 * R4 points to the saved context (virtual address)
63 * Everything is saved in savearea
64 */
65
66 /*
67 * If pcb.ksp == 0 then the kernel stack is already busy,
68 * we make a stack frame
69 * leaving enough space for the 'red zone' in case the
70 * trapped thread was in the middle of saving state below
71 * its stack pointer.
72 *
73  * otherwise we make a stack frame and claim
74  * the kernel stack (setting pcb.ksp to 0).
75  *
76  * On return, we do the reverse: the last state is popped from the pcb
77 * and pcb.ksp is set to the top of stack
78 */
79
80 /* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
81 * another trap is taken. We need at least enough space for a saved state
82 * structure plus two small backpointer frames, and we add a few
83  * hundred bytes for the space needed by the C code (which may be less but
84  * may be much more). We're trying to catch kernel stack overflows :-)
85 */
86
87 #define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256
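/*
 * Illustrative sketch (not part of the original source): the stack-claiming
 * convention and headroom constant described above, rendered in C.  The
 * helper name is an assumption for illustration; only TRAP_SPACE_NEEDED's
 * formula comes from the define above.
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     // pcb.ksp == 0 means the kernel stack is already in use; the handler
 *     // then builds its frame below the interrupted SP, leaving a red zone.
 *     // Otherwise it claims the stack by zeroing pcb.ksp.
 *     static bool trap_headroom_ok(uintptr_t sp, uintptr_t stack_base)
 *     {
 *         return (sp - stack_base) >= (uintptr_t)TRAP_SPACE_NEEDED;
 *     }
 */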
88
89 .text
90
91 .align 5
92 .globl EXT(thandler)
93 LEXT(thandler) ; Trap handler
94
95 mfsprg r13,1 ; Get the current activation
96 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
97
98 lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
99
100 cmpwi cr0,r1,0 ; Are we on interrupt stack?
101 mr r6,r13
102 beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
103 lwz r26,ACT_MACT_SPF(r13) ; Get special flags
104 lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used
105 rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active?
106 lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack
107 bnel- checkassist ; See if we should assist this
108 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
109 stw r8,SAVprev+4(r4) ; Queue the new save area in the front
110
111 #if VERIFYSAVE
112 bl versave ; (TEST/DEBUG)
113 #endif
114
115 lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start
116 cmpwi cr1,r1,0 ; Are we already on kernel stack?
117 stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation
118 lwz r26,saver1+4(r4) ; Get the stack at interrupt time
119
120 bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
121
122 subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
123
124 .L_kstackfree:
125 lwz r31,savesrr1+4(r4) ; Pick up the entry MSR
126 			sub		r9,r1,r9					; Get displacement into the kernel stack
127 li r0,0 ; Make this 0
128 rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
129 cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
130 beq cr1,.L_state_on_kstack ; using above test for pcb/stack
131
132 stw r0,ACT_MACT_KSP(r13) ; Show that we have taken the stack
133
134 .L_state_on_kstack:
135 lwz r9,savevrsave(r4) ; Get the VRSAVE register
136 bne-- kernelStackUnaligned ; Stack is unaligned...
137 rlwinm. r6,r31,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
138 subi r1,r1,FM_SIZE ; Push a header onto the current stack
139 bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
140
141 kernelStackNotBad: ; Vector was off
142 beq++ tvecoff ; Vector off, do not save vrsave...
143 stw r9,liveVRS(r25) ; Set the live value
144
145 tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
146
147 #if DEBUG
148 /* If debugging, we need two frames, the first being a dummy
149 * which links back to the trapped routine. The second is
150 * that which the C routine below will need
151 */
152 lwz r3,savesrr0+4(r4) ; Get the point of interruption
153 stw r3,FM_LR_SAVE(r1) ; save old instr ptr as LR value
154 stwu r1, -FM_SIZE(r1) ; and make new frame
155 #endif /* DEBUG */
156
157 mr r30,r4
158 lwz r3,SAVtime+4(r4)
159 addi r4,r13,SYSTEM_TIMER
160 bl EXT(timer_event)
161
162 /* call trap handler proper, with
163 * ARG0 = type
164 * ARG1 = saved_state ptr
165 * ARG2 = dsisr
166 * ARG3 = dar
167 */
168
169 mr r4,r30
170 lwz r3,saveexception(r30) ; Get the exception code
171 lwz r0,ACT_MACT_SPF(r13) ; Get the special flags
172
173 addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range
174 rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes)
175 cmplwi cr2,r5,T_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not)
176
177 lwz r5,savedsisr(r4) ; Get the saved DSISR
178
179 			crnor	cr7_eq,cr0_eq,cr2_gt				; We should intercept if in VM and this is a true trap (cr7_eq == 1 if yes)
180 rlwinm. r0,r31,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
181
182 cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
183
184 beq-- .L_check_VM
185 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
186 .L_check_VM:
187
188 crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)
189
190 lwz r6,savedar(r4) ; Get the DAR (top)
191 lwz r7,savedar+4(r4) ; Get the DAR (bottom)
192
193 beq- cr2,.L_call_trap ; Do not turn on interrupts for T_PREEMPT
194 beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM...
195
196 /* syscall exception might warp here if there's nothing left
197 * to do except generate a trap
198 */
199
200 .L_call_trap:
201
202 #if FPFLOOD
203 stfd f31,emfp31(r25) ; (TEST/DEBUG)
204 #endif
205
206 bl EXT(trap)
207
208 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
209 mfmsr r7 ; Get the MSR
210 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
211 andc r7,r7,r10 ; Turn off VEC, FP, and EE
212 mtmsr r7 ; Disable for interrupts
213 mfsprg r8,1 ; Get the current activation
214 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
215 /*
216 * This is also the point where new threads come when they are created.
217 * The new thread is setup to look like a thread that took an
218 * interrupt and went immediatly into trap.
219 */
220
221 thread_return:
222 lwz r11,SAVflags(r3) ; Get the flags of the current savearea
223 lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
224 lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
225 mfsprg r8,1 ; Get the current thread
226 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
227 rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
228 mr r1,r8
229 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
230
231 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
232 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
233 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
234
235 beq-- chkfac ; We are not leaving the kernel yet...
236
237 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
238 b chkfac ; Go end it all...
239
240
241 ;
242 ; Here is where we go when we detect that the kernel stack is all messed up.
243 ; We just try to dump some info and get into the debugger.
244 ;
245
246 kernelStackBad:
247
248 lwz r3,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
249 subi r3,r3,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
250 sub r3,r1,r3 ; Get displacement into debug stack
251 cmplwi cr2,r3,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
252 blt+ cr2,kernelStackNotBad ; Yeah, that is ok too...
253
254 lis r0,hi16(Choke) ; Choke code
255 ori r0,r0,lo16(Choke) ; and the rest
256 li r3,failStack ; Bad stack code
257 sc ; System ABEND
258
259 kernelStackUnaligned:
260 lis r0,hi16(Choke) ; Choke code
261 ori r0,r0,lo16(Choke) ; and the rest
262 li r3,failUnalignedStk ; Unaligned stack code
263 sc ; System ABEND
264
265
266 /*
267 * shandler(type)
268 *
269 * ENTRY: VM switched ON
270 * Interrupts OFF
271 * R3 contains exception code
272 * R4 points to the saved context (virtual address)
273 * Everything is saved in savearea
274 */
275
276 /*
277 * If pcb.ksp == 0 then the kernel stack is already busy,
278 * this is an error - jump to the debugger entry
279 *
280 * otherwise depending upon the type of
281 * syscall, look it up in the kernel table
282 * or pass it to the server.
283 *
284 * on return, we do the reverse, the state is popped from the pcb
285 * and pcb.ksp is set to the top of stack.
286 */
287
288 /*
289 * NOTE:
290 * mach system calls are negative
291 * BSD system calls are low positive
292 * PPC-only system calls are in the range 0x6xxx
293 * PPC-only "fast" traps are in the range 0x7xxx
294 */
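/*
 * Illustrative sketch (not part of the original source): the number ranges
 * in the NOTE above, written out in C.  The masks mirror the checks the
 * handler makes below (r0 & ~0xFFF compared with 0x7000 and 0x6000); the
 * function and enum names are hypothetical.
 *
 *     #include <stdint.h>
 *
 *     enum sc_kind { SC_FAST_TRAP, SC_PPC_ONLY, SC_MACH, SC_BSD };
 *
 *     static enum sc_kind classify_syscall(int32_t r0)
 *     {
 *         if (((uint32_t)r0 & 0xFFFFF000u) == 0x00007000u)
 *             return SC_FAST_TRAP;               // PPC-only "fast" traps
 *         if (((uint32_t)r0 & 0xFFFFF000u) == 0x00006000u)
 *             return SC_PPC_ONLY;                // PPC-only system calls
 *         if (r0 < 0)
 *             return SC_MACH;                    // Mach calls are negative
 *         return SC_BSD;                         // BSD calls are low positive
 *     }
 */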
295
296 .align 5
297 .globl EXT(shandler)
298 LEXT(shandler) ; System call handler
299
300 lwz r7,savesrr1+4(r4) ; Get the SRR1 value
301 mfsprg r13,1 ; Get the current activation
302 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
303 lwz r0,saver0+4(r4) ; Get the original syscall number
304 lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
305 rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check
306 mr. r17,r17 ; Are we on interrupt stack?
307 lwz r9,savevrsave(r4) ; Get the VRsave register
308 beq-- EXT(ihandler) ; On interrupt stack, not allowed...
309 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
310 mr r16,r13
311
312 beq++ svecoff ; Vector off, do not save vrsave...
313 stw r9,liveVRS(r25) ; Set the live value
314 ;
315 ; Check if SCs are being redirected for the BlueBox or to VMM
316 ;
317
318 svecoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags
319 mtcrf 0x40,r6 ; Check special flags
320 mtcrf 0x01,r6 ; Check special flags
321 crmove cr6_eq,runningVMbit ; Remember if we are in VMM
322 bne++ cr6,sVMchecked ; Not running VM
323 lwz r18,spcFlags(r25) ; Load per_proc special flags
324 rlwinm. r18,r18,0,FamVMmodebit,FamVMmodebit ; Is FamVMmodebit set?
325 beq sVMchecked ; Not in FAM
326 			cmpwi	r0,0x6004					; Is it the vmm_dispatch syscall?
327 bne sVMchecked
328 			lwz		r26,saver3+4(r4)			; Get the vmm_dispatch selector (first argument)
329 cmpwi cr6,r26,kvmmExitToHost ; vmm_exit_to_host request
330 sVMchecked:
331 bf++ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected
332 lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area
333 b EXT(atomic_switch_syscall) ; Go to the assist...
334
335 noassist: cmplwi r15,0x7000 ; Do we have a fast path trap?
336 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
337 beql fastpath ; We think it is a fastpath...
338
339 lwz r1,ACT_MACT_KSP(r13) ; Get the kernel stack pointer
340 #if DEBUG
341 mr. r1,r1 ; Are we already on the kernel stack?
342 li r3,T_SYSTEM_CALL ; Yup, pretend we had an interrupt...
343 beq- EXT(ihandler) ; Bad boy, bad boy... What cha gonna do when they come for you?
344 #endif /* DEBUG */
345
346 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
347 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
348 li r0,0 ; Clear this out
349 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
350 stw r13,SAVact(r4) ; Point the savearea at its activation
351
352 #if VERIFYSAVE
353 bl versave ; (TEST/DEBUG)
354 #endif
355
356 lwz r15,saver1+4(r4) ; Grab interrupt time stack
357 mr r30,r4 ; Save pointer to the new context savearea
358 stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val
359 stw r15,FM_BACKPTR(r1) ; Link stack frame backwards
360
361 lwz r3,SAVtime+4(r30)
362 addi r4,r13,SYSTEM_TIMER
363 bl EXT(timer_event)
364
365 #if DEBUG
366 /* If debugging, we need two frames, the first being a dummy
367 * which links back to the trapped routine. The second is
368 * that which the C routine below will need
369 */
370 lwz r8,savesrr0+4(r30) ; Get the point of interruption
371 stw r8,FM_LR_SAVE(r1) ; Save old instr ptr as LR value
372 stwu r1, -FM_SIZE(r1) ; and make new frame
373 #endif /* DEBUG */
374
375 mr r4,r30
376
377 lwz r15,SAVflags(r30) ; Get the savearea flags
378 lwz r0,saver0+4(r30) ; Get R0 back
379 mfmsr r11 ; Get the MSR
380 stwu r1,-(FM_SIZE+ARG_SIZE+MUNGE_ARGS_SIZE)(r1) ; Make a stack frame
381 ori r11,r11,lo16(MASK(MSR_EE)) ; Turn on interruption enabled bit
382 rlwinm r10,r0,0,0,19 ; Keep only the top part
383 			oris	r15,r15,SAVsyscall >> 16	; Mark that this is a syscall
384 cmplwi r10,0x6000 ; Is it the special ppc-only guy?
385 stw r15,SAVflags(r30) ; Save syscall marker
386 beq-- cr6,exitFromVM ; It is time to exit from alternate context...
387
388 beq-- ppcscall ; Call the ppc-only system call handler...
389
390 mr. r0,r0 ; What kind is it?
391 mtmsr r11 ; Enable interruptions
392
393 			blt--	.L_kernel_syscall			; System call number is negative, so this is a mach call...
394
395 lwz r8,ACT_TASK(r13) ; Get our task
396 cmpwi cr0,r0,0x7FFA ; Special blue box call?
397 beq-- .L_notify_interrupt_syscall ; Yeah, call it...
398
399 lwz r7,TASK_SYSCALLS_UNIX(r8) ; Get the current count
400 mr r3,r30 ; Get PCB/savearea
401 mr r4,r13 ; current activation
402 addi r7,r7,1 ; Bump it
403 stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
404
405 #if FPFLOOD
406 stfd f31,emfp31(r25) ; (TEST/DEBUG)
407 #endif
408
409 bl EXT(unix_syscall) ; Check out unix...
410
411 .L_call_server_syscall_exception:
412 li r3,EXC_SYSCALL ; doexception(EXC_SYSCALL, num, 1)
413
414 .L_call_server_exception:
415 mr r4,r0 ; Set syscall selector
416 li r5,1
417 b EXT(doexception) ; Go away, never to return...
418
419 .L_notify_interrupt_syscall:
420 lwz r3,saver3+4(r30) ; Get the new PC address to pass in
421 bl EXT(syscall_notify_interrupt)
422 /*
423 * Ok, return from C function, R3 = return value
424 *
425  * saved state is still in R30 and the active thread is in R16.
426 */
427 mr r31,r16 ; Move the current thread pointer
428 stw r3,saver3+4(r30) ; Stash the return code
429 b .L_thread_syscall_ret_check_ast
430
431 ;
432 ; Handle PPC-only system call interface
433 ; These are called with interruptions disabled
434 ; and the savearea/pcb as the first parameter.
435 ; It is up to the callee to enable interruptions if
436 ; they should be. We are in a state here where
437 ; both interrupts and preemption are ok, but because we could
438 ; be calling diagnostic code we will not enable.
439 ;
440 ; Also, the callee is responsible for finding any parameters
441 ; in the savearea/pcb. It also must set saver3 with any return
442 ; code before returning.
443 ;
444 ; There are 3 possible return codes:
445 ;	0	the call is disabled or otherwise unavailable; we treat it as if it were bogus
446 ; + the call finished ok, check for AST
447 ; - the call finished ok, do not check for AST
448 ;
449 ; Note: the last option is intended for special diagnostics calls that
450 ; want the thread to return and execute before checking for preemption.
451 ;
452 ; NOTE: Both R16 (thread) and R30 (savearea) need to be preserved over this call!!!!
453 ;
454
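/*
 * Illustrative sketch (not part of the original source): a dispatcher that
 * honors the three return codes listed above.  The table type and all helper
 * names are hypothetical stand-ins for PPCcalls and the assembly below.
 *
 *     #include <stddef.h>
 *
 *     typedef int (*ppc_call_t)(void *savearea, void *activation);
 *
 *     static void ppcscall_dispatch(ppc_call_t *table, unsigned max_index,
 *                                   unsigned index, void *save, void *act)
 *     {
 *         if (index > max_index || table[index] == NULL) {
 *             raise_syscall_exception(save);      // hypothetical: bogus/disabled call
 *             return;
 *         }
 *         int rc = table[index](save, act);       // callee stores its result in saver3
 *         if (rc == 0)
 *             raise_syscall_exception(save);      // treated as if the call were bogus
 *         else if (rc > 0)
 *             return_checking_asts(save);         // hypothetical: normal AST check
 *         else
 *             return_without_ast_check(save);     // hypothetical: diagnostics path
 *     }
 */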
455 .align 5
456
457 ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table
458 lis r10,hi16(EXT(PPCcalls)) ; Get PPC-only system call table
459 cmplwi r11,PPCcallmax ; See if we are too big
460 ori r10,r10,lo16(EXT(PPCcalls)) ; Merge in low half
461 bgt- .L_call_server_syscall_exception ; Bogus call...
462 lwzx r11,r10,r11 ; Get function address
463
464 ;
465 ; Note: make sure we do not change the savearea in R30 to
466 ; a different register without checking. Some of the PPCcalls
467 ; depend upon it being there.
468 ;
469
470 mr r3,r30 ; Pass the savearea
471 mr r4,r13 ; Pass the activation
472 mr. r11,r11 ; See if there is a function here
473 mtctr r11 ; Set the function address
474 beq- .L_call_server_syscall_exception ; Disabled call...
475 #if INSTRUMENT
476 mfspr r4,pmc1 ; Get stamp
477 stw r4,0x6100+(9*16)+0x0(0) ; Save it
478 mfspr r4,pmc2 ; Get stamp
479 stw r4,0x6100+(9*16)+0x4(0) ; Save it
480 mfspr r4,pmc3 ; Get stamp
481 stw r4,0x6100+(9*16)+0x8(0) ; Save it
482 mfspr r4,pmc4 ; Get stamp
483 stw r4,0x6100+(9*16)+0xC(0) ; Save it
484 #endif
485 bctrl ; Call it
486
487 .globl EXT(ppcscret)
488
489 LEXT(ppcscret)
490 mr. r3,r3 ; See what we should do
491 mr r31,r16 ; Restore the current thread pointer
492 bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return....
493 mfsprg r10,1 ; Get the current activation
494 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
495 blt+ .L_thread_syscall_return ; Return, but no ASTs....
496 lwz r0,saver0+4(r30) ; Restore the system call number
497 b .L_call_server_syscall_exception ; Go to common exit...
498
499
500
501 /*
502 * we get here for mach system calls
503 * when kdebug tracing is enabled
504 */
505
506 ksystrace:
507 mr r4,r30 ; Pass in saved state
508 bl EXT(syscall_trace)
509
510 cmplw r31,r29 ; Is this syscall in the table?
511 add r31,r27,r28 ; Point right to the syscall table entry
512
513 bge- .L_call_server_syscall_exception ; The syscall number is invalid
514
515 lwz r0,savesrr1(r30) ; Get the saved srr1
516 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
517 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
518 beq-- .L_ksystrace_munge
519 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
520
521 .L_ksystrace_munge:
522 cmplwi r0,0 ; do we have a munger to call?
523 mtctr r0 ; Set the function call address
524 addi r3,r30,saver3 ; Pointer to args from save area
525 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
526 beq-- .L_ksystrace_trapcall ; just make the trap call
527 bctrl ; Call the munge function
528
529 .L_ksystrace_trapcall:
530 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
531 mtctr r0 ; Set the function call address
532 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
533 bctrl
534
535 mr r4,r30 ; Pass in the savearea
536 bl EXT(syscall_trace_end) ; Trace the exit of the system call
537 b .L_mach_return
538
539
540
541 /* Once here, we know that the syscall was -ve
542 * we should still have r1=ksp,
543 * r16 = pointer to current thread,
544 * r13 = pointer to top activation,
545 * r0 = syscall number
546 * r30 = pointer to saved state (in pcb)
547 */
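/*
 * Illustrative sketch (not part of the original source): the table-offset
 * arithmetic the code below performs.  Each mach_trap_table entry is five
 * 32-bit words (20 bytes), so the offset is index*16 + index*4.  The field
 * names and their order in the struct are assumptions.
 *
 *     #include <stdint.h>
 *
 *     struct mach_trap_entry {                    // assumed 5-word layout
 *         uint32_t arg_count;
 *         uint32_t function;                      // MACH_TRAP_FUNCTION
 *         uint32_t munge32;                       // MACH_TRAP_ARG_MUNGE32
 *         uint32_t munge64;                       // MACH_TRAP_ARG_MUNGE64
 *         uint32_t reserved;
 *     };
 *
 *     static uint32_t mach_trap_offset(int32_t syscall_nr)
 *     {
 *         uint32_t index = (uint32_t)(-syscall_nr);   // Mach call numbers are negative
 *         return (index << 4) + (index << 2);         // index * 20
 *     }
 */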
548
549 .align 5
550
551 .L_kernel_syscall:
552 ;
553 ; Call a function that can print out our syscall info
554 ;			Note that we don't care about any volatiles yet
555 ;
556 lwz r10,ACT_TASK(r13) ; Get our task
557 lwz r0,saver0+4(r30)
558 lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
559 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
560 ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
561 lwz r8,0(r8) ; Get kdebug_enable
562
563 lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
564 neg r31,r0 ; Make this positive
565 mr r3,r31 ; save it
566 slwi r27,r3,4 ; multiply by 16
567 slwi r3,r3,2 ; and the original by 4
568 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
569 add r27,r27,r3 ; for a total of 20x (5 words/entry)
570 addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
571 cmplwi r8,0 ; Is kdebug_enable non-zero
572 stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
573 bne-- ksystrace ; yes, tracing enabled
574
575 cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
576 add r31,r27,r28 ; Point right to the syscall table entry
577
578 bge-- .L_call_server_syscall_exception ; The syscall number is invalid
579
580 lwz r0,savesrr1(r30) ; Get the saved srr1
581 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
582 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
583 beq-- .L_kernel_syscall_munge
584 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
585
586 .L_kernel_syscall_munge:
587 cmplwi r0,0 ; test for null munger
588 mtctr r0 ; Set the function call address
589 addi r3,r30,saver3 ; Pointer to args from save area
590 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
591 beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
592 bctrl ; Call the munge function
593
594 .L_kernel_syscall_trapcall:
595 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
596 mtctr r0 ; Set the function call address
597 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
598
599 #if FPFLOOD
600 stfd f31,emfp31(r25) ; (TEST/DEBUG)
601 #endif
602
603 bctrl
604
605
606 /*
607 * Ok, return from C function, R3 = return value
608 *
609 * get the active thread's PCB pointer and thus pointer to user state
610 * saved state is still in R30 and the active thread is in R16
611 */
612
613 .L_mach_return:
614 srawi r0,r3,31 ; properly extend the return code
615 cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
616 mr r31,r16 ; Move the current thread pointer
617 stw r0, saver3(r30) ; stash the high part of the return code
618 stw r3,saver3+4(r30) ; Stash the low part of the return code
619 beq-- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
620 .L_mach_invalid_arg:
621
622
623 /* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
624 * the syscall may perform a thread_set_syscall_return
625 * followed by a thread_exception_return, ending up
626 * at thread_syscall_return below, with SS_R3 having
627 * been set up already
628 *
629 * When we are here, r31 should point to the current thread,
630 * r30 should point to the current pcb
631 * r3 contains value that we're going to return to the user
632 * which has already been stored back into the save area
633 */
634
635 .L_thread_syscall_ret_check_ast:
636 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
637 mfmsr r12 ; Get the current MSR
638 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
639 andc r12,r12,r10 ; Turn off VEC, FP, and EE
640 mtmsr r12 ; Turn interruptions off
641
642 mfsprg r10,1 ; Get the current activation
643 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
644
645 /* Check to see if there's an outstanding AST */
646
647 lwz r4,PP_PENDING_AST(r10)
648 cmpi cr0,r4, 0 ; Any pending asts?
649 beq++ cr0,.L_syscall_no_ast ; Nope...
650
651 /* Yes there is, call ast_taken
652 * pretending that the user thread took an AST exception here,
653 * ast_taken will save all state and bring us back here
654 */
655
656 #if DEBUG
657 /* debug assert - make sure that we're not returning to kernel */
658 lwz r3,savesrr1+4(r30)
659 andi. r3,r3,MASK(MSR_PR)
660 bne++ scrnotkern ; returning to user level, check
661
662 lis r0,hi16(Choke) ; Choke code
663 ori r0,r0,lo16(Choke) ; and the rest
664 li r3,failContext ; Bad state code
665 sc ; System ABEND
666
667 scrnotkern:
668 #endif /* DEBUG */
669
670 lis r3,hi16(AST_ALL) ; Set ast flags
671 li r4,1 ; Set interrupt allowed
672 ori r3,r3,lo16(AST_ALL)
673 bl EXT(ast_taken) ; Process the pending ast
674 b .L_thread_syscall_ret_check_ast ; Go see if there was another...
675
676 .L_mach_invalid_ret:
677 /*
678  * need to figure out why we got a KERN_INVALID_ARGUMENT
679 * if it was due to a non-existent system call
680 * then we want to throw an exception... otherwise
681 * we want to pass the error code back to the caller
682 */
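/*
 * Illustrative sketch (not part of the original source): the disambiguation
 * described above.  If the trap slot points at kern_invalid, the number was
 * never a real system call and an exception is raised; otherwise the
 * KERN_INVALID_ARGUMENT came from a real handler and is returned to the
 * caller.  The lookup and the two helpers are hypothetical; kern_invalid is
 * the real symbol the code compares against (exact signature elided).
 *
 *     #include <stdint.h>
 *
 *     extern void kern_invalid(void);                       // signature elided
 *     extern uintptr_t mach_trap_function(uint32_t index);  // hypothetical lookup
 *
 *     static void handle_invalid_argument(int32_t syscall_nr, void *save)
 *     {
 *         uint32_t index = (uint32_t)(-syscall_nr);
 *         if (mach_trap_function(index) == (uintptr_t)&kern_invalid)
 *             raise_syscall_exception(save);     // hypothetical: bogus call number
 *         else
 *             return_error_to_caller(save);      // hypothetical: genuine error return
 *     }
 */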
683 lwz r0,saver0+4(r30) ; reload the original syscall number
684 neg r28,r0 ; Make this positive
685 mr r4,r28 ; save a copy
686 slwi r27,r4,4 ; multiply by 16
687 slwi r4,r4,2 ; and another 4
688 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
689 add r27,r27,r4 ; for a total of 20x (5 words/entry)
690 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
691 add r28,r27,r28 ; Point right to the syscall table entry
692 lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
693 lis r28,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
694 ori r28,r28,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
695 cmpw cr0,r27,r28 ; Check if this is an invalid system call
696 beq-- .L_call_server_syscall_exception ; We have a bad system call
697 b .L_mach_invalid_arg ; a system call returned KERN_INVALID_ARG
698
699
700 /* thread_exception_return returns to here, almost all
701 * registers intact. It expects a full context restore
702  * of what it hasn't restored itself (i.e. what we use).
703 *
704 * In particular for us,
705  * we still have r31 pointing to the current thread,
706 * r30 points to the current pcb
707 */
708
709 .align 5
710
711 .L_syscall_no_ast:
712 .L_thread_syscall_return:
713
714 mr r3,r30 ; Get savearea to the correct register for common exit
715
716 lwz r11,SAVflags(r30) ; Get the flags
717 lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
718 lwz r4,SAVprev+4(r30) ; Get the previous save area
719 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
720 mfsprg r8,1 ; Now find the current activation
721 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
722 stw r11,SAVflags(r30) ; Stick back the flags
723 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
724 stw r4,ACT_MACT_PCB(r8) ; Save previous save area
725 b chkfac ; Go end it all...
726
727 /*
728 * thread_exception_return()
729 *
730 * Return to user mode directly from within a system call.
731 */
732
733 .align 5
734 .globl EXT(thread_bootstrap_return)
735 LEXT(thread_bootstrap_return) ; NOTE: THIS IS GOING AWAY IN A FEW DAYS....
736
737 .globl EXT(thread_exception_return)
738 LEXT(thread_exception_return) ; Directly return to user mode
739
740 .L_thread_exc_ret_check_ast:
741 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
742 mfmsr r3 ; Get the MSR
743 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
744 andc r3,r3,r10 ; Turn off VEC, FP, and EE
745 mtmsr r3 ; Disable interrupts
746
747 /* Check to see if there's an outstanding AST */
748 /* We don't bother establishing a call frame even though CHECK_AST
749 can invoke ast_taken(), because it can just borrow our caller's
750 frame, given that we're not going to return.
751 */
752
753 mfsprg r10,1 ; Get the current activation
754 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
755 lwz r4,PP_PENDING_AST(r10)
756 cmpi cr0,r4, 0
757 beq+ cr0,.L_exc_ret_no_ast
758
759 /* Yes there is, call ast_taken
760 * pretending that the user thread took an AST exception here,
761 * ast_taken will save all state and bring us back here
762 */
763
764 lis r3,hi16(AST_ALL)
765 li r4,1
766 ori r3,r3,lo16(AST_ALL)
767
768 bl EXT(ast_taken)
769 b .L_thread_exc_ret_check_ast ; check for a second AST (rare)
770
771 /* arriving here, interrupts should be disabled */
772 /* Get the active thread's PCB pointer to restore regs
773 */
774 .L_exc_ret_no_ast:
775
776 			mfsprg	r30,1						; Get the current activation
777 mr r31,r30
778
779 lwz r30,ACT_MACT_PCB(r30)
780 mr. r30,r30 ; Is there any context yet?
781 beq- makeDummyCtx ; No, hack one up...
782 #if DEBUG
783 /*
784 * debug assert - make sure that we're not returning to kernel
785 * get the active thread's PCB pointer and thus pointer to user state
786 */
787
788 lwz r3,savesrr1+4(r30)
789 andi. r3,r3,MASK(MSR_PR)
790 bne+ ret_user2 ; We are ok...
791
792 lis r0,hi16(Choke) ; Choke code
793 ori r0,r0,lo16(Choke) ; and the rest
794 li r3,failContext ; Bad state code
795 sc ; System ABEND
796
797 ret_user2:
798 #endif /* DEBUG */
799
800 /* If the system call flag isn't set, then we came from a trap,
801 * so warp into the return_from_trap (thread_return) routine,
802 * which takes PCB pointer in R3, not in r30!
803 */
804 lwz r0,SAVflags(r30) ; Grab the savearea flags
805 andis. r0,r0,SAVsyscall>>16 ; Are we returning from a syscall?
806 mr r3,r30 ; Copy pcb pointer into r3 in case we need it
807 beq-- cr0,thread_return ; Nope, must be a thread return...
808 b .L_thread_syscall_return ; Join up with the system call return...
809
810 ;
811 ;			This is where we handle someone who did a thread_create followed
812 ; by a thread_resume with no intervening thread_set_state. Just make an
813 ; empty context, initialize it to trash and let em execute at 0...
814 ;
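/*
 * Illustrative sketch (not part of the original source): what the dummy
 * context amounts to at the C level.  The savearea layout, the clearing
 * bound, and the field names are assumptions; SAVgeneral and
 * MSR_EXPORT_MASK_SET are the names the assembly uses.
 *
 *     #include <string.h>
 *
 *     static void make_dummy_context(struct savearea *sv, size_t clear_bytes)
 *     {
 *         memset(sv, 0, clear_bytes);             // clear through savefpscr+4
 *         sv->save_type = SAVgeneral;             // hypothetical field: context type
 *         sv->savesrr1  = MSR_EXPORT_MASK_SET;    // default user MSR
 *         // savesrr0 stays 0, so the thread will begin executing at address 0
 *     }
 */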
815
816 .align 5
817
818 makeDummyCtx:
819 bl EXT(save_get) ; Get a save_area
820 li r4,SAVgeneral ; Get the general context type
821 li r0,0 ; Get a 0
822 stb r4,SAVflags+2(r3) ; Set type
823 addi r2,r3,savefpscr+4 ; Point past what we are clearing
824 mr r4,r3 ; Save the start
825
826 cleardummy: stw r0,0(r4) ; Clear stuff
827 addi r4,r4,4 ; Next word
828 cmplw r4,r2 ; Still some more?
829 blt+ cleardummy ; Yeah...
830
831 lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR
832 ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part
833 stw r2,savesrr1+4(r3) ; Set the default user MSR
834
835 b thread_return ; Go let em try to execute, hah!
836
837 /*
838 * ihandler(type)
839 *
840 * ENTRY: VM switched ON
841 * Interrupts OFF
842 * R3 contains exception code
843 * R4 points to the saved context (virtual address)
844 * Everything is saved in savearea
845 *
846 */
847
848 .align 5
849 .globl EXT(ihandler)
850 LEXT(ihandler)									; Interrupt handler
851
852 /*
853 * get the value of istackptr, if it's zero then we're already on the
854 * interrupt stack.
855 */
856
857 lwz r10,savesrr1+4(r4) ; Get SRR1
858 lwz r7,savevrsave(r4) ; Get the VRSAVE register
859 mfsprg r13,1 ; Get the current activation
860 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
861 li r14,0 ; Zero this for now
862 rlwinm. r16,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
863 lwz r1,PP_ISTACKPTR(r25) ; Get the interrupt stack
864 li r16,0 ; Zero this for now
865
866 beq+ ivecoff ; Vector off, do not save vrsave...
867 stw r7,liveVRS(r25) ; Set the live value
868
869 ivecoff: li r0,0 ; Get a constant 0
870 rlwinm r5,r10,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
871 mr. r1,r1 ; Is it active?
872 cmplwi cr2,r5,0 ; cr2_eq == 1 if yes
873 mr r16,r13
874 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
875 lwz r9,saver1+4(r4) ; Pick up the rupt time stack
876 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
877 stw r13,SAVact(r4) ; Point the savearea at its activation
878 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
879 beq cr2,ifromk
880 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
881
882 ifromk: bne .L_istackfree ; Nope...
883
884 /* We're already on the interrupt stack, get back the old
885 * stack pointer and make room for a frame
886 */
887
888 lwz r10,PP_INTSTACK_TOP_SS(r25) ; Get the top of the interrupt stack
889 addi r5,r9,INTSTACK_SIZE-FM_SIZE ; Shift stack for bounds check
890 subi r1,r9,FM_REDZONE ; Back up beyond the red zone
891 sub r5,r5,r10 ; Get displacement into stack
892 cmplwi r5,INTSTACK_SIZE-FM_SIZE ; Is the stack actually invalid?
893 blt+ ihsetback ; The stack is ok...
894
895 lwz r5,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
896 subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
897 sub r5,r1,r5 ; Get displacement into debug stack
898 cmplwi cr2,r5,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
899 blt+ cr2,ihsetback ; Yeah, that is ok too...
900
901 lis r0,hi16(Choke) ; Choke code
902 ori r0,r0,lo16(Choke) ; and the rest
903 li r3,failStack ; Bad stack code
904 sc ; System ABEND
905
906 intUnalignedStk:
907 lis r0,hi16(Choke) ; Choke code
908 ori r0,r0,lo16(Choke) ; and the rest
909 li r3,failUnalignedStk ; Unaligned stack code
910 sc ; System ABEND
911
912 .align 5
913
914 .L_istackfree:
915 rlwinm. r0,r1,0,28,31 ; Check if stack is aligned (and get 0)
916 lwz r10,SAVflags(r4) ; Get savearea flags
917 bne-- intUnalignedStk ; Stack is unaligned...
918 stw r0,PP_ISTACKPTR(r25) ; Mark the stack in use
919 oris r10,r10,hi16(SAVrststk) ; Indicate we reset stack when we return from this one
920 stw r10,SAVflags(r4) ; Stick it back
921
922 /*
923 * To summarize, when we reach here, the state has been saved and
924 * the stack is marked as busy. We now generate a small
925 * stack frame with backpointers to follow the calling
926 * conventions. We set up the backpointers to the trapped
927 * routine allowing us to backtrace.
928 */
929
930 ihsetback: subi r1,r1,FM_SIZE ; Make a new frame
931 stw r9,FM_BACKPTR(r1) ; Point back to previous stackptr
932
933 #if VERIFYSAVE
934 beq- cr1,ihbootnover ; (TEST/DEBUG)
935 bl versave ; (TEST/DEBUG)
936 ihbootnover: ; (TEST/DEBUG)
937 #endif
938
939 #if DEBUG
940 /* If debugging, we need two frames, the first being a dummy
941 * which links back to the trapped routine. The second is
942 * that which the C routine below will need
943 */
944 lwz r5,savesrr0+4(r4) ; Get interrupt address
945 stw r5,FM_LR_SAVE(r1) ; save old instr ptr as LR value
946 stwu r1,-FM_SIZE(r1) ; Make another new frame for C routine
947 #endif /* DEBUG */
948
949 mr r31,r3
950 mr r30,r4
951
952 lwz r3,SAVtime+4(r4)
953 addi r4,r13,SYSTEM_TIMER
954 bl EXT(timer_event)
955
956 mr r3,r31
957 mr r4,r30
958 lwz r5,savedsisr(r30) ; Get the DSISR
959 lwz r6,savedar+4(r30) ; Get the DAR
960
961 #if FPFLOOD
962 stfd f31,emfp31(r25) ; (TEST/DEBUG)
963 #endif
964
965 bl EXT(interrupt)
966
967
968 /* interrupt() returns a pointer to the saved state in r3
969 *
970 * Ok, back from C. Disable interrupts while we restore things
971 */
972 .globl EXT(ihandler_ret)
973
974 LEXT(ihandler_ret) ; Marks our return point from debugger entry
975
976 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
977 mfmsr r0 ; Get our MSR
978 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
979 andc r0,r0,r10 ; Turn off VEC, FP, and EE
980 mtmsr r0 ; Make sure interrupts are disabled
981 mfsprg r8,1 ; Get the current activation
982 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
983
984 lwz r7,SAVflags(r3) ; Pick up the flags
985 lwz r9,SAVprev+4(r3) ; Get previous save area
986 cmplwi cr1,r8,0 ; Are we still initializing?
987 lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return
988 andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack?
989 stw r9,ACT_MACT_PCB(r8) ; Point to previous context savearea
990 mr r4,r3 ; Move the savearea pointer
991 beq .L_no_int_ast2 ; Get going if not the top-o-stack...
992
993
994 /* We're the last frame on the stack. Restore istackptr to empty state.
995 *
996 * Check for ASTs if one of the below is true:
997 * returning to user mode
998 * returning to a kloaded server
999 */
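/*
 * Illustrative sketch (not part of the original source): the AST decision
 * made below.  T_AST, T_PREEMPT, AST_URGENT and MASK(MSR_PR) are the names
 * the code uses; the helper itself is hypothetical.
 *
 *     #include <stdint.h>
 *
 *     // Returns the trap code to redirect into, or 0 to just return.
 *     static int interrupt_exit_ast_check(int preempt_cnt, uint32_t srr1,
 *                                         uint32_t pending_ast)
 *     {
 *         if (preempt_cnt != 0)
 *             return 0;                           // never preempt at elevated level
 *         if (srr1 & MASK(MSR_PR))                // returning to user mode
 *             return pending_ast ? T_AST : 0;     // take any pending AST
 *         return (pending_ast & AST_URGENT) ? T_PREEMPT : 0;  // kernel: urgent only
 *     }
 */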
1000 lwz r9,PP_INTSTACK_TOP_SS(r10) ; Get the empty stack value
1001 andc r7,r7,r11 ; Remove the stack reset bit in case we pass this one
1002 			stw		r9,PP_ISTACKPTR(r10)		; Mark the interrupt stack free again
1003 lwz r3,ACT_PREEMPT_CNT(r8) ; Get preemption level
1004 stw r7,SAVflags(r4) ; Save the flags
1005 cmplwi r3, 0 ; Check for preemption
1006 bne .L_no_int_ast ; Do not preempt if level is not zero
1007 andi. r6,r12,MASK(MSR_PR) ; privilege mode
1008 lwz r11,PP_PENDING_AST(r10) ; Get the pending AST mask
1009 beq- .L_kernel_int_ast ; In kernel space, AST_URGENT check
1010 li r3,T_AST ; Assume the worst
1011 mr. r11,r11 ; Are there any pending?
1012 beq .L_no_int_ast ; Nope...
1013 b .L_call_thandler
1014
1015 .L_kernel_int_ast:
1016 andi. r11,r11,AST_URGENT ; Do we have AST_URGENT?
1017 li r3,T_PREEMPT ; Assume the worst
1018 beq .L_no_int_ast ; Nope...
1019
1020 /*
1021 * There is a pending AST. Massage things to make it look like
1022 * we took a trap and jump into the trap handler. To do this
1023 * we essentially pretend to return from the interrupt but
1024 * at the last minute jump into the trap handler with an AST
1025 * trap instead of performing an rfi.
1026 */
1027
1028 .L_call_thandler:
1029 stw r3,saveexception(r4) ; Set the exception code to T_AST/T_PREEMPT
1030 b EXT(thandler) ; We need to preempt so treat like a trap...
1031
1032 .L_no_int_ast:
1033 mr r3,r4 ; Get into the right register for common code
1034
1035 .L_no_int_ast2:
1036 rlwinm r7,r7,0,15,13 ; Clear the syscall flag
1037 li r4,0 ; Assume for a moment that we are in init
1038 stw r7,SAVflags(r3) ; Set the flags with cleared syscall flag
1039 beq-- cr1,chkfac ; Jump away if we are in init...
1040
1041 lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker
1042
1043
1044 ;
1045 ; This section is common to all exception exits. It throws away vector
1046 ; and floating point saveareas as the exception level of a thread is
1047 ; exited.
1048 ;
1049 ; It also enables the facility if its context is live
1050 ; Requires:
1051 ; R3 = Savearea to be released (virtual)
1052 ; R4 = New top of savearea stack (could be 0)
1053 ; R8 = pointer to activation
1054 ; R10 = per_proc block
1055 ;
1056 ;			Note that barring unforeseen crashes, there is no escape from this point
1057 ; on. We WILL call exception_exit and launch this context. No worries
1058 ; about preemption or interruptions here.
1059 ;
1060 ; Note that we will set up R26 with whatever context we will be launching,
1061 ;			so it will indicate the current, or the deferred if it is set and we
1062 ; are going to user state. CR2_eq will be set to indicate deferred.
1063 ;
1064
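/*
 * Illustrative sketch (not part of the original source): the per-facility
 * bookkeeping chkfac does below, first for the FPU and then for the vector
 * unit.  The struct shapes are assumptions that mirror FPUlevel/FPUcpu/
 * FPUsave (and their VMX twins); the helpers are hypothetical.
 *
 *     struct savearea { struct savearea *prev, *level; };            // assumed shape
 *     struct facility { struct savearea *level, *save; int cpu; };   // assumed shape
 *
 *     static void facility_pop(struct facility *f, struct savearea *exiting,
 *                              int my_cpu)
 *     {
 *         if (f->level != exiting)                // not leaving the active level
 *             return;
 *         f->cpu = my_cpu;                        // claim the context for this CPU
 *         clear_owner_if_still_this_context(f);   // hypothetical lwarx/stwcx. loop
 *         struct savearea *sv = f->save;
 *         struct savearea *new_top = NULL;        // assume we popped all the way out
 *         if (sv != NULL) {
 *             new_top = sv->level;
 *             if (sv->level == exiting) {         // saved copy of the live state
 *                 f->save = sv->prev;             // dequeue it
 *                 new_top = sv->prev ? sv->prev->level : NULL;
 *                 release_to_quickfret(sv);       // hypothetical; see sketch below
 *             }
 *         }
 *         f->level = new_top;                     // pop to the top stacked context
 *         // The facility bit is turned back on in the outgoing MSR only if this
 *         // context is still the live owner, was last used on this CPU, and the
 *         // level being launched matches the live level.
 *     }
 */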
1065 chkfac: lwz r29,savesrr1+4(r3) ; Get the current MSR
1066 mr. r28,r8 ; Are we still in boot?
1067 mr r31,r10 ; Move per_proc address
1068 mr r30,r4 ; Preserve new level
1069 mr r27,r3 ; Save the old level
1070 beq-- chkenax ; Yeah, skip it all...
1071
1072 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going into user state?
1073
1074 lwz r20,curctx(r28) ; Get our current context
1075 lwz r26,deferctx(r28) ; Get any deferred context switch
1076 li r0,1 ; Get set to hold off quickfret
1077 rlwinm r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now
1078 lwz r21,FPUlevel(r20) ; Get the facility level
1079 cmplwi cr2,r26,0 ; Are we going into a deferred context later?
1080 rlwinm r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now
1081 crnor cr2_eq,cr0_eq,cr2_eq ; Set cr2_eq if going to user state and there is deferred
1082 lhz r19,PP_CPU_NUMBER(r31) ; Get our CPU number
1083 cmplw r27,r21 ; Are we returning from the active level?
1084 stw r0,holdQFret(r31) ; Make sure we hold off releasing quickfret
1085 bne++ fpuchkena ; Nope...
1086
1087 ;
1088 ; First clean up any live context we are returning from
1089 ;
1090
1091 lwz r22,FPUcpu(r20) ; Get CPU this context was last dispatched on
1092
1093 stw r19,FPUcpu(r20) ; Claim context for us
1094
1095 eieio ; Make sure this gets out before owner clear
1096
1097 #if ppeSize != 16
1098 #error per_proc_entry is not 16 bytes in size
1099 #endif
1100
1101 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1102 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1103 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1104 li r24,FPUowner ; Displacement to float owner
1105 add r22,r23,r22 ; Point to the owner per_proc_entry
1106 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1107
1108 fpuinvothr: lwarx r23,r24,r22 ; Get the owner
1109
1110 sub r0,r23,r20 ; Subtract one from the other
1111 sub r21,r20,r23 ; Subtract the other from the one
1112 or r21,r21,r0 ; Combine them
1113 			srawi	r21,r21,31					; Get a 0 if equal or -1 if not
1114 and r23,r23,r21 ; Make 0 if same, unchanged if not
1115 stwcx. r23,r24,r22 ; Try to invalidate it
1116 bne-- fpuinvothr ; Try again if there was a collision...
1117
1118 isync
1119
1120 ;
1121 ; Now if there is a savearea associated with the popped context, release it.
1122 ; Either way, pop the level to the top stacked context.
1123 ;
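/*
 * Illustrative sketch (not part of the original source): releasing a popped
 * savearea onto the per-processor quickfret list, as done a few instructions
 * below (and again in the vector path).  SACvrswap and quickfret are the
 * names the code uses; the struct shapes and the 4KB block-header mask are
 * assumptions drawn from the rlwinm/xor sequence.
 *
 *     #include <stdint.h>
 *
 *     struct savearea_comm { uint64_t SACvrswap; };   // assumed shape
 *     struct savearea      { uint64_t SAVprev;   };   // assumed shape
 *     struct per_proc      { uint64_t quickfret; };   // assumed shape
 *
 *     static void release_to_quickfret(struct per_proc *pp, struct savearea *sv)
 *     {
 *         struct savearea_comm *sac =
 *             (struct savearea_comm *)((uintptr_t)sv & ~(uintptr_t)0xFFF);
 *         uint64_t phys = (uintptr_t)sv ^ sac->SACvrswap; // virtual -> physical
 *         sv->SAVprev   = pp->quickfret;                  // link the old head in
 *         pp->quickfret = phys;                           // new (physical) head
 *     }
 */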
1124
1125 lwz r22,FPUsave(r20) ; Get pointer to the first savearea
1126 li r21,0 ; Assume we popped all the way out
1127 mr. r22,r22 ; Is there anything there?
1128 beq++ fpusetlvl ; No, see if we need to enable...
1129
1130 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1131 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1132 bne fpusetlvl ; No, leave as is...
1133
1134 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1135 li r21,0 ; Assume we popped all the way out
1136 mr. r24,r24 ; Any more context stacked?
1137 beq-- fpuonlyone ; Nope...
1138 lwz r21,SAVlevel(r24) ; Get the level associated with save
1139
1140 fpuonlyone: stw r24,FPUsave(r20) ; Dequeue this savearea
1141
1142 rlwinm r3,r22,0,0,19 ; Find main savearea header
1143
1144 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1145 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1146 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1147 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1148 stw r8,SAVprev(r22) ; Link the old in (top)
1149 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1150 xor r3,r22,r3 ; Convert to physical
1151 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1152 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1153
1154 #if FPVECDBG
1155 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1156 li r2,0x3301 ; (TEST/DEBUG)
1157 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1158 sc ; (TEST/DEBUG)
1159 #endif
1160
1161 fpusetlvl: stw r21,FPUlevel(r20) ; Save the level
1162
1163 ;
1164 ; Here we check if we are at the right level
1165 ; We need to check the level we are entering, not the one we are exiting.
1166 ; Therefore, we will use the defer level if it is non-zero and we are
1167 ; going into user state.
1168 ;
1169
1170 fpuchkena: bt-- cr2_eq,fpuhasdfrd ; Skip if deferred, R26 already set up...
1171 mr r26,r20 ; Use the non-deferred value
1172
1173 fpuhasdfrd:
1174 #if 0
1175 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; (TEST/DEBUG) Going into user state?
1176 beq fpunusrstt ; (TEST/DEBUG) Nope...
1177 lwz r23,FPUlevel(r26) ; (TEST/DEBUG) Get the level ID
1178 lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
1179 mr. r23,r23 ; (TEST/DEBUG) Should be level 0
1180 beq++ fpulvl0 ; (TEST/DEBUG) Yes...
1181
1182 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1183 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1184 sc ; (TEST/DEBUG) System ABEND
1185
1186 fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
1187 beq fpunusrstt ; (TEST/DEBUG) No...
1188 lwz r23,SAVlevel(r24) ; (TEST/DEBUG) Get level of context
1189 lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
1190 mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
1191 beq++ fpulvl0b ; (TEST/DEBUG) Yes...
1192
1193 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1194 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1195 sc ; (TEST/DEBUG) System ABEND
1196
1197 fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
1198 beq++ fpunusrstt ; (TEST/DEBUG) Nope...
1199
1200 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1201 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1202 sc ; (TEST/DEBUG) System ABEND
1203
1204 fpunusrstt: ; (TEST/DEBUG)
1205 #endif
1206
1207 lwz r21,FPUowner(r31) ; Get the ID of the live context
1208 lwz r23,FPUlevel(r26) ; Get the level ID
1209 lwz r24,FPUcpu(r26) ; Get the CPU that the context was last dispatched on
1210 cmplw cr3,r26,r21 ; Do we have the live context?
1211 cmplw r30,r23 ; Are we about to launch the live level?
1212 bne-- cr3,chkvec ; No, can not possibly enable...
1213 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1214 bne-- chkvec ; No, not live...
1215 bne-- cr1,chkvec ; No, wrong cpu, have to enable later....
1216
1217 lwz r24,FPUsave(r26) ; Get the first savearea
1218 mr. r24,r24 ; Any savearea?
1219 beq++ fpuena ; Nope...
1220 lwz r25,SAVlevel(r24) ; Get the level of savearea
1221 lwz r0,SAVprev+4(r24) ; Get the previous
1222
1223 cmplw r30,r25 ; Is savearea for the level we are launching?
1224 bne++ fpuena ; No, just go enable...
1225
1226 stw r0,FPUsave(r26) ; Pop the chain
1227
1228 rlwinm r3,r24,0,0,19 ; Find main savearea header
1229
1230 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1231 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1232 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1233 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1234 stw r8,SAVprev(r24) ; Link the old in (top)
1235 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1236 xor r3,r24,r3 ; Convert to physical
1237 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1238 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1239
1240 #if FPVECDBG
1241 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1242 li r2,0x3302 ; (TEST/DEBUG)
1243 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1244 sc ; (TEST/DEBUG)
1245 #endif
1246
1247 fpuena: ori r29,r29,lo16(MASK(MSR_FP)) ; Enable facility
1248
1249 chkvec:
1250
1251 lwz r21,VMXlevel(r20) ; Get the facility level
1252
1253 cmplw r27,r21 ; Are we returning from the active level?
1254 bne+ vmxchkena ; Nope...
1255
1256
1257 ;
1258 ; First clean up any live context we are returning from
1259 ;
1260
1261 lwz r22,VMXcpu(r20) ; Get CPU this context was last dispatched on
1262
1263 stw r19,VMXcpu(r20) ; Claim context for us
1264
1265 eieio ; Make sure this gets out before owner clear
1266
1267 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1268 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1269 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1270 			li		r24,VMXowner				; Displacement to vector owner
1271 add r22,r23,r22 ; Point to the owner per_proc_entry
1272 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1273
1274 vmxinvothr: lwarx r23,r24,r22 ; Get the owner
1275
1276 sub r0,r23,r20 ; Subtract one from the other
1277 sub r21,r20,r23 ; Subtract the other from the one
1278 or r21,r21,r0 ; Combine them
1279 			srawi	r21,r21,31					; Get a 0 if equal or -1 if not
1280 and r23,r23,r21 ; Make 0 if same, unchanged if not
1281 stwcx. r23,r24,r22 ; Try to invalidate it
1282 bne-- vmxinvothr ; Try again if there was a collision...
1283
1284 isync
1285
1286 ;
1287 ; Now if there is a savearea associated with the popped context, release it.
1288 ; Either way, pop the level to the top stacked context.
1289 ;
1290
1291 lwz r22,VMXsave(r20) ; Get pointer to the first savearea
1292 li r21,0 ; Assume we popped all the way out
1293 mr. r22,r22 ; Is there anything there?
1294 beq++ vmxsetlvl ; No, see if we need to enable...
1295
1296 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1297 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1298 bne vmxsetlvl ; No, leave as is...
1299
1300 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1301 li r21,0 ; Assume we popped all the way out
1302 mr. r24,r24 ; Any more context?
1303 beq-- vmxonlyone ; Nope...
1304 lwz r21,SAVlevel(r24) ; Get the level associated with save
1305
1306 vmxonlyone: stw r24,VMXsave(r20) ; Dequeue this savearea
1307
1308 rlwinm r3,r22,0,0,19 ; Find main savearea header
1309
1310 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1311 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1312 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1313 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1314 stw r8,SAVprev(r22) ; Link the old in (top)
1315 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1316 xor r3,r22,r3 ; Convert to physical
1317 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1318 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1319
1320 #if FPVECDBG
1321 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1322 li r2,0x3401 ; (TEST/DEBUG)
1323 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1324 sc ; (TEST/DEBUG)
1325 #endif
1326
1327 vmxsetlvl: stw r21,VMXlevel(r20) ; Save the level
1328
1329 ;
1330 ; Here we check if we are at the right level
1331 ;
1332
1333 vmxchkena: lwz r21,VMXowner(r31) ; Get the ID of the live context
1334 lwz r23,VMXlevel(r26) ; Get the level ID
1335 cmplw r26,r21 ; Do we have the live context?
1336 lwz r24,VMXcpu(r26) ; Get the CPU that the context was last dispatched on
1337 bne-- setena ; No, can not possibly enable...
1338 cmplw r30,r23 ; Are we about to launch the live level?
1339 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1340 bne-- setena ; No, not live...
1341 bne-- cr1,setena ; No, wrong cpu, have to enable later....
1342
1343 lwz r24,VMXsave(r26) ; Get the first savearea
1344 mr. r24,r24 ; Any savearea?
1345 beq++ vmxena ; Nope...
1346 lwz r25,SAVlevel(r24) ; Get the level of savearea
1347 lwz r0,SAVprev+4(r24) ; Get the previous
1348 cmplw r30,r25 ; Is savearea for the level we are launching?
1349 bne++ vmxena ; No, just go enable...
1350
1351 stw r0,VMXsave(r26) ; Pop the chain
1352
1353 rlwinm r3,r24,0,0,19 ; Find main savearea header
1354
1355 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1356 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1357 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1358 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1359 stw r8,SAVprev(r24) ; Link the old in (top)
1360 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1361 xor r3,r24,r3 ; Convert to physical
1362 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1363 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1364
1365 #if FPVECDBG
1366 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1367 li r2,0x3402 ; (TEST/DEBUG)
1368 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1369 sc ; (TEST/DEBUG)
1370 #endif
1371
1372 vmxena: oris r29,r29,hi16(MASK(MSR_VEC)) ; Enable facility
1373
1374 setena: lwz r18,umwSpace(r28) ; Get the space ID in case we are launching user
1375 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state?
1376 li r0,0 ; Get set to release quickfret holdoff
1377 crmove cr7_eq,cr0_eq ; Remember if we are going to user state
1378 rlwimi. r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats
1379 lwz r19,deferctx(r28) ; Get any deferred facility context switch
1380 rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector
1381 stw r29,savesrr1+4(r27) ; Turn facility on or off
1382 stw r0,holdQFret(r31) ; Release quickfret
1383 oris r18,r18,hi16(umwSwitchAway) ; Set the switch-away bit in case we go to user
1384
1385 beq setenaa ; Neither float nor vector turned on....
1386
1387 lwz r5,ACT_MACT_SPF(r28) ; Get activation copy
1388 lwz r6,spcFlags(r31) ; Get per_proc copy
1389 or r5,r5,r20 ; Set vector/float changed bits in activation
1390 or r6,r6,r20 ; Set vector/float changed bits in per_proc
1391 stw r5,ACT_MACT_SPF(r28) ; Set activation copy
1392 stw r6,spcFlags(r31) ; Set per_proc copy
1393
1394 setenaa: mfdec r24 ; Get decrementer
1395 bf+ cr2_eq,nodefer ; No deferred to switch to...
1396
1397 li r20,0 ; Clear this
1398 stw r26,curctx(r28) ; Make the facility context current
1399 stw r20,deferctx(r28) ; Clear deferred context
1400
1401 nodefer: lwz r22,qactTimer(r28) ; Get high order quick activation timer
1402 mr. r24,r24 ; See if it has popped already...
1403 lwz r23,qactTimer+4(r28) ; Get low order qact timer
1404 ble- chkifuser ; We have popped or are just about to...
1405
1406 segtb: mftbu r20 ; Get the upper time base
1407 mftb r21 ; Get the low
1408 mftbu r19 ; Get upper again
1409 or. r0,r22,r23 ; Any time set?
1410 cmplw cr1,r20,r19 ; Did they change?
1411 beq++ chkifuser ; No time set....
1412 bne-- cr1,segtb ; Timebase ticked, get them again...
1413
1414 subfc r6,r21,r23 ; Subtract current from qact time
1415 li r0,0 ; Make a 0
1416 subfe r5,r20,r22 ; Finish subtract
1417 subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise
1418 andc. r12,r5,r0 ; Set 0 if qact has passed
1419 andc r13,r6,r0 ; Set 0 if qact has passed
1420 bne chkifuser ; If high order is non-zero, this is too big for a decrementer
1421 cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on)
1422 bge++ chkifuser ; No, do not reset decrementer...
1423
1424 mtdec r13 ; Set our value
1425
1426 chkifuser: addi r4,r28,SYSTEM_TIMER
1427 mftb r3
1428 beq-- cr7,chkifuser1 ; Skip this if we are going to kernel...
1429 stw r18,umwSpace(r28) ; Half-invalidate to force MapUserAddressWindow to reload SRs
1430 addi r4,r28,USER_TIMER
1431
1432 chkifuser1: bl EXT(timer_event)
1433
1434 chkenax:
1435
1436 #if DEBUG
1437 			lwz		r20,SAVact(r27)				; (TEST/DEBUG) Make sure our restore agrees
1438 mfsprg r21, 1 ; (TEST/DEBUG) with the current act.
1439 cmpwi r21,0 ; (TEST/DEBUG)
1440 beq-- yeswereok ; (TEST/DEBUG)
1441 cmplw r21,r20 ; (TEST/DEBUG)
1442 beq++ yeswereok ; (TEST/DEBUG)
1443
1444 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1445 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1446 mr r21,r27 ; (TEST/DEBUG) Save the savearea address
1447 li r3,failContext ; (TEST/DEBUG) Bad state code
1448 sc ; (TEST/DEBUG) System ABEND
1449
1450 yeswereok:
1451 #endif
1452
1453 mr r3,r27 ; Pass savearea back
1454 b EXT(exception_exit) ; We are all done now...
1455
1456
1457
1458 ;
1459 ; Null PPC call - performance testing, does absolutely nothing
1460 ;
1461
1462 .align 5
1463
1464 .globl EXT(ppcNull)
1465
1466 LEXT(ppcNull)
1467
1468 li r3,-1 ; Make sure we test no asts
1469 blr
1470
1471
1472 ;
1473 ; Instrumented null PPC call - performance testing, does absolutely nothing
1474 ; Forces various timestamps to be returned.
1475 ;
1476
1477 .align 5
1478
1479 .globl EXT(ppcNullinst)
1480
1481 LEXT(ppcNullinst)
1482
1483 li r3,-1 ; Make sure we test no asts
1484 blr
1485
1486
1487 /*
1488 * Here's where we handle the fastpath stuff
1489 * We'll do what we can here because registers are already
1490  * loaded and it will be less confusing than moving them around.
1491  * If we need to though, we'll branch off somewhere else.
1492 *
1493 * Registers when we get here:
1494 *
1495 * r0 = syscall number
1496 * r4 = savearea/pcb
1497 * r13 = activation
1498 * r14 = previous savearea (if any)
1499 * r16 = thread
1500 * r25 = per_proc
1501 */
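/*
 * Illustrative sketch (not part of the original source): the fast-trap
 * dispatch below.  The two trap numbers come from the code; the helpers are
 * hypothetical.
 *
 *     #include <stdint.h>
 *
 *     static void fastpath_dispatch(uint32_t r0, void *save, void *act, void *pp)
 *     {
 *         switch (r0) {
 *         case 0x7FF5:                            // null fast trap
 *             fast_exit(save);                    // hypothetical: straight back out
 *             break;
 *         case 0x7FF1:                            // CthreadSetSelfNumber
 *             cthread_set_self_fast(save, act, pp);  // hypothetical: store TLDP, exit
 *             break;
 *         default:                                // not a fast path: fall back to the
 *             break;                              // regular system call handling
 *         }
 *     }
 */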
1502
1503 .align 5
1504
1505 fastpath: cmplwi cr3,r0,0x7FF5 ; Is this a null fastpath?
1506 beq-- cr3,fastexutl ; Yes, bail fast...
1507 cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber?
1508 bnelr-- cr3 ; Not a fast path...
1509
1510 /*
1511 * void cthread_set_self(cproc_t p)
1512 *
1513  * Sets thread state "user_value". In practice this is the thread-local-data-pointer (TLDP),
1514 * though we do not interpret it. This call is mostly used by 32-bit tasks, but we save all 64 bits
1515 * in case a 64-bit task wants to use this facility. They normally do not, because the 64-bit
1516 * ABI reserves r13 for the TLDP.
1517 *
1518 * This op is invoked as follows:
1519 * li r0, CthreadSetSelfNumber // load the fast-trap number
1520 * sc // invoke fast-trap
1521 * blr
1522 */
1523
1524 CthreadSetSelfNumber:
1525 lwz r3,saver3+0(r4) /* get the TLDP passed in r3 */
1526 lwz r5,saver3+4(r4) /* (all 64 bits, in case this is a 64-bit task) */
1527 stw r3,CTHREAD_SELF+0(r13) /* Remember it in the activation... */
1528 stw r5,CTHREAD_SELF+4(r13)
1529 stw r3,UAW+0(r25) /* ...and in the per-proc */
1530 stw r5,UAW+4(r25)
1531
1532
1533 .globl EXT(fastexit)
1534 EXT(fastexit):
1535 fastexutl: mr r3,r4 ; Pass back savearea
1536 b EXT(exception_exit) ; Go back to the caller...
1537
1538
1539 /*
1540 * Here's where we check for a hit on the Blue Box Assist
1541 * Most registers are non-volatile, so be careful here. If we don't
1542 * recognize the trap instruction we go back for regular processing.
1543 * Otherwise we transfer to the assist code.
1544 */
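/*
 * Illustrative sketch (not part of the original source): the eligibility
 * test performed below.  BB_MAX_TRAP, T_AST and MASK(MSR_PR) are the names
 * the code uses; the helper and srr1_is_program_trap are hypothetical.
 *
 *     #include <stdbool.h>
 *     #include <stdint.h>
 *
 *     static bool bluebox_assist_applies(uint32_t exception, uint32_t srr1,
 *                                        uint32_t srr0, uint32_t table_start)
 *     {
 *         if (exception == T_AST)
 *             return false;                       // never assist an AST
 *         if (!(srr1 & MASK(MSR_PR)) || !srr1_is_program_trap(srr1))
 *             return false;                       // need a program trap from user state
 *         return (srr0 - table_start) <= BB_MAX_TRAP; // and it must hit the trap table
 *     }
 */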
1545
1546 .align 5
1547
1548 checkassist:
1549 lwz r0,saveexception(r4) ; Get the exception code
1550 lwz r23,savesrr1+4(r4) ; Get the interrupted MSR
1551 lwz r26,ACT_MACT_BEDA(r13) ; Get Blue Box Descriptor Area
1552 mtcrf 0x18,r23 ; Check what SRR1 says
1553 lwz r24,ACT_MACT_BTS(r13) ; Get the table start
1554 cmplwi r0,T_AST ; Check for T_AST trap
1555 lwz r27,savesrr0+4(r4) ; Get trapped address
1556 crnand cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT ; We need both trap and user state
1557 sub r24,r27,r24 ; See how far into it we are
1558 cror cr0_eq,cr0_eq,cr1_eq ; Need to bail if AST or not trap or not user state
1559 cmplwi cr1,r24,BB_MAX_TRAP ; Do we fit in the list?
1560 cror cr0_eq,cr0_eq,cr1_gt ; Also leave if trap not in range
1561 btlr- cr0_eq ; No assist if AST or not trap or not user state or trap not in range
1562 b EXT(atomic_switch_trap) ; Go to the assist...
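;
; To summarize the condition-register work above: the assist is taken only
; when the exception is not T_AST, SRR1 shows both a program trap and user
; (problem) state, and the trapped address lies within BB_MAX_TRAP bytes of
; the table start; any other combination returns (btlr-) to the regular
; trap path.
;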
1563
1564 ;
1565 ; Virtual Machine Monitor
1566 ; Here is where we exit from the emulated context
1567 ; Note that most registers get trashed here
1568 ; R3 and R30 are preserved across the call and hold the activation
1569 ; and savearea respectively.
1570 ;
1571
1572 .align 5
1573
1574 exitFromVM: mr r30,r4 ; Get the savearea
1575 mr r3,r13 ; Get the activation
1576
1577 b EXT(vmm_exit) ; Do it to it
1578
1579 .align 5
1580 .globl EXT(retFromVM)
1581
1582 LEXT(retFromVM)
1583 mfsprg r10,1 ; Get the current activation
1584 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1585 mr r8,r3 ; Get the activation
1586 lwz r4,SAVprev+4(r30) ; Pick up the previous savearea
1587 mr r3,r30 ; Put savearea in proper register for common code
1588 lwz r11,SAVflags(r30) ; Get the flags of the current savearea
1589 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
1590 mr r1,r8
1591 stw r11,SAVflags(r3) ; Save back the flags (with the syscall flag cleared)
1592
1593 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
1594
1595 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
1596 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
1597 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
1598 b chkfac ; Go end it all...
1599
1600
1601 ;
1602 ; chandler (note: not a candle maker or tallow merchant)
1603 ;
1604 ; Here is the system choke handler. This is where the system goes
1605 ; to die.
1606 ;
1607 ; We get here as a result of a T_CHOKE exception which is generated
1608 ; by the Choke firmware call or by lowmem_vectors when it detects a
1609 ; fatal error. Examples of where this may be used are when we detect
1610 ; problems in low-level mapping chains, trashed savearea free chains,
1611 ; or stack guardpage violations.
1612 ;
1613 ; Note that we cannot set a back chain in the stack when we come
1614 ; here, because the chain itself is probably what is corrupt.
1615 ;
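;
; Illustrative sketch only: kernel code raises this condition via the Choke
; firmware call, along the same lines as the (TEST/DEBUG) path earlier in
; this file:
;
;		lis	r0,hi16(Choke)		; Choke firmware call code
;		ori	r0,r0,lo16(Choke)	; and the rest of it
;		li	r3,failContext		; a failure code (failContext is the one shown above)
;		sc				; system ABEND; arrives here as a T_CHOKE exception
;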
1616
1617
1618 .align 5
1619 .globl EXT(chandler)
1620 LEXT(chandler) ; Choke handler
1621
1622 li r31,0 ; Get a 0
1623 mfsprg r25,1 ; Get the current activation
1624 lwz r25,ACT_PER_PROC(r25) ; Get the per_proc block
1625 stw r31,traceMask(0) ; Force tracing off right now
1626
1627
1628
1629 lwz r1,PP_DEBSTACKPTR(r25) ; Get debug stack pointer
1630 cmpwi r1,-1 ; Are we already choking?
1631 bne chokefirst ; Nope...
1632
1633 chokespin: addi r31,r31,1 ; Spin and hope for an analyzer connection...
1634 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1635 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1636 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1637 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1638 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1639 b chokespin ; Spin and hope for an analyzer connection...
1640
1641 chokefirst: li r0,-1 ; Set choke value
1642 mr. r1,r1 ; See if we are on debug stack yet
1643 lwz r10,saver1+4(r4) ; Get the interrupted stack pointer from the savearea
1644 stw r0,PP_DEBSTACKPTR(r25) ; Show we are choking
1645 bne chokestart ; We are not on the debug stack yet...
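; We were already running on the debug stack (PP_DEBSTACKPTR was zero), so
; make sure the interrupted stack pointer still lies within the debug stack
; and has enough room left before carving a red zone below it; if not, just
; spin.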
1646
1647 lwz r2,PP_DEBSTACK_TOP_SS(r25) ; Get debug stack top
1648 sub r11,r2,r10 ; Get stack depth
1649
1650 cmplwi r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED ; Check if stack pointer is ok
1651 bgt chokespin ; Bad stack pointer or too little left, just die...
1652
1653 subi r1,r10,FM_REDZONE ; Make a red zone
1654
1655 chokestart: li r0,0 ; Get a zero
1656 stw r0,FM_BACKPTR(r1) ; We now have terminated the back chain
1657
1658 bl EXT(SysChoked) ; Call the "C" phase of this
1659 b chokespin ; Should not be here so just go spin...
1660
1661
1662 #if VERIFYSAVE
1663 ;
1664 ; Savearea chain verification
1665 ;
1666
1667 versave:
1668 #if 0
1669 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1670 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1671 lwz r23,0(r22) ; (TEST/DEBUG)
1672 mr. r23,r23 ; (TEST/DEBUG)
1673 beqlr- ; (TEST/DEBUG)
1674 mfsprg r20,1 ; Get the current activation
1675 lwz r20,ACT_PER_PROC(r20) ; Get the per_proc block
1676 lwz r21,pfAvailable(r20) ; (TEST/DEBUG)
1677 mr. r21,r21 ; (TEST/DEBUG)
1678 bnelr+ ; (TEST/DEBUG)
1679
1680 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1681 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1682 #endif
1683
1684 #if 0
1685 ;; This code is broken and migration will make matters even worse
1686 ;
1687 ; Make sure that all savearea chains have the right type on them
1688 ;
1689
1690 lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG)
1691 lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1692 ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG)
1693 ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1694 li r20,0 ; (TEST/DEBUG)
1695 lwz r26,0(r27) ; (TEST/DEBUG)
1696 lwz r27,psthreadcnt(r28) ; (TEST/DEBUG)
1697 mr. r26,r26 ; (TEST/DEBUG) Have we locked the test out?
1698 lwz r28,psthreads(r28) ; (TEST/DEBUG)
1699 mflr r31 ; (TEST/DEBUG) Save return
1700 bnelr- ; (TEST/DEBUG) Test already triggered, skip...
1701 b fckgo ; (TEST/DEBUG) Join up...
1702
1703 fcknext: mr. r27,r27 ; (TEST/DEBUG) Any more threads?
1704 bne+ fckxxx ; (TEST/DEBUG) Yes...
1705
1706 mtlr r31 ; (TEST/DEBUG) Restore return
1707 blr ; (TEST/DEBUG) Leave...
1708
1709 fckxxx: lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Get next thread
1710
1711 fckgo: subi r27,r27,1 ; (TEST/DEBUG) Decrement thread count
1712 lwz r24,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) Get activation for the thread
1713 lwz r20,ACT_MACT_PCB(r24) ; (TEST/DEBUG) Get the normal context
1714 li r21,SAVgeneral ; (TEST/DEBUG) Make sure this is all general context
1715 bl versavetype ; (TEST/DEBUG) Check the chain
1716
1717 lwz r20,facctx+FPUsave(r24) ; (TEST/DEBUG) Get regular floating point
1718 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1719 bl versavetype ; (TEST/DEBUG) Check the chain
1720
1721 lwz r20,facctx+VMXsave(r24) ; (TEST/DEBUG) Get regular vector context
1722 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1723 bl versavetype ; (TEST/DEBUG) Check the chain
1724
1725 lwz r29,vmmControl(r24) ; (TEST/DEBUG) Get the virtual machine control blocks
1726 mr. r29,r29 ; (TEST/DEBUG) Are there any?
1727 beq+ fcknext ; (TEST/DEBUG) Nope, next thread...
1728
1729 li r22,kVmmMaxContextsPerThread ; (TEST/DEBUG) Get the number of control blocks
1730 subi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get running start
1731
1732 fcknvmm: subi r22,r22,1 ; (TEST/DEBUG) Do all of them
1733 mr. r22,r22 ; (TEST/DEBUG) Are we all done?
1734 addi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get the next entry
1735 blt- fcknext ; (TEST/DEBUG) Yes, check next thread...
1736
1737 lwz r23,vmmFlags(r29) ; (TEST/DEBUG) Get entry flags
1738 rlwinm. r23,r23,0,0,0 ; (TEST/DEBUG) Is this in use?
1739 beq+ fcknvmm ; (TEST/DEBUG) Not in use...
1740
1741 lwz r20,vmmFacCtx+FPUsave(r29) ; (TEST/DEBUG) Get regular floating point
1742 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1743 bl versavetype ; (TEST/DEBUG) Check the chain
1744
1745 lwz r20,vmmFacCtx+VMXsave(r29) ; (TEST/DEBUG) Get regular vector context
1746 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1747 bl versavetype ; (TEST/DEBUG) Check the chain
1748 b fcknvmm ; (TEST/DEBUG) Get the next vmm block...
1749
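;
; versavetype: walk one savearea chain. r20 holds the chain head and r21 the
; expected savearea type; each element's type byte is pulled out of SAVflags
; and compared, and any mismatch locks out further checks and drops into the
; debugger.
;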
1750 versavetype:
1751 mr. r20,r20 ; (TEST/DEBUG) Chain done?
1752 beqlr- ; (TEST/DEBUG) Yes...
1753
1754 lwz r23,SAVflags(r20) ; (TEST/DEBUG) Get the flags
1755 rlwinm r23,r23,24,24,31 ; (TEST/DEBUG) Position it
1756 cmplw r23,r21 ; (TEST/DEBUG) Are we the correct type?
1757 beq+ versvok ; (TEST/DEBUG) This one is ok...
1758
1759 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1760 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1761 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1762 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1763
1764 versvok: lwz r20,SAVprev+4(r20) ; (TEST/DEBUG) Get the previous one
1765 b versavetype ; (TEST/DEBUG) Go check its type...
1766 #endif
1767
1768
1769 #endif