1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31
32 /* Low level routines dealing with exception entry and exit.
33 * There are various types of exception:
34 *
35 * Interrupt, trap, system call and debugger entry. Each has its own
36 * handler since the state save routine is different for each. The
37 * code is very similar (a lot of cut and paste).
38 *
39 * The code for the FPU disabled handler (lazy fpu) is in cswtch.s
40 */
41
42 #include <debug.h>
43 #include <mach_assert.h>
44 #include <mach/exception_types.h>
45 #include <mach/kern_return.h>
46 #include <mach/ppc/vm_param.h>
47
48 #include <assym.s>
49
50 #include <ppc/asm.h>
51 #include <ppc/proc_reg.h>
52 #include <ppc/trap.h>
53 #include <ppc/exception.h>
54 #include <ppc/savearea.h>
55
56
57 #define VERIFYSAVE 0
58 #define FPVECDBG 0
59 #define FPFLOOD 0
60 #define INSTRUMENT 0
61
62 /*
63 * thandler(type)
64 *
65 * ENTRY: VM switched ON
66 * Interrupts OFF
67 * R3 contains exception code
68 * R4 points to the saved context (virtual address)
69 * Everything is saved in savearea
70 */
71
72 /*
73 * If pcb.ksp == 0 then the kernel stack is already busy,
74 * so we make a stack frame on the current stack,
75 * leaving enough space for the 'red zone' in case the
76 * trapped thread was in the middle of saving state below
77 * its stack pointer.
78 *
79 * otherwise we make a stack frame and claim
80 * the kernel stack (setting pcb.ksp to 0)
81 *
82 * on return, we do the reverse, the last state is popped from the pcb
83 * and pcb.ksp is set to the top of stack
84 */
85
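/*
 * Illustrative sketch (pseudo-C, not assembled) of the stack selection done
 * in thandler below; field names are descriptive, not the exact assym.s offsets:
 *
 *	if (act->ksp == 0) {				// kernel stack already busy
 *		sp = interrupted_r1 - FM_REDZONE;	// stay on it, below the red zone
 *	} else {
 *		sp = act->ksp;				// claim the kernel stack
 *		act->ksp = 0;				// and mark it busy
 *	}
 *	sp -= FM_SIZE;					// push a frame header
 */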
86 /* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when
87 * another trap is taken. We need at least enough space for a saved state
88 * structure plus two small backpointer frames, and we add a few
89 * hundred bytes for the space needed by the C trap handler (which may be less but
90 * may be much more). We're trying to catch kernel stack overflows :-)
91 */
92
93 #define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256
94
95 .text
96
97 .align 5
98 .globl EXT(thandler)
99 LEXT(thandler) ; Trap handler
100
101 mfsprg r13,1 ; Get the current activation
102 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
103
104 lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
105
106 cmpwi cr0,r1,0 ; Are we on interrupt stack?
107 mr r6,r13
108 beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
109 lwz r26,ACT_MACT_SPF(r13) ; Get special flags
110 lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used
111 rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active?
112 lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack
113 bnel- checkassist ; See if we should assist this
114 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
115 stw r8,SAVprev+4(r4) ; Queue the new save area in the front
116
117 #if VERIFYSAVE
118 bl versave ; (TEST/DEBUG)
119 #endif
120
121 lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start
122 cmpwi cr1,r1,0 ; Are we already on kernel stack?
123 stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation
124 lwz r26,saver1+4(r4) ; Get the stack at interrupt time
125
126 bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
127
128 subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
129
130 .L_kstackfree:
131 lwz r31,savesrr1+4(r4) ; Pick up the entry MSR
132 sub r9,r1,r9 ; Get displacement into the kernel stack
133 li r0,0 ; Make this 0
134 rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
135 cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
136 beq cr1,.L_state_on_kstack ; using above test for pcb/stack
137
138 stw r0,ACT_MACT_KSP(r13) ; Show that we have taken the stack
139
140 .L_state_on_kstack:
141 lwz r9,savevrsave(r4) ; Get the VRSAVE register
142 bne-- kernelStackUnaligned ; Stack is unaligned...
143 rlwinm. r6,r31,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
144 subi r1,r1,FM_SIZE ; Push a header onto the current stack
145 bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
146
147 kernelStackNotBad: ; Vector was off
148 beq++ tvecoff ; Vector off, do not save vrsave...
149 stw r9,liveVRS(r25) ; Set the live value
150
151 tvecoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
152
153 #if DEBUG
154 /* If debugging, we need two frames, the first being a dummy
155 * which links back to the trapped routine. The second is
156 * that which the C routine below will need
157 */
158 lwz r3,savesrr0+4(r4) ; Get the point of interruption
159 stw r3,FM_LR_SAVE(r1) ; save old instr ptr as LR value
160 stwu r1, -FM_SIZE(r1) ; and make new frame
161 #endif /* DEBUG */
162
163 mr r30,r4
164 lwz r3,SAVtime(r4)
165 lwz r4,SAVtime+4(r4)
166 addi r5,r13,SYSTEM_TIMER
167 bl EXT(thread_timer_event)
168 addi r5,r25,SYSTEM_STATE
169 bl EXT(state_event)
170
171 lwz r7,ACT_TASK(r13)
172 lwz r8,TASK_VTIMERS(r7)
173 cmpwi r8,0
174 beq++ 0f
175
176 lwz r7,ACT_PER_PROC(r13)
177 li r4,AST_BSD
178 lwz r8,PP_PENDING_AST(r7)
179 or r8,r8,r4
180 stw r8,PP_PENDING_AST(r7)
181 addi r3,r13,ACT_AST
182 bl EXT(hw_atomic_or)
183 0:
184
185 /* call trap handler proper, with
186 * ARG0 = type
187 * ARG1 = saved_state ptr
188 * ARG2 = dsisr
189 * ARG3 = dar
190 */
191
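/*
 * For reference, the C handler invoked below lives in osfmk/ppc/trap.c and has
 * roughly this shape (the exact parameter types may differ in the headers):
 *
 *	struct savearea *trap(int type, struct savearea *ss,
 *	                      unsigned int dsisr, addr64_t dar);
 *
 * Its return value (in r3) is the savearea that thread_return unwinds below.
 */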
192 mr r4,r30
193 lwz r3,saveexception(r30) ; Get the exception code
194 lwz r0,ACT_MACT_SPF(r13) ; Get the special flags
195
196 addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range
197 rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes)
198 cmplwi cr2,r5,T_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not)
199
200 lwz r5,savedsisr(r4) ; Get the saved DSISR
201
202 crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes)
203 rlwinm. r0,r31,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
204
205 cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
206
207 beq-- .L_check_VM
208 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
209 .L_check_VM:
210
211 crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)
212
213 lwz r6,savedar(r4) ; Get the DAR (top)
214 lwz r7,savedar+4(r4) ; Get the DAR (bottom)
215
216 beq- cr2,.L_call_trap ; Do not turn on interrupts for T_PREEMPT
217 beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM...
218
219 /* syscall exception might warp here if there's nothing left
220 * to do except generate a trap
221 */
222
223 .L_call_trap:
224
225 #if FPFLOOD
226 stfd f31,emfp31(r25) ; (TEST/DEBUG)
227 #endif
228
229 bl EXT(trap)
230
231 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
232 mfmsr r7 ; Get the MSR
233 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
234 andc r7,r7,r10 ; Turn off VEC, FP, and EE
235 mtmsr r7 ; Disable for interrupts
236 mfsprg r8,1 ; Get the current activation
237 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
238 /*
239 * This is also the point where new threads come when they are created.
240 * The new thread is set up to look like a thread that took an
241 * interrupt and went immediately into trap.
242 */
243
244 thread_return:
245 lwz r11,SAVflags(r3) ; Get the flags of the current savearea
246 lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
247 lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
248 mfsprg r8,1 ; Get the current thread
249 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
250 rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
251 mr r1,r8
252 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
253
254 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
255 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
256 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
257
258 beq-- chkfac ; We are not leaving the kernel yet...
259
260 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
261 b chkfac ; Go end it all...
262
263
264 ;
265 ; Here is where we go when we detect that the kernel stack is all messed up.
266 ; We just try to dump some info and get into the debugger.
267 ;
268
269 kernelStackBad:
270
271 lwz r3,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
272 subi r3,r3,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
273 sub r3,r1,r3 ; Get displacement into debug stack
274 cmplwi cr2,r3,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
275 blt+ cr2,kernelStackNotBad ; Yeah, that is ok too...
276
277 lis r0,hi16(Choke) ; Choke code
278 ori r0,r0,lo16(Choke) ; and the rest
279 li r3,failStack ; Bad stack code
280 sc ; System ABEND
281
282 kernelStackUnaligned:
283 lis r0,hi16(Choke) ; Choke code
284 ori r0,r0,lo16(Choke) ; and the rest
285 li r3,failUnalignedStk ; Unaligned stack code
286 sc ; System ABEND
287
288
289 /*
290 * shandler(type)
291 *
292 * ENTRY: VM switched ON
293 * Interrupts OFF
294 * R3 contains exception code
295 * R4 points to the saved context (virtual address)
296 * Everything is saved in savearea
297 */
298
299 /*
300 * If pcb.ksp == 0 then the kernel stack is already busy,
301 * this is an error - jump to the debugger entry
302 *
303 * otherwise depending upon the type of
304 * syscall, look it up in the kernel table
305 * or pass it to the server.
306 *
307 * on return, we do the reverse, the state is popped from the pcb
308 * and pcb.ksp is set to the top of stack.
309 */
310
311 /*
312 * NOTE:
313 * mach system calls are negative
314 * BSD system calls are low positive
315 * PPC-only system calls are in the range 0x6xxx
316 * PPC-only "fast" traps are in the range 0x7xxx
317 */
318
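/*
 * As an illustrative sketch (pseudo-C, not assembled) of the dispatch done
 * below, ignoring the Blue Box and VMM redirection checks, where num is the
 * value the caller passed in r0:
 *
 *	if ((num & 0xFFFFF000) == 0x00007000)		// "fast" trap, e.g. 0x7FF1
 *		fastpath();
 *	else if ((num & 0xFFFFF000) == 0x00006000)	// PPC-only syscall
 *		ppcscall();
 *	else if ((int)num < 0)				// Mach trap
 *		look it up in mach_trap_table;
 *	else						// BSD syscall
 *		unix_syscall();
 */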
319 .align 5
320 .globl EXT(shandler)
321 LEXT(shandler) ; System call handler
322
323 lwz r7,savesrr1+4(r4) ; Get the SRR1 value
324 mfsprg r13,1 ; Get the current activation
325 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
326 lwz r0,saver0+4(r4) ; Get the original syscall number
327 lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
328 rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check
329 mr. r17,r17 ; Are we on interrupt stack?
330 lwz r9,savevrsave(r4) ; Get the VRsave register
331 beq-- EXT(ihandler) ; On interrupt stack, not allowed...
332 rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
333 mr r16,r13
334
335 beq++ svecoff ; Vector off, do not save vrsave...
336 stw r9,liveVRS(r25) ; Set the live value
337 ;
338 ; Check if SCs are being redirected for the BlueBox or to VMM
339 ;
340
341 svecoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags
342 mtcrf 0x40,r6 ; Check special flags
343 mtcrf 0x01,r6 ; Check special flags
344 crmove cr6_eq,runningVMbit ; Remember if we are in VMM
345 bne++ cr6,sVMchecked ; Not running VM
346 lwz r18,spcFlags(r25) ; Load per_proc special flags
347 rlwinm. r18,r18,0,FamVMmodebit,FamVMmodebit ; Is FamVMmodebit set?
348 beq sVMchecked ; Not in FAM
349 cmpwi r0,0x6004 ; Is it vmm_dispatch syscall:
350 bne sVMchecked
351 lwz r26,saver3+4(r4) ; Get the original syscall number
352 cmpwi cr6,r26,kvmmExitToHost ; vmm_exit_to_host request
353 sVMchecked:
354 bf++ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected
355 lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area
356 b EXT(atomic_switch_syscall) ; Go to the assist...
357
358 noassist: cmplwi r15,0x7000 ; Do we have a fast path trap?
359 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
360 beql fastpath ; We think it is a fastpath...
361
362 lwz r1,ACT_MACT_KSP(r13) ; Get the kernel stack pointer
363 #if DEBUG
364 mr. r1,r1 ; Are we already on the kernel stack?
365 li r3,T_SYSTEM_CALL ; Yup, pretend we had an interrupt...
366 beq- EXT(ihandler) ; Bad boy, bad boy... What cha gonna do when they come for you?
367 #endif /* DEBUG */
368
369 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
370 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
371 li r0,0 ; Clear this out
372 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
373 stw r13,SAVact(r4) ; Point the savearea at its activation
374
375 #if VERIFYSAVE
376 bl versave ; (TEST/DEBUG)
377 #endif
378
379 lwz r15,saver1+4(r4) ; Grab interrupt time stack
380 mr r30,r4 ; Save pointer to the new context savearea
381 stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val
382 stw r15,FM_BACKPTR(r1) ; Link stack frame backwards
383
384 lwz r3,SAVtime(r30)
385 lwz r4,SAVtime+4(r30)
386 addi r5,r13,SYSTEM_TIMER
387 bl EXT(thread_timer_event)
388 addi r5,r25,SYSTEM_STATE
389 bl EXT(state_event)
390
391 lwz r7,ACT_TASK(r13)
392 lwz r8,TASK_VTIMERS(r7)
393 cmpwi r8,0
394 beq++ 0f
395
396 lwz r7,ACT_PER_PROC(r13)
397 li r4,AST_BSD
398 lwz r8,PP_PENDING_AST(r7)
399 or r8,r8,r4
400 stw r8,PP_PENDING_AST(r7)
401 addi r3,r13,ACT_AST
402 bl EXT(hw_atomic_or)
403 0:
404
405 #if DEBUG
406 /* If debugging, we need two frames, the first being a dummy
407 * which links back to the trapped routine. The second is
408 * that which the C routine below will need
409 */
410 lwz r8,savesrr0+4(r30) ; Get the point of interruption
411 stw r8,FM_LR_SAVE(r1) ; Save old instr ptr as LR value
412 stwu r1, -FM_SIZE(r1) ; and make new frame
413 #endif /* DEBUG */
414
415 mr r4,r30
416
417 lwz r15,SAVflags(r30) ; Get the savearea flags
418 lwz r0,saver0+4(r30) ; Get R0 back
419 mfmsr r11 ; Get the MSR
420 stwu r1,-(FM_SIZE+ARG_SIZE+MUNGE_ARGS_SIZE)(r1) ; Make a stack frame
421 ori r11,r11,lo16(MASK(MSR_EE)) ; Turn on interruption enabled bit
422 rlwinm r10,r0,0,0,19 ; Keep only the top part
423 oris r15,r15,SAVsyscall >> 16 ; Mark that this is a syscall
424 cmplwi r10,0x6000 ; Is it the special ppc-only guy?
425 stw r15,SAVflags(r30) ; Save syscall marker
426 beq-- cr6,exitFromVM ; It is time to exit from alternate context...
427
428 beq-- ppcscall ; Call the ppc-only system call handler...
429
430 mr. r0,r0 ; What kind is it?
431 mtmsr r11 ; Enable interruptions
432
433 blt-- .L_kernel_syscall ; System call number is negative, so this is a mach call...
434
435 lwz r8,ACT_TASK(r13) ; Get our task
436 cmpwi cr0,r0,0x7FFA ; Special blue box call?
437 beq-- .L_notify_interrupt_syscall ; Yeah, call it...
438
439 lwz r7,TASK_SYSCALLS_UNIX(r8) ; Get the current count
440 mr r3,r30 ; Get PCB/savearea
441 mr r4,r13 ; current activation
442 addi r7,r7,1 ; Bump it
443 stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
444
445 #if FPFLOOD
446 stfd f31,emfp31(r25) ; (TEST/DEBUG)
447 #endif
448
449 bl EXT(unix_syscall) ; Check out unix...
450
451 .L_call_server_syscall_exception:
452 li r3,EXC_SYSCALL ; doexception(EXC_SYSCALL, num, 1)
453
454 .L_call_server_exception:
455 mr r4,r0 ; Set syscall selector
456 li r5,1
457 b EXT(doexception) ; Go away, never to return...
458
459 .L_notify_interrupt_syscall:
460 lwz r3,saver3+4(r30) ; Get the new PC address to pass in
461 bl EXT(syscall_notify_interrupt)
462 /*
463 * Ok, return from C function, R3 = return value
464 *
465 * saved state is still in R30 and the active thread is in R16.
466 */
467 mr r31,r16 ; Move the current thread pointer
468 stw r3,saver3+4(r30) ; Stash the return code
469 b .L_thread_syscall_ret_check_ast
470
471 ;
472 ; Handle PPC-only system call interface
473 ; These are called with interruptions disabled
474 ; and the savearea/pcb as the first parameter.
475 ; It is up to the callee to enable interruptions if
476 ; they should be. We are in a state here where
477 ; both interrupts and preemption are ok, but because we could
478 ; be calling diagnostic code we will not enable.
479 ;
480 ; Also, the callee is responsible for finding any parameters
481 ; in the savearea/pcb. It also must set saver3 with any return
482 ; code before returning.
483 ;
484 ; There are 3 possible return codes:
485 ; 0 the call is disabled or something, we treat this like it was bogus
486 ; + the call finished ok, check for AST
487 ; - the call finished ok, do not check for AST
488 ;
489 ; Note: the last option is intended for special diagnostics calls that
490 ; want the thread to return and execute before checking for preemption.
491 ;
492 ; NOTE: Both R16 (thread) and R30 (savearea) need to be preserved over this call!!!!
493 ;
494
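;
; As an illustrative sketch (not assembled), an entry in the PPCcalls table
; behaves roughly like this; the C signature is an assumption, the dispatch
; below only passes the savearea in r3 and the activation in r4 and then
; tests the sign of the value returned in r3:
;
;	int some_ppc_call(struct savearea *save, thread_act_t act)
;	{
;		// fetch arguments from, and store results into, the savearea
;		return 1;	// >0: done, check for ASTs
;				//  0: disabled/bogus, raise an exception
;				// <0: done, skip the AST check
;	}
;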
495 .align 5
496
497 ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table
498 lis r10,hi16(EXT(PPCcalls)) ; Get PPC-only system call table
499 cmplwi r11,PPCcallmax ; See if we are too big
500 ori r10,r10,lo16(EXT(PPCcalls)) ; Merge in low half
501 bgt- .L_call_server_syscall_exception ; Bogus call...
502 lwzx r11,r10,r11 ; Get function address
503
504 ;
505 ; Note: make sure we do not change the savearea in R30 to
506 ; a different register without checking. Some of the PPCcalls
507 ; depend upon it being there.
508 ;
509
510 mr r3,r30 ; Pass the savearea
511 mr r4,r13 ; Pass the activation
512 mr. r11,r11 ; See if there is a function here
513 mtctr r11 ; Set the function address
514 beq- .L_call_server_syscall_exception ; Disabled call...
515 #if INSTRUMENT
516 mfspr r4,pmc1 ; Get stamp
517 stw r4,0x6100+(9*16)+0x0(0) ; Save it
518 mfspr r4,pmc2 ; Get stamp
519 stw r4,0x6100+(9*16)+0x4(0) ; Save it
520 mfspr r4,pmc3 ; Get stamp
521 stw r4,0x6100+(9*16)+0x8(0) ; Save it
522 mfspr r4,pmc4 ; Get stamp
523 stw r4,0x6100+(9*16)+0xC(0) ; Save it
524 #endif
525 bctrl ; Call it
526
527 .globl EXT(ppcscret)
528
529 LEXT(ppcscret)
530 mr. r3,r3 ; See what we should do
531 mr r31,r16 ; Restore the current thread pointer
532 bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return....
533 mfsprg r10,1 ; Get the current activation
534 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
535 blt+ .L_thread_syscall_return ; Return, but no ASTs....
536 lwz r0,saver0+4(r30) ; Restore the system call number
537 b .L_call_server_syscall_exception ; Go to common exit...
538
539
540
541 /*
542 * we get here for mach system calls
543 * when kdebug tracing is enabled
544 */
545
546 ksystrace:
547 mr r4,r30 ; Pass in saved state
548 bl EXT(syscall_trace)
549
550 cmplw r31,r29 ; Is this syscall in the table?
551 add r31,r27,r28 ; Point right to the syscall table entry
552
553 bge- .L_call_server_syscall_exception ; The syscall number is invalid
554
555 lwz r0,savesrr1(r30) ; Get the saved srr1
556 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
557 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
558 beq-- .L_ksystrace_munge
559 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
560
561 .L_ksystrace_munge:
562 cmplwi r0,0 ; do we have a munger to call?
563 mtctr r0 ; Set the function call address
564 addi r3,r30,saver3 ; Pointer to args from save area
565 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
566 beq-- .L_ksystrace_trapcall ; just make the trap call
567 bctrl ; Call the munge function
568
569 .L_ksystrace_trapcall:
570 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
571 mtctr r0 ; Set the function call address
572 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
573 bctrl
574
575 mr r4,r30 ; Pass in the savearea
576 bl EXT(syscall_trace_end) ; Trace the exit of the system call
577 b .L_mach_return
578
579
580
581 /* Once here, we know that the syscall was -ve
582 * we should still have r1=ksp,
583 * r16 = pointer to current thread,
584 * r13 = pointer to top activation,
585 * r0 = syscall number
586 * r30 = pointer to saved state (in pcb)
587 */
588
589 .align 5
590
591 .L_kernel_syscall:
592 ;
593 ; Call a function that can print out our syscall info
594 ; Note that we don't care about any volatiles yet
595 ;
596 lwz r10,ACT_TASK(r13) ; Get our task
597 lwz r0,saver0+4(r30)
598 lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
599 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
600 ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
601 lwz r8,0(r8) ; Get kdebug_enable
602
603 lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
604 neg r31,r0 ; Make this positive
605 mr r3,r31 ; save it
606 slwi r27,r3,4 ; multiply by 16
607 slwi r3,r3,2 ; and the original by 4
608 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
609 add r27,r27,r3 ; for a total of 20x (5 words/entry)
610 addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
611 cmplwi r8,0 ; Is kdebug_enable non-zero
612 stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
613 bne-- ksystrace ; yes, tracing enabled
614
615 cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
616 add r31,r27,r28 ; Point right to the syscall table entry
617
618 bge-- .L_call_server_syscall_exception ; The syscall number is invalid
619
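;
; The index math above, as an illustrative sketch (pseudo-C, not assembled);
; each table entry is 5 words (20 bytes), so the offset is num*16 + num*4:
;
;	num   = -(int)saved_r0;				// Mach traps are negative
;	entry = (char *)mach_trap_table + (num * 20);
;	if (num >= MACH_TRAP_TABLE_COUNT)
;		doexception(EXC_SYSCALL, num, 1);	// invalid trap number
;
; MACH_TRAP_FUNCTION and MACH_TRAP_ARG_MUNGE32/64 below are assym.s offsets
; into that 20-byte entry.
;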
620 lwz r0,savesrr1(r30) ; Get the saved srr1
621 rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
622 lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
623 beq-- .L_kernel_syscall_munge
624 lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
625
626 .L_kernel_syscall_munge:
627 cmplwi r0,0 ; test for null munger
628 mtctr r0 ; Set the function call address
629 addi r3,r30,saver3 ; Pointer to args from save area
630 addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
631 beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
632 bctrl ; Call the munge function
633
634 .L_kernel_syscall_trapcall:
635 lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
636 mtctr r0 ; Set the function call address
637 addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
638
639 #if FPFLOOD
640 stfd f31,emfp31(r25) ; (TEST/DEBUG)
641 #endif
642
643 bctrl
644
645
646 /*
647 * Ok, return from C function, R3 = return value
648 *
649 * get the active thread's PCB pointer and thus pointer to user state
650 * saved state is still in R30 and the active thread is in R16
651 */
652
653 .L_mach_return:
654 srawi r0,r3,31 ; properly extend the return code
655 cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
656 mr r31,r16 ; Move the current thread pointer
657 stw r0, saver3(r30) ; stash the high part of the return code
658 stw r3,saver3+4(r30) ; Stash the low part of the return code
659 beq-- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
660 .L_mach_invalid_arg:
661
662
663 /* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
664 * the syscall may perform a thread_set_syscall_return
665 * followed by a thread_exception_return, ending up
666 * at thread_syscall_return below, with SS_R3 having
667 * been set up already
668 *
669 * When we are here, r31 should point to the current thread,
670 * r30 should point to the current pcb
671 * r3 contains value that we're going to return to the user
672 * which has already been stored back into the save area
673 */
674
675 .L_thread_syscall_ret_check_ast:
676 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
677 mfmsr r12 ; Get the current MSR
678 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
679 andc r12,r12,r10 ; Turn off VEC, FP, and EE
680 mtmsr r12 ; Turn interruptions off
681
682 mfsprg r10,1 ; Get the current activation
683 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
684
685 /* Check to see if there's an outstanding AST */
686
687 lwz r4,PP_PENDING_AST(r10)
688 cmpi cr0,r4, 0 ; Any pending asts?
689 beq++ cr0,.L_syscall_no_ast ; Nope...
690
691 /* Yes there is, call ast_taken
692 * pretending that the user thread took an AST exception here,
693 * ast_taken will save all state and bring us back here
694 */
695
696 #if DEBUG
697 /* debug assert - make sure that we're not returning to kernel */
698 lwz r3,savesrr1+4(r30)
699 andi. r3,r3,MASK(MSR_PR)
700 bne++ scrnotkern ; returning to user level, check
701
702 lis r0,hi16(Choke) ; Choke code
703 ori r0,r0,lo16(Choke) ; and the rest
704 li r3,failContext ; Bad state code
705 sc ; System ABEND
706
707 scrnotkern:
708 #endif /* DEBUG */
709
710 lis r3,hi16(AST_ALL) ; Set ast flags
711 li r4,1 ; Set interrupt allowed
712 ori r3,r3,lo16(AST_ALL)
713 bl EXT(ast_taken) ; Process the pending ast
714 b .L_thread_syscall_ret_check_ast ; Go see if there was another...
715
716 .L_mach_invalid_ret:
717 /*
718 * need to figure out why we got an KERN_INVALID_ARG
719 * if it was due to a non-existent system call
720 * then we want to throw an exception... otherwise
721 * we want to pass the error code back to the caller
722 */
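/*
 * i.e., as an illustrative sketch (pseudo-C) of the check performed below:
 *
 *	if (mach_trap_table[num].function == kern_invalid)
 *		doexception(EXC_SYSCALL, num, 1);	// no such trap: raise the exception
 *	else
 *		return KERN_INVALID_ARGUMENT;		// a real trap rejected its arguments
 */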
723 lwz r0,saver0+4(r30) ; reload the original syscall number
724 neg r28,r0 ; Make this positive
725 mr r4,r28 ; save a copy
726 slwi r27,r4,4 ; multiply by 16
727 slwi r4,r4,2 ; and another 4
728 lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
729 add r27,r27,r4 ; for a total of 20x (5 words/entry)
730 ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
731 add r28,r27,r28 ; Point right to the syscall table entry
732 lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
733 lis r28,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
734 ori r28,r28,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
735 cmpw cr0,r27,r28 ; Check if this is an invalid system call
736 beq-- .L_call_server_syscall_exception ; We have a bad system call
737 b .L_mach_invalid_arg ; a system call returned KERN_INVALID_ARG
738
739
740 /* thread_exception_return returns to here, almost all
741 * registers intact. It expects a full context restore
742 * of what it hasn't restored itself (i.e. what we use).
743 *
744 * In particular for us,
745 * we still have r31 pointing to the current thread,
746 * and r30 pointing to the current pcb
747 */
748
749 .align 5
750
751 .L_syscall_no_ast:
752 .L_thread_syscall_return:
753
754 mr r3,r30 ; Get savearea to the correct register for common exit
755
756 lwz r11,SAVflags(r30) ; Get the flags
757 lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
758 lwz r4,SAVprev+4(r30) ; Get the previous save area
759 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
760 mfsprg r8,1 ; Now find the current activation
761 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
762 stw r11,SAVflags(r30) ; Stick back the flags
763 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
764 stw r4,ACT_MACT_PCB(r8) ; Save previous save area
765 b chkfac ; Go end it all...
766
767 /*
768 * thread_exception_return()
769 *
770 * Return to user mode directly from within a system call.
771 */
772
773 .align 5
774 .globl EXT(thread_bootstrap_return)
775 LEXT(thread_bootstrap_return) ; NOTE: THIS IS GOING AWAY IN A FEW DAYS....
776
777 .globl EXT(thread_exception_return)
778 LEXT(thread_exception_return) ; Directly return to user mode
779
780 .L_thread_exc_ret_check_ast:
781 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
782 mfmsr r3 ; Get the MSR
783 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
784 andc r3,r3,r10 ; Turn off VEC, FP, and EE
785 mtmsr r3 ; Disable interrupts
786
787 /* Check to see if there's an outstanding AST */
788 /* We don't bother establishing a call frame even though CHECK_AST
789 can invoke ast_taken(), because it can just borrow our caller's
790 frame, given that we're not going to return.
791 */
792
793 mfsprg r10,1 ; Get the current activation
794 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
795 lwz r4,PP_PENDING_AST(r10)
796 cmpi cr0,r4, 0
797 beq+ cr0,.L_exc_ret_no_ast
798
799 /* Yes there is, call ast_taken
800 * pretending that the user thread took an AST exception here,
801 * ast_taken will save all state and bring us back here
802 */
803
804 lis r3,hi16(AST_ALL)
805 li r4,1
806 ori r3,r3,lo16(AST_ALL)
807
808 bl EXT(ast_taken)
809 b .L_thread_exc_ret_check_ast ; check for a second AST (rare)
810
811 /* arriving here, interrupts should be disabled */
812 /* Get the active thread's PCB pointer to restore regs
813 */
814 .L_exc_ret_no_ast:
815
816 mfsprg r30,1 ; Get the current activation
817 mr r31,r30
818
819 lwz r30,ACT_MACT_PCB(r30)
820 mr. r30,r30 ; Is there any context yet?
821 beq- makeDummyCtx ; No, hack one up...
822 #if DEBUG
823 /*
824 * debug assert - make sure that we're not returning to kernel
825 * get the active thread's PCB pointer and thus pointer to user state
826 */
827
828 lwz r3,savesrr1+4(r30)
829 andi. r3,r3,MASK(MSR_PR)
830 bne+ ret_user2 ; We are ok...
831
832 lis r0,hi16(Choke) ; Choke code
833 ori r0,r0,lo16(Choke) ; and the rest
834 li r3,failContext ; Bad state code
835 sc ; System ABEND
836
837 ret_user2:
838 #endif /* DEBUG */
839
840 /* If the system call flag isn't set, then we came from a trap,
841 * so warp into the return_from_trap (thread_return) routine,
842 * which takes PCB pointer in R3, not in r30!
843 */
844 lwz r0,SAVflags(r30) ; Grab the savearea flags
845 andis. r0,r0,SAVsyscall>>16 ; Are we returning from a syscall?
846 mr r3,r30 ; Copy pcb pointer into r3 in case we need it
847 beq-- cr0,thread_return ; Nope, must be a thread return...
848 b .L_thread_syscall_return ; Join up with the system call return...
849
850 ;
851 ; This is where we handle someone who did a thread_create followed
852 ; by a thread_resume with no intervening thread_set_state. Just make an
853 ; empty context, initialize it to trash and let em execute at 0...
854 ;
855
856 .align 5
857
858 makeDummyCtx:
859 bl EXT(save_get) ; Get a save_area
860 li r4,SAVgeneral ; Get the general context type
861 li r0,0 ; Get a 0
862 stb r4,SAVflags+2(r3) ; Set type
863 addi r2,r3,savefpscr+4 ; Point past what we are clearing
864 mr r4,r3 ; Save the start
865
866 cleardummy: stw r0,0(r4) ; Clear stuff
867 addi r4,r4,4 ; Next word
868 cmplw r4,r2 ; Still some more?
869 blt+ cleardummy ; Yeah...
870
871 lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR
872 ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part
873 stw r2,savesrr1+4(r3) ; Set the default user MSR
874
875 b thread_return ; Go let em try to execute, hah!
876
877 /*
878 * ihandler(type)
879 *
880 * ENTRY: VM switched ON
881 * Interrupts OFF
882 * R3 contains exception code
883 * R4 points to the saved context (virtual address)
884 * Everything is saved in savearea
885 *
886 */
887
888 .align 5
889 .globl EXT(ihandler)
890 LEXT(ihandler) ; Interrupt handler
891
892 /*
893 * get the value of istackptr, if it's zero then we're already on the
894 * interrupt stack.
895 */
896
897 lwz r10,savesrr1+4(r4) ; Get SRR1
898 lwz r7,savevrsave(r4) ; Get the VRSAVE register
899 mfsprg r13,1 ; Get the current activation
900 lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
901 li r14,0 ; Zero this for now
902 rlwinm. r16,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
903 lwz r1,PP_ISTACKPTR(r25) ; Get the interrupt stack
904 li r16,0 ; Zero this for now
905
906 beq+ ivecoff ; Vector off, do not save vrsave...
907 stw r7,liveVRS(r25) ; Set the live value
908
909 ivecoff: li r0,0 ; Get a constant 0
910 rlwinm r5,r10,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
911 mr. r1,r1 ; Is it active?
912 cmplwi cr2,r5,0 ; cr2_eq == 1 if yes
913 mr r16,r13
914 lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
915 lwz r9,saver1+4(r4) ; Pick up the rupt time stack
916 stw r14,SAVprev+4(r4) ; Queue the new save area in the front
917 stw r13,SAVact(r4) ; Point the savearea at its activation
918 stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
919 beq cr2,ifromk
920 stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
921
922 ifromk: bne .L_istackfree ; Nope...
923
924 /* We're already on the interrupt stack, get back the old
925 * stack pointer and make room for a frame
926 */
927
928 lwz r10,PP_INTSTACK_TOP_SS(r25) ; Get the top of the interrupt stack
929 addi r5,r9,INTSTACK_SIZE-FM_SIZE ; Shift stack for bounds check
930 subi r1,r9,FM_REDZONE ; Back up beyond the red zone
931 sub r5,r5,r10 ; Get displacement into stack
932 cmplwi r5,INTSTACK_SIZE-FM_SIZE ; Is the stack actually invalid?
933 blt+ ihsetback ; The stack is ok...
934
935 lwz r5,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top
936 subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack
937 sub r5,r1,r5 ; Get displacement into debug stack
938 cmplwi cr2,r5,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack
939 blt+ cr2,ihsetback ; Yeah, that is ok too...
940
941 lis r0,hi16(Choke) ; Choke code
942 ori r0,r0,lo16(Choke) ; and the rest
943 li r3,failStack ; Bad stack code
944 sc ; System ABEND
945
946 intUnalignedStk:
947 lis r0,hi16(Choke) ; Choke code
948 ori r0,r0,lo16(Choke) ; and the rest
949 li r3,failUnalignedStk ; Unaligned stack code
950 sc ; System ABEND
951
952 .align 5
953
954 .L_istackfree:
955 rlwinm. r0,r1,0,28,31 ; Check if stack is aligned (and get 0)
956 lwz r10,SAVflags(r4) ; Get savearea flags
957 bne-- intUnalignedStk ; Stack is unaligned...
958 stw r0,PP_ISTACKPTR(r25) ; Mark the stack in use
959 oris r10,r10,hi16(SAVrststk) ; Indicate we reset stack when we return from this one
960 stw r10,SAVflags(r4) ; Stick it back
961
962 /*
963 * To summarize, when we reach here, the state has been saved and
964 * the stack is marked as busy. We now generate a small
965 * stack frame with backpointers to follow the calling
966 * conventions. We set up the backpointers to the trapped
967 * routine allowing us to backtrace.
968 */
969
970 ihsetback: subi r1,r1,FM_SIZE ; Make a new frame
971 stw r9,FM_BACKPTR(r1) ; Point back to previous stackptr
972
973 #if VERIFYSAVE
974 beq- cr1,ihbootnover ; (TEST/DEBUG)
975 bl versave ; (TEST/DEBUG)
976 ihbootnover: ; (TEST/DEBUG)
977 #endif
978
979 #if DEBUG
980 /* If debugging, we need two frames, the first being a dummy
981 * which links back to the trapped routine. The second is
982 * that which the C routine below will need
983 */
984 lwz r5,savesrr0+4(r4) ; Get interrupt address
985 stw r5,FM_LR_SAVE(r1) ; save old instr ptr as LR value
986 stwu r1,-FM_SIZE(r1) ; Make another new frame for C routine
987 #endif /* DEBUG */
988
989 mr r31,r3
990 mr r30,r4
991
992 lwz r3,SAVtime(r4)
993 lwz r4,SAVtime+4(r4)
994 addi r5,r25,PP_PROCESSOR
995 lwz r5,KERNEL_TIMER(r5)
996 bl EXT(thread_timer_event)
997 addi r6,r25,PP_PROCESSOR
998 lwz r5,CURRENT_STATE(r6)
999 addi r7,r6,USER_STATE
1000 cmplw r5,r7
1001 bne 0f
1002 addi r5,r6,SYSTEM_STATE
1003 bl EXT(state_event)
1004 0:
1005
1006 lwz r7,ACT_TASK(r13)
1007 lwz r8,TASK_VTIMERS(r7)
1008 cmpwi r8,0
1009 beq++ 0f
1010
1011 lwz r7,ACT_PER_PROC(r13)
1012 li r4,AST_BSD
1013 lwz r8,PP_PENDING_AST(r7)
1014 or r8,r8,r4
1015 stw r8,PP_PENDING_AST(r7)
1016 addi r3,r13,ACT_AST
1017 bl EXT(hw_atomic_or)
1018 0:
1019
1020 mr r3,r31
1021 mr r4,r30
1022 lwz r5,savedsisr(r30) ; Get the DSISR
1023 lwz r6,savedar+4(r30) ; Get the DAR
1024
1025 #if FPFLOOD
1026 stfd f31,emfp31(r25) ; (TEST/DEBUG)
1027 #endif
1028
1029 bl EXT(interrupt)
1030
1031 /* interrupt() returns a pointer to the saved state in r3 */
1032
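/* For reference, the C routine called above lives in osfmk/ppc/interrupt.c and
 * has roughly this shape (the exact parameter types may differ):
 *
 *	struct savearea *interrupt(int type, struct savearea *ss,
 *	                           unsigned int dsisr, unsigned int dar);
 */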
1033 lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
1034 mfmsr r0 ; Get our MSR
1035 ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
1036 andc r0,r0,r10 ; Turn off VEC, FP, and EE
1037 mtmsr r0 ; Make sure interrupts are disabled
1038 mfsprg r8,1 ; Get the current activation
1039 lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
1040
1041 lwz r7,SAVflags(r3) ; Pick up the flags
1042 lwz r9,SAVprev+4(r3) ; Get previous save area
1043 cmplwi cr1,r8,0 ; Are we still initializing?
1044 lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return
1045 andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack?
1046 stw r9,ACT_MACT_PCB(r8) ; Point to previous context savearea
1047 mr r4,r3 ; Move the savearea pointer
1048 beq .L_no_int_ast2 ; Get going if not the top-o-stack...
1049
1050
1051 /* We're the last frame on the stack. Restore istackptr to empty state.
1052 *
1053 * Check for ASTs if one of the below is true:
1054 * returning to user mode
1055 * returning to a kloaded server
1056 */
1057 lwz r9,PP_INTSTACK_TOP_SS(r10) ; Get the empty stack value
1058 andc r7,r7,r11 ; Remove the stack reset bit in case we pass this one
1059 stw r9,PP_ISTACKPTR(r10) ; Save that saved state ptr
1060 lwz r3,ACT_PREEMPT_CNT(r8) ; Get preemption level
1061 stw r7,SAVflags(r4) ; Save the flags
1062 cmplwi r3, 0 ; Check for preemption
1063 bne .L_no_int_ast ; Do not preempt if level is not zero
1064 andi. r6,r12,MASK(MSR_PR) ; privilege mode
1065 lwz r11,PP_PENDING_AST(r10) ; Get the pending AST mask
1066 beq- .L_kernel_int_ast ; In kernel space, AST_URGENT check
1067 li r3,T_AST ; Assume the worst
1068 mr. r11,r11 ; Are there any pending?
1069 beq .L_no_int_ast ; Nope...
1070 b .L_call_thandler
1071
1072 .L_kernel_int_ast:
1073 andi. r11,r11,AST_URGENT ; Do we have AST_URGENT?
1074 li r3,T_PREEMPT ; Assume the worst
1075 beq .L_no_int_ast ; Nope...
1076
1077 /*
1078 * There is a pending AST. Massage things to make it look like
1079 * we took a trap and jump into the trap handler. To do this
1080 * we essentially pretend to return from the interrupt but
1081 * at the last minute jump into the trap handler with an AST
1082 * trap instead of performing an rfi.
1083 */
1084
1085 .L_call_thandler:
1086 stw r3,saveexception(r4) ; Set the exception code to T_AST/T_PREEMPT
1087 b EXT(thandler) ; We need to preempt so treat like a trap...
1088
1089 .L_no_int_ast:
1090 mr r3,r4 ; Get into the right register for common code
1091
1092 .L_no_int_ast2:
1093 rlwinm r7,r7,0,15,13 ; Clear the syscall flag
1094 li r4,0 ; Assume for a moment that we are in init
1095 stw r7,SAVflags(r3) ; Set the flags with cleared syscall flag
1096 beq-- cr1,chkfac ; Jump away if we are in init...
1097
1098 lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker
1099
1100
1101 ;
1102 ; This section is common to all exception exits. It throws away vector
1103 ; and floating point saveareas as the exception level of a thread is
1104 ; exited.
1105 ;
1106 ; It also enables the facility if its context is live
1107 ; Requires:
1108 ; R3 = Savearea to be released (virtual)
1109 ; R4 = New top of savearea stack (could be 0)
1110 ; R8 = pointer to activation
1111 ; R10 = per_proc block
1112 ;
1113 ; Note that barring unforeseen crashes, there is no escape from this point
1114 ; on. We WILL call exception_exit and launch this context. No worries
1115 ; about preemption or interruptions here.
1116 ;
1117 ; Note that we will set up R26 with whatever context we will be launching,
1118 ; so it will indicate the current, or the deferred one if it is set and we
1119 ; are going to user state. CR2_eq will be set to indicate deferred.
1120 ;
1121
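;
; As an illustrative sketch (pseudo-C, not assembled), the per-facility work
; below for the FPU is roughly the following; the VMX (vector) path is
; analogous, and the names are descriptive rather than exact structure fields:
;
;	if (exiting_level == ctx->FPUlevel) {		// leaving the live level
;		atomically clear FPUowner on the CPU that last ran this context;
;		if (ctx->FPUsave && ctx->FPUsave->level == exiting_level)
;			dequeue that savearea onto the per_proc quickfret list;
;		ctx->FPUlevel = level of the next stacked savearea, or 0;
;	}
;	set MSR_FP in the return MSR only if this CPU still owns the live
;	context and the level about to be launched is the live one.
;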
1122 chkfac: lwz r29,savesrr1+4(r3) ; Get the current MSR
1123 mr. r28,r8 ; Are we still in boot?
1124 mr r31,r10 ; Move per_proc address
1125 mr r30,r4 ; Preserve new level
1126 mr r27,r3 ; Save the old level
1127 beq-- chkenax ; Yeah, skip it all...
1128
1129 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going into user state?
1130
1131 lwz r20,curctx(r28) ; Get our current context
1132 lwz r26,deferctx(r28) ; Get any deferred context switch
1133 li r0,1 ; Get set to hold off quickfret
1134 rlwinm r29,r29,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now
1135 lwz r21,FPUlevel(r20) ; Get the facility level
1136 cmplwi cr2,r26,0 ; Are we going into a deferred context later?
1137 rlwinm r29,r29,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now
1138 crnor cr2_eq,cr0_eq,cr2_eq ; Set cr2_eq if going to user state and there is deferred
1139 lhz r19,PP_CPU_NUMBER(r31) ; Get our CPU number
1140 cmplw r27,r21 ; Are we returning from the active level?
1141 stw r0,holdQFret(r31) ; Make sure we hold off releasing quickfret
1142 bne++ fpuchkena ; Nope...
1143
1144 ;
1145 ; First clean up any live context we are returning from
1146 ;
1147
1148 lwz r22,FPUcpu(r20) ; Get CPU this context was last dispatched on
1149
1150 stw r19,FPUcpu(r20) ; Claim context for us
1151
1152 eieio ; Make sure this gets out before owner clear
1153
1154 #if ppeSize != 16
1155 #error per_proc_entry is not 16 bytes in size
1156 #endif
1157
1158 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1159 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1160 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1161 li r24,FPUowner ; Displacement to float owner
1162 add r22,r23,r22 ; Point to the owner per_proc_entry
1163 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1164
1165 fpuinvothr: lwarx r23,r24,r22 ; Get the owner
1166
1167 sub r0,r23,r20 ; Subtract one from the other
1168 sub r21,r20,r23 ; Subtract the other from the one
1169 or r21,r21,r0 ; Combine them
1170 srawi r21,r21,31 ; Get a 0 if equal or -1 if not
1171 and r23,r23,r21 ; Make 0 if same, unchanged if not
1172 stwcx. r23,r24,r22 ; Try to invalidate it
1173 bne-- fpuinvothr ; Try again if there was a collision...
1174
1175 isync
1176
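;
; The lwarx/stwcx. loop above amounts to (illustrative only):
;
;	if (owner_per_proc->FPUowner == our_context)
;		owner_per_proc->FPUowner = NULL;
;
; The sub/or/srawi/and sequence builds "0 if equal, unchanged if not" so that
; no branch is needed between the lwarx and the stwcx.
;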
1177 ;
1178 ; Now if there is a savearea associated with the popped context, release it.
1179 ; Either way, pop the level to the top stacked context.
1180 ;
1181
1182 lwz r22,FPUsave(r20) ; Get pointer to the first savearea
1183 li r21,0 ; Assume we popped all the way out
1184 mr. r22,r22 ; Is there anything there?
1185 beq++ fpusetlvl ; No, see if we need to enable...
1186
1187 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1188 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1189 bne fpusetlvl ; No, leave as is...
1190
1191 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1192 li r21,0 ; Assume we popped all the way out
1193 mr. r24,r24 ; Any more context stacked?
1194 beq-- fpuonlyone ; Nope...
1195 lwz r21,SAVlevel(r24) ; Get the level associated with save
1196
1197 fpuonlyone: stw r24,FPUsave(r20) ; Dequeue this savearea
1198
1199 rlwinm r3,r22,0,0,19 ; Find main savearea header
1200
1201 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1202 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1203 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1204 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1205 stw r8,SAVprev(r22) ; Link the old in (top)
1206 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1207 xor r3,r22,r3 ; Convert to physical
1208 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1209 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1210
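;
; Illustrative sketch of the quickfret push just done (the same pattern is
; repeated for the other facility saveareas below): the freed savearea is
; chained, by physical address, onto the per_proc quickfret list so it can be
; recycled later without taking a lock here:
;
;	phys = savearea_virt ^ block->SACvrswap;	// low word shown; the code keeps a high/low pair
;	save->SAVprev = per_proc->quickfret;
;	per_proc->quickfret = phys;
;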
1211 #if FPVECDBG
1212 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1213 li r2,0x3301 ; (TEST/DEBUG)
1214 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1215 sc ; (TEST/DEBUG)
1216 #endif
1217
1218 fpusetlvl: stw r21,FPUlevel(r20) ; Save the level
1219
1220 ;
1221 ; Here we check if we are at the right level
1222 ; We need to check the level we are entering, not the one we are exiting.
1223 ; Therefore, we will use the defer level if it is non-zero and we are
1224 ; going into user state.
1225 ;
1226
1227 fpuchkena: bt-- cr2_eq,fpuhasdfrd ; Skip if deferred, R26 already set up...
1228 mr r26,r20 ; Use the non-deferred value
1229
1230 fpuhasdfrd:
1231 #if 0
1232 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; (TEST/DEBUG) Going into user state?
1233 beq fpunusrstt ; (TEST/DEBUG) Nope...
1234 lwz r23,FPUlevel(r26) ; (TEST/DEBUG) Get the level ID
1235 lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
1236 mr. r23,r23 ; (TEST/DEBUG) Should be level 0
1237 beq++ fpulvl0 ; (TEST/DEBUG) Yes...
1238
1239 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1240 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1241 sc ; (TEST/DEBUG) System ABEND
1242
1243 fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
1244 beq fpunusrstt ; (TEST/DEBUG) No...
1245 lwz r23,SAVlevel(r24) ; (TEST/DEBUG) Get level of context
1246 lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
1247 mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
1248 beq++ fpulvl0b ; (TEST/DEBUG) Yes...
1249
1250 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1251 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1252 sc ; (TEST/DEBUG) System ABEND
1253
1254 fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
1255 beq++ fpunusrstt ; (TEST/DEBUG) Nope...
1256
1257 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1258 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1259 sc ; (TEST/DEBUG) System ABEND
1260
1261 fpunusrstt: ; (TEST/DEBUG)
1262 #endif
1263
1264 lwz r21,FPUowner(r31) ; Get the ID of the live context
1265 lwz r23,FPUlevel(r26) ; Get the level ID
1266 lwz r24,FPUcpu(r26) ; Get the CPU that the context was last dispatched on
1267 cmplw cr3,r26,r21 ; Do we have the live context?
1268 cmplw r30,r23 ; Are we about to launch the live level?
1269 bne-- cr3,chkvec ; No, can not possibly enable...
1270 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1271 bne-- chkvec ; No, not live...
1272 bne-- cr1,chkvec ; No, wrong cpu, have to enable later....
1273
1274 lwz r24,FPUsave(r26) ; Get the first savearea
1275 mr. r24,r24 ; Any savearea?
1276 beq++ fpuena ; Nope...
1277 lwz r25,SAVlevel(r24) ; Get the level of savearea
1278 lwz r0,SAVprev+4(r24) ; Get the previous
1279
1280 cmplw r30,r25 ; Is savearea for the level we are launching?
1281 bne++ fpuena ; No, just go enable...
1282
1283 stw r0,FPUsave(r26) ; Pop the chain
1284
1285 rlwinm r3,r24,0,0,19 ; Find main savearea header
1286
1287 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1288 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1289 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1290 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1291 stw r8,SAVprev(r24) ; Link the old in (top)
1292 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1293 xor r3,r24,r3 ; Convert to physical
1294 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1295 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1296
1297 #if FPVECDBG
1298 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1299 li r2,0x3302 ; (TEST/DEBUG)
1300 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1301 sc ; (TEST/DEBUG)
1302 #endif
1303
1304 fpuena: ori r29,r29,lo16(MASK(MSR_FP)) ; Enable facility
1305
1306 chkvec:
1307
1308 lwz r21,VMXlevel(r20) ; Get the facility level
1309
1310 cmplw r27,r21 ; Are we returning from the active level?
1311 bne+ vmxchkena ; Nope...
1312
1313
1314 ;
1315 ; First clean up any live context we are returning from
1316 ;
1317
1318 lwz r22,VMXcpu(r20) ; Get CPU this context was last dispatched on
1319
1320 stw r19,VMXcpu(r20) ; Claim context for us
1321
1322 eieio ; Make sure this gets out before owner clear
1323
1324 lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
1325 slwi r22,r22,4 ; Find offset to the owner per_proc_entry
1326 ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
1327 li r24,VMXowner ; Displacement to float owner
1328 add r22,r23,r22 ; Point to the owner per_proc_entry
1329 lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
1330
1331 vmxinvothr: lwarx r23,r24,r22 ; Get the owner
1332
1333 sub r0,r23,r20 ; Subtract one from the other
1334 sub r21,r20,r23 ; Subtract the other from the one
1335 or r21,r21,r0 ; Combine them
1336 srawi r21,r21,31 ; Get a 0 if equal or -1 if not
1337 and r23,r23,r21 ; Make 0 if same, unchanged if not
1338 stwcx. r23,r24,r22 ; Try to invalidate it
1339 bne-- vmxinvothr ; Try again if there was a collision...
1340
1341 isync
1342
1343 ;
1344 ; Now if there is a savearea associated with the popped context, release it.
1345 ; Either way, pop the level to the top stacked context.
1346 ;
1347
1348 lwz r22,VMXsave(r20) ; Get pointer to the first savearea
1349 li r21,0 ; Assume we popped all the way out
1350 mr. r22,r22 ; Is there anything there?
1351 beq++ vmxsetlvl ; No, see if we need to enable...
1352
1353 lwz r21,SAVlevel(r22) ; Get the level of that savearea
1354 cmplw r21,r27 ; Is this the saved copy of the live stuff?
1355 bne vmxsetlvl ; No, leave as is...
1356
1357 lwz r24,SAVprev+4(r22) ; Pick up the previous area
1358 li r21,0 ; Assume we popped all the way out
1359 mr. r24,r24 ; Any more context?
1360 beq-- vmxonlyone ; Nope...
1361 lwz r21,SAVlevel(r24) ; Get the level associated with save
1362
1363 vmxonlyone: stw r24,VMXsave(r20) ; Dequeue this savearea
1364
1365 rlwinm r3,r22,0,0,19 ; Find main savearea header
1366
1367 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1368 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1369 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1370 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1371 stw r8,SAVprev(r22) ; Link the old in (top)
1372 stw r9,SAVprev+4(r22) ; Link the old in (bottom)
1373 xor r3,r22,r3 ; Convert to physical
1374 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1375 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1376
1377 #if FPVECDBG
1378 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1379 li r2,0x3401 ; (TEST/DEBUG)
1380 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1381 sc ; (TEST/DEBUG)
1382 #endif
1383
1384 vmxsetlvl: stw r21,VMXlevel(r20) ; Save the level
1385
1386 ;
1387 ; Here we check if we are at the right level
1388 ;
1389
1390 vmxchkena: lwz r21,VMXowner(r31) ; Get the ID of the live context
1391 lwz r23,VMXlevel(r26) ; Get the level ID
1392 cmplw r26,r21 ; Do we have the live context?
1393 lwz r24,VMXcpu(r26) ; Get the CPU that the context was last dispatched on
1394 bne-- setena ; No, can not possibly enable...
1395 cmplw r30,r23 ; Are we about to launch the live level?
1396 cmplw cr1,r19,r24 ; Was facility used on this processor last?
1397 bne-- setena ; No, not live...
1398 bne-- cr1,setena ; No, wrong cpu, have to enable later....
1399
1400 lwz r24,VMXsave(r26) ; Get the first savearea
1401 mr. r24,r24 ; Any savearea?
1402 beq++ vmxena ; Nope...
1403 lwz r25,SAVlevel(r24) ; Get the level of savearea
1404 lwz r0,SAVprev+4(r24) ; Get the previous
1405 cmplw r30,r25 ; Is savearea for the level we are launching?
1406 bne++ vmxena ; No, just go enable...
1407
1408 stw r0,VMXsave(r26) ; Pop the chain
1409
1410 rlwinm r3,r24,0,0,19 ; Find main savearea header
1411
1412 lwz r8,quickfret(r31) ; Get the first in quickfret list (top)
1413 lwz r9,quickfret+4(r31) ; Get the first in quickfret list (bottom)
1414 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
1415 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
1416 stw r8,SAVprev(r24) ; Link the old in (top)
1417 stw r9,SAVprev+4(r24) ; Link the old in (bottom)
1418 xor r3,r24,r3 ; Convert to physical
1419 stw r2,quickfret(r31) ; Set the first in quickfret list (top)
1420 stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
1421
1422 #if FPVECDBG
1423 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1424 li r2,0x3402 ; (TEST/DEBUG)
1425 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1426 sc ; (TEST/DEBUG)
1427 #endif
1428
1429 vmxena: oris r29,r29,hi16(MASK(MSR_VEC)) ; Enable facility
1430
1431 setena: lwz r18,umwSpace(r28) ; Get the space ID in case we are launching user
1432 rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state?
1433 li r0,0 ; Get set to release quickfret holdoff
1434 crmove cr7_eq,cr0_eq ; Remember if we are going to user state
1435 rlwimi. r20,r29,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats
1436 lwz r19,deferctx(r28) ; Get any deferred facility context switch
1437 rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector
1438 stw r29,savesrr1+4(r27) ; Turn facility on or off
1439 stw r0,holdQFret(r31) ; Release quickfret
1440 oris r18,r18,hi16(umwSwitchAway) ; Set the switch-away bit in case we go to user
1441
1442 beq setenaa ; Neither float nor vector turned on....
1443
1444 lwz r5,ACT_MACT_SPF(r28) ; Get activation copy
1445 lwz r6,spcFlags(r31) ; Get per_proc copy
1446 or r5,r5,r20 ; Set vector/float changed bits in activation
1447 or r6,r6,r20 ; Set vector/float changed bits in per_proc
1448 stw r5,ACT_MACT_SPF(r28) ; Set activation copy
1449 stw r6,spcFlags(r31) ; Set per_proc copy
1450
1451 setenaa: mfdec r24 ; Get decrementer
1452 bf+ cr2_eq,nodefer ; No deferred to switch to...
1453
1454 li r20,0 ; Clear this
1455 stw r26,curctx(r28) ; Make the facility context current
1456 stw r20,deferctx(r28) ; Clear deferred context
1457
1458 nodefer: lwz r22,qactTimer(r28) ; Get high order quick activation timer
1459 mr. r24,r24 ; See if it has popped already...
1460 lwz r23,qactTimer+4(r28) ; Get low order qact timer
1461 ble- chkifuser ; We have popped or are just about to...
1462
1463 segtb: mftbu r20 ; Get the upper time base
1464 mftb r21 ; Get the low
1465 mftbu r19 ; Get upper again
1466 or. r0,r22,r23 ; Any time set?
1467 cmplw cr1,r20,r19 ; Did they change?
1468 beq++ chkifuser ; No time set....
1469 bne-- cr1,segtb ; Timebase ticked, get them again...
1470
1471 subfc r6,r21,r23 ; Subtract current from qact time
1472 li r0,0 ; Make a 0
1473 subfe r5,r20,r22 ; Finish subtract
1474 subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise
1475 andc. r12,r5,r0 ; Set 0 if qact has passed
1476 andc r13,r6,r0 ; Set 0 if qact has passed
1477 bne chkifuser ; If high order is non-zero, this is too big for a decrementer
1478 cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on)
1479 bge++ chkifuser ; No, do not reset decrementer...
1480
1481 mtdec r13 ; Set our value
1482
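;
; The qactTimer test above amounts to (illustrative pseudo-C; it is skipped
; entirely if the decrementer has already gone negative):
;
;	if (qactTimer != 0) {
;		uint64_t delta = qactTimer - timebase();	// 64-bit subfc/subfe
;		if (qactTimer <= timebase())
;			delta = 0;				// already popped: fire at once
;		if ((delta >> 32) == 0 && (uint32_t)delta < current_decrementer)
;			mtdec((uint32_t)delta);
;	}
;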
1483 chkifuser: bl EXT(mach_absolute_time)
1484 lwz r5,ACT_PER_PROC(r28)
1485 addi r6,r5,PP_PROCESSOR
1486 lwz r5,KERNEL_TIMER(r6)
1487 lwz r29,CURRENT_STATE(r6)
1488 beq-- cr7,chkifuser1 ; Skip this if we are going to kernel...
1489 stw r18,umwSpace(r28) ; Half-invalidate to force MapUserAddressWindow to reload SRs
1490 addi r5,r28,USER_TIMER
1491 addi r29,r6,USER_STATE
1492
1493 chkifuser1: bl EXT(thread_timer_event)
1494 mr r5,r29
1495 bl EXT(state_event)
1496
1497 chkenax:
1498
1499 #if DEBUG
1500 lwz r20,SAVact(r27) ; (TEST/DEBUG) Make sure our restore
1501 mfsprg r21, 1 ; (TEST/DEBUG) with the current act.
1502 cmpwi r21,0 ; (TEST/DEBUG)
1503 beq-- yeswereok ; (TEST/DEBUG)
1504 cmplw r21,r20 ; (TEST/DEBUG)
1505 beq++ yeswereok ; (TEST/DEBUG)
1506
1507 lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
1508 ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
1509 mr r21,r27 ; (TEST/DEBUG) Save the savearea address
1510 li r3,failContext ; (TEST/DEBUG) Bad state code
1511 sc ; (TEST/DEBUG) System ABEND
1512
1513 yeswereok:
1514 #endif
1515
1516 mr r3,r27 ; Pass savearea back
1517 b EXT(exception_exit) ; We are all done now...
1518
1519
1520
1521 ;
1522 ; Null PPC call - performance testing, does absolutely nothing
1523 ;
1524
1525 .align 5
1526
1527 .globl EXT(ppcNull)
1528
1529 LEXT(ppcNull)
1530
1531 li r3,-1 ; Make sure we test no asts
1532 blr
1533
1534
1535 ;
1536 ; Instrumented null PPC call - performance testing, does absolutely nothing
1537 ; Forces various timestamps to be returned.
1538 ;
1539
1540 .align 5
1541
1542 .globl EXT(ppcNullinst)
1543
1544 LEXT(ppcNullinst)
1545
1546 li r3,-1 ; Make sure we test no asts
1547 blr
1548
1549
1550 /*
1551 * Here's where we handle the fastpath stuff
1552 * We'll do what we can here because registers are already
553 * loaded and it will be less confusing than moving them around.
554 * If we need to though, we'll branch off somewhere else.
1555 *
1556 * Registers when we get here:
1557 *
1558 * r0 = syscall number
1559 * r4 = savearea/pcb
1560 * r13 = activation
1561 * r14 = previous savearea (if any)
1562 * r16 = thread
1563 * r25 = per_proc
1564 */
1565
1566 .align 5
1567
1568 fastpath: cmplwi cr3,r0,0x7FF5 ; Is this a null fastpath?
1569 beq-- cr3,fastexutl ; Yes, bail fast...
1570 cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber?
1571 bnelr-- cr3 ; Not a fast path...
1572
1573 /*
1574 * void cthread_set_self(cproc_t p)
1575 *
576 * Sets thread state "user_value". In practice this is the thread-local-data-pointer (TLDP),
1577 * though we do not interpret it. This call is mostly used by 32-bit tasks, but we save all 64 bits
1578 * in case a 64-bit task wants to use this facility. They normally do not, because the 64-bit
1579 * ABI reserves r13 for the TLDP.
1580 *
1581 * This op is invoked as follows:
1582 * li r0, CthreadSetSelfNumber // load the fast-trap number
1583 * sc // invoke fast-trap
1584 * blr
1585 */
1586
1587 CthreadSetSelfNumber:
1588 lwz r3,saver3+0(r4) /* get the TLDP passed in r3 */
1589 lwz r5,saver3+4(r4) /* (all 64 bits, in case this is a 64-bit task) */
1590 stw r3,CTHREAD_SELF+0(r13) /* Remember it in the activation... */
1591 stw r5,CTHREAD_SELF+4(r13)
1592 stw r3,UAW+0(r25) /* ...and in the per-proc */
1593 stw r5,UAW+4(r25)
1594
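/*
 * In C terms the stores above amount to the following (a sketch only; act and
 * per_proc stand for the activation in r13 and the per_proc block in r25):
 *
 *	act->cthread_self = save->saver3;	// keep all 64 bits in the activation
 *	per_proc->uaw     = save->saver3;	// ...and mirror them in the per-proc
 *	// then fall through to fastexit and return to the caller
 */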
1595
1596 .globl EXT(fastexit)
1597 EXT(fastexit):
1598 fastexutl: mr r3,r4 ; Pass back savearea
1599 b EXT(exception_exit) ; Go back to the caller...
1600
1601
1602 /*
1603 * Here's where we check for a hit on the Blue Box Assist
1604 * Most registers are non-volatile, so be careful here. If we don't
1605 * recognize the trap instruction we go back for regular processing.
1606 * Otherwise we transfer to the assist code.
1607 */
1608
1609 .align 5
1610
1611 checkassist:
1612 lwz r0,saveexception(r4) ; Get the exception code
1613 lwz r23,savesrr1+4(r4) ; Get the interrupted MSR
1614 lwz r26,ACT_MACT_BEDA(r13) ; Get Blue Box Descriptor Area
1615 mtcrf 0x18,r23 ; Check what SRR1 says
1616 lwz r24,ACT_MACT_BTS(r13) ; Get the table start
1617 cmplwi r0,T_AST ; Check for T_AST trap
1618 lwz r27,savesrr0+4(r4) ; Get trapped address
1619 crnand cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT ; We need both trap and user state
1620 sub r24,r27,r24 ; See how far into it we are
1621 cror cr0_eq,cr0_eq,cr1_eq ; Need to bail if AST or not trap or not user state
1622 cmplwi cr1,r24,BB_MAX_TRAP ; Do we fit in the list?
1623 cror cr0_eq,cr0_eq,cr1_gt ; Also leave if trap not in range
1624 btlr- cr0_eq ; No assist if AST or not trap or not user state or trap not in range
1625 b EXT(atomic_switch_trap) ; Go to the assist...
1626
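/*
 * The condition-register work above implements, roughly, the following decision
 * (a sketch; is_program_trap and in_user_mode stand for the SRR1 program-trap
 * and MSR_PR bits tested with mtcrf/crnand, and the other names mirror the
 * comments on the instructions):
 *
 *	if (exception == T_AST ||			// never assist an AST
 *	    !is_program_trap || !in_user_mode ||	// must be a trap taken from user state
 *	    (srr0 - bb_table_start) > BB_MAX_TRAP)	// trap address must fall within the table
 *		return;					// back to regular trap processing
 *	atomic_switch_trap();				// otherwise hand off to the assist
 */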
1627 ;
1628 ; Virtual Machine Monitor
1629 ; Here is where we exit from the emulated context
1630 ; Note that most registers get trashed here
1631 ; R3 and R30 are preserved across the call and hold the activation
1632 ; and savearea, respectively.
1633 ;
1634
1635 .align 5
1636
1637 exitFromVM: mr r30,r4 ; Get the savearea
1638 mr r3,r13 ; Get the activation
1639
1640 b EXT(vmm_exit) ; Do it to it
1641
1642 .align 5
1643 .globl EXT(retFromVM)
1644
1645 LEXT(retFromVM)
1646 mfsprg r10,1 ; Get the current activation
1647 lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
1648 mr r8,r3 ; Get the activation
1649 lwz r4,SAVprev+4(r30) ; Pick up the previous savearea
1650 mr r3,r30 ; Put savearea in proper register for common code
1651 lwz r11,SAVflags(r30) ; Get the flags of the current savearea
1652 rlwinm r11,r11,0,15,13 ; Clear the syscall flag
1653 mr r1,r8
1654 stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
1655
1656 stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
1657
1658 lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
1659 addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
1660 stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
1661 b chkfac ; Go end it all...
1662
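/*
 * A rough C outline of the unwinding done in retFromVM (descriptive names only;
 * the mask for the syscall flag is illustrative, not a definition from this file):
 *
 *	save->SAVflags &= ~SYSCALL_FLAG;		// this is no longer a syscall exit
 *	act->pcb = save->SAVprev;			// previous savearea (or 0) becomes current
 *	act->ksp = kernel_stack_base + KERNEL_STACK_SIZE - FM_SIZE;	// mark the stack empty
 *	// then rejoin the common exit path at chkfac with the savearea in r3
 */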
1663
1664 ;
1665 ; chandler (note: not a candle maker or tallow merchant)
1666 ;
1667 ; Here is the system choke handler. This is where the system goes
1668 ; to die.
1669 ;
1670 ; We get here as a result of a T_CHOKE exception which is generated
1671 ; by the Choke firmware call or by lowmem_vectors when it detects a
1672 ; fatal error. Examples of where this may be used are when we detect
1673 ; problems in low-level mapping chains, trashed savearea free chains,
1674 ; or stack guardpage violations.
1675 ;
1676 ; Note that we cannot set a back chain in the stack when we come
1677 ; here, because the chain itself is probably what was corrupted.
1678 ;
1679
1680
1681 .align 5
1682 .globl EXT(chandler)
1683 LEXT(chandler) ; Choke handler
1684
1685 li r31,0 ; Get a 0
1686 mfsprg r25,1 ; Get the current activation
1687 lwz r25,ACT_PER_PROC(r25) ; Get the per_proc block
1688 stw r31,traceMask(0) ; Force tracing off right now
1689
1690
1691
1692 lwz r1,PP_DEBSTACKPTR(r25) ; Get debug stack pointer
1693 cmpwi r1,-1 ; Are we already choking?
1694 bne chokefirst ; Nope...
1695
1696 chokespin: addi r31,r31,1 ; Spin and hope for an analyzer connection...
1697 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1698 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1699 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1700 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1701 addi r31,r31,1 ; Spin and hope for an analyzer connection...
1702 b chokespin ; Spin and hope for an analyzer connection...
1703
1704 chokefirst: li r0,-1 ; Set choke value
1705 mr. r1,r1 ; See if we are on debug stack yet
1706 lwz r10,saver1+4(r4) ; Get the interrupted stack pointer
1707 stw r0,PP_DEBSTACKPTR(r25) ; Show we are choking
1708 bne chokestart ; We are not on the debug stack yet...
1709
1710 lwz r2,PP_DEBSTACK_TOP_SS(r25) ; Get debug stack top
1711 sub r11,r2,r10 ; Get stack depth
1712
1713 cmplwi r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED ; Check if stack pointer is ok
1714 bgt chokespin ; Bad stack pointer or too little left, just die...
1715
1716 subi r1,r10,FM_REDZONE ; Make a red zone
1717
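/*
 * In outline, the stack selection above does the following (descriptive names only):
 *
 *	if (per_proc->debstackptr != 0) {		// not on the debug stack yet
 *		sp = per_proc->debstackptr;		// just switch to it
 *	} else {					// already choking on the debug stack
 *		depth = per_proc->debstack_top - interrupted_sp;
 *		if (depth > KERNEL_STACK_SIZE - FM_SIZE - TRAP_SPACE_NEEDED)
 *			goto chokespin;			// pointer is bad or stack nearly gone, just spin
 *		sp = interrupted_sp - FM_REDZONE;	// keep going below a red zone
 *	}
 *	// either way PP_DEBSTACKPTR has been set to -1, so a recursive choke just spins
 */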
1718 chokestart: li r0,0 ; Get a zero
1719 stw r0,FM_BACKPTR(r1) ; We now have terminated the back chain
1720
1721 bl EXT(SysChoked) ; Call the "C" phase of this
1722 b chokespin ; Should not be here so just go spin...
1723
1724
1725 #if VERIFYSAVE
1726 ;
1727 ; Savearea chain verification
1728 ;
1729
1730 versave:
1731 #if 0
1732 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1733 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1734 lwz r23,0(r22) ; (TEST/DEBUG)
1735 mr. r23,r23 ; (TEST/DEBUG)
1736 beqlr- ; (TEST/DEBUG)
1737 mfsprg r20,1 ; Get the current activation
1738 lwz r20,ACT_PER_PROC(r20) ; Get the per_proc block
1739 lwz r21,pfAvailable(r20) ; (TEST/DEBUG)
1740 mr. r21,r21 ; (TEST/DEBUG)
1741 bnelr+ ; (TEST/DEBUG)
1742
1743 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1744 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1745 #endif
1746
1747 #if 0
1748 ;; This code is broken and migration will make the matter even worse
1749 ;
1750 ; Make sure that all savearea chains have the right type on them
1751 ;
1752
1753 lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG)
1754 lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1755 ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG)
1756 ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1757 li r20,0 ; (TEST/DEBUG)
1758 lwz r26,0(r27) ; (TEST/DEBUG)
1759 lwz r27,psthreadcnt(r28) ; (TEST/DEBUG)
1760 mr. r26,r26 ; (TEST/DEBUG) Have we locked the test out?
1761 lwz r28,psthreads(r28) ; (TEST/DEBUG)
1762 mflr r31 ; (TEST/DEBUG) Save return
1763 bnelr- ; (TEST/DEBUG) Test already triggered, skip...
1764 b fckgo ; (TEST/DEBUG) Join up...
1765
1766 fcknext: mr. r27,r27 ; (TEST/DEBUG) Any more threads?
1767 bne+ fckxxx ; (TEST/DEBUG) Yes...
1768
1769 mtlr r31 ; (TEST/DEBUG) Restore return
1770 blr ; (TEST/DEBUG) Leave...
1771
1772 fckxxx: lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Get next thread
1773
1774 fckgo: subi r27,r27,1 ; (TEST/DEBUG) Decrement thread count
1775 lwz r24,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) Get activation for the thread
1776 lwz r20,ACT_MACT_PCB(r24) ; (TEST/DEBUG) Get the normal context
1777 li r21,SAVgeneral ; (TEST/DEBUG) Make sure this is all general context
1778 bl versavetype ; (TEST/DEBUG) Check the chain
1779
1780 lwz r20,facctx+FPUsave(r24) ; (TEST/DEBUG) Get regular floating point
1781 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1782 bl versavetype ; (TEST/DEBUG) Check the chain
1783
1784 lwz r20,facctx+VMXsave(r24) ; (TEST/DEBUG) Get regular vector context
1785 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1786 bl versavetype ; (TEST/DEBUG) Check the chain
1787
1788 lwz r29,vmmControl(r24) ; (TEST/DEBUG) Get the virtual machine control blocks
1789 mr. r29,r29 ; (TEST/DEBUG) Are there any?
1790 beq+ fcknext ; (TEST/DEBUG) Nope, next thread...
1791
1792 li r22,kVmmMaxContextsPerThread ; (TEST/DEBUG) Get the number of control blocks
1793 subi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get running start
1794
1795 fcknvmm: subi r22,r22,1 ; (TEST/DEBUG) Do all of them
1796 mr. r22,r22 ; (TEST/DEBUG) Are we all done?
1797 addi r29,r29,vmmCEntrySize ; (TEST/DEBUG) Get the next entry
1798 blt- fcknext ; (TEST/DEBUG) Yes, check next thread...
1799
1800 lwz r23,vmmFlags(r29) ; (TEST/DEBUG) Get entry flags
1801 rlwinm. r23,r23,0,0,0 ; (TEST/DEBUG) Is this in use?
1802 beq+ fcknvmm ; (TEST/DEBUG) Not in use...
1803
1804 lwz r20,vmmFacCtx+FPUsave(r29) ; (TEST/DEBUG) Get regular floating point
1805 li r21,SAVfloat ; (TEST/DEBUG) Make sure this is all floating point
1806 bl versavetype ; (TEST/DEBUG) Check the chain
1807
1808 lwz r20,vmmFacCtx+VMXsave(r29) ; (TEST/DEBUG) Get regular vector context
1809 li r21,SAVvector ; (TEST/DEBUG) Make sure this is all vector
1810 bl versavetype ; (TEST/DEBUG) Check the chain
1811 b fcknvmm ; (TEST/DEBUG) Get the next vmm block...
1812
1813 versavetype:
1814 mr. r20,r20 ; (TEST/DEBUG) Chain done?
1815 beqlr- ; (TEST/DEBUG) Yes...
1816
1817 lwz r23,SAVflags(r20) ; (TEST/DEBUG) Get the flags
1818 rlwinm r23,r23,24,24,31 ; (TEST/DEBUG) Position it
1819 cmplw r23,r21 ; (TEST/DEBUG) Are we the correct type?
1820 beq+ versvok ; (TEST/DEBUG) This one is ok...
1821
1822 lis r22,hi16(EXT(DebugWork)) ; (TEST/DEBUG)
1823 ori r22,r22,lo16(EXT(DebugWork)) ; (TEST/DEBUG)
1824 stw r22,0(r22) ; (TEST/DEBUG) Lock out more checks
1825 BREAKPOINT_TRAP ; (TEST/DEBUG) Get into debugger
1826
1827 versvok: lwz r20,SAVprev+4(r20) ; (TEST/DEBUG) Get the previous one
1828 b versavetype ; (TEST/DEBUG) Go check its type...
1829 #endif
1830
1831
1832 #endif