1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * @OSF_COPYRIGHT@ | |
24 | */ | |
25 | ||
26 | /* Low level routines dealing with exception entry and exit. | |
27 | * There are various types of exception: | |
28 | * | |
29 | * Interrupt, trap, system call and debugger entry. Each has its own | |
30 | * handler since the state save routine is different for each. The | |
31 | * code is very similar (a lot of cut and paste). | |
32 | * | |
33 | * The code for the FPU disabled handler (lazy fpu) is in cswtch.s | |
34 | */ | |
35 | ||
36 | #include <debug.h> | |
37 | #include <mach_assert.h> | |
38 | #include <mach/exception_types.h> | |
39 | #include <mach/ppc/vm_param.h> | |
40 | ||
41 | #include <assym.s> | |
42 | ||
43 | #include <ppc/asm.h> | |
44 | #include <ppc/proc_reg.h> | |
45 | #include <ppc/trap.h> | |
46 | #include <ppc/exception.h> | |
47 | #include <ppc/spl.h> | |
48 | ||
49 | ||
50 | #define VERIFYSAVE 0 | |
51 | #define FPVECDBG 0 | |
52 | ||
53 | /* | |
54 | * thandler(type) | |
55 | * | |
56 | * ENTRY: VM switched ON | |
57 | * Interrupts OFF | |
58 | * R3 contains exception code | |
59 | * R4 points to the saved context (virtual address) | |
60 | * Everything is saved in savearea | |
61 | */ | |
62 | ||
63 | /* | |
64 | * If pcb.ksp == 0 then the kernel stack is already busy, | |
65 | * we save the ppc_saved_state below the current stack pointer, | |
66 | * leaving enough space for the 'red zone' in case the | |
67 | * trapped thread was in the middle of saving state below | |
68 | * its stack pointer. | |
69 | * | |
70 | * otherwise we save a ppc_saved_state in the pcb, and switch to | |
71 | * the kernel stack (setting pcb.ksp to 0) | |
72 | * | |
73 | * on return, we do the reverse, the last state is popped from the pcb | |
74 | * and pcb.ksp is set to the top of stack | |
75 | */ | |
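/* Added sketch (not part of the original source): the policy above in rough
 * C-style pseudocode; the "fields" are just the ACT_MACT_KSP and saver1
 * offsets used by the code below.
 *
 *	if (pcb.ksp == 0) {                      // kernel stack already busy
 *		r1 = interrupted_r1 - FM_REDZONE;    // stay on it, below the red zone
 *	} else {
 *		r1 = pcb.ksp;                        // switch to the kernel stack
 *		pcb.ksp = 0;                         // and mark it busy
 *	}
 */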
76 | ||
77 | /* TRAP_SPACE_NEEDED is the space assumed free on the kernel stack when |
78 | * another trap is taken. We need at least enough space for a saved state | |
79 | * structure plus two small backpointer frames, and we add a few | |
80 | * hundred bytes for the space needed by the C code (which may be less but | |
81 | * may be much more). We're trying to catch kernel stack overflows :-) | |
82 | */ | |
83 | ||
84 | #define TRAP_SPACE_NEEDED FM_REDZONE+(2*FM_SIZE)+256 | |
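/* Added note: with the usual 32-bit PowerPC frame constants of
 * FM_REDZONE = 224 and FM_SIZE = 64 (assym.s has the authoritative values),
 * this works out to 224 + (2 * 64) + 256 = 608 bytes of headroom checked
 * for before another trap is taken on this stack.
 */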
85 | ||
86 | .text |
87 | ||
88 | .align 5 |
89 | .globl EXT(thandler) | |
90 | LEXT(thandler) /* Trap handler */ | |
91 | ||
92 | #if 0 | |
93 | ; | |
94 | ; NOTE: This trap will hang VPC running Windows98 (and probably others)... | |
95 | ; | |
96 | lwz r25,savedar(r4) ; (TEST/DEBUG) | |
97 | cmplwi r25,0x298 ; (TEST/DEBUG) | |
98 | ||
99 | deadloop: addi r25,r25,1 ; (TEST/DEBUG) | |
100 | addi r25,r25,1 ; (TEST/DEBUG) | |
101 | addi r25,r25,1 ; (TEST/DEBUG) | |
102 | addi r25,r25,1 ; (TEST/DEBUG) | |
103 | addi r25,r25,1 ; (TEST/DEBUG) | |
104 | addi r25,r25,1 ; (TEST/DEBUG) | |
105 | addi r25,r25,1 ; (TEST/DEBUG) | |
106 | addi r25,r25,1 ; (TEST/DEBUG) | |
107 | addi r25,r25,1 ; (TEST/DEBUG) | |
108 | addi r25,r25,1 ; (TEST/DEBUG) | |
109 | addi r25,r25,1 ; (TEST/DEBUG) | |
110 | beq- deadloop ; (TEST/DEBUG) | |
111 | #endif | |
112 | |
113 | mfsprg r25,0 /* Get the per_proc */ | |
114 | ||
115 | lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer | |
116 | ||
117 | lwz r6,PP_CPU_DATA(r25) /* Get pointer to cpu specific data */ |
118 | cmpwi cr0,r1,0 ; Are we on interrupt stack? | |
119 | lwz r6,CPU_ACTIVE_THREAD(r6) /* Get the pointer to the currently active thread */ | |
120 | beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
121 | lwz r13,THREAD_TOP_ACT(r6) /* Point to the active activation */
122 | lwz r26,ACT_MACT_SPF(r13) ; Get special flags
123 | lwz r8,ACT_MACT_PCB(r13) /* Get the last savearea used */
124 | rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active? |
125 | lwz r1,ACT_MACT_KSP(r13) ; Get the top of kernel stack | |
126 | bnel- checkassist /* See if we should assist this */ |
127 | stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ | |
128 | stw r8,SAVprev(r4) /* Queue the new save area in the front */ | |
129 | ||
130 | #if VERIFYSAVE | |
131 | bl versave ; (TEST/DEBUG) | |
132 | #endif | |
133 | ||
134 | lwz r9,THREAD_KERNEL_STACK(r6) ; Get our kernel stack start |
135 | cmpwi cr1,r1,0 ; Are we already on kernel stack? | |
136 | stw r13,SAVact(r4) ; Mark the savearea as belonging to this activation | |
137 | lwz r26,saver1(r4) ; Get the stack at interrupt time | |
138 |
139 | bne+ cr1,.L_kstackfree ; We are not on kernel stack yet...
140 |
141 | subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
142 | |
143 | .L_kstackfree: | |
144 | lwz r7,savesrr1(r4) /* Pick up the entry MSR */
145 | sub r9,r1,r9 ; Get displacement into the kernel stack
146 | li r0,0 /* Make this 0 */
147 | cmplwi cr2,r9,KERNEL_STACK_SIZE ; Do we still have room on the stack?
148 | beq cr1,.L_state_on_kstack /* using above test for pcb/stack */ |
149 | ||
150 | stw r0,ACT_MACT_KSP(r13) /* Show that we have taken the stack */ | |
151 | ||
152 | .L_state_on_kstack: | |
153 | rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? | |
154 | bgt- cr2,kernelStackBad ; Kernel stack is bogus... |
155 | kernelStackNotBad: ; Not really | |
156 | beq+ tvecoff ; Vector off, do not save vrsave... |
157 | lwz r3,savevrsave(r4) ; Get the VRSAVE register | |
158 | stw r3,liveVRS(r25) ; Set the live value | |
159 | ||
160 | tvecoff: rlwinm. r3,r7,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? | |
161 | subi r1,r1,FM_SIZE /* Push a header onto the current stack */ | |
162 | beq+ tfpoff /* Floating point was off... */ | |
163 | lwz r3,savexfpscr(r4) ; Grab the just saved FPSCR | |
164 | stw r3,liveFPSCR(r25) ; Make it the live copy | |
165 | ||
166 | tfpoff: stw r26,FM_BACKPTR(r1) ; Link back to the previous frame
167 | |
168 | #if DEBUG | |
169 | /* If debugging, we need two frames, the first being a dummy | |
170 | * which links back to the trapped routine. The second is | |
171 | * that which the C routine below will need | |
172 | */ | |
173 | lwz r3,savesrr0(r4) /* Get the point of interruption */ | |
174 | stw r3,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ | |
175 | stwu r1, -FM_SIZE(r1) /* and make new frame */ | |
176 | #endif /* DEBUG */ | |
177 | ||
178 | ||
179 | /* call trap handler proper, with | |
180 | * ARG0 = type (not yet, holds pcb ptr) | |
181 | * ARG1 = saved_state ptr (already there) | |
182 | * ARG2 = dsisr (already there) | |
183 | * ARG3 = dar (already there) | |
184 | */ | |
185 | ||
186 | ||
187 | lwz r3,saveexception(r4) /* Get the exception code */ | |
188 | lwz r0,ACT_MACT_SPF(r13) ; Get the special flags | |
189 | ||
190 | addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range | |
191 | rlwinm. r0,r0,0,runningVMbit,runningVMbit ; Are we in VM state? (cr0_eq == 0 if yes) | |
192 | cmplwi cr2,r5,T_RUNMODE_TRACE-T_DATA_ACCESS ; Are we still in range? (cr_gt if not) | |
193 | ||
194 | lwz r5,savedsisr(r4) /* Get the saved DSISR */ | |
195 | ||
196 | crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes) | |
197 | rlwinm. r0,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes) | |
198 | ||
199 | cmpi cr2,r3,T_PREEMPT ; Is this a preemption? | |
200 | ||
201 | crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes) | |
202 | ||
203 | lwz r6,savedar(r4) /* Get the DAR */ | |
204 | ||
205 | beq- cr2, .L_call_trap /* Don't turn on interrupts for T_PREEMPT */ | |
206 | beq- exitFromVM ; Any true trap but T_MACHINE_CHECK exits us from the VM... | |
207 | ||
208 | /* syscall exception might warp here if there's nothing left | |
209 | * to do except generate a trap | |
210 | */ | |
211 | ||
212 | .L_call_trap: | |
213 | #if 0 | |
214 | lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
215 | oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
216 | sc /* (TEST/DEBUG) */ | |
217 | #endif | |
218 | ||
219 | bl EXT(trap) | |
220 | ||
221 | /* | |
222 | * Ok, return from C function | |
223 | * | |
224 | * This is also the point where new threads come when they are created. | |
225 | * The new thread is set up to look like a thread that took an | |
226 | * interrupt and went immediately into trap. | |
227 | * | |
228 | */ | |
229 | ||
230 | thread_return: | |
231 | ||
232 | mfmsr r7 /* Get the MSR */ | |
233 | lwz r4,SAVprev(r3) /* Pick up the previous savearea */ | |
234 | rlwinm r7,r7,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear the interrupt enable mask */ | |
235 | lwz r11,SAVflags(r3) /* Get the flags of the current savearea */ | |
236 | mtmsr r7 /* Disable interrupts */ | |
237 | ||
238 | mfsprg r10,0 /* Restore the per_proc info */ | |
239 | ||
240 | lwz r8,savesrr1(r3) ; Get the MSR we are going to | |
241 | lwz r1,PP_CPU_DATA(r10) /* Get the CPU data area */ | |
242 | rlwinm r11,r11,0,15,13 /* Clear the syscall flag */ | |
243 | lwz r1,CPU_ACTIVE_THREAD(r1) /* and the active thread */ | |
244 | rlwinm. r8,r8,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user? | |
245 | lwz r8,THREAD_TOP_ACT(r1) /* Now find the current activation */ | |
246 | stw r11,SAVflags(r3) /* Save back the flags (with reset stack cleared) */ | |
247 | ||
248 | #if 0 | |
249 | lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
250 | oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
251 | sc /* (TEST/DEBUG) */ | |
252 | #endif | |
253 | stw r4,ACT_MACT_PCB(r8) /* Point to the previous savearea (or 0 if none) */ | |
254 | ||
255 | beq- chkfac ; We are not leaving the kernel yet... | |
256 | ||
257 | lwz r5,THREAD_KERNEL_STACK(r1) /* Get the base pointer to the stack */ | |
258 | addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */ | |
259 | stw r5,ACT_MACT_KSP(r8) /* Save the empty stack pointer */ | |
260 | b chkfac /* Go end it all... */ | |
261 | ||
262 | ||
263 | ; |
264 | ; Here is where we go when we detect that the kernel stack is all messed up. | |
265 | ; We just try to dump some info and get into the debugger. | |
266 | ; | |
267 | ||
268 | kernelStackBad: | |
269 | ||
270 | lwz r3,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top | |
271 | subi r3,r3,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack | |
272 | sub r3,r1,r3 ; Get displacement into debug stack | |
273 | cmplwi cr2,r3,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack | |
274 | blt+ cr2,kernelStackNotBad ; Yeah, that is ok too... | |
275 | ||
276 | lis r0,hi16(Choke) ; Choke code | |
277 | ori r0,r0,lo16(Choke) ; and the rest | |
278 | li r3,failStack ; Bad stack code | |
279 | sc ; System ABEND | |
280 | ||
281 | |
282 | /* | |
283 | * shandler(type) | |
284 | * | |
285 | * ENTRY: VM switched ON | |
286 | * Interrupts OFF | |
287 | * R3 contains exception code | |
288 | * R4 points to the saved context (virtual address) | |
289 | * Everything is saved in savearea | |
290 | */ | |
291 | ||
292 | /* | |
293 | * If pcb.ksp == 0 then the kernel stack is already busy, | |
294 | * this is an error - jump to the debugger entry | |
295 | * | |
296 | * otherwise depending upon the type of | |
297 | * syscall, look it up in the kernel table | |
298 | * or pass it to the server. | |
299 | * | |
300 | * on return, we do the reverse, the state is popped from the pcb | |
301 | * and pcb.ksp is set to the top of stack. | |
302 | */ | |
303 | ||
304 | /* | |
305 | * NOTE: | |
306 | * mach system calls are negative | |
307 | * BSD system calls are low positive | |
308 | * PPC-only system calls are in the range 0x6xxx | |
309 | * PPC-only "fast" traps are in the range 0x7xxx | |
310 | */ | |
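/* Added illustration (not part of the original source): the handler below
 * keys off the upper 20 bits of the call number (rlwinm ...,0,0,19 masks
 * off the low 12 bits), roughly:
 *
 *	if ((num & 0xFFFFF000) == 0x7000)   try the fast path handler first
 *	if ((num & 0xFFFFF000) == 0x6000)   dispatch through the PPCcalls table
 *	else if (num < 0)                   mach_trap_table lookup (kernel)
 *	else                                unix_syscall / doexception
 *	                                    (0x7FFA -> syscall_notify_interrupt)
 */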
311 | ||
312 | .align 5 |
313 | .globl EXT(shandler) | |
314 | LEXT(shandler) /* System call handler */ | |
315 | |
316 | mfsprg r25,0 /* Get the per proc area */ | |
317 | lwz r0,saver0(r4) /* Get the original syscall number */ | |
318 | lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer | |
319 | rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check | |
320 | lwz r16,PP_CPU_DATA(r25) /* Assume we need this */ | |
321 | mr. r17,r17 ; Are we on interrupt stack? | |
322 | lwz r7,savesrr1(r4) ; Get the SRR1 value | |
323 | beq- EXT(ihandler) ; On interrupt stack, not allowed... | |
324 | rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? | |
325 | lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */ | |
326 | ||
327 | beq+ svecoff ; Vector off, do not save vrsave... | |
328 | lwz r6,savevrsave(r4) ; Get the VRSAVE register |
329 | stw r6,liveVRS(r25) ; Set the live value | |
330 | |
331 | svecoff: rlwinm. r6,r7,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? | |
332 | lwz r13,THREAD_TOP_ACT(r16) /* Pick up the active thread */ | |
333 | beq+ sfpoff ; Skip if floating point is off... | |
334 | lwz r9,savexfpscr(r4) ; Grab the just saved FPSCR | |
335 | stw r9,liveFPSCR(r25) ; Make it the live copy | |
336 | ||
337 | ; Check if SCs are being redirected for the BlueBox or to VMM | |
338 | ||
339 | sfpoff: lwz r6,ACT_MACT_SPF(r13) ; Pick up activation special flags | |
340 | mtcrf 0x41,r6 ; Check special flags |
341 | crmove cr6_eq,runningVMbit ; Remember if we are in VMM | |
342 | bf+ bbNoMachSCbit,noassist ; Take branch if SCs are not redirected | |
343 | lwz r26,ACT_MACT_BEDA(r13) ; Pick up the pointer to the blue box exception area
344 | b EXT(atomic_switch_syscall) ; Go to the assist... |
345 | ||
346 | noassist: cmplwi r15,0x7000 /* Do we have a fast path trap? */
347 | lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */ |
348 | beql+ fastpath /* We think it's a fastpath... */ | |
349 | ||
350 | lwz r1,ACT_MACT_KSP(r13) /* Get the kernel stack pointer */ | |
351 | #if DEBUG | |
352 | mr. r1,r1 /* Are we already on the kernel stack? */ | |
353 | li r3,T_SYSTEM_CALL /* Yup, pretend we had an interrupt... */ | |
354 | beq- EXT(ihandler) /* Bad boy, bad boy... What'cha gonna do when they come for you? */ | |
355 | #endif /* DEBUG */ | |
356 | ||
357 | stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ | |
358 | li r0,0 /* Clear this out */ | |
359 | stw r14,SAVprev(r4) /* Queue the new save area in the front */ | |
360 | stw r13,SAVact(r4) /* Point the savearea at its activation */ | |
361 | ||
362 | #if VERIFYSAVE | |
363 | bl versave ; (TEST/DEBUG) | |
364 | #endif | |
365 | ||
366 | mr r30,r4 /* Save pointer to the new context savearea */ | |
367 | lwz r15,saver1(r4) /* Grab interrupt time stack */ | |
368 | stw r0,ACT_MACT_KSP(r13) /* Mark stack as busy with 0 val */ | |
369 | stw r15,FM_BACKPTR(r1) /* Link backwards */ | |
370 | ||
371 | #if DEBUG | |
372 | /* If debugging, we need two frames, the first being a dummy | |
373 | * which links back to the trapped routine. The second is | |
374 | * that which the C routine below will need | |
375 | */ | |
376 | lwz r8,savesrr0(r30) /* Get the point of interruption */ | |
377 | stw r8,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ | |
378 | stwu r1, -FM_SIZE(r1) /* and make new frame */ | |
379 | #endif /* DEBUG */ | |
380 | ||
381 | mfmsr r11 /* Get the MSR */ | |
382 | lwz r15,SAVflags(r4) /* Get the savearea flags */ | |
383 | ori r11,r11,lo16(MASK(MSR_EE)) /* Turn on interruption enabled bit */ | |
384 | lwz r0,saver0(r30) ; Get R0 back | |
385 | oris r15,r15,SAVsyscall >> 16 /* Mark that this is a syscall */ | |
386 | rlwinm r10,r0,0,0,19 ; Keep only the top part | |
387 | stwu r1,-(FM_SIZE+ARG_SIZE)(r1) /* Make a stack frame */ | |
388 | cmplwi r10,0x6000 ; Is it the special ppc-only guy? | |
389 | stw r15,SAVflags(r30) /* Save syscall marker */ | |
390 | beq- cr6,exitFromVM ; It is time to exit from alternate context...
391 | |
392 | beq- ppcscall ; Call the ppc-only system call handler... | |
393 | ||
394 | mtmsr r11 /* Enable interruptions */ | |
395 | ||
396 | /* Call a function that can print out our syscall info */ | |
397 | /* Note that we don't care about any volatiles yet */ | |
398 | mr r4,r30 | |
399 | bl EXT(syscall_trace) | |
400 | ||
401 | lwz r0,saver0(r30) /* Get the system call selector */ | |
402 | mr. r0,r0 /* What kind is it? */ | |
403 | blt- .L_kernel_syscall /* -ve syscall - go to kernel */ | |
404 | /* +ve syscall - go to server */ | |
405 | cmpwi cr0,r0,0x7FFA | |
406 | beq- .L_notify_interrupt_syscall | |
407 | ||
408 | #ifdef MACH_BSD | |
409 | mr r3,r30 /* Get PCB/savearea */ | |
410 | lwz r4,saver4(r30) /* Restore r4 */ | |
411 | lwz r5,saver5(r30) /* Restore r5 */ | |
412 | lwz r6,saver6(r30) /* Restore r6 */ | |
413 | lwz r7,saver7(r30) /* Restore r7 */ | |
414 | lwz r8,saver8(r30) /* Restore r8 */ | |
415 | lwz r9,saver9(r30) /* Restore r9 */ | |
416 | lwz r10,saver10(r30) /* Restore r10 */ | |
417 | bl EXT(unix_syscall) /* Check out unix... */ | |
418 | #endif | |
419 | ||
420 | .L_call_server_syscall_exception: | |
421 | li r3,EXC_SYSCALL /* doexception(EXC_SYSCALL, num, 1) */ | |
422 | ||
423 | .L_call_server_exception: | |
424 | mr r4,r0 /* Set syscall selector */ | |
425 | li r5,1 | |
426 | b EXT(doexception) /* Go away, never to return... */ | |
427 | ||
428 | /* The above, but with EXC_MACH_SYSCALL */ | |
429 | .L_call_server_mach_syscall: | |
430 | li r3,EXC_MACH_SYSCALL | |
431 | b .L_call_server_exception /* Join the common above... */ | |
432 | ||
433 | .L_notify_interrupt_syscall: | |
434 | lwz r3,saver3(r30) ; Get the new PC address to pass in | |
435 | bl EXT(syscall_notify_interrupt) | |
436 | b .L_syscall_return | |
437 | ||
438 | ; | |
439 | ; Handle PPC-only system call interface | |
440 | ; These are called with interruptions disabled | |
441 | ; and the savearea/pcb as the first parameter. | |
442 | ; It is up to the callee to enable interruptions if | |
443 | ; they should be. We are in a state here where | |
444 | ; both interrupts and preemption are ok, but because we could | |
445 | ; be calling diagnostic code we will not enable. | |
446 | ; | |
447 | ; Also, the callee is responsible for finding any parameters | |
448 | ; in the savearea/pcb. It also must set saver3 with any return | |
449 | ; code before returning. | |
450 | ; | |
451 | ; There are 3 possible return codes: | |
452 | ; 0 the call is disabled or something, we treat this like it was bogus | |
453 | ; + the call finished ok, check for AST | |
454 | ; - the call finished ok, do not check for AST | |
455 | ; | |
456 | ; Note: the last option is intended for special diagnostics calls that | |
457 | ; want the thread to return and execute before checking for preemption. | |
458 | ; | |
459 | ||
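;		Added sketch (not part of the original source) of the dispatch that
;		follows, in rough C.  PPCcalls/PPCcallmax are the table and bound
;		used below; rc is the value the call leaves in R3:
;
;			idx = num & 0xFFF;                      // rlwinm r11,r0,2,18,29
;			if (idx is past PPCcallmax || PPCcalls[idx] == 0)
;				goto syscall_exception;             // out of range or disabled
;			rc = (*PPCcalls[idx])(savearea, act);   // callee also fills in saver3
;			if (rc > 0)       return, checking for ASTs;
;			else if (rc < 0)  return, skipping the AST check;
;			else              goto syscall_exception;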
460 | ppcscall: rlwinm r11,r0,2,18,29 ; Make an index into the table | |
461 | lis r10,hi16(EXT(PPCcalls)) ; Get PPC-only system call table | |
462 | cmplwi r11,PPCcallmax ; See if we are too big | |
463 | ori r10,r10,lo16(EXT(PPCcalls)) ; Merge in low half | |
464 | bgt- .L_call_server_syscall_exception ; Bogus call... | |
465 | lwzx r11,r10,r11 ; Get function address | |
466 | ||
467 | ; | |
468 | ; Note: make sure we do not change the savearea in R30 to | |
469 | ; a different register without checking. Some of the PPCcalls | |
470 | ; depend upon it being there. | |
471 | ; | |
472 | ||
473 | mr r3,r30 ; Pass the savearea | |
474 | mr r4,r13 ; Pass the activation | |
475 | mr. r11,r11 ; See if there is a function here | |
476 | mtlr r11 ; Set the function address | |
477 | beq- .L_call_server_syscall_exception ; Disabled call... | |
478 | blrl ; Call it | |
479 | ||
480 | ||
481 | .globl EXT(ppcscret) | |
482 | LEXT(ppcscret) | |
483 | mr. r3,r3 ; See what we should do | |
484 | mr r31,r16 ; Restore the current thread pointer | |
485 | bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return.... | |
486 | mfsprg r10,0 ; Get the per_proc
487 | blt+ .L_thread_syscall_return ; Return, but no ASTs.... |
488 | lwz r0,saver0(r30) ; Restore the system call number | |
489 | b .L_call_server_syscall_exception ; Go to common exit... | |
490 | ||
491 | ||
492 | /* Once here, we know that the syscall was -ve | |
493 | * we should still have r1=ksp, | |
494 | * r16 = pointer to current thread, | |
495 | * r13 = pointer to top activation, | |
496 | * r0 = syscall number | |
497 | * r30 = pointer to saved state (in pcb) | |
498 | */ | |
499 | .L_kernel_syscall: | |
500 | neg r31, r0 /* Make number +ve and put in r31*/ | |
501 | ||
502 | /* If out of range, call server with syscall exception */ | |
503 | addis r29, 0, HIGH_CADDR(EXT(mach_trap_count)) | |
504 | addi r29, r29, LOW_ADDR(EXT(mach_trap_count)) | |
505 | lwz r29, 0(r29) | |
506 | ||
507 | cmp cr0, r31, r29 | |
508 | bge- cr0, .L_call_server_syscall_exception | |
509 | ||
510 | addis r29, 0, HIGH_CADDR(EXT(mach_trap_table)) | |
511 | addi r29, r29, LOW_ADDR(EXT(mach_trap_table)) | |
512 | ||
513 | /* multiply the trap number to get offset into table */ | |
514 | slwi r31, r31, MACH_TRAP_OFFSET_POW2 | |
515 | ||
516 | /* r31 now holds offset into table of our trap entry, | |
517 | * add on the table base, and it then holds pointer to entry | |
518 | */ | |
519 | add r31, r31, r29 | |
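/* Added note (not part of the original source): each table entry is a
 * small fixed-size record, conceptually something like
 *
 *	struct mach_trap {
 *		int   arg_count;        // read below via MACH_TRAP_ARGC
 *		int (*function)(...);   // read below via MACH_TRAP_FUNCTION
 *	};
 *
 * (the exact layout and field order come from assym.s), and
 * MACH_TRAP_OFFSET_POW2 is log2 of that entry size, so the shift and add
 * turn the trap number into a pointer to its entry.
 */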
520 | ||
521 | /* If the function is kern_invalid, prepare to send an exception. | |
522 | This is messy, but parallels the x86. We need it for task_by_pid, | |
523 | at least. */ | |
524 | lis r29, HIGH_CADDR(EXT(kern_invalid)) | |
525 | addi r29, r29, LOW_ADDR(EXT(kern_invalid)) | |
526 | lwz r0, MACH_TRAP_FUNCTION(r31) | |
527 | cmp cr0, r0, r29 | |
528 | beq- .L_call_server_syscall_exception | |
529 | ||
530 | /* get arg count. If argc > 8 then not all args were in regs, | |
531 | * so we must perform copyin. | |
532 | */ | |
533 | lwz r29, MACH_TRAP_ARGC(r31) | |
534 | cmpwi cr0, r29, 8 | |
535 | ble+ .L_syscall_got_args | |
536 | ||
537 | /* argc > 8 - perform a copyin */ | |
538 | /* if the syscall came from kernel space, we can just copy */ | |
539 | ||
540 | lwz r0,savesrr1(r30) /* Pick up exception time MSR */ | |
541 | andi. r0,r0,MASK(MSR_PR) /* Check the priv bit */ | |
542 | bne+ .L_syscall_arg_copyin /* We're not privileged... */ | |
543 | ||
544 | /* we came from a privileged task, just do a copy */ | |
545 | /* get user's stack pointer */ | |
546 | ||
547 | lwz r28,saver1(r30) /* Get the stack pointer */ | |
548 | ||
549 | subi r29,r29,8 /* Get the number of arguments to copy */ | |
550 | ||
551 | addi r28,r28,COPYIN_ARG0_OFFSET-4 /* Point to source - 4 */ | |
552 | addi r27,r1,FM_ARG0-4 /* Point to sink - 4 */ | |
553 | ||
554 | .L_syscall_copy_word_loop: | |
555 | addic. r29,r29,-1 /* Count down the number of arguments left */ | |
556 | lwz r0,4(r28) /* Pick up the argument from the stack */ | |
557 | addi r28,r28,4 /* Point to the next source */ | |
558 | stw r0,4(r27) /* Store the argument */ | |
559 | addi r27,r27,4 /* Point to the next sink */ | |
560 | bne+ .L_syscall_copy_word_loop /* Move all arguments... */ | |
561 | b .L_syscall_got_args /* Go call it now... */ | |
562 | ||
563 | ||
564 | /* we came from a user task, pay the price of a real copyin */ | |
565 | /* set recovery point */ | |
566 | ||
567 | .L_syscall_arg_copyin: | |
568 | lwz r8,ACT_VMMAP(r13) ; Get the vm_map for this activation | |
569 | lis r28,hi16(.L_syscall_copyin_recover) | |
570 | lwz r8,VMMAP_PMAP(r8) ; Get the pmap | |
571 | ori r28,r28,lo16(.L_syscall_copyin_recover) | |
572 | addi r8,r8,PMAP_SEGS ; Point to the pmap SR slots | |
573 | stw r28,THREAD_RECOVER(r16) /* R16 still holds thread ptr */ | |
574 | ||
575 | /* We can manipulate the COPYIN segment register quite easily | |
576 | * here, but we've also got to make sure we don't go over a | |
577 | * segment boundary - hence some mess. | |
578 | * Registers from 12-29 are free for our use. | |
579 | */ | |
580 | ||
581 | ||
582 | lwz r28,saver1(r30) /* Get the stack pointer */ | |
583 | subi r29,r29,8 /* Get the number of arguments to copy */ | |
584 | addi r28,r28,COPYIN_ARG0_OFFSET /* Set source in user land */ | |
585 | ||
586 | /* set up SR_COPYIN to allow us to copy, we may need to loop | |
587 | * around if we change segments. We know that this previously | |
588 | * pointed to user space, so the sid doesn't need setting. | |
589 | */ | |
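/* Added sketch (not part of the original source) of the copy loop below,
 * in rough C; segs[] is the per-pmap segment register array indexed by
 * the top nibble of the user address:
 *
 *	while (words_left) {
 *		sr_copyin = pmap->segs[user_addr >> 28];   // map the user segment
 *		src = (SR_COPYIN_NUM << 28) | (user_addr & 0x0FFFFFFF);
 *		copy words, bumping src and user_addr, until user_addr
 *		crosses a 256MB segment boundary, then remap and continue;
 *	}
 */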
590 | ||
591 | rlwinm r7,r28,6,26,29 ; Get index to the segment slot | |
592 | ||
593 | .L_syscall_copyin_seg_loop: | |
594 | ||
595 | ||
596 | lwzx r10,r8,r7 ; Get the source SR value | |
597 | rlwinm r26,r28,0,4,31 ; Clear the segment number from source address | |
598 | mtsr SR_COPYIN,r10 ; Set the copyin SR | |
599 | isync | |
600 | ||
601 | oris r26,r26,(SR_COPYIN_NUM << (28-16)) ; Insert the copyin segment number into source address | |
602 | ||
603 | /* Make r27 point to address-4 of where we will store copied args */ | |
604 | addi r27,r1,FM_ARG0-4 | |
605 | ||
606 | .L_syscall_copyin_word_loop: | |
607 | ||
608 | lwz r0,0(r26) /* MAY CAUSE PAGE FAULT! */ | |
609 | subi r29,r29,1 ; Decrement count | |
610 | addi r26,r26,4 ; Bump input | |
611 | stw r0,4(r27) ; Save the copied in word | |
612 | mr. r29,r29 ; Are they all moved? | |
613 | addi r27,r27,4 ; Bump output | |
614 | beq+ .L_syscall_copyin_done ; Escape if we are done... | |
615 | ||
616 | rlwinm. r0,r26,0,4,29 ; Did we just step into a new segment? | |
617 | addi r28,r28,4 ; Bump up user state address also | |
618 | bne+ .L_syscall_copyin_word_loop ; We are still on the same segment... | |
619 | ||
620 | addi r7,r7,4 ; Bump to next slot | |
621 | b .L_syscall_copyin_seg_loop /* On new segment! remap */ | |
622 | ||
623 | /* Don't bother restoring SR_COPYIN, we can leave it trashed */ | |
624 | /* clear thread recovery as we're done touching user data */ | |
625 | ||
626 | .L_syscall_copyin_done: | |
627 | li r0,0 | |
628 | stw r0,THREAD_RECOVER(r16) /* R16 still holds thread ptr */ | |
629 | ||
630 | .L_syscall_got_args: | |
631 | lwz r8,ACT_TASK(r13) /* Get our task */ | |
632 | lis r10,hi16(EXT(c_syscalls_mach)) /* Get top half of counter address */ | |
633 | lwz r7,TASK_SYSCALLS_MACH(r8) ; Get the current count | |
634 | lwz r3,saver3(r30) /* Restore r3 */ | |
635 | addi r7,r7,1 ; Bump it | |
636 | ori r10,r10,lo16(EXT(c_syscalls_mach)) /* Get low half of counter address */ | |
637 | stw r7,TASK_SYSCALLS_MACH(r8) ; Save it | |
638 | lwz r4,saver4(r30) /* Restore r4 */ | |
639 | lwz r9,0(r10) /* Get counter */ | |
640 | lwz r5,saver5(r30) /* Restore r5 */ | |
641 | lwz r6,saver6(r30) /* Restore r6 */ | |
642 | addi r9,r9,1 /* Add 1 */ | |
643 | lwz r7,saver7(r30) /* Restore r7 */ | |
644 | lwz r8,saver8(r30) /* Restore r8 */ | |
645 | stw r9,0(r10) /* Save it back */ | |
646 | lwz r9,saver9(r30) /* Restore r9 */ | |
647 | lwz r10,saver10(r30) /* Restore r10 */ | |
648 | ||
649 | lwz r0,MACH_TRAP_FUNCTION(r31) | |
650 | ||
651 | /* calling this function, all the callee-saved registers are | |
652 | * still valid except for r30 and r31 which are in the PCB | |
653 | * r30 holds pointer to saved state (ie. pcb) | |
654 | * r31 is scrap | |
655 | */ | |
656 | mtctr r0 | |
657 | bctrl /* perform the actual syscall */ | |
658 | ||
659 | /* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */ | |
660 | ||
661 | /* r3 contains value that we're going to return to the user | |
662 | */ | |
663 | ||
664 | /* | |
665 | * Ok, return from C function, ARG0 = return value | |
666 | * | |
667 | * get the active thread's PCB pointer and thus pointer to user state | |
668 | * saved state is still in R30 and the active thread is in R16 . | |
669 | */ | |
670 | ||
671 | /* Store return value into saved state structure, since | |
672 | * we need to pick up the value from here later - the | |
673 | * syscall may perform a thread_set_syscall_return | |
674 | * followed by a thread_exception_return, ending up | |
675 | * at thread_syscall_return below, with SS_R3 having | |
676 | * been set up already | |
677 | */ | |
678 | ||
679 | /* When we are here, r16 should point to the current thread, | |
680 | * r30 should point to the current pcb | |
681 | */ | |
682 | ||
683 | /* save off return value, we must load it | |
684 | * back anyway for thread_exception_return | |
685 | * TODO NMGS put in register? | |
686 | */ | |
687 | .L_syscall_return: | |
688 | mr r31,r16 /* Move the current thread pointer */ | |
689 | stw r3,saver3(r30) /* Stash the return code */ | |
690 | ||
691 | /* Call a function that records the end of */ | |
692 | /* the mach system call */ | |
693 | mr r4,r30 | |
694 | bl EXT(syscall_trace_end) | |
695 | ||
696 | #if 0 | |
697 | lis r0,HIGH_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
698 | mr r4,r31 /* (TEST/DEBUG) */ | |
699 | oris r0,r0,LOW_ADDR(CutTrace) /* (TEST/DEBUG) */ | |
700 | mr r5,r30 /* (TEST/DEBUG) */ | |
701 | sc /* (TEST/DEBUG) */ | |
702 | #endif | |
703 | ||
704 | .L_thread_syscall_ret_check_ast: | |
705 | mfmsr r12 /* Get the current MSR */ | |
706 | rlwinm r12,r12,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Turn off interruptions enable bit */ | |
707 | mtmsr r12 /* Turn interruptions off */ | |
708 | ||
709 | mfsprg r10,0 /* Get the per_processor block */ | |
710 | ||
711 | /* Check to see if there's an outstanding AST */ | |
712 | ||
713 | lwz r4,PP_NEED_AST(r10) | |
714 | lwz r4,0(r4) | |
715 | cmpi cr0,r4, 0 | |
716 | beq cr0,.L_syscall_no_ast | |
717 | ||
718 | /* Yes there is, call ast_taken | |
719 | * pretending that the user thread took an AST exception here, | |
720 | * ast_taken will save all state and bring us back here | |
721 | */ | |
722 | ||
723 | #if DEBUG | |
724 | /* debug assert - make sure that we're not returning to kernel */ | |
725 | lwz r3,savesrr1(r30) | |
726 | andi. r3,r3,MASK(MSR_PR) | |
727 | bne+ 0f /* returning to user level, check */ | |
728 | ||
729 | lis r0,hi16(Choke) ; Choke code |
730 | ori r0,r0,lo16(Choke) ; and the rest | |
731 | li r3,failContext ; Bad state code | |
732 | sc ; System ABEND | |
733 | ||
734 | ||
735 | 0: |
736 | #endif /* DEBUG */ | |
737 | ||
738 | li r3, AST_ALL |
739 | li r4, 1 | |
740 | bl EXT(ast_taken) |
741 | ||
742 | b .L_thread_syscall_ret_check_ast | |
743 | ||
744 | /* thread_exception_return returns to here, almost all | |
745 | * registers intact. It expects a full context restore | |
746 | * of what it hasn't restored itself (ie. what we use). | |
747 | * | |
748 | * In particular for us, | |
749 | * we still have r31 points to the current thread, | |
750 | * r30 points to the current pcb | |
751 | */ | |
752 | ||
753 | .L_syscall_no_ast: | |
754 | .L_thread_syscall_return: | |
755 | ||
756 | mr r3,r30 ; Get savearea to the correct register for common exit | |
757 | lwz r8,THREAD_TOP_ACT(r31) /* Now find the current activation */ | |
758 | ||
759 | lwz r11,SAVflags(r30) /* Get the flags */ | |
760 | lwz r5,THREAD_KERNEL_STACK(r31) /* Get the base pointer to the stack */ | |
761 | rlwinm r11,r11,0,15,13 /* Clear the syscall flag */ | |
762 | lwz r4,SAVprev(r30) ; Get the previous save area | |
763 | stw r11,SAVflags(r30) /* Stick back the flags */ | |
764 | addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE /* Reset to empty */ | |
765 | stw r4,ACT_MACT_PCB(r8) ; Save previous save area | |
766 | stw r5,ACT_MACT_KSP(r8) /* Save the empty stack pointer */ | |
767 | ||
768 | b chkfac ; Go end it all... | |
769 | ||
770 | ||
771 | .L_syscall_copyin_recover: | |
772 | ||
773 | /* This is the catcher for any data faults in the copyin | |
774 | * of arguments from the user's stack. | |
775 | * r30 still holds a pointer to the PCB | |
776 | * | |
777 | * call syscall_error(EXC_BAD_ACCESS, EXC_PPC_VM_PROT_READ, sp, ssp), | |
778 | * | |
779 | * we already had a frame so we can do this | |
780 | */ | |
781 | ||
782 | li r3,EXC_BAD_ACCESS | |
783 | li r4,EXC_PPC_VM_PROT_READ | |
784 | lwz r5,saver1(r30) | |
785 | mr r6,r30 | |
786 | ||
787 | bl EXT(syscall_error) | |
788 | b .L_syscall_return | |
789 | ||
790 | ||
791 | /* |
792 | * thread_exception_return() | |
793 | * | |
794 | * Return to user mode directly from within a system call. | |
795 | */ | |
796 | ||
797 | .align 5 |
798 | .globl EXT(thread_bootstrap_return) | |
799 | LEXT(thread_bootstrap_return) ; NOTE: THIS IS GOING AWAY IN A FEW DAYS.... | |
800 | ||
801 | .globl EXT(thread_exception_return) | |
802 | LEXT(thread_exception_return) ; Directly return to user mode | |
803 | |
804 | .L_thread_exc_ret_check_ast: | |
805 | ||
806 | mfmsr r3 /* Get the MSR */ | |
807 | rlwinm r3,r3,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Clear EE */ | |
808 | mtmsr r3 /* Disable interrupts */ | |
809 | ||
810 | /* Check to see if there's an outstanding AST */ | |
811 | /* We don't bother establishing a call frame even though CHECK_AST | |
812 | can invoke ast_taken(), because it can just borrow our caller's | |
813 | frame, given that we're not going to return. | |
814 | */ | |
815 | ||
816 | mfsprg r10,0 /* Get the per_processor block */ | |
817 | lwz r4,PP_NEED_AST(r10) | |
818 | lwz r4,0(r4) | |
819 | cmpi cr0,r4, 0 | |
820 | beq cr0,.L_exc_ret_no_ast | |
821 | ||
822 | /* Yes there is, call ast_taken | |
823 | * pretending that the user thread took an AST exception here, | |
824 | * ast_taken will save all state and bring us back here | |
825 | */ | |
826 | ||
827 | ||
828 | li r3,AST_ALL |
829 | li r4,1 | |
830 | |
831 | bl EXT(ast_taken) | |
832 | b .L_thread_exc_ret_check_ast /* check for a second AST (rare)*/ | |
833 | ||
834 | /* arriving here, interrupts should be disabled */ | |
835 | /* Get the active thread's PCB pointer to restore regs | |
836 | */ | |
837 | .L_exc_ret_no_ast: | |
838 | ||
839 | lwz r31,PP_CPU_DATA(r10) | |
840 | lwz r31,CPU_ACTIVE_THREAD(r31) | |
841 | lwz r30,THREAD_TOP_ACT(r31) | |
842 | lwz r30,ACT_MACT_PCB(r30) | |
843 | mr. r30,r30 ; Is there any context yet? | |
844 | beq- makeDummyCtx ; No, hack one up... | |
845 | #if DEBUG | |
846 | /* | |
847 | * debug assert - make sure that we're not returning to kernel | |
848 | * get the active thread's PCB pointer and thus pointer to user state | |
849 | */ | |
850 | ||
851 | lwz r3,savesrr1(r30) | |
852 | andi. r3,r3,MASK(MSR_PR) | |
853 | bne+ ret_user2 ; We are ok... | |
854 | ||
855 | lis r0,hi16(Choke) ; Choke code |
856 | ori r0,r0,lo16(Choke) ; and the rest | |
857 | li r3,failContext ; Bad state code | |
858 | sc ; System ABEND | |
859 | ||
860 | ret_user2: |
861 | #endif /* DEBUG */ | |
862 | ||
863 | /* If the MSR_SYSCALL_MASK isn't set, then we came from a trap, | |
864 | * so warp into the return_from_trap (thread_return) routine, | |
865 | * which takes PCB pointer in R3, not in r30! | |
866 | */ | |
867 | lwz r0,SAVflags(r30) | |
868 | mr r3,r30 /* Copy pcb pointer into r3 in case */ | |
869 | andis. r0,r0,SAVsyscall>>16 /* Are we returning from a syscall? */ | |
870 | beq- cr0,thread_return /* Nope, must be a thread return... */ | |
871 | b .L_thread_syscall_return | |
872 | ||
873 | ; | |
874 | ; This is where we handle someone who did a thread_create followed | |
875 | ; by a thread_resume with no intervening thread_set_state. Just make an | |
876 | ; empty context, initialize it to trash and let em execute at 0... | |
877 | ||
878 | makeDummyCtx: | |
879 | bl EXT(save_get) ; Get a save_area | |
880 | li r0,0 ; Get a 0 | |
881 | addi r2,r3,savefp0 ; Point past what we are clearing | |
882 | mr r4,r3 ; Save the start | |
883 | ||
884 | cleardummy: stw r0,0(r4) ; Clear stuff | |
885 | addi r4,r4,4 ; Next word | |
886 | cmplw r4,r2 ; Still some more? | |
887 | blt+ cleardummy ; Yeah... | |
888 | ||
889 | lis r2,hi16(MSR_EXPORT_MASK_SET) ; Set the high part of the user MSR | |
890 | ori r2,r2,lo16(MSR_EXPORT_MASK_SET) ; And the low part | |
891 | stw r2,savesrr1(r3) ; Set the default user MSR | |
892 | ||
893 | b thread_return ; Go let em try to execute, hah! | |
894 | ||
895 | /* | |
896 | * ihandler(type) | |
897 | * | |
898 | * ENTRY: VM switched ON | |
899 | * Interrupts OFF | |
900 | * R3 contains exception code | |
901 | * R4 points to the saved context (virtual address) | |
902 | * Everything is saved in savearea | |
903 | * | |
904 | */ | |
905 | ||
906 | .align 5 |
907 | .globl EXT(ihandler) | |
908 | LEXT(ihandler) /* Interrupt handler */ | |
909 | |
910 | /* | |
911 | * get the value of istackptr, if it's zero then we're already on the | |
912 | * interrupt stack, otherwise it points to a saved_state structure | |
913 | * at the top of the interrupt stack. | |
914 | */ | |
915 | ||
916 | lwz r10,savesrr1(r4) /* Get SRR1 */ | |
917 | mfsprg r25,0 /* Get the per_proc block */ | |
918 | li r14,0 /* Zero this for now */ | |
919 | rlwinm. r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on? | |
920 | lwz r16,PP_CPU_DATA(r25) /* Assume we need this */ | |
921 | crmove cr1_eq,cr0_eq ; Remember vector enablement | |
922 | lwz r1,PP_ISTACKPTR(r25) /* Get the interrupt stack */ | |
923 | rlwinm. r10,r10,0,MSR_FP_BIT,MSR_FP_BIT ; Was floating point on? | |
924 | li r13,0 /* Zero this for now */ | |
925 | lwz r16,CPU_ACTIVE_THREAD(r16) /* Get the thread pointer */ | |
926 | ||
927 | beq+ cr1,ivecoff ; Vector off, do not save vrsave... | |
928 | lwz r7,savevrsave(r4) ; Get the VRSAVE register | |
929 | stw r7,liveVRS(r25) ; Set the live value | |
930 | ||
931 | ivecoff: li r0,0 /* Get a constant 0 */ | |
932 | cmplwi cr1,r16,0 /* Are we still booting? */ | |
933 | beq+ ifpoff ; Skip if floating point is off... | |
934 | lwz r9,savexfpscr(r4) ; Grab the just saved FPSCR | |
935 | stw r9,liveFPSCR(r25) ; Make it the live copy | |
936 | ||
937 | ifpoff: mr. r1,r1 /* Is it active? */ | |
938 | beq- cr1,ihboot1 /* We're still coming up... */ | |
939 | lwz r13,THREAD_TOP_ACT(r16) /* Pick up the active thread */ | |
940 | lwz r14,ACT_MACT_PCB(r13) /* Now point to the PCB */ | |
941 | ||
942 | ihboot1: lwz r9,saver1(r4) /* Pick up the 'rupt time stack */ | |
943 | stw r14,SAVprev(r4) /* Queue the new save area in the front */ | |
944 | stw r13,SAVact(r4) /* Point the savearea at its activation */ | |
945 | beq- cr1,ihboot4 /* We're still coming up... */ | |
946 | stw r4,ACT_MACT_PCB(r13) /* Point to our savearea */ | |
947 | ||
948 | ihboot4: bne .L_istackfree /* Nope... */ | |
949 | ||
950 | /* We're already on the interrupt stack, get back the old | |
951 | * stack pointer and make room for a frame | |
952 | */ | |
953 | ||
954 | lwz r10,PP_INTSTACK_TOP_SS(r25) ; Get the top of the interrupt stack |
955 | addi r5,r9,INTSTACK_SIZE-FM_SIZE ; Shift stack for bounds check | |
956 | subi r1,r9,FM_REDZONE ; Back up beyond the red zone | |
957 | sub r5,r5,r10 ; Get displacement into stack | |
958 | cmplwi r5,INTSTACK_SIZE-FM_SIZE ; Is the stack actually invalid? | |
959 | blt+ ihsetback ; The stack is ok... | |
960 | ||
961 | lwz r5,PP_DEBSTACK_TOP_SS(r25) ; Pick up debug stack top | |
962 | subi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Adjust to start of stack | |
963 | sub r5,r1,r5 ; Get displacement into debug stack | |
964 | cmplwi cr2,r5,KERNEL_STACK_SIZE-FM_SIZE ; Check if we are on debug stack | |
965 | blt+ ihsetback ; Yeah, that is ok too... | |
966 | ||
967 | lis r0,hi16(Choke) ; Choke code | |
968 | ori r0,r0,lo16(Choke) ; and the rest | |
969 | li r3,failStack ; Bad stack code | |
970 | sc ; System ABEND | |
971 | ||
972 | .align 5 | |
973 | ||
974 | .L_istackfree: |
975 | lwz r10,SAVflags(r4) | |
976 | stw r0,PP_ISTACKPTR(r25) /* Mark the stack in use */ | |
977 | oris r10,r10,HIGH_ADDR(SAVrststk) /* Indicate we reset stack when we return from this one */ | |
978 | stw r10,SAVflags(r4) /* Stick it back */ | |
979 | ||
980 | /* | |
981 | * To summarize, when we reach here, the state has been saved and
982 | * the stack is marked as busy. We now generate a small |
983 | * stack frame with backpointers to follow the calling | |
984 | * conventions. We set up the backpointers to the trapped | |
985 | * routine allowing us to backtrace. | |
986 | */ | |
987 | ||
988 | ihsetback: subi r1,r1,FM_SIZE /* Make a new frame */ | |
989 | stw r9,FM_BACKPTR(r1) /* point back to previous stackptr */ | |
990 | ||
991 | #if VERIFYSAVE | |
992 | bl versave ; (TEST/DEBUG) | |
993 | #endif | |
994 | ||
995 | #if DEBUG | |
996 | /* If debugging, we need two frames, the first being a dummy | |
997 | * which links back to the trapped routine. The second is | |
998 | * that which the C routine below will need | |
999 | */ | |
1000 | lwz r5,savesrr0(r4) /* Get interrupt address */ | |
1001 | stw r5,FM_LR_SAVE(r1) /* save old instr ptr as LR value */ | |
1002 | stwu r1,-FM_SIZE(r1) /* Make another new frame for C routine */ | |
1003 | #endif /* DEBUG */ | |
1004 | ||
1005 | lwz r5,savedsisr(r4) /* Get the DSISR */ | |
1006 | lwz r6,savedar(r4) /* Get the DAR */ | |
1007 | ||
1008 | bl EXT(interrupt) | |
1009 | ||
1010 | ||
1011 | /* interrupt() returns a pointer to the saved state in r3 | |
1012 | * | |
1013 | * Ok, back from C. Disable interrupts while we restore things | |
1014 | */ | |
1015 | .globl EXT(ihandler_ret) | |
1016 | ||
1017 | LEXT(ihandler_ret) /* Marks our return point from debugger entry */ | |
1018 | ||
1019 | mfmsr r0 /* Get our MSR */ | |
1020 | rlwinm r0,r0,0,MSR_EE_BIT+1,MSR_EE_BIT-1 /* Flip off the interrupt enabled bit */ | |
1021 | mtmsr r0 /* Make sure interrupts are disabled */ | |
1022 | mfsprg r10,0 /* Get the per_proc block */ | |
1023 | ||
1024 | lwz r8,PP_CPU_DATA(r10) /* Get the CPU data area */ | |
1025 | lwz r7,SAVflags(r3) /* Pick up the flags */ | |
1026 | lwz r8,CPU_ACTIVE_THREAD(r8) /* and the active thread */ | |
1027 | lwz r9,SAVprev(r3) /* Get previous save area */ | |
1028 | cmplwi cr1,r8,0 /* Are we still initializing? */ | |
1029 | lwz r12,savesrr1(r3) /* Get the MSR we will load on return */ | |
1030 | beq- cr1,ihboot2 /* Skip if we are still in init... */ | |
1031 | lwz r8,THREAD_TOP_ACT(r8) /* Pick up the active thread */ | |
1032 | ||
1033 | ihboot2: andis. r11,r7,HIGH_ADDR(SAVrststk) /* Is this the first on the stack? */ | |
1034 | beq- cr1,ihboot3 /* Skip if we are still in init... */ | |
1035 | stw r9,ACT_MACT_PCB(r8) /* Point to previous context savearea */ | |
1036 | ||
1037 | ihboot3: mr r4,r3 /* Move the savearea pointer */ | |
1038 | beq .L_no_int_ast2 /* Get going if not the top o' stack... */ | |
1039 | ||
1040 | ||
1041 | /* We're the last frame on the stack. Restore istackptr to empty state. | |
1042 | * | |
1043 | * Check for ASTs if one of the below is true: | |
1044 | * returning to user mode | |
1045 | * returning to a kloaded server | |
1046 | */ | |
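/* Added summary (not part of the original source) of the checks below:
 *
 *	if (preemption_level != 0)          no AST, take the normal exit
 *	else if (returning to user mode)    any pending AST   -> trap as T_AST
 *	else (staying in the kernel)        pending AST_URGENT -> T_PREEMPT
 *
 * Either trap case rewrites saveexception and falls into thandler.
 */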
1047 | lwz r9,PP_INTSTACK_TOP_SS(r10) /* Get the empty stack value */ | |
1048 | lwz r5,PP_CPU_DATA(r10) /* Get cpu_data ptr */ | |
1049 | andc r7,r7,r11 /* Remove the stack reset bit in case we pass this one */ | |
1050 | stw r9,PP_ISTACKPTR(r10) /* Save that saved state ptr */ | |
1051 | lwz r3,CPU_PREEMPTION_LEVEL(r5) /* Get preemption level */ | |
1052 | stw r7,SAVflags(r4) /* Save the flags */ | |
1053 | cmplwi r3, 0 /* Check for preemption */ | |
1054 | bne .L_no_int_ast /* Don't preempt if level is not zero */ | |
1055 | andi. r6,r12,MASK(MSR_PR) /* privilege mode */ | |
1056 | lwz r11,PP_NEED_AST(r10) /* Get the AST request address */ | |
1057 | lwz r11,0(r11) /* Get the request */ | |
1058 | beq- .L_kernel_int_ast /* In kernel space, AST_URGENT check */ | |
1059 | li r3,T_AST /* Assume the worst */ | |
1060 | mr. r11,r11 /* Are there any pending? */ | |
1061 | beq .L_no_int_ast /* Nope... */ | |
1062 | b .L_call_thandler | |
1063 | ||
1064 | .L_kernel_int_ast: | |
1065 | andi. r11,r11,AST_URGENT /* AST_URGENT */ | |
1066 | li r3,T_PREEMPT /* Assume the worst */ | |
1067 | beq .L_no_int_ast /* Nope... */ | |
1068 | ||
1069 | .L_call_thandler: | |
1070 | ||
1071 | /* | |
1072 | * There is a pending AST. Massage things to make it look like | |
1073 | * we took a trap and jump into the trap handler. To do this | |
1074 | * we essentially pretend to return from the interrupt but | |
1075 | * at the last minute jump into the trap handler with an AST | |
1076 | * trap instead of performing an rfi. | |
1077 | */ | |
1078 | ||
1079 | stw r3,saveexception(r4) /* Set the exception code to T_AST/T_PREEMPT */ | |
1080 | b EXT(thandler) /* hyperspace into AST trap */ | |
1081 | ||
1082 | .L_no_int_ast: | |
1083 | mr r3,r4 ; Get into the right register for common code | |
1084 | .L_no_int_ast2: | |
1085 | rlwinm r7,r7,0,15,13 /* Clear the syscall bit */ | |
1086 | li r4,0 ; Assume for a moment that we are in init | |
1087 | stw r7,SAVflags(r3) /* Set the flags */ | |
1088 | beq- cr1,chkfac ; Jump away if we are in init... | |
1089 | lwz r4,ACT_MACT_PCB(r8) ; Get the new level marker | |
1090 | ||
1091 | ||
1092 | ; | |
1093 | ; This section is common to all exception exits. It throws away vector | |
1094 | ; and floating point saveareas as the exception level of a thread is | |
1095 | ; exited. | |
1096 | ; | |
1097 | ; It also enables the facility if its context is live | |
1098 | ; Requires: | |
1099 | ; R3 = Savearea to be released (virtual) | |
1100 | ; R4 = New top of savearea stack (could be 0) | |
1101 | ; R8 = pointer to activation | |
1102 | ; R10 = per_proc block | |
1103 | ; | |
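;			Added outline (not part of the original source), heavily
;			simplified: for the FPU here, and the vector unit below, the
;			code asks whether we are returning from the savearea level that
;			owns the facility and whether this activation owns the live
;			register state on this CPU.  If neither holds the facility is
;			left alone; if the owning level is being left, its top savearea
;			is popped and queued for quick release; and if the live context
;			is still current for the level being returned to, the MSR_FP /
;			MSR_VEC bit is turned back on so no lazy-enable fault is taken.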
1104 | chkfac: mr. r8,r8 ; Are we still in boot? | |
1105 | beq- chkenax ; Yeah, skip it all... | |
1106 | ||
1107 | lwz r20,ACT_MACT_FPUlvl(r8) ; Get the FPU level | |
1108 | lwz r12,savesrr1(r3) ; Get the current MSR | |
1109 | cmplw cr1,r20,r3 ; Are we returning from the active level?
1110 | lwz r23,PP_FPU_THREAD(r10) ; Get floating point owner |
1111 | rlwinm r12,r12,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off floating point for now | |
1112 | cmplw cr2,r23,r8 ; Are we the facility owner?
1113 | lhz r26,PP_CPU_NUMBER(r10) ; Get the current CPU number
1114 | cror cr0_eq,cr1_eq,cr2_eq ; Check if returning from active or we own facility |
1115 | bne- cr0,chkvecnr ; Nothing to do if not returning from active or not us... | |
1116 | |
1117 | #if FPVECDBG | |
1118 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1119 | li r2,0x3301 ; (TEST/DEBUG) | |
1120 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1121 | sc ; (TEST/DEBUG) | |
1122 | #endif | |
1123 | ||
1124 | li r22,ACT_MACT_FPUcpu ; Point to the CPU indication/lock word |
1125 | ||
1126 | cfSpin2: lwarx r27,r22,r8 ; Get and reserve the last used CPU | |
1127 | mr. r27,r27 ; Is it changing now? | |
1128 | oris r0,r27,hi16(fvChk) ; Set the "changing" flag | |
1129 | blt- cfSpin2 ; Spin if changing | |
1130 | stwcx. r0,r22,r8 ; Lock it up | |
1131 | bne- cfSpin2 ; Someone is messing right now | |
1132 | ||
1133 | isync ; Make sure we see everything | |
1134 | ||
1135 | cmplw r4,r20 ; Are we going to be in the right level?
1136 | beq- cr1,chkfpfree ; Leaving active level, can not possibly enable... |
1137 | cmplw cr1,r27,r26 ; Are we on the right CPU? | |
1138 | li r0,0 ; Get a constant 0 |
1139 | beq+ cr1,chkfpnlvl ; Right CPU... | |
1140 | ||
1141 | stw r0,PP_FPU_THREAD(r10) ; Show facility unowned so we do not get back here | |
1142 | b chkvec ; Go check out the vector facility... | |
1143 | ||
1144 | chkfpnlvl: bne- chkvec ; Different level, can not enable... | |
1145 | lwz r24,ACT_MACT_FPU(r8) ; Get the floating point save area | |
1146 | ori r12,r12,lo16(MASK(MSR_FP)) ; Enable facility | |
1147 | mr. r24,r24 ; Does the savearea exist? | |
1148 | li r0,1 ; Get set to invalidate | |
1149 | beq- chkvec ; Nothing to invalidate... | |
1150 | lwz r25,SAVlvlfp(r24) ; Get the level of top savearea | |
1151 | cmplw r4,r25 ; Is the top one ours? | |
1152 | bne+ chkvec ; Not ours... | |
1153 | stw r0,SAVlvlfp(r24) ; Invalidate the first one | |
1154 | b chkvec ; Go check out the vector facility... |
1155 | ||
1156 | chkfpfree: li r0,0 ; Clear a register | |
1157 | lwz r24,ACT_MACT_FPU(r8) ; Get the floating point save area | |
1158 | ||
1159 | bne- cr2,chkfpnfr ; Not our facility, do not clear...
1160 | stw r0,PP_FPU_THREAD(r10) ; Clear floating point owner |
1161 | chkfpnfr: | |
1162 | ||
1163 | #if FPVECDBG | |
1164 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1165 | li r2,0x3302 ; (TEST/DEBUG) | |
1166 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1167 | sc ; (TEST/DEBUG) | |
1168 | #endif | |
1169 | ||
1170 | mr. r24,r24 ; Do we even have a savearea? | |
1171 | beq+ chkvec ; Nope... | |
1172 | ||
1173 | #if FPVECDBG | |
1174 | rlwinm. r0,r24,0,0,15 ; (TEST/DEBUG) | |
1175 | bne+ notbadxxx1 ; (TEST/DEBUG) | |
1176 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1177 | notbadxxx1: ; (TEST/DEBUG) | |
1178 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1179 | li r2,0x3303 ; (TEST/DEBUG) | |
1180 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1181 | sc ; (TEST/DEBUG) | |
1182 | #endif | |
1183 | ||
1184 | lwz r25,SAVlvlfp(r24) ; Get the level of top savearea | |
1185 | cmplwi r25,1 ; Is the top area invalid? | |
1186 | cmplw cr1,r25,r3 ; Is it for the returned from context? | |
1187 | beq fptoss ; It is invalid... | |
1188 | bne cr1,chkvec ; Not for the returned context... | |
1189 | ||
1190 | fptoss: lwz r25,SAVprefp(r24) ; Get previous savearea | |
1191 | #if FPVECDBG | |
1192 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1193 | li r2,0x3304 ; (TEST/DEBUG) | |
1194 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1195 | mr r5,r25 ; (TEST/DEBUG) | |
1196 | sc ; (TEST/DEBUG) | |
1197 | #endif | |
1198 | mr. r25,r25 ; Is there one? | |
1199 | stw r25,ACT_MACT_FPU(r8) ; Set the new pointer | |
1200 | beq fptoplvl ; Nope, we are at the top... | |
1201 | #if FPVECDBG | |
1202 | rlwinm. r0,r25,0,0,15 ; (TEST/DEBUG) | |
1203 | bne+ notbadxxx2 ; (TEST/DEBUG) | |
1204 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1205 | notbadxxx2: ; (TEST/DEBUG) | |
1206 | #endif | |
1207 | lwz r25,SAVlvlfp(r25) ; Get the new level | |
1208 | ||
1209 | fptoplvl: lwz r19,SAVflags(r24) ; Get the savearea flags | |
1210 | #if FPVECDBG | |
1211 | rlwinm. r0,r19,0,1,1 ; (TEST/DEBUG) | |
1212 | bne+ donotdie3 ; (TEST/DEBUG) | |
1213 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1214 | donotdie3: ; (TEST/DEBUG) | |
1215 | #endif | |
1216 | ||
1217 | #if FPVECDBG | |
1218 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1219 | li r2,0x3305 ; (TEST/DEBUG) | |
1220 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1221 | sc ; (TEST/DEBUG) | |
1222 | #endif | |
1223 | rlwinm r22,r24,0,0,19 ; Round down to the base savearea block | |
1224 | rlwinm r19,r19,0,2,0 ; Remove the floating point in use flag | |
1225 | stw r25,ACT_MACT_FPUlvl(r8) ; Set the new top level | |
1226 | andis. r0,r19,hi16(SAVinuse) ; Still in use? | |
1227 | stw r19,SAVflags(r24) ; Set the savearea flags | |
1228 | bne- invlivefp ; Go invalidate live FP
1229 | #if FPVECDBG |
1230 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1231 | li r2,0x3306 ; (TEST/DEBUG) | |
1232 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1233 | sc ; (TEST/DEBUG) | |
1234 | #endif | |
1235 | #if FPVECDBG | |
1236 | rlwinm. r0,r24,0,0,15 ; (TEST/DEBUG) | |
1237 | bne+ notbadxxx3 ; (TEST/DEBUG) | |
1238 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1239 | notbadxxx3: ; (TEST/DEBUG) | |
1240 | #endif | |
1241 | lwz r23,SACvrswap(r22) ; Get the conversion from virtual to real | |
1242 | lwz r20,PP_QUICKFRET(r10) ; Get the old quick fret head | |
1243 | xor r23,r24,r23 ; Convert to physical | |
1244 | stw r20,SAVqfret(r24) ; Back chain the quick release queue | |
1245 | stw r23,PP_QUICKFRET(r10) ; Anchor it | |
1246 | ||
1247 | invlivefp: lis r20,hi16(EXT(real_ncpus)) ; Get number of CPUs |
1248 | lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc | |
1249 | ori r20,r20,lo16(EXT(real_ncpus)) ; Other half of number of CPUs | |
1250 | li r25,PP_FPU_THREAD ; Point to the FP owner address | |
1251 | lwz r20,0(r20) ; Get number of processors active | |
1252 | ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc | |
1253 | li r2,0 ; Get something clear | |
1254 | ||
1255 | invlivefl: cmplw r23,r10 ; We can skip our processor | |
1256 | addi r20,r20,-1 ; Count remaining processors | |
1257 | beq invlivefn ; Skip ourselves... | |
1258 | ||
1259 | invlivefa: lwarx r0,r25,r23 ; Get FP owner for this processor | |
1260 | cmplw r0,r8 ; Do we own it? | |
1261 | bne invlivefn ; Nope... | |
1262 | stwcx. r2,r25,r23 ; Show not live | |
1263 | bne- invlivefa ; Someone else did this, try again... | |
1264 | ||
1265 | invlivefn: mr. r20,r20 ; Have we finished? | |
1266 | addi r23,r23,ppSize ; Bump to next | |
1267 | bgt invlivefl ; Make sure we do all processors... | |
1268 | ||
1269 | ||
1270 | ; |
1271 | ; Check out vector stuff (and translate savearea to physical for exit) | |
1272 | ; | |
1273 | chkvec: sync ; Make sure all is saved |
1274 | stw r27,ACT_MACT_FPUcpu(r8) ; Set the active CPU and release | |
1275 | ||
1276 | chkvecnr: lwz r20,ACT_MACT_VMXlvl(r8) ; Get the vector level | |
1277 | lwz r23,PP_VMX_THREAD(r10) ; Get vector owner
1278 | cmplw cr1,r20,r3 ; Are we returning from the active level? |
1279 | cmplw cr2,r23,r8 ; Are we the facility owner? | |
1280 | rlwinm r12,r12,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off vector for now
1281 | cror cr0_eq,cr1_eq,cr2_eq ; Check if returning from active or we own facility |
1282 | bne- cr0,setenanr ; Not our facility, nothing to do here... | |
1283 | |
1284 | #if FPVECDBG | |
1285 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1286 | li r2,0x3401 ; (TEST/DEBUG) | |
1287 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1288 | sc ; (TEST/DEBUG) | |
1289 | #endif | |
1290 | ||
1291 | li r22,ACT_MACT_VMXcpu ; Point to the CPU indication/lock word |
1292 | ||
1293 | cvSpin2: lwarx r27,r22,r8 ; Get and reserve the last used CPU | |
1294 | mr. r27,r27 ; Is it changing now? | |
1295 | oris r0,r27,hi16(fvChk) ; Set the "changing" flag | |
1296 | blt- cvSpin2 ; Spin if changing | |
1297 | stwcx. r0,r22,r8 ; Lock it up | |
1298 | bne- cvSpin2 ; Someone is messing right now | |
1299 | ||
1300 | isync ; Make sure we see everything | |
1301 | ||
1302 | cmplw r4,r20 ; Are we going to be in the right level?
1303 | beq- cr1,chkvecfree ; Leaving active level, can not possibly enable... |
1304 | cmplw cr1,r27,r26 ; Are we on the right CPU? | |
1305 | li r0,0 ; Get a constant 0 |
1306 | beq+ cr1,chkvecnlvl ; Right CPU... | |
1307 | ||
1308 | stw r0,PP_VMX_THREAD(r10) ; Show facility unowned so we do not get back here | |
1309 | b setena ; Go actually exit... | |
1310 | ||
1311 | chkvecnlvl: bne- setena ; Different level, can not enable... | |
1312 | lwz r24,ACT_MACT_VMX(r8) ; Get the vector save area | |
1313 | oris r12,r12,hi16(MASK(MSR_VEC)) ; Enable facility | |
1314 | mr. r24,r24 ; Does the savearea exist? | |
1315 | li r0,1 ; Get set to invalidate | |
1316 | beq- setena ; Nothing to invalidate... | |
1317 | lwz r25,SAVlvlvec(r24) ; Get the level of top savearea | |
1318 | cmplw r4,r25 ; Is the top one ours? | |
1319 | bne+ setena ; Not ours... | |
1320 | stw r0,SAVlvlvec(r24) ; Invalidate the first one | |
1321 | b setena ; Actually exit... | |
1322 | ||
1323 | chkvecfree: li r0,0 ; Clear a register | |
1324 | lwz r24,ACT_MACT_VMX(r8) ; Get the vector save area | |
1325 | |
1326 | bne- cr2,chkvecnfr ; Not our facility, do not clear... | |
1327 | stw r0,PP_VMX_THREAD(r10) ; Clear vector owner |
1328 | chkvecnfr: | |
1329 | ||
1330 | #if FPVECDBG | |
1331 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1332 | li r2,0x3402 ; (TEST/DEBUG) | |
1333 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1334 | sc ; (TEST/DEBUG) | |
1335 | #endif | |
1336 | ||
1337 | mr. r24,r24 ; Do we even have a savearea? | |
1338 | beq+ setena ; Nope... | |
1339 | ||
1340 | #if FPVECDBG | |
1341 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1342 | li r2,0x3403 ; (TEST/DEBUG) | |
1343 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1344 | sc ; (TEST/DEBUG) | |
1345 | #endif | |
1346 | lwz r25,SAVlvlvec(r24) ; Get the level | |
1347 | cmplwi r25,1 ; Is the top area invalid? | |
1348 | cmplw cr1,r25,r3 ; Is it for the returned from context? | |
1349 | beq vectoss ; It is invalid... | |
1350 | bne cr1,setena ; Not for the returned context... | |
1351 | ||
1352 | vectoss: lwz r25,SAVprevec(r24) ; Get previous savearea | |
1353 | #if FPVECDBG | |
1354 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1355 | li r2,0x3504 ; (TEST/DEBUG) | |
1356 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1357 | mr r5,r25 ; (TEST/DEBUG) | |
1358 | sc ; (TEST/DEBUG) | |
1359 | #endif | |
1360 | mr. r25,r25 ; Is there one? | |
1361 | stw r25,ACT_MACT_VMX(r8) ; Set the new pointer | |
1362 | beq vectoplvl ; Nope, we are at the top... | |
1363 | lwz r25,SAVlvlvec(r25) ; Get the new level | |
1364 | ||
1365 | vectoplvl: lwz r19,SAVflags(r24) ; Get the savearea flags | |
1366 | ||
1367 | #if FPVECDBG | |
1368 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1369 | li r2,0x3405 ; (TEST/DEBUG) | |
1370 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1371 | sc ; (TEST/DEBUG) | |
1372 | #endif | |
1373 | rlwinm r22,r24,0,0,19 ; Round down to the base savearea block | |
1374 | rlwinm r19,r19,0,3,1 ; Remove the vector in use flag | |
1375 | stw r25,ACT_MACT_VMXlvl(r8) ; Set the new top level | |
1376 | andis. r0,r19,hi16(SAVinuse) ; Still in use? | |
1377 | stw r19,SAVflags(r24) ; Set the savearea flags | |
0b4e3aa0 | 1378 | bne- invliveve ; Go invalidate live vec... |
1c79356b A |
1379 | #if FPVECDBG |
1380 | lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG) | |
1381 | li r2,0x3406 ; (TEST/DEBUG) | |
1382 | oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG) | |
1383 | sc ; (TEST/DEBUG) | |
1384 | #endif | |
1385 | lwz r23,SACvrswap(r22) ; Get the conversion from virtual to real | |
1386 | lwz r20,PP_QUICKFRET(r10) ; Get the old quick fret head | |
1387 | xor r23,r24,r23 ; Convert to physical | |
1388 | stw r20,SAVqfret(r24) ; Back chain the quick release queue | |
1389 | stw r23,PP_QUICKFRET(r10) ; Anchor it | |
1390 | ||
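;
;			The two stores above push the released savearea onto the per-processor
;			quick release queue: back chain the old head into the savearea, then
;			anchor the savearea (by its physical address) as the new head.  A
;			simplified sketch that ignores the virtual/physical distinction the
;			assembly maintains (names are illustrative assumptions):
;
;				typedef struct savearea savearea_t;
;				struct savearea { savearea_t *save_qfret; /* ...rest of savearea... */ };
;				struct quickfret_anchor { savearea_t *head; };
;
;				static void quickfret_push(struct quickfret_anchor *pp, savearea_t *sa)
;				{
;					sa->save_qfret = pp->head;          /* back chain the quick release queue */
;					pp->head = sa;                      /* anchor it */
;				}
;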
0b4e3aa0 A |
1391 | invliveve: lis r20,hi16(EXT(real_ncpus)) ; Get number of CPUs |
1392 | lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc | |
1393 | ori r20,r20,lo16(EXT(real_ncpus)) ; Other half of number of CPUs | |
1394 | li r25,PP_VMX_THREAD ; Point to the vector owner address | |
1395 | lwz r20,0(r20) ; Get number of processors active | |
1396 | ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc | |
1397 | li r2,0 ; Get something clear | |
1398 | ||
1399 | invlivevl: cmplw r23,r10 ; We can skip our processor | |
1400 | addi r20,r20,-1 ; Count remaining processors | |
1401 | beq invlivevn ; Skip ourselves... | |
1402 | ||
1403 | invliveva: lwarx r0,r25,r23 ; Get vector owner for this processor | |
1404 | cmplw r0,r8 ; Do we own it? | |
1405 | bne invlivevn ; Nope... | |
1406 | stwcx. r2,r25,r23 ; Show not live | |
1407 | bne- invliveva ; Someone else did this, try again... | |
1408 | ||
1409 | invlivevn: mr. r20,r20 ; Have we finished? | |
1410 | addi r23,r23,ppSize ; Bump to next | |
1411 | bgt invlivevl ; Make sure we do all processors... | |
1412 | ||
1413 | setena: sync ; Make sure all is saved | |
1414 | stw r27,ACT_MACT_VMXcpu(r8) ; Set the active CPU and release | |
1415 | ||
1416 | setenanr: rlwinm r20,r12,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector | |
1c79356b A |
1417 | rlwimi. r20,r12,(((31-floatCngbit)+(MSR_FP_BIT+1))&31),floatCngbit,floatCngbit ; Set flag if we enabled floats |
1418 | beq setenaa ; Neither float nor vector turned on.... | |
1419 | ||
1420 | lwz r5,ACT_MACT_SPF(r8) ; Get activation copy | |
1421 | lwz r6,spcFlags(r10) ; Get per_proc copy | |
1422 | or r5,r5,r20 ; Set vector/float changed bits in activation | |
1423 | or r6,r6,r20 ; Set vector/float changed bits in per_proc | |
1424 | stw r5,ACT_MACT_SPF(r8) ; Set activation copy | |
1425 | stw r6,spcFlags(r10) ; Set per_proc copy | |
1426 | ||
1427 | setenaa: stw r12,savesrr1(r3) ; Turn facility on or off | |
1428 | ||
1429 | mfdec r24 ; Get decrementer | |
1430 | lwz r22,qactTimer(r8) ; Get high order quick activation timer | |
1431 | mr. r24,r24 ; See if it has popped already... | |
1432 | lwz r23,qactTimer+4(r8) ; Get low order qact timer | |
1433 | ble- chkenax ; We have popped or are just about to... | |
1434 | ||
1435 | segtb: mftbu r20 ; Get the upper time base | |
1436 | mftb r21 ; Get the low | |
1437 | mftbu r19 ; Get upper again | |
1438 | or. r0,r22,r23 ; Any time set? | |
1439 | cmplw cr1,r20,r19 ; Did they change? | |
1440 | beq+ chkenax ; No time set.... | |
1441 | bne- cr1,segtb ; Timebase ticked, get them again... | |
1442 | ||
1443 | subfc r6,r21,r23 ; Subtract current from qact time | |
1444 | li r0,0 ; Make a 0 | |
1445 | subfe r5,r20,r22 ; Finish subtract | |
1446 | subfze r0,r0 ; Get a 0 if qact was bigger than current, -1 otherwise | |
1447 | andc. r12,r5,r0 ; Set 0 if qact has passed | |
1448 | andc r13,r6,r0 ; Set 0 if qact has passed | |
1449 | bne chkenax ; If high order is non-zero, this is too big for a decrementer | |
1450 | cmplw r13,r24 ; Is this earlier than the decrementer? (logical compare takes care of high bit on) | |
1451 | bge+ chkenax ; No, do not reset decrementer... | |
1452 | ||
1453 | mtdec r13 ; Set our value | |
1454 | ||
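;
;			The sequence from segtb down to the mtdec above decides whether the
;			quick activation timer should make the decrementer pop earlier: the
;			timebase is re-read until its upper half is stable, the 64-bit delta
;			to the qact time is computed (clamped to zero if it has already
;			passed), and the decrementer is rewritten only if the delta fits in
;			32 bits and is sooner than the pending pop.  A hedged C sketch of
;			that decision (the function is illustrative, not kernel code):
;
;				#include <stdint.h>
;
;				static int32_t earlier_decrementer(uint64_t qact, uint64_t now, int32_t dec)
;				{
;					if (dec <= 0 || qact == 0)              /* already popped, or no qact time set */
;						return dec;
;					uint64_t delta = (qact > now) ? (qact - now) : 0;   /* clamp passed times to 0 */
;					if (delta >> 32)                        /* too big for a 32-bit decrementer */
;						return dec;
;					if ((uint32_t)delta < (uint32_t)dec)
;						return (int32_t)delta;              /* pop earlier */
;					return dec;
;				}
;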
1455 | chkenax: lwz r6,SAVflags(r3) ; Pick up the flags of the old savearea | |
1456 | ||
1457 | ||
1458 | #if DEBUG | |
1459 | lwz r20,SAVact(r3) ; (TEST/DEBUG) Make sure our restore | |
0b4e3aa0 A |
1460 | lwz r21,PP_CPU_DATA(r10) ; (TEST/DEBUG) context is associated |
1461 | lwz r21,CPU_ACTIVE_THREAD(r21) ; (TEST/DEBUG) with the current act. | |
1c79356b A |
1462 | cmpwi r21,0 ; (TEST/DEBUG) |
1463 | beq- yeswereok ; (TEST/DEBUG) | |
1464 | lwz r21,THREAD_TOP_ACT(r21) ; (TEST/DEBUG) | |
1465 | cmplw r21,r20 ; (TEST/DEBUG) | |
1466 | beq+ yeswereok ; (TEST/DEBUG) | |
0b4e3aa0 A |
1467 | |
1468 | lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code | |
1469 | ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest | |
1470 | mr r21,r3 ; (TEST/DEBUG) Save the savearea address | |
1471 | li r3,failContext ; (TEST/DEBUG) Bad state code | |
1472 | sc ; (TEST/DEBUG) System ABEND | |
1c79356b A |
1473 | |
1474 | yeswereok: | |
1475 | #endif | |
1476 | ||
1477 | rlwinm r5,r3,0,0,19 ; Round savearea down to page bndry | |
1478 | rlwinm r6,r6,0,1,31 ; Mark savearea free | |
1479 | lwz r5,SACvrswap(r5) ; Get the conversion from virtual to real | |
1480 | stw r6,SAVflags(r3) ; Set savearea flags | |
1481 | xor r3,r3,r5 ; Flip to physical address | |
1482 | b EXT(exception_exit) ; We are all done now... | |
1483 | ||
1484 | ||
1485 | ||
1486 | /* | |
1487 | * Here's where we handle the fastpath stuff | |
1488 | * We'll do what we can here because registers are already | |
1489 | * loaded and it will be less confusing than moving them around. | |
1490 | * If we need to, though, we'll branch off somewhere else. | |
1491 | * | |
1492 | * Registers when we get here: | |
1493 | * | |
1494 | * r0 = syscall number | |
1495 | * r4 = savearea/pcb | |
1496 | * r13 = activation | |
1497 | * r14 = previous savearea (if any) | |
1498 | * r16 = thread | |
1499 | * r25 = per_proc | |
1500 | */ | |
1501 | ||
0b4e3aa0 A |
1502 | .align 5 |
1503 | ||
1c79356b A |
1504 | fastpath: cmplwi cr3,r0,0x7FF1 ; Is it CthreadSetSelfNumber? |
1505 | bnelr- cr3 ; Not a fast path... | |
1506 | ||
1507 | /* | |
1508 | * void cthread_set_self(cproc_t p) | |
1509 | * | |
1510 | * Sets thread state "user_value" | |
1511 | * | |
1512 | * This op is invoked as follows: | |
1513 | * li r0, CthreadSetSelfNumber // load the fast-trap number | |
1514 | * sc // invoke fast-trap | |
1515 | * blr | |
1516 | * | |
1517 | */ | |
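;
;			A rough C equivalent of the three-instruction trap body below: the
;			caller's value (saved r3) is remembered in the activation and copied
;			into the per_proc UAW word so later code can hand it back cheaply.
;			Field and type names here are illustrative assumptions:
;
;				#include <stdint.h>
;
;				typedef struct thread_act { uintptr_t cthread_self; } thread_act_t;
;				typedef struct per_proc   { uintptr_t uaw;          } per_proc_t;
;
;				static void cthread_set_self_fast(thread_act_t *act, per_proc_t *pp, uintptr_t self)
;				{
;					act->cthread_self = self;           /* remember it */
;					pp->uaw = self;                     /* prime the per_proc_info with it */
;				}
;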
1518 | ||
1519 | CthreadSetSelfNumber: | |
1520 | ||
1521 | lwz r5,saver3(r4) /* Retrieve the self number */ | |
1522 | stw r5,CTHREAD_SELF(r13) /* Remember it */ | |
1523 | stw r5,UAW(r25) /* Prime the per_proc_info with it */ | |
1524 | ||
1525 | ||
1526 | .globl EXT(fastexit) | |
1527 | EXT(fastexit): | |
1528 | lwz r8,SAVflags(r4) /* Pick up the flags */ | |
1529 | rlwinm r9,r4,0,0,19 /* Round down to the base savearea block */ | |
1530 | rlwinm r8,r8,0,1,31 /* Clear the attached bit */ | |
1531 | lwz r9,SACvrswap(r9) /* Get the conversion from virtual to real */ | |
1532 | stw r8,SAVflags(r4) /* Set the flags */ | |
1533 | xor r3,r4,r9 /* Switch savearea to physical addressing */ | |
1534 | b EXT(exception_exit) /* Go back to the caller... */ | |
1535 | ||
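;
;			The fastexit path above converts the savearea's virtual address to a
;			physical one with a single XOR: each savearea page stores, at its
;			rounded-down base, the value (virtual page base ^ physical page base).
;			Since both bases are page aligned, XOR-ing that value into any address
;			inside the page preserves the offset and swaps the page bits, so the
;			same constant maps either direction.  A minimal sketch (names are
;			illustrative assumptions):
;
;				#include <stdint.h>
;
;				static uint32_t save_virt_to_phys(uint32_t save_virt, uint32_t vrswap)
;				{
;					/* vrswap is the page's precomputed (virt_base ^ phys_base) */
;					return save_virt ^ vrswap;          /* flip to physical addressing */
;				}
;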
1536 | ||
1537 | /* | |
1538 | * Here's where we check for a hit on the Blue Box Assist | |
1539 | * Most registers are non-volatile, so be careful here. If we don't | |
1540 | * recognize the trap instruction we go back for regular processing. | |
1541 | * Otherwise we transfer to the assist code. | |
1542 | */ | |
1543 | ||
0b4e3aa0 A |
1544 | .align 5 |
1545 | ||
1c79356b | 1546 | checkassist: |
0b4e3aa0 A |
1547 | lwz r0,saveexception(r4) ; Get the exception code |
1548 | lwz r23,savesrr1(r4) ; Get the interrupted MSR | |
1549 | lwz r26,ACT_MACT_BEDA(r13) ; Get Blue Box Descriptor Area | |
1550 | mtcrf 0x18,r23 ; Check what SRR1 says | |
1551 | lwz r24,ACT_MACT_BTS(r13) ; Get the table start | |
1552 | cmplwi r0,T_AST ; Check for T_AST trap | |
1553 | lwz r27,savesrr0(r4) ; Get trapped address | |
1554 | crnand cr1_eq,SRR1_PRG_TRAP_BIT,MSR_PR_BIT ; We need both trap and user state | |
1555 | sub r24,r27,r24 ; See how far into it we are | |
1556 | cror cr0_eq,cr0_eq,cr1_eq ; Need to bail if AST or not trap or not user state | |
1557 | cmplwi cr1,r24,BB_MAX_TRAP ; Do we fit in the list? | |
1558 | cror cr0_eq,cr0_eq,cr1_gt ; Also leave if trap not in range | |
1559 | btlr- cr0_eq ; No assist if AST or not trap or not user state or trap not in range | |
1560 | b EXT(atomic_switch_trap) ; Go to the assist... | |
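;
;			The eligibility test above, restated as a hedged C sketch (parameter
;			and function names are illustrative, not the kernel's declarations):
;			assist only if this is a program trap taken from user state, it is
;			not an AST, and the trapped address falls within BB_MAX_TRAP bytes
;			of the Blue Box trap table start.
;
;				#include <stdbool.h>
;				#include <stdint.h>
;
;				static bool bluebox_assist_eligible(uint32_t exc, uint32_t t_ast,
;				                                    bool srr1_prog_trap, bool user_state,
;				                                    uint32_t trapped_pc, uint32_t table_start,
;				                                    uint32_t bb_max_trap)
;				{
;					if (exc == t_ast)                    /* never assist an AST */
;						return false;
;					if (!(srr1_prog_trap && user_state)) /* need both trap and user state */
;						return false;
;					/* unsigned subtract also rejects addresses below the table start */
;					return (trapped_pc - table_start) <= bb_max_trap;
;				}
;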
1c79356b A |
1561 | |
1562 | ; | |
1563 | ; Virtual Machine Monitor | |
1564 | ; Here is where we exit from the emulated context | |
1565 | ; Note that most registers get trashed here | |
1566 | ; R3 and R30 are preserved across the call and hold the activation | |
1567 | ; and savearea, respectively. | |
1568 | ; | |
1569 | ||
0b4e3aa0 A |
1570 | .align 5 |
1571 | ||
1c79356b A |
1572 | exitFromVM: mr r30,r4 ; Get the savearea |
1573 | mr r3,r13 ; Get the activation | |
1574 | ||
1575 | b EXT(vmm_exit) ; Do it to it | |
1576 | ||
1577 | .align 5 | |
1578 | .globl EXT(retFromVM) | |
1579 | ||
1580 | LEXT(retFromVM) | |
1581 | mfsprg r10,0 ; Restore the per_proc info | |
1582 | mr r8,r3 ; Get the activation | |
1583 | lwz r4,SAVprev(r30) ; Pick up the previous savearea | |
1584 | mr r3,r30 ; Put savearea in proper register for common code | |
1585 | lwz r11,SAVflags(r30) ; Get the flags of the current savearea | |
1586 | rlwinm r11,r11,0,15,13 ; Clear the syscall flag | |
1587 | lwz r1,ACT_THREAD(r8) ; and the active thread | |
1588 | stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared) | |
1589 | ||
1590 | stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none) | |
1591 | ||
1592 | lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack | |
1593 | addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty | |
1594 | stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer | |
1595 | b chkfac ; Go end it all... | |
1596 | ||
1597 | ||
0b4e3aa0 A |
1598 | ; |
1599 | ; chandler (note: not a candle maker or tallow merchant) | |
1600 | ; | |
1601 | ; Here is the system choke handler. This is where the system goes | |
1602 | ; to die. | |
1603 | ; | |
1604 | ; We get here as a result of a T_CHOKE exception which is generated | |
1605 | ; by the Choke firmware call or by lowmem_vectors when it detects a | |
1606 | ; fatal error. Examples of where this may be used are when we detect | |
1607 | ; problems in low-level mapping chains, trashed savearea free chains, | |
1608 | ; or stack guardpage violations. | |
1609 | ; | |
1610 | ; Note that we cannot set a back chain in the stack when we come | |
1611 | ; here, since we are probably here because the chain was corrupt. | |
1612 | ; | |
1c79356b | 1613 | |
0b4e3aa0 A |
1614 | |
1615 | .align 5 | |
1616 | .globl EXT(chandler) | |
1617 | LEXT(chandler) /* Choke handler */ | |
1618 | ||
1619 | lis r25,hi16(EXT(trcWork)) ; (TEST/DEBUG) | |
1620 | li r31,0 ; (TEST/DEBUG) | |
1621 | ori r25,r25,lo16(EXT(trcWork)) ; (TEST/DEBUG) | |
1622 | stw r31,traceMask(r25) ; (TEST/DEBUG) | |
1623 | ||
1624 | ||
1625 | mfsprg r25,0 ; Get the per_proc | |
1626 | ||
1627 | lwz r1,PP_DEBSTACKPTR(r25) ; Get debug stack pointer | |
1628 | cmpwi r1,-1 ; Are we already choking? | |
1629 | bne chokefirst ; Nope... | |
1c79356b | 1630 | |
0b4e3aa0 A |
1631 | chokespin: addi r31,r31,1 ; Spin and hope for an analyzer connection... |
1632 | addi r31,r31,1 ; Spin and hope for an analyzer connection... | |
1633 | addi r31,r31,1 ; Spin and hope for an analyzer connection... | |
1634 | addi r31,r31,1 ; Spin and hope for an analyzer connection... | |
1635 | addi r31,r31,1 ; Spin and hope for an analyzer connection... | |
1636 | addi r31,r31,1 ; Spin and hope for an analyzer connection... | |
1637 | b chokespin ; Spin and hope for an analyzer connection... | |
1c79356b | 1638 | |
0b4e3aa0 A |
1639 | chokefirst: li r0,-1 ; Set choke value |
1640 | mr. r1,r1 ; See if we are on debug stack yet | |
1641 | lwz r10,saver1(r4) ; | |
1642 | stw r0,PP_DEBSTACKPTR(r25) ; Show we are choking | |
1643 | bne chokestart ; We are not on the debug stack yet... | |
1c79356b | 1644 | |
0b4e3aa0 A |
1645 | lwz r2,PP_DEBSTACK_TOP_SS(r25) ; Get debug stack top |
1646 | sub r11,r2,r10 ; Get stack depth | |
1c79356b | 1647 | |
0b4e3aa0 A |
1648 | cmplwi r11,KERNEL_STACK_SIZE-FM_SIZE-TRAP_SPACE_NEEDED ; Check if stack pointer is ok |
1649 | bgt chokespin ; Bad stack pointer or too little left, just die... | |
1c79356b | 1650 | |
0b4e3aa0 A |
1651 | subi r1,r10,FM_REDZONE ; Make a red zone |
1652 | ||
1653 | chokestart: li r0,0 ; Get a zero | |
1654 | stw r0,FM_BACKPTR(r1) ; We now have terminated the back chain | |
1655 | ||
1656 | bl EXT(SysChoked) ; Call the "C" phase of this | |
1657 | b chokespin ; Should not be here so just go spin... | |
1658 | ||
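;
;			When we were already running on the debug stack, chokefirst above
;			checks that the interrupted stack pointer is still within bounds and
;			leaves enough room before calling into C; otherwise we just spin.
;			A small sketch of that test (the function itself is an assumption;
;			the constants mirror the assembly):
;
;				#include <stdbool.h>
;				#include <stdint.h>
;
;				static bool choke_stack_usable(uint32_t debstack_top, uint32_t interrupted_sp,
;				                               uint32_t kernel_stack_size, uint32_t fm_size,
;				                               uint32_t trap_space_needed)
;				{
;					uint32_t depth = debstack_top - interrupted_sp;
;					/* bad stack pointer or too little left means we spin and die */
;					return depth <= kernel_stack_size - fm_size - trap_space_needed;
;				}
;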
1c79356b A |
1659 | |
1660 | #if VERIFYSAVE | |
1661 | ; | |
1662 | ; Savearea chain verification | |
1663 | ; | |
1664 | ||
1665 | versave: | |
1666 | ||
1667 | #if 0 | |
1668 | ; | |
1669 | ; Make sure that only the top FPU savearea is marked invalid | |
1670 | ; | |
1671 | ||
1672 | lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) | |
1673 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1674 | ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) | |
1675 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1676 | li r20,0 ; (TEST/DEBUG) | |
1677 | lwz r26,0(r27) ; (TEST/DEBUG) | |
1678 | lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) | |
1679 | mr. r26,r26 ; (TEST/DEBUG) | |
1680 | lwz r28,psthreads(r28) ; (TEST/DEBUG) | |
1681 | bnelr- ; (TEST/DEBUG) | |
1682 | ||
1683 | fcknxtth: mr. r27,r27 ; (TEST/DEBUG) | |
1684 | beqlr- ; (TEST/DEBUG) | |
1685 | ||
1686 | lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) | |
1687 | ||
1688 | fckact: mr. r26,r26 ; (TEST/DEBUG) | |
1689 | bne+ fckact2 ; (TEST/DEBUG) | |
1690 | ||
1691 | lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line | |
1692 | subi r27,r27,1 ; (TEST/DEBUG) | |
1693 | b fcknxtth ; (TEST/DEBUG) | |
1694 | ||
1695 | fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain | |
1696 | mr. r20,r20 ; (TEST/DEBUG) Are there any? | |
1697 | beq+ fcknact ; (TEST/DEBUG) No... | |
1698 | ||
1699 | fckact3: lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Get next in list | |
1700 | mr. r20,r20 ; (TEST/DEBUG) Check next savearea | |
1701 | beq+ fcknact ; (TEST/DEBUG) No... | |
1702 | ||
1703 | lwz r29,SAVlvlfp(r20) ; (TEST/DEBUG) Get the level | |
1704 | ||
1705 | cmplwi r29,1 ; (TEST/DEBUG) Is it invalid?? | |
1706 | bne+ fckact3 ; (TEST/DEBUG) Nope... | |
1707 | ||
1708 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1709 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1710 | stw r27,0(r27) ; (TEST/DEBUG) | |
1711 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1712 | ||
1713 | fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation | |
1714 | b fckact ; (TEST/DEBUG) | |
1715 | #endif | |
1716 | ||
1717 | #if 1 | |
1718 | ; | |
1719 | ; Make sure there are no circular links in the float chain | |
1720 | ; And that FP is marked busy in it. | |
1721 | ; And that only the top is marked invalid. | |
1722 | ; And that the owning PCB is correct. | |
1723 | ; | |
1724 | ||
1725 | lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) | |
1726 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1727 | ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) | |
1728 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1729 | li r20,0 ; (TEST/DEBUG) | |
1730 | lwz r26,0(r27) ; (TEST/DEBUG) | |
1731 | lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) | |
1732 | mr. r26,r26 ; (TEST/DEBUG) | |
1733 | lwz r28,psthreads(r28) ; (TEST/DEBUG) | |
1734 | bnelr- ; (TEST/DEBUG) | |
1735 | ||
1736 | fcknxtth: mr. r27,r27 ; (TEST/DEBUG) | |
1737 | beqlr- ; (TEST/DEBUG) | |
1738 | ||
1739 | lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) | |
1740 | ||
1741 | fckact: mr. r26,r26 ; (TEST/DEBUG) | |
1742 | bne+ fckact2 ; (TEST/DEBUG) | |
1743 | ||
1744 | lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line | |
1745 | subi r27,r27,1 ; (TEST/DEBUG) | |
1746 | b fcknxtth ; (TEST/DEBUG) | |
1747 | ||
1748 | fckact2: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain | |
1749 | li r29,1 ; (TEST/DEBUG) | |
1750 | li r22,0 ; (TEST/DEBUG) | |
1751 | ||
1752 | fckact3: mr. r20,r20 ; (TEST/DEBUG) Are there any? | |
1753 | beq+ fckact5 ; (TEST/DEBUG) No... | |
1754 | ||
1755 | addi r22,r22,1 ; (TEST/DEBUG) Count chain depth | |
1756 | ||
1757 | lwz r21,SAVflags(r20) ; (TEST/DEBUG) Get the flags | |
1758 | rlwinm. r21,r21,0,1,1 ; (TEST/DEBUG) FP busy? | |
1759 | bne+ fckact3a ; (TEST/DEBUG) Yeah... | |
1760 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1761 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1762 | stw r27,0(r27) ; (TEST/DEBUG) | |
1763 | BREAKPOINT_TRAP ; (TEST/DEBUG) Die | |
1764 | ||
1765 | fckact3a: cmplwi r22,1 ; (TEST/DEBUG) At first SA? | |
1766 | beq+ fckact3b ; (TEST/DEBUG) Yeah, invalid is ok... | |
1767 | lwz r21,SAVlvlfp(r20) ; (TEST/DEBUG) Get level | |
1768 | cmplwi r21,1 ; (TEST/DEBUG) Is it invalid? | |
1769 | bne+ fckact3b ; (TEST/DEBUG) Nope, it is ok... | |
1770 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1771 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1772 | stw r27,0(r27) ; (TEST/DEBUG) | |
1773 | BREAKPOINT_TRAP ; (TEST/DEBUG) Die | |
1774 | ||
1775 | fckact3b: lwz r21,SAVact(r20) ; (TEST/DEBUG) Get the owner | |
1776 | cmplw r21,r26 ; (TEST/DEBUG) Correct activation? | |
1777 | beq+ fckact3c ; (TEST/DEBUG) Yup... | |
1778 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1779 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1780 | stw r27,0(r27) ; (TEST/DEBUG) | |
1781 | BREAKPOINT_TRAP ; (TEST/DEBUG) Die | |
1782 | ||
1783 | fckact3c: ; (TEST/DEBUG) | |
1784 | lbz r21,SAVflags+3(r20) ; (TEST/DEBUG) Pick up the test byte | |
1785 | mr. r21,r21 ; (TEST/DEBUG) marked? | |
1786 | beq+ fckact4 ; (TEST/DEBUG) No, good... | |
1787 | ||
1788 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1789 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1790 | stw r27,0(r27) ; (TEST/DEBUG) | |
1791 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1792 | ||
1793 | fckact4: stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Set the test byte | |
1794 | lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list | |
1795 | b fckact3 ; (TEST/DEBUG) Try it... | |
1796 | ||
1797 | fckact5: lwz r20,ACT_MACT_FPU(r26) ; (TEST/DEBUG) Get FPU chain | |
1798 | li r29,0 ; (TEST/DEBUG) | |
1799 | ||
1800 | fckact6: mr. r20,r20 ; (TEST/DEBUG) Are there any? | |
1801 | beq+ fcknact ; (TEST/DEBUG) No... | |
1802 | ||
1803 | stb r29,SAVflags+3(r20) ; (TEST/DEBUG) Clear the test byte | |
1804 | lwz r20,SAVprefp(r20) ; (TEST/DEBUG) Next in list | |
1805 | b fckact6 ; (TEST/DEBUG) Try it... | |
1806 | ||
1807 | fcknact: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation | |
1808 | b fckact ; (TEST/DEBUG) | |
1809 | #endif | |
1810 | ||
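;
;			The #if 1 block above walks each activation's floating point savearea
;			chain and dies if it finds a circular link, a savearea whose FP busy
;			flag is clear, an invalid level anywhere but the top entry, or a
;			savearea owned by a different activation; a scratch byte in the flags
;			marks visited entries and a second pass clears the marks.  A rough C
;			sketch of the same idea, minus the busy-flag check (struct and field
;			names are illustrative assumptions):
;
;				#include <assert.h>
;				#include <stddef.h>
;				#include <stdint.h>
;
;				typedef struct savearea {
;					struct savearea *save_prefp;        /* previous FP savearea in the chain */
;					uint32_t         save_level;        /* 1 means "invalid" */
;					uint8_t          mark;              /* scratch byte used only by this check */
;					void            *owner;             /* owning activation */
;				} savearea_t;
;
;				static void verify_fpu_chain(savearea_t *top, void *act)
;				{
;					int depth = 0;
;					for (savearea_t *s = top; s != NULL; s = s->save_prefp) {
;						depth++;
;						assert(s->owner == act);                  /* correct activation */
;						assert(depth == 1 || s->save_level != 1); /* only the top may be invalid */
;						assert(s->mark == 0);                     /* a set mark means a circular link */
;						s->mark = 1;
;					}
;					for (savearea_t *s = top; s != NULL; s = s->save_prefp)
;						s->mark = 0;                              /* second pass clears the marks */
;				}
;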
1811 | ||
1812 | #if 0 | |
1813 | ; | |
1814 | ; Make sure the in-use count matches the number of saveareas found. This is | |
1815 | ; not always accurate; there is a variable "fuzz" factor in the count. | |
1816 | ||
1817 | lis r28,hi16(EXT(default_pset)) ; (TEST/DEBUG) | |
1818 | lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1819 | ori r28,r28,lo16(EXT(default_pset)) ; (TEST/DEBUG) | |
1820 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1821 | li r20,0 ; (TEST/DEBUG) | |
1822 | lwz r26,0(r27) ; (TEST/DEBUG) | |
1823 | lwz r27,psthreadcnt(r28) ; (TEST/DEBUG) | |
1824 | mr. r26,r26 ; (TEST/DEBUG) | |
1825 | lwz r28,psthreads(r28) ; (TEST/DEBUG) | |
1826 | bnelr- ; (TEST/DEBUG) | |
1827 | ||
1828 | cknxtth: mr. r27,r27 ; (TEST/DEBUG) | |
1829 | beq- cktotal ; (TEST/DEBUG) | |
1830 | ||
1831 | lwz r26,THREAD_TOP_ACT(r28) ; (TEST/DEBUG) | |
1832 | ||
1833 | ckact: mr. r26,r26 ; (TEST/DEBUG) | |
1834 | bne+ ckact2 ; (TEST/DEBUG) | |
1835 | ||
1836 | lwz r28,THREAD_PSTHRN(r28) ; (TEST/DEBUG) Next in line | |
1837 | subi r27,r27,1 ; (TEST/DEBUG) | |
1838 | b cknxtth ; (TEST/DEBUG) | |
1839 | ||
1840 | ckact2: lwz r29,ACT_MACT_PCB(r26) ; (TEST/DEBUG) | |
1841 | ||
1842 | cknorm: mr. r29,r29 ; (TEST/DEBUG) | |
1843 | beq- cknormd ; (TEST/DEBUG) | |
1844 | ||
1845 | addi r20,r20,1 ; (TEST/DEBUG) Count normal savearea | |
1846 | ||
1847 | lwz r29,SAVprev(r29) ; (TEST/DEBUG) | |
1848 | b cknorm ; (TEST/DEBUG) | |
1849 | ||
1850 | cknormd: lwz r29,ACT_MACT_FPU(r26) ; (TEST/DEBUG) | |
1851 | ||
1852 | ckfpu: mr. r29,r29 ; (TEST/DEBUG) | |
1853 | beq- ckfpud ; (TEST/DEBUG) | |
1854 | ||
1855 | lwz r21,SAVflags(r29) ; (TEST/DEBUG) | |
1856 | rlwinm. r21,r21,0,0,0 ; (TEST/DEBUG) See if already counted | |
1857 | bne- cknfpu ; (TEST/DEBUG) | |
1858 | ||
1859 | addi r20,r20,1 ; (TEST/DEBUG) Count fpu savearea | |
1860 | ||
1861 | cknfpu: lwz r29,SAVprefp(r29) ; (TEST/DEBUG) | |
1862 | b ckfpu ; (TEST/DEBUG) | |
1863 | ||
1864 | ckfpud: lwz r29,ACT_MACT_VMX(r26) ; (TEST/DEBUG) | |
1865 | ||
1866 | ckvmx: mr. r29,r29 ; (TEST/DEBUG) | |
1867 | beq- ckvmxd ; (TEST/DEBUG) | |
1868 | ||
1869 | lwz r21,SAVflags(r29) ; (TEST/DEBUG) | |
1870 | rlwinm. r21,r21,0,0,1 ; (TEST/DEBUG) See if already counted | |
1871 | bne- cknvmx ; (TEST/DEBUG) | |
1872 | ||
1873 | addi r20,r20,1 ; (TEST/DEBUG) Count vector savearea | |
1874 | ||
1875 | cknvmx: lwz r29,SAVprevec(r29) ; (TEST/DEBUG) | |
1876 | b ckvmx ; (TEST/DEBUG) | |
1877 | ||
1878 | ckvmxd: lwz r26,ACT_LOWER(r26) ; (TEST/DEBUG) Next activation | |
1879 | b ckact ; (TEST/DEBUG) | |
1880 | ||
1881 | cktotal: lis r28,hi16(EXT(saveanchor)) ; (TEST/DEBUG) | |
1882 | lis r27,hi16(EXT(real_ncpus)) ; (TEST/DEBUG) | |
1883 | ori r28,r28,lo16(EXT(saveanchor)) ; (TEST/DEBUG) | |
1884 | ori r27,r27,lo16(EXT(real_ncpus)) ; (TEST/DEBUG) | |
1885 | ||
1886 | lwz r21,SVinuse(r28) ; (TEST/DEBUG) | |
1887 | lwz r27,0(r27) ; (TEST/DEBUG) Get the number of CPUs | |
1888 | sub. r29,r21,r20 ; (TEST/DEBUG) Get number accounted for | |
1889 | blt- badsave ; (TEST/DEBUG) Have too many in use... | |
1890 | sub r26,r29,r27 ; (TEST/DEBUG) Should be 1 unaccounted for per processor | |
1891 | cmpwi r26,10 ; (TEST/DEBUG) Allow a 10 area slop factor | |
1892 | bltlr+ ; (TEST/DEBUG) | |
1893 | ||
1894 | badsave: lis r27,hi16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1895 | ori r27,r27,lo16(EXT(DebugWork)) ; (TEST/DEBUG) | |
1896 | stw r27,0(r27) ; (TEST/DEBUG) | |
1897 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1898 | #endif | |
1899 | #endif |