/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * Low-memory exception vector code for PowerPC MACH
 *
 * These are the only routines that are ever run with
 * VM instruction translation switched off.
 *
 * The PowerPC is quite strange in that rather than having a set
 * of exception vectors, the exception handlers are installed
 * in well-known addresses in low memory. This code must be loaded
 * at ZERO in physical memory. The simplest way of doing this is
 * to load the kernel at zero, and specify this as the first file
 * on the linker command line.
 *
 * When this code is loaded into place, it is loaded at virtual
 * address KERNELBASE, which is mapped to zero (physical).
 *
 * This code handles all powerpc exceptions and is always entered
 * in supervisor mode with translation off. It saves the minimum
 * processor state before switching back on translation and
 * jumping to the appropriate routine.
 *
 * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions)
 *
 * We use some of this space to decide which stack to use, and where to
 * save the context etc, before jumping to a generic handler.
 */
52 | ||
53 | #include <assym.s> | |
54 | #include <debug.h> | |
55 | #include <cpus.h> | |
56 | #include <db_machine_commands.h> | |
57 | #include <mach_rt.h> | |
58 | ||
59 | #include <mach_debug.h> | |
60 | #include <ppc/asm.h> | |
61 | #include <ppc/proc_reg.h> | |
62 | #include <ppc/exception.h> | |
63 | #include <ppc/Performance.h> | |
64 | #include <mach/ppc/vm_param.h> | |
65 | #include <ppc/POWERMAC/mp/MPPlugIn.h> | |
66 | ||
67 | #define TRCSAVE 0 | |
68 | #define CHECKSAVE 0 | |
69 | #define PERFTIMES 0 | |
70 | #define ESPDEBUG 0 | |
71 | ||
72 | #if TRCSAVE | |
73 | #error The TRCSAVE option is broken.... Fix it | |
74 | #endif | |
75 | ||
76 | #define featL1ena 24 | |
77 | #define featSMP 25 | |
78 | #define featAltivec 26 | |
79 | #define wasNapping 27 | |
80 | #define featFP 28 | |
81 | ||
82 | #define VECTOR_SEGMENT .section __VECTORS, __interrupts | |
83 | ||
	VECTOR_SEGMENT

	.globl	EXT(ExceptionVectorsStart)

EXT(ExceptionVectorsStart):			/* Used if relocating the exception vectors */
baseR:						/* Used so we have more readable code */

/*
 * System reset - call debugger
 *
 * The three words at ResetHandler form a reset record (fields located via
 * the RESETHANDLER_TYPE/RESETHANDLER_CALL/RESETHANDLER_ARG offsets).  The
 * 0x100 vector inspects the type: if it is RESET_HANDLER_START, it clears
 * the type word and branches to the recorded routine with the recorded
 * argument in r3; otherwise the reset goes to the common exception path.
 */
	. = 0xf0
	.globl	EXT(ResetHandler)
EXT(ResetHandler):
	.long	0x0
	.long	0x0
	.long	0x0

	. = 0x100
.L_handler100:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	lwz	r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0)	; Get reset type
	mfcr	r11				; Preserve CR across the compare
	cmpi	cr0,r13,RESET_HANDLER_START
	bne	resetexc			; Not a requested start; treat as a real reset

	li	r11,RESET_HANDLER_NULL
	stw	r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0)	; Clear reset type

	lwz	r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0)
	lwz	r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0)
	mtlr	r4
	blr					; Jump to the registered start routine

resetexc:
	mtcr	r11				; Restore CR
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_RESET			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */
124 | ||
/*
 * Simple exception vectors, 0x200 through 0xB00.
 *
 * Each stanza is identical in shape: stash R13 and R11 in sprg2/sprg3,
 * pick up the per-processor exception save area from sprg1, load the
 * trap code into R11, and join the common handler at .L_exception_entry.
 */

/*
 * Machine check
 */

	. = 0x200
.L_handler200:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_MACHINE_CHECK		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Data access - page fault, invalid memory rights for operation
 */

	. = 0x300
.L_handler300:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_DATA_ACCESS		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Instruction access - as for data access
 */

	. = 0x400
.L_handler400:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_INSTRUCTION_ACCESS	/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * External interrupt
 */

	. = 0x500
.L_handler500:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_INTERRUPT			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Alignment - many reasons
 */

	. = 0x600
.L_handler600:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_ALIGNMENT			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Program - floating point exception, illegal inst, priv inst, user trap
 */

	. = 0x700
.L_handler700:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_PROGRAM			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Floating point disabled
 */

	. = 0x800
.L_handler800:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_FP_UNAVAILABLE		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Decrementer - DEC register has passed zero.
 */

	. = 0x900
.L_handler900:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_DECREMENTER		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * I/O controller interface error - MACH does not use this
 */

	. = 0xA00
.L_handlerA00:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_IO_ERROR			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * Reserved
 */

	. = 0xB00
.L_handlerB00:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_RESERVED			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */
245 | ||
#if 0
/*
 * Disabled debugging hack (compiled out): appends r0 and SRR0 to a trace
 * buffer whose fill pointer lives at physical address 0 (via br0); the
 * pointer is seeded to 0x4000_0000 the first time through.  Reached from
 * the 0xC00 handler when enabled; returns to hackxxxx2 there.
 */
hackxxxx1:
	stmw	r29,4(br0)			; Stash work registers
	lwz	r29,0(br0)			; Pick up buffer fill pointer
	mr.	r29,r29				; First time (pointer still zero)?
	bne+	xxxx1
	lis	r29,0x4000			; Seed the buffer pointer

xxxx1:
	stw	r0,0(r29)			; Log r0
	mfsrr0	r30
	stw	r30,4(r29)			; Log SRR0
	mtlr	r30				; NOTE(review): loads LR from SRR0 — looks intentional for the hack, verify
	stw	r30,8(r29)

	addi	r29,r29,12			; Advance past the 3-word record
	stw	r29,0(br0)			; Save the new fill pointer

	lmw	r29,4(br0)			; Recover work registers
	b	hackxxxx2
#endif
267 | ||
268 | ||
;
;	System call - generated by the sc instruction
;
;	We handle the ultra-fast traps right here. They are:
;
;	    0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
;	    0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
;	    0x00007FF2 - User state only - thread info
;	    0x00007FF3 - User state only - floating point / vector facility status
;	    0x00007FF4 - Kernel only - loadMSR
;
;	Note: none handled if virtual machine is running
;
;	Anything that is not one of the ultra-fast traps falls through to
;	notufp and takes the normal T_SYSTEM_CALL path.
;

	. = 0xC00
.L_handlerC00:
	mtsprg	2,r13				; Save R13
	mfsrr1	r13				; Get SRR1 for loadMSR
	mtsprg	3,r11				; Save R11
	mfcr	r11				; Save the CR
	rlwinm.	r13,r13,0,MSR_PR_BIT,MSR_PR_BIT	; From problem state?
	mfsprg	r13,0				; Get the per_proc_area
	beq-	uftInKern			; We are in the kernel...

	cmplwi	cr5,r0,0x7FF2			; Ultra fast path cthread info call?
	cmpwi	cr6,r0,0x7FF3			; Ultra fast path facility status?
	cror	cr1_eq,cr5_lt,cr6_gt		; Set true if not 0x7FF2 and not 0x7FF3 and not negative
	lwz	r13,spcFlags(r13)		; Get the special flags
	bt-	cr1_eq,notufp			; Exit if we can not be ultra fast...

	rlwimi	r13,r13,runningVMbit+1,31,31	; Move VM flag after the 3 blue box flags
	not.	r0,r0				; Flip bits and kind of subtract 1
	mtcrf	1,r13				; Set BB and VMM flags in CR7

	cmplwi	cr1,r0,1			; Is this a bb fast path?
	not	r0,r0				; Restore to entry state
	bt-	31,notufp			; No fast paths if running VM (assume not)...
	bf-	bbNoMachSCbit,ufpUSuft		; We are not running BlueBox...
	bgt	cr1,notufp			; This can not be a bb ufp...
#if 0
	b	hackxxxx1			; Disabled trace hook (see hackxxxx1 above)
hackxxxx2:
#endif

	rlwimi	r11,r13,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq	; Copy preemptive task flag into user cr0_eq
	mfsprg	r13,0				; Get back pre_proc

	bne	cr1,ufpIsBBpre			; This is the "isPreemptiveTask" call...

	lwz	r0,ppbbTaskEnv(r13)		; Get the shadowed taskEnv from per_proc_area

ufpIsBBpre:
	mtcrf	0xFF,r11			; Restore CR
	mfsprg	r11,3				; Restore R11
	mfsprg	r13,2				; Restore R13
	rfi					; All done, go back...

;
;	Normal fast path...
;

ufpUSuft:	bge+	notufp			; Bail if negative... (ARRRGGG -- BRANCH TO A BRANCH!!!!!)
	mfsprg	r11,3				; Restore R11
	mfsprg	r3,0				; Get the per_proc_area
	mfsprg	r13,2				; Restore R13
	bne-	cr5,isvecfp			; This is the facility stat call
	lwz	r3,UAW(r3)			; Get the assist word
	rfi					; All done, scream back... (no need to restore CR or R11, they are volatile)

isvecfp:	lwz	r3,spcFlags(r3)		; Get the facility status
	rfi					; Bail back...

notufp:	mtcrf	0xFF,r11			; Restore the used CRs
	li	r11,T_SYSTEM_CALL		; Set interrupt code
	mfsprg	r13,1				; Get the exception save area
	b	.L_exception_entry		; Join common...

uftInKern:	cmplwi	r0,0x7FF4		; Ultra fast path loadMSR?
	bne-	notufp				; Someone is trying to cheat...

	mtcrf	0xFF,r11			; Restore CR
	lwz	r11,pfAvailable(r13)		; Pick up the feature flags
	mtsrr1	r3				; Set new MSR
	mfsprg	r13,2				; Restore R13
	mtsprg	2,r11				; Set the feature flags into sprg2
	mfsprg	r11,3				; Restore R11
	rfi					; Blast back
357 | ||
358 | ||
/*
 * Trace - generated by single stepping
 *	performance monitor BE branch enable tracing/logging
 *	is also done here now.  While this is permanently in the
 *	system the impact is completely unnoticable as this code is
 *	only executed when (a) a single step or branch exception is
 *	hit, (b) in the single step debugger case there is so much
 *	overhead already the few extra instructions for testing for BE
 *	are not even noticable, (c) the BE logging code is *only* run
 *	when it is enabled by the tool which will not happen during
 *	normal system usage
 *
 *	Note that this trace is available only to user state so we do not
 *	need to set sprg2 before returning.
 */

	. = 0xD00
.L_handlerD00:
	mtsprg	2,r13				; Save R13
	mtsprg	3,r11				; Save R11
	mfsrr1	r13				; Get the old MSR
	mfcr	r11				; Get the CR
	rlwinm.	r13,r13,0,MSR_PR_BIT,MSR_PR_BIT	; Are we in supervisor state?
	beq-	notspectr			; Yes, not special trace...
	mfsprg	r13,0				; Get the per_proc area
	lhz	r13,PP_CPU_FLAGS(r13)		; Get the flags
	rlwinm.	r13,r13,0,traceBEb+16,traceBEb+16	; Special trace enabled?
	bne+	specbrtr			; Yeah...

notspectr:	mtcr	r11			; Restore CR
	mfsprg	r13,1				; Get the savearea
	li	r11,T_TRACE			; Set interrupt code
	b	.L_exception_entry		; Join common...

;
;	We are doing the special branch trace: log the interrupted PC into
;	pc_trace_buf, wrapping within one page; take a real interruption
;	only when the buffer position reaches 4092 (page nearly full).
;

specbrtr:	mfsprg	r13,0			; Get the per_proc area
	stw	r1,emfp0(r13)			; Save in a scratch area
	stw	r2,emfp0+4(r13)			; Save in a scratch area
	stw	r3,emfp0+8(r13)			; Save in a scratch area

	lis	r2,hi16(EXT(pc_trace_buf))	; Get the top of the buffer
	lwz	r3,spcTRp(r13)			; Pick up buffer position
	mr.	r1,r1				; Is it time to count?
	ori	r2,r2,lo16(EXT(pc_trace_buf))	; Get the bottom of the buffer
	cmplwi	cr1,r3,4092			; Set cr1_eq if we should take exception
	mfsrr0	r1				; Get the pc
	stwx	r1,r2,r3			; Save it in the buffer
	addi	r3,r3,4				; Point to the next slot
	rlwinm	r3,r3,0,20,31			; Wrap the slot at one page
	stw	r3,spcTRp(r13)			; Save the new slot
	lwz	r1,emfp0(r13)			; Restore work register
	lwz	r2,emfp0+4(r13)			; Restore work register
	lwz	r3,emfp0+8(r13)			; Restore work register
	beq	cr1,notspectr			; Buffer filled, make a rupt...

	mtcr	r11				; Restore the CR
	mfsprg	r13,2				; Restore R13
	mfsprg	r11,3				; Restore R11
	rfi					; Bail back...
421 | ||
/*
 * Floating point assist
 */

	. = 0xe00
.L_handlerE00:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_FP_ASSIST			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */


/*
 * Performance monitor interruption
 */

	. = 0xF00
PMIhandler:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_PERF_MON			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */


/*
 * VMX exception
 */

	. = 0xF20
VMXhandler:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_VMX			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */
459 | ||
460 | ||
461 | ||
/*
 * Instruction translation miss - we inline this code.
 * Upon entry (done for us by the machine):
 *	srr0 :	addr of instruction that missed
 *	srr1 :	bits 0-3   = saved CR0
 *		4          = lru way bit
 *		16-31      = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	imiss:	ea that missed
 *	icmp :	the compare value for the va that missed
 *	hash1:	pointer to first hash pteg
 *	hash2:	pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0:	saved counter
 *	tmp1:	junk
 *	tmp2:	pointer to pteg
 *	tmp3:	current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

	. = 0x1000
.L_handler1000:
	mfspr	tmp2, hash1
	mfctr	tmp0				/* use tmp0 to save ctr */
	mfspr	tmp3, icmp

.L_imiss_find_pte_in_pteg:
	li	tmp1, 8				/* count */
	subi	tmp2, tmp2, 8			/* offset for lwzu */
	mtctr	tmp1				/* count... */

.L_imiss_pteg_loop:
	lwz	tmp1, 8(tmp2)			/* check pte0 for match... */
	addi	tmp2, tmp2, 8
	cmpw	cr0, tmp1, tmp3
#if 0
	bdnzf+	cr0, .L_imiss_pteg_loop
#else
	bc	0,2, .L_imiss_pteg_loop		/* dec ctr; loop while ctr!=0 and cr0_eq clear */
#endif
	beq+	cr0, .L_imiss_found_pte

	/* Not found in PTEG, we must scan 2nd then give up */

	andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)	/* already searched the 2nd hash? */
	bne-	.L_imiss_do_no_hash_exception	/* give up */

	mfspr	tmp2, hash2
	ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
	b	.L_imiss_find_pte_in_pteg

.L_imiss_found_pte:

	lwz	tmp1, 4(tmp2)			/* get pte1_t */
	andi.	tmp3, tmp1, MASK(PTE1_WIMG_GUARD)	/* Fault? */
	bne-	.L_imiss_do_prot_exception	/* Guarded - illegal */

	/* Ok, we've found what we need to, restore and rfi! */

	mtctr	tmp0				/* restore ctr */
	mfsrr1	tmp3
	mfspr	tmp0, imiss
	mtcrf	0x80, tmp3			/* Restore CR0 */
	mtspr	rpa, tmp1			/* set the pte */
	ori	tmp1, tmp1, MASK(PTE1_REFERENCED)	/* set referenced */
	tlbli	tmp0
	sth	tmp1, 6(tmp2)			/* write back the referenced bit */
	rfi

.L_imiss_do_prot_exception:
	/* set up srr1 to indicate protection exception... */
	mfsrr1	tmp3
	andi.	tmp2, tmp3, 0xffff		/* keep only the saved MSR bits */
	addis	tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16
	b	.L_imiss_do_exception

.L_imiss_do_no_hash_exception:
	/* clean up registers for protection exception... */
	mfsrr1	tmp3
	andi.	tmp2, tmp3, 0xffff
	addis	tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16

	/* And the entry into the usual instruction fault handler ... */
.L_imiss_do_exception:

	mtctr	tmp0				/* Restore ctr */
	mtsrr1	tmp2				/* Set up srr1 */
	mfmsr	tmp0
	xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
	mtcrf	0x80, tmp3			/* Restore CR0 */
	mtmsr	tmp0				/* reset MSR[TGPR] */
	b	.L_handler400			/* Instr Access */
559 | ||
/*
 * Data load translation miss
 *
 * Upon entry (done for us by the machine):
 *	srr0 :	addr of instruction that missed
 *	srr1 :	bits 0-3   = saved CR0
 *		4          = lru way bit
 *		5          = 1 if store
 *		16-31      = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	dmiss:	ea that missed
 *	dcmp :	the compare value for the va that missed
 *	hash1:	pointer to first hash pteg
 *	hash2:	pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0:	saved counter
 *	tmp1:	junk
 *	tmp2:	pointer to pteg
 *	tmp3:	current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

	. = 0x1100
.L_handler1100:
	mfspr	tmp2, hash1
	mfctr	tmp0				/* use tmp0 to save ctr */
	mfspr	tmp3, dcmp

.L_dlmiss_find_pte_in_pteg:
	li	tmp1, 8				/* count */
	subi	tmp2, tmp2, 8			/* offset for lwzu */
	mtctr	tmp1				/* count... */

.L_dlmiss_pteg_loop:
	lwz	tmp1, 8(tmp2)			/* check pte0 for match... */
	addi	tmp2, tmp2, 8
	cmpw	cr0, tmp1, tmp3
#if 0	/* How to write this correctly? */
	bdnzf+	cr0, .L_dlmiss_pteg_loop
#else
	bc	0,2, .L_dlmiss_pteg_loop	/* dec ctr; loop while ctr!=0 and cr0_eq clear */
#endif
	beq+	cr0, .L_dmiss_found_pte

	/* Not found in PTEG, we must scan 2nd then give up */

	andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)	/* already at 2nd? */
	bne-	.L_dmiss_do_no_hash_exception	/* give up */

	mfspr	tmp2, hash2
	ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
	b	.L_dlmiss_find_pte_in_pteg

.L_dmiss_found_pte:

	lwz	tmp1, 4(tmp2)			/* get pte1_t */

	/* Ok, we've found what we need to, restore and rfi! */

	mtctr	tmp0				/* restore ctr */
	mfsrr1	tmp3
	mfspr	tmp0, dmiss
	mtcrf	0x80, tmp3			/* Restore CR0 */
	mtspr	rpa, tmp1			/* set the pte */
	ori	tmp1, tmp1, MASK(PTE1_REFERENCED)	/* set referenced */
	tlbld	tmp0				/* load up tlb */
	sth	tmp1, 6(tmp2)			/* sth is faster? */
	rfi

/* This code is shared with data store translation miss */

.L_dmiss_do_no_hash_exception:
	/* clean up registers for protection exception... */
	mfsrr1	tmp3
	/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
	rlwinm	tmp1, tmp3, 9, 6, 6
	addis	tmp1, tmp1, MASK(DSISR_HASH) >> 16

	/* And the entry into the usual data fault handler ... */

	mtctr	tmp0				/* Restore ctr */
	andi.	tmp2, tmp3, 0xffff		/* Clean up srr1 */
	mtsrr1	tmp2				/* Set srr1 */
	mtdsisr	tmp1
	mfspr	tmp2, dmiss
	mtdar	tmp2
	mfmsr	tmp0
	xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
	mtcrf	0x80, tmp3			/* Restore CR0 */
	sync					/* Needed on some */
	mtmsr	tmp0				/* reset MSR[TGPR] */
	b	.L_handler300			/* Data Access */
657 | ||
/*
 * Data store translation miss (similar to data load)
 *
 * Upon entry (done for us by the machine):
 *	srr0 :	addr of instruction that missed
 *	srr1 :	bits 0-3   = saved CR0
 *		4          = lru way bit
 *		5          = 1 if store
 *		16-31      = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	dmiss:	ea that missed
 *	dcmp :	the compare value for the va that missed
 *	hash1:	pointer to first hash pteg
 *	hash2:	pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0:	saved counter
 *	tmp1:	junk
 *	tmp2:	pointer to pteg
 *	tmp3:	current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

	. = 0x1200
.L_handler1200:
	mfspr	tmp2, hash1
	mfctr	tmp0				/* use tmp0 to save ctr */
	mfspr	tmp3, dcmp

.L_dsmiss_find_pte_in_pteg:
	li	tmp1, 8				/* count */
	subi	tmp2, tmp2, 8			/* offset for lwzu */
	mtctr	tmp1				/* count... */

.L_dsmiss_pteg_loop:
	lwz	tmp1, 8(tmp2)			/* check pte0 for match... */
	addi	tmp2, tmp2, 8

	cmpw	cr0, tmp1, tmp3
#if 0	/* I don't know how to write this properly */
	bdnzf+	cr0, .L_dsmiss_pteg_loop
#else
	bc	0,2, .L_dsmiss_pteg_loop	/* dec ctr; loop while ctr!=0 and cr0_eq clear */
#endif
	beq+	cr0, .L_dsmiss_found_pte

	/* Not found in PTEG, we must scan 2nd then give up */

	andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)	/* already at 2nd? */
	bne-	.L_dmiss_do_no_hash_exception	/* give up */

	mfspr	tmp2, hash2
	ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
	b	.L_dsmiss_find_pte_in_pteg

.L_dsmiss_found_pte:

	lwz	tmp1, 4(tmp2)			/* get pte1_t */
	andi.	tmp3, tmp1, MASK(PTE1_CHANGED)	/* unchanged, check? */
	beq-	.L_dsmiss_check_prot		/* yes, check prot */

.L_dsmiss_resolved:
	/* Ok, we've found what we need to, restore and rfi! */

	mtctr	tmp0				/* restore ctr */
	mfsrr1	tmp3
	mfspr	tmp0, dmiss
	mtcrf	0x80, tmp3			/* Restore CR0 */
	mtspr	rpa, tmp1			/* set the pte */
	tlbld	tmp0				/* load up tlb */
	rfi

.L_dsmiss_check_prot:
	/* PTE is unchanged, we must check that we can write */
	rlwinm.	tmp3, tmp1, 30, 0, 1		/* check PP[1] */
	bge-	.L_dsmiss_check_prot_user_kern
	andi.	tmp3, tmp1, 1			/* check PP[0] */
	beq+	.L_dsmiss_check_prot_ok

.L_dmiss_do_prot_exception:
	/* clean up registers for protection exception... */
	mfsrr1	tmp3
	/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
	rlwinm	tmp1, tmp3, 9, 6, 6
	addis	tmp1, tmp1, MASK(DSISR_PROT) >> 16

	/* And the entry into the usual data fault handler ... */

	mtctr	tmp0				/* Restore ctr */
	andi.	tmp2, tmp3, 0xffff		/* Clean up srr1 */
	mtsrr1	tmp2				/* Set srr1 */
	mtdsisr	tmp1
	mfspr	tmp2, dmiss
	mtdar	tmp2
	mfmsr	tmp0
	xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
	mtcrf	0x80, tmp3			/* Restore CR0 */
	sync					/* Needed on some */
	mtmsr	tmp0				/* reset MSR[TGPR] */
	b	.L_handler300			/* Data Access */

/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */
.L_dsmiss_check_prot_user_kern:
	mfsrr1	tmp3
	andi.	tmp3, tmp3, MASK(MSR_PR)
	beq+	.L_dsmiss_check_prot_kern
	mfspr	tmp3, dmiss			/* check user privs */
	mfsrin	tmp3, tmp3			/* get excepting SR */
	andis.	tmp3, tmp3, 0x2000		/* Test SR ku bit */
	beq+	.L_dsmiss_check_prot_ok
	b	.L_dmiss_do_prot_exception

.L_dsmiss_check_prot_kern:
	mfspr	tmp3, dmiss			/* check kern privs */
	mfsrin	tmp3, tmp3
	andis.	tmp3, tmp3, 0x4000		/* Test SR Ks bit */
	bne-	.L_dmiss_do_prot_exception

.L_dsmiss_check_prot_ok:
	/* Ok, mark as referenced and changed before resolving the fault */
	ori	tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED))
	sth	tmp1, 6(tmp2)
	b	.L_dsmiss_resolved
785 | ||
/*
 * Instruction address breakpoint
 */

	. = 0x1300
.L_handler1300:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_INSTRUCTION_BKPT		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * System management interrupt
 */

	. = 0x1400
.L_handler1400:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_SYSTEM_MANAGEMENT		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

;
;	Altivec Java Mode Assist interrupt
;

	. = 0x1600
.L_handler1600:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_ALTIVEC_ASSIST		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

;
;	Thermal interruption
;

	. = 0x1700
.L_handler1700:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_THERMAL			/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */

/*
 * There is now a large gap of reserved traps
 */

/*
 * Run mode/ trace exception - single stepping on 601 processors
 */

	. = 0x2000
.L_handler2000:
	mtsprg	2,r13				/* Save R13 */
	mtsprg	3,r11				/* Save R11 */
	mfsprg	r13,1				/* Get the exception save area */
	li	r11,T_RUNMODE_TRACE		/* Set 'rupt code */
	b	.L_exception_entry		/* Join common... */
849 | ||
/*
 * .L_exception_entry(type)
 *
 *	This is the common exception handling routine called by any
 *	type of system exception.
 *
 *	ENTRY:	via a system exception handler, thus interrupts off, VM off.
 *		r3 has been saved in sprg3 and now contains a number
 *		representing the exception's origins
 *
 *	NOTE(review): the comment above looks stale — the vector stanzas in
 *	this file actually save R13 in sprg2 and R11 in sprg3, and pass the
 *	trap code in R11 (with R13 pointing at the sprg1 save area); confirm
 *	against the rest of .L_exception_entry before relying on it.
 *
 *	The exported exception_entry word holds the PHYSICAL offset of
 *	.L_exception_entry from the start of the vectors.
 */

	.data
	.align	ALIGN
	.globl	EXT(exception_entry)
EXT(exception_entry):
	.long	.L_exception_entry-EXT(ExceptionVectorsStart)	/* phys addr of fn */

	VECTOR_SEGMENT
	.align	5
870 | ||
871 | .L_exception_entry: | |
872 | ||
873 | /* | |
874 | * | |
875 | * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ | |
876 | * instruction to clear and allcoate a line in the cache. This way we won't take any cache | |
877 | * misses, so these stores won't take all that long. Except the first line that is because | |
878 | * we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are | |
879 | * off also. | |
880 | * | |
881 | * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions | |
882 | * are ignored. | |
883 | */ | |
884 | ||
885 | stw r1,saver1(r13) ; Save register 1 | |
886 | stw r0,saver0(r13) ; Save register 0 | |
887 | mfspr r1,hid0 ; Get HID0 | |
888 | mfcr r0 ; Save the CR | |
889 | mtcrf 255,r1 ; Get set to test for cache and sleep | |
890 | bf sleep,notsleep ; Skip if we are not trying to sleep | |
891 | ||
892 | mtcrf 255,r0 ; Restore the CR | |
893 | lwz r0,saver0(r13) ; Restore R0 | |
894 | lwz r1,saver1(r13) ; Restore R1 | |
895 | mfsprg r13,0 ; Get the per_proc | |
896 | lwz r11,pfAvailable(r13) ; Get back the feature flags | |
897 | mfsprg r13,2 ; Restore R13 | |
898 | mtsprg 2,r11 ; Set sprg2 to the features | |
899 | mfsprg r11,3 ; Restore R11 | |
900 | rfi ; Jump back into sleep code... | |
901 | .long 0 ; Leave these here please... | |
902 | .long 0 | |
903 | .long 0 | |
904 | .long 0 | |
905 | .long 0 | |
906 | .long 0 | |
907 | .long 0 | |
908 | .long 0 | |
909 | ||
910 | .align 5 | |
911 | ||
912 | notsleep: stw r2,saver2(r13) ; Save this one | |
913 | crmove featL1ena,dce ; Copy the cache enable bit | |
914 | rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits | |
915 | mtspr hid0,r2 ; Clear the nap/doze bits | |
916 | cmplw r2,r1 ; See if we were napping | |
917 | li r1,32 ; Point to the next line in case we need it | |
918 | crnot wasNapping,cr0_eq ; Remember if we were napping | |
919 | mfsprg r2,0 ; Get the per_proc area | |
920 | bf- featL1ena,skipz1 ; L1 cache is disabled... | |
921 | dcbz r1,r13 ; Reserve our line in cache | |
922 | ||
923 | ; | |
924 | ; Remember, we are setting up CR6 with feature flags | |
925 | ; | |
926 | skipz1: lwz r1,pfAvailable(r2) ; Get the CPU features flags | |
927 | stw r3,saver3(r13) ; Save this one | |
928 | mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR | |
929 | stw r4,saver4(r13) ; Save this one | |
930 | stw r6,saver6(r13) ; Save this one | |
931 | crmove featSMP,pfSMPcapb ; See if we have a PIR | |
932 | stw r8,saver8(r13) ; Save this one | |
933 | crmove featAltivec,pfAltivecb ; Set the Altivec flag | |
934 | mfsrr0 r6 /* Get the interruption SRR0 */ | |
935 | stw r8,saver8(r13) /* Save this one */ | |
936 | crmove featFP,pfFloatb ; Remember that we have floating point | |
937 | stw r7,saver7(r13) /* Save this one */ | |
938 | lhz r8,PP_CPU_FLAGS(r2) ; Get the flags | |
939 | mfsrr1 r7 /* Get the interrupt SRR1 */ | |
940 | rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on | |
941 | stw r6,savesrr0(r13) /* Save the SRR0 */ | |
942 | rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit | |
943 | stw r5,saver5(r13) /* Save this one */ | |
944 | and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on | |
945 | mfsprg r6,2 ; Get interrupt time R13 | |
946 | mtsprg 2,r1 ; Set the feature flags | |
947 | andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set | |
948 | mfsprg r8,3 /* Get 'rupt time R11 */ | |
949 | stw r7,savesrr1(r13) /* Save SRR1 */ | |
950 | stw r6,saver13(r13) /* Save 'rupt R1 */ | |
951 | stw r8,saver11(r13) /* Save 'rupt time R11 */ | |
952 | ||
953 | getTB: mftbu r6 ; Get the upper timebase | |
954 | mftb r7 ; Get the lower timebase | |
955 | mftbu r8 ; Get the upper one again | |
956 | cmplw r6,r8 ; Did the top tick? | |
957 | bne- getTB ; Yeah, need to get it again... | |
958 | ||
959 | stw r8,ruptStamp(r2) ; Save the top of time stamp | |
960 | la r6,saver14(r13) ; Point to the next cache line | |
961 | stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp | |
962 | bf- featL1ena,skipz2 ; L1 cache is disabled... | |
963 | dcbz 0,r6 /* Allocate in cache */ | |
964 | skipz2: | |
965 | stw r9,saver9(r13) /* Save this one */ | |
966 | ||
967 | la r9,saver30(r13) /* Point to the trailing end */ | |
968 | stw r10,saver10(r13) /* Save this one */ | |
969 | mflr r4 /* Get the LR */ | |
970 | mfxer r10 ; Get the XER | |
971 | ||
972 | bf+ wasNapping,notNapping ; Skip if not waking up from nap... | |
973 | ||
974 | lwz r6,napStamp+4(r2) ; Pick up low order nap stamp | |
975 | lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return | |
976 | lwz r5,napStamp(r2) ; and high order | |
977 | subfc r7,r6,r7 ; Subtract low stamp from now | |
978 | lwz r6,napTotal+4(r2) ; Pick up low total | |
979 | subfe r5,r5,r8 ; Subtract high stamp and borrow from now | |
980 | lwz r8,napTotal(r2) ; Pick up the high total | |
981 | addc r6,r6,r7 ; Add low to total | |
982 | ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return | |
983 | adde r8,r8,r5 ; Add high and carry to total | |
984 | stw r6,napTotal+4(r2) ; Save the low total | |
985 | stw r8,napTotal(r2) ; Save the high total | |
986 | stw r3,savesrr0(r13) ; Modify to return to nap/doze exit | |
987 | ||
988 | notNapping: stw r12,saver12(r13) /* Save this one */ | |
989 | ||
990 | bf- featL1ena,skipz3 ; L1 cache is disabled... | |
991 | dcbz 0,r9 /* Allocate the last in the area */ | |
992 | skipz3: | |
993 | stw r14,saver14(r13) /* Save this one */ | |
994 | stw r15,saver15(r13) /* Save this one */ | |
995 | la r14,saver22(r13) /* Point to the next block to save into */ | |
996 | stw r0,savecr(r13) ; Save rupt CR | |
997 | mfctr r6 /* Get the CTR */ | |
998 | stw r16,saver16(r13) /* Save this one */ | |
999 | stw r4,savelr(r13) /* Save 'rupt LR */ | |
1000 | ||
1001 | bf- featL1ena,skipz4 ; L1 cache is disabled... | |
1002 | dcbz 0,r14 /* Allocate next save area line */ | |
1003 | skipz4: | |
1004 | stw r17,saver17(r13) /* Save this one */ | |
1005 | stw r18,saver18(r13) /* Save this one */ | |
1006 | stw r6,savectr(r13) /* Save 'rupt CTR */ | |
1007 | stw r19,saver19(r13) /* Save this one */ | |
1008 | lis r12,HIGH_ADDR(KERNEL_SEG_REG0_VALUE) /* Get the high half of the kernel SR0 value */ | |
1009 | mfdar r6 /* Get the 'rupt DAR */ | |
1010 | stw r20,saver20(r13) /* Save this one */ | |
1011 | #if 0 | |
1012 | mfsr r14,sr0 ; (TEST/DEBUG) | |
1013 | stw r14,savesr0(r13) ; (TEST/DEBUG) | |
1014 | mfsr r14,sr1 ; (TEST/DEBUG) | |
1015 | stw r14,savesr1(r13) ; (TEST/DEBUG) | |
1016 | mfsr r14,sr2 ; (TEST/DEBUG) | |
1017 | stw r14,savesr2(r13) ; (TEST/DEBUG) | |
1018 | mfsr r14,sr3 ; (TEST/DEBUG) | |
1019 | stw r14,savesr3(r13) ; (TEST/DEBUG) | |
1020 | mfsr r14,sr4 ; (TEST/DEBUG) | |
1021 | stw r14,savesr4(r13) ; (TEST/DEBUG) | |
1022 | mfsr r14,sr5 ; (TEST/DEBUG) | |
1023 | stw r14,savesr5(r13) ; (TEST/DEBUG) | |
1024 | mfsr r14,sr6 ; (TEST/DEBUG) | |
1025 | stw r14,savesr6(r13) ; (TEST/DEBUG) | |
1026 | mfsr r14,sr7 ; (TEST/DEBUG) | |
1027 | stw r14,savesr7(r13) ; (TEST/DEBUG) | |
1028 | mfsr r14,sr8 ; (TEST/DEBUG) | |
1029 | stw r14,savesr8(r13) ; (TEST/DEBUG) | |
1030 | mfsr r14,sr9 ; (TEST/DEBUG) | |
1031 | stw r14,savesr9(r13) ; (TEST/DEBUG) | |
1032 | mfsr r14,sr10 ; (TEST/DEBUG) | |
1033 | stw r14,savesr10(r13) ; (TEST/DEBUG) | |
1034 | mfsr r14,sr11 ; (TEST/DEBUG) | |
1035 | stw r14,savesr11(r13) ; (TEST/DEBUG) | |
1036 | mfsr r14,sr12 ; (TEST/DEBUG) | |
1037 | stw r14,savesr12(r13) ; (TEST/DEBUG) | |
1038 | mfsr r14,sr13 ; (TEST/DEBUG) | |
1039 | stw r14,savesr13(r13) ; (TEST/DEBUG) | |
1040 | mfsr r14,sr15 ; (TEST/DEBUG) | |
1041 | stw r14,savesr15(r13) ; (TEST/DEBUG) | |
1042 | #endif | |
1043 | ||
1044 | mtsr sr0,r12 /* Set the kernel SR0 */ | |
1045 | stw r21,saver21(r13) /* Save this one */ | |
1046 | addis r12,r12,0x0010 ; Point to the second segment of kernel | |
1047 | stw r10,savexer(r13) ; Save the rupt XER | |
1048 | mtsr sr1,r12 /* Set the kernel SR1 */ | |
1049 | stw r30,saver30(r13) /* Save this one */ | |
1050 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1051 | stw r31,saver31(r13) /* Save this one */ | |
1052 | mtsr sr2,r12 /* Set the kernel SR2 */ | |
1053 | stw r22,saver22(r13) /* Save this one */ | |
1054 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1055 | la r10,savedar(r13) /* Point to exception info block */ | |
1056 | stw r23,saver23(r13) /* Save this one */ | |
1057 | mtsr sr3,r12 /* Set the kernel SR3 */ | |
1058 | stw r24,saver24(r13) /* Save this one */ | |
1059 | stw r25,saver25(r13) /* Save this one */ | |
1060 | mfdsisr r7 /* Get the 'rupt DSISR */ | |
1061 | stw r26,saver26(r13) /* Save this one */ | |
1062 | ||
1063 | bf- featL1ena,skipz5 ; L1 cache is disabled... | |
1064 | dcbz 0,r10 /* Allocate exception info line */ | |
1065 | skipz5: | |
1066 | ||
1067 | stw r27,saver27(r13) /* Save this one */ | |
1068 | li r10,emfp0 ; Point to floating point save | |
1069 | stw r28,saver28(r13) /* Save this one */ | |
1070 | stw r29,saver29(r13) /* Save this one */ | |
1071 | mfsr r14,sr14 ; Get the copyin/out segment register | |
1072 | stw r6,savedar(r13) /* Save the 'rupt DAR */ | |
1073 | bf- featL1ena,skipz5a ; Do not do this if no L1... | |
1074 | dcbz r10,r2 ; Clear and allocate an L1 slot | |
1075 | ||
1076 | skipz5a: stw r7,savedsisr(r13) /* Save the 'rupt code DSISR */ | |
1077 | stw r11,saveexception(r13) /* Save the exception code */ | |
1078 | stw r14,savesr14(r13) ; Save copyin/copyout | |
1079 | ||
1080 | lis r8,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ | |
1081 | li r19,0 ; Assume no Altivec | |
1082 | ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ | |
1083 | ||
1084 | bf featAltivec,noavec ; No Altivec on this CPU... | |
1085 | li r9,0 ; Get set to clear VRSAVE | |
1086 | mfspr r19,vrsave ; Get the VRSAVE register | |
1087 | mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level | |
1088 | ; | |
1089 | ; We need to save the FPSCR as if it is normal context. | |
1090 | ; This is because pending exceptions will cause an exception even if | |
1091 | ; FP is disabled. We need to clear the FPSCR when we first start running in the | |
1092 | ; kernel. | |
1093 | ; | |
1094 | noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags | |
1095 | ||
1096 | bf- featFP,nofpexe ; No possible floating point exceptions... | |
1097 | ||
1098 | mfmsr r9 ; Get the MSR value | |
1099 | ori r7,r9,lo16(MASK(MSR_FP)) ; Enable floating point | |
1100 | mtmsr r7 ; Do it | |
1101 | isync | |
1102 | stfd f0,emfp0(r2) ; Save FPR0 | |
1103 | stfd f1,emfp1(r2) ; Save FPR1 | |
1104 | mffs f0 ; Get the FPSCR | |
1105 | fsub f1,f1,f1 ; Make a 0 | |
1106 | stfd f0,savexfpscrpad(r13) ; Save the FPSCR | |
1107 | mtfsf 0xFF,f1 ; Clear it | |
1108 | lfd f0,emfp0(r2) ; Restore FPR0 | |
1109 | lfd f1,emfp1(r2) ; Restore FPR1 | |
1110 | mtmsr r9 ; Turn off FP | |
1111 | isync | |
1112 | nofpexe: | |
1113 | ||
1114 | /* | |
1115 | * Everything is saved at this point, except for FPRs, and VMX registers | |
1116 | * | |
1117 | * Time for a new save area. Allocate the trace table entry now also | |
1118 | * Note that we haven't touched R0-R5 yet. Except for R0 & R1, that's in the save | |
1119 | */ | |
1120 | ||
1121 | ||
1122 | lllck: lwarx r9,0,r8 /* Grab the lock value */ | |
1123 | li r7,1 /* Use part of the delay time */ | |
1124 | mr. r9,r9 /* Is it locked? */ | |
1125 | bne- lllcks /* Yeah, wait for it to clear... */ | |
1126 | stwcx. r7,0,r8 /* Try to seize that there durn lock */ | |
1127 | beq+ lllckd /* Got it... */ | |
1128 | b lllck /* Collision, try again... */ | |
1129 | ||
1130 | lllcks: lwz r9,SVlock(r8) /* Get that lock in here */ | |
1131 | mr. r9,r9 /* Is it free yet? */ | |
1132 | beq+ lllck /* Yeah, try for it again... */ | |
1133 | b lllcks /* Sniff away... */ | |
1134 | ||
1135 | lllckd: isync /* Purge any speculative executions here */ | |
1136 | lis r23,hi16(EXT(trcWork)) ; Get the work area address | |
1137 | rlwinm r7,r11,30,0,31 /* Save 'rupt code shifted right 2 */ | |
1138 | ori r23,r23,lo16(EXT(trcWork)) ; Get the rest | |
1139 | #if 1 | |
1140 | lwz r14,traceMask(r23) /* Get the trace mask */ | |
1141 | #else | |
1142 | li r14,-1 /* (TEST/DEBUG) */ | |
1143 | #endif | |
1144 | addi r7,r7,10 /* Adjust for CR5_EQ position */ | |
1145 | lwz r15,SVfree(r8) /* Get the head of the save area list */ | |
1146 | lwz r25,SVinuse(r8) /* Get the in use count */ | |
1147 | rlwnm r7,r14,r7,22,22 /* Set CR5_EQ bit position to 0 if tracing allowed */ | |
1148 | lwz r20,traceCurr(r23) /* Pick up the current trace entry */ | |
1149 | mtcrf 0x04,r7 /* Set CR5 to show trace or not */ | |
1150 | ||
1151 | lwz r14,SACalloc(r15) /* Pick up the allocation bits */ | |
1152 | addi r25,r25,1 /* Bump up the in use count for the new savearea */ | |
1153 | lwz r21,traceEnd(r23) /* Grab up the end of it all */ | |
1154 | mr. r14,r14 /* Can we use the first one? */ | |
1155 | blt use1st /* Yeah... */ | |
1156 | ||
1157 | andis. r14,r14,0x8000 /* Show we used the second and remember if it was the last */ | |
1158 | addi r10,r15,0x0800 /* Point to the first one */ | |
1159 | b gotsave /* We have the area now... */ | |
1160 | ||
1161 | use1st: andis. r14,r14,0x4000 /* Mark first gone and remember if empty */ | |
1162 | mr r10,r15 /* Set the save area */ | |
1163 | ||
1164 | gotsave: stw r14,SACalloc(r15) /* Put back the allocation bits */ | |
1165 | bne nodqsave /* There's still an empty slot, don't dequeue... */ | |
1166 | ||
1167 | lwz r16,SACnext(r15) /* Get the next in line */ | |
1168 | stw r16,SVfree(r8) /* Dequeue our now empty save area block */ | |
1169 | ||
1170 | nodqsave: addi r22,r20,LTR_size /* Point to the next trace entry */ | |
1171 | stw r25,SVinuse(r8) /* Set the in use count */ | |
1172 | li r17,0 /* Clear this for the lock */ | |
1173 | cmplw r22,r21 /* Do we need to wrap the trace table? */ | |
1174 | stw r17,SAVprev(r10) /* Clear back pointer for the newly allocated guy */ | |
1175 | mtsprg 1,r10 /* Get set for the next 'rupt */ | |
1176 | bne+ gotTrcEnt /* We got a trace entry... */ | |
1177 | ||
1178 | lwz r22,traceStart(r23) /* Wrap back to the top */ | |
1179 | ||
1180 | gotTrcEnt: bne- cr5,skipTrace1 /* Don't want to trace this kind... */ | |
1181 | ||
1182 | stw r22,traceCurr(r23) /* Set the next entry for the next guy */ | |
1183 | ||
1184 | #if ESPDEBUG | |
1185 | dcbst br0,r23 ; (TEST/DEBUG) | |
1186 | sync ; (TEST/DEBUG) | |
1187 | #endif | |
1188 | ||
1189 | bf- featL1ena,skipz6 ; L1 cache is disabled... | |
1190 | dcbz 0,r20 /* Allocate cache for the entry */ | |
1191 | skipz6: | |
1192 | ||
1193 | skipTrace1: sync /* Make sure all stores are done */ | |
1194 | stw r17,SVlock(r8) /* Unlock both save and trace areas */ | |
1195 | ||
1196 | ||
1197 | /* | |
1198 | * At this point, we can take another exception and lose nothing. | |
1199 | * | |
1200 | * We still have the current savearea pointed to by R13, the next by R10 and | |
1201 | * sprg1. R20 contains the pointer to a trace entry and CR5_eq says | |
1202 | * to do the trace or not. | |
1203 | * | |
1204 | * Note that R13 was chosen as the save area pointer because the SIGP, | |
1205 | * firmware, and DSI/ISI handlers aren't supposed to touch anything | |
1206 | * over R12. But, actually, the DSI/ISI stuff does. | |
1207 | * | |
1208 | * | |
1209 | * Let's cut that trace entry now. | |
1210 | */ | |
1211 | ||
1212 | lwz r0,saver0(r13) ; Get back interrupt time R0 | |
1213 | bne- cr5,skipTrace2 /* Don't want to trace this kind... */ | |
1214 | ||
1215 | mfsprg r2,0 ; Get the per_proc | |
1216 | li r14,32 /* Second line of entry */ | |
1217 | ||
1218 | lwz r16,ruptStamp(r2) ; Get top of time base | |
1219 | lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp | |
1220 | ||
1221 | bf- featL1ena,skipz7 ; L1 cache is disabled... | |
1222 | dcbz r14,r20 /* Zap the second half */ | |
1223 | ||
1224 | skipz7: stw r16,LTR_timeHi(r20) /* Set the upper part of TB */ | |
1225 | bf featSMP,nopir4 ; Is there a processor ID register on this guy? | |
1226 | mfspr r19,pir /* Get the processor address */ | |
1227 | b gotpir4 /* Got it... */ | |
1228 | nopir4: li r19,0 /* Assume processor 0 for those underprivileged folks */ | |
1229 | gotpir4: | |
1230 | lwz r1,saver1(r13) ; Get back interrupt time R1 | |
1231 | stw r17,LTR_timeLo(r20) /* Set the lower part of TB */ | |
1232 | rlwinm r19,r19,0,27,31 /* Cut the junk */ | |
1233 | lwz r2,saver2(r13) ; Get back interrupt time R2 | |
1234 | stw r0,LTR_r0(r20) /* Save off register 0 */ | |
1235 | lwz r3,saver3(r13) ; Restore this one | |
1236 | sth r19,LTR_cpu(r20) /* Stash the cpu address */ | |
1237 | stw r1,LTR_r1(r20) /* Save off register 1 */ | |
1238 | lwz r4,saver4(r13) ; Restore this one | |
1239 | stw r2,LTR_r2(r20) /* Save off register 2 */ | |
1240 | lwz r5,saver5(r13) ; Restore this one | |
1241 | stw r3,LTR_r3(r20) /* Save off register 3 */ | |
1242 | lwz r16,savecr(r13) /* We don't remember the CR anymore, get it */ | |
1243 | stw r4,LTR_r4(r20) /* Save off register 4 */ | |
1244 | mfsrr0 r17 /* Get this back, it's still good */ | |
1245 | stw r5,LTR_r5(r20) /* Save off register 5 */ | |
1246 | mfsrr1 r18 /* This is still good in here also */ | |
1247 | ||
1248 | stw r16,LTR_cr(r20) /* Save the CR (or dec) */ | |
1249 | stw r17,LTR_srr0(r20) /* Save the SSR0 */ | |
1250 | stw r18,LTR_srr1(r20) /* Save the SRR1 */ | |
1251 | mfdar r17 /* Get this back */ | |
1252 | ||
1253 | mflr r16 /* Get the LR */ | |
1254 | stw r17,LTR_dar(r20) /* Save the DAR */ | |
1255 | mfctr r17 /* Get the CTR */ | |
1256 | stw r16,LTR_lr(r20) /* Save the LR */ | |
1257 | #if 0 | |
1258 | lis r17,HIGH_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) | |
1259 | ori r17,r17,LOW_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) | |
1260 | lwz r16,SVcount(r17) ; (TEST/DEBUG) | |
1261 | lwz r17,SVinuse(r17) ; (TEST/DEBUG) | |
1262 | rlwimi r17,r16,16,0,15 ; (TEST/DEBUG) | |
1263 | #endif | |
1264 | stw r17,LTR_ctr(r20) /* Save off the CTR */ | |
1265 | stw r13,LTR_save(r20) /* Save the savearea */ | |
1266 | sth r11,LTR_excpt(r20) /* Save the exception type */ | |
1267 | #if ESPDEBUG | |
1268 | addi r17,r20,32 ; (TEST/DEBUG) | |
1269 | dcbst br0,r20 ; (TEST/DEBUG) | |
1270 | dcbst br0,r17 ; (TEST/DEBUG) | |
1271 | sync ; (TEST/DEBUG) | |
1272 | #endif | |
1273 | ||
1274 | /* | |
1275 | * We're done with the trace, except for maybe modifying the exception | |
1276 | * code later on. So, that means that we need to save R20 and CR5, but | |
1277 | * R0 to R5 are clear now. | |
1278 | * | |
1279 | * So, let's finish setting up the kernel registers now. | |
1280 | */ | |
1281 | ||
1282 | skipTrace2: | |
1283 | ||
1284 | #if PERFTIMES && DEBUG | |
1285 | li r3,68 ; Indicate interrupt | |
1286 | mr r4,r11 ; Get code to log | |
1287 | mr r5,r13 ; Get savearea to log | |
1288 | mr r8,r0 ; Save R0 | |
1289 | bl EXT(dbgLog2) ; Cut log entry | |
1290 | mr r0,r8 ; Restore R0 | |
1291 | #endif | |
1292 | ||
1293 | mfsprg r2,0 /* Get the per processor block */ | |
1294 | ||
1295 | #if CHECKSAVE | |
1296 | ||
1297 | lis r4,0x7FFF /* (TEST/DEBUG) */ | |
1298 | mfdec r12 /* (TEST/DEBUG) */ | |
1299 | or r4,r4,r12 /* (TEST/DEBUG) */ | |
1300 | mtdec r4 /* (TEST/DEBUG) */ | |
1301 | li r4,0x20 /* (TEST/DEBUG) */ | |
1302 | ||
1303 | lwarx r8,0,r4 ; ? | |
1304 | ||
1305 | mpwait2: lwarx r8,0,r4 /* (TEST/DEBUG) */ | |
1306 | mr. r8,r8 /* (TEST/DEBUG) */ | |
1307 | bne- mpwait2 /* (TEST/DEBUG) */ | |
1308 | stwcx. r4,0,r4 /* (TEST/DEBUG) */ | |
1309 | bne- mpwait2 /* (TEST/DEBUG) */ | |
1310 | ||
1311 | isync /* (TEST/DEBUG) */ | |
1312 | lwz r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1313 | mr. r4,r4 /* (TEST/DEBUG) */ | |
1314 | li r4,1 /* (TEST/DEBUG) */ | |
1315 | bne- doncheksv /* (TEST/DEBUG) */ | |
1316 | ||
1317 | lis r8,HIGH_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ | |
1318 | ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ | |
1319 | ||
1320 | stw r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1321 | ||
1322 | lwarx r4,0,r8 ; ? | |
1323 | ||
1324 | mpwait2x: lwarx r4,0,r8 /* (TEST/DEBUG) */ | |
1325 | mr. r4,r4 /* (TEST/DEBUG) */ | |
1326 | bne- mpwait2x /* (TEST/DEBUG) */ | |
1327 | stwcx. r8,0,r8 /* (TEST/DEBUG) */ | |
1328 | bne- mpwait2x /* (TEST/DEBUG) */ | |
1329 | ||
1330 | isync /* (TEST/DEBUG) */ | |
1331 | ||
1332 | #if 0 | |
1333 | rlwinm r4,r13,0,0,19 /* (TEST/DEBUG) */ | |
1334 | lwz r21,SACflags(r4) /* (TEST/DEBUG) */ | |
1335 | rlwinm r22,r21,24,24,31 /* (TEST/DEBUG) */ | |
1336 | cmplwi r22,0x00EE /* (TEST/DEBUG) */ | |
1337 | lwz r22,SACvrswap(r4) /* (TEST/DEBUG) */ | |
1338 | bne- currbad /* (TEST/DEBUG) */ | |
1339 | andis. r21,r21,hi16(sac_perm) /* (TEST/DEBUG) */ | |
1340 | bne- currnotbad /* (TEST/DEBUG) */ | |
1341 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1342 | bne+ currnotbad /* (TEST/DEBUG) */ | |
1343 | ||
1344 | currbad: lis r23,hi16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ | |
1345 | ori r23,r23,lo16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ | |
0b4e3aa0 | 1346 | lwz r23,0(r23) ; (TEST/DEBUG) |
1c79356b A |
1347 | stw r23,SVfree(r8) /* (TEST/DEBUG) */ |
1348 | ||
1349 | mfsprg r25,1 /* (TEST/DEBUG) */ | |
1350 | mtsprg 1,r23 /* (TEST/DEBUG) */ | |
1351 | lwz r26,SACalloc(r23) /* (TEST/DEBUG) */ | |
1352 | rlwinm r26,r26,0,1,31 /* (TEST/DEBUG) */ | |
1353 | stw r26,SACalloc(r23) /* (TEST/DEBUG) */ | |
1354 | ||
1355 | sync /* (TEST/DEBUG) */ | |
1356 | li r28,0 /* (TEST/DEBUG) */ | |
1357 | stw r28,0x20(br0) /* (TEST/DEBUG) */ | |
1358 | stw r28,0(r8) /* (TEST/DEBUG) */ | |
1359 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1360 | ||
1361 | currnotbad: | |
1362 | #endif | |
1363 | ||
1364 | lwz r28,SVcount(r8) /* (TEST/DEBUG) */ | |
1365 | lwz r21,SVinuse(r8) /* (TEST/DEBUG) */ | |
1366 | lwz r23,SVmin(r8) /* (TEST/DEBUG) */ | |
1367 | sub r22,r28,r21 /* (TEST/DEBUG) */ | |
1368 | cmpw r22,r23 /* (TEST/DEBUG) */ | |
1369 | bge+ cksave0 /* (TEST/DEBUG) */ | |
1370 | ||
1371 | li r4,0 /* (TEST/DEBUG) */ | |
1372 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1373 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1374 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1375 | ||
1376 | cksave0: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ | |
1377 | li r24,0 /* (TEST/DEBUG) */ | |
1378 | li r29,1 /* (TEST/SAVE) */ | |
1379 | ||
1380 | cksave0a: mr. r28,r28 /* (TEST/DEBUG) */ | |
1381 | beq- cksave3 /* (TEST/DEBUG) */ | |
1382 | ||
1383 | rlwinm. r21,r28,0,4,19 /* (TEST/DEBUG) */ | |
1384 | bne+ cksave1 /* (TEST/DEBUG) */ | |
1385 | ||
1386 | li r4,0 /* (TEST/DEBUG) */ | |
1387 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1388 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1389 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1390 | ||
1391 | cksave1: rlwinm. r21,r28,0,21,3 /* (TEST/DEBUG) */ | |
1392 | beq+ cksave2 /* (TEST/DEBUG) */ | |
1393 | ||
1394 | li r4,0 /* (TEST/DEBUG) */ | |
1395 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1396 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1397 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1398 | ||
1399 | cksave2: lwz r25,SACalloc(r28) /* (TEST/DEBUG) */ | |
1400 | lbz r26,SACflags+2(r28) /* (TEST/DEBUG) */ | |
1401 | lbz r21,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1402 | cmplwi r26,0x00EE /* (TEST/DEBUG) */ | |
1403 | stb r29,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1404 | beq+ cksave2z | |
1405 | ||
1406 | li r4,0 /* (TEST/DEBUG) */ | |
1407 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1408 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1409 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1410 | ||
1411 | cksave2z: mr. r21,r21 /* (TEST/DEBUG) */ | |
1412 | beq+ cksave2a /* (TEST/DEBUG) */ | |
1413 | ||
1414 | li r4,0 /* (TEST/DEBUG) */ | |
1415 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1416 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1417 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1418 | ||
1419 | cksave2a: rlwinm r26,r25,1,31,31 /* (TEST/DEBUG) */ | |
1420 | rlwinm r27,r25,2,31,31 /* (TEST/DEBUG) */ | |
1421 | add r24,r24,r26 /* (TEST/DEBUG) */ | |
1422 | add r24,r24,r27 /* (TEST/DEBUG) */ | |
1423 | lwz r28,SACnext(r28) /* (TEST/DEBUG) */ | |
1424 | b cksave0a /* (TEST/DEBUG) */ | |
1425 | ||
1426 | cksave3: cmplw r24,r22 /* (TEST/DEBUG) */ | |
1427 | beq+ cksave4 /* (TEST/DEBUG) */ | |
1428 | ||
1429 | li r4,0 /* (TEST/DEBUG) */ | |
1430 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1431 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1432 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1433 | ||
1434 | cksave4: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ | |
1435 | li r24,0 /* (TEST/DEBUG) */ | |
1436 | ||
1437 | cksave5: mr. r28,r28 /* (TEST/DEBUG) */ | |
1438 | beq- cksave6 /* (TEST/DEBUG) */ | |
1439 | stb r24,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1440 | lwz r28,SACnext(r28) /* (TEST/DEBUG) */ | |
1441 | b cksave5 /* (TEST/DEBUG) */ | |
1442 | ||
1443 | cksave6: | |
1444 | ||
1445 | li r4,0 /* (TEST/DEBUG) */ | |
1446 | stw r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1447 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1448 | ||
1449 | doncheksv: | |
1450 | li r4,0 /* (TEST/DEBUG) */ | |
1451 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1452 | mtdec r12 /* (TEST/DEBUG) */ | |
1453 | #endif | |
1454 | ||
1455 | lis r4,HIGH_ADDR(EXT(MPspec)) /* Get the MP control block */ | |
1456 | dcbt 0,r2 /* We'll need the per_proc in a sec */ | |
1457 | cmplwi cr0,r11,T_INTERRUPT /* Do we have an external interrupt? */ | |
1458 | ori r4,r4,LOW_ADDR(EXT(MPspec)) /* Get the bottom half of the MP control block */ | |
1459 | bne+ notracex /* Not an external... */ | |
1460 | ||
1461 | /* | |
1462 | * Here we check to see if there was a interprocessor signal | |
1463 | */ | |
1464 | ||
1465 | lwz r4,MPSSIGPhandler(r4) /* Get the address of the SIGP interrupt filter */ | |
1466 | lhz r3,PP_CPU_FLAGS(r2) /* Get the CPU flags */ | |
1467 | cmplwi cr1,r4,0 /* Check if signal filter is initialized yet */ | |
1468 | andi. r3,r3,LOW_ADDR(SIGPactive) /* See if this processor has started up */ | |
1469 | mtlr r4 /* Load up filter address */ | |
1470 | beq- cr1,notracex /* We don't have a filter yet... */ | |
1471 | beq- notracex /* This processor hasn't started filtering yet... */ | |
1472 | ||
1473 | blrl /* Filter the interrupt */ | |
1474 | ||
1475 | mfsprg r2,0 /* Make sure we have the per processor block */ | |
1476 | cmplwi cr0,r3,kMPIOInterruptPending /* See what the filter says */ | |
1477 | li r11,T_INTERRUPT /* Assume we have a regular external 'rupt */ | |
1478 | beq+ modRupt /* Yeah, we figured it would be... */ | |
1479 | li r11,T_SIGP /* Assume we had a signal processor interrupt */ | |
1480 | bgt+ modRupt /* Yeah, at this point we would assume so... */ | |
1481 | li r11,T_IN_VAIN /* Nothing there actually, so eat it */ | |
1482 | ||
1483 | modRupt: stw r11,PP_SAVE_EXCEPTION_TYPE(r2) /* Set that it was either in vain or a SIGP */ | |
1484 | stw r11,saveexception(r13) /* Save the exception code here also */ | |
1485 | bne- cr5,notracex /* Jump if no tracing... */ | |
1486 | sth r11,LTR_excpt(r20) /* Save the exception type */ | |
1487 | ||
1488 | notracex: | |
1489 | ||
1490 | #if 0 | |
1491 | bf featSMP,nopir6 /* (TEST/DEBUG) */ | |
1492 | mfspr r7,pir /* (TEST/DEBUG) */ | |
1493 | b gotpir6 /* (TEST/DEBUG) */ | |
1494 | nopir6: li r7,0 /* (TEST/DEBUG) */ | |
1495 | gotpir6: /* (TEST/DEBUG) */ | |
1496 | lis r6,HIGH_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ | |
1497 | rlwinm r7,r7,8,23,23 /* (TEST/DEBUG) */ | |
1498 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1499 | rlwimi r7,r7,1,22,22 /* (TEST/DEBUG) */ | |
1500 | ori r6,r6,LOW_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ | |
1501 | rlwinm r1,r11,2,0,29 /* (TEST/DEBUG) */ | |
1502 | add r6,r6,r7 /* (TEST/DEBUG) */ | |
1503 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1504 | lwz r21,(47*16)+8(r6) /* (TEST/DEBUG) */ | |
1505 | lwz r22,(47*16)+12(r6) /* (TEST/DEBUG) */ | |
1506 | add r1,r1,r6 /* (TEST/DEBUG) */ | |
1507 | mftb r24 /* (TEST/DEBUG) */ | |
1508 | sub r22,r24,r22 /* (TEST/DEBUG) */ | |
1509 | lwz r4,4(r6) /* (TEST/DEBUG) */ | |
1510 | cmplw cr2,r22,r21 /* (TEST/DEBUG) */ | |
1511 | lwz r7,4(r1) /* (TEST/DEBUG) */ | |
1512 | lwz r21,8(r6) /* (TEST/DEBUG) */ | |
1513 | blt+ cr2,nottime /* (TEST/DEBUG) */ | |
1514 | stw r24,(47*16)+12(r6) /* (TEST/DEBUG) */ | |
1515 | ||
1516 | nottime: addi r4,r4,1 /* (TEST/DEBUG) */ | |
1517 | lwz r22,8(r1) /* (TEST/DEBUG) */ | |
1518 | addi r7,r7,1 /* (TEST/DEBUG) */ | |
1519 | stw r4,4(r6) /* (TEST/DEBUG) */ | |
1520 | lwz r3,0(r6) /* (TEST/DEBUG) */ | |
1521 | mr. r21,r21 /* (TEST/DEBUG) */ | |
1522 | stw r7,4(r1) /* (TEST/DEBUG) */ | |
1523 | mtlr r12 /* (TEST/DEBUG) */ | |
1524 | lwz r1,0(r1) /* (TEST/DEBUG) */ | |
1525 | beq- nottimed1 /* (TEST/DEBUG) */ | |
1526 | blt+ cr2,isnttime1 /* (TEST/DEBUG) */ | |
1527 | ||
1528 | nottimed1: mr. r3,r3 /* (TEST/DEBUG) */ | |
1529 | bgelrl+ /* (TEST/DEBUG) */ | |
1530 | ||
1531 | isnttime1: mr. r22,r22 /* (TEST/DEBUG) */ | |
1532 | beq- nottimed2 /* (TEST/DEBUG) */ | |
1533 | blt+ cr2,isnttime2 /* (TEST/DEBUG) */ | |
1534 | ||
1535 | nottimed2: mr. r3,r1 /* (TEST/DEBUG) */ | |
1536 | mtlr r12 /* (TEST/DEBUG) */ | |
1537 | mr r4,r7 /* (TEST/DEBUG) */ | |
1538 | bgelrl+ /* (TEST/DEBUG) */ | |
1539 | mr r3,r11 /* (TEST/DEBUG) */ | |
1540 | ||
1541 | isnttime2: cmplwi r11,T_DATA_ACCESS /* (TEST/DEBUG) */ | |
1542 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1543 | bne+ nodsidisp /* (TEST/DEBUG) */ | |
1544 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1545 | beq- nottimed3 /* (TEST/DEBUG) */ | |
1546 | blt+ cr2,nodsidisp /* (TEST/DEBUG) */ | |
1547 | ||
1548 | nottimed3: li r3,5 /* (TEST/DEBUG) */ | |
1549 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1550 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1551 | mtlr r12 /* (TEST/DEBUG) */ | |
1552 | blrl /* (TEST/DEBUG) */ | |
1553 | ||
1554 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1555 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1556 | lis r3,9 /* (TEST/DEBUG) */ | |
1557 | ori r3,r3,5 /* (TEST/DEBUG) */ | |
1558 | mtlr r12 /* (TEST/DEBUG) */ | |
1559 | lwz r4,savedar(r13) /* (TEST/DEBUG) */ | |
1560 | blrl /* (TEST/DEBUG) */ | |
1561 | ||
1562 | nodsidisp: cmplwi r11,T_INSTRUCTION_ACCESS /* (TEST/DEBUG) */ | |
1563 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1564 | bne+ noisidisp /* (TEST/DEBUG) */ | |
1565 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1566 | beq- nottimed4 /* (TEST/DEBUG) */ | |
1567 | blt+ cr2,noisidisp /* (TEST/DEBUG) */ | |
1568 | ||
1569 | nottimed4: li r3,6 /* (TEST/DEBUG) */ | |
1570 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1571 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1572 | mtlr r12 /* (TEST/DEBUG) */ | |
1573 | blrl /* (TEST/DEBUG) */ | |
1574 | ||
1575 | noisidisp: mr r3,r11 /* (TEST/DEBUG) */ | |
1576 | #endif | |
1577 | ||
1578 | #if 0 | |
1579 | cmplwi r11,T_PROGRAM /* (TEST/DEBUG) */ | |
1580 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1581 | bne+ nopgmdisp /* (TEST/DEBUG) */ | |
1582 | li r3,7 /* (TEST/DEBUG) */ | |
1583 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1584 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1585 | mtlr r12 /* (TEST/DEBUG) */ | |
1586 | blrl /* (TEST/DEBUG) */ | |
1587 | ||
1588 | nopgmdisp: mr r3,r11 /* (TEST/DEBUG) */ | |
1589 | #endif | |
1590 | ||
1591 | li r21,0 ; Assume no processor register for now | |
1592 | lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters | |
1593 | bf featSMP,nopirhere ; Jump if this processor does not have a PIR... | |
1594 | mfspr r21,pir ; Get the PIR | |
1595 | ||
1596 | nopirhere: ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters | |
1597 | lwz r7,savesrr1(r13) ; Get the entering MSR | |
1598 | rlwinm r21,r21,8,20,23 ; Get index to processor counts | |
1599 | mtcrf 0x80,r0 /* Set our CR0 to the high nybble of the request code */ | |
1600 | rlwinm r6,r0,1,0,31 /* Move sign bit to the end */ | |
1601 | cmplwi cr1,r11,T_SYSTEM_CALL /* Did we get a system call? */ | |
1602 | crandc cr0_lt,cr0_lt,cr0_gt /* See if we have R0 equal to 0b10xx...x */ | |
1603 | add r12,r12,r21 ; Point to the processor count area | |
1604 | cmplwi cr3,r11,T_IN_VAIN /* Was this all in vain? All for nothing? */ | |
1605 | lwzx r22,r12,r11 ; Get the old value | |
1606 | cmplwi cr2,r6,1 /* See if original R0 had the CutTrace request code in it */ | |
1607 | addi r22,r22,1 ; Count this one | |
1608 | cmplwi cr4,r11,T_SIGP /* Indicate if we had a SIGP 'rupt */ | |
1609 | stwx r22,r12,r11 ; Store it back | |
1610 | ||
1611 | beq- cr3,EatRupt /* Interrupt was all for nothing... */ | |
1612 | cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? | |
1613 | bne+ cr1,noCutT /* Not a system call... */ | |
1614 | bnl+ cr0,noCutT /* R0 not 0b10xxx...x, can't be any kind of magical system call... */ | |
1615 | rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? | |
1616 | lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags | |
1617 | beq+ FCisok ; From supervisor state... | |
1618 | ||
1619 | ori r1,r1,lo16(EXT(dgWork)) ; Again | |
1620 | lwz r1,dgFlags(r1) ; Get the flags | |
1621 | rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? | |
1622 | beq- noCutT ; No... | |
1623 | ||
1624 | FCisok: beq- cr2,isCutTrace /* This is a CutTrace system call */ | |
1625 | ||
1626 | /* | |
1627 | * Here's where we call the firmware. If it returns T_IN_VAIN, that means | |
1628 | * that it has handled the interruption. Remember: thou shalt not trash R13 | |
1629 | * or R20 while you are away. Anything else is ok. | |
1630 | */ | |
1631 | ||
1632 | lis r1,hi16(EXT(FirmwareCall)) /* Top half of firmware call handler */ | |
1633 | ori r1,r1,lo16(EXT(FirmwareCall)) /* Bottom half of it */ | |
1634 | lwz r3,saver3(r13) /* Restore the first parameter, the rest are ok already */ | |
1635 | mtlr r1 /* Get it in the link register */ | |
1636 | blrl /* Call the handler */ | |
1637 | ||
1638 | cmplwi r3,T_IN_VAIN /* Was it handled? */ | |
1639 | mfsprg r2,0 /* Restore the per_processor area */ | |
1640 | beq+ EatRupt /* Interrupt was handled... */ | |
1641 | mr r11,r3 /* Put the 'rupt code in the right register */ | |
1642 | b noSIGP /* Go to the normal system call handler */ | |
1643 | ||
1644 | isCutTrace: | |
1645 | li r7,-32768 /* Get a 0x8000 for the exception code */ | |
1646 | bne- cr5,EatRupt /* Tracing is disabled... */ | |
1647 | sth r7,LTR_excpt(r20) /* Modify the exception type to a CutTrace */ | |
1648 | b EatRupt /* Time to go home... */ | |
1649 | ||
1650 | /* We are here 'cause we didn't have a CutTrace system call */ | |
1651 | ||
1652 | noCutT: beq- cr3,MachineCheck ; Whoa... Machine check... | |
1653 | bne+ cr4,noSIGP /* Skip away if we didn't get a SIGP... */ | |
1654 | ||
1655 | lis r6,HIGH_ADDR(EXT(MPsignalFW)) /* Top half of SIGP handler */ | |
1656 | ori r6,r6,LOW_ADDR(EXT(MPsignalFW)) /* Bottom half of it */ | |
1657 | mtlr r6 /* Get it in the link register */ | |
1658 | ||
1659 | blrl /* Call the handler - we'll only come back if this is an AST, */ | |
1660 | /* 'cause FW can't handle that */ | |
1661 | mfsprg r2,0 /* Restore the per_processor area */ | |
1662 | ; | |
1663 | ; The following interrupts are the only ones that can be redriven | |
1664 | ; by the higher level code or emulation routines. | |
1665 | ; | |
1666 | ||
1667 | Redrive: cmplwi cr0,r3,T_IN_VAIN /* Did the signal handler eat the signal? */ | |
1668 | mr r11,r3 /* Move it to the right place */ | |
1669 | beq+ cr0,EatRupt /* Bail now if the signal handler processed the signal... */ | |
1670 | ||
1671 | ||
1672 | /* | |
1673 | * Here's where we check for the other fast-path exceptions: translation exceptions, | |
1674 | * emulated instructions, etc. | |
1675 | */ | |
1676 | ||
1677 | noSIGP: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist | |
1678 | cmplwi cr1,r11,T_PROGRAM /* See if we got a program exception */ | |
1679 | cmplwi cr2,r11,T_INSTRUCTION_ACCESS /* Check on an ISI */ | |
1680 | bne+ cr3,noAltivecAssist ; It is not an assist... | |
1681 | b EXT(AltivecAssist) ; It is an assist... | |
1682 | ||
1683 | noAltivecAssist: | |
1684 | bne+ cr1,noEmulate ; No emulation here... | |
1685 | b EXT(Emulate) ; Go try to emulate... | |
1686 | ||
1687 | noEmulate: cmplwi cr3,r11,T_CSWITCH /* Are we context switching */ | |
1688 | cmplwi r11,T_DATA_ACCESS /* Check on a DSI */ | |
1689 | beq- cr2,DSIorISI /* It's a PTE fault... */ | |
1690 | beq- cr3,conswtch /* It's a context switch... */ | |
1691 | bne+ PassUp /* It's not a PTE fault... */ | |
1692 | ||
/*
 *	DSIorISI: attempt to resolve a DSI/ISI (PTE fault) at low level.
 *
 *	Calls handlePF with the interruption code in r3.  If it returns
 *	T_IN_VAIN the fault was handled and we resume the interrupted code
 *	directly.  Otherwise, if MSR[RI] was set at exception time the fault
 *	occurred in recoverable emulation code: clear RI, advance SRR0 past
 *	the failing instruction, clear CR0_EQ in the saved CR so the
 *	emulation code can see that it failed, and resume.  If RI was not
 *	set, the fault is passed up to the high-level handler.
 *
 *	R13 = current savearea (must be preserved across handlePF).
 */

DSIorISI:
		lis	r7,HIGH_ADDR(EXT(handlePF))	; Top half of the page fault handler
		ori	r7,r7,LOW_ADDR(EXT(handlePF))	; Bottom half of it
		mtlr	r7				; Set up to call it
		mr	r3,r11				; Pass in the interruption code

		blrl					; See if we can handle this fault

		lwz	r0,savesrr1(r13)		; Get the MSR in use at exception time
		mfsprg	r2,0				; Get back per_proc
		cmplwi	cr1,r3,T_IN_VAIN		; Was it handled?
		andi.	r4,r0,lo16(MASK(MSR_RI))	; See if the recover bit is on
		mr	r11,r3				; Make sure we can find this later
		beq+	cr1,EatRupt			; Handled, just blast back to the user...
		andc	r0,r0,r4			; Remove the recover bit
		beq+	PassUp				; RI not on, normal case...
		lwz	r4,savesrr0(r13)		; Get the failing instruction address
		lwz	r5,savecr(r13)			; Get the condition register
		stw	r0,savesrr1(r13)		; Save the result MSR
		addi	r4,r4,4				; Skip failing instruction
		rlwinm	r5,r5,0,3,1			; Clear CR0_EQ to let emulation code know we failed
		stw	r4,savesrr0(r13)		; Save the resume instruction address
		stw	r5,savecr(r13)			; Save the modified CR (bug fix: was storing r4, the PC, into savecr)
		b	EatRupt				; Resume emulated code
1722 | ||
/*
 *	Context switch firmware call.  The old context has already been
 *	saved, and the new savearea is in saver3.  We swap savearea
 *	pointers, mark the old savearea attached, convert its address to
 *	virtual, and hand it to the switched-to routine via its saver3
 *	before joining the common exit path.
 */
conswtch:
		lwz	r28,SAVflags(r13)		; Get the flags of the current savearea
		mr	r29,r13				; Remember the old savearea
		rlwinm	r30,r13,0,0,19			; Get the start of the savearea block
		lwz	r5,saver3(r13)			; Pick up the new savearea
		oris	r28,r28,HIGH_ADDR(SAVattach)	; Turn on the attached flag
		lwz	r30,SACvrswap(r30)		; Get the real-to-virtual conversion
		mr	r13,r5				; Switch saveareas
		xor	r27,r29,r30			; Convert the old savearea address to virtual
		stw	r28,SAVflags(r29)		; Stash the flags back
		stw	r27,saver3(r5)			; Pass the old savearea to the switch-to routine
		b	EatRupt				; Start 'er up...
1739 | ||
;
;	Machine check handler.
;
;	If SRR1[dcmck] is set, the check came from the data cache.  This path
;	(presumably an L1 data cache parity error on a processor with MSSCR0 —
;	confirm against the CPU manual) stops all data streams, spins briefly,
;	then sets the L1 hardware-flush bit and waits for the flush to
;	complete.  Once clean, the exception is swallowed (T_IN_VAIN).
;	Anything else falls through to notDCache.
;
MachineCheck:
		lwz	r27,savesrr1(r13)		; Get SRR1 at exception time
		rlwinm.	r11,r27,0,dcmck,dcmck		; Data cache machine check?
		beq+	notDCache			; No, check the other causes...

		mfspr	r11,msscr0			; Get the memory subsystem control register
		dssall					; Stop all data streams
		sync

		lwz	r27,savesrr1(r13)		; Reload SRR1 (serializing load)

hiccup:		cmplw	r27,r27				; Always equal, so...
		bne-	hiccup				; ...never loops; acts as a delay
		isync

		oris	r11,r11,hi16(dl1hwfm)		; Set the L1 data cache hardware flush bit
		mtspr	msscr0,r11			; Start the flush

rstbsy:		mfspr	r11,msscr0			; Get the flush status
		rlwinm.	r11,r11,0,dl1hwf,dl1hwf		; Still flushing?
		bne	rstbsy				; Yes, keep waiting...

		sync					; Make sure everything is complete

		li	r11,T_IN_VAIN			; Mark the exception handled
		b	EatRupt				; Swallow it and resume...
1772 | ||
1773 | ||
notDCache:
;
;	Not a data cache flush.  Check whether the machine check hit inside
;	ml_probe_read (a deliberate probe of possibly-bad physical memory).
;	If the failing PC lies in [ml_probe_read, ml_probe_read_mck), the
;	check was expected: restore the DBATs the probe trashed, fake a
;	failure return (RC = 0) directly to the probe's caller, and eat the
;	exception.  Otherwise, pass the machine check up.
;
		lwz	r30,savesrr0(r13)		; Get the failing PC
		lis	r28,hi16(EXT(ml_probe_read_mck))	; High half of the range end
		lis	r27,hi16(EXT(ml_probe_read))		; High half of the range start
		ori	r28,r28,lo16(EXT(ml_probe_read_mck))	; Low half of the range end
		ori	r27,r27,lo16(EXT(ml_probe_read))	; Low half of the range start
		cmplw	r30,r28				; Check against the highest possible
		cmplw	cr1,r30,r27			; Check against the lowest
		bge-	PassUp				; Outside of range...
		blt-	cr1,PassUp			; Outside of range...

;
;	The probe routine stashed the proper DBAT values in saver5-saver9.
;	Restore them (NOTE(review): only DBAT0 gets its low half reloaded
;	here -- presumably the others' low halves were untouched; confirm
;	against ml_probe_read), then return directly to the probe's caller.
;
		lwz	r30,saver5(r13)			; Get proper DBAT values
		lwz	r28,saver6(r13)
		lwz	r27,saver7(r13)
		lwz	r11,saver8(r13)
		lwz	r18,saver9(r13)

		sync
		mtdbatu	0,r30				; Restore DBAT 0 high
		mtdbatl	0,r28				; Restore DBAT 0 low
		mtdbatu	1,r27				; Restore DBAT 1 high
		mtdbatu	2,r11				; Restore DBAT 2 high
		mtdbatu	3,r18				; Restore DBAT 3 high
		sync

		lwz	r28,savelr(r13)			; Get the caller's return point
		lwz	r27,saver0(r13)			; Get the MSR the probe stashed in r0
		li	r30,0				; Get a failure return code
		stw	r28,savesrr0(r13)		; Return directly to the probe's caller
		stw	r27,savesrr1(r13)		; Continue with the stashed MSR
		stw	r30,saver3(r13)			; Set the return code
		li	r11,T_IN_VAIN			; Mark the exception handled
		b	EatRupt				; Yum, yum, eat it all up...
1817 | ||
/*
 *	EmulExit: return point from the instruction emulators.
 *
 *	If the emulator handled everything (r11 == T_IN_VAIN) we restore
 *	context and resume the interrupted code directly.  Otherwise, if the
 *	savearea has the SAVredrive flag set, the emulator wants the
 *	interruption reprocessed as a different type (e.g. a DSI): clear the
 *	flag, reload the exception code, and go back through Redrive.
 *	Failing that, pass the original interruption up the line.
 */

		.align	5
		.globl	EXT(EmulExit)

LEXT(EmulExit)

		cmplwi	r11,T_IN_VAIN			; Was it fully emulated?
		lis	r1,hi16(SAVredrive)		; Get the redrive request flag
		mfsprg	r2,0				; Restore the per_proc area
		beq+	EatRupt				; Yeah, just blast back to the user...
		lwz	r4,SAVflags(r13)		; Pick up the savearea flags

		and.	r0,r4,r1			; Was a redrive requested?
		andc	r4,r4,r1			; Clear the redrive flag

		beq+	PassUp				; No redrive, just keep on going...

		lwz	r3,saveexception(r13)		; Reload the exception code
		stw	r4,SAVflags(r13)		; Store the updated flags
		b	Redrive				; Redrive the exception...
1844 | ||
/*
 *	PassUp: jump into the main handler code, switching on VM translation
 *	at the same time.
 *
 *	We assume kernel data is mapped contiguously in physical memory;
 *	otherwise we would need to switch on (at least) virtual data.  The
 *	segment registers are already set up.  The handler receives the
 *	exception code in r3 and the virtual address of the savearea in r4.
 */
PassUp:
		lis	r2,hi16(EXT(exception_handlers))	; Get the exception vector table address
		ori	r2,r2,lo16(EXT(exception_handlers))	; And its low half
		lwzx	r6,r2,r11			; Index by exception code to get the handler

PassUpDeb:	lwz	r8,SAVflags(r13)		; Get the savearea flags
		mtsrr0	r6				; Set the handler as the launch address
		oris	r8,r8,HIGH_ADDR(SAVattach)	; Since we're passing it up, attach it
		rlwinm	r5,r13,0,0,19			; Back off to the start of the savearea block

		mfmsr	r3				; Get our MSR
		stw	r8,SAVflags(r13)		; Pass up the flags
		rlwinm	r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear all but the trace bits
		li	r2,MSR_SUPERVISOR_INT_OFF	; Get our normal MSR value
		lwz	r5,SACvrswap(r5)		; Get the real-to-virtual conversion
		or	r2,r2,r3			; Keep the trace bits if they're on
		mr	r3,r11				; Pass the exception code in the parameter reg
		mtsrr1	r2				; Set up our normal MSR value
		xor	r4,r13,r5			; Pass up the virtual address of the savearea

		rfi					; Launch the exception handler

		.long	0				/* Leave these here gol durn it! */
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
1880 | ||
1881 | /* | |
1882 | * This routine is the only place where we return from an interruption. | |
1883 | * Anyplace else is wrong. Even if I write the code, it's still wrong. | |
1884 | * Feel free to come by and slap me if I do do it--even though I may | |
1885 | * have had a good reason to do it. | |
1886 | * | |
1887 | * All we need to remember here is that R13 must point to the savearea | |
1888 | * that has the context we need to load up. Translation and interruptions | |
1889 | * must be disabled. | |
1890 | * | |
1891 | * This code always loads the context in the savearea pointed to | |
1892 | * by R13. In the process, it throws away the savearea. If there | |
1893 | * is any tomfoolery with savearea stacks, it must be taken care of | |
1894 | * before we get here. | |
1895 | * | |
1896 | * Speaking of tomfoolery, this is where we synthesize interruptions | |
1897 | * if any need to be. | |
1898 | */ | |
1899 | ||
1900 | .align 5 | |
1901 | ||
1902 | EatRupt: mr r31,r13 /* Move the savearea pointer to the far end of the register set */ | |
1903 | ||
1904 | EatRupt2: mfsprg r2,0 /* Get the per_proc block */ | |
1905 | dcbt 0,r31 ; Get this because we need it very soon | |
1906 | ||
1907 | #if TRCSAVE | |
1908 | lwz r30,saver0(r31) ; (TEST/DEBUG) Get users R0 | |
1909 | lwz r20,saveexception(r31) ; (TEST/DEBUG) Returning from trace? | |
1910 | xor r30,r20,r30 ; (TEST/DEBUG) Make code | |
1911 | rlwinm r30,r30,1,0,31 ; (TEST/DEBUG) Make an easy test | |
1912 | cmplwi cr5,r30,0x61 ; (TEST/DEBUG) See if this is a trace | |
1913 | #endif | |
1914 | ||
1915 | /* | |
1916 | * First we see if we are able to free the new savearea. | |
1917 | * If it is not attached to anything, put it on the free list. | |
1918 | * This is real dangerous, we haven't restored context yet... | |
1919 | * So, the free savearea chain lock must stay until the bitter end! | |
1920 | */ | |
1921 | ||
1922 | /* | |
1923 | * It's dangerous here. We haven't restored anything from the current savearea yet. | |
1924 | * And, we mark it the active one. So, if we get an exception in here, it is | |
1925 | * unrecoverable. Unless we mess up, we can't get any kind of exception. So, | |
1926 | * it is important to assay this code as only the purest of gold. | |
1927 | * | |
1928 | * But first, see if there is a savearea hanging off of quickfret. If so, | |
1929 | * we release that one first and then come back for the other. We should rarely | |
1930 | * see one, they appear when FPU or VMX context is discarded by either returning | |
1931 | * to a higher exception level, or explicitly. | |
1932 | * | |
1933 | * A word about QUICKFRET: Multiple saveareas may be queued for release. It is | |
1934 | * the responsibility of the queuer to insure that the savearea is not multiply | |
1935 | * queued and that the appropriate inuse bits are reset. | |
1936 | */ | |
1937 | ||
1938 | ||
1939 | ||
1940 | mfsprg r27,2 ; Get the processor features | |
1941 | lwz r1,savesrr1(r31) ; Get destination MSR | |
1942 | mtcrf 0x60,r27 ; Set CRs with thermal facilities | |
1943 | mr r18,r31 ; Save the savearea pointer | |
1944 | rlwinm. r0,r1,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? | |
1945 | lwz r19,PP_QUICKFRET(r2) ; Get the quick release savearea | |
1946 | crandc 31,pfThermalb,pfThermIntb ; See if we have both thermometer and not interrupt facility | |
1947 | li r0,0 ; Get a zero | |
1948 | crandc 31,31,cr0_eq ; Factor in enablement | |
1949 | la r21,savesr0(r18) ; Point to the first thing we restore | |
1950 | bf 31,tempisok ; No thermal checking needed... | |
1951 | ||
1952 | ; | |
1953 | ; We get to here if 1) there is a thermal facility, and 2) the hardware | |
1954 | ; will or cannot interrupt, and 3) the interrupt will be enabled after this point. | |
1955 | ; | |
1956 | ||
1957 | mfspr r16,thrm3 ; Get thermal 3 | |
1958 | mfspr r14,thrm1 ; Get thermal 1 | |
1959 | rlwinm. r16,r16,0,thrme,thrme ; Is the thermometer enabled? | |
1960 | mfspr r15,thrm2 ; Get thermal 2 | |
1961 | beq- tempisok ; No thermometer... | |
1962 | rlwinm r16,r14,2,28,31 ; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits | |
1963 | srawi r0,r15,31 ; Make a mask of 1s if temperature over | |
1964 | rlwinm r30,r15,2,28,31 ; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits | |
1965 | ; | |
1966 | ; Note that the following compare check that V, TIN, and TIV are set and that TIE is cleared. | |
1967 | ; This insures that we only emulate when the hardware is not set to interrupt. | |
1968 | ; | |
1969 | cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? | |
1970 | cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? | |
1971 | and r15,r15,r0 ; Keep high temp if that interrupted, zero if not | |
1972 | cror cr0_eq,cr0_eq,cr1_eq ; Merge both | |
1973 | andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did | |
1974 | bne+ tempisok ; Nope, temperature is in range | |
1975 | ||
1976 | li r3,T_THERMAL ; Time to emulate a thermal interruption | |
1977 | or r14,r14,r15 ; Get contents of interrupting register | |
1978 | mr r13,r31 ; Make sure savearea is pointed to correctly | |
1979 | stw r3,saveexception(r31) ; Restore exception code | |
1980 | stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar | |
1981 | b Redrive ; Go process this new interruption... | |
1982 | ||
1983 | ||
1984 | tempisok: lis r30,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ | |
1985 | stw r0,PP_QUICKFRET(r2) /* Clear quickfret pointer */ | |
1986 | ori r30,r30,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ | |
1987 | dcbt 0,r21 /* Touch in the first thing */ | |
0b4e3aa0 A |
1988 | |
1989 | #if 0 | |
1990 | li r23,0 ; (TEST/DEBUG) | |
1991 | rlwinm r14,r31,0,0,19 ; (TEST/DEBUG) | |
1992 | lwz r21,SACflags(r14) ; (TEST/DEBUG) | |
1993 | rlwinm r22,r21,24,24,31 ; (TEST/DEBUG) | |
1994 | cmplwi r22,0x00EE ; (TEST/DEBUG) | |
1995 | beq+ nodienodie1 ; (TEST/DEBUG) | |
1996 | ||
1997 | dodiedodie: li r1,0x666 ; (TEST/DEBUG) | |
1998 | BREAKPOINT_TRAP ; (TEST/DEBUG) | |
1999 | ||
2000 | nodienodie1: | |
2001 | mr r23,r19 ; (TEST/DEBUG) | |
2002 | ||
2003 | chkitagain: mr. r23,r23 ; (TEST/DEBUG) | |
2004 | beq nodienodie2 ; (TEST/DEBUG) | |
2005 | rlwinm r14,r23,0,0,19 ; (TEST/DEBUG) | |
2006 | lwz r21,SACflags(r14) ; (TEST/DEBUG) | |
2007 | rlwinm r22,r21,24,24,31 ; (TEST/DEBUG) | |
2008 | cmplwi r22,0x00EE ; (TEST/DEBUG) | |
2009 | bne- dodiedodie ; (TEST/DEBUG) | |
2010 | lwz r23,SAVqfret(r23) ; (TEST/DEBUG) | |
2011 | b chkitagain ; (TEST/DEBUG) | |
2012 | ||
2013 | nodienodie2: | |
2014 | #endif | |
1c79356b A |
2015 | |
2016 | #if TRCSAVE | |
2017 | beq- cr5,trkill0 ; (TEST/DEBUG) Do not trace this type | |
2018 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2019 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2020 | beq- trkill0 ; (TEST/DEBUG) yes... | |
2021 | bl cte ; (TEST/DEBUG) Trace this | |
2022 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2023 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) Quickfret savearea | |
2024 | trkill0: | |
2025 | #endif | |
2026 | ||
2027 | rtlck: lwarx r22,0,r30 /* Grab the lock value */ | |
2028 | li r23,1 /* Use part of the delay time */ | |
2029 | mr. r22,r22 /* Is it locked? */ | |
2030 | bne- rtlcks /* Yeah, wait for it to clear... */ | |
2031 | stwcx. r23,0,r30 /* Try to seize that there durn lock */ | |
2032 | beq+ fretagain ; Got it... | |
2033 | b rtlck /* Collision, try again... */ | |
2034 | ||
2035 | rtlcks: lwz r22,SVlock(r30) /* Get that lock in here */ | |
2036 | mr. r22,r22 /* Is it free yet? */ | |
2037 | beq+ rtlck /* Yeah, try for it again... */ | |
2038 | b rtlcks /* Sniff away... */ | |
2039 | ||
2040 | ; | |
2041 | ; Lock gotten, toss the saveareas | |
2042 | ; | |
2043 | fretagain: | |
2044 | #if TRCSAVE | |
2045 | beq- cr5,trkill1 ; (TEST/DEBUG) Do not trace this type | |
2046 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2047 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2048 | beq- trkill1 ; (TEST/DEBUG) yes... | |
2049 | li r0,1 ; (TEST/DEBUG) ID number | |
2050 | bl cte ; (TEST/DEBUG) Trace this | |
2051 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2052 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) Quickfret savearea | |
2053 | trkill1: | |
2054 | #endif | |
2055 | ||
2056 | mr. r18,r18 ; Are we actually done here? | |
2057 | beq- donefret ; Yeah... | |
2058 | mr. r31,r19 ; Is there a quickfret to do? | |
2059 | beq+ noqfrt ; Nope... | |
2060 | lwz r19,SAVqfret(r19) ; Yes, get the next in line | |
2061 | #if TRCSAVE | |
2062 | beq- cr5,trkill2 ; (TEST/DEBUG) Do not trace this type | |
2063 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2064 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2065 | beq- trkill2 ; (TEST/DEBUG) yes... | |
2066 | li r0,2 ; (TEST/DEBUG) ID number | |
2067 | bl cte ; (TEST/DEBUG) Trace this | |
2068 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2069 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea | |
2070 | stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss | |
2071 | trkill2: | |
2072 | #endif | |
2073 | b doqfrt ; Go do it... | |
2074 | ||
2075 | noqfrt: mr r31,r18 ; Set the area to release | |
2076 | li r18,0 ; Show we have done it | |
2077 | #if TRCSAVE | |
2078 | beq- cr5,trkill3 ; (TEST/DEBUG) Do not trace this type | |
2079 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2080 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2081 | beq- trkill3 ; (TEST/DEBUG) yes... | |
2082 | li r0,3 ; (TEST/DEBUG) ID number | |
2083 | bl cte ; (TEST/DEBUG) Trace this | |
2084 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2085 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea | |
2086 | stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss | |
2087 | trkill3: | |
2088 | #endif | |
2089 | ||
2090 | doqfrt: li r0,0 ; Get a constant 0 | |
2091 | lis r26,0x8000 /* Build a bit mask and assume first savearea */ | |
2092 | stw r0,SAVqfret(r31) ; Make sure back chain is unlinked | |
2093 | lwz r28,SAVflags(r31) ; Get the flags for the old active one | |
2094 | #if TRCSAVE | |
2095 | beq- cr5,trkill4 ; (TEST/DEBUG) Do not trace this type | |
2096 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2097 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2098 | beq- trkill4 ; (TEST/DEBUG) yes... | |
2099 | li r0,4 ; (TEST/DEBUG) ID number | |
2100 | bl cte ; (TEST/DEBUG) Trace this | |
2101 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2102 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) next quickfret savearea | |
2103 | stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss | |
2104 | stw r28,LTR_r4(r20) ; (TEST/DEBUG) Save current flags | |
2105 | trkill4: | |
2106 | #endif | |
2107 | rlwinm r25,r31,21,31,31 /* Get position of savearea in block */ | |
2108 | andis. r28,r28,HIGH_ADDR(SAVinuse) /* See if we need to free it */ | |
2109 | srw r26,r26,r25 /* Get bit position to deallocate */ | |
2110 | rlwinm r29,r31,0,0,19 /* Round savearea pointer to even page address */ | |
2111 | ||
2112 | bne- fretagain /* Still in use, we can't free this one... */ | |
2113 | ||
2114 | lwz r23,SACalloc(r29) /* Get the allocation for this block */ | |
2115 | lwz r24,SVinuse(r30) /* Get the in use count */ | |
2116 | mr r28,r23 ; (TEST/DEBUG) save for trace | |
2117 | or r23,r23,r26 /* Turn on our bit */ | |
2118 | subi r24,r24,1 /* Show that this one is free */ | |
2119 | cmplw r23,r26 /* Is our's the only one free? */ | |
2120 | stw r23,SACalloc(r29) /* Save it out */ | |
2121 | bne+ rstrest /* Nope, then the block is already on the free list */ | |
2122 | ||
2123 | lwz r22,SVfree(r30) /* Get the old head of the free list */ | |
2124 | stw r29,SVfree(r30) /* Point the head at us now */ | |
2125 | stw r22,SACnext(r29) ; Point us to the old last | |
2126 | ||
2127 | rstrest: stw r24,SVinuse(r30) /* Set the in use count */ | |
2128 | #if TRCSAVE | |
2129 | beq- cr5,trkill5 ; (TEST/DEBUG) Do not trace this type | |
2130 | lwz r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0) ; (TEST/DEBUG) Get the trace mask | |
2131 | mr. r14,r14 ; (TEST/DEBUG) Is it stopped? | |
2132 | beq- trkill5 ; (TEST/DEBUG) yes... | |
2133 | li r0,5 ; (TEST/DEBUG) ID number | |
2134 | bl cte ; (TEST/DEBUG) Trace this | |
2135 | stw r18,LTR_r1(r20) ; (TEST/DEBUG) Normal savearea | |
2136 | stw r19,LTR_r2(r20) ; (TEST/DEBUG) Next quickfret savearea | |
2137 | stw r31,LTR_r3(r20) ; (TEST/DEBUG) Current one to toss | |
2138 | stw r28,LTR_srr1(r20) ; (TEST/DEBUG) Save the original allocation | |
2139 | stw r23,LTR_dar(r20) ; (TEST/DEBUG) Save the new allocation | |
2140 | stw r24,LTR_save(r20) ; (TEST/DEBUG) Save the new in use count | |
2141 | stw r22,LTR_lr(r20) ; (TEST/DEBUG) Save the old top of free list | |
2142 | stw r29,LTR_ctr(r20) ; (TEST/DEBUG) Save the new top of free list | |
2143 | trkill5: | |
2144 | #endif | |
2145 | b fretagain ; Go finish up the rest... | |
2146 | ||
2147 | ; | |
2148 | ; Build the SR values depending upon destination. If we are going to the kernel, | |
2149 | ; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) | |
2150 | ; must be set to whatever it was at the last exception because it varies. All the rest | |
2151 | ; have been set up already. | |
2152 | ; | |
2153 | ; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and | |
2154 | ; SR14 (current implementation) must be restored always. The others must be set if | |
2155 | ; they are different that what was loaded last time (i.e., tasks have switched). | |
2156 | ; We check the last loaded address space ID and if the same, we skip the loads. | |
2157 | ; This is a performance gain because SR manipulations are slow. | |
2158 | ; | |
2159 | ||
2160 | .align 5 | |
2161 | ||
2162 | donefret: lwz r26,savesrr1(r31) ; Get destination state flags | |
2163 | lwz r7,PP_USERPMAP(r2) ; Pick up the user pmap we may launch | |
2164 | cmplw cr3,r14,r14 ; Set that we do not need to stop streams | |
2165 | rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system | |
2166 | li r14,PMAP_SEGS ; Point to segments | |
2167 | bne+ gotouser ; We are going into user state... | |
2168 | ||
2169 | lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time | |
2170 | mtsr sr14,r14 ; Set SR14 | |
2171 | b segsdone ; We are all set up now... | |
2172 | ||
2173 | .align 5 | |
2174 | ||
2175 | gotouser: dcbt r14,r7 ; Touch the segment register contents | |
0b4e3aa0 A |
2176 | lwz r9,spcFlags(r2) ; Pick up the special flags |
2177 | lwz r16,PP_LASTPMAP(r2) ; Pick up the last loaded pmap | |
1c79356b | 2178 | addi r14,r14,32 ; Second half of pmap segments |
0b4e3aa0 | 2179 | rlwinm r9,r9,userProtKeybit-2,2,2 ; Isolate the user state protection key |
1c79356b | 2180 | lwz r15,PMAP_SPACE(r7) ; Get the primary space |
0b4e3aa0 | 2181 | lwz r13,PMAP_VFLAGS(r7) ; Get the flags |
1c79356b | 2182 | dcbt r14,r7 ; Touch second page |
1c79356b | 2183 | oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value |
0b4e3aa0 A |
2184 | mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces |
2185 | xor r15,r15,r9 ; Flip to proper segment register key | |
1c79356b A |
2186 | lhz r9,PP_CPU_FLAGS(r2) ; Get the processor flags |
2187 | ||
2188 | addis r13,r15,0x0000 ; Get SR0 value | |
2189 | bf 16,nlsr0 ; No alternate here... | |
2190 | lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value | |
2191 | ||
2192 | nlsr0: mtsr sr0,r13 ; Load up the SR | |
2193 | rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on | |
2194 | ||
2195 | addis r13,r15,0x0010 ; Get SR1 value | |
2196 | bf 17,nlsr1 ; No alternate here... | |
2197 | lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value | |
2198 | ||
2199 | nlsr1: mtsr sr1,r13 ; Load up the SR | |
2200 | or r26,r26,r9 ; Flip on the BE bit for special trace if needed | |
2201 | ||
0b4e3aa0 | 2202 | cmplw r7,r16 ; Are we running the same segs as last time? |
1c79356b A |
2203 | |
2204 | addis r13,r15,0x0020 ; Get SR2 value | |
2205 | bf 18,nlsr2 ; No alternate here... | |
2206 | lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value | |
2207 | ||
2208 | nlsr2: mtsr sr2,r13 ; Load up the SR | |
2209 | ||
2210 | addis r13,r15,0x0030 ; Get SR3 value | |
2211 | bf 19,nlsr3 ; No alternate here... | |
2212 | lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value | |
2213 | ||
2214 | nlsr3: mtsr sr3,r13 ; Load up the SR | |
2215 | ||
2216 | addis r13,r15,0x00E0 ; Get SR14 value | |
2217 | bf 30,nlsr14 ; No alternate here... | |
2218 | lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value | |
2219 | ||
2220 | nlsr14: mtsr sr14,r13 ; Load up the SR | |
2221 | ||
2222 | beq+ segsdone ; All done if same pmap as last time... | |
2223 | ||
0b4e3aa0 A |
2224 | stw r7,PP_LASTPMAP(r2) ; Remember what we just loaded |
2225 | ||
1c79356b A |
2226 | addis r13,r15,0x0040 ; Get SR4 value |
2227 | bf 20,nlsr4 ; No alternate here... | |
2228 | lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value | |
2229 | ||
2230 | nlsr4: mtsr sr4,r13 ; Load up the SR | |
2231 | ||
2232 | addis r13,r15,0x0050 ; Get SR5 value | |
2233 | bf 21,nlsr5 ; No alternate here... | |
2234 | lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value | |
2235 | ||
2236 | nlsr5: mtsr sr5,r13 ; Load up the SR | |
2237 | ||
2238 | addis r13,r15,0x0060 ; Get SR6 value | |
2239 | bf 22,nlsr6 ; No alternate here... | |
2240 | lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value | |
2241 | ||
2242 | nlsr6: mtsr sr6,r13 ; Load up the SR | |
2243 | ||
2244 | addis r13,r15,0x0070 ; Get SR7 value | |
2245 | bf 23,nlsr7 ; No alternate here... | |
2246 | lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value | |
2247 | ||
2248 | nlsr7: mtsr sr7,r13 ; Load up the SR | |
2249 | ||
2250 | addis r13,r15,0x0080 ; Get SR8 value | |
2251 | bf 24,nlsr8 ; No alternate here... | |
2252 | lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value | |
2253 | ||
2254 | nlsr8: mtsr sr8,r13 ; Load up the SR | |
2255 | ||
2256 | addis r13,r15,0x0090 ; Get SR9 value | |
2257 | bf 25,nlsr9 ; No alternate here... | |
2258 | lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value | |
2259 | ||
2260 | nlsr9: mtsr sr9,r13 ; Load up the SR | |
2261 | ||
2262 | addis r13,r15,0x00A0 ; Get SR10 value | |
2263 | bf 26,nlsr10 ; No alternate here... | |
2264 | lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value | |
2265 | ||
2266 | nlsr10: mtsr sr10,r13 ; Load up the SR | |
2267 | ||
2268 | addis r13,r15,0x00B0 ; Get SR11 value | |
2269 | bf 27,nlsr11 ; No alternate here... | |
2270 | lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value | |
2271 | ||
; NOTE(review): this is the TAIL of the savearea-restore path; the routine's entry
; and the reload of sr0-sr10 precede this chunk. Visible register roles, grounded in
; the code below: r7 = pmap (PMAP_SEGS loads), r15 = base used to build default SR
; values (addis), r31 = savearea (saverN/savesrrN loads), r2 = per_proc (copied to
; r29), r27 = facility-availability flags (mtcrf). CR bits 28/29/31 select whether
; the alternate (pmap) SR value replaces the default one.
2272 | nlsr11: mtsr sr11,r13 ; Load up the SR | |
2273 | ||
2274 | addis r13,r15,0x00C0 ; Get SR12 value | |
2275 | bf 28,nlsr12 ; No alternate here... | |
2276 | lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value | |
2277 | ||
2278 | nlsr12: mtsr sr12,r13 ; Load up the SR | |
2279 | ||
2280 | addis r13,r15,0x00D0 ; Get SR13 value | |
2281 | bf 29,nlsr13 ; No alternate here... | |
2282 | lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value | |
2283 | ||
2284 | nlsr13: mtsr sr13,r13 ; Load up the SR | |
2285 | ||
; NOTE(review): sr14 is deliberately not reloaded here (there is no "bf 30" case
; between sr13 and sr15) — presumably it is reserved for other kernel use; confirm
; against the pmap/copy-window code elsewhere in this file.
2286 | addis r13,r15,0x00F0 ; Get SR15 value | |
2287 | bf 31,nlsr15 ; No alternate here... | |
2288 | lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value | |
2289 | ||
2290 | nlsr15: mtsr sr15,r13 ; Load up the SR | |
2291 | ||
; Segment registers are now set; restore the GPRs from the savearea. The lwz loads
; are interleaved with dcbt touches of upcoming cache lines (r28 walks ahead through
; the savearea) to hide load latency — do not reorder.
2292 | segsdone: li r1,emfp0 ; Point to the fp savearea | |
2293 | lwz r25,savesrr0(r31) ; Get the SRR0 to use | |
2294 | la r28,saver6(r31) /* Point to the next line to use */ | |
2295 | dcbt r1,r2 ; Start moving in a work area | |
2296 | lwz r0,saver0(r31) /* Restore */ | |
2297 | dcbt 0,r28 /* Touch it in */ | |
2298 | mr r29,r2 ; Save the per_proc | |
2299 | lwz r1,saver1(r31) /* Restore */ | |
2300 | lwz r2,saver2(r31) /* Restore */ | |
2301 | la r28,saver14(r31) /* Point to the next line to get */ | |
2302 | lwz r3,saver3(r31) /* Restore */ | |
2303 | mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) | |
2304 | lwz r4,saver4(r31) /* Restore */ | |
2305 | mtsrr0 r25 /* Restore the SRR0 now */ | |
2306 | lwz r5,saver5(r31) /* Restore */ | |
2307 | mtsrr1 r26 /* Restore the SRR1 now */ | |
2308 | lwz r6,saver6(r31) /* Restore */ | |
2309 | ||
2310 | dcbt 0,r28 /* Touch that next line on in */ | |
2311 | la r28,savexfpscrpad(r31) ; Point to the saved fpscr | |
2312 | ||
2313 | lwz r7,saver7(r31) /* Restore */ | |
2314 | dcbt 0,r28 ; Touch saved fpscr | |
2315 | lwz r8,saver8(r31) /* Restore */ | |
2316 | lwz r9,saver9(r31) /* Restore */ | |
2317 | lwz r10,saver10(r31) /* Restore */ | |
2318 | lwz r11,saver11(r31) /* Restore */ | |
2319 | lwz r12,saver12(r31) /* Restore */ | |
2320 | lwz r13,saver13(r31) /* Restore */ | |
2321 | la r28,saver22(r31) /* Point to the next line to do */ | |
2322 | lwz r14,saver14(r31) /* Restore */ | |
2323 | lwz r15,saver15(r31) /* Restore */ | |
2324 | ||
2325 | ; | |
2326 | ; Note that floating point will be enabled from here on until the RFI | |
2327 | ; | |
2328 | ||
; If the CPU has an FPU (pfFloatb), turn on MSR[FP] just long enough to run the
; fpscr restore through f0; f0's live value is parked in the per_proc emfp0 slot.
2329 | bf- pfFloatb,nofphere ; Skip if no floating point... | |
2330 | mfmsr r27 ; Save the MSR | |
2331 | ori r27,r27,lo16(MASK(MSR_FP)) ; Enable floating point | |
2332 | mtmsr r27 ; Really enable | |
2333 | isync | |
2334 | stfd f0,emfp0(r29) ; Save FP0 | |
2335 | lfd f0,savexfpscrpad(r31) ; Get the fpscr | |
2336 | mtfsf 0xFF,f0 ; Restore fpscr | |
2337 | lfd f0,emfp0(r29) ; Restore the used register | |
2338 | ||
2339 | nofphere: dcbt 0,r28 /* Touch in another line of context */ | |
2340 | ||
2341 | lwz r16,saver16(r31) /* Restore */ | |
2342 | lwz r17,saver17(r31) /* Restore */ | |
2343 | lwz r18,saver18(r31) /* Restore */ | |
2344 | lwz r19,saver19(r31) /* Restore */ | |
2345 | lwz r20,saver20(r31) /* Restore */ | |
2346 | lwz r21,saver21(r31) /* Restore */ | |
2347 | la r28,saver30(r31) /* Point to the final line */ | |
2348 | lwz r22,saver22(r31) /* Restore */ | |
2349 | ||
2350 | dcbt 0,r28 /* Suck it in */ | |
2351 | ||
2352 | lwz r23,saver23(r31) /* Restore */ | |
2353 | lwz r24,saver24(r31) /* Restore */ | |
2354 | lwz r25,saver25(r31) /* Restore */ | |
2355 | lwz r26,saver26(r31) /* Restore */ | |
2356 | lwz r27,saver27(r31) /* Restore */ | |
2357 | ||
2358 | lwz r28,savecr(r31) /* Get CR to restore */ | |
2359 | bf pfAltivecb,noavec4 ; No vector on this machine | |
2360 | lwz r29,savevrsave(r31) ; Get the vrsave | |
2361 | beq+ cr3,noavec3 ; SRs have not changed, no need to stop the streams... | |
2362 | dssall ; Kill all data streams | |
2363 | ; The streams should be suspended | |
2364 | ; already, and we do a bunch of | |
2365 | ; dependent loads and a sync later | |
2366 | ; so we should be cool. | |
2367 | ||
2368 | noavec3: mtspr vrsave,r29 ; Set the vrsave | |
2369 | ||
; Restore the special-purpose registers, then stage r30/r31's final values in
; sprg2/sprg3 so r28/r29 can be reloaded before the very last mfsprg's below.
2370 | noavec4: lwz r29,savexer(r31) /* Get XER to restore */ | |
2371 | mtcr r28 /* Restore the CR */ | |
2372 | lwz r28,savelr(r31) /* Get LR to restore */ | |
2373 | mtxer r29 /* Restore the XER */ | |
2374 | lwz r29,savectr(r31) /* Get the CTR to restore */ | |
2375 | mtlr r28 /* Restore the LR */ | |
2376 | lwz r28,saver30(r31) /* Restore */ | |
2377 | mtctr r29 /* Restore the CTR */ | |
2378 | lwz r29,saver31(r31) /* Restore */ | |
2379 | mtsprg 2,r28 /* Save R30 */ | |
2380 | lwz r28,saver28(r31) /* Restore */ | |
2381 | mtsprg 3,r29 /* Save R31 */ | |
2382 | lwz r29,saver29(r31) /* Restore */ | |
2383 | ||
2384 | #if PERFTIMES && DEBUG | |
2385 | stmw r1,0x280(br0) ; Save all registers | |
2386 | mfcr r20 ; Save the CR | |
2387 | mflr r21 ; Save the LR | |
2388 | mfsrr0 r9 ; Save SRR0 | |
2389 | mfsrr1 r11 ; Save SRR1 | |
2390 | mr r8,r0 ; Save R0 | |
2391 | li r3,69 ; Indicate interrupt | |
2392 | mr r4,r11 ; Set MSR to log | |
2393 | mr r5,r31 ; Get savearea to log | |
2394 | bl EXT(dbgLog2) ; Cut log entry | |
2395 | mr r0,r8 ; Restore R0 | |
2396 | mtsrr0 r9 ; Restore SRR0 | |
2397 | mtsrr1 r11 ; Restore SRR1 | |
2398 | mtlr r21 ; Restore the LR | |
2399 | mtcr r20 ; Restore the CR | |
2400 | lmw r1,0x280(br0) ; Restore all the rest | |
2401 | #endif | |
2402 | ||
; Release the savearea lock (sync orders the preceding accesses before the lock
; word is cleared), then recover r30/r31: sprg2 is reloaded with the per_proc
; feature flags after its staged value is consumed, restoring its steady-state use.
2403 | li r31,0 /* Get set to clear lock */ | |
2404 | sync /* Make sure it's all out there */ | |
2405 | stw r31,SVlock(r30) /* Unlock it */ | |
2406 | mfsprg r30,2 /* Restore R30 */ | |
2407 | mfsprg r31,0 ; Get per_proc | |
2408 | lwz r31,pfAvailable(r31) ; Get the feature flags | |
2409 | mtsprg 2,r31 ; Set the feature flags | |
2410 | mfsprg r31,3 /* Restore R31 */ | |
2411 | ||
2412 | rfi /* Click heels three times and think very hard that there's no place like home */ | |
2413 | ||
2414 | .long 0 /* For old 601 bug */ | |
2415 | .long 0 | |
2416 | .long 0 | |
2417 | .long 0 | |
2418 | .long 0 | |
2419 | .long 0 | |
2420 | .long 0 | |
2421 | .long 0 | |
2422 | ||
2423 | ||
2424 | ||
2425 | ||
2426 | /* | |
2427 | * exception_exit(savearea *) | |
2428 | * | |
2429 | * | |
2430 | * ENTRY : IR and/or DR and/or interruptions can be on | |
2431 | * R3 points to the physical address of a savearea | |
2432 | */ | |
2433 | ||
; Disables translation and interrupts (two paths: direct mtmsr, or the loadMSR
; "firmware" SC on CPUs flagged pfNoMSRirb), then either falls into EatRupt to
; discard the savearea and return, or re-dispatches a pending ("redrive")
; exception via Redrive. EatRupt/Redrive are defined elsewhere in this file.
2434 | .align 5 | |
2435 | .globl EXT(exception_exit) | |
2436 | ||
2437 | LEXT(exception_exit) | |
2438 | ||
2439 | mfsprg r29,2 ; Get feature flags | |
2440 | mfmsr r30 /* Get the current MSR */ | |
2441 | mtcrf 0x04,r29 ; Set the features | |
2442 | mr r31,r3 /* Get the savearea in the right register */ | |
2443 | andi. r30,r30,0x7FCF /* Turn off externals, IR, and DR */ | |
; NOTE(review): only hi16(SAVredrive) is loaded into r1, so the and. below tests
; just the upper halfword — assumes the redrive bit sits in the high 16 bits of
; SAVflags; confirm against the SAVredrive definition.
2444 | lis r1,hi16(SAVredrive) ; Get redrive request | |
2445 | ||
2446 | bt pfNoMSRirb,eeNoMSR ; No MSR... | |
2447 | ||
2448 | mtmsr r30 ; Translation and all off | |
2449 | isync ; Toss prefetch | |
2450 | b eeNoMSRx | |
2451 | ||
2452 | eeNoMSR: li r0,loadMSR ; Get the MSR setter SC | |
2453 | mr r3,r30 ; Get new MSR | |
2454 | sc ; Set it | |
2455 | ||
; Both MSR paths rejoin here with translation off; r13 carries the savearea too,
; mirroring what the shared EatRupt/Redrive code expects.
2456 | eeNoMSRx: | |
2457 | mfsprg r2,0 ; Get the per_proc block | |
2458 | lwz r4,SAVflags(r31) ; Pick up the flags | |
2459 | mr r13,r31 ; Put savearea here also | |
2460 | ||
2461 | and. r0,r4,r1 ; Check if redrive requested | |
2462 | andc r4,r4,r1 ; Clear redrive | |
2463 | ||
2464 | dcbt br0,r2 ; We will need this in just a sec | |
2465 | ||
2466 | beq+ EatRupt ; No redrive, just exit... | |
2467 | ||
2468 | lwz r3,saveexception(r13) ; Restore exception code | |
2469 | stw r4,SAVflags(r13) ; Set the flags | |
2470 | b Redrive ; Redrive the exception... | |
2471 | ||
2472 | ; | |
2473 | ; Make trace entry for lowmem_vectors internal debug | |
2474 | ; | |
; Allocates the next slot in the circular low-memory trace table and stamps it
; with R0, the 64-bit timebase, a fixed ID (0x111), and the caller's LR.
; NOTE(review): clobbers r15-r17 and r20; callers must not hold live values there.
2475 | #if TRCSAVE | |
2476 | cte: | |
2477 | lwz r20,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0) ; Pick up the current trace entry | |
2478 | lwz r16,LOW_ADDR(EXT(traceEnd)-EXT(ExceptionVectorsStart))(br0) ; Grab up the end of it all | |
2479 | addi r17,r20,LTR_size ; Point to the next trace entry | |
2480 | cmplw r17,r16 ; Do we need to wrap the trace table? | |
2481 | li r15,32 ; Second line of entry | |
2482 | bne+ ctenwrap ; We got a trace entry... | |
2483 | lwz r17,LOW_ADDR(EXT(traceStart)-EXT(ExceptionVectorsStart))(br0) ; Wrap back to the top | |
2484 | ||
2485 | ctenwrap: stw r17,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0) ; Set the next entry for the next guy | |
2486 | ||
; Pre-zero the entry's two cache lines with dcbz when the L1 is enabled, avoiding
; the read-for-ownership of lines we are about to overwrite entirely.
2487 | bf- featL1ena,skipz8 ; L1 cache is disabled... | |
2488 | dcbz 0,r20 ; Allocate cache for the entry | |
2489 | dcbz r15,r20 ; Zap the second half | |
2490 | skipz8: | |
2491 | ||
; Standard 32-bit PPC 64-bit timebase read: loop until the upper half reads the
; same before and after the lower half, so the pair is coherent.
2492 | ctegetTB: mftbu r16 ; Get the upper timebase | |
2493 | mftb r17 ; Get the lower timebase | |
2494 | mftbu r15 ; Get the upper one again | |
2495 | cmplw r16,r15 ; Did the top tick? | |
2496 | bne- ctegetTB ; Yeah, need to get it again... | |
2497 | ||
2498 | li r15,0x111 ; Get the special trace ID code | |
2499 | stw r0,LTR_r0(r20) ; Save R0 (usually used as an ID number) | |
2500 | stw r16,LTR_timeHi(r20) ; Set the upper part of TB | |
2501 | mflr r16 ; Get the return point | |
2502 | stw r17,LTR_timeLo(r20) ; Set the lower part of TB | |
2503 | sth r15,LTR_excpt(r20) ; Save the exception type | |
2504 | stw r16,LTR_srr0(r20) ; Save the return point | |
2505 | blr ; Leave... | |
2506 | #endif | |
2507 | ||
2508 | /* | |
2509 | * Start of the trace table | |
2510 | */ | |
2511 | ||
2512 | .align 12 /* Align to 4k boundary */ | |
2513 | ||
2514 | .globl EXT(traceTableBeg) | |
2515 | EXT(traceTableBeg): /* Start of trace table */ | |
2516 | /* .fill 2048,4,0 Make an 8k trace table for now */ | |
2517 | .fill 13760,4,0 /* Make a 13760-word (55040-byte) trace table for now */ | |
2518 | /* .fill 240000,4,0 Make a larger trace table for now */ | |
2519 | .globl EXT(traceTableEnd) | |
2520 | EXT(traceTableEnd): /* End of trace table */ | |
2521 | ||
2522 | .globl EXT(ExceptionVectorsEnd) | |
2523 | EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ | |
2524 | #ifndef HACKALERTHACKALERT | |
2525 | /* | |
2526 | * This .long needs to be here because the linker gets confused and tries to | |
2527 | * include the final label in a section in the next section if there is nothing | |
2528 | * after it | |
2529 | */ | |
2530 | .long 0 /* (HACK/HACK/HACK) */ | |
2531 | #endif | |
2532 | ||
; exception_end: a data word holding the byte size of the low-memory vector
; area (end minus start), for code that relocates or measures it.
2533 | .data | |
2534 | .align ALIGN | |
2535 | .globl EXT(exception_end) | |
2536 | EXT(exception_end): | |
2537 | .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ | |
2538 | ||
2539 |