/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * Low-memory exception vector code for PowerPC MACH
 *
 * These are the only routines that are ever run with
 * VM instruction translation switched off.
 *
 * The PowerPC is quite strange in that rather than having a set
 * of exception vectors, the exception handlers are installed
 * in well-known addresses in low memory. This code must be loaded
 * at ZERO in physical memory. The simplest way of doing this is
 * to load the kernel at zero, and specify this as the first file
 * on the linker command line.
 *
 * When this code is loaded into place, it is loaded at virtual
 * address KERNELBASE, which is mapped to zero (physical).
 *
 * This code handles all powerpc exceptions and is always entered
 * in supervisor mode with translation off. It saves the minimum
 * processor state before switching translation back on and
 * jumping to the appropriate routine.
 *
 * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions)
 *
 * We use some of this space to decide which stack to use, and where to
 * save the context etc, before jumping to a generic handler.
 */

#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/savearea.h>
#include <mach/ppc/vm_param.h>

#define TRCSAVE 0
#define CHECKSAVE 0
#define PERFTIMES 0
#define ESPDEBUG 0

#if TRCSAVE
#error The TRCSAVE option is broken.... Fix it
#endif

#define featL1ena 24
#define featSMP 25
#define featAltivec 26
#define wasNapping 27
#define featFP 28
#define specAccess 29

#define VECTOR_SEGMENT .section __VECTORS, __interrupts

	VECTOR_SEGMENT


	.globl EXT(ExceptionVectorsStart)

EXT(ExceptionVectorsStart):	/* Used if relocating the exception vectors */
baseR:				/* Used so we have more readable code */

/*
 * System reset - call debugger
 */
	. = 0xf0
	.globl EXT(ResetHandler)
EXT(ResetHandler):
	.long 0x0
	.long 0x0
	.long 0x0

	. = 0x100
.L_handler100:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type
	mfcr r11
	cmpi cr0,r13,RESET_HANDLER_START
	bne resetexc

	li r11,RESET_HANDLER_NULL
	stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type

	lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0)
	lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0)
	mtlr r4
	blr

resetexc:
	mtcr r11
	li r11,T_RESET /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Machine check
 */

	. = 0x200
.L_handler200:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_MACHINE_CHECK /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Data access - page fault, invalid memory rights for operation
 */

	. = 0x300
.L_handler300:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_DATA_ACCESS /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Instruction access - as for data access
 */

	. = 0x400
.L_handler400:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_INSTRUCTION_ACCESS /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * External interrupt
 */

	. = 0x500
.L_handler500:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_INTERRUPT /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Alignment - many reasons
 */

	. = 0x600
.L_handler600:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_ALIGNMENT|T_FAM /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Program - floating point exception, illegal inst, priv inst, user trap
 */

	. = 0x700
.L_handler700:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_PROGRAM|T_FAM /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Floating point disabled
 */

	. = 0x800
.L_handler800:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_FP_UNAVAILABLE /* Set 'rupt code */
	b .L_exception_entry /* Join common... */


/*
 * Decrementer - DEC register has passed zero.
 */

	. = 0x900
.L_handler900:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_DECREMENTER /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * I/O controller interface error - MACH does not use this
 */

	. = 0xA00
.L_handlerA00:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_IO_ERROR /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * Reserved
 */

	. = 0xB00
.L_handlerB00:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_RESERVED /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

#if 0
hackxxxx1:
	stmw r29,4(br0)
	lwz r29,0(br0)
	mr. r29,r29
	bne+ xxxx1
	lis r29,0x4000

xxxx1:
	stw r0,0(r29)
	mfsrr0 r30
	stw r30,4(r29)
	mtlr r30
	stw r30,8(r29)

	addi r29,r29,12
	stw r29,0(br0)

	lmw r29,4(br0)
	b hackxxxx2
#endif

;
;	System call - generated by the sc instruction
;
;	We handle the ultra-fast traps right here. They are:
;
;	0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
;	0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
;	0x00007FF2 - User state only - thread info
;	0x00007FF3 - User state only - floating point / vector facility status
;	0x00007FF4 - Kernel only - loadMSR
;
;	Note: none of these is handled if a virtual machine is running
;	Also, we treat SCs as kernel SCs if the RI bit is set
;

	. = 0xC00
.L_handlerC00:
	mtsprg 2,r13 ; Save R13
	mfsrr1 r13 ; Get SRR1 for loadMSR
	mtsprg 3,r11 ; Save R11
	rlwimi r13,r13,MSR_PR_BIT,0,0 ; Move PR bit to non-volatile CR0 bit 0
	mfcr r11 ; Save the CR
	mtcrf 0x81,r13 ; Get the moved PR and the RI for testing
	crnot 0,0 ; Get !PR
	cror 0,0,MSR_RI_BIT ; See if we have !PR or RI
	mfsprg r13,0 ; Get the per_proc_area
	bt- 0,uftInKern ; We are in the kernel...

	lwz r13,spcFlags(r13) ; Get the special flags
	rlwimi r13,r13,runningVMbit+1,31,31 ; Move VM flag after the 3 blue box flags
	mtcrf 1,r13 ; Set BB and VMM flags in CR7
	bt- 31,ufpVM ; fast paths running VM ...
	cmplwi cr5,r0,0x7FF2 ; Ultra fast path cthread info call?
	cmpwi cr6,r0,0x7FF3 ; Ultra fast path facility status?
	cror cr1_eq,cr5_lt,cr6_gt ; Set true if not 0x7FF2 and not 0x7FF3 and not negative
	bt- cr1_eq,notufp ; Exit if we can not be ultra fast...

	not. r0,r0 ; Flip bits and kind of subtract 1

	cmplwi cr1,r0,1 ; Is this a bb fast path?
	not r0,r0 ; Restore to entry state
	bf- bbNoMachSCbit,ufpUSuft ; We are not running BlueBox...
	bgt cr1,notufp ; This can not be a bb ufp...
#if 0
	b hackxxxx1
hackxxxx2:
#endif

	rlwimi r11,r13,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq
	mfsprg r13,0 ; Get back per_proc


	bne cr1,ufpIsBBpre ; This is the "isPreemptiveTask" call...

	lwz r0,ppbbTaskEnv(r13) ; Get the shadowed taskEnv from per_proc_area

ufpIsBBpre:
	mtcrf 0xFF,r11 ; Restore CR
	mfsprg r11,3 ; Restore R11
	mfsprg r13,2 ; Restore R13
	rfi ; All done, go back...

;
; Normal fast path...
;

ufpUSuft:	bge+ notufp ; Bail if negative... (ARRRGGG -- BRANCH TO A BRANCH!!!!!)
	mfsprg r11,3 ; Restore R11
	mfsprg r3,0 ; Get the per_proc_area
	mfsprg r13,2 ; Restore R13
	bne- cr5,isvecfp ; This is the facility stat call
	lwz r3,UAW(r3) ; Get the assist word
	rfi ; All done, scream back... (no need to restore CR or R11, they are volatile)
;
isvecfp:	lwz r3,spcFlags(r3) ; Get the facility status
	rfi ; Bail back...
;
notufp:	mtcrf 0xFF,r11 ; Restore the used CRs
	li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code
	b .L_exception_entry ; Join common...

uftInKern:	cmplwi r0,0x7FF4 ; Ultra fast path loadMSR?
	bne- notufp ; Someone is trying to cheat...

	mtcrf 0xFF,r11 ; Restore CR
	lwz r11,pfAvailable(r13) ; Pick up the feature flags
	mtsrr1 r3 ; Set new MSR
	mfsprg r13,2 ; Restore R13
	mtsprg 2,r11 ; Set the feature flags into sprg2
	mfsprg r11,3 ; Restore R11
	rfi ; Blast back


/*
 * Trace - generated by single stepping.
 * Performance monitor BE branch enable tracing/logging
 * is also done here now.  While this is permanently in the
 * system the impact is completely unnoticeable, as this code is
 * only executed when (a) a single step or branch exception is
 * hit, (b) in the single step debugger case there is so much
 * overhead already that the few extra instructions for testing for BE
 * are not even noticeable, and (c) the BE logging code is *only* run
 * when it is enabled by the tool, which will not happen during
 * normal system usage.
 *
 * Note that this trace is available only to user state so we do not
 * need to set sprg2 before returning.
 */

	. = 0xD00
.L_handlerD00:
	mtsprg 2,r13 ; Save R13
	mtsprg 3,r11 ; Save R11
	mfsrr1 r13 ; Get the old MSR
	mfcr r11 ; Get the CR
	rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state?
	beq- notspectr ; Yes, not special trace...
	mfsprg r13,0 ; Get the per_proc area
	lhz r13,PP_CPU_FLAGS(r13) ; Get the flags
	rlwinm. r13,r13,0,traceBEb+16,traceBEb+16 ; Special trace enabled?
	bne+ specbrtr ; Yeah...

notspectr:	mtcr r11 ; Restore CR
	li r11,T_TRACE|T_FAM ; Set interrupt code
	b .L_exception_entry ; Join common...

;
; We are doing the special branch trace
;

specbrtr:	mfsprg r13,0 ; Get the per_proc area
	stw r1,emfp0(r13) ; Save in a scratch area
	stw r2,emfp0+4(r13) ; Save in a scratch area
	stw r3,emfp0+8(r13) ; Save in a scratch area

	lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer
	lwz r3,spcTRp(r13) ; Pick up buffer position
	mr. r1,r1 ; Is it time to count?
	ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer
	cmplwi cr1,r3,4092 ; Set cr1_eq if we should take exception
	mfsrr0 r1 ; Get the pc
	stwx r1,r2,r3 ; Save it in the buffer
	addi r3,r3,4 ; Point to the next slot
	rlwinm r3,r3,0,20,31 ; Wrap the slot at one page
	stw r3,spcTRp(r13) ; Save the new slot
	lwz r1,emfp0(r13) ; Restore work register
	lwz r2,emfp0+4(r13) ; Restore work register
	lwz r3,emfp0+8(r13) ; Restore work register
	beq cr1,notspectr ; Buffer filled, make a rupt...

	mtcr r11 ; Restore the CR
	mfsprg r13,2 ; Restore R13
	mfsprg r11,3 ; Restore R11
	rfi ; Bail back...

/*
 * Floating point assist
 */

	. = 0xe00
.L_handlerE00:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_FP_ASSIST /* Set 'rupt code */
	b .L_exception_entry /* Join common... */


/*
 * Performance monitor interruption
 */

	. = 0xF00
PMIhandler:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_PERF_MON /* Set 'rupt code */
	b .L_exception_entry /* Join common... */


/*
 * VMX exception
 */

	. = 0xF20
VMXhandler:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_VMX /* Set 'rupt code */
	b .L_exception_entry /* Join common... */



/*
 * Instruction translation miss - we inline this code.
 * Upon entry (done for us by the machine):
 *	srr0 : addr of instruction that missed
 *	srr1 : bits 0-3  = saved CR0
 *	       4         = lru way bit
 *	       16-31     = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	imiss: ea that missed
 *	icmp : the compare value for the va that missed
 *	hash1: pointer to first hash pteg
 *	hash2: pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0: saved counter
 *	tmp1: junk
 *	tmp2: pointer to pteg
 *	tmp3: current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */
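/*
 * Illustrative sketch (not part of the kernel source): the PTEG search that the
 * software TLB miss handlers below perform, written out in C under the 603e
 * scheme described above (eight PTEs per PTEG, primary then secondary hash).
 * pteg_t and pteg_search() are hypothetical names; the hash1/hash2 pointers and
 * the icmp/dcmp compare value stand in for the corresponding SPRs.
 */
#if 0
typedef struct {			/* one PTE: word 0 (match word) and word 1 (RPN/flags) */
	unsigned int pte0;
	unsigned int pte1;
} pte_t;

typedef pte_t pteg_t[8];		/* a PTEG holds eight PTEs */

#define PTE0_H_BIT 0x00000040		/* secondary-hash (H) bit in pte0 */

/* Search the primary PTEG, then the secondary, for a pte0 equal to cmp. */
static pte_t *pteg_search(pteg_t *hash1, pteg_t *hash2, unsigned int cmp)
{
	pteg_t *pteg = hash1;
	int pass, i;

	for (pass = 0; pass < 2; pass++) {
		for (i = 0; i < 8; i++) {
			if ((*pteg)[i].pte0 == cmp)
				return &(*pteg)[i];	/* hit: reload the TLB from pte1 */
		}
		pteg = hash2;			/* not found: retry the secondary PTEG */
		cmp |= PTE0_H_BIT;		/* secondary-hash PTEs have H set */
	}
	return 0;				/* missed both PTEGs: raise the access fault */
}
#endif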
	. = 0x1000
.L_handler1000:
	mfspr tmp2, hash1
	mfctr tmp0 /* use tmp0 to save ctr */
	mfspr tmp3, icmp

.L_imiss_find_pte_in_pteg:
	li tmp1, 8 /* count */
	subi tmp2, tmp2, 8 /* offset for lwzu */
	mtctr tmp1 /* count... */

.L_imiss_pteg_loop:
	lwz tmp1, 8(tmp2) /* check pte0 for match... */
	addi tmp2, tmp2, 8
	cmpw cr0, tmp1, tmp3
#if 0
	bdnzf+ cr0, .L_imiss_pteg_loop
#else
	bc 0,2, .L_imiss_pteg_loop
#endif
	beq+ cr0, .L_imiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

	andi. tmp1, tmp3, MASK(PTE0_HASH_ID)
	bne- .L_imiss_do_no_hash_exception /* give up */

	mfspr tmp2, hash2
	ori tmp3, tmp3, MASK(PTE0_HASH_ID)
	b .L_imiss_find_pte_in_pteg

.L_imiss_found_pte:

	lwz tmp1, 4(tmp2) /* get pte1_t */
	andi. tmp3, tmp1, MASK(PTE1_WIMG_GUARD) /* Fault? */
	bne- .L_imiss_do_prot_exception /* Guarded - illegal */

/* Ok, we've found what we need to, restore and rfi! */

	mtctr tmp0 /* restore ctr */
	mfsrr1 tmp3
	mfspr tmp0, imiss
	mtcrf 0x80, tmp3 /* Restore CR0 */
	mtspr rpa, tmp1 /* set the pte */
	ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */
	tlbli tmp0
	sth tmp1, 6(tmp2)
	rfi

.L_imiss_do_prot_exception:
/* set up srr1 to indicate protection exception... */
	mfsrr1 tmp3
	andi. tmp2, tmp3, 0xffff
	addis tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16
	b .L_imiss_do_exception

.L_imiss_do_no_hash_exception:
/* clean up registers for protection exception... */
	mfsrr1 tmp3
	andi. tmp2, tmp3, 0xffff
	addis tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16

/* And the entry into the usual instruction fault handler ... */
.L_imiss_do_exception:

	mtctr tmp0 /* Restore ctr */
	mtsrr1 tmp2 /* Set up srr1 */
	mfmsr tmp0
	xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */
	mtcrf 0x80, tmp3 /* Restore CR0 */
	mtmsr tmp0 /* reset MSR[TGPR] */
	b .L_handler400 /* Instr Access */

/*
 * Data load translation miss
 *
 * Upon entry (done for us by the machine):
 *	srr0 : addr of instruction that missed
 *	srr1 : bits 0-3  = saved CR0
 *	       4         = lru way bit
 *	       5         = 1 if store
 *	       16-31     = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	dmiss: ea that missed
 *	dcmp : the compare value for the va that missed
 *	hash1: pointer to first hash pteg
 *	hash2: pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0: saved counter
 *	tmp1: junk
 *	tmp2: pointer to pteg
 *	tmp3: current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */

	. = 0x1100
.L_handler1100:
	mfspr tmp2, hash1
	mfctr tmp0 /* use tmp0 to save ctr */
	mfspr tmp3, dcmp

.L_dlmiss_find_pte_in_pteg:
	li tmp1, 8 /* count */
	subi tmp2, tmp2, 8 /* offset for lwzu */
	mtctr tmp1 /* count... */

.L_dlmiss_pteg_loop:
	lwz tmp1, 8(tmp2) /* check pte0 for match... */
	addi tmp2, tmp2, 8
	cmpw cr0, tmp1, tmp3
#if 0 /* How to write this correctly? */
	bdnzf+ cr0, .L_dlmiss_pteg_loop
#else
	bc 0,2, .L_dlmiss_pteg_loop
#endif
	beq+ cr0, .L_dmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

	andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */
	bne- .L_dmiss_do_no_hash_exception /* give up */

	mfspr tmp2, hash2
	ori tmp3, tmp3, MASK(PTE0_HASH_ID)
	b .L_dlmiss_find_pte_in_pteg

.L_dmiss_found_pte:

	lwz tmp1, 4(tmp2) /* get pte1_t */

/* Ok, we've found what we need to, restore and rfi! */

	mtctr tmp0 /* restore ctr */
	mfsrr1 tmp3
	mfspr tmp0, dmiss
	mtcrf 0x80, tmp3 /* Restore CR0 */
	mtspr rpa, tmp1 /* set the pte */
	ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */
	tlbld tmp0 /* load up tlb */
	sth tmp1, 6(tmp2) /* sth is faster? */
	rfi

/* This code is shared with data store translation miss */

.L_dmiss_do_no_hash_exception:
/* clean up registers for protection exception... */
	mfsrr1 tmp3
/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
	rlwinm tmp1, tmp3, 9, 6, 6
	addis tmp1, tmp1, MASK(DSISR_HASH) >> 16

/* And the entry into the usual data fault handler ... */

	mtctr tmp0 /* Restore ctr */
	andi. tmp2, tmp3, 0xffff /* Clean up srr1 */
	mtsrr1 tmp2 /* Set srr1 */
	mtdsisr tmp1
	mfspr tmp2, dmiss
	mtdar tmp2
	mfmsr tmp0
	xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */
	mtcrf 0x80, tmp3 /* Restore CR0 */
	sync /* Needed on some */
	mtmsr tmp0 /* reset MSR[TGPR] */
	b .L_handler300 /* Data Access */

/*
 * Data store translation miss (similar to data load)
 *
 * Upon entry (done for us by the machine):
 *	srr0 : addr of instruction that missed
 *	srr1 : bits 0-3  = saved CR0
 *	       4         = lru way bit
 *	       5         = 1 if store
 *	       16-31     = saved msr
 *	msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *	dmiss: ea that missed
 *	dcmp : the compare value for the va that missed
 *	hash1: pointer to first hash pteg
 *	hash2: pointer to 2nd hash pteg
 *
 * Register usage:
 *	tmp0: saved counter
 *	tmp1: junk
 *	tmp2: pointer to pteg
 *	tmp3: current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */

	. = 0x1200
.L_handler1200:
	mfspr tmp2, hash1
	mfctr tmp0 /* use tmp0 to save ctr */
	mfspr tmp3, dcmp

.L_dsmiss_find_pte_in_pteg:
	li tmp1, 8 /* count */
	subi tmp2, tmp2, 8 /* offset for lwzu */
	mtctr tmp1 /* count... */

.L_dsmiss_pteg_loop:
	lwz tmp1, 8(tmp2) /* check pte0 for match... */
	addi tmp2, tmp2, 8

	cmpw cr0, tmp1, tmp3
#if 0 /* I don't know how to write this properly */
	bdnzf+ cr0, .L_dsmiss_pteg_loop
#else
	bc 0,2, .L_dsmiss_pteg_loop
#endif
	beq+ cr0, .L_dsmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

	andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */
	bne- .L_dmiss_do_no_hash_exception /* give up */

	mfspr tmp2, hash2
	ori tmp3, tmp3, MASK(PTE0_HASH_ID)
	b .L_dsmiss_find_pte_in_pteg

.L_dsmiss_found_pte:

	lwz tmp1, 4(tmp2) /* get pte1_t */
	andi. tmp3, tmp1, MASK(PTE1_CHANGED) /* unchanged, check? */
	beq- .L_dsmiss_check_prot /* yes, check prot */

.L_dsmiss_resolved:
/* Ok, we've found what we need to, restore and rfi! */

	mtctr tmp0 /* restore ctr */
	mfsrr1 tmp3
	mfspr tmp0, dmiss
	mtcrf 0x80, tmp3 /* Restore CR0 */
	mtspr rpa, tmp1 /* set the pte */
	tlbld tmp0 /* load up tlb */
	rfi

.L_dsmiss_check_prot:
/* PTE is unchanged, we must check that we can write */
	rlwinm. tmp3, tmp1, 30, 0, 1 /* check PP[1] */
	bge- .L_dsmiss_check_prot_user_kern
	andi. tmp3, tmp1, 1 /* check PP[0] */
	beq+ .L_dsmiss_check_prot_ok

.L_dmiss_do_prot_exception:
/* clean up registers for protection exception... */
	mfsrr1 tmp3
/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
	rlwinm tmp1, tmp3, 9, 6, 6
	addis tmp1, tmp1, MASK(DSISR_PROT) >> 16

/* And the entry into the usual data fault handler ... */

	mtctr tmp0 /* Restore ctr */
	andi. tmp2, tmp3, 0xffff /* Clean up srr1 */
	mtsrr1 tmp2 /* Set srr1 */
	mtdsisr tmp1
	mfspr tmp2, dmiss
	mtdar tmp2
	mfmsr tmp0
	xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */
	mtcrf 0x80, tmp3 /* Restore CR0 */
	sync /* Needed on some */
	mtmsr tmp0 /* reset MSR[TGPR] */
	b .L_handler300 /* Data Access */

/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */
.L_dsmiss_check_prot_user_kern:
	mfsrr1 tmp3
	andi. tmp3, tmp3, MASK(MSR_PR)
	beq+ .L_dsmiss_check_prot_kern
	mfspr tmp3, dmiss /* check user privs */
	mfsrin tmp3, tmp3 /* get excepting SR */
	andis. tmp3, tmp3, 0x2000 /* Test SR ku bit */
	beq+ .L_dsmiss_check_prot_ok
	b .L_dmiss_do_prot_exception

.L_dsmiss_check_prot_kern:
	mfspr tmp3, dmiss /* check kern privs */
	mfsrin tmp3, tmp3
	andis. tmp3, tmp3, 0x4000 /* Test SR Ks bit */
	bne- .L_dmiss_do_prot_exception

.L_dsmiss_check_prot_ok:
/* Ok, mark as referenced and changed before resolving the fault */
	ori tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED))
	sth tmp1, 6(tmp2)
	b .L_dsmiss_resolved

/*
 * Instruction address breakpoint
 */

	. = 0x1300
.L_handler1300:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * System management interrupt
 */

	. = 0x1400
.L_handler1400:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

;
; Altivec Java Mode Assist interrupt
;

	. = 0x1600
.L_handler1600:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

;
; Thermal interruption
;

	. = 0x1700
.L_handler1700:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_THERMAL /* Set 'rupt code */
	b .L_exception_entry /* Join common... */

/*
 * There is now a large gap of reserved traps
 */

/*
 * Run mode/trace exception - single stepping on 601 processors
 */

	. = 0x2000
.L_handler2000:
	mtsprg 2,r13 /* Save R13 */
	mtsprg 3,r11 /* Save R11 */
	li r11,T_RUNMODE_TRACE /* Set 'rupt code */
	b .L_exception_entry /* Join common... */


/*
 * Filter Ultra Fast Path syscalls for VMM
 */
ufpVM:
	cmpwi cr6,r0,0x6004 ; Is it vmm_dispatch
	bne cr6,notufp ; Exit if not
	cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest
	cmpwi cr6,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister
	cror cr1_eq,cr5_lt,cr6_gt ; Set true if out of VMM Fast syscall range
	bt- cr1_eq,notufp ; Exit if out of range
	rlwinm r13,r13,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
	cmpwi cr0,r13,3 ; Are FamVMena and FamVMmode set
	bne+ notufp ; Exit if not in FAM
	b EXT(vmm_ufp) ; Ultra Fast Path syscall

/*
 * .L_exception_entry(type)
 *
 * This is the common exception handling routine called by any
 * type of system exception.
 *
 * ENTRY:	via a system exception handler, thus interrupts off, VM off.
 *		R13 has been saved in sprg2 and R11 in sprg3; R11 contains a
 *		number representing the exception's origin.
 *
 */

	.data
	.align ALIGN
	.globl EXT(exception_entry)
EXT(exception_entry):
	.long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */

	VECTOR_SEGMENT
	.align 5

.L_exception_entry:

/*
 *
 * Here we will save off a mess of registers, the special ones and R0-R12.  We use the DCBZ
 * instruction to clear and allocate a line in the cache.  This way we won't take any cache
 * misses, so these stores won't take all that long.  The exception is the first line, because
 * we can't do a DCBZ if the L1 D-cache is off.  The rest we will skip if the cache is
 * off as well.
 *
 * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
 * are ignored.
 */
	mfsprg r13,0 /* Load per_proc */
	lwz r13,next_savearea(r13) /* Get the exception save area */

	stw r1,saver1(r13) ; Save register 1
	stw r0,saver0(r13) ; Save register 0
	dcbtst 0,r13 ; We will need this in a bit
	mfspr r1,hid0 ; Get HID0
	mfcr r0 ; Save the CR
	mtcrf 255,r1 ; Get set to test for cache and sleep
	bf sleep,notsleep ; Skip if we are not trying to sleep

	mtcrf 255,r0 ; Restore the CR
	lwz r0,saver0(r13) ; Restore R0
	lwz r1,saver1(r13) ; Restore R1
	mfsprg r13,0 ; Get the per_proc
	lwz r11,pfAvailable(r13) ; Get back the feature flags
	mfsprg r13,2 ; Restore R13
	mtsprg 2,r11 ; Set sprg2 to the features
	mfsprg r11,3 ; Restore R11
	rfi ; Jump back into sleep code...
	.long 0 ; Leave these here please...
	.long 0
	.long 0
	.long 0
	.long 0
	.long 0
	.long 0
	.long 0

	.align 5

notsleep:	stw r2,saver2(r13) ; Save this one
	crmove featL1ena,dce ; Copy the cache enable bit
	rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits
	mtspr hid0,r2 ; Clear the nap/doze bits
	cmplw r2,r1 ; See if we were napping
	la r1,saver8(r13) ; Point to the next line in case we need it
	crnot wasNapping,cr0_eq ; Remember if we were napping
	mfsprg r2,0 ; Get the per_proc area
	bf- featL1ena,skipz1 ; L1 cache is disabled...
	dcbz 0,r1 ; Reserve our line in cache

;
; Remember, we are setting up CR6 with feature flags
;
skipz1:
	andi. r1,r11,T_FAM ; Check FAM bit
	stw r3,saver3(r13) ; Save this one
	stw r4,saver4(r13) ; Save this one
	andc r11,r11,r1 ; Clear FAM bit
	beq+ noFAM ; Is it FAM intercept
	mfsrr1 r3 ; Load srr1
	rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
	beq+ noFAM ; From supervisor state
	lwz r1,spcFlags(r2) ; Load spcFlags
	rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit
	cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode
	bne+ noFAM ; Can this context be FAM intercept
	lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept
	srwi r1,r11,2 ; divide r11 by 4
	lis r3,0x8000 ; Set r3 to 0x80000000
	srw r1,r3,r1 ; Set bit for current exception
	and. r1,r1,r4 ; And current exception with the intercept mask
	beq+ noFAM ; Is it FAM intercept
	b EXT(vmm_fam_handler)
noFAM:
	lwz r1,pfAvailable(r2) ; Get the CPU features flags
	la r3,savesrr0(r13) ; Point to the last line
	mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR
	stw r6,saver6(r13) ; Save this one
	crmove featSMP,pfSMPcapb ; See if we have a PIR
	stw r8,saver8(r13) ; Save this one
	crmove featAltivec,pfAltivecb ; Set the Altivec flag
	mfsrr0 r6 ; Get the interruption SRR0
	stw r8,saver8(r13) ; Save this one
	bf- featL1ena,skipz1a ; L1 cache is disabled...
	dcbz 0,r3 ; Reserve our line in cache
skipz1a:	crmove featFP,pfFloatb ; Remember that we have floating point
	stw r7,saver7(r13) ; Save this one
	lhz r8,PP_CPU_FLAGS(r2) ; Get the flags
	mfsrr1 r7 ; Get the interrupt SRR1
	rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
	stw r6,savesrr0(r13) ; Save the SRR0
	rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
	stw r5,saver5(r13) ; Save this one
	and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on
	mfsprg r6,2 ; Get interrupt time R13
	mtsprg 2,r1 ; Set the feature flags
	andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set
	mfsprg r8,3 ; Get rupt time R11
	stw r7,savesrr1(r13) ; Save SRR1
	rlwinm. r7,r7,MSR_RI_BIT,MSR_RI_BIT ; Is this a special case access fault?
	stw r6,saver13(r13) ; Save rupt R13
	crnot specAccess,cr0_eq ; Set that we are doing a special access if RI is set
	stw r8,saver11(r13) ; Save rupt time R11

getTB:	mftbu r6 ; Get the upper timebase
	mftb r7 ; Get the lower timebase
	mftbu r8 ; Get the upper one again
	cmplw r6,r8 ; Did the top tick?
	bne- getTB ; Yeah, need to get it again...

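/*
 * Illustrative sketch (not part of the kernel source): the getTB loop above in C.
 * The 64-bit timebase is read as two 32-bit halves, so the upper half is read
 * again after the lower half and the read is retried if it changed; a carry
 * between the two reads therefore cannot produce a torn value.  read_tbu() and
 * read_tbl() are hypothetical wrappers around the mftbu/mftb instructions.
 */
#if 0
extern unsigned int read_tbu(void);	/* hypothetical: mftbu */
extern unsigned int read_tbl(void);	/* hypothetical: mftb  */

static unsigned long long read_timebase(void)
{
	unsigned int hi, lo, hi2;

	do {
		hi  = read_tbu();	/* upper 32 bits */
		lo  = read_tbl();	/* lower 32 bits */
		hi2 = read_tbu();	/* upper again; differs only if TBL wrapped */
	} while (hi != hi2);		/* retry if the upper half ticked in between */

	return ((unsigned long long)hi << 32) | lo;
}
#endif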
	stw r8,ruptStamp(r2) ; Save the top of time stamp
	stw r8,SAVtime(r13) ; Save the top of time stamp
	la r6,saver16(r13) ; Point to the next cache line
	stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp
	stw r7,SAVtime+4(r13) ; Save the bottom of time stamp

	bf- featL1ena,skipz2 ; L1 cache is disabled...
	dcbz 0,r6 ; Allocate in cache
skipz2:
	stw r9,saver9(r13) ; Save this one

	stw r10,saver10(r13) ; Save this one
	mflr r4 ; Get the LR
	mfxer r10 ; Get the XER

	bf+ wasNapping,notNapping ; Skip if not waking up from nap...

	lwz r6,napStamp+4(r2) ; Pick up low order nap stamp
	lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return
	lwz r5,napStamp(r2) ; and high order
	subfc r7,r6,r7 ; Subtract low stamp from now
	lwz r6,napTotal+4(r2) ; Pick up low total
	subfe r5,r5,r8 ; Subtract high stamp and borrow from now
	lwz r8,napTotal(r2) ; Pick up the high total
	addc r6,r6,r7 ; Add low to total
	ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return
	adde r8,r8,r5 ; Add high and carry to total
	stw r6,napTotal+4(r2) ; Save the low total
	stw r8,napTotal(r2) ; Save the high total
	stw r3,savesrr0(r13) ; Modify to return to nap/doze exit

	rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored?
	beq notInSlowNap

	lwz r3,pfHID1(r2) ; Get saved HID1 value
	mtspr hid1,r3 ; Restore HID1

notInSlowNap:
	rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored?
	beq notNapping

	lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value
	mtspr msscr0,r3 ; Restore MSSCR0
	sync
	isync

notNapping:	stw r12,saver12(r13) ; Save this one

	stw r14,saver14(r13) ; Save this one
	stw r15,saver15(r13) ; Save this one
	la r14,saver24(r13) ; Point to the next block to save into
	stw r0,savecr(r13) ; Save rupt CR
	mfctr r6 ; Get the CTR
	stw r16,saver16(r13) ; Save this one
	stw r4,savelr(r13) ; Save rupt LR

	bf- featL1ena,skipz4 ; L1 cache is disabled...
	dcbz 0,r14 ; Allocate next save area line
skipz4:
	stw r17,saver17(r13) ; Save this one
	stw r18,saver18(r13) ; Save this one
	stw r6,savectr(r13) ; Save rupt CTR
	stw r19,saver19(r13) ; Save this one
	lis r12,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value
	mfdar r6 ; Get the rupt DAR
	stw r20,saver20(r13) ; Save this one

	bf+ specAccess,noSRsave ; Do not save SRs if this is not a special access...
	mfsr r14,sr0 ; Get SR0
	stw r14,savesr0(r13) ; and save
	mfsr r14,sr1 ; Get SR1
	stw r14,savesr1(r13) ; and save
	mfsr r14,sr2 ; get SR2
	stw r14,savesr2(r13) ; and save
	mfsr r14,sr3 ; get SR3
	stw r14,savesr3(r13) ; and save

noSRsave:	mtsr sr0,r12 ; Set the kernel SR0
	stw r21,saver21(r13) ; Save this one
	addis r12,r12,0x0010 ; Point to the second segment of kernel
	stw r10,savexer(r13) ; Save the rupt XER
	mtsr sr1,r12 ; Set the kernel SR1
	stw r30,saver30(r13) ; Save this one
	addis r12,r12,0x0010 ; Point to the third segment of kernel
	stw r31,saver31(r13) ; Save this one
	mtsr sr2,r12 ; Set the kernel SR2
	stw r22,saver22(r13) ; Save this one
	addis r12,r12,0x0010 ; Point to the fourth segment of kernel
	stw r23,saver23(r13) ; Save this one
	mtsr sr3,r12 ; Set the kernel SR3
	stw r24,saver24(r13) ; Save this one
	stw r25,saver25(r13) ; Save this one
	mfdsisr r7 ; Get the rupt DSISR
	stw r26,saver26(r13) ; Save this one
	stw r27,saver27(r13) ; Save this one
	li r10,emfp0 ; Point to floating point save
	stw r28,saver28(r13) ; Save this one
	stw r29,saver29(r13) ; Save this one
	mfsr r14,sr14 ; Get the copyin/out segment register
	stw r6,savedar(r13) ; Save the rupt DAR
	bf- featL1ena,skipz5a ; Do not do this if no L1...
	dcbz r10,r2 ; Clear and allocate an L1 slot

skipz5a:	stw r7,savedsisr(r13) ; Save the rupt code DSISR
	stw r11,saveexception(r13) ; Save the exception code
	stw r14,savesr14(r13) ; Save copyin/copyout


;
; Here we will save some floating point and vector status
; and we also set a clean default status for a new interrupt level.
; Note that we assume that emfp0 is on an altivec boundary
; and that R10 points to it (as a displacement from R2).
;

	lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable bit
	mfmsr r6 ; Get the current MSR value
	ori r8,r8,lo16(MASK(MSR_FP)) ; Add in the float enable
	li r19,0 ; Assume no Altivec
	or r7,r6,r8 ; Enable floating point
	li r9,0 ; Get set to clear VRSAVE
	mtmsr r7 ; Do it
	isync

	bf featAltivec,noavec ; No Altivec on this CPU...
	addi r14,r10,16 ; Displacement to second vector register
	stvxl v0,r10,r2 ; Save a register
	stvxl v1,r14,r2 ; Save a second register
	mfvscr v0 ; Get the vector status register
	la r28,savevscr(r13) ; Point to the status area
	vspltish v1,1 ; Turn on the non-Java bit and saturate
	stvxl v0,0,r28 ; Save the vector status
	vspltisw v0,1 ; Turn on the saturate bit
	mfspr r19,vrsave ; Get the VRSAVE register
	vxor v1,v1,v0 ; Turn off saturate
	mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level
	mtvscr v1 ; Set the non-java, no saturate status for new level

	lvxl v0,r10,r2 ; Restore first work register
	lvxl v1,r14,r2 ; Restore second work register

noavec:	stw r19,savevrsave(r13) ; Save the vector register usage flags

;
; We need to save the FPSCR as if it is normal context.
; This is because pending exceptions will cause an exception even if
; FP is disabled. We need to clear the FPSCR when we first start running in the
; kernel.
;

	bf- featFP,nofpexe ; No possible floating point exceptions...

	stfd f0,emfp0(r2) ; Save FPR0
	stfd f1,emfp1(r2) ; Save FPR1
	mffs f0 ; Get the FPSCR
	fsub f1,f1,f1 ; Make a 0
	stfd f0,savefpscrpad(r13) ; Save the FPSCR
	mtfsf 0xFF,f1 ; Clear it
	lfd f0,emfp0(r2) ; Restore FPR0
	lfd f1,emfp1(r2) ; Restore FPR1

nofpexe:	mtmsr r6 ; Turn off FP and vector
	isync


;
; Everything is saved at this point, except for FPRs, and VMX registers.
; Time for us to get a new savearea and then trace interrupt if it is enabled.
;

	li r0,SAVgeneral ; Get the savearea type value
	lis r23,hi16(EXT(trcWork)) ; Get the trace work area address
	mr r14,r11 ; Save the interrupt code across the call
	stb r0,SAVflags+2(r13) ; Mark valid context
	ori r23,r23,lo16(EXT(trcWork)) ; Get the rest
	rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2
	lwz r25,traceMask(r23) ; Get the trace mask
	addi r22,r22,10 ; Adjust code so we shift into CR5

	bl EXT(save_get_phys) ; Grab a savearea

	mfsprg r2,0 ; Get back the per_proc block
	rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed
	lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number
	li r26,0x8 ; Get start of cpu mask
	mr r11,r14 ; Get the exception code back
	srw r26,r26,r19 ; Get bit position of cpu number
	mtcrf 0x04,r7 ; Set CR5 to show trace or not
	and. r26,r26,r25 ; See if we trace this cpu
	stw r3,next_savearea(r2) ; Remember the savearea we just got for the next rupt
	crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled
;
; At this point, we can take another exception and lose nothing.
;

	lwz r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not)

	bne+ cr5,skipTrace ; Skip all of this if no tracing here...

;
; We select a trace entry using a compare and swap on the next entry field.
; Since we do not lock the actual trace buffer, there is a potential that
; another processor could wrap and trash our entry. Who cares?
;

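/*
 * Illustrative sketch (not part of the kernel source): claiming a trace slot
 * with a compare-and-swap loop, which is what the lwarx/stwcx. sequence below
 * does.  trace_next, trace_start, trace_end and LTR_size are hypothetical
 * stand-ins for the trcWork fields; the C11 atomics stand in for lwarx/stwcx.
 */
#if 0
#include <stdatomic.h>

#define LTR_size 64				/* hypothetical size of one trace entry */

extern _Atomic unsigned long trace_next;	/* next entry to allocate */
extern unsigned long trace_start, trace_end;	/* bounds of the trace table */

static unsigned long trace_claim_slot(void)
{
	unsigned long slot, next;

	do {
		slot = atomic_load(&trace_next);	/* like lwarx: read the current slot */
		next = slot + LTR_size;			/* point past it */
		if (next == trace_end)			/* need to wrap the trace table? */
			next = trace_start;
	} while (!atomic_compare_exchange_weak(&trace_next, &slot, next));
							/* like stwcx.: retry on collision */

	return slot;	/* caller fills in the entry; a wrapping CPU may still trash it */
}
#endif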
	lwz r25,traceStart(r23) ; Get the start of trace table
	lwz r26,traceEnd(r23) ; Get end of trace table

trcsel:	lwarx r20,0,r23 ; Get and reserve the next slot to allocate

	addi r22,r20,LTR_size ; Point to the next trace entry
	cmplw r22,r26 ; Do we need to wrap the trace table?
	bne+ gotTrcEnt ; No wrap, we got us a trace entry...

	mr r22,r25 ; Wrap back to start

gotTrcEnt:	stwcx. r22,0,r23 ; Try to update the current pointer
	bne- trcsel ; Collision, try again...

#if ESPDEBUG
	dcbf 0,r23 ; Force to memory
	sync
#endif

	bf- featL1ena,skipz6 ; L1 cache is disabled...
	dcbz 0,r20 ; Clear and allocate first trace line
skipz6:

;
; Let us cut that trace entry now.
;


	li r14,32 ; Offset to second line

	lwz r16,ruptStamp(r2) ; Get top of time base
	lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp

	bf- featL1ena,skipz7 ; L1 cache is disabled...
	dcbz r14,r20 ; Zap the second half

skipz7:	stw r16,LTR_timeHi(r20) ; Set the upper part of TB
	lwz r1,saver1(r13) ; Get back interrupt time R1
	stw r17,LTR_timeLo(r20) ; Set the lower part of TB
	lwz r18,saver2(r13) ; Get back interrupt time R2
	stw r0,LTR_r0(r20) ; Save off register 0
	lwz r3,saver3(r13) ; Restore this one
	sth r19,LTR_cpu(r20) ; Stash the cpu number
	stw r1,LTR_r1(r20) ; Save off register 1
	lwz r4,saver4(r13) ; Restore this one
	stw r18,LTR_r2(r20) ; Save off register 2
	lwz r5,saver5(r13) ; Restore this one
	stw r3,LTR_r3(r20) ; Save off register 3
	lwz r16,savecr(r13) ; Get the CR value
	stw r4,LTR_r4(r20) ; Save off register 4
	mfsrr0 r17 ; Get SRR0 back, it is still good
	stw r5,LTR_r5(r20) ; Save off register 5
	mfsrr1 r18 ; SRR1 is still good in here
	stw r16,LTR_cr(r20) ; Save the CR
	stw r17,LTR_srr0(r20) ; Save the SRR0
	stw r18,LTR_srr1(r20) ; Save the SRR1
	mfdar r17 ; Get this back
	lwz r16,savelr(r13) ; Get the LR
	stw r17,LTR_dar(r20) ; Save the DAR
	mfctr r17 ; Get the CTR (still good in register)
	stw r16,LTR_lr(r20) ; Save the LR
#if 0
	lwz r17,emfp1(r2) ; (TEST/DEBUG)
#endif
	stw r17,LTR_ctr(r20) ; Save off the CTR
	stw r13,LTR_save(r20) ; Save the savearea
	sth r11,LTR_excpt(r20) ; Save the exception type
#if ESPDEBUG
	addi r17,r20,32 ; (TEST/DEBUG)
	dcbst br0,r20 ; (TEST/DEBUG)
	dcbst br0,r17 ; (TEST/DEBUG)
	sync ; (TEST/DEBUG)
#endif

;
; We are done with the trace, except for maybe modifying the exception
; code later on.  That means that we need to preserve R20 and CR5.
;
; So, finish setting up the kernel registers now.
;

skipTrace:	lhz r21,PP_CPU_NUMBER(r2) ; Get the logical processor number
	lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters
	lwz r7,savesrr1(r13) ; Get the entering MSR
	ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters
	rlwinm r21,r21,8,20,23 ; Get index to processor counts
	mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code
	rlwinm r6,r0,1,0,31 ; Move sign bit to the end
	cmplwi cr1,r11,T_SYSTEM_CALL ; Did we get a system call?
	add r12,r12,r21 ; Point to the processor count area
	crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x
	lwzx r22,r12,r11 ; Get the old value
	cmplwi cr3,r11,T_IN_VAIN ; Was this all in vain? All for nothing?
	addi r22,r22,1 ; Count this one
	cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it
	stwx r22,r12,r11 ; Store it back

	beq- cr3,EatRupt ; Interrupt was all for nothing...
	cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check?
	bne+ cr1,noCutT ; Not a system call...
	bnl+ cr0,noCutT ; R0 not 0b10xxx...x, can not be any kind of magical system call...
	rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state?
	lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags
	beq+ FCisok ; From supervisor state...

	ori r1,r1,lo16(EXT(dgWork)) ; Again
	lwz r1,dgFlags(r1) ; Get the flags
	rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid?
	beq- noCutT ; No...

FCisok:	beq- cr2,isCutTrace ; This is a CutTrace system call...

;
; Here is where we call the firmware. If it returns T_IN_VAIN, that means
; that it has handled the interruption. Remember: thou shalt not trash R13
; or R20 while you are away. Anything else is ok.
;

	lwz r3,saver3(r13) ; Restore the first parameter
	bl EXT(FirmwareCall) ; Go handle the firmware call....

	cmplwi r3,T_IN_VAIN ; Was it handled?
	mfsprg r2,0 ; Restore the per_proc
	beq+ EatRupt ; Interrupt was handled...
	mr r11,r3 ; Put the rupt code into the right register
	b filter ; Go to the normal system call handler...

	.align 5

isCutTrace:
	li r7,-32768 ; Get a 0x8000 for the exception code
	bne- cr5,EatRupt ; Tracing is disabled...
	sth r7,LTR_excpt(r20) ; Modify the exception type to a CutTrace
	b EatRupt ; Time to go home...

; We are here because we did not have a CutTrace system call

	.align 5

noCutT:	beq- cr3,MachineCheck ; Whoa... Machine check...

;
; The following interrupts are the only ones that can be redriven
; by the higher level code or emulation routines.
;

Redrive:	cmplwi cr0,r11,T_IN_VAIN ; Did the signal handler eat the signal?
	mfsprg r2,0 ; Get the per_proc block
	beq+ cr0,EatRupt ; Bail now if we ate the rupt...


1345 | ; |
1346 | ; Here ss where we check for the other fast-path exceptions: translation exceptions, | |
1347 | ; emulated instructions, etc. | |
1348 | ; | |
9bccf70c | 1349 | |
de355530 A |
1350 | filter: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist |
1351 | cmplwi cr4,r11,T_ALIGNMENT ; See if we got an alignment exception | |
1352 | cmplwi cr1,r11,T_PROGRAM ; See if we got a program exception | |
1353 | cmplwi cr2,r11,T_INSTRUCTION_ACCESS ; Check on an ISI | |
1354 | bne+ cr3,noAltivecAssist ; It is not an assist... | |
1355 | b EXT(AltivecAssist) ; It is an assist... | |
1356 | ||
1357 | .align 5 | |
1c79356b | 1358 | |
de355530 A |
1359 | noAltivecAssist: |
1360 | bne+ cr4,noAlignAssist ; No alignment here... | |
1361 | b EXT(AlignAssist) ; Go try to emulate... | |
1c79356b | 1362 | |
de355530 | 1363 | .align 5 |
9bccf70c | 1364 | |
de355530 A |
1365 | noAlignAssist: |
1366 | bne+ cr1,noEmulate ; No emulation here... | |
1367 | b EXT(Emulate) ; Go try to emulate... | |
1c79356b | 1368 | |
de355530 | 1369 | .align 5 |
1c79356b | 1370 | |
de355530 A |
1371 | noEmulate: cmplwi cr3,r11,T_CSWITCH ; Are we context switching |
1372 | cmplwi r11,T_DATA_ACCESS ; Check on a DSI | |
1373 | beq- cr2,DSIorISI ; It is a PTE fault... | |
1374 | beq- cr3,conswtch ; It is a context switch... | |
1375 | bne+ PassUp ; It is not a PTE fault... | |
1376 | ||
1377 | ; | |
1378 | ; This call will either handle the fault, in which case it will not | |
1379 | ; return, or return to pass the fault up the line. | |
1380 | ; | |
1381 | ||
1382 | DSIorISI: mr r3,r11 ; Move the rupt code | |
d7e50217 | 1383 | |
de355530 A |
1384 | bl EXT(handlePF) ; See if we can handle this fault |
1385 | ||
1386 | lwz r0,savesrr1(r13) ; Get the MSR in use at exception time | |
1387 | mfsprg r2,0 ; Get back per_proc | |
1388 | cmplwi cr1,r3,T_IN_VAIN ; Was it handled? | |
d7e50217 | 1389 | rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? |
de355530 A |
1390 | mr r11,r3 ; Put interrupt code back into the right register |
1391 | beq+ cr1,EatRupt ; Yeah, just blast back to the user... | |
1392 | beq- NoFamPf | |
d7e50217 A |
1393 | lwz r1,spcFlags(r2) ; Load spcFlags |
1394 | rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit | |
1395 | cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode | |
de355530 | 1396 | bne- cr0,NoFamPf |
d7e50217 | 1397 | lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept |
d7e50217 | 1398 | srwi r1,r11,2 ; divide r11 by 4 |
de355530 | 1399 | lis r5,0x8000 ; Set r5 to 0x80000000 |
d7e50217 A |
1400 | srw r1,r5,r1 ; Set bit for current exception |
1401 | and. r1,r1,r6 ; And current exception with the intercept mask | |
de355530 A |
1402 | beq+ NoFamPf ; Not a FAM intercept... |
1403 | bl EXT(vmm_fam_pf_handler) | |
d7e50217 | 1404 | b EatRupt |
de355530 A |
1405 | NoFamPf: |
1406 | andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on | |
1407 | beq+ PassUp ; Not on, normal case... | |
9bccf70c A |
1408 | ; |
1409 | ; Here is where we handle the "recovery mode" stuff. | |
1410 | ; This is set by an emulation routine to trap any faults when it is fetching data or | |
1411 | ; instructions. | |
1412 | ; | |
1413 | ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0 | |
1414 | ; and R1 to the DAR and DSISR, respectively. | |
1415 | ; | |
de355530 | 1416 | lwz r4,savesrr0(r13) ; Get the failing instruction address |
1c79356b | 1417 | lwz r5,savecr(r13) ; Get the condition register |
de355530 A |
1418 | addi r4,r4,4 ; Skip failing instruction |
1419 | lwz r6,savedar(r13) ; Get the DAR | |
1c79356b | 1420 | rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed |
9bccf70c | 1421 | lwz r7,savedsisr(r13) ; Grab the DSISR |
de355530 A |
1422 | stw r0,savesrr1(r13) ; Save the result MSR |
1423 | stw r4,savesrr0(r13) ; Save resume address | |
9bccf70c | 1424 | stw r5,savecr(r13) ; And the resume CR |
de355530 A |
1425 | stw r6,saver0(r13) ; Pass back the DAR |
1426 | stw r7,saver1(r13) ; Pass back the DSISR | |
1c79356b A |
1427 | b EatRupt ; Resume emulated code |
1428 | ||
9bccf70c A |
1429 | ; |
1430 | ; Here is where we handle the context switch firmware call. The old | |
de355530 | 1431 | ; context has been saved, and the new savearea is in saver3. We will just |
9bccf70c A |
1432 | ; muck around with the savearea pointers, and then join the exit routine |
1433 | ; | |
1434 | ||
1435 | .align 5 | |
1436 | ||
1437 | conswtch: | |
1438 | mr r29,r13 ; Save the current savearea |
de355530 A |
1439 | rlwinm r30,r13,0,0,19 ; Get the start of the savearea block |
1440 | lwz r5,saver3(r13) ; Switch to the new savearea | |
1441 | lwz r30,SACvrswap(r30) ; get real to virtual translation | |
9bccf70c A |
1442 | mr r13,r5 ; Switch saveareas |
1443 | xor r27,r29,r30 ; Flip to virtual | |
de355530 | 1444 | stw r27,saver3(r5) ; Push the new savearea to the switch to routine |
9bccf70c | 1445 | b EatRupt ; Start it up... |
1c79356b A |
1446 | |
1447 | ; | |
1448 | ; Handle machine check here. | |
1449 | ; | |
1450 | ; ? | |
1451 | ; | |
9bccf70c A |
1452 | |
1453 | .align 5 | |
1454 | ||
1c79356b | 1455 | MachineCheck: |
9bccf70c | 1456 | |
de355530 A |
1457 | lwz r27,savesrr1(r13) ; ? |
1458 | rlwinm. r11,r27,0,dcmck,dcmck ; ? | |
1459 | beq+ notDCache ; ? | |
d7e50217 | 1460 | |
de355530 A |
1461 | mfspr r11,msscr0 ; ? |
1462 | dssall ; ? | |
d7e50217 A |
1463 | sync |
1464 | ||
de355530 | 1465 | lwz r27,savesrr1(r13) ; ? |
d7e50217 | 1466 | |
de355530 A |
1467 | hiccup: cmplw r27,r27 ; ? |
1468 | bne- hiccup ; ? | |
1469 | isync ; ? | |
1470 | ||
1471 | oris r11,r11,hi16(dl1hwfm) ; ? | |
1472 | mtspr msscr0,r11 ; ? | |
1473 | ||
1474 | rstbsy: mfspr r11,msscr0 ; ? | |
1c79356b | 1475 | |
de355530 A |
1476 | rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? |
1477 | bne rstbsy ; ? | |
1c79356b | 1478 | |
de355530 | 1479 | sync ; ? |
1c79356b | 1480 | |
de355530 | 1481 | b EatRupt ; ? |
1c79356b | 1482 | |
de355530 A |
1483 | .align 5 |
1484 | ||
1485 | notDCache: | |
1c79356b | 1486 | ; |
de355530 A |
1487 | ; Check if the failure was in |
1488 | ; ml_probe_read. If so, this is expected, so modify the PC to | |
1489 | ; ml_probe_read_mck and then eat the exception. |
1c79356b | 1490 | ; |
de355530 A |
1491 | lwz r30,savesrr0(r13) ; Get the failing PC |
1492 | lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part | |
1493 | lis r27,hi16(EXT(ml_probe_read)) ; High order part | |
1494 | ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part | |
1495 | ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part | |
1496 | cmplw r30,r28 ; Check highest possible | |
1497 | cmplw cr1,r30,r27 ; Check lowest | |
1498 | bge- PassUp ; Outside of range | |
1499 | blt- cr1,PassUp ; Outside of range | |
1500 | ; | |
1501 | ; We need to fix up the BATs here because the probe | |
1502 | ; routine messed them all up... As long as we are at it, | |
1503 | ; fix up to return directly to caller of probe. | |
1504 | ; | |
1505 | ||
1506 | lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address | |
1507 | ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address | |
1508 | ||
1509 | lwz r30,0(r11) ; Pick up DBAT 0 high | |
1510 | lwz r28,4(r11) ; Pick up DBAT 0 low | |
1511 | lwz r27,8(r11) ; Pick up DBAT 1 high | |
1512 | lwz r18,16(r11) ; Pick up DBAT 2 high | |
1513 | lwz r11,24(r11) ; Pick up DBAT 3 high | |
1c79356b | 1514 | |
de355530 A |
1515 | sync |
1516 | mtdbatu 0,r30 ; Restore DBAT 0 high | |
1517 | mtdbatl 0,r28 ; Restore DBAT 0 low | |
1518 | mtdbatu 1,r27 ; Restore DBAT 1 high | |
1519 | mtdbatu 2,r18 ; Restore DBAT 2 high | |
1520 | mtdbatu 3,r11 ; Restore DBAT 3 high | |
1521 | sync | |
1522 | ||
1523 | lwz r27,saver6(r13) ; Get the saved R6 value | |
1524 | mtspr hid0,r27 ; Restore HID0 | |
1525 | isync | |
1526 | ||
1527 | lwz r28,savelr(r13) ; Get return point | |
1528 | lwz r27,saver0(r13) ; Get the saved MSR | |
1529 | li r30,0 ; Get a failure RC | |
1530 | stw r28,savesrr0(r13) ; Set the return point | |
1531 | stw r27,savesrr1(r13) ; Set the continued MSR | |
1532 | stw r30,saver3(r13) ; Set return code | |
1533 | b EatRupt ; Yum, yum, eat it all up... | |
1c79356b A |
1534 | |
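;
;			In outline, the machine check fixup above behaves like the following C.
;			The structure, its fields, and restore_bats_and_hid0() are hypothetical
;			stand-ins; only the range check and the way the probe is made to fail
;			are meant to match the code.
;
/*
	#include <stdbool.h>
	#include <stdint.h>

	struct mck_save_sketch { uint32_t srr0, srr1, lr, r0, r3; };

	extern char ml_probe_read[], ml_probe_read_mck[];  // PC range in which the check is expected
	extern void restore_bats_and_hid0(struct mck_save_sketch *ss);

	static bool fixup_probe_mck(struct mck_save_sketch *ss)
	{
		uintptr_t pc = ss->srr0;
		if (pc < (uintptr_t)ml_probe_read || pc >= (uintptr_t)ml_probe_read_mck)
			return false;                          // not the probe: pass the machine check up

		restore_bats_and_hid0(ss);                     // the probe scrambled the DBATs and HID0
		ss->srr0 = ss->lr;                             // return straight to the probe's caller
		ss->srr1 = ss->r0;                             // with the MSR it stashed on entry
		ss->r3   = 0;                                  // and a failure return code
		return true;                                   // exception eaten
	}
*/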
1535 | /* | |
1536 | * Here's where we come back from some instruction emulator. If we come back with | |
1537 | * T_IN_VAIN, the emulation is done and we should just reload state and directly | |
1538 | * go back to the interrupted code. Otherwise, we'll check to see if | |
1539 | * we need to redrive with a different interrupt, i.e., DSI. | |
1540 | */ | |
1541 | ||
1542 | .align 5 | |
1543 | .globl EXT(EmulExit) | |
1544 | ||
1545 | LEXT(EmulExit) | |
1546 | ||
de355530 | 1547 | cmplwi r11,T_IN_VAIN ; Was it emulated? |
1c79356b | 1548 | lis r1,hi16(SAVredrive) ; Get redrive request |
de355530 A |
1549 | mfsprg r2,0 ; Restore the per_proc area |
1550 | beq+ EatRupt ; Yeah, just blast back to the user... | |
1c79356b A |
1551 | lwz r4,SAVflags(r13) ; Pick up the flags |
1552 | ||
1553 | and. r0,r4,r1 ; Check if redrive requested | |
de355530 | 1554 | andc r4,r4,r1 ; Clear redrive |
1c79356b | 1555 | |
de355530 | 1556 | beq+ PassUp ; No redrive, just keep on going... |
1c79356b | 1557 | |
de355530 | 1558 | stw r4,SAVflags(r13) ; Set the flags |
1c79356b A |
1559 | b Redrive ; Redrive the exception... |
1560 | ||
9bccf70c A |
1561 | ; |
1562 | ; Jump into main handler code switching on VM at the same time. | |
1563 | ; | |
1564 | ; We assume kernel data is mapped contiguously in physical | |
1565 | ; memory, otherwise we would need to switch on (at least) virtual data. | |
1566 | ; SRs are already set up. | |
1567 | ; | |
1c79356b | 1568 | |
d7e50217 | 1569 | .align 5 |
9bccf70c | 1570 | |
de355530 A |
1571 | PassUp: lis r2,hi16(EXT(exception_handlers)) ; Get exception vector address |
1572 | ori r2,r2,lo16(EXT(exception_handlers)) ; And low half | |
1573 | lwzx r6,r2,r11 ; Get the actual exception handler address | |
d7e50217 | 1574 | |
de355530 A |
1575 | PassUpDeb: mtsrr0 r6 ; Set up the handler address |
1576 | rlwinm r5,r13,0,0,19 ; Back off to the start of savearea block | |
1577 | ||
9bccf70c | 1578 | mfmsr r3 ; Get our MSR |
de355530 A |
1579 | rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear all but the trace bits |
1580 | li r2,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value | |
1581 | lwz r5,SACvrswap(r5) ; Get real to virtual conversion | |
1582 | or r2,r2,r3 ; Keep the trace bits if they are on | |
9bccf70c | 1583 | mr r3,r11 ; Pass the exception code in the parameter reg |
de355530 | 1584 | mtsrr1 r2 ; Set up our normal MSR value |
9bccf70c | 1585 | xor r4,r13,r5 ; Pass up the virtual address of context savearea |
9bccf70c | 1586 | |
d7e50217 | 1587 | rfi ; Launch the exception handler |
de355530 A |
1588 | |
1589 | .long 0 ; Leave these here gol durn it! | |
1590 | .long 0 | |
1591 | .long 0 | |
1592 | .long 0 | |
1593 | .long 0 | |
1594 | .long 0 | |
1595 | .long 0 | |
1596 | .long 0 | |
1c79356b A |
1597 | |
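;
;			The PassUp dispatch above indexes a table of handler entry points with the
;			trap code.  Roughly, and assuming (as the lwzx suggests) that trap codes are
;			byte offsets into the table:
;
/*
	#include <stdint.h>

	extern uintptr_t exception_handlers[];             // table of handler entry points

	static uintptr_t lookup_handler(uint32_t trap_code)
	{
		// base address plus the trap code itself, exactly as lwzx r6,r2,r11 does
		return *(uintptr_t *)((char *)exception_handlers + trap_code);
	}
*/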
1598 | /* | |
de355530 A |
1599 | * This routine is the only place where we return from an interruption. |
1600 | * Anyplace else is wrong. Even if I write the code, it's still wrong. | |
1601 | * Feel free to come by and slap me if I do do it--even though I may | |
1602 | * have had a good reason to do it. | |
1c79356b A |
1603 | * |
1604 | * All we need to remember here is that R13 must point to the savearea | |
1605 | * that has the context we need to load up. Translation and interruptions | |
1606 | * must be disabled. | |
1607 | * | |
1608 | * This code always loads the context in the savearea pointed to | |
1609 | * by R13. In the process, it throws away the savearea. If there | |
1610 | * is any tomfoolery with savearea stacks, it must be taken care of | |
1611 | * before we get here. | |
1612 | * | |
de355530 A |
1613 | * Speaking of tomfoolery, this is where we synthesize interruptions |
1614 | * if we need to. | |
1c79356b A |
1615 | */ |
1616 | ||
1617 | .align 5 | |
1618 | ||
9bccf70c A |
1619 | EatRupt: mfsprg r29,0 ; Get the per_proc block back |
1620 | mr r31,r13 ; Move the savearea pointer to the far end of the register set | |
1621 | ||
de355530 | 1622 | lwz r30,quickfret(r29) ; Pick up the quick fret list, if any |
d7e50217 | 1623 | |
de355530 A |
1624 | mfsprg r27,2 ; Get the processor features |
1625 | lwz r21,savesrr1(r31) ; Get destination MSR | |
9bccf70c A |
1626 | |
1627 | erchkfret: mr. r3,r30 ; Any savearea to quickly release? | |
1628 | beq+ ernoqfret ; No quickfrets... | |
de355530 | 1629 | lwz r30,SAVprev(r30) ; Chain back now |
9bccf70c A |
1630 | |
1631 | bl EXT(save_ret_phys) ; Put it on the free list | |
de355530 | 1632 | stw r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) |
9bccf70c | 1633 | b erchkfret ; Try the next one... |
1c79356b | 1634 | |
de355530 | 1635 | |
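;
;			The erchkfret loop above drains a singly linked list of saveareas.  A minimal
;			C sketch with illustrative type and field names:
;
/*
	struct savearea_sk { struct savearea_sk *prev; };  // SAVprev link

	struct per_proc_sk { struct savearea_sk *quickfret; };

	extern void save_ret_phys(struct savearea_sk *sv); // return one savearea to the free pool

	static void drain_quickfret(struct per_proc_sk *pp)
	{
		struct savearea_sk *sv = pp->quickfret;
		while (sv != 0) {
			struct savearea_sk *next = sv->prev;   // chain back before releasing
			save_ret_phys(sv);                     // put it on the free list
			pp->quickfret = next;                  // dequeue (ok to wait until after the release)
			sv = next;
		}
	}
*/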
9bccf70c A |
1636 | .align 5 |
1637 | ||
de355530 A |
1638 | ernoqfret: mtcrf 0x60,r27 ; Set CRs with thermal facilities |
1639 | rlwinm. r0,r21,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? | |
1640 | crandc 31,pfThermalb,pfThermIntb ; See if we have both thermometer and not interrupt facility | |
1641 | la r21,saver0(r31) ; Point to the first thing we restore | |
1642 | crandc 31,cr0_eq,31 ; Factor in enablement | |
1643 | bf 31,tempisok ; No thermal checking needed... | |
1644 | ||
1645 | ; | |
1646 | ; We get to here if 1) there is a thermal facility, and 2) the hardware | |
1647 | ; will not or cannot interrupt, and 3) the interrupt will be enabled after this point. |
1648 | ; | |
1649 | ||
1650 | mfspr r16,thrm3 ; Get thermal 3 | |
1651 | mfspr r14,thrm1 ; Get thermal 1 |
1652 | rlwinm. r16,r16,0,thrme,thrme ; Is the thermometer enabled? |
1653 | mfspr r15,thrm2 ; Get thermal 2 | |
1654 | beq- tempisok ; No thermometer... | |
1655 | rlwinm r16,r14,2,28,31 ; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits | |
1656 | srawi r0,r15,31 ; Make a mask of 1s if temperature is over |
1657 | rlwinm r30,r15,2,28,31 ; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits | |
1658 | ; | |
1659 | ; Note that the following compares check that V, TIN, and TIV are set and that TIE is cleared. |
1660 | ; This ensures that we only emulate when the hardware is not set to interrupt. |
1661 | ; | |
1662 | cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? | |
1663 | cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? | |
1664 | and r15,r15,r0 ; Keep high temp if that interrupted, zero if not | |
1665 | cror cr0_eq,cr0_eq,cr1_eq ; Merge both | |
1666 | andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did | |
1667 | bne+ tempisok ; Nope, temperature is in range |
1668 | ||
1669 | li r11,T_THERMAL ; Time to emulate a thermal interruption | |
1670 | or r14,r14,r15 ; Get contents of interrupting register | |
1671 | mr r13,r31 ; Make sure savearea is pointed to correctly | |
1672 | stw r11,saveexception(r31) ; Set the exception code | |
1673 | stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar | |
1674 | ||
1675 | ; | |
1676 | ; This code is here to prevent a problem that will probably never happen. If we are | |
1677 | ; returning from an emulation routine (alignment, altivec assist, etc.) the SRs may | |
1678 | ; not be set to the proper kernel values. Then, if we were to emulate a thermal here, | |
1679 | ; we would end up running in the kernel with a bogus SR. So, to prevent | |
1680 | ; this unfortunate circumstance, we slam the SRs here. (I worry too much...) | |
1681 | ; | |
1c79356b | 1682 | |
de355530 A |
1683 | lis r30,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value |
1684 | mtsr sr0,r30 ; Set the kernel SR0 | |
1685 | addis r30,r30,0x0010 ; Point to the second segment of kernel | |
1686 | mtsr sr1,r30 ; Set the kernel SR1 | |
1687 | addis r30,r30,0x0010 ; Point to the third segment of kernel | |
1688 | mtsr sr2,r30 ; Set the kernel SR2 | |
1689 | addis r30,r30,0x0010 ; Point to the third segment of kernel | |
1690 | mtsr sr3,r30 ; Set the kernel SR3 | |
1691 | b Redrive ; Go process this new interruption... | |
1692 | ||
1693 | ||
1694 | tempisok: dcbt 0,r21 ; Touch in the first thing we need | |
1c79356b | 1695 | |
1c79356b | 1696 | ; |
9bccf70c | 1697 | ; Here we release the savearea. |
1c79356b | 1698 | ; |
9bccf70c A |
1699 | ; Important!!!! The savearea is released before we are done with it. When the |
1700 | ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys | |
1701 | ; will trim the list, making the extra saveareas allocatable by another processor | |
1702 | ; The code in there must ALWAYS leave our savearea on the local list, otherwise | |
1703 | ; we could be very, very unhappy. The code there always queues the "just released" | |
1704 | ; savearea to the head of the local list. Then, if it needs to trim, it will | |
1705 | ; start with the SECOND savearea, leaving ours intact. | |
1c79356b | 1706 | ; |
de355530 A |
1707 | ; Build the SR values depending upon destination. If we are going to the kernel, |
1708 | ; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) | |
1709 | ; must be set to whatever it was at the last exception because it varies. All the rest | |
1710 | ; have been set up already. | |
1711 | ; | |
1712 | ; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and | |
1713 | ; SR14 (current implementation) must be restored always. The others must be set if | |
1714 | ; they are different from what was loaded last time (i.e., tasks have switched). |
1715 | ; We check the last loaded address space ID and if the same, we skip the loads. | |
1716 | ; This is a performance gain because SR manipulations are slow. | |
1717 | ; | |
1718 | ; There is also the special case when MSR_RI is set. This happens when we are trying to | |
1719 | ; make a special user state access when we are in the kernel. If we take an exception when | |
1720 | ; during that, the SRs may have been modified. Therefore, we need to restore them to | |
1721 | ; what they were before the exception because they could be non-standard. We saved them | |
1722 | ; during exception entry, so we will just load them here. | |
1c79356b A |
1723 | ; |
1724 | ||
9bccf70c A |
1725 | mr r3,r31 ; Get the exiting savearea in parm register |
1726 | bl EXT(save_ret_phys) ; Put it on the free list | |
1727 | ||
de355530 | 1728 | li r3,savesrr1 ; Get offset to the srr1 value |
9bccf70c A |
1729 | |
1730 | lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away) | |
de355530 A |
1731 | lwz r7,PP_USERPMAP(r29) ; Pick up the user pmap we may launch |
1732 | rlwinm. r17,r26,0,MSR_RI_BIT,MSR_RI_BIT ; See if we are returning from a special fault | |
1c79356b | 1733 | cmplw cr3,r14,r14 ; Set that we do not need to stop streams |
9bccf70c | 1734 | |
de355530 | 1735 | beq+ nSpecAcc ; Do not reload the kernel SRs if this is not a special access... |
9bccf70c | 1736 | |
de355530 A |
1737 | lwz r14,savesr0(r31) ; Get SR0 at fault time |
1738 | mtsr sr0,r14 ; Set SR0 | |
1739 | lwz r14,savesr1(r31) ; Get SR1 at fault time | |
1740 | mtsr sr1,r14 ; Set SR1 | |
1741 | lwz r14,savesr2(r31) ; Get SR2 at fault time | |
1742 | mtsr sr2,r14 ; Set SR2 | |
1743 | lwz r14,savesr3(r31) ; Get SR3 at fault time |
1744 | mtsr sr3,r14 ; Set SR3 | |
1745 | b segsdone ; We are all set up now... | |
9bccf70c | 1746 | |
de355530 | 1747 | .align 5 |
9bccf70c | 1748 | |
de355530 A |
1749 | nSpecAcc: rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system |
1750 | li r14,PMAP_SEGS ; Point to segments | |
1751 | bne+ gotouser ; We are going into user state... | |
1c79356b | 1752 | |
de355530 A |
1753 | lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time |
1754 | mtsr sr14,r14 ; Set SR14 | |
1755 | b segsdone ; We are all set up now... | |
1756 | ||
1757 | .align 5 | |
1c79356b | 1758 | |
de355530 A |
1759 | gotouser: dcbt r14,r7 ; Touch the segment register contents |
1760 | lwz r9,spcFlags(r29) ; Pick up the special flags | |
1761 | lwz r16,PP_LASTPMAP(r29) ; Pick up the last loaded pmap | |
1762 | addi r14,r14,32 ; Second half of pmap segments | |
1763 | rlwinm r9,r9,userProtKeybit-2,2,2 ; Isolate the user state protection key | |
1764 | lwz r15,PMAP_SPACE(r7) ; Get the primary space | |
1765 | lwz r13,PMAP_VFLAGS(r7) ; Get the flags | |
1766 | dcbt r14,r7 ; Touch second page | |
1767 | oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value | |
1768 | mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces | |
1769 | xor r15,r15,r9 ; Flip to proper segment register key | |
1770 | lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags | |
1c79356b | 1771 | |
de355530 A |
1772 | addis r13,r15,0x0000 ; Get SR0 value |
1773 | bf 16,nlsr0 ; No alternate here... | |
1774 | lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value | |
1c79356b | 1775 | |
de355530 A |
1776 | nlsr0: mtsr sr0,r13 ; Load up the SR |
1777 | rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on | |
1c79356b | 1778 | |
de355530 A |
1779 | addis r13,r15,0x0010 ; Get SR1 value |
1780 | bf 17,nlsr1 ; No alternate here... | |
1781 | lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value | |
1782 | ||
1783 | nlsr1: mtsr sr1,r13 ; Load up the SR | |
1784 | or r26,r26,r9 ; Flip on the BE bit for special trace if needed | |
1c79356b | 1785 | |
de355530 | 1786 | cmplw cr3,r7,r16 ; Are we running the same segs as last time? |
1c79356b | 1787 | |
de355530 A |
1788 | addis r13,r15,0x0020 ; Get SR2 value |
1789 | bf 18,nlsr2 ; No alternate here... | |
1790 | lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value | |
1791 | ||
1792 | nlsr2: mtsr sr2,r13 ; Load up the SR | |
1c79356b | 1793 | |
de355530 A |
1794 | addis r13,r15,0x0030 ; Get SR3 value |
1795 | bf 19,nlsr3 ; No alternate here... | |
1796 | lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value | |
1797 | ||
1798 | nlsr3: mtsr sr3,r13 ; Load up the SR | |
1c79356b | 1799 | |
de355530 A |
1800 | addis r13,r15,0x00E0 ; Get SR14 value |
1801 | bf 30,nlsr14 ; No alternate here... | |
1802 | lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value | |
1803 | ||
1804 | nlsr14: mtsr sr14,r13 ; Load up the SR | |
1c79356b | 1805 | |
de355530 A |
1806 | beq+ cr3,segsdone ; All done if same pmap as last time... |
1807 | ||
1808 | stw r7,PP_LASTPMAP(r29) ; Remember what we just loaded | |
1809 | ||
1810 | addis r13,r15,0x0040 ; Get SR4 value | |
1811 | bf 20,nlsr4 ; No alternate here... | |
1812 | lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value | |
1813 | ||
1814 | nlsr4: mtsr sr4,r13 ; Load up the SR | |
1c79356b | 1815 | |
de355530 A |
1816 | addis r13,r15,0x0050 ; Get SR5 value |
1817 | bf 21,nlsr5 ; No alternate here... | |
1818 | lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value | |
1819 | ||
1820 | nlsr5: mtsr sr5,r13 ; Load up the SR | |
1c79356b | 1821 | |
de355530 A |
1822 | addis r13,r15,0x0060 ; Get SR6 value |
1823 | bf 22,nlsr6 ; No alternate here... | |
1824 | lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value | |
1c79356b | 1825 | |
de355530 A |
1826 | nlsr6: mtsr sr6,r13 ; Load up the SR |
1827 | ||
1828 | addis r13,r15,0x0070 ; Get SR7 value | |
1829 | bf 23,nlsr7 ; No alternate here... | |
1830 | lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value | |
1c79356b | 1831 | |
de355530 | 1832 | nlsr7: mtsr sr7,r13 ; Load up the SR |
1c79356b | 1833 | |
de355530 A |
1834 | addis r13,r15,0x0080 ; Get SR8 value |
1835 | bf 24,nlsr8 ; No alternate here... | |
1836 | lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value | |
1837 | ||
1838 | nlsr8: mtsr sr8,r13 ; Load up the SR | |
1c79356b | 1839 | |
de355530 A |
1840 | addis r13,r15,0x0090 ; Get SR9 value |
1841 | bf 25,nlsr9 ; No alternate here... | |
1842 | lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value | |
1c79356b | 1843 | |
de355530 A |
1844 | nlsr9: mtsr sr9,r13 ; Load up the SR |
1845 | ||
1846 | addis r13,r15,0x00A0 ; Get SR10 value | |
1847 | bf 26,nlsr10 ; No alternate here... | |
1848 | lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value | |
1c79356b | 1849 | |
de355530 | 1850 | nlsr10: mtsr sr10,r13 ; Load up the SR |
1c79356b | 1851 | |
de355530 A |
1852 | addis r13,r15,0x00B0 ; Get SR11 value |
1853 | bf 27,nlsr11 ; No alternate here... | |
1854 | lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value | |
1c79356b | 1855 | |
de355530 | 1856 | nlsr11: mtsr sr11,r13 ; Load up the SR |
1c79356b | 1857 | |
de355530 A |
1858 | addis r13,r15,0x00C0 ; Get SR12 value |
1859 | bf 28,nlsr12 ; No alternate here... | |
1860 | lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value | |
1861 | ||
1862 | nlsr12: mtsr sr12,r13 ; Load up the SR | |
1c79356b | 1863 | |
de355530 A |
1864 | addis r13,r15,0x00D0 ; Get SR13 value |
1865 | bf 29,nlsr13 ; No alternate here... | |
1866 | lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value | |
1867 | ||
1868 | nlsr13: mtsr sr13,r13 ; Load up the SR | |
9bccf70c | 1869 | |
de355530 A |
1870 | addis r13,r15,0x00F0 ; Get SR15 value |
1871 | bf 31,nlsr15 ; No alternate here... | |
1872 | lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value | |
1873 | ||
1874 | nlsr15: mtsr sr15,r13 ; Load up the SR | |
1875 | ||
1876 | segsdone: stwcx. r26,r3,r31 ; Blow away any reservations we hold | |
d7e50217 | 1877 | |
de355530 A |
1878 | li r21,emfp0 ; Point to the fp savearea |
1879 | lwz r25,savesrr0(r31) ; Get the SRR0 to use | |
1880 | la r28,saver8(r31) ; Point to the next line to use | |
1881 | dcbt r21,r29 ; Start moving in a work area | |
1882 | lwz r0,saver0(r31) ; Restore R0 | |
1883 | dcbt 0,r28 ; Touch it in | |
1884 | lwz r1,saver1(r31) ; Restore R1 | |
1885 | lwz r2,saver2(r31) ; Restore R2 | |
1886 | la r28,saver16(r31) ; Point to the next line to get | |
1887 | lwz r3,saver3(r31) ; Restore R3 | |
1c79356b | 1888 | mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) |
de355530 | 1889 | lwz r4,saver4(r31) ; Restore R4 |
9bccf70c | 1890 | mtsrr0 r25 ; Restore the SRR0 now |
de355530 | 1891 | lwz r5,saver5(r31) ; Restore R5 |
9bccf70c | 1892 | mtsrr1 r26 ; Restore the SRR1 now |
de355530 A |
1893 | lwz r6,saver6(r31) ; Restore R6 |
1894 | ||
1895 | dcbt 0,r28 ; Touch that next line on in | |
1896 | la r28,savevscr(r31) ; Point to the saved facility context | |
1897 | ||
1898 | lwz r7,saver7(r31) ; Restore R7 | |
1899 | lwz r8,saver8(r31) ; Restore R8 | |
1900 | lwz r9,saver9(r31) ; Restore R9 | |
1901 | mfmsr r26 ; Get the current MSR | |
1902 | dcbt 0,r28 ; Touch saved facility context | |
1903 | lwz r10,saver10(r31) ; Restore R10 | |
1904 | lwz r11,saver11(r31) ; Restore R11 | |
1905 | oris r26,r26,hi16(MASK(MSR_VEC)) ; Get the vector enable bit | |
1906 | lwz r12,saver12(r31) ; Restore R12 | |
1907 | ori r26,r26,lo16(MASK(MSR_FP)) ; Add in the float enable | |
1908 | lwz r13,saver13(r31) ; Restore R13 | |
1909 | la r28,saver24(r31) ; Point to the next line to do | |
9bccf70c | 1910 | |
de355530 A |
1911 | ; |
1912 | ; Note that floating point and vector will be enabled from here on until the RFI | |
1913 | ; | |
9bccf70c | 1914 | |
de355530 A |
1915 | mtmsr r26 ; Turn on vectors and floating point |
1916 | isync | |
1917 | ||
1918 | dcbt 0,r28 ; Touch next line to do | |
1919 | ||
1920 | lwz r14,saver14(r31) ; Restore R14 | |
1921 | lwz r15,saver15(r31) ; Restore R15 | |
1922 | ||
1923 | bf pfAltivecb,noavec3 ; No Altivec on this CPU... | |
9bccf70c | 1924 | |
de355530 | 1925 | la r28,savevscr(r31) ; Point to the status area |
9bccf70c A |
1926 | stvxl v0,r21,r29 ; Save a vector register |
1927 | lvxl v0,0,r28 ; Get the vector status | |
de355530 | 1928 | lwz r27,savevrsave(r31) ; Get the vrsave |
9bccf70c A |
1929 | mtvscr v0 ; Set the vector status |
1930 | ||
1931 | lvxl v0,r21,r29 ; Restore work vector register | |
de355530 A |
1932 | beq+ cr3,noavec2 ; SRs have not changed, no need to stop the streams... |
1933 | dssall ; Kill all data streams | |
1934 | sync | |
9bccf70c | 1935 | |
de355530 A |
1936 | noavec2: mtspr vrsave,r27 ; Set the vrsave |
1937 | ||
1938 | noavec3: bf- pfFloatb,nofphere ; Skip if no floating point... | |
9bccf70c | 1939 | |
1c79356b | 1940 | stfd f0,emfp0(r29) ; Save FP0 |
9bccf70c | 1941 | lfd f0,savefpscrpad(r31) ; Get the fpscr |
1c79356b A |
1942 | mtfsf 0xFF,f0 ; Restore fpscr |
1943 | lfd f0,emfp0(r29) ; Restore the used register | |
de355530 A |
1944 | |
1945 | nofphere: lwz r16,saver16(r31) ; Restore R16 | |
1946 | lwz r17,saver17(r31) ; Restore R17 | |
1947 | lwz r18,saver18(r31) ; Restore R18 | |
1948 | lwz r19,saver19(r31) ; Restore R19 | |
1949 | lwz r20,saver20(r31) ; Restore R20 | |
1950 | lwz r21,saver21(r31) ; Restore R21 | |
1951 | lwz r22,saver22(r31) ; Restore R22 | |
1952 | ||
1953 | lwz r23,saver23(r31) ; Restore R23 | |
1954 | lwz r24,saver24(r31) ; Restore R24 | |
1955 | lwz r25,saver25(r31) ; Restore R25 | |
1956 | lwz r26,saver26(r31) ; Restore R26 | |
1957 | lwz r27,saver27(r31) ; Restore R27 | |
1958 | ||
9bccf70c | 1959 | lwz r28,savecr(r31) ; Get CR to restore |
de355530 A |
1960 | |
1961 | lwz r29,savexer(r31) ; Get XER to restore | |
9bccf70c | 1962 | mtcr r28 ; Restore the CR |
de355530 | 1963 | lwz r28,savelr(r31) ; Get LR to restore |
9bccf70c | 1964 | mtxer r29 ; Restore the XER |
de355530 | 1965 | lwz r29,savectr(r31) ; Get the CTR to restore |
9bccf70c | 1966 | mtlr r28 ; Restore the LR |
de355530 | 1967 | lwz r28,saver30(r31) ; Get R30 |
9bccf70c | 1968 | mtctr r29 ; Restore the CTR |
de355530 A |
1969 | lwz r29,saver31(r31) ; Get R31 |
1970 | mtsprg 2,r28 ; Save R30 for later | |
1971 | lwz r28,saver28(r31) ; Restore R28 | |
9bccf70c | 1972 | mtsprg 3,r29 ; Save R31 for later |
de355530 | 1973 | lwz r29,saver29(r31) ; Restore R29 |
1c79356b | 1974 | |
de355530 A |
1975 | mfsprg r31,0 ; Get per_proc |
1976 | mfsprg r30,2 ; Restore R30 | |
1977 | lwz r31,pfAvailable(r31) ; Get the feature flags | |
1c79356b | 1978 | mtsprg 2,r31 ; Set the feature flags |
9bccf70c | 1979 | mfsprg r31,3 ; Restore R31 |
1c79356b | 1980 | |
de355530 A |
1981 | rfi ; Click heels three times and think very hard that there is no place like home... |
1982 | ||
1983 | .long 0 ; Leave this here | |
1984 | .long 0 | |
1985 | .long 0 | |
1986 | .long 0 | |
1987 | .long 0 | |
1988 | .long 0 | |
1989 | .long 0 | |
1990 | .long 0 | |
1991 | ||
1c79356b A |
1992 | |
1993 | ||
1994 | ||
1995 | /* | |
1996 | * exception_exit(savearea *) | |
1997 | * | |
1998 | * | |
1999 | * ENTRY : IR and/or DR and/or interruptions can be on | |
de355530 | 2000 | * R3 points to the physical address of a savearea |
1c79356b A |
2001 | */ |
2002 | ||
2003 | .align 5 | |
2004 | .globl EXT(exception_exit) | |
2005 | ||
2006 | LEXT(exception_exit) | |
2007 | ||
2008 | mfsprg r29,2 ; Get feature flags | |
de355530 | 2009 | mfmsr r30 ; Get the current MSR |
d7e50217 | 2010 | mtcrf 0x04,r29 ; Set the features |
de355530 A |
2011 | rlwinm r30,r30,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off |
2012 | mr r31,r3 ; Get the savearea in the right register | |
2013 | rlwinm r30,r30,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off | |
2014 | li r10,savesrr0 ; Point to one of the first things we touch in the savearea on exit | |
2015 | andi. r30,r30,0x7FCF ; Turn off externals, IR, and DR | |
1c79356b | 2016 | lis r1,hi16(SAVredrive) ; Get redrive request |
de355530 | 2017 | |
1c79356b A |
2018 | bt pfNoMSRirb,eeNoMSR ; No MSR... |
2019 | ||
2020 | mtmsr r30 ; Translation and all off | |
2021 | isync ; Toss prefetch | |
2022 | b eeNoMSRx | |
2023 | ||
2024 | eeNoMSR: li r0,loadMSR ; Get the MSR setter SC | |
2025 | mr r3,r30 ; Get new MSR | |
2026 | sc ; Set it | |
2027 | ||
de355530 A |
2028 | eeNoMSRx: dcbt r10,r31 ; Touch in the first stuff we restore |
2029 | mfsprg r2,0 ; Get the per_proc block | |
1c79356b A |
2030 | lwz r4,SAVflags(r31) ; Pick up the flags |
2031 | mr r13,r31 ; Put savearea here also | |
2032 | ||
2033 | and. r0,r4,r1 ; Check if redrive requested | |
de355530 | 2034 | andc r4,r4,r1 ; Clear redrive |
1c79356b A |
2035 | |
2036 | dcbt br0,r2 ; We will need this in just a sec | |
2037 | ||
2038 | beq+ EatRupt ; No redrive, just exit... | |
2039 | ||
9bccf70c | 2040 | lwz r11,saveexception(r13) ; Restore exception code |
de355530 | 2041 | stw r4,SAVflags(r13) ; Set the flags |
1c79356b A |
2042 | b Redrive ; Redrive the exception... |
2043 | ||
1c79356b | 2044 | |
de355530 A |
2045 | /* |
2046 | * Start of the trace table | |
2047 | */ | |
2048 | ||
2049 | .align 12 /* Align to 4k boundary */ | |
2050 | ||
2051 | .globl EXT(traceTableBeg) | |
2052 | EXT(traceTableBeg): /* Start of trace table */ | |
2053 | /* .fill 2048,4,0 Make an 8k trace table for now */ | |
2054 | .fill 13760,4,0 /* Make a trace table for now */ |
2055 | /* .fill 240000,4,0 Make an .trace table for now */ | |
2056 | .globl EXT(traceTableEnd) | |
2057 | EXT(traceTableEnd): /* End of trace table */ | |
2058 | ||
1c79356b A |
2059 | .globl EXT(ExceptionVectorsEnd) |
2060 | EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ | |
de355530 A |
2061 | #ifndef HACKALERTHACKALERT |
2062 | /* | |
2063 | * This .long needs to be here because the linker gets confused and tries to | |
2064 | * include the final label of a section in the next section if there is nothing |
2065 | * after it | |
2066 | */ | |
2067 | .long 0 /* (HACK/HACK/HACK) */ | |
1c79356b A |
2068 | #endif |
2069 | ||
2070 | .data | |
2071 | .align ALIGN | |
2072 | .globl EXT(exception_end) | |
2073 | EXT(exception_end): | |
2074 | .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ | |
2075 | ||
2076 |