Commit | Line | Data |
---|---|---|
1c79356b A |
1 | /* |
2 | * Copyright (c) 2000 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
de355530 A |
6 | * The contents of this file constitute Original Code as defined in and |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
1c79356b | 11 | * |
de355530 A |
12 | * This Original Code and all software distributed under the License are |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
1c79356b A |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
de355530 A |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
1c79356b A |
19 | * |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | /* | |
23 | * @OSF_COPYRIGHT@ | |
24 | */ | |
25 | ||
de355530 A |
26 | /* |
27 | * Low-memory exception vector code for PowerPC MACH | |
28 | * | |
29 | * These are the only routines that are ever run with | |
30 | * VM instruction translation switched off. | |
31 | * | |
32 | * The PowerPC is quite strange in that rather than having a set | |
33 | * of exception vectors, the exception handlers are installed | |
34 | * in well-known addresses in low memory. This code must be loaded | |
35 | * at ZERO in physical memory. The simplest way of doing this is | |
36 | * to load the kernel at zero, and specify this as the first file | |
37 | * on the linker command line. | |
38 | * | |
39 | * When this code is loaded into place, it is loaded at virtual | |
40 | * address KERNELBASE, which is mapped to zero (physical). | |
41 | * | |
42 | * This code handles all powerpc exceptions and is always entered | |
43 | * in supervisor mode with translation off. It saves the minimum | |
44 | * processor state before switching back on translation and | |
45 | * jumping to the appropriate routine. | |
46 | * | |
47 | * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions) | |
48 | * | |
49 | * We use some of this space to decide which stack to use, and where to | |
50 | * save the context etc, before jumping to a generic handler. | |
51 | */ | |
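/*
 * For reference (added summary, not original commentary): each vector stub
 * below follows the same save-and-dispatch pattern,
 *
 *	mtsprg	2,r13			; stash interrupt-time R13 in sprg2
 *	mtsprg	3,r11			; stash interrupt-time R11 in sprg3
 *	li	r11,T_xxx		; load the interruption code (some OR in T_FAM)
 *	b	.L_exception_entry	; join the common save path
 *
 * so sprg2 and sprg3 carry live state from the vector until the common code
 * has copied them into the savearea (hence the "do not touch sprg2" notes in
 * the TLB miss handlers).
 */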
52 | ||
1c79356b A |
53 | #include <assym.s> |
54 | #include <debug.h> | |
55 | #include <cpus.h> | |
56 | #include <db_machine_commands.h> | |
de355530 | 57 | #include <mach_rt.h> |
1c79356b A |
58 | |
59 | #include <mach_debug.h> | |
60 | #include <ppc/asm.h> | |
61 | #include <ppc/proc_reg.h> | |
62 | #include <ppc/exception.h> | |
63 | #include <ppc/Performance.h> | |
9bccf70c | 64 | #include <ppc/savearea.h> |
1c79356b | 65 | #include <mach/ppc/vm_param.h> |
1c79356b | 66 | |
de355530 A |
67 | #define TRCSAVE 0 |
68 | #define CHECKSAVE 0 | |
69 | #define PERFTIMES 0 | |
1c79356b A |
70 | #define ESPDEBUG 0 |
71 | ||
de355530 A |
72 | #if TRCSAVE |
73 | #error The TRCSAVE option is broken.... Fix it | |
74 | #endif | |
75 | ||
76 | #define featL1ena 24 | |
77 | #define featSMP 25 | |
78 | #define featAltivec 26 | |
79 | #define wasNapping 27 | |
80 | #define featFP 28 | |
81 | #define specAccess 29 | |
1c79356b A |
82 | |
83 | #define VECTOR_SEGMENT .section __VECTORS, __interrupts | |
84 | ||
85 | VECTOR_SEGMENT | |
86 | ||
87 | ||
88 | .globl EXT(ExceptionVectorsStart) | |
89 | ||
90 | EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */ | |
91 | baseR: /* Used so we have more readable code */ | |
92 | ||
de355530 A |
93 | /* |
94 | * System reset - call debugger | |
95 | */ | |
1c79356b A |
96 | . = 0xf0 |
97 | .globl EXT(ResetHandler) | |
98 | EXT(ResetHandler): | |
99 | .long 0x0 | |
100 | .long 0x0 | |
101 | .long 0x0 | |
102 | ||
103 | . = 0x100 | |
104 | .L_handler100: | |
105 | mtsprg 2,r13 /* Save R13 */ | |
106 | mtsprg 3,r11 /* Save R11 */ | |
107 | lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type | |
108 | mfcr r11 | |
109 | cmpi cr0,r13,RESET_HANDLER_START | |
110 | bne resetexc | |
111 | ||
112 | li r11,RESET_HANDLER_NULL | |
113 | stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type | |
114 | ||
115 | lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0) | |
116 | lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0) | |
117 | mtlr r4 | |
118 | blr | |
119 | ||
de355530 A |
120 | resetexc: |
121 | mtcr r11 | |
1c79356b A |
122 | li r11,T_RESET /* Set 'rupt code */ |
123 | b .L_exception_entry /* Join common... */ | |
124 | ||
125 | /* | |
126 | * Machine check | |
127 | */ | |
128 | ||
129 | . = 0x200 | |
130 | .L_handler200: | |
de355530 A |
131 | mtsprg 2,r13 /* Save R13 */ |
132 | mtsprg 3,r11 /* Save R11 */ | |
133 | li r11,T_MACHINE_CHECK /* Set 'rupt code */ | |
134 | b .L_exception_entry /* Join common... */ | |
1c79356b A |
135 | |
136 | /* | |
137 | * Data access - page fault, invalid memory rights for operation | |
138 | */ | |
139 | ||
140 | . = 0x300 | |
141 | .L_handler300: | |
142 | mtsprg 2,r13 /* Save R13 */ | |
143 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
144 | li r11,T_DATA_ACCESS /* Set 'rupt code */ |
145 | b .L_exception_entry /* Join common... */ | |
146 | ||
147 | /* | |
148 | * Instruction access - as for data access | |
149 | */ | |
150 | ||
151 | . = 0x400 | |
152 | .L_handler400: | |
de355530 A |
153 | mtsprg 2,r13 /* Save R13 */ |
154 | mtsprg 3,r11 /* Save R11 */ | |
155 | li r11,T_INSTRUCTION_ACCESS /* Set 'rupt code */ | |
156 | b .L_exception_entry /* Join common... */ | |
1c79356b A |
157 | |
158 | /* | |
159 | * External interrupt | |
160 | */ | |
161 | ||
162 | . = 0x500 | |
163 | .L_handler500: | |
de355530 A |
164 | mtsprg 2,r13 /* Save R13 */ |
165 | mtsprg 3,r11 /* Save R11 */ | |
166 | li r11,T_INTERRUPT /* Set 'rupt code */ | |
167 | b .L_exception_entry /* Join common... */ | |
1c79356b A |
168 | |
169 | /* | |
170 | * Alignment - many reasons | |
171 | */ | |
172 | ||
173 | . = 0x600 | |
174 | .L_handler600: | |
175 | mtsprg 2,r13 /* Save R13 */ | |
176 | mtsprg 3,r11 /* Save R11 */ | |
d7e50217 | 177 | li r11,T_ALIGNMENT|T_FAM /* Set 'rupt code */ |
1c79356b A |
178 | b .L_exception_entry /* Join common... */ |
179 | ||
180 | /* | |
181 | * Program - floating point exception, illegal inst, priv inst, user trap | |
182 | */ | |
183 | ||
184 | . = 0x700 | |
185 | .L_handler700: | |
186 | mtsprg 2,r13 /* Save R13 */ | |
187 | mtsprg 3,r11 /* Save R11 */ | |
d7e50217 | 188 | li r11,T_PROGRAM|T_FAM /* Set 'rupt code */ |
1c79356b A |
189 | b .L_exception_entry /* Join common... */ |
190 | ||
191 | /* | |
192 | * Floating point disabled | |
193 | */ | |
194 | ||
195 | . = 0x800 | |
196 | .L_handler800: | |
197 | mtsprg 2,r13 /* Save R13 */ | |
198 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
199 | li r11,T_FP_UNAVAILABLE /* Set 'rupt code */ |
200 | b .L_exception_entry /* Join common... */ | |
201 | ||
202 | ||
203 | /* | |
204 | * Decrementer - DEC register has passed zero. | |
205 | */ | |
206 | ||
207 | . = 0x900 | |
208 | .L_handler900: | |
209 | mtsprg 2,r13 /* Save R13 */ | |
210 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
211 | li r11,T_DECREMENTER /* Set 'rupt code */ |
212 | b .L_exception_entry /* Join common... */ | |
213 | ||
214 | /* | |
215 | * I/O controller interface error - MACH does not use this | |
216 | */ | |
217 | ||
218 | . = 0xA00 | |
219 | .L_handlerA00: | |
220 | mtsprg 2,r13 /* Save R13 */ | |
221 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
222 | li r11,T_IO_ERROR /* Set 'rupt code */ |
223 | b .L_exception_entry /* Join common... */ | |
224 | ||
225 | /* | |
226 | * Reserved | |
227 | */ | |
228 | ||
229 | . = 0xB00 | |
230 | .L_handlerB00: | |
231 | mtsprg 2,r13 /* Save R13 */ | |
232 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
233 | li r11,T_RESERVED /* Set 'rupt code */ |
234 | b .L_exception_entry /* Join common... */ | |
235 | ||
de355530 A |
236 | #if 0 |
237 | hackxxxx1: | |
238 | stmw r29,4(br0) | |
239 | lwz r29,0(br0) | |
240 | mr. r29,r29 | |
241 | bne+ xxxx1 | |
242 | lis r29,0x4000 | |
243 | ||
244 | xxxx1: | |
245 | stw r0,0(r29) | |
246 | mfsrr0 r30 | |
247 | stw r30,4(r29) | |
248 | mtlr r30 | |
249 | stw r30,8(r29) | |
250 | ||
251 | addi r29,r29,12 | |
252 | stw r29,0(br0) | |
253 | ||
254 | lmw r29,4(br0) | |
255 | b hackxxxx2 | |
256 | #endif | |
257 | ||
258 | ||
0b4e3aa0 A |
259 | ; |
260 | ; System call - generated by the sc instruction | |
261 | ; | |
262 | ; We handle the ultra-fast traps right here. They are: | |
263 | ; | |
264 | ; 0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask | |
265 | ; 0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv | |
266 | ; 0x00007FF2 - User state only - thread info | |
267 | ; 0x00007FF3 - User state only - floating point / vector facility status | |
de355530 | 268 | ; 0x00007FF4 - Kernel only - loadMSR |
0b4e3aa0 A |
269 | ; |
270 | ; Note: none handled if virtual machine is running | |
9bccf70c | 271 | ; Also, we treat SCs as kernel SCs if the RI bit is set |
0b4e3aa0 | 272 | ; |
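;
;	(Added, illustrative only:) a user-state caller reaches the 0x7FF2 ultra
;	fast path by loading the code into r0 and issuing sc; the handler below
;	hands back the user assist word (UAW) from the per_proc area in r3 and
;	rfi's without ever taking the common exception path:
;
;		li	r0,0x7FF2		; select the thread info ultra fast trap
;		sc				; vectors to .L_handlerC00 below
;		; on return r3 holds the assist word; CR and R11 are clobbered
;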
1c79356b A |
273 | |
274 | . = 0xC00 | |
275 | .L_handlerC00: | |
1c79356b | 276 | mtsprg 2,r13 ; Save R13 |
0b4e3aa0 | 277 | mfsrr1 r13 ; Get SRR1 for loadMSR |
de355530 A |
278 | mtsprg 3,r11 ; Save R11 |
279 | rlwimi r13,r13,MSR_PR_BIT,0,0 ; Move PR bit to non-volatile CR0 bit 0 | |
280 | mfcr r11 ; Save the CR | |
281 | mtcrf 0x81,r13 ; Get the moved PR and the RI for testing | |
282 | crnot 0,0 ; Get !PR | |
283 | cror 0,0,MSR_RI_BIT ; See if we have !PR or RI | |
284 | mfsprg r13,0 ; Get the per_proc_area | |
285 | bt- 0,uftInKern ; We are in the kernel... | |
286 | ||
287 | lwz r13,spcFlags(r13) ; Get the special flags | |
288 | rlwimi r13,r13,runningVMbit+1,31,31 ; Move VM flag after the 3 blue box flags | |
289 | mtcrf 1,r13 ; Set BB and VMM flags in CR7 | |
290 | bt- 31,ufpVM ; fast paths running VM ... | |
291 | cmplwi cr5,r0,0x7FF2 ; Ultra fast path cthread info call? | |
292 | cmpwi cr6,r0,0x7FF3 ; Ultra fast path facility status? | |
293 | cror cr1_eq,cr5_lt,cr6_gt ; Set true if not 0x7FF2 and not 0x7FF3 and not negative | |
294 | bt- cr1_eq,notufp ; Exit if we can not be ultra fast... | |
295 | ||
296 | not. r0,r0 ; Flip bits and kind of subtract 1 | |
297 | ||
298 | cmplwi cr1,r0,1 ; Is this a bb fast path? | |
299 | not r0,r0 ; Restore to entry state | |
300 | bf- bbNoMachSCbit,ufpUSuft ; We are not running BlueBox... | |
301 | bgt cr1,notufp ; This can not be a bb ufp... | |
302 | #if 0 | |
303 | b hackxxxx1 | |
304 | hackxxxx2: | |
305 | #endif | |
0b4e3aa0 | 306 | |
de355530 A |
307 | rlwimi r11,r13,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq ; Copy preemptive task flag into user cr0_eq |
308 | mfsprg r13,0 ; Get back per_proc | |
0b4e3aa0 | 309 | |
0b4e3aa0 | 310 | |
de355530 | 311 | bne cr1,ufpIsBBpre ; This is the "isPreemptiveTask" call... |
1c79356b | 312 | |
de355530 | 313 | lwz r0,ppbbTaskEnv(r13) ; Get the shadowed taskEnv from per_proc_area |
d7e50217 | 314 | |
de355530 A |
315 | ufpIsBBpre: |
316 | mtcrf 0xFF,r11 ; Restore CR | |
317 | mfsprg r11,3 ; Restore R11 | |
318 | mfsprg r13,2 ; Restore R13 | |
319 | rfi ; All done, go back... | |
320 | ||
0b4e3aa0 | 321 | ; |
de355530 | 322 | ; Normal fast path... |
0b4e3aa0 A |
323 | ; |
324 | ||
de355530 | 325 | ufpUSuft: bge+ notufp ; Bail if negative... (ARRRGGG -- BRANCH TO A BRANCH!!!!!) |
1c79356b | 326 | mfsprg r11,3 ; Restore R11 |
de355530 | 327 | mfsprg r3,0 ; Get the per_proc_area |
1c79356b | 328 | mfsprg r13,2 ; Restore R13 |
de355530 A |
329 | bne- cr5,isvecfp ; This is the facility stat call |
330 | lwz r3,UAW(r3) ; Get the assist word | |
331 | rfi ; All done, scream back... (no need to restore CR or R11, they are volatile) | |
1c79356b | 332 | ; |
de355530 A |
333 | isvecfp: lwz r3,spcFlags(r3) ; Get the facility status |
334 | rfi ; Bail back... | |
335 | ; | |
336 | notufp: mtcrf 0xFF,r11 ; Restore the used CRs | |
337 | li r11,T_SYSTEM_CALL|T_FAM ; Set interrupt code | |
338 | b .L_exception_entry ; Join common... | |
1c79356b | 339 | |
0b4e3aa0 | 340 | uftInKern: cmplwi r0,0x7FF4 ; Ultra fast path loadMSR? |
1c79356b | 341 | bne- notufp ; Someone is trying to cheat... |
d7e50217 | 342 | |
de355530 A |
343 | mtcrf 0xFF,r11 ; Restore CR |
344 | lwz r11,pfAvailable(r13) ; Pick up the feature flags | |
345 | mtsrr1 r3 ; Set new MSR | |
346 | mfsprg r13,2 ; Restore R13 | |
347 | mtsprg 2,r11 ; Set the feature flags into sprg2 | |
348 | mfsprg r11,3 ; Restore R11 | |
349 | rfi ; Blast back | |
1c79356b A |
350 | |
351 | ||
352 | /* | |
353 | * Trace - generated by single stepping | |
354 | * Performance monitor BE (branch enable) tracing/logging | |
355 | * is also done here now. While this code is permanently in the | |
356 | * system, its impact is completely unnoticeable because it is | |
357 | * only executed when (a) a single step or branch exception is | |
358 | * hit, (b) in the single-step debugger case there is already so much | |
359 | * overhead that the few extra instructions testing for BE | |
360 | * are not even noticeable, and (c) the BE logging code is *only* run | |
361 | * when it is enabled by the tool, which will not happen during | |
362 | * normal system usage. | |
363 | * | |
364 | * Note that this trace is available only to user state so we do not | |
365 | * need to set sprg2 before returning. | |
366 | */ | |
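/*
 * Added note on the buffer arithmetic below: each pc_trace_buf entry is the
 * 4-byte SRR0 at the trace exception, the slot index advances by 4 and wraps
 * at one page, so the buffer holds 4096/4 = 1024 entries; when the index is
 * 4092 (the last slot of the page) cr1_eq is set and we fall back to a
 * normal trace interruption after filling it.
 */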
367 | ||
368 | . = 0xD00 | |
369 | .L_handlerD00: | |
d7e50217 | 370 | mtsprg 2,r13 ; Save R13 |
de355530 A |
371 | mtsprg 3,r11 ; Save R11 |
372 | mfsrr1 r13 ; Get the old MSR | |
373 | mfcr r11 ; Get the CR | |
374 | rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state? | |
375 | beq- notspectr ; Yes, not special trace... | |
376 | mfsprg r13,0 ; Get the per_proc area | |
377 | lhz r13,PP_CPU_FLAGS(r13) ; Get the flags | |
378 | rlwinm. r13,r13,0,traceBEb+16,traceBEb+16 ; Special trace enabled? | |
379 | bne+ specbrtr ; Yeah... | |
380 | ||
381 | notspectr: mtcr r11 ; Restore CR | |
d7e50217 | 382 | li r11,T_TRACE|T_FAM ; Set interrupt code |
1c79356b A |
383 | b .L_exception_entry ; Join common... |
384 | ||
385 | ; | |
386 | ; We are doing the special branch trace | |
387 | ; | |
388 | ||
de355530 A |
389 | specbrtr: mfsprg r13,0 ; Get the per_proc area |
390 | stw r1,emfp0(r13) ; Save in a scratch area | |
391 | stw r2,emfp0+4(r13) ; Save in a scratch area | |
392 | stw r3,emfp0+8(r13) ; Save in a scratch area | |
393 | ||
394 | lis r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer | |
395 | lwz r3,spcTRp(r13) ; Pick up buffer position | |
396 | mr. r1,r1 ; Is it time to count? | |
1c79356b | 397 | ori r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer |
de355530 | 398 | cmplwi cr1,r3,4092 ; Set cr1_eq if we should take exception |
0b4e3aa0 | 399 | mfsrr0 r1 ; Get the pc |
1c79356b A |
400 | stwx r1,r2,r3 ; Save it in the buffer |
401 | addi r3,r3,4 ; Point to the next slot | |
1c79356b | 402 | rlwinm r3,r3,0,20,31 ; Wrap the slot at one page |
de355530 A |
403 | stw r3,spcTRp(r13) ; Save the new slot |
404 | lwz r1,emfp0(r13) ; Restore work register | |
405 | lwz r2,emfp0+4(r13) ; Restore work register | |
406 | lwz r3,emfp0+8(r13) ; Restore work register | |
407 | beq cr1,notspectr ; Buffer filled, make a rupt... | |
408 | ||
409 | mtcr r11 ; Restore the CR | |
410 | mfsprg r13,2 ; Restore R13 | |
411 | mfsprg r11,3 ; Restore R11 | |
412 | rfi ; Bail back... | |
1c79356b A |
413 | |
414 | /* | |
415 | * Floating point assist | |
416 | */ | |
417 | ||
de355530 | 418 | . = 0xe00 |
1c79356b A |
419 | .L_handlerE00: |
420 | mtsprg 2,r13 /* Save R13 */ | |
421 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
422 | li r11,T_FP_ASSIST /* Set 'rupt code */ |
423 | b .L_exception_entry /* Join common... */ | |
424 | ||
425 | ||
426 | /* | |
427 | * Performance monitor interruption | |
428 | */ | |
429 | ||
430 | . = 0xF00 | |
431 | PMIhandler: | |
432 | mtsprg 2,r13 /* Save R13 */ | |
433 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
434 | li r11,T_PERF_MON /* Set 'rupt code */ |
435 | b .L_exception_entry /* Join common... */ | |
436 | ||
437 | ||
438 | /* | |
439 | * VMX exception | |
440 | */ | |
441 | ||
442 | . = 0xF20 | |
443 | VMXhandler: | |
444 | mtsprg 2,r13 /* Save R13 */ | |
445 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
446 | li r11,T_VMX /* Set 'rupt code */ |
447 | b .L_exception_entry /* Join common... */ | |
448 | ||
449 | ||
450 | ||
de355530 A |
451 | /* |
452 | * Instruction translation miss - we inline this code. | |
453 | * Upon entry (done for us by the machine): | |
454 | * srr0 : addr of instruction that missed | |
455 | * srr1 : bits 0-3 = saved CR0 | |
456 | * 4 = lru way bit | |
457 | * 16-31 = saved msr | |
458 | * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) | |
459 | * imiss: ea that missed | |
460 | * icmp : the compare value for the va that missed | |
461 | * hash1: pointer to first hash pteg | |
462 | * hash2: pointer to 2nd hash pteg | |
463 | * | |
464 | * Register usage: | |
465 | * tmp0: saved counter | |
466 | * tmp1: junk | |
467 | * tmp2: pointer to pteg | |
468 | * tmp3: current compare value | |
469 | * | |
470 | * This code is taken from the 603e User's Manual with | |
471 | * some bugfixes and minor improvements to save bytes and cycles | |
472 | * | |
473 | * NOTE: Do not touch sprg2 in here | |
474 | */ | |
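/*
 * Summary of the search below (added for clarity): a PTEG is eight 8-byte
 * PTEs (64 bytes). The loop compares pte0 of each of the 8 entries in the
 * primary group (from hash1) against icmp; the initial subi of 8 simply
 * pre-biases the pointer for the lwz 8(tmp2)/addi stride. On a miss the
 * search is repeated once on the secondary group (from hash2) with
 * PTE0_HASH_ID set in the compare value; if that also misses, srr1 is
 * marked with SRR1_TRANS_HASH and we fall into the normal ISI handler.
 */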
1c79356b | 475 | |
de355530 | 476 | . = 0x1000 |
1c79356b | 477 | .L_handler1000: |
de355530 A |
478 | mfspr tmp2, hash1 |
479 | mfctr tmp0 /* use tmp0 to save ctr */ | |
480 | mfspr tmp3, icmp | |
481 | ||
482 | .L_imiss_find_pte_in_pteg: | |
483 | li tmp1, 8 /* count */ | |
484 | subi tmp2, tmp2, 8 /* offset for lwzu */ | |
485 | mtctr tmp1 /* count... */ | |
1c79356b | 486 | |
de355530 A |
487 | .L_imiss_pteg_loop: |
488 | lwz tmp1, 8(tmp2) /* check pte0 for match... */ | |
489 | addi tmp2, tmp2, 8 | |
490 | cmpw cr0, tmp1, tmp3 | |
491 | #if 0 | |
492 | bdnzf+ cr0, .L_imiss_pteg_loop | |
493 | #else | |
494 | bc 0,2, .L_imiss_pteg_loop | |
495 | #endif | |
496 | beq+ cr0, .L_imiss_found_pte | |
497 | ||
498 | /* Not found in PTEG, we must scan 2nd then give up */ | |
499 | ||
500 | andi. tmp1, tmp3, MASK(PTE0_HASH_ID) | |
501 | bne- .L_imiss_do_no_hash_exception /* give up */ | |
502 | ||
503 | mfspr tmp2, hash2 | |
504 | ori tmp3, tmp3, MASK(PTE0_HASH_ID) | |
505 | b .L_imiss_find_pte_in_pteg | |
506 | ||
507 | .L_imiss_found_pte: | |
508 | ||
509 | lwz tmp1, 4(tmp2) /* get pte1_t */ | |
510 | andi. tmp3, tmp1, MASK(PTE1_WIMG_GUARD) /* Fault? */ | |
511 | bne- .L_imiss_do_prot_exception /* Guarded - illegal */ | |
512 | ||
513 | /* Ok, we've found what we need to, restore and rfi! */ | |
514 | ||
515 | mtctr tmp0 /* restore ctr */ | |
516 | mfsrr1 tmp3 | |
517 | mfspr tmp0, imiss | |
518 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
519 | mtspr rpa, tmp1 /* set the pte */ | |
520 | ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ | |
521 | tlbli tmp0 | |
522 | sth tmp1, 6(tmp2) | |
523 | rfi | |
524 | ||
525 | .L_imiss_do_prot_exception: | |
526 | /* set up srr1 to indicate protection exception... */ | |
527 | mfsrr1 tmp3 | |
528 | andi. tmp2, tmp3, 0xffff | |
529 | addis tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16 | |
530 | b .L_imiss_do_exception | |
531 | ||
532 | .L_imiss_do_no_hash_exception: | |
533 | /* clean up registers for protection exception... */ | |
534 | mfsrr1 tmp3 | |
535 | andi. tmp2, tmp3, 0xffff | |
536 | addis tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16 | |
537 | ||
538 | /* And the entry into the usual instruction fault handler ... */ | |
539 | .L_imiss_do_exception: | |
540 | ||
541 | mtctr tmp0 /* Restore ctr */ | |
542 | mtsrr1 tmp2 /* Set up srr1 */ | |
543 | mfmsr tmp0 | |
544 | xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ | |
545 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
546 | mtmsr tmp0 /* reset MSR[TGPR] */ | |
547 | b .L_handler400 /* Instr Access */ | |
548 | ||
549 | /* | |
550 | * Data load translation miss | |
551 | * | |
552 | * Upon entry (done for us by the machine): | |
553 | * srr0 : addr of instruction that missed | |
554 | * srr1 : bits 0-3 = saved CR0 | |
555 | * 4 = lru way bit | |
556 | * 5 = 1 if store | |
557 | * 16-31 = saved msr | |
558 | * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) | |
559 | * dmiss: ea that missed | |
560 | * dcmp : the compare value for the va that missed | |
561 | * hash1: pointer to first hash pteg | |
562 | * hash2: pointer to 2nd hash pteg | |
563 | * | |
564 | * Register usage: | |
565 | * tmp0: saved counter | |
566 | * tmp1: junk | |
567 | * tmp2: pointer to pteg | |
568 | * tmp3: current compare value | |
569 | * | |
570 | * This code is taken from the 603e User's Manual with | |
571 | * some bugfixes and minor improvements to save bytes and cycles | |
572 | * | |
573 | * NOTE: Do not touch sprg2 in here | |
574 | */ | |
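/*
 * Added note: the data-side search below is the same two-PTEG walk as the
 * instruction miss above; the differences are that the compare value comes
 * from dcmp, the TLB is loaded with tlbld, and a double miss builds a DSISR
 * (hash miss plus the write bit recovered from srr1) before joining the
 * normal 0x300 data access handler.
 */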
1c79356b | 575 | |
de355530 | 576 | . = 0x1100 |
1c79356b | 577 | .L_handler1100: |
de355530 A |
578 | mfspr tmp2, hash1 |
579 | mfctr tmp0 /* use tmp0 to save ctr */ | |
580 | mfspr tmp3, dcmp | |
581 | ||
582 | .L_dlmiss_find_pte_in_pteg: | |
583 | li tmp1, 8 /* count */ | |
584 | subi tmp2, tmp2, 8 /* offset for lwzu */ | |
585 | mtctr tmp1 /* count... */ | |
1c79356b | 586 | |
de355530 A |
587 | .L_dlmiss_pteg_loop: |
588 | lwz tmp1, 8(tmp2) /* check pte0 for match... */ | |
589 | addi tmp2, tmp2, 8 | |
590 | cmpw cr0, tmp1, tmp3 | |
591 | #if 0 /* How to write this correctly? */ | |
592 | bdnzf+ cr0, .L_dlmiss_pteg_loop | |
593 | #else | |
594 | bc 0,2, .L_dlmiss_pteg_loop | |
595 | #endif | |
596 | beq+ cr0, .L_dmiss_found_pte | |
597 | ||
598 | /* Not found in PTEG, we must scan 2nd then give up */ | |
599 | ||
600 | andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ | |
601 | bne- .L_dmiss_do_no_hash_exception /* give up */ | |
602 | ||
603 | mfspr tmp2, hash2 | |
604 | ori tmp3, tmp3, MASK(PTE0_HASH_ID) | |
605 | b .L_dlmiss_find_pte_in_pteg | |
606 | ||
607 | .L_dmiss_found_pte: | |
608 | ||
609 | lwz tmp1, 4(tmp2) /* get pte1_t */ | |
610 | ||
611 | /* Ok, we've found what we need to, restore and rfi! */ | |
612 | ||
613 | mtctr tmp0 /* restore ctr */ | |
614 | mfsrr1 tmp3 | |
615 | mfspr tmp0, dmiss | |
616 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
617 | mtspr rpa, tmp1 /* set the pte */ | |
618 | ori tmp1, tmp1, MASK(PTE1_REFERENCED) /* set referenced */ | |
619 | tlbld tmp0 /* load up tlb */ | |
620 | sth tmp1, 6(tmp2) /* sth is faster? */ | |
621 | rfi | |
622 | ||
623 | /* This code is shared with data store translation miss */ | |
624 | ||
625 | .L_dmiss_do_no_hash_exception: | |
626 | /* clean up registers for protection exception... */ | |
627 | mfsrr1 tmp3 | |
628 | /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ | |
629 | rlwinm tmp1, tmp3, 9, 6, 6 | |
630 | addis tmp1, tmp1, MASK(DSISR_HASH) >> 16 | |
631 | ||
632 | /* And the entry into the usual data fault handler ... */ | |
633 | ||
634 | mtctr tmp0 /* Restore ctr */ | |
635 | andi. tmp2, tmp3, 0xffff /* Clean up srr1 */ | |
636 | mtsrr1 tmp2 /* Set srr1 */ | |
637 | mtdsisr tmp1 | |
638 | mfspr tmp2, dmiss | |
639 | mtdar tmp2 | |
640 | mfmsr tmp0 | |
641 | xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ | |
642 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
643 | sync /* Needed on some */ | |
644 | mtmsr tmp0 /* reset MSR[TGPR] */ | |
645 | b .L_handler300 /* Data Access */ | |
646 | ||
647 | /* | |
648 | * Data store translation miss (similar to data load) | |
649 | * | |
650 | * Upon entry (done for us by the machine): | |
651 | * srr0 : addr of instruction that missed | |
652 | * srr1 : bits 0-3 = saved CR0 | |
653 | * 4 = lru way bit | |
654 | * 5 = 1 if store | |
655 | * 16-31 = saved msr | |
656 | * msr[tgpr] = 1 (so gpr0-3 become our temporary variables) | |
657 | * dmiss: ea that missed | |
658 | * dcmp : the compare value for the va that missed | |
659 | * hash1: pointer to first hash pteg | |
660 | * hash2: pointer to 2nd hash pteg | |
661 | * | |
662 | * Register usage: | |
663 | * tmp0: saved counter | |
664 | * tmp1: junk | |
665 | * tmp2: pointer to pteg | |
666 | * tmp3: current compare value | |
667 | * | |
668 | * This code is taken from the 603e User's Manual with | |
669 | * some bugfixes and minor improvements to save bytes and cycles | |
670 | * | |
671 | * NOTE: Do not touch sprg2 in here | |
672 | */ | |
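/*
 * Added note: the store-miss path below adds one wrinkle to the load case.
 * If the PTE already has PTE1_CHANGED set, the TLB is simply reloaded;
 * otherwise the PP bits and the segment register Ks/Ku key are checked
 * first, and only a permitted store marks the PTE referenced+changed before
 * resolving. A denied store raises a protection DSI instead.
 */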
1c79356b | 673 | |
de355530 | 674 | . = 0x1200 |
1c79356b | 675 | .L_handler1200: |
de355530 A |
676 | mfspr tmp2, hash1 |
677 | mfctr tmp0 /* use tmp0 to save ctr */ | |
678 | mfspr tmp3, dcmp | |
679 | ||
680 | .L_dsmiss_find_pte_in_pteg: | |
681 | li tmp1, 8 /* count */ | |
682 | subi tmp2, tmp2, 8 /* offset for lwzu */ | |
683 | mtctr tmp1 /* count... */ | |
684 | ||
685 | .L_dsmiss_pteg_loop: | |
686 | lwz tmp1, 8(tmp2) /* check pte0 for match... */ | |
687 | addi tmp2, tmp2, 8 | |
688 | ||
689 | cmpw cr0, tmp1, tmp3 | |
690 | #if 0 /* I don't know how to write this properly */ | |
691 | bdnzf+ cr0, .L_dsmiss_pteg_loop | |
692 | #else | |
693 | bc 0,2, .L_dsmiss_pteg_loop | |
694 | #endif | |
695 | beq+ cr0, .L_dsmiss_found_pte | |
696 | ||
697 | /* Not found in PTEG, we must scan 2nd then give up */ | |
698 | ||
699 | andi. tmp1, tmp3, MASK(PTE0_HASH_ID) /* already at 2nd? */ | |
700 | bne- .L_dmiss_do_no_hash_exception /* give up */ | |
701 | ||
702 | mfspr tmp2, hash2 | |
703 | ori tmp3, tmp3, MASK(PTE0_HASH_ID) | |
704 | b .L_dsmiss_find_pte_in_pteg | |
705 | ||
706 | .L_dsmiss_found_pte: | |
707 | ||
708 | lwz tmp1, 4(tmp2) /* get pte1_t */ | |
709 | andi. tmp3, tmp1, MASK(PTE1_CHANGED) /* unchanged, check? */ | |
710 | beq- .L_dsmiss_check_prot /* yes, check prot */ | |
711 | ||
712 | .L_dsmiss_resolved: | |
713 | /* Ok, we've found what we need to, restore and rfi! */ | |
714 | ||
715 | mtctr tmp0 /* restore ctr */ | |
716 | mfsrr1 tmp3 | |
717 | mfspr tmp0, dmiss | |
718 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
719 | mtspr rpa, tmp1 /* set the pte */ | |
720 | tlbld tmp0 /* load up tlb */ | |
721 | rfi | |
722 | ||
723 | .L_dsmiss_check_prot: | |
724 | /* PTE is unchanged, we must check that we can write */ | |
725 | rlwinm. tmp3, tmp1, 30, 0, 1 /* check PP[1] */ | |
726 | bge- .L_dsmiss_check_prot_user_kern | |
727 | andi. tmp3, tmp1, 1 /* check PP[0] */ | |
728 | beq+ .L_dsmiss_check_prot_ok | |
729 | ||
730 | .L_dmiss_do_prot_exception: | |
731 | /* clean up registers for protection exception... */ | |
732 | mfsrr1 tmp3 | |
733 | /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */ | |
734 | rlwinm tmp1, tmp3, 9, 6, 6 | |
735 | addis tmp1, tmp1, MASK(DSISR_PROT) >> 16 | |
736 | ||
737 | /* And the entry into the usual data fault handler ... */ | |
738 | ||
739 | mtctr tmp0 /* Restore ctr */ | |
740 | andi. tmp2, tmp3, 0xffff /* Clean up srr1 */ | |
741 | mtsrr1 tmp2 /* Set srr1 */ | |
742 | mtdsisr tmp1 | |
743 | mfspr tmp2, dmiss | |
744 | mtdar tmp2 | |
745 | mfmsr tmp0 | |
746 | xoris tmp0, tmp0, MASK(MSR_TGPR)>>16 /* no TGPR */ | |
747 | mtcrf 0x80, tmp3 /* Restore CR0 */ | |
748 | sync /* Needed on some */ | |
749 | mtmsr tmp0 /* reset MSR[TGPR] */ | |
750 | b .L_handler300 /* Data Access */ | |
751 | ||
752 | /* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */ | |
753 | .L_dsmiss_check_prot_user_kern: | |
754 | mfsrr1 tmp3 | |
755 | andi. tmp3, tmp3, MASK(MSR_PR) | |
756 | beq+ .L_dsmiss_check_prot_kern | |
757 | mfspr tmp3, dmiss /* check user privs */ | |
758 | mfsrin tmp3, tmp3 /* get excepting SR */ | |
759 | andis. tmp3, tmp3, 0x2000 /* Test SR ku bit */ | |
760 | beq+ .L_dsmiss_check_prot_ok | |
761 | b .L_dmiss_do_prot_exception | |
762 | ||
763 | .L_dsmiss_check_prot_kern: | |
764 | mfspr tmp3, dmiss /* check kern privs */ | |
765 | mfsrin tmp3, tmp3 | |
766 | andis. tmp3, tmp3, 0x4000 /* Test SR Ks bit */ | |
767 | bne- .L_dmiss_do_prot_exception | |
768 | ||
769 | .L_dsmiss_check_prot_ok: | |
770 | /* Ok, mark as referenced and changed before resolving the fault */ | |
771 | ori tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED)) | |
772 | sth tmp1, 6(tmp2) | |
773 | b .L_dsmiss_resolved | |
1c79356b A |
774 | |
775 | /* | |
776 | * Instruction address breakpoint | |
777 | */ | |
778 | ||
779 | . = 0x1300 | |
780 | .L_handler1300: | |
781 | mtsprg 2,r13 /* Save R13 */ | |
782 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
783 | li r11,T_INSTRUCTION_BKPT /* Set 'rupt code */ |
784 | b .L_exception_entry /* Join common... */ | |
785 | ||
786 | /* | |
787 | * System management interrupt | |
788 | */ | |
789 | ||
790 | . = 0x1400 | |
791 | .L_handler1400: | |
792 | mtsprg 2,r13 /* Save R13 */ | |
793 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
794 | li r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */ |
795 | b .L_exception_entry /* Join common... */ | |
796 | ||
797 | ; | |
de355530 | 798 | ; Altivec Java Mode Assist interrupt |
1c79356b A |
799 | ; |
800 | ||
801 | . = 0x1600 | |
802 | .L_handler1600: | |
803 | mtsprg 2,r13 /* Save R13 */ | |
804 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
805 | li r11,T_ALTIVEC_ASSIST /* Set 'rupt code */ |
806 | b .L_exception_entry /* Join common... */ | |
807 | ||
808 | ; | |
de355530 | 809 | ; Thermal interruption |
1c79356b A |
810 | ; |
811 | ||
812 | . = 0x1700 | |
813 | .L_handler1700: | |
814 | mtsprg 2,r13 /* Save R13 */ | |
815 | mtsprg 3,r11 /* Save R11 */ | |
1c79356b A |
816 | li r11,T_THERMAL /* Set 'rupt code */ |
817 | b .L_exception_entry /* Join common... */ | |
818 | ||
819 | /* | |
820 | * There is now a large gap of reserved traps | |
821 | */ | |
822 | ||
823 | /* | |
de355530 | 824 | * Run mode/ trace exception - single stepping on 601 processors |
1c79356b A |
825 | */ |
826 | ||
827 | . = 0x2000 | |
828 | .L_handler2000: | |
829 | mtsprg 2,r13 /* Save R13 */ | |
830 | mtsprg 3,r11 /* Save R11 */ | |
de355530 | 831 | li r11,T_RUNMODE_TRACE /* Set 'rupt code */ |
1c79356b A |
832 | b .L_exception_entry /* Join common... */ |
833 | ||
d7e50217 A |
834 | |
835 | /* | |
836 | * Filter Ultra Fast Path syscalls for VMM | |
837 | */ | |
838 | ufpVM: | |
de355530 A |
839 | cmpwi cr6,r0,0x6004 ; Is it vmm_dispatch |
840 | bne cr6,notufp ; Exit If not | |
d7e50217 | 841 | cmpwi cr5,r3,kvmmResumeGuest ; Compare r3 with kvmmResumeGuest |
de355530 A |
842 | cmpwi cr6,r3,kvmmSetGuestRegister ; Compare r3 with kvmmSetGuestRegister |
843 | cror cr1_eq,cr5_lt,cr6_gt ; Set true if out of VMM Fast syscall range | |
d7e50217 | 844 | bt- cr1_eq,notufp ; Exit if out of range |
de355530 A |
845 | rlwinm r13,r13,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit |
846 | cmpwi cr0,r13,3 ; Are FamVMena and FamVMmode set | |
847 | bne+ notufp ; Exit if not in FAM | |
d7e50217 A |
848 | b EXT(vmm_ufp) ; Ultra Fast Path syscall |
849 | ||
1c79356b A |
850 | /* |
851 | * .L_exception_entry(type) | |
852 | * | |
853 | * This is the common exception handling routine called by any | |
854 | * type of system exception. | |
855 | * | |
856 | * ENTRY: via a system exception handler, thus interrupts off, VM off. | |
857 | * Interrupt-time R13 and R11 have been saved in sprg2 and sprg3, | |
858 | * and r11 now contains a number representing the exception's origin. | |
859 | * | |
860 | */ | |
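/*
 * Rough roadmap of the common path that follows (added commentary): the
 * general and special registers are stored into the savearea named by
 * next_savearea, the timebase is stamped into both the per_proc and the
 * savearea, save_get_phys replaces the savearea just consumed, the
 * interruption is optionally logged in the trace table, and finally the
 * code filters the fast-path cases (CutTrace, firmware calls, PTE faults,
 * alignment/program emulation, context switch) before passing anything
 * else up to the higher-level handlers.
 */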
861 | ||
862 | .data | |
863 | .align ALIGN | |
864 | .globl EXT(exception_entry) | |
865 | EXT(exception_entry): | |
866 | .long .L_exception_entry-EXT(ExceptionVectorsStart) /* phys addr of fn */ | |
867 | ||
868 | VECTOR_SEGMENT | |
869 | .align 5 | |
870 | ||
871 | .L_exception_entry: | |
872 | ||
873 | /* | |
874 | * | |
875 | * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ | |
876 | * instruction to clear and allocate a line in the cache. This way we won't take any cache | |
877 | * misses, so these stores won't take all that long, except for the first line, because | |
878 | * we can't do a DCBZ if the L1 D-cache is off. We also skip the remaining DCBZs if the | |
879 | * cache is off. | |
880 | * | |
881 | * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions | |
882 | * are ignored. | |
883 | */ | |
de355530 A |
884 | mfsprg r13,0 /* Load per_proc */ |
885 | lwz r13,next_savearea(r13) /* Get the exception save area */ | |
1c79356b | 886 | |
de355530 A |
887 | stw r1,saver1(r13) ; Save register 1 |
888 | stw r0,saver0(r13) ; Save register 0 | |
889 | dcbtst 0,r13 ; We will need this in a bit | |
1c79356b | 890 | mfspr r1,hid0 ; Get HID0 |
de355530 A |
891 | mfcr r0 ; Save the CR |
892 | mtcrf 255,r1 ; Get set to test for cache and sleep | |
1c79356b A |
893 | bf sleep,notsleep ; Skip if we are not trying to sleep |
894 | ||
de355530 A |
895 | mtcrf 255,r0 ; Restore the CR |
896 | lwz r0,saver0(r13) ; Restore R0 | |
897 | lwz r1,saver1(r13) ; Restore R1 | |
1c79356b A |
898 | mfsprg r13,0 ; Get the per_proc |
899 | lwz r11,pfAvailable(r13) ; Get back the feature flags | |
900 | mfsprg r13,2 ; Restore R13 | |
901 | mtsprg 2,r11 ; Set sprg2 to the features | |
902 | mfsprg r11,3 ; Restore R11 | |
903 | rfi ; Jump back into sleep code... | |
904 | .long 0 ; Leave these here please... | |
905 | .long 0 | |
906 | .long 0 | |
907 | .long 0 | |
908 | .long 0 | |
909 | .long 0 | |
910 | .long 0 | |
911 | .long 0 | |
912 | ||
913 | .align 5 | |
914 | ||
de355530 A |
915 | notsleep: stw r2,saver2(r13) ; Save this one |
916 | crmove featL1ena,dce ; Copy the cache enable bit | |
1c79356b A |
917 | rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits |
918 | mtspr hid0,r2 ; Clear the nap/doze bits | |
de355530 A |
919 | cmplw r2,r1 ; See if we were napping |
920 | la r1,saver8(r13) ; Point to the next line in case we need it | |
921 | crnot wasNapping,cr0_eq ; Remember if we were napping | |
1c79356b | 922 | mfsprg r2,0 ; Get the per_proc area |
de355530 A |
923 | bf- featL1ena,skipz1 ; L1 cache is disabled... |
924 | dcbz 0,r1 ; Reserve our line in cache | |
1c79356b A |
925 | |
926 | ; | |
927 | ; Remember, we are setting up CR6 with feature flags | |
928 | ; | |
de355530 A |
929 | skipz1: |
930 | andi. r1,r11,T_FAM ; Check FAM bit | |
931 | stw r3,saver3(r13) ; Save this one | |
932 | stw r4,saver4(r13) ; Save this one | |
d7e50217 A |
933 | andc r11,r11,r1 ; Clear FAM bit |
934 | beq+ noFAM ; Is it FAM intercept | |
935 | mfsrr1 r3 ; Load srr1 | |
936 | rlwinm. r3,r3,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? | |
937 | beq+ noFAM ; From supervisor state | |
938 | lwz r1,spcFlags(r2) ; Load spcFlags | |
939 | rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit | |
940 | cmpwi cr0,r1,2 ; Check FamVMena set without FamVMmode | |
941 | bne+ noFAM ; Can this context be FAM intercept | |
942 | lwz r4,FAMintercept(r2) ; Load exceptions mask to intercept | |
943 | srwi r1,r11,2 ; divide r11 by 4 | |
944 | lis r3,0x8000 ; Set r3 to 0x80000000 | |
945 | srw r1,r3,r1 ; Set bit for current exception | |
946 | and. r1,r1,r4 ; And current exception with the intercept mask | |
947 | beq+ noFAM ; Is it FAM intercept | |
de355530 | 948 | b EXT(vmm_fam_handler) |
d7e50217 A |
949 | noFAM: |
950 | lwz r1,pfAvailable(r2) ; Get the CPU features flags | |
de355530 A |
951 | la r3,savesrr0(r13) ; Point to the last line |
952 | mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR | |
953 | stw r6,saver6(r13) ; Save this one | |
954 | crmove featSMP,pfSMPcapb ; See if we have a PIR | |
955 | stw r8,saver8(r13) ; Save this one | |
1c79356b | 956 | crmove featAltivec,pfAltivecb ; Set the Altivec flag |
9bccf70c | 957 | mfsrr0 r6 ; Get the interruption SRR0 |
de355530 A |
958 | stw r8,saver8(r13) ; Save this one |
959 | bf- featL1ena,skipz1a ; L1 cache is disabled... | |
960 | dcbz 0,r3 ; Reserve our line in cache | |
961 | skipz1a: crmove featFP,pfFloatb ; Remember that we have floating point | |
962 | stw r7,saver7(r13) ; Save this one | |
1c79356b | 963 | lhz r8,PP_CPU_FLAGS(r2) ; Get the flags |
9bccf70c | 964 | mfsrr1 r7 ; Get the interrupt SRR1 |
1c79356b | 965 | rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on |
de355530 | 966 | stw r6,savesrr0(r13) ; Save the SRR0 |
1c79356b | 967 | rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit |
de355530 | 968 | stw r5,saver5(r13) ; Save this one |
1c79356b A |
969 | and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on |
970 | mfsprg r6,2 ; Get interrupt time R13 | |
971 | mtsprg 2,r1 ; Set the feature flags | |
972 | andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set | |
9bccf70c | 973 | mfsprg r8,3 ; Get rupt time R11 |
de355530 A |
974 | stw r7,savesrr1(r13) ; Save SRR1 |
975 | rlwinm. r7,r7,MSR_RI_BIT,MSR_RI_BIT ; Is this a special case access fault? | |
976 | stw r6,saver13(r13) ; Save rupt R13 | |
977 | crnot specAccess,cr0_eq ; Set that we are doing a special access if RI is set | |
978 | stw r8,saver11(r13) ; Save rupt time R11 | |
1c79356b A |
979 | |
980 | getTB: mftbu r6 ; Get the upper timebase | |
981 | mftb r7 ; Get the lower timebase | |
982 | mftbu r8 ; Get the upper one again | |
983 | cmplw r6,r8 ; Did the top tick? | |
984 | bne- getTB ; Yeah, need to get it again... | |
985 | ||
986 | stw r8,ruptStamp(r2) ; Save the top of time stamp | |
9bccf70c | 987 | stw r8,SAVtime(r13) ; Save the top of time stamp |
de355530 | 988 | la r6,saver16(r13) ; Point to the next cache line |
1c79356b | 989 | stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp |
9bccf70c A |
990 | stw r7,SAVtime+4(r13) ; Save the bottom of time stamp |
991 | ||
de355530 A |
992 | bf- featL1ena,skipz2 ; L1 cache is disabled... |
993 | dcbz 0,r6 ; Allocate in cache | |
994 | skipz2: | |
995 | stw r9,saver9(r13) ; Save this one | |
1c79356b | 996 | |
de355530 | 997 | stw r10,saver10(r13) ; Save this one |
9bccf70c | 998 | mflr r4 ; Get the LR |
1c79356b A |
999 | mfxer r10 ; Get the XER |
1000 | ||
1001 | bf+ wasNapping,notNapping ; Skip if not waking up from nap... | |
1002 | ||
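; Added note: the sequence below accumulates the 64-bit nap time. subfc/subfe
; subtract the 64-bit nap stamp from the current timebase, propagating the
; borrow between the 32-bit halves, and addc/adde then add the difference
; into the 64-bit napTotal the same way.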
1003 | lwz r6,napStamp+4(r2) ; Pick up low order nap stamp | |
1004 | lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return | |
1005 | lwz r5,napStamp(r2) ; and high order | |
1006 | subfc r7,r6,r7 ; Subtract low stamp from now | |
1007 | lwz r6,napTotal+4(r2) ; Pick up low total | |
1008 | subfe r5,r5,r8 ; Subtract high stamp and borrow from now | |
1009 | lwz r8,napTotal(r2) ; Pick up the high total | |
1010 | addc r6,r6,r7 ; Add low to total | |
1011 | ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return | |
1012 | adde r8,r8,r5 ; Add high and carry to total | |
1013 | stw r6,napTotal+4(r2) ; Save the low total | |
1014 | stw r8,napTotal(r2) ; Save the high total | |
de355530 | 1015 | stw r3,savesrr0(r13) ; Modify to return to nap/doze exit |
9bccf70c | 1016 | |
de355530 | 1017 | rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored? |
9bccf70c A |
1018 | beq notInSlowNap |
1019 | ||
1020 | lwz r3,pfHID1(r2) ; Get saved HID1 value | |
de355530 | 1021 | mtspr hid1, r3 ; Restore HID1 |
d52fe63f | 1022 | |
9bccf70c | 1023 | notInSlowNap: |
de355530 | 1024 | rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored? |
9bccf70c | 1025 | beq notNapping |
d52fe63f | 1026 | |
9bccf70c | 1027 | lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value |
de355530 | 1028 | mtspr msscr0, r3 ; Restore MSSCR0 |
9bccf70c A |
1029 | sync |
1030 | isync | |
1031 | ||
de355530 | 1032 | notNapping: stw r12,saver12(r13) ; Save this one |
1c79356b | 1033 | |
de355530 A |
1034 | stw r14,saver14(r13) ; Save this one |
1035 | stw r15,saver15(r13) ; Save this one | |
9bccf70c | 1036 | la r14,saver24(r13) ; Point to the next block to save into |
de355530 | 1037 | stw r0,savecr(r13) ; Save rupt CR |
9bccf70c | 1038 | mfctr r6 ; Get the CTR |
de355530 A |
1039 | stw r16,saver16(r13) ; Save this one |
1040 | stw r4,savelr(r13) ; Save rupt LR | |
1c79356b | 1041 | |
de355530 A |
1042 | bf- featL1ena,skipz4 ; L1 cache is disabled... |
1043 | dcbz 0,r14 ; Allocate next save area line | |
1044 | skipz4: | |
1045 | stw r17,saver17(r13) ; Save this one | |
1046 | stw r18,saver18(r13) ; Save this one | |
1047 | stw r6,savectr(r13) ; Save rupt CTR | |
1048 | stw r19,saver19(r13) ; Save this one | |
1049 | lis r12,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value | |
9bccf70c | 1050 | mfdar r6 ; Get the rupt DAR |
de355530 A |
1051 | stw r20,saver20(r13) ; Save this one |
1052 | ||
1053 | bf+ specAccess,noSRsave ; Do not save SRs if this is not a special access... | |
1054 | mfsr r14,sr0 ; Get SR0 | |
1055 | stw r14,savesr0(r13) ; and save | |
1056 | mfsr r14,sr1 ; Get SR1 | |
1057 | stw r14,savesr1(r13) ; and save | |
1058 | mfsr r14,sr2 ; get SR2 | |
1059 | stw r14,savesr2(r13) ; and save | |
1060 | mfsr r14,sr3 ; get SR3 | |
1061 | stw r14,savesr3(r13) ; and save | |
1062 | ||
1063 | noSRsave: mtsr sr0,r12 ; Set the kernel SR0 | |
1064 | stw r21,saver21(r13) ; Save this one | |
1065 | addis r12,r12,0x0010 ; Point to the second segment of kernel | |
1066 | stw r10,savexer(r13) ; Save the rupt XER | |
1067 | mtsr sr1,r12 ; Set the kernel SR1 | |
1068 | stw r30,saver30(r13) ; Save this one | |
1069 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1070 | stw r31,saver31(r13) ; Save this one | |
1071 | mtsr sr2,r12 ; Set the kernel SR2 | |
1072 | stw r22,saver22(r13) ; Save this one | |
1073 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1074 | stw r23,saver23(r13) ; Save this one | |
1075 | mtsr sr3,r12 ; Set the kernel SR3 | |
1076 | stw r24,saver24(r13) ; Save this one | |
1077 | stw r25,saver25(r13) ; Save this one | |
9bccf70c | 1078 | mfdsisr r7 ; Get the rupt DSISR |
de355530 A |
1079 | stw r26,saver26(r13) ; Save this one |
1080 | stw r27,saver27(r13) ; Save this one | |
1081 | li r10,emfp0 ; Point to floating point save | |
1082 | stw r28,saver28(r13) ; Save this one | |
1083 | stw r29,saver29(r13) ; Save this one | |
1084 | mfsr r14,sr14 ; Get the copyin/out segment register | |
1085 | stw r6,savedar(r13) ; Save the rupt DAR | |
1086 | bf- featL1ena,skipz5a ; Do not do this if no L1... | |
1087 | dcbz r10,r2 ; Clear and allocate an L1 slot | |
1088 | ||
1089 | skipz5a: stw r7,savedsisr(r13) ; Save the rupt code DSISR | |
9bccf70c | 1090 | stw r11,saveexception(r13) ; Save the exception code |
de355530 | 1091 | stw r14,savesr14(r13) ; Save copyin/copyout |
1c79356b | 1092 | |
9bccf70c A |
1093 | |
1094 | ; | |
de355530 A |
1095 | ; Here we will save some floating point and vector status |
1096 | ; and we also set a clean default status for a new interrupt level. | |
1097 | ; Note that we assume that emfp0 is on an altivec boundary | |
1098 | ; and that R10 points to it (as a displacement from R2). | |
9bccf70c A |
1099 | ; |
1100 | ||
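; Added note on the VSCR reset below: vspltish v1,1 builds 0x00010001 in each
; word (non-Java plus saturate), vspltisw v0,1 builds 0x00000001 (saturate
; only), and the vxor leaves 0x00010000, i.e. non-Java set with saturate
; clear, which is the clean status written for the new interrupt level.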
de355530 A |
1101 | lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable bit |
1102 | mfmsr r6 ; Get the current MSR value | |
1103 | ori r8,r8,lo16(MASK(MSR_FP)) ; Add in the float enable | |
1104 | li r19,0 ; Assume no Altivec | |
1105 | or r7,r6,r8 ; Enable floating point | |
1106 | li r9,0 ; Get set to clear VRSAVE | |
1107 | mtmsr r7 ; Do it | |
1108 | isync | |
d7e50217 | 1109 | |
de355530 A |
1110 | bf featAltivec,noavec ; No Altivec on this CPU... |
1111 | addi r14,r10,16 ; Displacement to second vector register | |
1112 | stvxl v0,r10,r2 ; Save a register | |
1113 | stvxl v1,r14,r2 ; Save a second register | |
1114 | mfvscr v0 ; Get the vector status register | |
1115 | la r28,savevscr(r13) ; Point to the status area | |
1116 | vspltish v1,1 ; Turn on the non-Java bit and saturate | |
1117 | stvxl v0,0,r28 ; Save the vector status | |
1118 | vspltisw v0,1 ; Turn on the saturate bit | |
1119 | mfspr r19,vrsave ; Get the VRSAVE register | |
1120 | vxor v1,v1,v0 ; Turn off saturate | |
1121 | mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level | |
1122 | mtvscr v1 ; Set the non-java, no saturate status for new level | |
d7e50217 | 1123 | |
de355530 A |
1124 | lvxl v0,r10,r2 ; Restore first work register |
1125 | lvxl v1,r14,r2 ; Restore second work register | |
d7e50217 | 1126 | |
de355530 | 1127 | noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags |
d7e50217 A |
1128 | |
1129 | ; | |
de355530 A |
1130 | ; We need to save the FPSCR as if it is normal context. |
1131 | ; This is because pending exceptions will cause an exception even if | |
1132 | ; FP is disabled. We need to clear the FPSCR when we first start running in the | |
1133 | ; kernel. | |
d7e50217 A |
1134 | ; |
1135 | ||
de355530 | 1136 | bf- featFP,nofpexe ; No possible floating point exceptions... |
9bccf70c | 1137 | |
de355530 A |
1138 | stfd f0,emfp0(r2) ; Save FPR0 |
1139 | stfd f1,emfp1(r2) ; Save FPR1 | |
1140 | mffs f0 ; Get the FPSCR | |
1141 | fsub f1,f1,f1 ; Make a 0 | |
1142 | stfd f0,savefpscrpad(r13) ; Save the FPSCR | |
1143 | mtfsf 0xFF,f1 ; Clear it | |
1144 | lfd f0,emfp0(r2) ; Restore FPR0 | |
1145 | lfd f1,emfp1(r2) ; Restore FPR1 | |
d7e50217 | 1146 | |
de355530 A |
1147 | nofpexe: mtmsr r6 ; Turn off FP and vector |
1148 | isync | |
d7e50217 | 1149 | |
1c79356b | 1150 | |
9bccf70c A |
1151 | ; |
1152 | ; Everything is saved at this point, except for FPRs, and VMX registers. | |
1153 | ; Time for us to get a new savearea and then trace interrupt if it is enabled. | |
1154 | ; | |
1155 | ||
1156 | li r0,SAVgeneral ; Get the savearea type value | |
de355530 A |
1157 | lis r23,hi16(EXT(trcWork)) ; Get the trace work area address |
1158 | mr r14,r11 ; Save the interrupt code across the call | |
9bccf70c A |
1159 | stb r0,SAVflags+2(r13) ; Mark valid context |
1160 | ori r23,r23,lo16(EXT(trcWork)) ; Get the rest | |
1161 | rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 | |
de355530 | 1162 | lwz r25,traceMask(r23) ; Get the trace mask |
9bccf70c | 1163 | addi r22,r22,10 ; Adjust code so we shift into CR5 |
de355530 A |
1164 | |
1165 | bl EXT(save_get_phys) ; Grab a savearea | |
1166 | ||
1167 | mfsprg r2,0 ; Get back the per_proc block | |
d7e50217 | 1168 | rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed |
de355530 A |
1169 | lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number |
1170 | li r26,0x8 ; Get start of cpu mask | |
1171 | mr r11,r14 ; Get the exception code back | |
9bccf70c A |
1172 | srw r26,r26,r19 ; Get bit position of cpu number |
1173 | mtcrf 0x04,r7 ; Set CR5 to show trace or not | |
1174 | and. r26,r26,r25 ; See if we trace this cpu | |
de355530 | 1175 | stw r3,next_savearea(r2) ; Remember the savearea we just got for the next rupt |
9bccf70c | 1176 | crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled |
de355530 A |
1177 | ; |
1178 | ; At this point, we can take another exception and lose nothing. | |
1179 | ; | |
1180 | ||
1181 | lwz r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not) | |
9bccf70c | 1182 | |
de355530 | 1183 | bne+ cr5,skipTrace ; Skip all of this if no tracing here... |
1c79356b | 1184 | |
9bccf70c A |
1185 | ; |
1186 | ; We select a trace entry using a compare and swap on the next entry field. | |
1187 | ; Since we do not lock the actual trace buffer, there is a potential that | |
1188 | ; another processor could wrap and trash our entry. Who cares? | |
1189 | ; | |
1c79356b | 1190 | |
de355530 A |
1191 | lwz r25,traceStart(r23) ; Get the start of trace table |
1192 | lwz r26,traceEnd(r23) ; Get end of trace table | |
1193 | ||
1194 | trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate | |
1c79356b | 1195 | |
9bccf70c A |
1196 | addi r22,r20,LTR_size ; Point to the next trace entry |
1197 | cmplw r22,r26 ; Do we need to wrap the trace table? | |
de355530 | 1198 | bne+ gotTrcEnt ; No wrap, we got us a trace entry... |
1c79356b | 1199 | |
9bccf70c A |
1200 | mr r22,r25 ; Wrap back to start |
1201 | ||
de355530 A |
1202 | gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer |
1203 | bne- trcsel ; Collision, try again... | |
1c79356b A |
1204 | |
1205 | #if ESPDEBUG | |
9bccf70c A |
1206 | dcbf 0,r23 ; Force to memory |
1207 | sync | |
1c79356b | 1208 | #endif |
de355530 A |
1209 | |
1210 | bf- featL1ena,skipz6 ; L1 cache is disabled... | |
1211 | dcbz 0,r20 ; Clear and allocate first trace line | |
1212 | skipz6: | |
1c79356b | 1213 | |
9bccf70c A |
1214 | ; |
1215 | ; Let us cut that trace entry now. | |
1216 | ; | |
1c79356b | 1217 | |
1c79356b | 1218 | |
de355530 A |
1219 | li r14,32 ; Offset to second line |
1220 | ||
1221 | lwz r16,ruptStamp(r2) ; Get top of time base | |
1222 | lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp | |
1223 | ||
1224 | bf- featL1ena,skipz7 ; L1 cache is disabled... | |
1225 | dcbz r14,r20 ; Zap the second half | |
1226 | ||
1227 | skipz7: stw r16,LTR_timeHi(r20) ; Set the upper part of TB | |
1228 | lwz r1,saver1(r13) ; Get back interrupt time R1 | |
1229 | stw r17,LTR_timeLo(r20) ; Set the lower part of TB | |
1230 | lwz r18,saver2(r13) ; Get back interrupt time R2 | |
1231 | stw r0,LTR_r0(r20) ; Save off register 0 | |
1232 | lwz r3,saver3(r13) ; Restore this one | |
9bccf70c | 1233 | sth r19,LTR_cpu(r20) ; Stash the cpu number |
de355530 A |
1234 | stw r1,LTR_r1(r20) ; Save off register 1 |
1235 | lwz r4,saver4(r13) ; Restore this one | |
1236 | stw r18,LTR_r2(r20) ; Save off register 2 | |
1237 | lwz r5,saver5(r13) ; Restore this one | |
1238 | stw r3,LTR_r3(r20) ; Save off register 3 | |
9bccf70c | 1239 | lwz r16,savecr(r13) ; Get the CR value |
de355530 | 1240 | stw r4,LTR_r4(r20) ; Save off register 4 |
9bccf70c | 1241 | mfsrr0 r17 ; Get SRR0 back, it is still good |
de355530 | 1242 | stw r5,LTR_r5(r20) ; Save off register 5 |
9bccf70c A |
1243 | mfsrr1 r18 ; SRR1 is still good in here |
1244 | stw r16,LTR_cr(r20) ; Save the CR | |
de355530 A |
1245 | stw r17,LTR_srr0(r20) ; Save the SRR0 | |
1246 | stw r18,LTR_srr1(r20) ; Save the SRR1 | |
9bccf70c | 1247 | mfdar r17 ; Get this back |
de355530 A |
1248 | lwz r16,savelr(r13) ; Get the LR |
1249 | stw r17,LTR_dar(r20) ; Save the DAR | |
9bccf70c | 1250 | mfctr r17 ; Get the CTR (still good in register) |
de355530 A |
1251 | stw r16,LTR_lr(r20) ; Save the LR |
1252 | #if 0 | |
1253 | lwz r17,emfp1(r2) ; (TEST/DEBUG) | |
1254 | #endif | |
1255 | stw r17,LTR_ctr(r20) ; Save off the CTR | |
1256 | stw r13,LTR_save(r20) ; Save the savearea | |
9bccf70c | 1257 | sth r11,LTR_excpt(r20) ; Save the exception type |
1c79356b | 1258 | #if ESPDEBUG |
de355530 A |
1259 | addi r17,r20,32 ; (TEST/DEBUG) |
1260 | dcbst br0,r20 ; (TEST/DEBUG) | |
1261 | dcbst br0,r17 ; (TEST/DEBUG) | |
1262 | sync ; (TEST/DEBUG) | |
1c79356b | 1263 | #endif |
d7e50217 | 1264 | |
d7e50217 | 1265 | ; |
de355530 A |
1266 | ; We are done with the trace, except for maybe modifying the exception |
1267 | ; code later on. So, that means that we need to save R20 and CR5. | |
1268 | ; | |
1269 | ; So, finish setting up the kernel registers now. | |
d7e50217 A |
1270 | ; |
1271 | ||
de355530 A |
1272 | skipTrace: lhz r21,PP_CPU_NUMBER(r2) ; Get the logical processor number |
1273 | lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters | |
1274 | lwz r7,savesrr1(r13) ; Get the entering MSR | |
1275 | ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters | |
1276 | rlwinm r21,r21,8,20,23 ; Get index to processor counts | |
9bccf70c | 1277 | mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code |
d7e50217 | 1278 | rlwinm r6,r0,1,0,31 ; Move sign bit to the end |
de355530 A |
1279 | cmplwi cr1,r11,T_SYSTEM_CALL ; Did we get a system call? |
1280 | add r12,r12,r21 ; Point to the processor count area | |
1281 | crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x | |
1282 | lwzx r22,r12,r11 ; Get the old value | |
1283 | cmplwi cr3,r11,T_IN_VAIN ; Was this all in vain? All for nothing? | |
1284 | addi r22,r22,1 ; Count this one | |
9bccf70c | 1285 | cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it |
de355530 | 1286 | stwx r22,r12,r11 ; Store it back |
1c79356b | 1287 | |
de355530 A |
1288 | beq- cr3,EatRupt ; Interrupt was all for nothing... |
1289 | cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? | |
1290 | bne+ cr1,noCutT ; Not a system call... | |
1291 | bnl+ cr0,noCutT ; R0 not 0b10xxx...x, can not be any kind of magical system call... | |
1c79356b | 1292 | rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? |
de355530 A |
1293 | lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags |
1294 | beq+ FCisok ; From supervisor state... | |
1c79356b | 1295 | |
de355530 A |
1296 | ori r1,r1,lo16(EXT(dgWork)) ; Again |
1297 | lwz r1,dgFlags(r1) ; Get the flags | |
1c79356b | 1298 | rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? |
de355530 | 1299 | beq- noCutT ; No... |
1c79356b | 1300 | |
de355530 | 1301 | FCisok: beq- cr2,isCutTrace ; This is a CutTrace system call... |
1c79356b | 1302 | |
9bccf70c A |
1303 | ; |
1304 | ; Here is where we call the firmware. If it returns T_IN_VAIN, that means | |
1305 | ; that it has handled the interruption. Remember: thou shalt not trash R13 | |
de355530 | 1306 | ; or R20 while you are away. Anything else is ok. |
9bccf70c A |
1307 | ; |
1308 | ||
de355530 A |
1309 | lwz r3,saver3(r13) ; Restore the first parameter |
1310 | bl EXT(FirmwareCall) ; Go handle the firmware call.... | |
1c79356b | 1311 | |
d7e50217 | 1312 | cmplwi r3,T_IN_VAIN ; Was it handled? |
de355530 | 1313 | mfsprg r2,0 ; Restore the per_proc |
d7e50217 A |
1314 | beq+ EatRupt ; Interrupt was handled... |
1315 | mr r11,r3 ; Put the rupt code into the right register | |
de355530 | 1316 | b filter ; Go to the normal system call handler... |
d7e50217 | 1317 | |
de355530 A |
1318 | .align 5 |
1319 | ||
1320 | isCutTrace: | |
1321 | li r7,-32768 ; Get a 0x8000 for the exception code | |
1322 | bne- cr5,EatRupt ; Tracing is disabled... | |
1323 | sth r7,LTR_excpt(r20) ; Modify the exception type to a CutTrace | |
1324 | b EatRupt ; Time to go home... | |
1325 | ||
1326 | ; We are here because we did not have a CutTrace system call | |
1327 | ||
1328 | .align 5 | |
1329 | ||
1330 | noCutT: beq- cr3,MachineCheck ; Whoa... Machine check... | |
1c79356b | 1331 | |
9bccf70c | 1332 | ; |
de355530 A |
1333 | ; The following interrupts are the only ones that can be redriven |
1334 | ; by the higher level code or emulation routines. | |
9bccf70c | 1335 | ; |
1c79356b | 1336 | |
de355530 A |
1337 | Redrive: cmplwi cr0,r11,T_IN_VAIN ; Did the signal handler eat the signal? |
1338 | mfsprg r2,0 ; Get the per_proc block | |
1339 | beq+ cr0,EatRupt ; Bail now if we ate the rupt... | |
1c79356b | 1340 | |
9bccf70c | 1341 | |
de355530 A |
1342 | ; |
1343 | ; Here is where we check for the other fast-path exceptions: translation exceptions, | |
1344 | ; emulated instructions, etc. | |
1345 | ; | |
9bccf70c | 1346 | |
de355530 A |
1347 | filter: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist |
1348 | cmplwi cr4,r11,T_ALIGNMENT ; See if we got an alignment exception | |
1349 | cmplwi cr1,r11,T_PROGRAM ; See if we got a program exception | |
1350 | cmplwi cr2,r11,T_INSTRUCTION_ACCESS ; Check on an ISI | |
1351 | bne+ cr3,noAltivecAssist ; It is not an assist... | |
1352 | b EXT(AltivecAssist) ; It is an assist... | |
1353 | ||
1354 | .align 5 | |
1c79356b | 1355 | |
de355530 A |
1356 | noAltivecAssist: |
1357 | bne+ cr4,noAlignAssist ; No alignment here... | |
1358 | b EXT(AlignAssist) ; Go try to emulate... | |
1c79356b | 1359 | |
de355530 | 1360 | .align 5 |
9bccf70c | 1361 | |
de355530 A |
1362 | noAlignAssist: |
1363 | bne+ cr1,noEmulate ; No emulation here... | |
1364 | b EXT(Emulate) ; Go try to emulate... | |
1c79356b | 1365 | |
de355530 | 1366 | .align 5 |
1c79356b | 1367 | |
de355530 A |
1368 | noEmulate: cmplwi cr3,r11,T_CSWITCH ; Are we context switching |
1369 | cmplwi r11,T_DATA_ACCESS ; Check on a DSI | |
1370 | beq- cr2,DSIorISI ; It is a PTE fault... | |
1371 | beq- cr3,conswtch ; It is a context switch... | |
1372 | bne+ PassUp ; It is not a PTE fault... | |
1373 | ||
1374 | ; | |
1375 | ; This call will either handle the fault, in which case it will not | |
1376 | ; return, or return to pass the fault up the line. | |
1377 | ; | |
1378 | ||
1379 | DSIorISI: mr r3,r11 ; Move the rupt code | |
d7e50217 | 1380 | |
de355530 A |
1381 | bl EXT(handlePF) ; See if we can handle this fault |
1382 | ||
1383 | lwz r0,savesrr1(r13) ; Get the MSR in use at exception time | |
1384 | mfsprg r2,0 ; Get back per_proc | |
1385 | cmplwi cr1,r3,T_IN_VAIN ; Was it handled? | |
d7e50217 | 1386 | rlwinm. r4,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? |
de355530 A |
1387 | mr r11,r3 ; Put interrupt code back into the right register |
1388 | beq+ cr1,EatRupt ; Yeah, just blast back to the user... | |
1389 | beq- NoFamPf ; Trapped from supervisor state, FAM does not apply... |
d7e50217 A |
1390 | lwz r1,spcFlags(r2) ; Load spcFlags |
1391 | rlwinm r1,r1,1+FamVMmodebit,30,31 ; Extract FamVMenabit and FamVMmodebit | |
1392 | cmpi cr0,r1,2 ; Check FamVMena set without FamVMmode | |
de355530 | 1393 | bne- cr0,NoFamPf |
d7e50217 | 1394 | lwz r6,FAMintercept(r2) ; Load exceptions mask to intercept |
d7e50217 | 1395 | srwi r1,r11,2 ; divide r11 by 4 |
de355530 | 1396 | lis r5,0x8000 ; Set r5 to 0x80000000 |
d7e50217 A |
1397 | srw r1,r5,r1 ; Set bit for current exception |
1398 | and. r1,r1,r6 ; And current exception with the intercept mask | |
de355530 A |
1399 | beq+ NoFamPf ; Not a FAM-intercepted exception... |
1400 | bl EXT(vmm_fam_pf_handler) | |
d7e50217 | 1401 | b EatRupt |
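;
; The FAM intercept check above, as a hedged C sketch (spcFlags and FAMintercept
; are fields used in this file; the helper name is made up for illustration):
;
;     /* exception codes are multiples of 4, so code >> 2 selects a mask bit */
;     uint32_t bit = 0x80000000u >> (code >> 2);
;     if (fam_enabled_without_fam_mode(spcFlags) && (bit & FAMintercept)) {
;         vmm_fam_pf_handler();          /* let the FAM guest take the fault */
;         goto EatRupt;
;     }
;     /* otherwise fall into the normal NoFamPf path below */
;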
de355530 A |
1402 | NoFamPf: |
1403 | andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on | |
1404 | beq+ PassUp ; Not on, normal case... | |
9bccf70c A |
1405 | ; |
1406 | ; Here is where we handle the "recovery mode" stuff. | |
1407 | ; This is set by an emulation routine to trap any faults when it is fetching data or | |
1408 | ; instructions. | |
1409 | ; | |
1410 | ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0 | |
1411 | ; and R1 to the DAR and DSISR, respectively. | |
1412 | ; | |
de355530 | 1413 | lwz r4,savesrr0(r13) ; Get the failing instruction address |
1c79356b | 1414 | lwz r5,savecr(r13) ; Get the condition register |
de355530 A |
1415 | addi r4,r4,4 ; Skip failing instruction |
1416 | lwz r6,savedar(r13) ; Get the DAR | |
1c79356b | 1417 | rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed |
9bccf70c | 1418 | lwz r7,savedsisr(r13) ; Grab the DSISR |
de355530 A |
1419 | stw r0,savesrr1(r13) ; Save the result MSR |
1420 | stw r4,savesrr0(r13) ; Save resume address | |
9bccf70c | 1421 | stw r5,savecr(r13) ; And the resume CR |
de355530 A |
1422 | stw r6,saver0(r13) ; Pass back the DAR |
1423 | stw r7,saver1(r13) ; Pass back the DSISR | |
1c79356b A |
1424 | b EatRupt ; Resume emulated code |
1425 | ||
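;
; In C terms, the recovery-mode fixup above does roughly this (savearea field
; names are stand-ins for the save* offsets used here):
;
;     save->srr0 += 4;              /* resume past the faulting instruction      */
;     save->cr   &= ~CR0_EQ;        /* CR0_EQ clear tells the emulator "failed"  */
;     save->r0    = save->dar;      /* hand the DAR back in R0                   */
;     save->r1    = save->dsisr;    /* ...and the DSISR in R1                    */
;     goto EatRupt;                 /* reload state and resume the emulator      */
;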
9bccf70c A |
1426 | ; |
1427 | ; Here is where we handle the context switch firmware call. The old | |
de355530 | 1428 | ; context has been saved, and the new savearea is in saver3. We will just |
9bccf70c A |
1429 | ; muck around with the savearea pointers, and then join the exit routine |
1430 | ; | |
1431 | ||
1432 | .align 5 | |
1433 | ||
1434 | conswtch: | |
1435 | mr r29,r13 ; Save the old savearea pointer |
de355530 A |
1436 | rlwinm r30,r13,0,0,19 ; Get the start of the savearea block |
1437 | lwz r5,saver3(r13) ; Switch to the new savearea | |
1438 | lwz r30,SACvrswap(r30) ; get real to virtual translation | |
9bccf70c A |
1439 | mr r13,r5 ; Switch saveareas |
1440 | xor r27,r29,r30 ; Flip to virtual | |
de355530 | 1441 | stw r27,saver3(r5) ; Push the new savearea to the switch to routine |
9bccf70c | 1442 | b EatRupt ; Start it up... |
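;
; The savearea swap above, sketched in C (SACvrswap is the per-block
; physical<->virtual XOR constant used in this file; other names are illustrative):
;
;     savearea *old = save;                    /* the context we just saved       */
;     savearea *new = (savearea *)old->r3;     /* switcher passed it in R3        */
;     new->r3 = (uintptr_t)old ^ block_of(old)->SACvrswap;  /* old one, virtual   */
;     save = new;                              /* the exit path restores this one */
;     goto EatRupt;
;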
1c79356b A |
1443 | |
1444 | ; | |
1445 | ; Handle machine check here. | |
1446 | ; | |
1447 | ; ? | |
1448 | ; | |
9bccf70c A |
1449 | |
1450 | .align 5 | |
1451 | ||
1c79356b | 1452 | MachineCheck: |
9bccf70c | 1453 | |
de355530 A |
1454 | lwz r27,savesrr1(r13) ; ? |
1455 | rlwinm. r11,r27,0,dcmck,dcmck ; ? | |
1456 | beq+ notDCache ; ? | |
d7e50217 | 1457 | |
de355530 A |
1458 | mfspr r11,msscr0 ; ? |
1459 | dssall ; ? | |
d7e50217 A |
1460 | sync |
1461 | ||
de355530 | 1462 | lwz r27,savesrr1(r13) ; ? |
d7e50217 | 1463 | |
de355530 A |
1464 | hiccup: cmplw r27,r27 ; ? |
1465 | bne- hiccup ; ? | |
1466 | isync ; ? | |
1467 | ||
1468 | oris r11,r11,hi16(dl1hwfm) ; ? | |
1469 | mtspr msscr0,r11 ; ? | |
1470 | ||
1471 | rstbsy: mfspr r11,msscr0 ; ? | |
1c79356b | 1472 | |
de355530 A |
1473 | rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? |
1474 | bne rstbsy ; ? | |
1c79356b | 1475 | |
de355530 | 1476 | sync ; ? |
1c79356b | 1477 | |
de355530 | 1478 | b EatRupt ; ? |
1c79356b | 1479 | |
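;
; A hedged reading of the uncommented sequence above: if SRR1 flags a data
; cache problem (dcmck), the L1 data cache is flushed by hardware via MSSCR0
; and the interruption is simply eaten. Roughly, in C:
;
;     dssall(); sync();              /* quiesce data streams and pending stores */
;     msscr0 |= dl1hwfm;             /* request an L1 D-cache hardware flush     */
;     while (msscr0 & dl1hwf)        /* wait for the flush to complete           */
;         ;
;     sync();
;     goto EatRupt;
;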
de355530 A |
1480 | .align 5 |
1481 | ||
1482 | notDCache: | |
1c79356b | 1483 | ; |
de355530 A |
1484 | ; Check if the failure was in |
1485 | ; ml_probe_read. If so, this is expected: we fix things up to return |
1486 | ; directly to ml_probe_read's caller and then eat the exception. |
1c79356b | 1487 | ; |
de355530 A |
1488 | lwz r30,savesrr0(r13) ; Get the failing PC |
1489 | lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part | |
1490 | lis r27,hi16(EXT(ml_probe_read)) ; High order part | |
1491 | ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part | |
1492 | ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part | |
1493 | cmplw r30,r28 ; Check highest possible | |
1494 | cmplw cr1,r30,r27 ; Check lowest | |
1495 | bge- PassUp ; Outside of range | |
1496 | blt- cr1,PassUp ; Outside of range | |
1497 | ; | |
1498 | ; We need to fix up the BATs here because the probe | |
1499 | ; routine messed them all up... As long as we are at it, | |
1500 | ; fix up to return directly to caller of probe. | |
1501 | ; | |
1502 | ||
1503 | lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address | |
1504 | ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address | |
1505 | ||
1506 | lwz r30,0(r11) ; Pick up DBAT 0 high | |
1507 | lwz r28,4(r11) ; Pick up DBAT 0 low | |
1508 | lwz r27,8(r11) ; Pick up DBAT 1 high | |
1509 | lwz r18,16(r11) ; Pick up DBAT 2 high | |
1510 | lwz r11,24(r11) ; Pick up DBAT 3 high | |
1c79356b | 1511 | |
de355530 A |
1512 | sync |
1513 | mtdbatu 0,r30 ; Restore DBAT 0 high | |
1514 | mtdbatl 0,r28 ; Restore DBAT 0 low | |
1515 | mtdbatu 1,r27 ; Restore DBAT 1 high | |
1516 | mtdbatu 2,r18 ; Restore DBAT 2 high | |
1517 | mtdbatu 3,r11 ; Restore DBAT 3 high | |
1518 | sync | |
1519 | ||
1520 | lwz r27,saver6(r13) ; Get the saved R6 value | |
1521 | mtspr hid0,r27 ; Restore HID0 | |
1522 | isync | |
1523 | ||
1524 | lwz r28,savelr(r13) ; Get return point | |
1525 | lwz r27,saver0(r13) ; Get the saved MSR | |
1526 | li r30,0 ; Get a failure RC | |
1527 | stw r28,savesrr0(r13) ; Set the return point | |
1528 | stw r27,savesrr1(r13) ; Set the continued MSR | |
1529 | stw r30,saver3(r13) ; Set return code | |
1530 | b EatRupt ; Yum, yum, eat it all up... | |
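;
; The probe recovery above, as an illustrative C sketch (shadow_BAT,
; ml_probe_read, and ml_probe_read_mck are from this code; helpers are made up):
;
;     if (pc >= (uintptr_t)ml_probe_read && pc < (uintptr_t)ml_probe_read_mck) {
;         restore_dbats_from(shadow_BAT);   /* the probe routine trashed the DBATs */
;         restore_hid0(save->r6);           /* HID0 value the probe stashed in R6  */
;         save->srr0 = save->lr;            /* return straight to probe's caller   */
;         save->srr1 = save->r0;            /* with the MSR it saved in R0         */
;         save->r3   = 0;                   /* tell the caller the probe failed    */
;         goto EatRupt;
;     }
;     goto PassUp;                          /* a real machine check, pass it up    */
;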
1c79356b A |
1531 | |
1532 | /* | |
1533 | * Here's where we come back from some instruction emulator. If we come back with | |
1534 | * T_IN_VAIN, the emulation is done and we should just reload state and directly | |
1535 | * go back to the interrupted code. Otherwise, we'll check to see if | |
1536 | * we need to redrive with a different interrupt, i.e., DSI. | |
1537 | */ | |
1538 | ||
1539 | .align 5 | |
1540 | .globl EXT(EmulExit) | |
1541 | ||
1542 | LEXT(EmulExit) | |
1543 | ||
de355530 | 1544 | cmplwi r11,T_IN_VAIN ; Was it emulated? |
1c79356b | 1545 | lis r1,hi16(SAVredrive) ; Get redrive request |
de355530 A |
1546 | mfsprg r2,0 ; Restore the per_proc area |
1547 | beq+ EatRupt ; Yeah, just blast back to the user... | |
1c79356b A |
1548 | lwz r4,SAVflags(r13) ; Pick up the flags |
1549 | ||
1550 | and. r0,r4,r1 ; Check if redrive requested | |
de355530 | 1551 | andc r4,r4,r1 ; Clear redrive |
1c79356b | 1552 | |
de355530 | 1553 | beq+ PassUp ; No redrive, just keep on going... |
1c79356b | 1554 | |
de355530 | 1555 | stw r4,SAVflags(r13) ; Set the flags |
1c79356b A |
1556 | b Redrive ; Redrive the exception... |
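;
; EmulExit in C terms (a sketch; SAVflags and SAVredrive are the names used here):
;
;     if (code == T_IN_VAIN)                /* emulation handled everything   */
;         goto EatRupt;
;     if (save->flags & SAVredrive) {       /* emulator asked for a redrive   */
;         save->flags &= ~SAVredrive;       /* consume the request            */
;         goto Redrive;                     /* e.g. retake it as a DSI        */
;     }
;     goto PassUp;                          /* otherwise pass the original up */
;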
1557 | ||
9bccf70c A |
1558 | ; |
1559 | ; Jump into main handler code switching on VM at the same time. | |
1560 | ; | |
1561 | ; We assume kernel data is mapped contiguously in physical | |
1562 | ; memory, otherwise we would need to switch on (at least) virtual data. | |
1563 | ; SRs are already set up. | |
1564 | ; | |
1c79356b | 1565 | |
d7e50217 | 1566 | .align 5 |
9bccf70c | 1567 | |
de355530 A |
1568 | PassUp: lis r2,hi16(EXT(exception_handlers)) ; Get exception vector address |
1569 | ori r2,r2,lo16(EXT(exception_handlers)) ; And low half | |
1570 | lwzx r6,r2,r11 ; Get the actual exception handler address | |
d7e50217 | 1571 | |
de355530 A |
1572 | PassUpDeb: mtsrr0 r6 ; Set up the handler address |
1573 | rlwinm r5,r13,0,0,19 ; Back off to the start of savearea block | |
1574 | ||
9bccf70c | 1575 | mfmsr r3 ; Get our MSR |
de355530 A |
1576 | rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear all but the trace bits |
1577 | li r2,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value | |
1578 | lwz r5,SACvrswap(r5) ; Get real to virtual conversion | |
1579 | or r2,r2,r3 ; Keep the trace bits if they are on | |
9bccf70c | 1580 | mr r3,r11 ; Pass the exception code in the paramter reg |
de355530 | 1581 | mtsrr1 r2 ; Set up our normal MSR value |
9bccf70c | 1582 | xor r4,r13,r5 ; Pass up the virtual address of context savearea |
9bccf70c | 1583 | |
d7e50217 | 1584 | rfi ; Launch the exception handler |
de355530 A |
1585 | |
1586 | .long 0 ; Leave these here gol durn it! | |
1587 | .long 0 | |
1588 | .long 0 | |
1589 | .long 0 | |
1590 | .long 0 | |
1591 | .long 0 | |
1592 | .long 0 | |
1593 | .long 0 | |
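;
; What PassUp sets up, sketched in C (the exception code doubles as a byte
; offset into exception_handlers; the other names are placeholders):
;
;     handler   = *(uintptr_t *)((char *)exception_handlers + code);
;     new_msr   = MSR_SUPERVISOR_INT_OFF | (cur_msr & (MSR_BE | MSR_SE));
;     virt_save = (uintptr_t)save ^ block_of(save)->SACvrswap;
;     rfi_to(handler, new_msr, /* R3 */ code, /* R4 */ virt_save);
;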
1c79356b A |
1594 | |
1595 | /* | |
de355530 A |
1596 | * This routine is the only place where we return from an interruption. |
1597 | * Anyplace else is wrong. Even if I write the code, it's still wrong. | |
1598 | * Feel free to come by and slap me if I do do it--even though I may | |
1599 | * have had a good reason to do it. | |
1c79356b A |
1600 | * |
1601 | * All we need to remember here is that R13 must point to the savearea | |
1602 | * that has the context we need to load up. Translation and interruptions | |
1603 | * must be disabled. | |
1604 | * | |
1605 | * This code always loads the context in the savearea pointed to | |
1606 | * by R13. In the process, it throws away the savearea. If there | |
1607 | * is any tomfoolery with savearea stacks, it must be taken care of | |
1608 | * before we get here. | |
1609 | * | |
de355530 A |
1610 | * Speaking of tomfoolery, this is where we synthesize interruptions |
1611 | * if we need to. | |
1c79356b A |
1612 | */ |
1613 | ||
1614 | .align 5 | |
1615 | ||
9bccf70c A |
1616 | EatRupt: mfsprg r29,0 ; Get the per_proc block back |
1617 | mr r31,r13 ; Move the savearea pointer to the far end of the register set | |
1618 | ||
de355530 | 1619 | lwz r30,quickfret(r29) ; Pick up the quick fret list, if any |
d7e50217 | 1620 | |
de355530 A |
1621 | mfsprg r27,2 ; Get the processor features |
1622 | lwz r21,savesrr1(r31) ; Get destination MSR | |
9bccf70c A |
1623 | |
1624 | erchkfret: mr. r3,r30 ; Any savearea to quickly release? | |
1625 | beq+ ernoqfret ; No quickfrets... | |
de355530 | 1626 | lwz r30,SAVprev(r30) ; Chain back now |
9bccf70c A |
1627 | |
1628 | bl EXT(save_ret_phys) ; Put it on the free list | |
de355530 | 1629 | stw r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) |
9bccf70c | 1630 | b erchkfret ; Try the next one... |
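;
; The quickfret drain above is just a list walk; in C (names illustrative,
; SAVprev and quickfret are the fields used here):
;
;     while ((s = pp->quickfret) != NULL) {
;         next = s->SAVprev;          /* remember the rest of the chain        */
;         save_ret_phys(s);           /* put this savearea on the free list    */
;         pp->quickfret = next;       /* ok to dequeue after the release       */
;     }
;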
1c79356b | 1631 | |
de355530 | 1632 | |
9bccf70c A |
1633 | .align 5 |
1634 | ||
de355530 A |
1635 | ernoqfret: mtcrf 0x60,r27 ; Set CRs with thermal facilities |
1636 | rlwinm. r0,r21,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? | |
1637 | crandc 31,pfThermalb,pfThermIntb ; See if we have a thermometer but no thermal interrupt facility |
1638 | la r21,saver0(r31) ; Point to the first thing we restore | |
1639 | crandc 31,cr0_eq,31 ; Factor in enablement | |
1640 | bf 31,tempisok ; No thermal checking needed... | |
1641 | ||
1642 | ; | |
1643 | ; We get to here if 1) there is a thermal facility, and 2) the hardware | |
1644 | ; will not or cannot interrupt, and 3) the interrupt will be enabled after this point. |
1645 | ; | |
1646 | ||
1647 | mfspr r16,thrm3 ; Get thermal 3 | |
1648 | mfspr r14,thrm1 ; Get thermal 1 |
1649 | rlwinm. r16,r16,0,thrme,thrme ; Is the thermometer enabled? |
1650 | mfspr r15,thrm2 ; Get thermal 2 | |
1651 | beq- tempisok ; No thermometer... | |
1652 | rlwinm r16,r14,2,28,31 ; Cluster THRM1's TIE, V, TIN, and TIV at bottom 4 bits |
1653 | srawi r0,r15,31 ; Make a mask of 1s if the temperature is over |
1654 | rlwinm r30,r15,2,28,31 ; Cluster THRM2's TIE, V, TIN, and TIV at bottom 4 bits |
1655 | ; | |
1656 | ; Note that the following compares check that V, TIN, and TIV are set and that TIE is cleared. |
1657 | ; This ensures that we only emulate when the hardware is not set to interrupt. |
1658 | ; | |
1659 | cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? | |
1660 | cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? | |
1661 | and r15,r15,r0 ; Keep high temp if that interrupted, zero if not | |
1662 | cror cr0_eq,cr0_eq,cr1_eq ; Merge both | |
1663 | andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did | |
1664 | bne+ tempisok ; Nope, the temperature is in range |
1665 | ||
1666 | li r11,T_THERMAL ; Time to emulate a thermal interruption | |
1667 | or r14,r14,r15 ; Get contents of interrupting register | |
1668 | mr r13,r31 ; Make sure savearea is pointed to correctly | |
1669 | stw r11,saveexception(r31) ; Set the exception code | |
1670 | stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar | |
1671 | ||
1672 | ; | |
1673 | ; This code is here to prevent a problem that will probably never happen. If we are | |
1674 | ; returning from an emulation routine (alignment, altivec assist, etc.) the SRs may | |
1675 | ; not be set to the proper kernel values. Then, if we were to emulate a thermal here, | |
1676 | ; we would end up running in the kernel with a bogus SR. So, to prevent | |
1677 | ; this unfortunate circumstance, we slam the SRs here. (I worry too much...) | |
1678 | ; | |
1c79356b | 1679 | |
de355530 A |
1680 | lis r30,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value |
1681 | mtsr sr0,r30 ; Set the kernel SR0 | |
1682 | addis r30,r30,0x0010 ; Point to the second segment of kernel | |
1683 | mtsr sr1,r30 ; Set the kernel SR1 | |
1684 | addis r30,r30,0x0010 ; Point to the third segment of kernel | |
1685 | mtsr sr2,r30 ; Set the kernel SR2 | |
1686 | addis r30,r30,0x0010 ; Point to the fourth segment of kernel |
1687 | mtsr sr3,r30 ; Set the kernel SR3 | |
1688 | b Redrive ; Go process this new interruption... | |
1689 | ||
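;
; Condition for synthesizing a thermal interruption, as a hedged C sketch
; (THRM1/THRM2/THRM3 are the thermal SPRs read above; the helpers are made up):
;
;     if (has_thermometer && !has_thermal_interrupt        /* facility, no int    */
;         && (dest_msr & MSR_EE)                           /* EE about to be on   */
;         && (thrm3 & THRME)                               /* sensor enabled      */
;         && thrm_pending(thrm1, thrm2)) {                 /* V,TIN,TIV set, TIE clear */
;         save->exception = T_THERMAL;
;         save->dar       = interrupting_thrm;             /* THRM1 or THRM2      */
;         goto Redrive;                                    /* take it like a real one */
;     }
;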
1690 | ||
1691 | tempisok: dcbt 0,r21 ; Touch in the first thing we need | |
1c79356b | 1692 | |
1c79356b | 1693 | ; |
9bccf70c | 1694 | ; Here we release the savearea. |
1c79356b | 1695 | ; |
9bccf70c A |
1696 | ; Important!!!! The savearea is released before we are done with it. When the |
1697 | ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys | |
1698 | ; will trim the list, making the extra saveareas allocatable by another processor | |
1699 | ; The code in there must ALWAYS leave our savearea on the local list, otherwise | |
1700 | ; we could be very, very unhappy. The code there always queues the "just released" | |
1701 | ; savearea to the head of the local list. Then, if it needs to trim, it will | |
1702 | ; start with the SECOND savearea, leaving ours intact. | |
1c79356b | 1703 | ; |
de355530 A |
1704 | ; Build the SR values depending upon destination. If we are going to the kernel, |
1705 | ; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) | |
1706 | ; must be set to whatever it was at the last exception because it varies. All the rest | |
1707 | ; have been set up already. | |
1708 | ; | |
1709 | ; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and | |
1710 | ; SR14 (current implementation) must be restored always. The others must be set if | |
1711 | ; they are different from what was loaded last time (i.e., tasks have switched). |
1712 | ; We check the last loaded address space ID and if the same, we skip the loads. | |
1713 | ; This is a performance gain because SR manipulations are slow. | |
1714 | ; | |
1715 | ; There is also the special case when MSR_RI is set. This happens when we are trying to | |
1716 | ; make a special user state access when we are in the kernel. If we take an exception when | |
1717 | ; make a special user state access while we are in the kernel. If we take an exception |
1718 | ; what they were before the exception because they could be non-standard. We saved them | |
1719 | ; during exception entry, so we will just load them here. | |
1c79356b A |
1720 | ; |
1721 | ||
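;
; The segment register strategy described above, as a C-style sketch (the helper
; names are made up; the real work is the mtsr sequence that follows):
;
;     if (dest_msr & MSR_RI) {                 /* special access fault           */
;         load_srs_saved_at_entry();           /* SR0..SR3 exactly as they were  */
;     } else if (!(dest_msr & MSR_PR)) {       /* returning to supervisor code   */
;         load_sr14_only();                    /* the copyin/copyout segment     */
;     } else {                                 /* returning to user space        */
;         load_sr0_to_sr3_and_sr14(user_pmap); /* always reloaded                */
;         if (user_pmap != pp->last_pmap) {    /* address space changed          */
;             load_remaining_srs(user_pmap);
;             pp->last_pmap = user_pmap;
;         }
;     }
;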
9bccf70c A |
1722 | mr r3,r31 ; Get the exiting savearea in parm register |
1723 | bl EXT(save_ret_phys) ; Put it on the free list | |
1724 | ||
de355530 | 1725 | li r3,savesrr1 ; Get offset to the srr1 value |
9bccf70c A |
1726 | |
1727 | lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away) | |
de355530 A |
1728 | lwz r7,PP_USERPMAP(r29) ; Pick up the user pmap we may launch |
1729 | rlwinm. r17,r26,0,MSR_RI_BIT,MSR_RI_BIT ; See if we are returning from a special fault | |
1c79356b | 1730 | cmplw cr3,r14,r14 ; Set that we do not need to stop streams |
9bccf70c | 1731 | |
de355530 | 1732 | beq+ nSpecAcc ; Do not reload the kernel SRs if this is not a special access... |
9bccf70c | 1733 | |
de355530 A |
1734 | lwz r14,savesr0(r31) ; Get SR0 at fault time |
1735 | mtsr sr0,r14 ; Set SR0 | |
1736 | lwz r14,savesr1(r31) ; Get SR1 at fault time | |
1737 | mtsr sr1,r14 ; Set SR1 | |
1738 | lwz r14,savesr2(r31) ; Get SR2 at fault time | |
1739 | mtsr sr2,r14 ; Set SR2 | |
1740 | lwz r14,savesr3(r31) ; Get SR3 at fault time |
1741 | mtsr sr3,r14 ; Set SR3 | |
1742 | b segsdone ; We are all set up now... | |
9bccf70c | 1743 | |
de355530 | 1744 | .align 5 |
9bccf70c | 1745 | |
de355530 A |
1746 | nSpecAcc: rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system |
1747 | li r14,PMAP_SEGS ; Point to segments | |
1748 | bne+ gotouser ; We are going into user state... | |
1c79356b | 1749 | |
de355530 A |
1750 | lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time |
1751 | mtsr sr14,r14 ; Set SR14 | |
1752 | b segsdone ; We are all set up now... | |
1753 | ||
1754 | .align 5 | |
1c79356b | 1755 | |
de355530 A |
1756 | gotouser: dcbt r14,r7 ; Touch the segment register contents |
1757 | lwz r9,spcFlags(r29) ; Pick up the special flags | |
1758 | lwz r16,PP_LASTPMAP(r29) ; Pick up the last loaded pmap | |
1759 | addi r14,r14,32 ; Second half of pmap segments | |
1760 | rlwinm r9,r9,userProtKeybit-2,2,2 ; Isolate the user state protection key | |
1761 | lwz r15,PMAP_SPACE(r7) ; Get the primary space | |
1762 | lwz r13,PMAP_VFLAGS(r7) ; Get the flags | |
1763 | dcbt r14,r7 ; Touch second page | |
1764 | oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value | |
1765 | mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces | |
1766 | xor r15,r15,r9 ; Flip to proper segment register key | |
1767 | lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags | |
1c79356b | 1768 | |
de355530 A |
1769 | addis r13,r15,0x0000 ; Get SR0 value |
1770 | bf 16,nlsr0 ; No alternate here... | |
1771 | lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value | |
1c79356b | 1772 | |
de355530 A |
1773 | nlsr0: mtsr sr0,r13 ; Load up the SR |
1774 | rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on | |
1c79356b | 1775 | |
de355530 A |
1776 | addis r13,r15,0x0010 ; Get SR1 value |
1777 | bf 17,nlsr1 ; No alternate here... | |
1778 | lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value | |
1779 | ||
1780 | nlsr1: mtsr sr1,r13 ; Load up the SR | |
1781 | or r26,r26,r9 ; Flip on the BE bit for special trace if needed | |
1c79356b | 1782 | |
de355530 | 1783 | cmplw cr3,r7,r16 ; Are we running the same segs as last time? |
1c79356b | 1784 | |
de355530 A |
1785 | addis r13,r15,0x0020 ; Get SR2 value |
1786 | bf 18,nlsr2 ; No alternate here... | |
1787 | lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value | |
1788 | ||
1789 | nlsr2: mtsr sr2,r13 ; Load up the SR | |
1c79356b | 1790 | |
de355530 A |
1791 | addis r13,r15,0x0030 ; Get SR3 value |
1792 | bf 19,nlsr3 ; No alternate here... | |
1793 | lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value | |
1794 | ||
1795 | nlsr3: mtsr sr3,r13 ; Load up the SR | |
1c79356b | 1796 | |
de355530 A |
1797 | addis r13,r15,0x00E0 ; Get SR14 value |
1798 | bf 30,nlsr14 ; No alternate here... | |
1799 | lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value | |
1800 | ||
1801 | nlsr14: mtsr sr14,r13 ; Load up the SR | |
1c79356b | 1802 | |
de355530 A |
1803 | beq+ cr3,segsdone ; All done if same pmap as last time... |
1804 | ||
1805 | stw r7,PP_LASTPMAP(r29) ; Remember what we just loaded | |
1806 | ||
1807 | addis r13,r15,0x0040 ; Get SR4 value | |
1808 | bf 20,nlsr4 ; No alternate here... | |
1809 | lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value | |
1810 | ||
1811 | nlsr4: mtsr sr4,r13 ; Load up the SR | |
1c79356b | 1812 | |
de355530 A |
1813 | addis r13,r15,0x0050 ; Get SR5 value |
1814 | bf 21,nlsr5 ; No alternate here... | |
1815 | lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value | |
1816 | ||
1817 | nlsr5: mtsr sr5,r13 ; Load up the SR | |
1c79356b | 1818 | |
de355530 A |
1819 | addis r13,r15,0x0060 ; Get SR6 value |
1820 | bf 22,nlsr6 ; No alternate here... | |
1821 | lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value | |
1c79356b | 1822 | |
de355530 A |
1823 | nlsr6: mtsr sr6,r13 ; Load up the SR |
1824 | ||
1825 | addis r13,r15,0x0070 ; Get SR7 value | |
1826 | bf 23,nlsr7 ; No alternate here... | |
1827 | lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value | |
1c79356b | 1828 | |
de355530 | 1829 | nlsr7: mtsr sr7,r13 ; Load up the SR |
1c79356b | 1830 | |
de355530 A |
1831 | addis r13,r15,0x0080 ; Get SR8 value |
1832 | bf 24,nlsr8 ; No alternate here... | |
1833 | lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value | |
1834 | ||
1835 | nlsr8: mtsr sr8,r13 ; Load up the SR | |
1c79356b | 1836 | |
de355530 A |
1837 | addis r13,r15,0x0090 ; Get SR9 value |
1838 | bf 25,nlsr9 ; No alternate here... | |
1839 | lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value | |
1c79356b | 1840 | |
de355530 A |
1841 | nlsr9: mtsr sr9,r13 ; Load up the SR |
1842 | ||
1843 | addis r13,r15,0x00A0 ; Get SR10 value | |
1844 | bf 26,nlsr10 ; No alternate here... | |
1845 | lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value | |
1c79356b | 1846 | |
de355530 | 1847 | nlsr10: mtsr sr10,r13 ; Load up the SR |
1c79356b | 1848 | |
de355530 A |
1849 | addis r13,r15,0x00B0 ; Get SR11 value |
1850 | bf 27,nlsr11 ; No alternate here... | |
1851 | lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value | |
1c79356b | 1852 | |
de355530 | 1853 | nlsr11: mtsr sr11,r13 ; Load up the SR |
1c79356b | 1854 | |
de355530 A |
1855 | addis r13,r15,0x00C0 ; Get SR12 value |
1856 | bf 28,nlsr12 ; No alternate here... | |
1857 | lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value | |
1858 | ||
1859 | nlsr12: mtsr sr12,r13 ; Load up the SR | |
1c79356b | 1860 | |
de355530 A |
1861 | addis r13,r15,0x00D0 ; Get SR13 value |
1862 | bf 29,nlsr13 ; No alternate here... | |
1863 | lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value | |
1864 | ||
1865 | nlsr13: mtsr sr13,r13 ; Load up the SR | |
9bccf70c | 1866 | |
de355530 A |
1867 | addis r13,r15,0x00F0 ; Get SR15 value |
1868 | bf 31,nlsr15 ; No alternate here... | |
1869 | lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value | |
1870 | ||
1871 | nlsr15: mtsr sr15,r13 ; Load up the SR | |
1872 | ||
1873 | segsdone: stwcx. r26,r3,r31 ; Blow away any reservations we hold | |
d7e50217 | 1874 | |
de355530 A |
1875 | li r21,emfp0 ; Point to the fp savearea |
1876 | lwz r25,savesrr0(r31) ; Get the SRR0 to use | |
1877 | la r28,saver8(r31) ; Point to the next line to use | |
1878 | dcbt r21,r29 ; Start moving in a work area | |
1879 | lwz r0,saver0(r31) ; Restore R0 | |
1880 | dcbt 0,r28 ; Touch it in | |
1881 | lwz r1,saver1(r31) ; Restore R1 | |
1882 | lwz r2,saver2(r31) ; Restore R2 | |
1883 | la r28,saver16(r31) ; Point to the next line to get | |
1884 | lwz r3,saver3(r31) ; Restore R3 | |
1c79356b | 1885 | mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) |
de355530 | 1886 | lwz r4,saver4(r31) ; Restore R4 |
9bccf70c | 1887 | mtsrr0 r25 ; Restore the SRR0 now |
de355530 | 1888 | lwz r5,saver5(r31) ; Restore R5 |
9bccf70c | 1889 | mtsrr1 r26 ; Restore the SRR1 now |
de355530 A |
1890 | lwz r6,saver6(r31) ; Restore R6 |
1891 | ||
1892 | dcbt 0,r28 ; Touch that next line on in | |
1893 | la r28,savevscr(r31) ; Point to the saved facility context | |
1894 | ||
1895 | lwz r7,saver7(r31) ; Restore R7 | |
1896 | lwz r8,saver8(r31) ; Restore R8 | |
1897 | lwz r9,saver9(r31) ; Restore R9 | |
1898 | mfmsr r26 ; Get the current MSR | |
1899 | dcbt 0,r28 ; Touch saved facility context | |
1900 | lwz r10,saver10(r31) ; Restore R10 | |
1901 | lwz r11,saver11(r31) ; Restore R11 | |
1902 | oris r26,r26,hi16(MASK(MSR_VEC)) ; Get the vector enable bit | |
1903 | lwz r12,saver12(r31) ; Restore R12 | |
1904 | ori r26,r26,lo16(MASK(MSR_FP)) ; Add in the float enable | |
1905 | lwz r13,saver13(r31) ; Restore R13 | |
1906 | la r28,saver24(r31) ; Point to the next line to do | |
9bccf70c | 1907 | |
de355530 A |
1908 | ; |
1909 | ; Note that floating point and vector will be enabled from here on until the RFI | |
1910 | ; | |
9bccf70c | 1911 | |
de355530 A |
1912 | mtmsr r26 ; Turn on vectors and floating point |
1913 | isync | |
1914 | ||
1915 | dcbt 0,r28 ; Touch next line to do | |
1916 | ||
1917 | lwz r14,saver14(r31) ; Restore R14 | |
1918 | lwz r15,saver15(r31) ; Restore R15 | |
1919 | ||
1920 | bf pfAltivecb,noavec3 ; No Altivec on this CPU... | |
9bccf70c | 1921 | |
de355530 | 1922 | la r28,savevscr(r31) ; Point to the status area |
9bccf70c A |
1923 | stvxl v0,r21,r29 ; Save a vector register |
1924 | lvxl v0,0,r28 ; Get the vector status | |
de355530 | 1925 | lwz r27,savevrsave(r31) ; Get the vrsave |
9bccf70c A |
1926 | mtvscr v0 ; Set the vector status |
1927 | ||
1928 | lvxl v0,r21,r29 ; Restore work vector register | |
de355530 A |
1929 | beq+ cr3,noavec2 ; SRs have not changed, no need to stop the streams... |
1930 | dssall ; Kill all data streams | |
1931 | sync | |
9bccf70c | 1932 | |
de355530 A |
1933 | noavec2: mtspr vrsave,r27 ; Set the vrsave |
1934 | ||
1935 | noavec3: bf- pfFloatb,nofphere ; Skip if no floating point... | |
9bccf70c | 1936 | |
1c79356b | 1937 | stfd f0,emfp0(r29) ; Save FP0 |
9bccf70c | 1938 | lfd f0,savefpscrpad(r31) ; Get the fpscr |
1c79356b A |
1939 | mtfsf 0xFF,f0 ; Restore fpscr |
1940 | lfd f0,emfp0(r29) ; Restore the used register | |
de355530 A |
1941 | |
1942 | nofphere: lwz r16,saver16(r31) ; Restore R16 | |
1943 | lwz r17,saver17(r31) ; Restore R17 | |
1944 | lwz r18,saver18(r31) ; Restore R18 | |
1945 | lwz r19,saver19(r31) ; Restore R19 | |
1946 | lwz r20,saver20(r31) ; Restore R20 | |
1947 | lwz r21,saver21(r31) ; Restore R21 | |
1948 | lwz r22,saver22(r31) ; Restore R22 | |
1949 | ||
1950 | lwz r23,saver23(r31) ; Restore R23 | |
1951 | lwz r24,saver24(r31) ; Restore R24 | |
1952 | lwz r25,saver25(r31) ; Restore R25 | |
1953 | lwz r26,saver26(r31) ; Restore R26 | |
1954 | lwz r27,saver27(r31) ; Restore R27 | |
1955 | ||
9bccf70c | 1956 | lwz r28,savecr(r31) ; Get CR to restore |
de355530 A |
1957 | |
1958 | lwz r29,savexer(r31) ; Get XER to restore | |
9bccf70c | 1959 | mtcr r28 ; Restore the CR |
de355530 | 1960 | lwz r28,savelr(r31) ; Get LR to restore |
9bccf70c | 1961 | mtxer r29 ; Restore the XER |
de355530 | 1962 | lwz r29,savectr(r31) ; Get the CTR to restore |
9bccf70c | 1963 | mtlr r28 ; Restore the LR |
de355530 | 1964 | lwz r28,saver30(r31) ; Get R30 |
9bccf70c | 1965 | mtctr r29 ; Restore the CTR |
de355530 A |
1966 | lwz r29,saver31(r31) ; Get R31 |
1967 | mtsprg 2,r28 ; Save R30 for later | |
1968 | lwz r28,saver28(r31) ; Restore R28 | |
9bccf70c | 1969 | mtsprg 3,r29 ; Save R31 for later |
de355530 | 1970 | lwz r29,saver29(r31) ; Restore R29 |
1c79356b | 1971 | |
de355530 A |
1972 | mfsprg r31,0 ; Get per_proc |
1973 | mfsprg r30,2 ; Restore R30 | |
1974 | lwz r31,pfAvailable(r31) ; Get the feature flags | |
1c79356b | 1975 | mtsprg 2,r31 ; Set the feature flags |
9bccf70c | 1976 | mfsprg r31,3 ; Restore R31 |
1c79356b | 1977 | |
de355530 A |
1978 | rfi ; Click heels three times and think very hard that there is no place like home... |
1979 | ||
1980 | .long 0 ; Leave this here | |
1981 | .long 0 | |
1982 | .long 0 | |
1983 | .long 0 | |
1984 | .long 0 | |
1985 | .long 0 | |
1986 | .long 0 | |
1987 | .long 0 | |
1988 | ||
1c79356b A |
1989 | |
1990 | ||
1991 | ||
1992 | /* | |
1993 | * exception_exit(savearea *) | |
1994 | * | |
1995 | * | |
1996 | * ENTRY : IR and/or DR and/or interruptions can be on | |
de355530 | 1997 | * R3 points to the physical address of a savearea |
1c79356b A |
1998 | */ |
1999 | ||
2000 | .align 5 | |
2001 | .globl EXT(exception_exit) | |
2002 | ||
2003 | LEXT(exception_exit) | |
2004 | ||
2005 | mfsprg r29,2 ; Get feature flags | |
de355530 | 2006 | mfmsr r30 ; Get the current MSR |
d7e50217 | 2007 | mtcrf 0x04,r29 ; Set the features |
de355530 A |
2008 | rlwinm r30,r30,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off |
2009 | mr r31,r3 ; Get the savearea in the right register | |
2010 | rlwinm r30,r30,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off | |
2011 | li r10,savesrr0 ; Point to one of the first things we touch in the savearea on exit | |
2012 | andi. r30,r30,0x7FCF ; Turn off externals, IR, and DR | |
1c79356b | 2013 | lis r1,hi16(SAVredrive) ; Get redrive request |
de355530 | 2014 | |
1c79356b A |
2015 | bt pfNoMSRirb,eeNoMSR ; No MSR... |
2016 | ||
2017 | mtmsr r30 ; Translation and all off | |
2018 | isync ; Toss prefetch | |
2019 | b eeNoMSRx | |
2020 | ||
2021 | eeNoMSR: li r0,loadMSR ; Get the MSR setter SC | |
2022 | mr r3,r30 ; Get new MSR | |
2023 | sc ; Set it | |
2024 | ||
de355530 A |
2025 | eeNoMSRx: dcbt r10,r31 ; Touch in the first stuff we restore |
2026 | mfsprg r2,0 ; Get the per_proc block | |
1c79356b A |
2027 | lwz r4,SAVflags(r31) ; Pick up the flags |
2028 | mr r13,r31 ; Put savearea here also | |
2029 | ||
2030 | and. r0,r4,r1 ; Check if redrive requested | |
de355530 | 2031 | andc r4,r4,r1 ; Clear redrive |
1c79356b A |
2032 | |
2033 | dcbt br0,r2 ; We will need this in just a sec | |
2034 | ||
2035 | beq+ EatRupt ; No redrive, just exit... | |
2036 | ||
9bccf70c | 2037 | lwz r11,saveexception(r13) ; Restore exception code |
de355530 | 2038 | stw r4,SAVflags(r13) ; Set the flags |
1c79356b A |
2039 | b Redrive ; Redrive the exception... |
2040 | ||
1c79356b | 2041 | |
de355530 A |
2042 | /* |
2043 | * Start of the trace table | |
2044 | */ | |
2045 | ||
2046 | .align 12 /* Align to 4k boundary */ | |
2047 | ||
2048 | .globl EXT(traceTableBeg) | |
2049 | EXT(traceTableBeg): /* Start of trace table */ | |
2050 | /* .fill 2048,4,0 Make an 8k trace table for now */ | |
2051 | .fill 13760,4,0 /* Make an .trace table for now */ | |
2052 | /* .fill 240000,4,0 Make an .trace table for now */ | |
2053 | .globl EXT(traceTableEnd) | |
2054 | EXT(traceTableEnd): /* End of trace table */ | |
2055 | ||
1c79356b A |
2056 | .globl EXT(ExceptionVectorsEnd) |
2057 | EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ | |
de355530 A |
2058 | #ifndef HACKALERTHACKALERT |
2059 | /* | |
2060 | * This .long needs to be here because the linker gets confused and tries to |
2061 | * include a section's final label in the next section if there is nothing |
2062 | * after it | |
2063 | */ | |
2064 | .long 0 /* (HACK/HACK/HACK) */ | |
1c79356b A |
2065 | #endif |
2066 | ||
2067 | .data | |
2068 | .align ALIGN | |
2069 | .globl EXT(exception_end) | |
2070 | EXT(exception_end): | |
2071 | .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ | |
2072 | ||
2073 |