/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

/*
 * Low-memory exception vector code for PowerPC MACH
 *
 * These are the only routines that are ever run with
 * VM instruction translation switched off.
 *
 * The PowerPC is quite strange in that rather than having a set
 * of exception vectors, the exception handlers are installed
 * in well-known addresses in low memory. This code must be loaded
 * at ZERO in physical memory. The simplest way of doing this is
 * to load the kernel at zero, and specify this as the first file
 * on the linker command line.
 *
 * When this code is loaded into place, it is loaded at virtual
 * address KERNELBASE, which is mapped to zero (physical).
 *
 * This code handles all powerpc exceptions and is always entered
 * in supervisor mode with translation off. It saves the minimum
 * processor state before switching back on translation and
 * jumping to the appropriate routine.
 *
 * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions)
 *
 * We use some of this space to decide which stack to use, and where to
 * save the context etc, before jumping to a generic handler.
 */
#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <ppc/savearea.h>
#include <mach/ppc/vm_param.h>

#define TRCSAVE 0
#define CHECKSAVE 0
#define PERFTIMES 0
#define ESPDEBUG 0

#if TRCSAVE
#error The TRCSAVE option is broken.... Fix it
#endif

/* CR bit numbers used as feature/state flags while handling an exception */
#define featL1ena 24			/* L1 data cache is enabled */
#define featSMP 25			/* processor is SMP-capable (has PIR) */
#define featAltivec 26			/* processor has Altivec */
#define wasNapping 27			/* processor was napping at interrupt */
#define featFP 28			/* processor has floating point */
#define specAccess 29			/* special access needed */

#define VECTOR_SEGMENT .section __VECTORS, __interrupts
85 | VECTOR_SEGMENT | |
86 | ||
87 | ||
88 | .globl EXT(ExceptionVectorsStart) | |
89 | ||
90 | EXT(ExceptionVectorsStart): /* Used if relocating the exception vectors */ | |
91 | baseR: /* Used so we have more readable code */ | |
92 | ||
93 | /* | |
94 | * System reset - call debugger | |
95 | */ | |
96 | . = 0xf0 | |
97 | .globl EXT(ResetHandler) | |
98 | EXT(ResetHandler): | |
99 | .long 0x0 | |
100 | .long 0x0 | |
101 | .long 0x0 | |
102 | ||
103 | . = 0x100 | |
104 | .L_handler100: | |
105 | mtsprg 2,r13 /* Save R13 */ | |
106 | mtsprg 3,r11 /* Save R11 */ | |
107 | lwz r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Get reset type | |
108 | mfcr r11 | |
109 | cmpi cr0,r13,RESET_HANDLER_START | |
110 | bne resetexc | |
111 | ||
112 | li r11,RESET_HANDLER_NULL | |
113 | stw r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0) ; Clear reset type | |
114 | ||
115 | lwz r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0) | |
116 | lwz r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0) | |
117 | mtlr r4 | |
118 | blr | |
119 | ||
120 | resetexc: | |
121 | mtcr r11 | |
1c79356b A |
122 | li r11,T_RESET /* Set 'rupt code */ |
123 | b .L_exception_entry /* Join common... */ | |
124 | ||
/*
 * Machine check
 */

		. = 0x200
.L_handler200:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_MACHINE_CHECK	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Data access - page fault, invalid memory rights for operation
 */

		. = 0x300
.L_handler300:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_DATA_ACCESS	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Instruction access - as for data access
 */

		. = 0x400
.L_handler400:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_INSTRUCTION_ACCESS	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * External interrupt
 */

		. = 0x500
.L_handler500:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_INTERRUPT	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Alignment - many reasons
 */

		. = 0x600
.L_handler600:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_ALIGNMENT	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Program - floating point exception, illegal inst, priv inst, user trap
 */

		. = 0x700
.L_handler700:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_PROGRAM	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Floating point disabled
 */

		. = 0x800
.L_handler800:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_FP_UNAVAILABLE	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */


/*
 * Decrementer - DEC register has passed zero.
 */

		. = 0x900
.L_handler900:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_DECREMENTER	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * I/O controller interface error - MACH does not use this
 */

		. = 0xA00
.L_handlerA00:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_IO_ERROR	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * Reserved
 */

		. = 0xB00
.L_handlerB00:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_RESERVED	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */
#if 0
/*
 * Disabled debug scaffolding: logs r0/srr0/lr triples into a buffer
 * whose write pointer lives at 0(br0).  Referenced only from the
 * (also disabled) hook in the 0xC00 system-call path.
 */
hackxxxx1:
		stmw	r29,4(br0)	; Save work registers
		lwz	r29,0(br0)	; Get current buffer pointer
		mr.	r29,r29		; Pointer initialized yet?
		bne+	xxxx1
		lis	r29,0x4000	; No, start buffer at 0x40000000

xxxx1:
		stw	r0,0(r29)	; Log syscall number
		mfsrr0	r30
		stw	r30,4(r29)	; Log interrupted PC
		mtlr	r30
		stw	r30,8(r29)

		addi	r29,r29,12	; Advance buffer pointer
		stw	r29,0(br0)

		lmw	r29,4(br0)	; Restore work registers
		b	hackxxxx2
#endif
;
;	System call - generated by the sc instruction
;
;	We handle the ultra-fast traps right here. They are:
;
;	0xFFFFFFFF - BlueBox only - MKIsPreemptiveTask
;	0xFFFFFFFE - BlueBox only - kcNKIsPreemptiveTaskEnv
;	0x00007FF2 - User state only - thread info
;	0x00007FF3 - User state only - floating point / vector facility status
;	0x00007FF4 - Kernel only - loadMSR
;
;	Note: none handled if virtual machine is running
;	Also, we treat SCs as kernel SCs if the RI bit is set
;

		. = 0xC00
.L_handlerC00:
		mtsprg	2,r13		; Save R13
		mfsrr1	r13		; Get SRR1 for loadMSR
		mtsprg	3,r11		; Save R11
		rlwimi	r13,r13,MSR_PR_BIT,0,0	; Move PR bit to non-volatile CR0 bit 0
		mfcr	r11		; Save the CR
		mtcrf	0x81,r13	; Get the moved PR and the RI for testing
		crnot	0,0		; Get !PR
		cror	0,0,MSR_RI_BIT	; See if we have !PR or RI
		mfsprg	r13,0		; Get the per_proc_area
		bt-	0,uftInKern	; We are in the kernel...

		cmplwi	cr5,r0,0x7FF2	; Ultra fast path cthread info call?
		cmpwi	cr6,r0,0x7FF3	; Ultra fast path facility status?
		cror	cr1_eq,cr5_lt,cr6_gt	; Set true if not 0x7FF2 and not 0x7FF3 and not negative
		lwz	r13,spcFlags(r13)	; Get the special flags
		bt-	cr1_eq,notufp	; Exit if we can not be ultra fast...

		rlwimi	r13,r13,runningVMbit+1,31,31	; Move VM flag after the 3 blue box flags
		not.	r0,r0		; Flip bits and kind of subtract 1
		mtcrf	1,r13		; Set BB and VMM flags in CR7

		cmplwi	cr1,r0,1	; Is this a bb fast path?
		not	r0,r0		; Restore to entry state
		bt-	31,notufp	; No fast paths if running VM (assume not)...
		bf-	bbNoMachSCbit,ufpUSuft	; We are not running BlueBox...
		bgt	cr1,notufp	; This can not be a bb ufp...
#if 0
		b	hackxxxx1
hackxxxx2:
#endif

		rlwimi	r11,r13,bbPreemptivebit-cr0_eq,cr0_eq,cr0_eq	; Copy preemptive task flag into user cr0_eq
		mfsprg	r13,0		; Get back per_proc


		bne	cr1,ufpIsBBpre	; This is the "isPreemptiveTask" call...

		lwz	r0,ppbbTaskEnv(r13)	; Get the shadowed taskEnv from per_proc_area

ufpIsBBpre:
		mtcrf	0xFF,r11	; Restore CR
		mfsprg	r11,3		; Restore R11
		mfsprg	r13,2		; Restore R13
		rfi			; All done, go back...

;
;	Normal fast path...
;

ufpUSuft:	bge+	notufp		; Bail if negative... (ARRRGGG -- BRANCH TO A BRANCH!!!!!)
		mfsprg	r11,3		; Restore R11
		mfsprg	r3,0		; Get the per_proc_area
		mfsprg	r13,2		; Restore R13
		bne-	cr5,isvecfp	; This is the facility stat call
		lwz	r3,UAW(r3)	; Get the assist word
		rfi			; All done, scream back... (no need to restore CR or R11, they are volatile)
;
isvecfp:	lwz	r3,spcFlags(r3)	; Get the facility status
		rfi			; Bail back...
;
notufp:		mtcrf	0xFF,r11	; Restore the used CRs
		li	r11,T_SYSTEM_CALL	; Set interrupt code
		b	.L_exception_entry	; Join common...

uftInKern:	cmplwi	r0,0x7FF4	; Ultra fast path loadMSR?
		bne-	notufp		; Someone is trying to cheat...

		mtcrf	0xFF,r11	; Restore CR
		lwz	r11,pfAvailable(r13)	; Pick up the feature flags
		mtsrr1	r3		; Set new MSR
		mfsprg	r13,2		; Restore R13
		mtsprg	2,r11		; Set the feature flags into sprg2
		mfsprg	r11,3		; Restore R11
		rfi			; Blast back
/*
 * Trace - generated by single stepping
 * performance monitor BE branch enable tracing/logging
 * is also done here now.  While this is permanently in the
 * system the impact is completely unnoticeable as this code is
 * only executed when (a) a single step or branch exception is
 * hit, (b) in the single step debugger case there is so much
 * overhead already the few extra instructions for testing for BE
 * are not even noticeable, (c) the BE logging code is *only* run
 * when it is enabled by the tool which will not happen during
 * normal system usage
 *
 * Note that this trace is available only to user state so we do not
 * need to set sprg2 before returning.
 */

		. = 0xD00
.L_handlerD00:
		mtsprg	2,r13		; Save R13
		mtsprg	3,r11		; Save R11
		mfsrr1	r13		; Get the old MSR
		mfcr	r11		; Get the CR
		rlwinm.	r13,r13,0,MSR_PR_BIT,MSR_PR_BIT	; Are we in supervisor state?
		beq-	notspectr	; Yes, not special trace...
		mfsprg	r13,0		; Get the per_proc area
		lhz	r13,PP_CPU_FLAGS(r13)	; Get the flags
		rlwinm.	r13,r13,0,traceBEb+16,traceBEb+16	; Special trace enabled?
		bne+	specbrtr	; Yeah...

notspectr:	mtcr	r11		; Restore CR
		li	r11,T_TRACE	; Set interrupt code
		b	.L_exception_entry	; Join common...

;
;	We are doing the special branch trace: log the interrupted PC into
;	the one-page pc_trace_buf ring, only taking the full exception when
;	the buffer wraps (slot 4092 reached).
;

specbrtr:	mfsprg	r13,0		; Get the per_proc area
		stw	r1,emfp0(r13)	; Save in a scratch area
		stw	r2,emfp0+4(r13)	; Save in a scratch area
		stw	r3,emfp0+8(r13)	; Save in a scratch area

		lis	r2,hi16(EXT(pc_trace_buf))	; Get the top of the buffer
		lwz	r3,spcTRp(r13)	; Pick up buffer position
		mr.	r1,r1		; Is it time to count?
		ori	r2,r2,lo16(EXT(pc_trace_buf))	; Get the bottom of the buffer
		cmplwi	cr1,r3,4092	; Set cr1_eq if we should take exception
		mfsrr0	r1		; Get the pc
		stwx	r1,r2,r3	; Save it in the buffer
		addi	r3,r3,4		; Point to the next slot
		rlwinm	r3,r3,0,20,31	; Wrap the slot at one page
		stw	r3,spcTRp(r13)	; Save the new slot
		lwz	r1,emfp0(r13)	; Restore work register
		lwz	r2,emfp0+4(r13)	; Restore work register
		lwz	r3,emfp0+8(r13)	; Restore work register
		beq	cr1,notspectr	; Buffer filled, make a rupt...

		mtcr	r11		; Restore the CR
		mfsprg	r13,2		; Restore R13
		mfsprg	r11,3		; Restore R11
		rfi			; Bail back...
/*
 * Floating point assist
 */

		. = 0xe00
.L_handlerE00:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_FP_ASSIST	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */


/*
 * Performance monitor interruption
 */

		. = 0xF00
PMIhandler:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_PERF_MON	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */


/*
 * VMX exception
 */

		. = 0xF20
VMXhandler:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_VMX	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */
/*
 * Instruction translation miss - we inline this code.
 * Upon entry (done for us by the machine):
 *     srr0 :  addr of instruction that missed
 *     srr1 :  bits 0-3   = saved CR0
 *             4          = lru way bit
 *             16-31      = saved msr
 *     msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *     imiss:  ea that missed
 *     icmp :  the compare value for the va that missed
 *     hash1:  pointer to first hash pteg
 *     hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *     tmp0:  saved counter
 *     tmp1:  junk
 *     tmp2:  pointer to pteg
 *     tmp3:  current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

		. = 0x1000
.L_handler1000:
		mfspr	tmp2, hash1
		mfctr	tmp0			/* use tmp0 to save ctr */
		mfspr	tmp3, icmp

.L_imiss_find_pte_in_pteg:
		li	tmp1, 8			/* count */
		subi	tmp2, tmp2, 8		/* offset for lwzu */
		mtctr	tmp1			/* count... */

.L_imiss_pteg_loop:
		lwz	tmp1, 8(tmp2)		/* check pte0 for match... */
		addi	tmp2, tmp2, 8
		cmpw	cr0, tmp1, tmp3
#if 0
		bdnzf+	cr0, .L_imiss_pteg_loop
#else
		bc	0,2, .L_imiss_pteg_loop	/* decrement ctr, loop while not equal */
#endif
		beq+	cr0, .L_imiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

		andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)
		bne-	.L_imiss_do_no_hash_exception	/* give up */

		mfspr	tmp2, hash2
		ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
		b	.L_imiss_find_pte_in_pteg

.L_imiss_found_pte:

		lwz	tmp1, 4(tmp2)		/* get pte1_t */
		andi.	tmp3, tmp1, MASK(PTE1_WIMG_GUARD)	/* Fault? */
		bne-	.L_imiss_do_prot_exception	/* Guarded - illegal */

/* Ok, we've found what we need to, restore and rfi! */

		mtctr	tmp0			/* restore ctr */
		mfsrr1	tmp3
		mfspr	tmp0, imiss
		mtcrf	0x80, tmp3		/* Restore CR0 */
		mtspr	rpa, tmp1		/* set the pte */
		ori	tmp1, tmp1, MASK(PTE1_REFERENCED)	/* set referenced */
		tlbli	tmp0
		sth	tmp1, 6(tmp2)
		rfi

.L_imiss_do_prot_exception:
/* set up srr1 to indicate protection exception... */
		mfsrr1	tmp3
		andi.	tmp2, tmp3, 0xffff
		addis	tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16
		b	.L_imiss_do_exception

.L_imiss_do_no_hash_exception:
/* clean up registers for protection exception... */
		mfsrr1	tmp3
		andi.	tmp2, tmp3, 0xffff
		addis	tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16

/* And the entry into the usual instruction fault handler ... */
.L_imiss_do_exception:

		mtctr	tmp0			/* Restore ctr */
		mtsrr1	tmp2			/* Set up srr1 */
		mfmsr	tmp0
		xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
		mtcrf	0x80, tmp3		/* Restore CR0 */
		mtmsr	tmp0			/* reset MSR[TGPR] */
		b	.L_handler400		/* Instr Access */
/*
 * Data load translation miss
 *
 * Upon entry (done for us by the machine):
 *     srr0 :  addr of instruction that missed
 *     srr1 :  bits 0-3   = saved CR0
 *             4          = lru way bit
 *             5          = 1 if store
 *             16-31      = saved msr
 *     msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *     dmiss:  ea that missed
 *     dcmp :  the compare value for the va that missed
 *     hash1:  pointer to first hash pteg
 *     hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *     tmp0:  saved counter
 *     tmp1:  junk
 *     tmp2:  pointer to pteg
 *     tmp3:  current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

		. = 0x1100
.L_handler1100:
		mfspr	tmp2, hash1
		mfctr	tmp0			/* use tmp0 to save ctr */
		mfspr	tmp3, dcmp

.L_dlmiss_find_pte_in_pteg:
		li	tmp1, 8			/* count */
		subi	tmp2, tmp2, 8		/* offset for lwzu */
		mtctr	tmp1			/* count... */

.L_dlmiss_pteg_loop:
		lwz	tmp1, 8(tmp2)		/* check pte0 for match... */
		addi	tmp2, tmp2, 8
		cmpw	cr0, tmp1, tmp3
#if 0	/* How to write this correctly? */
		bdnzf+	cr0, .L_dlmiss_pteg_loop
#else
		bc	0,2, .L_dlmiss_pteg_loop	/* decrement ctr, loop while not equal */
#endif
		beq+	cr0, .L_dmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

		andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)	/* already at 2nd? */
		bne-	.L_dmiss_do_no_hash_exception	/* give up */

		mfspr	tmp2, hash2
		ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
		b	.L_dlmiss_find_pte_in_pteg

.L_dmiss_found_pte:

		lwz	tmp1, 4(tmp2)		/* get pte1_t */

/* Ok, we've found what we need to, restore and rfi! */

		mtctr	tmp0			/* restore ctr */
		mfsrr1	tmp3
		mfspr	tmp0, dmiss
		mtcrf	0x80, tmp3		/* Restore CR0 */
		mtspr	rpa, tmp1		/* set the pte */
		ori	tmp1, tmp1, MASK(PTE1_REFERENCED)	/* set referenced */
		tlbld	tmp0			/* load up tlb */
		sth	tmp1, 6(tmp2)		/* sth is faster? */
		rfi

/* This code is shared with data store translation miss */

.L_dmiss_do_no_hash_exception:
/* clean up registers for protection exception... */
		mfsrr1	tmp3
/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
		rlwinm	tmp1, tmp3, 9, 6, 6
		addis	tmp1, tmp1, MASK(DSISR_HASH) >> 16

/* And the entry into the usual data fault handler ... */

		mtctr	tmp0			/* Restore ctr */
		andi.	tmp2, tmp3, 0xffff	/* Clean up srr1 */
		mtsrr1	tmp2			/* Set srr1 */
		mtdsisr	tmp1
		mfspr	tmp2, dmiss
		mtdar	tmp2
		mfmsr	tmp0
		xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
		mtcrf	0x80, tmp3		/* Restore CR0 */
		sync				/* Needed on some */
		mtmsr	tmp0			/* reset MSR[TGPR] */
		b	.L_handler300		/* Data Access */
/*
 * Data store translation miss (similar to data load)
 *
 * Upon entry (done for us by the machine):
 *     srr0 :  addr of instruction that missed
 *     srr1 :  bits 0-3   = saved CR0
 *             4          = lru way bit
 *             5          = 1 if store
 *             16-31      = saved msr
 *     msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *     dmiss:  ea that missed
 *     dcmp :  the compare value for the va that missed
 *     hash1:  pointer to first hash pteg
 *     hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *     tmp0:  saved counter
 *     tmp1:  junk
 *     tmp2:  pointer to pteg
 *     tmp3:  current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 *	NOTE: Do not touch sprg2 in here
 */

		. = 0x1200
.L_handler1200:
		mfspr	tmp2, hash1
		mfctr	tmp0			/* use tmp0 to save ctr */
		mfspr	tmp3, dcmp

.L_dsmiss_find_pte_in_pteg:
		li	tmp1, 8			/* count */
		subi	tmp2, tmp2, 8		/* offset for lwzu */
		mtctr	tmp1			/* count... */

.L_dsmiss_pteg_loop:
		lwz	tmp1, 8(tmp2)		/* check pte0 for match... */
		addi	tmp2, tmp2, 8

		cmpw	cr0, tmp1, tmp3
#if 0	/* I don't know how to write this properly */
		bdnzf+	cr0, .L_dsmiss_pteg_loop
#else
		bc	0,2, .L_dsmiss_pteg_loop	/* decrement ctr, loop while not equal */
#endif
		beq+	cr0, .L_dsmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

		andi.	tmp1, tmp3, MASK(PTE0_HASH_ID)	/* already at 2nd? */
		bne-	.L_dmiss_do_no_hash_exception	/* give up */

		mfspr	tmp2, hash2
		ori	tmp3, tmp3, MASK(PTE0_HASH_ID)
		b	.L_dsmiss_find_pte_in_pteg

.L_dsmiss_found_pte:

		lwz	tmp1, 4(tmp2)		/* get pte1_t */
		andi.	tmp3, tmp1, MASK(PTE1_CHANGED)	/* unchanged, check? */
		beq-	.L_dsmiss_check_prot	/* yes, check prot */

.L_dsmiss_resolved:
/* Ok, we've found what we need to, restore and rfi! */

		mtctr	tmp0			/* restore ctr */
		mfsrr1	tmp3
		mfspr	tmp0, dmiss
		mtcrf	0x80, tmp3		/* Restore CR0 */
		mtspr	rpa, tmp1		/* set the pte */
		tlbld	tmp0			/* load up tlb */
		rfi

.L_dsmiss_check_prot:
/* PTE is unchanged, we must check that we can write */
		rlwinm.	tmp3, tmp1, 30, 0, 1	/* check PP[1] */
		bge-	.L_dsmiss_check_prot_user_kern
		andi.	tmp3, tmp1, 1		/* check PP[0] */
		beq+	.L_dsmiss_check_prot_ok

.L_dmiss_do_prot_exception:
/* clean up registers for protection exception... */
		mfsrr1	tmp3
/* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
		rlwinm	tmp1, tmp3, 9, 6, 6
		addis	tmp1, tmp1, MASK(DSISR_PROT) >> 16

/* And the entry into the usual data fault handler ... */

		mtctr	tmp0			/* Restore ctr */
		andi.	tmp2, tmp3, 0xffff	/* Clean up srr1 */
		mtsrr1	tmp2			/* Set srr1 */
		mtdsisr	tmp1
		mfspr	tmp2, dmiss
		mtdar	tmp2
		mfmsr	tmp0
		xoris	tmp0, tmp0, MASK(MSR_TGPR)>>16	/* no TGPR */
		mtcrf	0x80, tmp3		/* Restore CR0 */
		sync				/* Needed on some */
		mtmsr	tmp0			/* reset MSR[TGPR] */
		b	.L_handler300		/* Data Access */

/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */
.L_dsmiss_check_prot_user_kern:
		mfsrr1	tmp3
		andi.	tmp3, tmp3, MASK(MSR_PR)
		beq+	.L_dsmiss_check_prot_kern
		mfspr	tmp3, dmiss		/* check user privs */
		mfsrin	tmp3, tmp3		/* get excepting SR */
		andis.	tmp3, tmp3, 0x2000	/* Test SR ku bit */
		beq+	.L_dsmiss_check_prot_ok
		b	.L_dmiss_do_prot_exception

.L_dsmiss_check_prot_kern:
		mfspr	tmp3, dmiss		/* check kern privs */
		mfsrin	tmp3, tmp3
		andis.	tmp3, tmp3, 0x4000	/* Test SR Ks bit */
		bne-	.L_dmiss_do_prot_exception

.L_dsmiss_check_prot_ok:
/* Ok, mark as referenced and changed before resolving the fault */
		ori	tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED))
		sth	tmp1, 6(tmp2)
		b	.L_dsmiss_resolved
/*
 * Instruction address breakpoint
 */

		. = 0x1300
.L_handler1300:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_INSTRUCTION_BKPT	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * System management interrupt
 */

		. = 0x1400
.L_handler1400:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_SYSTEM_MANAGEMENT	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

;
;	Altivec Java Mode Assist interrupt
;

		. = 0x1600
.L_handler1600:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_ALTIVEC_ASSIST	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

;
;	Thermal interruption
;

		. = 0x1700
.L_handler1700:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_THERMAL	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */

/*
 * There is now a large gap of reserved traps
 */

/*
 * Run mode/ trace exception - single stepping on 601 processors
 */

		. = 0x2000
.L_handler2000:
		mtsprg	2,r13		/* Save R13 */
		mtsprg	3,r11		/* Save R11 */
		li	r11,T_RUNMODE_TRACE	/* Set 'rupt code */
		b	.L_exception_entry	/* Join common... */
/*
 * .L_exception_entry(type)
 *
 * This is the common exception handling routine called by any
 * type of system exception.
 *
 * ENTRY:	via a system exception handler, thus interrupts off, VM off.
 *		r3 has been saved in sprg3 and now contains a number
 *		representing the exception's origins
 */

		.data
		.align	ALIGN
		.globl	EXT(exception_entry)
EXT(exception_entry):
		.long	.L_exception_entry-EXT(ExceptionVectorsStart)	/* phys addr of fn */

		VECTOR_SEGMENT
		.align	5
855 | .L_exception_entry: | |
856 | ||
857 | /* | |
858 | * | |
859 | * Here we will save off a mess of registers, the special ones and R0-R12. We use the DCBZ | |
860 | * instruction to clear and allcoate a line in the cache. This way we won't take any cache | |
861 | * misses, so these stores won't take all that long. Except the first line that is because | |
862 | * we can't do a DCBZ if the L1 D-cache is off. The rest we will skip if they are | |
863 | * off also. | |
864 | * | |
865 | * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions | |
866 | * are ignored. | |
867 | */ | |
9bccf70c A |
868 | mfsprg r13,0 /* Load per_proc */ |
869 | lwz r13,next_savearea(r13) /* Get the exception save area */ | |
1c79356b A |
870 | |
871 | stw r1,saver1(r13) ; Save register 1 | |
872 | stw r0,saver0(r13) ; Save register 0 | |
9bccf70c | 873 | dcbtst 0,r13 ; We will need this in a bit |
1c79356b A |
874 | mfspr r1,hid0 ; Get HID0 |
875 | mfcr r0 ; Save the CR | |
876 | mtcrf 255,r1 ; Get set to test for cache and sleep | |
877 | bf sleep,notsleep ; Skip if we are not trying to sleep | |
878 | ||
879 | mtcrf 255,r0 ; Restore the CR | |
880 | lwz r0,saver0(r13) ; Restore R0 | |
881 | lwz r1,saver1(r13) ; Restore R1 | |
882 | mfsprg r13,0 ; Get the per_proc | |
883 | lwz r11,pfAvailable(r13) ; Get back the feature flags | |
884 | mfsprg r13,2 ; Restore R13 | |
885 | mtsprg 2,r11 ; Set sprg2 to the features | |
886 | mfsprg r11,3 ; Restore R11 | |
887 | rfi ; Jump back into sleep code... | |
888 | .long 0 ; Leave these here please... | |
889 | .long 0 | |
890 | .long 0 | |
891 | .long 0 | |
892 | .long 0 | |
893 | .long 0 | |
894 | .long 0 | |
895 | .long 0 | |
896 | ||
897 | .align 5 | |
898 | ||
899 | notsleep: stw r2,saver2(r13) ; Save this one | |
900 | crmove featL1ena,dce ; Copy the cache enable bit | |
901 | rlwinm r2,r1,0,nap+1,doze-1 ; Clear any possible nap and doze bits | |
902 | mtspr hid0,r2 ; Clear the nap/doze bits | |
903 | cmplw r2,r1 ; See if we were napping | |
9bccf70c | 904 | la r1,saver8(r13) ; Point to the next line in case we need it |
1c79356b A |
905 | crnot wasNapping,cr0_eq ; Remember if we were napping |
906 | mfsprg r2,0 ; Get the per_proc area | |
907 | bf- featL1ena,skipz1 ; L1 cache is disabled... | |
9bccf70c | 908 | dcbz 0,r1 ; Reserve our line in cache |
1c79356b A |
909 | |
910 | ; | |
911 | ; Remember, we are setting up CR6 with feature flags | |
912 | ; | |
913 | skipz1: lwz r1,pfAvailable(r2) ; Get the CPU features flags | |
914 | stw r3,saver3(r13) ; Save this one | |
9bccf70c | 915 | la r3,savesrr0(r13) ; Point to the last line |
1c79356b A |
916 | mtcrf 0xE0,r1 ; Put the features flags (that we care about) in the CR |
917 | stw r4,saver4(r13) ; Save this one | |
918 | stw r6,saver6(r13) ; Save this one | |
919 | crmove featSMP,pfSMPcapb ; See if we have a PIR | |
920 | stw r8,saver8(r13) ; Save this one | |
921 | crmove featAltivec,pfAltivecb ; Set the Altivec flag | |
9bccf70c A |
922 | mfsrr0 r6 ; Get the interruption SRR0 |
923 | stw r8,saver8(r13) ; Save this one | |
924 | bf- featL1ena,skipz1a ; L1 cache is disabled... | |
925 | dcbz 0,r3 ; Reserve our line in cache | |
926 | skipz1a: crmove featFP,pfFloatb ; Remember that we have floating point | |
927 | stw r7,saver7(r13) ; Save this one | |
1c79356b | 928 | lhz r8,PP_CPU_FLAGS(r2) ; Get the flags |
9bccf70c | 929 | mfsrr1 r7 ; Get the interrupt SRR1 |
1c79356b | 930 | rlwinm r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on |
9bccf70c | 931 | stw r6,savesrr0(r13) ; Save the SRR0 |
1c79356b | 932 | rlwinm r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit |
9bccf70c | 933 | stw r5,saver5(r13) ; Save this one |
1c79356b A |
934 | and r8,r6,r8 ; Remove BE bit only if problem state and special tracing on |
935 | mfsprg r6,2 ; Get interrupt time R13 | |
936 | mtsprg 2,r1 ; Set the feature flags | |
937 | andc r7,r7,r8 ; Clear BE bit if special trace is on and PR is set | |
9bccf70c A |
938 | mfsprg r8,3 ; Get rupt time R11 |
939 | stw r7,savesrr1(r13) ; Save SRR1 | |
940 | rlwinm. r7,r7,MSR_RI_BIT,MSR_RI_BIT ; Is this a special case access fault? | |
941 | stw r6,saver13(r13) ; Save rupt R1 | |
942 | crnot specAccess,cr0_eq ; Set that we are doing a special access if RI is set | |
943 | stw r8,saver11(r13) ; Save rupt time R11 | |
1c79356b A |
944 | |
945 | getTB: mftbu r6 ; Get the upper timebase | |
946 | mftb r7 ; Get the lower timebase | |
947 | mftbu r8 ; Get the upper one again | |
948 | cmplw r6,r8 ; Did the top tick? | |
949 | bne- getTB ; Yeah, need to get it again... | |
950 | ||
951 | stw r8,ruptStamp(r2) ; Save the top of time stamp | |
9bccf70c A |
952 | stw r8,SAVtime(r13) ; Save the top of time stamp |
953 | la r6,saver16(r13) ; Point to the next cache line | |
1c79356b | 954 | stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp |
9bccf70c A |
955 | stw r7,SAVtime+4(r13) ; Save the bottom of time stamp |
956 | ||
1c79356b | 957 | bf- featL1ena,skipz2 ; L1 cache is disabled... |
9bccf70c | 958 | dcbz 0,r6 ; Allocate in cache |
1c79356b | 959 | skipz2: |
9bccf70c | 960 | stw r9,saver9(r13) ; Save this one |
1c79356b | 961 | |
9bccf70c A |
962 | stw r10,saver10(r13) ; Save this one |
963 | mflr r4 ; Get the LR | |
1c79356b A |
964 | mfxer r10 ; Get the XER |
965 | ||
966 | bf+ wasNapping,notNapping ; Skip if not waking up from nap... | |
967 | ||
968 | lwz r6,napStamp+4(r2) ; Pick up low order nap stamp | |
969 | lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return | |
970 | lwz r5,napStamp(r2) ; and high order | |
971 | subfc r7,r6,r7 ; Subtract low stamp from now | |
972 | lwz r6,napTotal+4(r2) ; Pick up low total | |
973 | subfe r5,r5,r8 ; Subtract high stamp and borrow from now | |
974 | lwz r8,napTotal(r2) ; Pick up the high total | |
975 | addc r6,r6,r7 ; Add low to total | |
976 | ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return | |
977 | adde r8,r8,r5 ; Add high and carry to total | |
978 | stw r6,napTotal+4(r2) ; Save the low total | |
979 | stw r8,napTotal(r2) ; Save the high total | |
980 | stw r3,savesrr0(r13) ; Modify to return to nap/doze exit | |
9bccf70c A |
981 | |
982 | rlwinm. r3,r1,0,pfSlowNapb,pfSlowNapb ; Should HID1 be restored? | |
983 | beq notInSlowNap | |
984 | ||
985 | lwz r3,pfHID1(r2) ; Get saved HID1 value | |
986 | mtspr hid1, r3 ; Restore HID1 | |
d52fe63f | 987 | |
9bccf70c A |
988 | notInSlowNap: |
989 | rlwinm. r3,r1,0,pfNoL2PFNapb,pfNoL2PFNapb ; Should MSSCR0 be restored? | |
990 | beq notNapping | |
d52fe63f | 991 | |
9bccf70c A |
992 | lwz r3,pfMSSCR0(r2) ; Get saved MSSCR0 value |
993 | mtspr msscr0, r3 ; Restore MSSCR0 | |
994 | sync | |
995 | isync | |
996 | ||
997 | notNapping: stw r12,saver12(r13) ; Save this one | |
1c79356b | 998 | |
9bccf70c A |
999 | stw r14,saver14(r13) ; Save this one |
1000 | stw r15,saver15(r13) ; Save this one | |
1001 | la r14,saver24(r13) ; Point to the next block to save into | |
1c79356b | 1002 | stw r0,savecr(r13) ; Save rupt CR |
9bccf70c A |
1003 | mfctr r6 ; Get the CTR |
1004 | stw r16,saver16(r13) ; Save this one | |
1005 | stw r4,savelr(r13) ; Save rupt LR | |
1c79356b A |
1006 | |
1007 | bf- featL1ena,skipz4 ; L1 cache is disabled... | |
9bccf70c | 1008 | dcbz 0,r14 ; Allocate next save area line |
1c79356b | 1009 | skipz4: |
9bccf70c A |
1010 | stw r17,saver17(r13) ; Save this one |
1011 | stw r18,saver18(r13) ; Save this one | |
1012 | stw r6,savectr(r13) ; Save rupt CTR | |
1013 | stw r19,saver19(r13) ; Save this one | |
1014 | lis r12,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value | |
1015 | mfdar r6 ; Get the rupt DAR | |
1016 | stw r20,saver20(r13) ; Save this one | |
1017 | ||
1018 | bf+ specAccess,noSRsave ; Do not save SRs if this is not a special access... | |
1019 | mfsr r14,sr0 ; Get SR0 | |
1020 | stw r14,savesr0(r13) ; and save | |
1021 | mfsr r14,sr1 ; Get SR1 | |
1022 | stw r14,savesr1(r13) ; and save | |
1023 | mfsr r14,sr2 ; get SR2 | |
1024 | stw r14,savesr2(r13) ; and save | |
1025 | mfsr r14,sr3 ; get SR3 | |
1026 | stw r14,savesr3(r13) ; and save | |
1027 | ||
1028 | noSRsave: mtsr sr0,r12 ; Set the kernel SR0 | |
1029 | stw r21,saver21(r13) ; Save this one | |
1c79356b A |
1030 | addis r12,r12,0x0010 ; Point to the second segment of kernel |
1031 | stw r10,savexer(r13) ; Save the rupt XER | |
9bccf70c A |
1032 | mtsr sr1,r12 ; Set the kernel SR1 |
1033 | stw r30,saver30(r13) ; Save this one | |
1c79356b | 1034 | addis r12,r12,0x0010 ; Point to the third segment of kernel |
9bccf70c A |
1035 | stw r31,saver31(r13) ; Save this one |
1036 | mtsr sr2,r12 ; Set the kernel SR2 | |
1037 | stw r22,saver22(r13) ; Save this one | |
1c79356b | 1038 | addis r12,r12,0x0010 ; Point to the third segment of kernel |
9bccf70c A |
1039 | stw r23,saver23(r13) ; Save this one |
1040 | mtsr sr3,r12 ; Set the kernel SR3 | |
1041 | stw r24,saver24(r13) ; Save this one | |
1042 | stw r25,saver25(r13) ; Save this one | |
1043 | mfdsisr r7 ; Get the rupt DSISR | |
1044 | stw r26,saver26(r13) ; Save this one | |
1045 | stw r27,saver27(r13) ; Save this one | |
1c79356b | 1046 | li r10,emfp0 ; Point to floating point save |
9bccf70c A |
1047 | stw r28,saver28(r13) ; Save this one |
1048 | stw r29,saver29(r13) ; Save this one | |
1c79356b | 1049 | mfsr r14,sr14 ; Get the copyin/out segment register |
9bccf70c | 1050 | stw r6,savedar(r13) ; Save the rupt DAR |
1c79356b A |
1051 | bf- featL1ena,skipz5a ; Do not do this if no L1... |
1052 | dcbz r10,r2 ; Clear and allocate an L1 slot | |
1053 | ||
9bccf70c A |
1054 | skipz5a: stw r7,savedsisr(r13) ; Save the rupt code DSISR |
1055 | stw r11,saveexception(r13) ; Save the exception code | |
1c79356b A |
1056 | stw r14,savesr14(r13) ; Save copyin/copyout |
1057 | ||
9bccf70c A |
1058 | |
1059 | ; | |
1060 | ; Here we will save some floating point and vector status | |
1061 | ; and we also set a clean default status for a new interrupt level. | |
1062 | ; Note that we assume that emfp0 is on an altivec boundary | |
1063 | ; and that R10 points to it (as a displacement from R2). | |
1064 | ; | |
1065 | ||
1066 | lis r8,hi16(MASK(MSR_VEC)) ; Get the vector enable bit | |
1067 | mfmsr r6 ; Get the current MSR value | |
1068 | ori r8,r8,lo16(MASK(MSR_FP)) ; Add in the float enable | |
1c79356b | 1069 | li r19,0 ; Assume no Altivec |
9bccf70c A |
1070 | or r7,r6,r8 ; Enable floating point |
1071 | li r9,0 ; Get set to clear VRSAVE | |
1072 | mtmsr r7 ; Do it | |
1073 | isync | |
1c79356b A |
1074 | |
1075 | bf featAltivec,noavec ; No Altivec on this CPU... | |
9bccf70c A |
1076 | addi r14,r10,16 ; Displacement to second vector register |
1077 | stvxl v0,r10,r2 ; Save a register | |
1078 | stvxl v1,r14,r2 ; Save a second register | |
1079 | mfvscr v0 ; Get the vector status register | |
1080 | la r28,savevscr(r13) ; Point to the status area | |
1081 | vspltish v1,1 ; Turn on the non-Java bit and saturate | |
1082 | stvxl v0,0,r28 ; Save the vector status | |
1083 | vspltisw v0,1 ; Turn on the saturate bit | |
1c79356b | 1084 | mfspr r19,vrsave ; Get the VRSAVE register |
9bccf70c | 1085 | vxor v1,v1,v0 ; Turn off saturate |
1c79356b | 1086 | mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level |
9bccf70c A |
1087 | mtvscr v1 ; Set the non-java, no saturate status for new level |
1088 | ||
1089 | lvxl v0,r10,r2 ; Restore first work register | |
1090 | lvxl v1,r14,r2 ; Restore second work register | |
1091 | ||
1092 | noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags | |
1093 | ||
1c79356b A |
1094 | ; |
1095 | ; We need to save the FPSCR as if it is normal context. | |
1096 | ; This is because pending exceptions will cause an exception even if | |
1097 | ; FP is disabled. We need to clear the FPSCR when we first start running in the | |
1098 | ; kernel. | |
1099 | ; | |
1c79356b A |
1100 | |
1101 | bf- featFP,nofpexe ; No possible floating point exceptions... | |
1102 | ||
1c79356b A |
1103 | stfd f0,emfp0(r2) ; Save FPR0 |
1104 | stfd f1,emfp1(r2) ; Save FPR1 | |
1105 | mffs f0 ; Get the FPSCR | |
1106 | fsub f1,f1,f1 ; Make a 0 | |
9bccf70c | 1107 | stfd f0,savefpscrpad(r13) ; Save the FPSCR |
1c79356b A |
1108 | mtfsf 0xFF,f1 ; Clear it |
1109 | lfd f0,emfp0(r2) ; Restore FPR0 | |
1110 | lfd f1,emfp1(r2) ; Restore FPR1 | |
9bccf70c A |
1111 | |
1112 | nofpexe: mtmsr r6 ; Turn off FP and vector | |
1c79356b | 1113 | isync |
9bccf70c | 1114 | |
1c79356b | 1115 | |
9bccf70c A |
1116 | ; |
1117 | ; Everything is saved at this point, except for FPRs, and VMX registers. | |
1118 | ; Time for us to get a new savearea and then trace interrupt if it is enabled. | |
1119 | ; | |
1120 | ||
1121 | li r0,SAVgeneral ; Get the savearea type value | |
1122 | lis r23,hi16(EXT(trcWork)) ; Get the trace work area address | |
1123 | mr r14,r11 ; Save the interrupt code across the call | |
1124 | stb r0,SAVflags+2(r13) ; Mark valid context | |
1125 | ori r23,r23,lo16(EXT(trcWork)) ; Get the rest | |
1126 | rlwinm r22,r11,30,0,31 ; Divide interrupt code by 2 | |
1127 | lwz r25,traceMask(r23) ; Get the trace mask | |
1128 | addi r22,r22,10 ; Adjust code so we shift into CR5 | |
1129 | ||
1130 | bl EXT(save_get_phys) ; Grab a savearea | |
1131 | ||
1132 | mfsprg r2,0 ; Get back the per_proc block | |
1133 | rlwnm r7,r25,r22,22,22 ; Set CR5_EQ bit position to 0 if tracing allowed | |
1134 | lhz r19,PP_CPU_NUMBER(r2) ; Get the logical processor number | |
1135 | li r26,0x8 ; Get start of cpu mask | |
1136 | mr r11,r14 ; Get the exception code back | |
1137 | srw r26,r26,r19 ; Get bit position of cpu number | |
1138 | mtcrf 0x04,r7 ; Set CR5 to show trace or not | |
1139 | and. r26,r26,r25 ; See if we trace this cpu | |
1140 | stw r3,next_savearea(r2) ; Remember the savearea we just got for the next rupt | |
1141 | crandc cr5_eq,cr5_eq,cr0_eq ; Turn off tracing if cpu is disabled | |
1142 | ; | |
1143 | ; At this point, we can take another exception and lose nothing. | |
1144 | ; | |
1145 | ||
1146 | lwz r0,saver0(r13) ; Get back interrupt time R0 (we need this whether we trace or not) | |
1147 | ||
1148 | bne+ cr5,skipTrace ; Skip all of this if no tracing here... | |
1c79356b | 1149 | |
9bccf70c A |
1150 | ; |
1151 | ; We select a trace entry using a compare and swap on the next entry field. | |
1152 | ; Since we do not lock the actual trace buffer, there is a potential that | |
1153 | ; another processor could wrap and trash our entry. Who cares? | |
1154 | ; | |
1c79356b | 1155 | |
9bccf70c A |
1156 | lwz r25,traceStart(r23) ; Get the start of trace table |
1157 | lwz r26,traceEnd(r23) ; Get end of trace table | |
1158 | ||
1159 | trcsel: lwarx r20,0,r23 ; Get and reserve the next slot to allocate | |
1c79356b | 1160 | |
9bccf70c A |
1161 | addi r22,r20,LTR_size ; Point to the next trace entry |
1162 | cmplw r22,r26 ; Do we need to wrap the trace table? | |
1163 | bne+ gotTrcEnt ; No wrap, we got us a trace entry... | |
1c79356b | 1164 | |
9bccf70c A |
1165 | mr r22,r25 ; Wrap back to start |
1166 | ||
1167 | gotTrcEnt: stwcx. r22,0,r23 ; Try to update the current pointer | |
1168 | bne- trcsel ; Collision, try again... | |
1c79356b A |
1169 | |
1170 | #if ESPDEBUG | |
9bccf70c A |
1171 | dcbf 0,r23 ; Force to memory |
1172 | sync | |
1c79356b A |
1173 | #endif |
1174 | ||
1175 | bf- featL1ena,skipz6 ; L1 cache is disabled... | |
9bccf70c | 1176 | dcbz 0,r20 ; Clear and allocate first trace line |
1c79356b | 1177 | skipz6: |
1c79356b | 1178 | |
9bccf70c A |
1179 | ; |
1180 | ; Let us cut that trace entry now. | |
1181 | ; | |
1c79356b | 1182 | |
1c79356b | 1183 | |
9bccf70c | 1184 | li r14,32 ; Offset to second line |
1c79356b A |
1185 | |
1186 | lwz r16,ruptStamp(r2) ; Get top of time base | |
1187 | lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp | |
1188 | ||
1189 | bf- featL1ena,skipz7 ; L1 cache is disabled... | |
9bccf70c A |
1190 | dcbz r14,r20 ; Zap the second half |
1191 | ||
1192 | skipz7: stw r16,LTR_timeHi(r20) ; Set the upper part of TB | |
1c79356b | 1193 | lwz r1,saver1(r13) ; Get back interrupt time R1 |
9bccf70c A |
1194 | stw r17,LTR_timeLo(r20) ; Set the lower part of TB |
1195 | lwz r18,saver2(r13) ; Get back interrupt time R2 | |
1196 | stw r0,LTR_r0(r20) ; Save off register 0 | |
1c79356b | 1197 | lwz r3,saver3(r13) ; Restore this one |
9bccf70c A |
1198 | sth r19,LTR_cpu(r20) ; Stash the cpu number |
1199 | stw r1,LTR_r1(r20) ; Save off register 1 | |
1c79356b | 1200 | lwz r4,saver4(r13) ; Restore this one |
9bccf70c | 1201 | stw r18,LTR_r2(r20) ; Save off register 2 |
1c79356b | 1202 | lwz r5,saver5(r13) ; Restore this one |
9bccf70c A |
1203 | stw r3,LTR_r3(r20) ; Save off register 3 |
1204 | lwz r16,savecr(r13) ; Get the CR value | |
1205 | stw r4,LTR_r4(r20) ; Save off register 4 | |
1206 | mfsrr0 r17 ; Get SRR0 back, it is still good | |
1207 | stw r5,LTR_r5(r20) ; Save off register 5 | |
1208 | mfsrr1 r18 ; SRR1 is still good in here | |
1209 | stw r16,LTR_cr(r20) ; Save the CR | |
1210 | stw r17,LTR_srr0(r20) ; Save the SSR0 | |
1211 | stw r18,LTR_srr1(r20) ; Save the SRR1 | |
1212 | mfdar r17 ; Get this back | |
1213 | lwz r16,savelr(r13) ; Get the LR | |
1214 | stw r17,LTR_dar(r20) ; Save the DAR | |
1215 | mfctr r17 ; Get the CTR (still good in register) | |
1216 | stw r16,LTR_lr(r20) ; Save the LR | |
1c79356b | 1217 | #if 0 |
9bccf70c | 1218 | lwz r17,emfp1(r2) ; (TEST/DEBUG) |
1c79356b | 1219 | #endif |
9bccf70c A |
1220 | stw r17,LTR_ctr(r20) ; Save off the CTR |
1221 | stw r13,LTR_save(r20) ; Save the savearea | |
1222 | sth r11,LTR_excpt(r20) ; Save the exception type | |
1c79356b A |
1223 | #if ESPDEBUG |
1224 | addi r17,r20,32 ; (TEST/DEBUG) | |
1225 | dcbst br0,r20 ; (TEST/DEBUG) | |
1226 | dcbst br0,r17 ; (TEST/DEBUG) | |
1227 | sync ; (TEST/DEBUG) | |
1228 | #endif | |
1229 | ||
9bccf70c A |
1230 | ; |
1231 | ; We are done with the trace, except for maybe modifying the exception | |
1232 | ; code later on. So, that means that we need to save R20 and CR5. | |
1233 | ; | |
1234 | ; So, finish setting up the kernel registers now. | |
1235 | ; | |
1c79356b | 1236 | |
9bccf70c | 1237 | skipTrace: lhz r21,PP_CPU_NUMBER(r2) ; Get the logical processor number |
1c79356b | 1238 | lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters |
1c79356b | 1239 | lwz r7,savesrr1(r13) ; Get the entering MSR |
9bccf70c | 1240 | ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters |
1c79356b | 1241 | rlwinm r21,r21,8,20,23 ; Get index to processor counts |
9bccf70c A |
1242 | mtcrf 0x80,r0 ; Set our CR0 to the high nybble of possible syscall code |
1243 | rlwinm r6,r0,1,0,31 ; Move sign bit to the end | |
1244 | cmplwi cr1,r11,T_SYSTEM_CALL ; Did we get a system call? | |
1c79356b | 1245 | add r12,r12,r21 ; Point to the processor count area |
9bccf70c | 1246 | crandc cr0_lt,cr0_lt,cr0_gt ; See if we have R0 equal to 0b10xx...x |
1c79356b | 1247 | lwzx r22,r12,r11 ; Get the old value |
9bccf70c | 1248 | cmplwi cr3,r11,T_IN_VAIN ; Was this all in vain? All for nothing? |
1c79356b | 1249 | addi r22,r22,1 ; Count this one |
9bccf70c | 1250 | cmplwi cr2,r6,1 ; See if original R0 had the CutTrace request code in it |
1c79356b A |
1251 | stwx r22,r12,r11 ; Store it back |
1252 | ||
9bccf70c | 1253 | beq- cr3,EatRupt ; Interrupt was all for nothing... |
1c79356b | 1254 | cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? |
9bccf70c A |
1255 | bne+ cr1,noCutT ; Not a system call... |
1256 | bnl+ cr0,noCutT ; R0 not 0b10xxx...x, can not be any kind of magical system call... | |
1c79356b A |
1257 | rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? |
1258 | lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags | |
1259 | beq+ FCisok ; From supervisor state... | |
1260 | ||
1261 | ori r1,r1,lo16(EXT(dgWork)) ; Again | |
1262 | lwz r1,dgFlags(r1) ; Get the flags | |
1263 | rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? | |
1264 | beq- noCutT ; No... | |
1265 | ||
9bccf70c | 1266 | FCisok: beq- cr2,isCutTrace ; This is a CutTrace system call... |
1c79356b | 1267 | |
9bccf70c A |
1268 | ; |
1269 | ; Here is where we call the firmware. If it returns T_IN_VAIN, that means | |
1270 | ; that it has handled the interruption. Remember: thou shalt not trash R13 | |
1271 | ; or R20 while you are away. Anything else is ok. | |
1272 | ; | |
1273 | ||
1274 | lwz r3,saver3(r13) ; Restore the first parameter | |
1275 | bl EXT(FirmwareCall) ; Go handle the firmware call.... | |
1276 | ||
1277 | cmplwi r3,T_IN_VAIN ; Was it handled? | |
1278 | mfsprg r2,0 ; Restore the per_proc | |
1279 | beq+ EatRupt ; Interrupt was handled... | |
1280 | mr r11,r3 ; Put the rupt code into the right register | |
1281 | b filter ; Go to the normal system call handler... | |
1282 | ||
1283 | .align 5 | |
1c79356b A |
1284 | |
1285 | isCutTrace: | |
9bccf70c A |
1286 | li r7,-32768 ; Get a 0x8000 for the exception code |
1287 | bne- cr5,EatRupt ; Tracing is disabled... | |
1288 | sth r7,LTR_excpt(r20) ; Modify the exception type to a CutTrace | |
1289 | b EatRupt ; Time to go home... | |
1c79356b | 1290 | |
9bccf70c A |
1291 | ; We are here because we did not have a CutTrace system call |
1292 | ||
1293 | .align 5 | |
1c79356b A |
1294 | |
1295 | noCutT: beq- cr3,MachineCheck ; Whoa... Machine check... | |
9bccf70c | 1296 | |
1c79356b A |
1297 | ; |
1298 | ; The following interrupts are the only ones that can be redriven | |
1299 | ; by the higher level code or emulation routines. | |
1300 | ; | |
1301 | ||
9bccf70c A |
1302 | Redrive: cmplwi cr0,r11,T_IN_VAIN ; Did the signal handler eat the signal? |
1303 | mfsprg r2,0 ; Get the per_proc block | |
1304 | beq+ cr0,EatRupt ; Bail now if we ate the rupt... | |
1c79356b A |
1305 | |
1306 | ||
9bccf70c A |
1307 | ; |
1308 | ; Here is where we check for the other fast-path exceptions: translation exceptions, | |
1309 | ; emulated instructions, etc. | |
1310 | ; | |
1c79356b | 1311 | |
9bccf70c A |
1312 | filter: cmplwi cr3,r11,T_ALTIVEC_ASSIST ; Check for an Altivec denorm assist |
1313 | cmplwi cr4,r11,T_ALIGNMENT ; See if we got an alignment exception | |
1314 | cmplwi cr1,r11,T_PROGRAM ; See if we got a program exception | |
1315 | cmplwi cr2,r11,T_INSTRUCTION_ACCESS ; Check on an ISI | |
1c79356b A |
1316 | bne+ cr3,noAltivecAssist ; It is not an assist... |
1317 | b EXT(AltivecAssist) ; It is an assist... | |
9bccf70c A |
1318 | |
1319 | .align 5 | |
1c79356b A |
1320 | |
1321 | noAltivecAssist: | |
9bccf70c A |
1322 | bne+ cr4,noAlignAssist ; No alignment here... |
1323 | b EXT(AlignAssist) ; Go try to emulate... | |
1324 | ||
1325 | .align 5 | |
1326 | ||
1327 | noAlignAssist: | |
1c79356b A |
1328 | bne+ cr1,noEmulate ; No emulation here... |
1329 | b EXT(Emulate) ; Go try to emulate... | |
1330 | ||
9bccf70c | 1331 | .align 5 |
1c79356b | 1332 | |
9bccf70c A |
1333 | noEmulate: cmplwi cr3,r11,T_CSWITCH ; Are we context switching |
1334 | cmplwi r11,T_DATA_ACCESS ; Check on a DSI | |
1335 | beq- cr2,DSIorISI ; It is a PTE fault... | |
1336 | beq- cr3,conswtch ; It is a context switch... | |
1337 | bne+ PassUp ; It is not a PTE fault... | |
1338 | ||
1339 | ; | |
1340 | ; This call will either handle the fault, in which case it will not | |
1341 | ; return, or return to pass the fault up the line. | |
1342 | ; | |
1c79356b | 1343 | |
9bccf70c | 1344 | DSIorISI: mr r3,r11 ; Move the rupt code |
1c79356b | 1345 | |
9bccf70c | 1346 | bl EXT(handlePF) ; See if we can handle this fault |
1c79356b A |
1347 | |
1348 | lwz r0,savesrr1(r13) ; Get the MSR in use at exception time | |
9bccf70c | 1349 | mfsprg r2,0 ; Get back per_proc |
1c79356b A |
1350 | cmplwi cr1,r3,T_IN_VAIN ; Was it handled? |
1351 | andi. r4,r0,lo16(MASK(MSR_RI)) ; See if the recover bit is on | |
9bccf70c | 1352 | mr r11,r3 ; Put interrupt code back into the right register |
1c79356b | 1353 | beq+ cr1,EatRupt ; Yeah, just blast back to the user... |
1c79356b | 1354 | beq+ PassUp ; Not on, normal case... |
9bccf70c A |
1355 | ; |
1356 | ; Here is where we handle the "recovery mode" stuff. | |
1357 | ; This is set by an emulation routine to trap any faults when it is fetching data or | |
1358 | ; instructions. | |
1359 | ; | |
1360 | ; If we get a fault, we turn off RI, set CR0_EQ to false, bump the PC, and set R0 | |
1361 | ; and R1 to the DAR and DSISR, respectively. | |
1362 | ; | |
1c79356b A |
1363 | lwz r4,savesrr0(r13) ; Get the failing instruction address |
1364 | lwz r5,savecr(r13) ; Get the condition register | |
1c79356b | 1365 | addi r4,r4,4 ; Skip failing instruction |
9bccf70c | 1366 | lwz r6,savedar(r13) ; Get the DAR |
1c79356b | 1367 | rlwinm r5,r5,0,3,1 ; Clear CR0_EQ to let emulation code know we failed |
9bccf70c A |
1368 | lwz r7,savedsisr(r13) ; Grab the DSISR |
1369 | stw r0,savesrr1(r13) ; Save the result MSR | |
1370 | stw r4,savesrr0(r13) ; Save resume address | |
1371 | stw r5,savecr(r13) ; And the resume CR | |
1372 | stw r6,saver0(r13) ; Pass back the DAR | |
1373 | stw r7,saver1(r13) ; Pass back the DSISR | |
1c79356b A |
1374 | b EatRupt ; Resume emulated code |
1375 | ||
9bccf70c A |
1376 | ; |
1377 | ; Here is where we handle the context switch firmware call. The old | |
1378 | ; context has been saved, and the new savearea is in saver3. We will just | |
1379 | ; muck around with the savearea pointers, and then join the exit routine | |
1380 | ; | |
1381 | ||
1382 | .align 5 | |
1383 | ||
1384 | conswtch: | |
1385 | mr r29,r13 ; Save the save | |
1386 | rlwinm r30,r13,0,0,19 ; Get the start of the savearea block | |
1387 | lwz r5,saver3(r13) ; Switch to the new savearea | |
1388 | lwz r30,SACvrswap(r30) ; get real to virtual translation | |
1389 | mr r13,r5 ; Switch saveareas | |
1390 | xor r27,r29,r30 ; Flip to virtual | |
1391 | stw r27,saver3(r5) ; Push the new savearea to the switch to routine | |
1392 | b EatRupt ; Start it up... | |
1c79356b A |
1393 | |
1394 | ; | |
1395 | ; Handle machine check here. | |
1396 | ; | |
1397 | ; ? | |
1398 | ; | |
9bccf70c A |
1399 | |
1400 | .align 5 | |
1401 | ||
1c79356b | 1402 | MachineCheck: |
9bccf70c | 1403 | |
1c79356b A |
1404 | lwz r27,savesrr1(r13) ; ? |
1405 | rlwinm. r11,r27,0,dcmck,dcmck ; ? | |
1406 | beq+ notDCache ; ? | |
1407 | ||
1408 | mfspr r11,msscr0 ; ? | |
1409 | dssall ; ? | |
1410 | sync | |
1411 | ||
1412 | lwz r27,savesrr1(r13) ; ? | |
1413 | ||
1414 | hiccup: cmplw r27,r27 ; ? | |
1415 | bne- hiccup ; ? | |
1416 | isync ; ? | |
1417 | ||
1418 | oris r11,r11,hi16(dl1hwfm) ; ? | |
1419 | mtspr msscr0,r11 ; ? | |
1420 | ||
1421 | rstbsy: mfspr r11,msscr0 ; ? | |
1422 | ||
1423 | rlwinm. r11,r11,0,dl1hwf,dl1hwf ; ? | |
1424 | bne rstbsy ; ? | |
1425 | ||
1426 | sync ; ? | |
1427 | ||
1c79356b A |
1428 | b EatRupt ; ? |
1429 | ||
9bccf70c | 1430 | .align 5 |
1c79356b A |
1431 | |
1432 | notDCache: | |
1433 | ; | |
1434 | ; Check if the failure was in | |
1435 | ; ml_probe_read. If so, this is expected, so modify the PC to | |
1436 | ; ml_probe_read_mck and then eat the exception. | |
1437 | ; | |
1438 | lwz r30,savesrr0(r13) ; Get the failing PC | |
1439 | lis r28,hi16(EXT(ml_probe_read_mck)) ; High order part | |
1440 | lis r27,hi16(EXT(ml_probe_read)) ; High order part | |
1441 | ori r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part | |
1442 | ori r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part | |
1443 | cmplw r30,r28 ; Check highest possible | |
1444 | cmplw cr1,r30,r27 ; Check lowest | |
1445 | bge- PassUp ; Outside of range | |
1446 | blt- cr1,PassUp ; Outside of range | |
1447 | ; | |
1448 | ; We need to fix up the BATs here because the probe | |
1449 | ; routine messed them all up... As long as we are at it, | |
1450 | ; fix up to return directly to caller of probe. | |
1451 | ; | |
1452 | ||
d52fe63f A |
1453 | lis r11,hi16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address |
1454 | ori r11,r11,lo16(EXT(shadow_BAT)+shdDBAT) ; Get shadow address | |
1455 | ||
1456 | lwz r30,0(r11) ; Pick up DBAT 0 high | |
1457 | lwz r28,4(r11) ; Pick up DBAT 0 low | |
1458 | lwz r27,8(r11) ; Pick up DBAT 1 high | |
1459 | lwz r18,16(r11) ; Pick up DBAT 2 high | |
1460 | lwz r11,24(r11) ; Pick up DBAT 3 high | |
1c79356b A |
1461 | |
1462 | sync | |
1463 | mtdbatu 0,r30 ; Restore DBAT 0 high | |
1464 | mtdbatl 0,r28 ; Restore DBAT 0 low | |
1465 | mtdbatu 1,r27 ; Restore DBAT 1 high | |
d52fe63f A |
1466 | mtdbatu 2,r18 ; Restore DBAT 2 high |
1467 | mtdbatu 3,r11 ; Restore DBAT 3 high | |
1c79356b A |
1468 | sync |
1469 | ||
d52fe63f A |
1470 | lwz r27,saver6(r13) ; Get the saved R6 value |
1471 | mtspr hid0,r27 ; Restore HID0 | |
1472 | isync | |
1473 | ||
1c79356b A |
1474 | lwz r28,savelr(r13) ; Get return point |
1475 | lwz r27,saver0(r13) ; Get the saved MSR | |
1476 | li r30,0 ; Get a failure RC | |
1477 | stw r28,savesrr0(r13) ; Set the return point | |
1478 | stw r27,savesrr1(r13) ; Set the continued MSR | |
1479 | stw r30,saver3(r13) ; Set return code | |
1c79356b A |
1480 | b EatRupt ; Yum, yum, eat it all up... |
1481 | ||
1482 | /* | |
1483 | * Here's where we come back from some instruction emulator. If we come back with | |
1484 | * T_IN_VAIN, the emulation is done and we should just reload state and directly | |
1485 | * go back to the interrupted code. Otherwise, we'll check to see if | |
1486 | * we need to redrive with a different interrupt, i.e., DSI. | |
1487 | */ | |
1488 | ||
1489 | .align 5 | |
1490 | .globl EXT(EmulExit) | |
1491 | ||
1492 | LEXT(EmulExit) | |
1493 | ||
9bccf70c | 1494 | cmplwi r11,T_IN_VAIN ; Was it emulated? |
1c79356b A |
1495 | lis r1,hi16(SAVredrive) ; Get redrive request |
1496 | mfsprg r2,0 ; Restore the per_proc area | |
9bccf70c | 1497 | beq+ EatRupt ; Yeah, just blast back to the user... |
1c79356b A |
1498 | lwz r4,SAVflags(r13) ; Pick up the flags |
1499 | ||
1500 | and. r0,r4,r1 ; Check if redrive requested | |
1501 | andc r4,r4,r1 ; Clear redrive | |
1502 | ||
1503 | beq+ PassUp ; No redrive, just keep on going... | |
1504 | ||
1c79356b A |
1505 | stw r4,SAVflags(r13) ; Set the flags |
1506 | b Redrive ; Redrive the exception... | |
1507 | ||
9bccf70c A |
1508 | ; |
1509 | ; Jump into main handler code switching on VM at the same time. | |
1510 | ; | |
1511 | ; We assume kernel data is mapped contiguously in physical | |
1512 | ; memory, otherwise we would need to switch on (at least) virtual data. | |
1513 | ; SRs are already set up. | |
1514 | ; | |
1515 | ||
1516 | .align 5 | |
1c79356b | 1517 | |
1c79356b A |
1518 | PassUp: lis r2,hi16(EXT(exception_handlers)) ; Get exception vector address |
1519 | ori r2,r2,lo16(EXT(exception_handlers)) ; And low half | |
9bccf70c A |
1520 | lwzx r6,r2,r11 ; Get the actual exception handler address |
1521 | ||
1522 | PassUpDeb: mtsrr0 r6 ; Set up the handler address | |
1523 | rlwinm r5,r13,0,0,19 ; Back off to the start of savearea block | |
1524 | ||
1525 | mfmsr r3 ; Get our MSR | |
1526 | rlwinm r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1 ; Clear all but the trace bits | |
1527 | li r2,MSR_SUPERVISOR_INT_OFF ; Get our normal MSR value | |
1528 | lwz r5,SACvrswap(r5) ; Get real to virtual conversion | |
1529 | or r2,r2,r3 ; Keep the trace bits if they are on | |
1530 | mr r3,r11 ; Pass the exception code in the paramter reg | |
1531 | mtsrr1 r2 ; Set up our normal MSR value | |
1532 | xor r4,r13,r5 ; Pass up the virtual address of context savearea | |
1533 | ||
1534 | rfi ; Launch the exception handler | |
1535 | ||
1536 | .long 0 ; Leave these here gol durn it! | |
1c79356b A |
1537 | .long 0 |
1538 | .long 0 | |
1539 | .long 0 | |
1540 | .long 0 | |
1541 | .long 0 | |
1542 | .long 0 | |
1543 | .long 0 | |
1544 | ||
1545 | /* | |
1546 | * This routine is the only place where we return from an interruption. | |
1547 | * Anyplace else is wrong. Even if I write the code, it's still wrong. | |
1548 | * Feel free to come by and slap me if I do do it--even though I may | |
1549 | * have had a good reason to do it. | |
1550 | * | |
1551 | * All we need to remember here is that R13 must point to the savearea | |
1552 | * that has the context we need to load up. Translation and interruptions | |
1553 | * must be disabled. | |
1554 | * | |
1555 | * This code always loads the context in the savearea pointed to | |
1556 | * by R13. In the process, it throws away the savearea. If there | |
1557 | * is any tomfoolery with savearea stacks, it must be taken care of | |
1558 | * before we get here. | |
1559 | * | |
1560 | * Speaking of tomfoolery, this is where we synthesize interruptions | |
9bccf70c | 1561 | * if we need to. |
1c79356b A |
1562 | */ |
1563 | ||
1564 | .align 5 | |
1565 | ||
9bccf70c A |
1566 | EatRupt: mfsprg r29,0 ; Get the per_proc block back |
1567 | mr r31,r13 ; Move the savearea pointer to the far end of the register set | |
1568 | ||
1569 | lwz r30,quickfret(r29) ; Pick up the quick fret list, if any | |
1c79356b | 1570 | |
9bccf70c A |
1571 | mfsprg r27,2 ; Get the processor features |
1572 | lwz r21,savesrr1(r31) ; Get destination MSR | |
1573 | ||
1574 | erchkfret: mr. r3,r30 ; Any savearea to quickly release? | |
1575 | beq+ ernoqfret ; No quickfrets... | |
1576 | lwz r30,SAVprev(r30) ; Chain back now | |
1577 | ||
1578 | bl EXT(save_ret_phys) ; Put it on the free list | |
1579 | stw r30,quickfret(r29) ; Dequeue previous guy (really, it is ok to wait until after the release) | |
1580 | b erchkfret ; Try the next one... | |
1c79356b | 1581 | |
1c79356b | 1582 | |
9bccf70c A |
1583 | .align 5 |
1584 | ||
1585 | ernoqfret: mtcrf 0x60,r27 ; Set CRs with thermal facilities | |
1586 | rlwinm. r0,r21,0,MSR_EE_BIT,MSR_EE_BIT ; Are interruptions going to be enabled? | |
1c79356b | 1587 | crandc 31,pfThermalb,pfThermIntb ; See if we have both thermometer and not interrupt facility |
9bccf70c A |
1588 | la r21,saver0(r31) ; Point to the first thing we restore |
1589 | crandc 31,cr0_eq,31 ; Factor in enablement | |
1c79356b A |
1590 | bf 31,tempisok ; No thermal checking needed... |
1591 | ||
1592 | ; | |
1593 | ; We get to here if 1) there is a thermal facility, and 2) the hardware | |
1594 | ; will or cannot interrupt, and 3) the interrupt will be enabled after this point. | |
1595 | ; | |
1596 | ||
1597 | mfspr r16,thrm3 ; Get thermal 3 | |
1598 | mfspr r14,thrm1 ; Get thermal 2 | |
1599 | rlwinm. r16,r16,0,thrme,thrme ; Is the themometer enabled? | |
1600 | mfspr r15,thrm2 ; Get thermal 2 | |
1601 | beq- tempisok ; No thermometer... | |
1602 | rlwinm r16,r14,2,28,31 ; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits | |
1603 | srawi r0,r15,31 ; Make a mask of 1s if temprature over | |
1604 | rlwinm r30,r15,2,28,31 ; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits | |
1605 | ; | |
1606 | ; Note that the following compare check that V, TIN, and TIV are set and that TIE is cleared. | |
1607 | ; This insures that we only emulate when the hardware is not set to interrupt. | |
1608 | ; | |
1609 | cmplwi cr0,r16,7 ; Is there a valid pending interruption for THRM1? | |
1610 | cmplwi cr1,r30,7 ; Is there a valid pending interruption for THRM2? | |
1611 | and r15,r15,r0 ; Keep high temp if that interrupted, zero if not | |
1612 | cror cr0_eq,cr0_eq,cr1_eq ; Merge both | |
1613 | andc r14,r14,r0 ; Keep low if high did not interrupt, zero if it did | |
1614 | bne+ tempisok ; Nope, temprature is in range | |
1615 | ||
9bccf70c | 1616 | li r11,T_THERMAL ; Time to emulate a thermal interruption |
1c79356b A |
1617 | or r14,r14,r15 ; Get contents of interrupting register |
1618 | mr r13,r31 ; Make sure savearea is pointed to correctly | |
9bccf70c | 1619 | stw r11,saveexception(r31) ; Set the exception code |
1c79356b | 1620 | stw r14,savedar(r31) ; Set the contents of the interrupting register into the dar |
1c79356b | 1621 | |
9bccf70c A |
1622 | ; |
1623 | ; This code is here to prevent a problem that will probably never happen. If we are | |
1624 | ; returning from an emulation routine (alignment, altivec assist, etc.) the SRs may | |
1625 | ; not be set to the proper kernel values. Then, if we were to emulate a thermal here, | |
1626 | ; we would end up running in the kernel with a bogus SR. So, to prevent | |
1627 | ; this unfortunate circumstance, we slam the SRs here. (I worry too much...) | |
1628 | ; | |
1c79356b | 1629 | |
9bccf70c A |
1630 | lis r30,hi16(KERNEL_SEG_REG0_VALUE) ; Get the high half of the kernel SR0 value |
1631 | mtsr sr0,r30 ; Set the kernel SR0 | |
1632 | addis r30,r30,0x0010 ; Point to the second segment of kernel | |
1633 | mtsr sr1,r30 ; Set the kernel SR1 | |
1634 | addis r30,r30,0x0010 ; Point to the third segment of kernel | |
1635 | mtsr sr2,r30 ; Set the kernel SR2 | |
1636 | addis r30,r30,0x0010 ; Point to the third segment of kernel | |
1637 | mtsr sr3,r30 ; Set the kernel SR3 | |
1638 | b Redrive ; Go process this new interruption... | |
1c79356b | 1639 | |
1c79356b | 1640 | |
9bccf70c | 1641 | tempisok: dcbt 0,r21 ; Touch in the first thing we need |
1c79356b | 1642 | |
1c79356b | 1643 | ; |
9bccf70c | 1644 | ; Here we release the savearea. |
1c79356b | 1645 | ; |
9bccf70c A |
1646 | ; Important!!!! The savearea is released before we are done with it. When the |
1647 | ; local free savearea list (anchored at lclfree) gets too long, save_ret_phys | |
1648 | ; will trim the list, making the extra saveareas allocatable by another processor | |
1649 | ; The code in there must ALWAYS leave our savearea on the local list, otherwise | |
1650 | ; we could be very, very unhappy. The code there always queues the "just released" | |
1651 | ; savearea to the head of the local list. Then, if it needs to trim, it will | |
1652 | ; start with the SECOND savearea, leaving ours intact. | |
1c79356b A |
1653 | ; |
1654 | ; Build the SR values depending upon destination. If we are going to the kernel, | |
1655 | ; the SRs are almost all the way set up. SR14 (or the currently used copyin/out register) | |
1656 | ; must be set to whatever it was at the last exception because it varies. All the rest | |
1657 | ; have been set up already. | |
1658 | ; | |
1659 | ; If we are going into user space, we need to check a bit more. SR0, SR1, SR2, and | |
1660 | ; SR14 (current implementation) must be restored always. The others must be set if | |
1661 | ; they are different that what was loaded last time (i.e., tasks have switched). | |
1662 | ; We check the last loaded address space ID and if the same, we skip the loads. | |
1663 | ; This is a performance gain because SR manipulations are slow. | |
9bccf70c A |
1664 | ; |
1665 | ; There is also the special case when MSR_RI is set. This happens when we are trying to | |
1666 | ; make a special user state access when we are in the kernel. If we take an exception when | |
1667 | ; during that, the SRs may have been modified. Therefore, we need to restore them to | |
1668 | ; what they were before the exception because they could be non-standard. We saved them | |
1669 | ; during exception entry, so we will just load them here. | |
1c79356b A |
1670 | ; |
1671 | ||
9bccf70c A |
1672 | mr r3,r31 ; Get the exiting savearea in parm register |
1673 | bl EXT(save_ret_phys) ; Put it on the free list | |
1674 | ||
1675 | li r3,savesrr1 ; Get offset to the srr1 value | |
1676 | ||
1677 | lwarx r26,r3,r31 ; Get destination MSR and take reservation along the way (just so we can blow it away) | |
1678 | lwz r7,PP_USERPMAP(r29) ; Pick up the user pmap we may launch | |
1679 | rlwinm. r17,r26,0,MSR_RI_BIT,MSR_RI_BIT ; See if we are returning from a special fault | |
1c79356b | 1680 | cmplw cr3,r14,r14 ; Set that we do not need to stop streams |
9bccf70c A |
1681 | |
1682 | beq+ nSpecAcc ; Do not reload the kernel SRs if this is not a special access... | |
1683 | ||
1684 | lwz r14,savesr0(r31) ; Get SR0 at fault time | |
1685 | mtsr sr0,r14 ; Set SR0 | |
1686 | lwz r14,savesr1(r31) ; Get SR1 at fault time | |
1687 | mtsr sr1,r14 ; Set SR1 | |
1688 | lwz r14,savesr2(r31) ; Get SR2 at fault time | |
1689 | mtsr sr2,r14 ; Set SR2 | |
1690 | lwz r14,savesr3(r31) ; Get SR3 at fault timee | |
1691 | mtsr sr3,r14 ; Set SR3 | |
1692 | b segsdone ; We are all set up now... | |
1693 | ||
1694 | .align 5 | |
1695 | ||
1696 | nSpecAcc: rlwinm. r17,r26,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are going to user or system | |
1c79356b A |
1697 | li r14,PMAP_SEGS ; Point to segments |
1698 | bne+ gotouser ; We are going into user state... | |
1699 | ||
1700 | lwz r14,savesr14(r31) ; Get the copyin/out register at interrupt time | |
1701 | mtsr sr14,r14 ; Set SR14 | |
1702 | b segsdone ; We are all set up now... | |
1703 | ||
1704 | .align 5 | |
1705 | ||
1706 | gotouser: dcbt r14,r7 ; Touch the segment register contents | |
9bccf70c A |
1707 | lwz r9,spcFlags(r29) ; Pick up the special flags |
1708 | lwz r16,PP_LASTPMAP(r29) ; Pick up the last loaded pmap | |
1c79356b | 1709 | addi r14,r14,32 ; Second half of pmap segments |
0b4e3aa0 | 1710 | rlwinm r9,r9,userProtKeybit-2,2,2 ; Isolate the user state protection key |
1c79356b | 1711 | lwz r15,PMAP_SPACE(r7) ; Get the primary space |
0b4e3aa0 | 1712 | lwz r13,PMAP_VFLAGS(r7) ; Get the flags |
1c79356b | 1713 | dcbt r14,r7 ; Touch second page |
1c79356b | 1714 | oris r15,r15,hi16(SEG_REG_PROT) ; Set segment 0 SR value |
0b4e3aa0 A |
1715 | mtcrf 0x0F,r13 ; Set CRs to correspond to the subordinate spaces |
1716 | xor r15,r15,r9 ; Flip to proper segment register key | |
9bccf70c | 1717 | lhz r9,PP_CPU_FLAGS(r29) ; Get the processor flags |
1c79356b A |
1718 | |
1719 | addis r13,r15,0x0000 ; Get SR0 value | |
1720 | bf 16,nlsr0 ; No alternate here... | |
1721 | lwz r13,PMAP_SEGS+(0*4)(r7) ; Get SR0 value | |
1722 | ||
1723 | nlsr0: mtsr sr0,r13 ; Load up the SR | |
1724 | rlwinm r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on | |
1725 | ||
1726 | addis r13,r15,0x0010 ; Get SR1 value | |
1727 | bf 17,nlsr1 ; No alternate here... | |
1728 | lwz r13,PMAP_SEGS+(1*4)(r7) ; Get SR1 value | |
1729 | ||
1730 | nlsr1: mtsr sr1,r13 ; Load up the SR | |
1731 | or r26,r26,r9 ; Flip on the BE bit for special trace if needed | |
1732 | ||
9bccf70c | 1733 | cmplw cr3,r7,r16 ; Are we running the same segs as last time? |
1c79356b A |
1734 | |
1735 | addis r13,r15,0x0020 ; Get SR2 value | |
1736 | bf 18,nlsr2 ; No alternate here... | |
1737 | lwz r13,PMAP_SEGS+(2*4)(r7) ; Get SR2 value | |
1738 | ||
1739 | nlsr2: mtsr sr2,r13 ; Load up the SR | |
1740 | ||
1741 | addis r13,r15,0x0030 ; Get SR3 value | |
1742 | bf 19,nlsr3 ; No alternate here... | |
1743 | lwz r13,PMAP_SEGS+(3*4)(r7) ; Get SR3 value | |
1744 | ||
1745 | nlsr3: mtsr sr3,r13 ; Load up the SR | |
1746 | ||
1747 | addis r13,r15,0x00E0 ; Get SR14 value | |
1748 | bf 30,nlsr14 ; No alternate here... | |
1749 | lwz r13,PMAP_SEGS+(14*4)(r7) ; Get SR14 value | |
1750 | ||
1751 | nlsr14: mtsr sr14,r13 ; Load up the SR | |
1752 | ||
9bccf70c | 1753 | beq+ cr3,segsdone ; All done if same pmap as last time... |
1c79356b | 1754 | |
9bccf70c | 1755 | stw r7,PP_LASTPMAP(r29) ; Remember what we just loaded |
0b4e3aa0 | 1756 | |
1c79356b A |
1757 | addis r13,r15,0x0040 ; Get SR4 value |
1758 | bf 20,nlsr4 ; No alternate here... | |
1759 | lwz r13,PMAP_SEGS+(4*4)(r7) ; Get SR4 value | |
1760 | ||
1761 | nlsr4: mtsr sr4,r13 ; Load up the SR | |
1762 | ||
1763 | addis r13,r15,0x0050 ; Get SR5 value | |
1764 | bf 21,nlsr5 ; No alternate here... | |
1765 | lwz r13,PMAP_SEGS+(5*4)(r7) ; Get SR5 value | |
1766 | ||
1767 | nlsr5: mtsr sr5,r13 ; Load up the SR | |
1768 | ||
1769 | addis r13,r15,0x0060 ; Get SR6 value | |
1770 | bf 22,nlsr6 ; No alternate here... | |
1771 | lwz r13,PMAP_SEGS+(6*4)(r7) ; Get SR6 value | |
1772 | ||
1773 | nlsr6: mtsr sr6,r13 ; Load up the SR | |
1774 | ||
1775 | addis r13,r15,0x0070 ; Get SR7 value | |
1776 | bf 23,nlsr7 ; No alternate here... | |
1777 | lwz r13,PMAP_SEGS+(7*4)(r7) ; Get SR7 value | |
1778 | ||
1779 | nlsr7: mtsr sr7,r13 ; Load up the SR | |
1780 | ||
1781 | addis r13,r15,0x0080 ; Get SR8 value | |
1782 | bf 24,nlsr8 ; No alternate here... | |
1783 | lwz r13,PMAP_SEGS+(8*4)(r7) ; Get SR8 value | |
1784 | ||
1785 | nlsr8: mtsr sr8,r13 ; Load up the SR | |
1786 | ||
1787 | addis r13,r15,0x0090 ; Get SR9 value | |
1788 | bf 25,nlsr9 ; No alternate here... | |
1789 | lwz r13,PMAP_SEGS+(9*4)(r7) ; Get SR9 value | |
1790 | ||
1791 | nlsr9: mtsr sr9,r13 ; Load up the SR | |
1792 | ||
1793 | addis r13,r15,0x00A0 ; Get SR10 value | |
1794 | bf 26,nlsr10 ; No alternate here... | |
1795 | lwz r13,PMAP_SEGS+(10*4)(r7) ; Get SR10 value | |
1796 | ||
1797 | nlsr10: mtsr sr10,r13 ; Load up the SR | |
1798 | ||
1799 | addis r13,r15,0x00B0 ; Get SR11 value | |
1800 | bf 27,nlsr11 ; No alternate here... | |
1801 | lwz r13,PMAP_SEGS+(11*4)(r7) ; Get SR11 value | |
1802 | ||
1803 | nlsr11: mtsr sr11,r13 ; Load up the SR | |
1804 | ||
1805 | addis r13,r15,0x00C0 ; Get SR12 value | |
1806 | bf 28,nlsr12 ; No alternate here... | |
1807 | lwz r13,PMAP_SEGS+(12*4)(r7) ; Get SR12 value | |
1808 | ||
1809 | nlsr12: mtsr sr12,r13 ; Load up the SR | |
1810 | ||
1811 | addis r13,r15,0x00D0 ; Get SR13 value | |
1812 | bf 29,nlsr13 ; No alternate here... | |
1813 | lwz r13,PMAP_SEGS+(13*4)(r7) ; Get SR13 value | |
1814 | ||
1815 | nlsr13: mtsr sr13,r13 ; Load up the SR | |
1816 | ||
1817 | addis r13,r15,0x00F0 ; Get SR15 value | |
1818 | bf 31,nlsr15 ; No alternate here... | |
1819 | lwz r13,PMAP_SEGS+(15*4)(r7) ; Get SR15 value | |
1820 | ||
1821 | nlsr15: mtsr sr15,r13 ; Load up the SR | |
1822 | ||
9bccf70c A |
1823 | segsdone: stwcx. r26,r3,r31 ; Blow away any reservations we hold |
1824 | ||
1825 | li r21,emfp0 ; Point to the fp savearea | |
1c79356b | 1826 | lwz r25,savesrr0(r31) ; Get the SRR0 to use |
9bccf70c A |
1827 | la r28,saver8(r31) ; Point to the next line to use |
1828 | dcbt r21,r29 ; Start moving in a work area | |
1829 | lwz r0,saver0(r31) ; Restore R0 | |
1830 | dcbt 0,r28 ; Touch it in | |
1831 | lwz r1,saver1(r31) ; Restore R1 | |
1832 | lwz r2,saver2(r31) ; Restore R2 | |
1833 | la r28,saver16(r31) ; Point to the next line to get | |
1834 | lwz r3,saver3(r31) ; Restore R3 | |
1c79356b | 1835 | mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) |
9bccf70c A |
1836 | lwz r4,saver4(r31) ; Restore R4 |
1837 | mtsrr0 r25 ; Restore the SRR0 now | |
1838 | lwz r5,saver5(r31) ; Restore R5 | |
1839 | mtsrr1 r26 ; Restore the SRR1 now | |
1840 | lwz r6,saver6(r31) ; Restore R6 | |
1841 | ||
1842 | dcbt 0,r28 ; Touch that next line on in | |
1843 | la r28,savevscr(r31) ; Point to the saved facility context | |
1844 | ||
1845 | lwz r7,saver7(r31) ; Restore R7 | |
1846 | lwz r8,saver8(r31) ; Restore R8 | |
1847 | lwz r9,saver9(r31) ; Restore R9 | |
1848 | mfmsr r26 ; Get the current MSR | |
1849 | dcbt 0,r28 ; Touch saved facility context | |
1850 | lwz r10,saver10(r31) ; Restore R10 | |
1851 | lwz r11,saver11(r31) ; Restore R11 | |
1852 | oris r26,r26,hi16(MASK(MSR_VEC)) ; Get the vector enable bit | |
1853 | lwz r12,saver12(r31) ; Restore R12 | |
1854 | ori r26,r26,lo16(MASK(MSR_FP)) ; Add in the float enable | |
1855 | lwz r13,saver13(r31) ; Restore R13 | |
1856 | la r28,saver24(r31) ; Point to the next line to do | |
1857 | ||
1858 | ; | |
1859 | ; Note that floating point and vector will be enabled from here on until the RFI | |
1860 | ; | |
1861 | ||
1862 | mtmsr r26 ; Turn on vectors and floating point | |
1863 | isync | |
1864 | ||
1865 | dcbt 0,r28 ; Touch next line to do | |
1866 | ||
1867 | lwz r14,saver14(r31) ; Restore R14 | |
1868 | lwz r15,saver15(r31) ; Restore R15 | |
1869 | ||
1870 | bf pfAltivecb,noavec3 ; No Altivec on this CPU... | |
1871 | ||
1872 | la r28,savevscr(r31) ; Point to the status area | |
1873 | stvxl v0,r21,r29 ; Save a vector register | |
1874 | lvxl v0,0,r28 ; Get the vector status | |
1875 | lwz r27,savevrsave(r31) ; Get the vrsave | |
1876 | mtvscr v0 ; Set the vector status | |
1877 | ||
1878 | lvxl v0,r21,r29 ; Restore work vector register | |
1879 | beq+ cr3,noavec2 ; SRs have not changed, no need to stop the streams... | |
1880 | dssall ; Kill all data streams | |
1881 | sync | |
1882 | ||
1883 | noavec2: mtspr vrsave,r27 ; Set the vrsave | |
1884 | ||
1885 | noavec3: bf- pfFloatb,nofphere ; Skip if no floating point... | |
1886 | ||
1c79356b | 1887 | stfd f0,emfp0(r29) ; Save FP0 |
9bccf70c | 1888 | lfd f0,savefpscrpad(r31) ; Get the fpscr |
1c79356b A |
1889 | mtfsf 0xFF,f0 ; Restore fpscr |
1890 | lfd f0,emfp0(r29) ; Restore the used register | |
1891 | ||
9bccf70c A |
1892 | nofphere: lwz r16,saver16(r31) ; Restore R16 |
1893 | lwz r17,saver17(r31) ; Restore R17 | |
1894 | lwz r18,saver18(r31) ; Restore R18 | |
1895 | lwz r19,saver19(r31) ; Restore R19 | |
1896 | lwz r20,saver20(r31) ; Restore R20 | |
1897 | lwz r21,saver21(r31) ; Restore R21 | |
1898 | lwz r22,saver22(r31) ; Restore R22 | |
1899 | ||
1900 | lwz r23,saver23(r31) ; Restore R23 | |
1901 | lwz r24,saver24(r31) ; Restore R24 | |
1902 | lwz r25,saver25(r31) ; Restore R25 | |
1903 | lwz r26,saver26(r31) ; Restore R26 | |
1904 | lwz r27,saver27(r31) ; Restore R27 | |
1905 | ||
1906 | lwz r28,savecr(r31) ; Get CR to restore | |
1907 | ||
1908 | lwz r29,savexer(r31) ; Get XER to restore | |
1909 | mtcr r28 ; Restore the CR | |
1910 | lwz r28,savelr(r31) ; Get LR to restore | |
1911 | mtxer r29 ; Restore the XER | |
1912 | lwz r29,savectr(r31) ; Get the CTR to restore | |
1913 | mtlr r28 ; Restore the LR | |
1914 | lwz r28,saver30(r31) ; Get R30 | |
1915 | mtctr r29 ; Restore the CTR | |
1916 | lwz r29,saver31(r31) ; Get R31 | |
1917 | mtsprg 2,r28 ; Save R30 for later | |
1918 | lwz r28,saver28(r31) ; Restore R28 | |
1919 | mtsprg 3,r29 ; Save R31 for later | |
1920 | lwz r29,saver29(r31) ; Restore R29 | |
1c79356b | 1921 | |
1c79356b | 1922 | mfsprg r31,0 ; Get per_proc |
9bccf70c | 1923 | mfsprg r30,2 ; Restore R30 |
1c79356b A |
1924 | lwz r31,pfAvailable(r31) ; Get the feature flags |
1925 | mtsprg 2,r31 ; Set the feature flags | |
9bccf70c | 1926 | mfsprg r31,3 ; Restore R31 |
1c79356b | 1927 | |
9bccf70c | 1928 | rfi ; Click heels three times and think very hard that there is no place like home... |
1c79356b | 1929 | |
9bccf70c | 1930 | .long 0 ; Leave this here |
1c79356b A |
1931 | .long 0 |
1932 | .long 0 | |
1933 | .long 0 | |
1934 | .long 0 | |
1935 | .long 0 | |
1936 | .long 0 | |
1937 | .long 0 | |
1938 | ||
1939 | ||
1940 | ||
1941 | ||
1942 | /* | |
1943 | * exception_exit(savearea *) | |
1944 | * | |
1945 | * | |
1946 | * ENTRY : IR and/or DR and/or interruptions can be on | |
1947 | * R3 points to the physical address of a savearea | |
1948 | */ | |
1949 | ||
1950 | .align 5 | |
1951 | .globl EXT(exception_exit) | |
1952 | ||
1953 | LEXT(exception_exit) | |
1954 | ||
1955 | mfsprg r29,2 ; Get feature flags | |
9bccf70c | 1956 | mfmsr r30 ; Get the current MSR |
1c79356b | 1957 | mtcrf 0x04,r29 ; Set the features |
9bccf70c A |
1958 | rlwinm r30,r30,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Force floating point off |
1959 | mr r31,r3 ; Get the savearea in the right register | |
1960 | rlwinm r30,r30,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Force vectors off | |
1961 | li r10,savesrr0 ; Point to one of the first things we touch in the savearea on exit | |
1962 | andi. r30,r30,0x7FCF ; Turn off externals, IR, and DR | |
1c79356b A |
1963 | lis r1,hi16(SAVredrive) ; Get redrive request |
1964 | ||
1965 | bt pfNoMSRirb,eeNoMSR ; No MSR... | |
1966 | ||
1967 | mtmsr r30 ; Translation and all off | |
1968 | isync ; Toss prefetch | |
1969 | b eeNoMSRx | |
1970 | ||
1971 | eeNoMSR: li r0,loadMSR ; Get the MSR setter SC | |
1972 | mr r3,r30 ; Get new MSR | |
1973 | sc ; Set it | |
1974 | ||
9bccf70c | 1975 | eeNoMSRx: dcbt r10,r31 ; Touch in the first stuff we restore |
1c79356b A |
1976 | mfsprg r2,0 ; Get the per_proc block |
1977 | lwz r4,SAVflags(r31) ; Pick up the flags | |
1978 | mr r13,r31 ; Put savearea here also | |
1979 | ||
1980 | and. r0,r4,r1 ; Check if redrive requested | |
1981 | andc r4,r4,r1 ; Clear redrive | |
1982 | ||
1983 | dcbt br0,r2 ; We will need this in just a sec | |
1984 | ||
1985 | beq+ EatRupt ; No redrive, just exit... | |
1986 | ||
9bccf70c | 1987 | lwz r11,saveexception(r13) ; Restore exception code |
1c79356b A |
1988 | stw r4,SAVflags(r13) ; Set the flags |
1989 | b Redrive ; Redrive the exception... | |
1990 | ||
1c79356b A |
1991 | |
1992 | /* | |
1993 | * Start of the trace table | |
1994 | */ | |
1995 | ||
1996 | .align 12 /* Align to 4k boundary */ | |
1997 | ||
1998 | .globl EXT(traceTableBeg) | |
1999 | EXT(traceTableBeg): /* Start of trace table */ | |
2000 | /* .fill 2048,4,0 Make an 8k trace table for now */ | |
2001 | .fill 13760,4,0 /* Make an .trace table for now */ | |
2002 | /* .fill 240000,4,0 Make an .trace table for now */ | |
2003 | .globl EXT(traceTableEnd) | |
2004 | EXT(traceTableEnd): /* End of trace table */ | |
2005 | ||
2006 | .globl EXT(ExceptionVectorsEnd) | |
2007 | EXT(ExceptionVectorsEnd): /* Used if relocating the exception vectors */ | |
2008 | #ifndef HACKALERTHACKALERT | |
2009 | /* | |
2010 | * This .long needs to be here because the linker gets confused and tries to | |
2011 | * include the final label in a section in the next section if there is nothing | |
2012 | * after it | |
2013 | */ | |
2014 | .long 0 /* (HACK/HACK/HACK) */ | |
2015 | #endif | |
2016 | ||
2017 | .data | |
2018 | .align ALIGN | |
2019 | .globl EXT(exception_end) | |
2020 | EXT(exception_end): | |
2021 | .long EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart) /* phys fn */ | |
2022 | ||
2023 |