/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
25 | ||
/*
 * Low-memory exception vector code for PowerPC MACH
 *
 * These are the only routines that are ever run with
 * VM instruction translation switched off.
 *
 * The PowerPC is quite strange in that rather than having a set
 * of exception vectors, the exception handlers are installed
 * in well-known addresses in low memory. This code must be loaded
 * at ZERO in physical memory. The simplest way of doing this is
 * to load the kernel at zero, and specify this as the first file
 * on the linker command line.
 *
 * When this code is loaded into place, it is loaded at virtual
 * address KERNELBASE, which is mapped to zero (physical).
 *
 * This code handles all powerpc exceptions and is always entered
 * in supervisor mode with translation off. It saves the minimum
 * processor state before switching back on translation and
 * jumping to the appropriate routine.
 *
 * Vectors from 0x100 to 0x3fff occupy 0x100 bytes each (64 instructions)
 *
 * We use some of this space to decide which stack to use, and where to
 * save the context etc, before jumping to a generic handler.
 */
52 | ||
#include <assym.s>
#include <debug.h>
#include <cpus.h>
#include <db_machine_commands.h>
#include <mach_rt.h>

#include <mach_debug.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <ppc/Performance.h>
#include <mach/ppc/vm_param.h>
#include <ppc/POWERMAC/mp/MPPlugIn.h>

/* Build-time trace/debug switches; all normally 0. */
#define TRCSAVE 0
#define CHECKSAVE 0
#define PERFTIMES 0
#define ESPDEBUG 0

#if TRCSAVE
#error The TRCSAVE option is broken.... Fix it
#endif

/*
 * CR bit numbers used as flags by the common save path
 * (.L_exception_entry).  CR6/CR7 are loaded from the per-processor
 * feature flags word (pfAvailable); these names pick out the bits
 * that the save path tests with bf/bt/crmove.
 */
#define featL1ena 24                    /* L1 D-cache is enabled (copied from HID0 dce) */
#define featSMP 25                      /* SMP capable (copied from pfSMPcapb) */
#define featAltivec 26                  /* Altivec present (copied from pfAltivecb) */
#define wasNapping 27                   /* this interrupt woke the CPU from nap */
#define featFP 28                       /* floating point present (copied from pfFloatb) */

/* All vector code lives in the __VECTORS,__interrupts section. */
#define VECTOR_SEGMENT .section __VECTORS, __interrupts

        VECTOR_SEGMENT


        .globl  EXT(ExceptionVectorsStart)

EXT(ExceptionVectorsStart):             /* Used if relocating the exception vectors */
baseR:                                  /* Used so we have more readable code */
91 | ||
/*
 * System reset - call debugger
 *
 * The three words at ResetHandler form a small descriptor that the
 * kernel can fill in (accessed below via the RESETHANDLER_TYPE/_CALL/
 * _ARG offsets).  If the type word equals RESET_HANDLER_START, the
 * reset vector clears the type and jumps to the installed call
 * address with its argument in r3; otherwise the reset is delivered
 * as a T_RESET interruption through the common path.
 */
        . = 0xf0
        .globl  EXT(ResetHandler)
EXT(ResetHandler):
        .long   0x0
        .long   0x0
        .long   0x0

        . = 0x100
.L_handler100:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        lwz     r13,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0)   ; Get reset type
        mfcr    r11                     ; Save the CR (restored on the resetexc path)
        cmpi    cr0,r13,RESET_HANDLER_START ; Is a start handler installed?
        bne     resetexc                ; No, deliver as a normal reset exception...

        li      r11,RESET_HANDLER_NULL
        stw     r11,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_TYPE)(br0)   ; Clear reset type

        lwz     r4,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_CALL)(br0)    ; Handler entry point
        lwz     r3,lo16(EXT(ResetHandler)-EXT(ExceptionVectorsStart)+RESETHANDLER_ARG)(br0)     ; Handler argument (in r3)
        mtlr    r4
        blr                             ; Jump to the installed start handler

resetexc:
        mtcr    r11                     ; Restore the CR
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_RESET             /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
124 | ||
/*
 * Machine check
 *
 * Standard vector preamble: park r13/r11 in sprg2/sprg3, point r13 at
 * the per-CPU savearea (sprg1), put the trap code in r11, join common.
 */

        . = 0x200
.L_handler200:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_MACHINE_CHECK     /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
136 | ||
/*
 * Data access - page fault, invalid memory rights for operation
 *
 * Also the target of the inlined DTLB-miss handlers (0x1100/0x1200)
 * when the page is not in the hash table or access is not permitted.
 */

        . = 0x300
.L_handler300:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_DATA_ACCESS       /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
148 | ||
/*
 * Instruction access - as for data access
 *
 * Also the target of the inlined ITLB-miss handler (0x1000) when the
 * translation cannot be resolved from the hash table.
 */

        . = 0x400
.L_handler400:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_INSTRUCTION_ACCESS /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
160 | ||
/*
 * External interrupt
 */

        . = 0x500
.L_handler500:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_INTERRUPT         /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
172 | ||
/*
 * Alignment - many reasons
 */

        . = 0x600
.L_handler600:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_ALIGNMENT         /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
184 | ||
/*
 * Program - floating point exception, illegal inst, priv inst, user trap
 */

        . = 0x700
.L_handler700:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_PROGRAM           /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
196 | ||
/*
 * Floating point disabled
 */

        . = 0x800
.L_handler800:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_FP_UNAVAILABLE    /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
208 | ||
209 | ||
/*
 * Decrementer - DEC register has passed zero.
 */

        . = 0x900
.L_handler900:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_DECREMENTER       /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
221 | ||
/*
 * I/O controller interface error - MACH does not use this
 */

        . = 0xA00
.L_handlerA00:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_IO_ERROR          /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
233 | ||
/*
 * Reserved
 */

        . = 0xB00
.L_handlerB00:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_RESERVED          /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
245 | ||
/*
 * System call - generated by the sc instruction
 *
 * Fast-path dispatch on the trap number in r0.  The "ultra fast"
 * traps (0x7FF2 user-assist word, 0x7FF3 fp/vec facility status,
 * 0x7FF4 load-MSR) are handled entirely here and return with rfi;
 * everything else falls into the common T_SYSTEM_CALL path.
 */

        . = 0xC00
.L_handlerC00:
        mtsprg  3,r11                   ; Save R11
        mtsprg  2,r13                   ; Save R13
        mfcr    r11                     ; Save the CR

;       Note: this first compare takes care of almost all of the non-fast paths.
;       BSD system calls are negative and, platform-specific and mach system
;       calls are all less than 0x7000.
;
;       Note that 0x7FF2 and 0x7FF3 are user state only and do not need to set sprg2.

        cmpwi   r0,0x7FF2               ; Ultra fast path cthread info call?
        blt+    notufp                  ; Not ultra fast...
        mfsprg  r13,0                   ; Get the per_proc_area
        cmplwi  cr1,r0,0x7FF4           ; Ultra fast path fp/vec facility state?
        bgt+    cr1,notufp              ; Not ultra fast...
        beq+    cr1,scloadmsr           ; It is the load msr guy...
        lwz     r13,spcFlags(r13)       ; Get the facility status
        rlwinm. r13,r13,0,runningVMbit,runningVMbit ; Are we running a VM right now?
        bne-    notufp                  ; Yes, no fast trap allowed...

        mfsprg  r11,3                   ; Restore R11
        mfsprg  r3,0                    ; Get the per_proc_area
        mfsprg  r13,2                   ; Restore R13
        beq-    cr1,isvecfp             ; This is the facility stat call
                                        ; NOTE(review): cr1_eq (r0 == 0x7FF4) already branched to
                                        ; scloadmsr above, so this beq looks unreachable and 0x7FF3
                                        ; would fall through to the UAW load — confirm against the
                                        ; intended trap numbering.
        lwz     r3,UAW(r3)              ; Get the assist word
        rfi                             ; All done, scream back... (no need to restore CR or R11, they are volatile)
;
isvecfp:
        lwz     r3,spcFlags(r3)         ; Get the facility status
        rfi                             ; Bail back...
;
        .align  5
notufp:
        mtcrf   0xC0,r11                ; Restore the used CRs
        li      r11,T_SYSTEM_CALL       ; Set interrupt code
        mfsprg  r13,1                   ; Get the exception save area
        b       .L_exception_entry      ; Join common...

; 0x7FF4: install a new MSR from r3.  Refused (routed to the normal
; handler) when the caller was in problem state.
scloadmsr:
        mfsrr1  r13                     ; Get the old SRR1
        rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; From problem state?
        mfsprg  r13,0                   ; Restore per_proc (condition already latched in cr0)
        bne-    notufp                  ; Someone is trying to cheat...

        mtcrf   0xC0,r11                ; Restore CR
        lwz     r11,pfAvailable(r13)    ; Pick up the feature flags
        mtsrr1  r3                      ; Set new MSR
        mfsprg  r13,2                   ; Restore R13
        mtsprg  2,r11                   ; Set the feature flags into sprg2
        mfsprg  r11,3                   ; Restore R11
        rfi                             ; Blast back
300 | ||
301 | ||
/*
 * Trace - generated by single stepping
 * performance monitor BE branch enable tracing/logging
 * is also done here now.  While this is permanently in the
 * system the impact is completely unnoticeable as this code is
 * only executed when (a) a single step or branch exception is
 * hit, (b) in the single step debugger case there is so much
 * overhead already the few extra instructions for testing for BE
 * are not even noticeable, (c) the BE logging code is *only* run
 * when it is enabled by the tool which will not happen during
 * normal system usage
 *
 * Note that this trace is available only to user state so we do not
 * need to set sprg2 before returning.
 */

        . = 0xD00
.L_handlerD00:
        mtsprg  2,r13                   ; Save R13
        mtsprg  3,r11                   ; Save R11
        mfsrr1  r13                     ; Get the old MSR
        mfcr    r11                     ; Get the CR
        rlwinm. r13,r13,0,MSR_PR_BIT,MSR_PR_BIT ; Are we in supervisor state?
        beq-    notspectr               ; Yes, not special trace...
        mfsprg  r13,0                   ; Get the per_proc area
        lhz     r13,PP_CPU_FLAGS(r13)   ; Get the flags
        rlwinm. r13,r13,0,traceBEb+16,traceBEb+16 ; Special trace enabled?
        bne+    specbrtr                ; Yeah...

notspectr:
        mtcr    r11                     ; Restore CR
        mfsprg  r13,1                   ; Get the savearea
        li      r11,T_TRACE             ; Set interrupt code
        b       .L_exception_entry      ; Join common...

;
; We are doing the special branch trace: log the faulting PC into
; pc_trace_buf (one page, wrapping) and resume with rfi unless the
; countdown/page-boundary says to deliver a T_TRACE instead.
; r1-r3 are parked in the per_proc emfp0 scratch area meanwhile.
;

specbrtr:
        mfsprg  r13,0                   ; Get the per_proc area
        stw     r1,emfp0(r13)           ; Save in a scratch area
        stw     r2,emfp0+4(r13)         ; Save in a scratch area
        stw     r3,emfp0+8(r13)         ; Save in a scratch area

        lwz     r1,spcTRc(r13)          ; Pick up the count
        lis     r2,hi16(EXT(pc_trace_buf)) ; Get the top of the buffer
        subi    r1,r1,1                 ; Count down
        lwz     r3,spcTRp(r13)          ; Pick up buffer position
        mr.     r1,r1                   ; Is it time to count?
        ori     r2,r2,lo16(EXT(pc_trace_buf)) ; Get the bottom of the buffer
        cmplwi  cr1,r3,4092             ; Set cr1_eq if we should take exception (last slot in page)
        ble+    spclogpc                ; We are logging this one...
        cmplwi  cr1,r2,0                ; Set cr1_eq false so we do not take an interrupt
        b       spcskip                 ; Fly away...

spclogpc:
        mfsrr0  r1                      ; Get the pc
        stwx    r1,r2,r3                ; Save it in the buffer
        addi    r3,r3,4                 ; Point to the next slot
        li      r1,2                    ; Number of branches to skip
        rlwinm  r3,r3,0,20,31           ; Wrap the slot at one page
        stw     r3,spcTRp(r13)          ; Save the new slot

spcskip:
        stw     r1,spcTRc(r13)          ; Save the new count

        lwz     r1,emfp0(r13)           ; Restore work register
        lwz     r2,emfp0+4(r13)         ; Restore work register
        lwz     r3,emfp0+8(r13)         ; Restore work register
        beq     cr1,notspectr           ; Buffer filled, make a rupt...

        mtcr    r11                     ; Restore the CR
        mfsprg  r13,2                   ; Restore R13
        mfsprg  r11,3                   ; Restore R11
        rfi                             ; Bail back...
374 | ||
/*
 * Floating point assist
 */

        . = 0xe00
.L_handlerE00:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_FP_ASSIST         /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
386 | ||
387 | ||
/*
 * Performance monitor interruption
 */

        . = 0xF00
PMIhandler:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_PERF_MON          /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
399 | ||
400 | ||
/*
 * VMX exception
 */

        . = 0xF20
VMXhandler:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_VMX               /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
412 | ||
413 | ||
414 | ||
/*
 * Instruction translation miss - we inline this code.
 * Upon entry (done for us by the machine):
 *      srr0 :  addr of instruction that missed
 *      srr1 :  bits 0-3   = saved CR0
 *              4          = lru way bit
 *              16-31      = saved msr
 *      msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *      imiss:  ea that missed
 *      icmp :  the compare value for the va that missed
 *      hash1:  pointer to first hash pteg
 *      hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *      tmp0:   saved counter
 *      tmp1:   junk
 *      tmp2:   pointer to pteg
 *      tmp3:   current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */

        . = 0x1000
.L_handler1000:
        mfspr   tmp2, hash1
        mfctr   tmp0                    /* use tmp0 to save ctr */
        mfspr   tmp3, icmp

/* Scan one 8-entry PTEG for a pte0 matching the compare value. */
.L_imiss_find_pte_in_pteg:
        li      tmp1, 8                 /* count */
        subi    tmp2, tmp2, 8           /* offset for lwzu */
        mtctr   tmp1                    /* count... */

.L_imiss_pteg_loop:
        lwz     tmp1, 8(tmp2)           /* check pte0 for match... */
        addi    tmp2, tmp2, 8
        cmpw    cr0, tmp1, tmp3
#if 0
        bdnzf+  cr0, .L_imiss_pteg_loop
#else
        bc      0,2, .L_imiss_pteg_loop /* decrement ctr, branch while cr0_eq clear */
#endif
        beq+    cr0, .L_imiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

        andi.   tmp1, tmp3, MASK(PTE0_HASH_ID)  /* already at 2nd hash? */
        bne-    .L_imiss_do_no_hash_exception   /* give up */

        mfspr   tmp2, hash2
        ori     tmp3, tmp3, MASK(PTE0_HASH_ID)  /* flip to secondary-hash compare value */
        b       .L_imiss_find_pte_in_pteg

.L_imiss_found_pte:

        lwz     tmp1, 4(tmp2)           /* get pte1_t */
        andi.   tmp3, tmp1, MASK(PTE1_WIMG_GUARD)   /* Fault? */
        bne-    .L_imiss_do_prot_exception          /* Guarded - illegal to fetch from */

/* Ok, we've found what we need to, restore and rfi! */

        mtctr   tmp0                    /* restore ctr */
        mfsrr1  tmp3
        mfspr   tmp0, imiss
        mtcrf   0x80, tmp3              /* Restore CR0 */
        mtspr   rpa, tmp1               /* set the pte */
        ori     tmp1, tmp1, MASK(PTE1_REFERENCED)   /* set referenced (in the memory copy only) */
        tlbli   tmp0
        sth     tmp1, 6(tmp2)           /* write back pte1 low half with R bit */
        rfi

.L_imiss_do_prot_exception:
        /* set up srr1 to indicate protection exception... */
        mfsrr1  tmp3
        andi.   tmp2, tmp3, 0xffff      /* keep only the saved MSR half */
        addis   tmp2, tmp2, MASK(SRR1_TRANS_PROT) >> 16
        b       .L_imiss_do_exception

.L_imiss_do_no_hash_exception:
        /* clean up registers for page-not-found exception... */
        mfsrr1  tmp3
        andi.   tmp2, tmp3, 0xffff
        addis   tmp2, tmp2, MASK(SRR1_TRANS_HASH) >> 16

/* And the entry into the usual instruction fault handler ... */
.L_imiss_do_exception:

        mtctr   tmp0                    /* Restore ctr */
        mtsrr1  tmp2                    /* Set up srr1 */
        mfmsr   tmp0
        xoris   tmp0, tmp0, MASK(MSR_TGPR)>>16  /* no TGPR */
        mtcrf   0x80, tmp3              /* Restore CR0 */
        mtmsr   tmp0                    /* reset MSR[TGPR]: back to the normal register set */
        b       .L_handler400           /* Instr Access */
512 | ||
/*
 * Data load translation miss
 *
 * Upon entry (done for us by the machine):
 *      srr0 :  addr of instruction that missed
 *      srr1 :  bits 0-3   = saved CR0
 *              4          = lru way bit
 *              5          = 1 if store
 *              16-31      = saved msr
 *      msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *      dmiss:  ea that missed
 *      dcmp :  the compare value for the va that missed
 *      hash1:  pointer to first hash pteg
 *      hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *      tmp0:   saved counter
 *      tmp1:   junk
 *      tmp2:   pointer to pteg
 *      tmp3:   current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */

        . = 0x1100
.L_handler1100:
        mfspr   tmp2, hash1
        mfctr   tmp0                    /* use tmp0 to save ctr */
        mfspr   tmp3, dcmp

/* Scan one 8-entry PTEG for a pte0 matching the compare value. */
.L_dlmiss_find_pte_in_pteg:
        li      tmp1, 8                 /* count */
        subi    tmp2, tmp2, 8           /* offset for lwzu */
        mtctr   tmp1                    /* count... */

.L_dlmiss_pteg_loop:
        lwz     tmp1, 8(tmp2)           /* check pte0 for match... */
        addi    tmp2, tmp2, 8
        cmpw    cr0, tmp1, tmp3
#if 0   /* How to write this correctly? */
        bdnzf+  cr0, .L_dlmiss_pteg_loop
#else
        bc      0,2, .L_dlmiss_pteg_loop    /* decrement ctr, branch while cr0_eq clear */
#endif
        beq+    cr0, .L_dmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

        andi.   tmp1, tmp3, MASK(PTE0_HASH_ID)  /* already at 2nd? */
        bne-    .L_dmiss_do_no_hash_exception   /* give up */

        mfspr   tmp2, hash2
        ori     tmp3, tmp3, MASK(PTE0_HASH_ID)  /* flip to secondary-hash compare value */
        b       .L_dlmiss_find_pte_in_pteg

.L_dmiss_found_pte:

        lwz     tmp1, 4(tmp2)           /* get pte1_t */

/* Ok, we've found what we need to, restore and rfi! */

        mtctr   tmp0                    /* restore ctr */
        mfsrr1  tmp3
        mfspr   tmp0, dmiss
        mtcrf   0x80, tmp3              /* Restore CR0 */
        mtspr   rpa, tmp1               /* set the pte */
        ori     tmp1, tmp1, MASK(PTE1_REFERENCED)   /* set referenced (in the memory copy only) */
        tlbld   tmp0                    /* load up tlb */
        sth     tmp1, 6(tmp2)           /* sth is faster? */
        rfi

/* This code is shared with data store translation miss (0x1200) */

.L_dmiss_do_no_hash_exception:
        /* clean up registers for page-not-found exception... */
        mfsrr1  tmp3
        /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
        rlwinm  tmp1, tmp3, 9, 6, 6     /* move srr1 "store" bit into DSISR write-bit position */
        addis   tmp1, tmp1, MASK(DSISR_HASH) >> 16

/* And the entry into the usual data fault handler ... */

        mtctr   tmp0                    /* Restore ctr */
        andi.   tmp2, tmp3, 0xffff      /* Clean up srr1 */
        mtsrr1  tmp2                    /* Set srr1 */
        mtdsisr tmp1
        mfspr   tmp2, dmiss
        mtdar   tmp2
        mfmsr   tmp0
        xoris   tmp0, tmp0, MASK(MSR_TGPR)>>16  /* no TGPR */
        mtcrf   0x80, tmp3              /* Restore CR0 */
        sync                            /* Needed on some */
        mtmsr   tmp0                    /* reset MSR[TGPR]: back to the normal register set */
        b       .L_handler300           /* Data Access */
610 | ||
/*
 * Data store translation miss (similar to data load)
 *
 * Upon entry (done for us by the machine):
 *      srr0 :  addr of instruction that missed
 *      srr1 :  bits 0-3   = saved CR0
 *              4          = lru way bit
 *              5          = 1 if store
 *              16-31      = saved msr
 *      msr[tgpr] = 1  (so gpr0-3 become our temporary variables)
 *      dmiss:  ea that missed
 *      dcmp :  the compare value for the va that missed
 *      hash1:  pointer to first hash pteg
 *      hash2:  pointer to 2nd hash pteg
 *
 * Register usage:
 *      tmp0:   saved counter
 *      tmp1:   junk
 *      tmp2:   pointer to pteg
 *      tmp3:   current compare value
 *
 * This code is taken from the 603e User's Manual with
 * some bugfixes and minor improvements to save bytes and cycles
 *
 * NOTE: Do not touch sprg2 in here
 */

        . = 0x1200
.L_handler1200:
        mfspr   tmp2, hash1
        mfctr   tmp0                    /* use tmp0 to save ctr */
        mfspr   tmp3, dcmp

/* Scan one 8-entry PTEG for a pte0 matching the compare value. */
.L_dsmiss_find_pte_in_pteg:
        li      tmp1, 8                 /* count */
        subi    tmp2, tmp2, 8           /* offset for lwzu */
        mtctr   tmp1                    /* count... */

.L_dsmiss_pteg_loop:
        lwz     tmp1, 8(tmp2)           /* check pte0 for match... */
        addi    tmp2, tmp2, 8

        cmpw    cr0, tmp1, tmp3
#if 0   /* I don't know how to write this properly */
        bdnzf+  cr0, .L_dsmiss_pteg_loop
#else
        bc      0,2, .L_dsmiss_pteg_loop    /* decrement ctr, branch while cr0_eq clear */
#endif
        beq+    cr0, .L_dsmiss_found_pte

/* Not found in PTEG, we must scan 2nd then give up */

        andi.   tmp1, tmp3, MASK(PTE0_HASH_ID)  /* already at 2nd? */
        bne-    .L_dmiss_do_no_hash_exception   /* give up (shared with 0x1100) */

        mfspr   tmp2, hash2
        ori     tmp3, tmp3, MASK(PTE0_HASH_ID)  /* flip to secondary-hash compare value */
        b       .L_dsmiss_find_pte_in_pteg

.L_dsmiss_found_pte:

        lwz     tmp1, 4(tmp2)           /* get pte1_t */
        andi.   tmp3, tmp1, MASK(PTE1_CHANGED)  /* unchanged, check? */
        beq-    .L_dsmiss_check_prot    /* yes, check prot before dirtying */

.L_dsmiss_resolved:
        /* Ok, we've found what we need to, restore and rfi! */

        mtctr   tmp0                    /* restore ctr */
        mfsrr1  tmp3
        mfspr   tmp0, dmiss
        mtcrf   0x80, tmp3              /* Restore CR0 */
        mtspr   rpa, tmp1               /* set the pte */
        tlbld   tmp0                    /* load up tlb */
        rfi

.L_dsmiss_check_prot:
        /* PTE is unchanged, we must check that we can write */
        rlwinm. tmp3, tmp1, 30, 0, 1    /* check PP[1] */
        bge-    .L_dsmiss_check_prot_user_kern
        andi.   tmp3, tmp1, 1           /* check PP[0] */
        beq+    .L_dsmiss_check_prot_ok

.L_dmiss_do_prot_exception:
        /* clean up registers for protection exception... */
        mfsrr1  tmp3
        /* prepare to set DSISR_WRITE_BIT correctly from srr1 info */
        rlwinm  tmp1, tmp3, 9, 6, 6     /* move srr1 "store" bit into DSISR write-bit position */
        addis   tmp1, tmp1, MASK(DSISR_PROT) >> 16

/* And the entry into the usual data fault handler ... */

        mtctr   tmp0                    /* Restore ctr */
        andi.   tmp2, tmp3, 0xffff      /* Clean up srr1 */
        mtsrr1  tmp2                    /* Set srr1 */
        mtdsisr tmp1
        mfspr   tmp2, dmiss
        mtdar   tmp2
        mfmsr   tmp0
        xoris   tmp0, tmp0, MASK(MSR_TGPR)>>16  /* no TGPR */
        mtcrf   0x80, tmp3              /* Restore CR0 */
        sync                            /* Needed on some */
        mtmsr   tmp0                    /* reset MSR[TGPR]: back to the normal register set */
        b       .L_handler300           /* Data Access */

/* NB - if we knew we were on a 603e we could test just the MSR_KEY bit */
.L_dsmiss_check_prot_user_kern:
        mfsrr1  tmp3
        andi.   tmp3, tmp3, MASK(MSR_PR)
        beq+    .L_dsmiss_check_prot_kern
        mfspr   tmp3, dmiss             /* check user privs */
        mfsrin  tmp3, tmp3              /* get excepting SR */
        andis.  tmp3, tmp3, 0x2000      /* Test SR ku bit */
        beq+    .L_dsmiss_check_prot_ok
        b       .L_dmiss_do_prot_exception

.L_dsmiss_check_prot_kern:
        mfspr   tmp3, dmiss             /* check kern privs */
        mfsrin  tmp3, tmp3
        andis.  tmp3, tmp3, 0x4000      /* Test SR Ks bit */
        bne-    .L_dmiss_do_prot_exception

.L_dsmiss_check_prot_ok:
        /* Ok, mark as referenced and changed before resolving the fault */
        ori     tmp1, tmp1, (MASK(PTE1_REFERENCED)|MASK(PTE1_CHANGED))
        sth     tmp1, 6(tmp2)
        b       .L_dsmiss_resolved
738 | ||
/*
 * Instruction address breakpoint
 */

        . = 0x1300
.L_handler1300:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_INSTRUCTION_BKPT  /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
750 | ||
/*
 * System management interrupt
 */

        . = 0x1400
.L_handler1400:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_SYSTEM_MANAGEMENT /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
762 | ||
;
;   Altivec Java Mode Assist interrupt
;

        . = 0x1600
.L_handler1600:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_ALTIVEC_ASSIST    /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
774 | ||
;
;   Thermal interruption
;

        . = 0x1700
.L_handler1700:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_THERMAL           /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
786 | ||
/*
 * There is now a large gap of reserved traps
 */

/*
 * Run mode/ trace exception - single stepping on 601 processors
 */

        . = 0x2000
.L_handler2000:
        mtsprg  2,r13                   /* Save R13 */
        mtsprg  3,r11                   /* Save R11 */
        mfsprg  r13,1                   /* Get the exception save area */
        li      r11,T_RUNMODE_TRACE     /* Set 'rupt code */
        b       .L_exception_entry      /* Join common... */
802 | ||
/*
 * .L_exception_entry(type)
 *
 * This is the common exception handling routine called by any
 * type of system exception.
 *
 * ENTRY: via a system exception handler, thus interrupts off, VM off.
 *        As the vectors above show, at this point r13 is parked in
 *        sprg2, r11 in sprg3, r13 points at the savearea (sprg1),
 *        and r11 holds the trap code.
 *        NOTE(review): the original comment here claimed "r3 has been
 *        saved in sprg3 and now contains a number representing the
 *        exception's origins" — that does not match the visible vector
 *        preambles; confirm against the rest of the file.
 *
 * The exported word below gives the physical address of the routine
 * (offset from ExceptionVectorsStart, which is loaded at zero).
 */

        .data
        .align  ALIGN
        .globl  EXT(exception_entry)
EXT(exception_entry):
        .long   .L_exception_entry-EXT(ExceptionVectorsStart)   /* phys addr of fn */
820 | ||
        VECTOR_SEGMENT
        .align  5

.L_exception_entry:

/*
 *
 * Here we will save off a mess of registers, the special ones and R0-R12.  We use the DCBZ
 * instruction to clear and allocate a line in the cache.  This way we won't take any cache
 * misses, so these stores won't take all that long.  Except the first line that is because
 * we can't do a DCBZ if the L1 D-cache is off.  The rest we will skip if they are
 * off also.
 *
 * Note that if we are attempting to sleep (as opposed to nap or doze) all interruptions
 * are ignored: the sleep test below restores everything and rfi's straight back.
 */

        stw     r1,saver1(r13)          ; Save register 1
        stw     r0,saver0(r13)          ; Save register 0
        mfspr   r1,hid0                 ; Get HID0
        mfcr    r0                      ; Save the CR
        mtcrf   255,r1                  ; Get set to test for cache and sleep (HID0 bits into CR)
        bf      sleep,notsleep          ; Skip if we are not trying to sleep

        mtcrf   255,r0                  ; Restore the CR
        lwz     r0,saver0(r13)          ; Restore R0
        lwz     r1,saver1(r13)          ; Restore R1
        mfsprg  r13,0                   ; Get the per_proc
        lwz     r11,pfAvailable(r13)    ; Get back the feature flags
        mfsprg  r13,2                   ; Restore R13
        mtsprg  2,r11                   ; Set sprg2 to the features
        mfsprg  r11,3                   ; Restore R11
        rfi                             ; Jump back into sleep code...
        .long   0                       ; Leave these here please...
        .long   0
        .long   0
        .long   0
        .long   0
        .long   0
        .long   0
        .long   0
862 | ||
        .align  5

; Normal (non-sleep) path: clear nap/doze, note whether we were napping,
; set CR6/CR7 feature-flag bits, and stream the GPRs/SRRs into the
; savearea a cache line at a time (dcbz-allocating lines when the L1 is on).
notsleep:
        stw     r2,saver2(r13)          ; Save this one
        crmove  featL1ena,dce           ; Copy the cache enable bit
        rlwinm  r2,r1,0,nap+1,doze-1    ; Clear any possible nap and doze bits
        mtspr   hid0,r2                 ; Clear the nap/doze bits
        cmplw   r2,r1                   ; See if we were napping
        li      r1,32                   ; Point to the next line in case we need it
        crnot   wasNapping,cr0_eq       ; Remember if we were napping
        mfsprg  r2,0                    ; Get the per_proc area
        bf-     featL1ena,skipz1        ; L1 cache is disabled...
        dcbz    r1,r13                  ; Reserve our line in cache

;
;   Remember, we are setting up CR6 with feature flags
;
skipz1:
        lwz     r1,pfAvailable(r2)      ; Get the CPU features flags
        stw     r3,saver3(r13)          ; Save this one
        mtcrf   0xE0,r1                 ; Put the features flags (that we care about) in the CR
        stw     r4,saver4(r13)          ; Save this one
        stw     r6,saver6(r13)          ; Save this one
        crmove  featSMP,pfSMPcapb       ; See if we have a PIR
        stw     r8,saver8(r13)          ; Save this one
        crmove  featAltivec,pfAltivecb  ; Set the Altivec flag
        mfsrr0  r6                      /* Get the interruption SRR0 */
        stw     r8,saver8(r13)          /* NOTE(review): duplicate of the saver8 store above;
                                           r8 is unchanged between them — looks redundant, but
                                           removing it would shift cache-line layout; confirm */
        crmove  featFP,pfFloatb         ; Remember that we have floating point
        stw     r7,saver7(r13)          /* Save this one */
        lhz     r8,PP_CPU_FLAGS(r2)     ; Get the flags
        mfsrr1  r7                      /* Get the interrupt SRR1 */
        rlwinm  r8,r8,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Set BE bit if special trace is on
        stw     r6,savesrr0(r13)        /* Save the SRR0 */
        rlwinm  r6,r7,(((31-MSR_BE_BIT)+(MSR_PR_BIT+1))&31),MSR_BE_BIT,MSR_BE_BIT ; Move PR bit to BE bit
        stw     r5,saver5(r13)          /* Save this one */
        and     r8,r6,r8                ; Remove BE bit only if problem state and special tracing on
        mfsprg  r6,2                    ; Get interrupt time R13
        mtsprg  2,r1                    ; Set the feature flags
        andc    r7,r7,r8                ; Clear BE bit if special trace is on and PR is set
        mfsprg  r8,3                    /* Get 'rupt time R11 */
        stw     r7,savesrr1(r13)        /* Save SRR1 */
        stw     r6,saver13(r13)         /* Save 'rupt time R13 */
        stw     r8,saver11(r13)         /* Save 'rupt time R11 */
905 | ||
906 | getTB: mftbu r6 ; Get the upper timebase | |
907 | mftb r7 ; Get the lower timebase | |
908 | mftbu r8 ; Get the upper one again | |
909 | cmplw r6,r8 ; Did the top tick? | |
910 | bne- getTB ; Yeah, need to get it again... | |
911 | ||
912 | stw r8,ruptStamp(r2) ; Save the top of time stamp | |
913 | la r6,saver14(r13) ; Point to the next cache line | |
914 | stw r7,ruptStamp+4(r2) ; Save the bottom of time stamp | |
915 | bf- featL1ena,skipz2 ; L1 cache is disabled... | |
916 | dcbz 0,r6 /* Allocate in cache */ | |
917 | skipz2: | |
918 | stw r9,saver9(r13) /* Save this one */ | |
919 | ||
920 | la r9,saver30(r13) /* Point to the trailing end */ | |
921 | stw r10,saver10(r13) /* Save this one */ | |
922 | mflr r4 /* Get the LR */ | |
923 | mfxer r10 ; Get the XER | |
924 | ||
925 | bf+ wasNapping,notNapping ; Skip if not waking up from nap... | |
926 | ||
927 | lwz r6,napStamp+4(r2) ; Pick up low order nap stamp | |
928 | lis r3,hi16(EXT(machine_idle_ret)) ; Get high part of nap/doze return | |
929 | lwz r5,napStamp(r2) ; and high order | |
930 | subfc r7,r6,r7 ; Subtract low stamp from now | |
931 | lwz r6,napTotal+4(r2) ; Pick up low total | |
932 | subfe r5,r5,r8 ; Subtract high stamp and borrow from now | |
933 | lwz r8,napTotal(r2) ; Pick up the high total | |
934 | addc r6,r6,r7 ; Add low to total | |
935 | ori r3,r3,lo16(EXT(machine_idle_ret)) ; Get low part of nap/doze return | |
936 | adde r8,r8,r5 ; Add high and carry to total | |
937 | stw r6,napTotal+4(r2) ; Save the low total | |
938 | stw r8,napTotal(r2) ; Save the high total | |
939 | stw r3,savesrr0(r13) ; Modify to return to nap/doze exit | |
940 | ||
941 | notNapping: stw r12,saver12(r13) /* Save this one */ | |
942 | ||
943 | bf- featL1ena,skipz3 ; L1 cache is disabled... | |
944 | dcbz 0,r9 /* Allocate the last in the area */ | |
945 | skipz3: | |
946 | stw r14,saver14(r13) /* Save this one */ | |
947 | stw r15,saver15(r13) /* Save this one */ | |
948 | la r14,saver22(r13) /* Point to the next block to save into */ | |
949 | stw r0,savecr(r13) ; Save rupt CR | |
950 | mfctr r6 /* Get the CTR */ | |
951 | stw r16,saver16(r13) /* Save this one */ | |
952 | stw r4,savelr(r13) /* Save 'rupt LR */ | |
953 | ||
954 | bf- featL1ena,skipz4 ; L1 cache is disabled... | |
955 | dcbz 0,r14 /* Allocate next save area line */ | |
956 | skipz4: | |
957 | stw r17,saver17(r13) /* Save this one */ | |
958 | stw r18,saver18(r13) /* Save this one */ | |
959 | stw r6,savectr(r13) /* Save 'rupt CTR */ | |
960 | stw r19,saver19(r13) /* Save this one */ | |
961 | lis r12,HIGH_ADDR(KERNEL_SEG_REG0_VALUE) /* Get the high half of the kernel SR0 value */ | |
962 | mfdar r6 /* Get the 'rupt DAR */ | |
963 | stw r20,saver20(r13) /* Save this one */ | |
964 | #if 0 | |
965 | mfsr r14,sr0 ; (TEST/DEBUG) | |
966 | stw r14,savesr0(r13) ; (TEST/DEBUG) | |
967 | mfsr r14,sr1 ; (TEST/DEBUG) | |
968 | stw r14,savesr1(r13) ; (TEST/DEBUG) | |
969 | mfsr r14,sr2 ; (TEST/DEBUG) | |
970 | stw r14,savesr2(r13) ; (TEST/DEBUG) | |
971 | mfsr r14,sr3 ; (TEST/DEBUG) | |
972 | stw r14,savesr3(r13) ; (TEST/DEBUG) | |
973 | mfsr r14,sr4 ; (TEST/DEBUG) | |
974 | stw r14,savesr4(r13) ; (TEST/DEBUG) | |
975 | mfsr r14,sr5 ; (TEST/DEBUG) | |
976 | stw r14,savesr5(r13) ; (TEST/DEBUG) | |
977 | mfsr r14,sr6 ; (TEST/DEBUG) | |
978 | stw r14,savesr6(r13) ; (TEST/DEBUG) | |
979 | mfsr r14,sr7 ; (TEST/DEBUG) | |
980 | stw r14,savesr7(r13) ; (TEST/DEBUG) | |
981 | mfsr r14,sr8 ; (TEST/DEBUG) | |
982 | stw r14,savesr8(r13) ; (TEST/DEBUG) | |
983 | mfsr r14,sr9 ; (TEST/DEBUG) | |
984 | stw r14,savesr9(r13) ; (TEST/DEBUG) | |
985 | mfsr r14,sr10 ; (TEST/DEBUG) | |
986 | stw r14,savesr10(r13) ; (TEST/DEBUG) | |
987 | mfsr r14,sr11 ; (TEST/DEBUG) | |
988 | stw r14,savesr11(r13) ; (TEST/DEBUG) | |
989 | mfsr r14,sr12 ; (TEST/DEBUG) | |
990 | stw r14,savesr12(r13) ; (TEST/DEBUG) | |
991 | mfsr r14,sr13 ; (TEST/DEBUG) | |
992 | stw r14,savesr13(r13) ; (TEST/DEBUG) | |
993 | mfsr r14,sr15 ; (TEST/DEBUG) | |
994 | stw r14,savesr15(r13) ; (TEST/DEBUG) | |
995 | #endif | |
996 | ||
997 | mtsr sr0,r12 /* Set the kernel SR0 */ | |
998 | stw r21,saver21(r13) /* Save this one */ | |
999 | addis r12,r12,0x0010 ; Point to the second segment of kernel | |
1000 | stw r10,savexer(r13) ; Save the rupt XER | |
1001 | mtsr sr1,r12 /* Set the kernel SR1 */ | |
1002 | stw r30,saver30(r13) /* Save this one */ | |
1003 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1004 | stw r31,saver31(r13) /* Save this one */ | |
1005 | mtsr sr2,r12 /* Set the kernel SR2 */ | |
1006 | stw r22,saver22(r13) /* Save this one */ | |
1007 | addis r12,r12,0x0010 ; Point to the third segment of kernel | |
1008 | la r10,savedar(r13) /* Point to exception info block */ | |
1009 | stw r23,saver23(r13) /* Save this one */ | |
1010 | mtsr sr3,r12 /* Set the kernel SR3 */ | |
1011 | stw r24,saver24(r13) /* Save this one */ | |
1012 | stw r25,saver25(r13) /* Save this one */ | |
1013 | mfdsisr r7 /* Get the 'rupt DSISR */ | |
1014 | stw r26,saver26(r13) /* Save this one */ | |
1015 | ||
1016 | bf- featL1ena,skipz5 ; L1 cache is disabled... | |
1017 | dcbz 0,r10 /* Allocate exception info line */ | |
1018 | skipz5: | |
1019 | ||
1020 | stw r27,saver27(r13) /* Save this one */ | |
1021 | li r10,emfp0 ; Point to floating point save | |
1022 | stw r28,saver28(r13) /* Save this one */ | |
1023 | stw r29,saver29(r13) /* Save this one */ | |
1024 | mfsr r14,sr14 ; Get the copyin/out segment register | |
1025 | stw r6,savedar(r13) /* Save the 'rupt DAR */ | |
1026 | bf- featL1ena,skipz5a ; Do not do this if no L1... | |
1027 | dcbz r10,r2 ; Clear and allocate an L1 slot | |
1028 | ||
1029 | skipz5a: stw r7,savedsisr(r13) /* Save the 'rupt code DSISR */ | |
1030 | stw r11,saveexception(r13) /* Save the exception code */ | |
1031 | stw r14,savesr14(r13) ; Save copyin/copyout | |
1032 | ||
1033 | lis r8,HIGH_ADDR(EXT(saveanchor)) /* Get the high part of the anchor */ | |
1034 | li r19,0 ; Assume no Altivec | |
1035 | ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */ | |
1036 | ||
1037 | bf featAltivec,noavec ; No Altivec on this CPU... | |
1038 | li r9,0 ; Get set to clear VRSAVE | |
1039 | mfspr r19,vrsave ; Get the VRSAVE register | |
1040 | mtspr vrsave,r9 ; Clear VRSAVE for each interrupt level | |
1041 | ; | |
1042 | ; We need to save the FPSCR as if it is normal context. | |
1043 | ; This is because pending exceptions will cause an exception even if | |
1044 | ; FP is disabled. We need to clear the FPSCR when we first start running in the | |
1045 | ; kernel. | |
1046 | ; | |
1047 | noavec: stw r19,savevrsave(r13) ; Save the vector register usage flags | |
1048 | ||
1049 | bf- featFP,nofpexe ; No possible floating point exceptions... | |
1050 | ||
1051 | mfmsr r9 ; Get the MSR value | |
1052 | ori r7,r9,lo16(MASK(MSR_FP)) ; Enable floating point | |
1053 | mtmsr r7 ; Do it | |
1054 | isync | |
1055 | stfd f0,emfp0(r2) ; Save FPR0 | |
1056 | stfd f1,emfp1(r2) ; Save FPR1 | |
1057 | mffs f0 ; Get the FPSCR | |
1058 | fsub f1,f1,f1 ; Make a 0 | |
1059 | stfd f0,savexfpscrpad(r13) ; Save the FPSCR | |
1060 | mtfsf 0xFF,f1 ; Clear it | |
1061 | lfd f0,emfp0(r2) ; Restore FPR0 | |
1062 | lfd f1,emfp1(r2) ; Restore FPR1 | |
1063 | mtmsr r9 ; Turn off FP | |
1064 | isync | |
1065 | nofpexe: | |
1066 | ||
1067 | /* | |
1068 | * Everything is saved at this point, except for FPRs, and VMX registers | |
1069 | * | |
1070 | * Time for a new save area. Allocate the trace table entry now also | |
1071 | * Note that we haven't touched R0-R5 yet, except for R0 and R1, which are already in the save area | |
1072 | */ | |
1073 | ||
1074 | ||
1075 | lllck: lwarx r9,0,r8 /* Grab the lock value */ | |
1076 | li r7,1 /* Use part of the delay time */ | |
1077 | mr. r9,r9 /* Is it locked? */ | |
1078 | bne- lllcks /* Yeah, wait for it to clear... */ | |
1079 | stwcx. r7,0,r8 /* Try to seize that there durn lock */ | |
1080 | beq+ lllckd /* Got it... */ | |
1081 | b lllck /* Collision, try again... */ | |
1082 | ||
1083 | lllcks: lwz r9,SVlock(r8) /* Get that lock in here */ | |
1084 | mr. r9,r9 /* Is it free yet? */ | |
1085 | beq+ lllck /* Yeah, try for it again... */ | |
1086 | b lllcks /* Sniff away... */ | |
1087 | ||
1088 | lllckd: isync /* Purge any speculative executions here */ | |
1089 | lis r23,hi16(EXT(trcWork)) ; Get the work area address | |
1090 | rlwinm r7,r11,30,0,31 /* Save 'rupt code shifted right 2 */ | |
1091 | ori r23,r23,lo16(EXT(trcWork)) ; Get the rest | |
1092 | #if 1 | |
1093 | lwz r14,traceMask(r23) /* Get the trace mask */ | |
1094 | #else | |
1095 | li r14,-1 /* (TEST/DEBUG) */ | |
1096 | #endif | |
1097 | addi r7,r7,10 /* Adjust for CR5_EQ position */ | |
1098 | lwz r15,SVfree(r8) /* Get the head of the save area list */ | |
1099 | lwz r25,SVinuse(r8) /* Get the in use count */ | |
1100 | rlwnm r7,r14,r7,22,22 /* Set CR5_EQ bit position to 0 if tracing allowed */ | |
1101 | lwz r20,traceCurr(r23) /* Pick up the current trace entry */ | |
1102 | mtcrf 0x04,r7 /* Set CR5 to show trace or not */ | |
1103 | ||
1104 | lwz r14,SACalloc(r15) /* Pick up the allocation bits */ | |
1105 | addi r25,r25,1 /* Bump up the in use count for the new savearea */ | |
1106 | lwz r21,traceEnd(r23) /* Grab up the end of it all */ | |
1107 | mr. r14,r14 /* Can we use the first one? */ | |
1108 | blt use1st /* Yeah... */ | |
1109 | ||
1110 | andis. r14,r14,0x8000 /* Show we used the second and remember if it was the last */ | |
1111 | addi r10,r15,0x0800 /* Point to the first one */ | |
1112 | b gotsave /* We have the area now... */ | |
1113 | ||
1114 | use1st: andis. r14,r14,0x4000 /* Mark first gone and remember if empty */ | |
1115 | mr r10,r15 /* Set the save area */ | |
1116 | ||
1117 | gotsave: stw r14,SACalloc(r15) /* Put back the allocation bits */ | |
1118 | bne nodqsave /* There's still an empty slot, don't dequeue... */ | |
1119 | ||
1120 | lwz r16,SACnext(r15) /* Get the next in line */ | |
1121 | stw r16,SVfree(r8) /* Dequeue our now empty save area block */ | |
1122 | ||
1123 | nodqsave: addi r22,r20,LTR_size /* Point to the next trace entry */ | |
1124 | stw r25,SVinuse(r8) /* Set the in use count */ | |
1125 | li r17,0 /* Clear this for the lock */ | |
1126 | cmplw r22,r21 /* Do we need to wrap the trace table? */ | |
1127 | stw r17,SAVprev(r10) /* Clear back pointer for the newly allocated guy */ | |
1128 | mtsprg 1,r10 /* Get set for the next 'rupt */ | |
1129 | bne+ gotTrcEnt /* We got a trace entry... */ | |
1130 | ||
1131 | lwz r22,traceStart(r23) /* Wrap back to the top */ | |
1132 | ||
1133 | gotTrcEnt: bne- cr5,skipTrace1 /* Don't want to trace this kind... */ | |
1134 | ||
1135 | stw r22,traceCurr(r23) /* Set the next entry for the next guy */ | |
1136 | ||
1137 | #if ESPDEBUG | |
1138 | dcbst br0,r23 ; (TEST/DEBUG) | |
1139 | sync ; (TEST/DEBUG) | |
1140 | #endif | |
1141 | ||
1142 | bf- featL1ena,skipz6 ; L1 cache is disabled... | |
1143 | dcbz 0,r20 /* Allocate cache for the entry */ | |
1144 | skipz6: | |
1145 | ||
1146 | skipTrace1: sync /* Make sure all stores are done */ | |
1147 | stw r17,SVlock(r8) /* Unlock both save and trace areas */ | |
1148 | ||
1149 | ||
1150 | /* | |
1151 | * At this point, we can take another exception and lose nothing. | |
1152 | * | |
1153 | * We still have the current savearea pointed to by R13, the next by R10 and | |
1154 | * sprg1. R20 contains the pointer to a trace entry and CR5_eq says | |
1155 | * to do the trace or not. | |
1156 | * | |
1157 | * Note that R13 was chosen as the save area pointer because the SIGP, | |
1158 | * firmware, and DSI/ISI handlers aren't supposed to touch anything | |
1159 | * over R12. But, actually, the DSI/ISI stuff does. | |
1160 | * | |
1161 | * | |
1162 | * Let's cut that trace entry now. | |
1163 | */ | |
1164 | ||
1165 | lwz r0,saver0(r13) ; Get back interrupt time R0 | |
1166 | bne- cr5,skipTrace2 /* Don't want to trace this kind... */ | |
1167 | ||
1168 | mfsprg r2,0 ; Get the per_proc | |
1169 | li r14,32 /* Second line of entry */ | |
1170 | ||
1171 | lwz r16,ruptStamp(r2) ; Get top of time base | |
1172 | lwz r17,ruptStamp+4(r2) ; Get the bottom of time stamp | |
1173 | ||
1174 | bf- featL1ena,skipz7 ; L1 cache is disabled... | |
1175 | dcbz r14,r20 /* Zap the second half */ | |
1176 | ||
1177 | skipz7: stw r16,LTR_timeHi(r20) /* Set the upper part of TB */ | |
1178 | bf featSMP,nopir4 ; Is there a processor ID register on this guy? | |
1179 | mfspr r19,pir /* Get the processor address */ | |
1180 | b gotpir4 /* Got it... */ | |
1181 | nopir4: li r19,0 /* Assume processor 0 for those underprivileged folks */ | |
1182 | gotpir4: | |
1183 | lwz r1,saver1(r13) ; Get back interrupt time R1 | |
1184 | stw r17,LTR_timeLo(r20) /* Set the lower part of TB */ | |
1185 | rlwinm r19,r19,0,27,31 /* Cut the junk */ | |
1186 | lwz r2,saver2(r13) ; Get back interrupt time R2 | |
1187 | stw r0,LTR_r0(r20) /* Save off register 0 */ | |
1188 | lwz r3,saver3(r13) ; Restore this one | |
1189 | sth r19,LTR_cpu(r20) /* Stash the cpu address */ | |
1190 | stw r1,LTR_r1(r20) /* Save off register 1 */ | |
1191 | lwz r4,saver4(r13) ; Restore this one | |
1192 | stw r2,LTR_r2(r20) /* Save off register 2 */ | |
1193 | lwz r5,saver5(r13) ; Restore this one | |
1194 | stw r3,LTR_r3(r20) /* Save off register 3 */ | |
1195 | lwz r16,savecr(r13) /* We don't remember the CR anymore, get it */ | |
1196 | stw r4,LTR_r4(r20) /* Save off register 4 */ | |
1197 | mfsrr0 r17 /* Get this back, it's still good */ | |
1198 | stw r5,LTR_r5(r20) /* Save off register 5 */ | |
1199 | mfsrr1 r18 /* This is still good in here also */ | |
1200 | ||
1201 | stw r16,LTR_cr(r20) /* Save the CR (or dec) */ | |
1202 | stw r17,LTR_srr0(r20) /* Save the SSR0 */ | |
1203 | stw r18,LTR_srr1(r20) /* Save the SRR1 */ | |
1204 | mfdar r17 /* Get this back */ | |
1205 | ||
1206 | mflr r16 /* Get the LR */ | |
1207 | stw r17,LTR_dar(r20) /* Save the DAR */ | |
1208 | mfctr r17 /* Get the CTR */ | |
1209 | stw r16,LTR_lr(r20) /* Save the LR */ | |
1210 | #if 0 | |
1211 | lis r17,HIGH_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) | |
1212 | ori r17,r17,LOW_ADDR(EXT(saveanchor)) ; (TEST/DEBUG) | |
1213 | lwz r16,SVcount(r17) ; (TEST/DEBUG) | |
1214 | lwz r17,SVinuse(r17) ; (TEST/DEBUG) | |
1215 | rlwimi r17,r16,16,0,15 ; (TEST/DEBUG) | |
1216 | #endif | |
1217 | stw r17,LTR_ctr(r20) /* Save off the CTR */ | |
1218 | stw r13,LTR_save(r20) /* Save the savearea */ | |
1219 | sth r11,LTR_excpt(r20) /* Save the exception type */ | |
1220 | #if ESPDEBUG | |
1221 | addi r17,r20,32 ; (TEST/DEBUG) | |
1222 | dcbst br0,r20 ; (TEST/DEBUG) | |
1223 | dcbst br0,r17 ; (TEST/DEBUG) | |
1224 | sync ; (TEST/DEBUG) | |
1225 | #endif | |
1226 | ||
1227 | /* | |
1228 | * We're done with the trace, except for maybe modifying the exception | |
1229 | * code later on. So, that means that we need to save R20 and CR5, but | |
1230 | * R0 to R5 are clear now. | |
1231 | * | |
1232 | * So, let's finish setting up the kernel registers now. | |
1233 | */ | |
1234 | ||
1235 | skipTrace2: | |
1236 | ||
1237 | #if PERFTIMES && DEBUG | |
1238 | li r3,68 ; Indicate interrupt | |
1239 | mr r4,r11 ; Get code to log | |
1240 | mr r5,r13 ; Get savearea to log | |
1241 | mr r8,r0 ; Save R0 | |
1242 | bl EXT(dbgLog2) ; Cut log entry | |
1243 | mr r0,r8 ; Restore R0 | |
1244 | #endif | |
1245 | ||
1246 | mfsprg r2,0 /* Get the per processor block */ | |
1247 | ||
1248 | #if CHECKSAVE | |
1249 | ||
1250 | lis r4,0x7FFF /* (TEST/DEBUG) */ | |
1251 | mfdec r12 /* (TEST/DEBUG) */ | |
1252 | or r4,r4,r12 /* (TEST/DEBUG) */ | |
1253 | mtdec r4 /* (TEST/DEBUG) */ | |
1254 | li r4,0x20 /* (TEST/DEBUG) */ | |
1255 | ||
1256 | lwarx r8,0,r4 ; ? | |
1257 | ||
1258 | mpwait2: lwarx r8,0,r4 /* (TEST/DEBUG) */ | |
1259 | mr. r8,r8 /* (TEST/DEBUG) */ | |
1260 | bne- mpwait2 /* (TEST/DEBUG) */ | |
1261 | stwcx. r4,0,r4 /* (TEST/DEBUG) */ | |
1262 | bne- mpwait2 /* (TEST/DEBUG) */ | |
1263 | ||
1264 | isync /* (TEST/DEBUG) */ | |
1265 | lwz r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1266 | mr. r4,r4 /* (TEST/DEBUG) */ | |
1267 | li r4,1 /* (TEST/DEBUG) */ | |
1268 | bne- doncheksv /* (TEST/DEBUG) */ | |
1269 | ||
1270 | lis r8,HIGH_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ | |
1271 | ori r8,r8,LOW_ADDR(EXT(saveanchor)) /* (TEST/DEBUG) */ | |
1272 | ||
1273 | stw r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1274 | ||
1275 | lwarx r4,0,r8 ; ? | |
1276 | ||
1277 | mpwait2x: lwarx r4,0,r8 /* (TEST/DEBUG) */ | |
1278 | mr. r4,r4 /* (TEST/DEBUG) */ | |
1279 | bne- mpwait2x /* (TEST/DEBUG) */ | |
1280 | stwcx. r8,0,r8 /* (TEST/DEBUG) */ | |
1281 | bne- mpwait2x /* (TEST/DEBUG) */ | |
1282 | ||
1283 | isync /* (TEST/DEBUG) */ | |
1284 | ||
1285 | #if 0 | |
1286 | rlwinm r4,r13,0,0,19 /* (TEST/DEBUG) */ | |
1287 | lwz r21,SACflags(r4) /* (TEST/DEBUG) */ | |
1288 | rlwinm r22,r21,24,24,31 /* (TEST/DEBUG) */ | |
1289 | cmplwi r22,0x00EE /* (TEST/DEBUG) */ | |
1290 | lwz r22,SACvrswap(r4) /* (TEST/DEBUG) */ | |
1291 | bne- currbad /* (TEST/DEBUG) */ | |
1292 | andis. r21,r21,hi16(sac_perm) /* (TEST/DEBUG) */ | |
1293 | bne- currnotbad /* (TEST/DEBUG) */ | |
1294 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1295 | bne+ currnotbad /* (TEST/DEBUG) */ | |
1296 | ||
1297 | currbad: lis r23,hi16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ | |
1298 | ori r23,r23,lo16(EXT(debugbackpocket)) /* (TEST/DEBUG) */ | |
1299 | stw r23,SVfree(r8) /* (TEST/DEBUG) */ | |
1300 | ||
1301 | mfsprg r25,1 /* (TEST/DEBUG) */ | |
1302 | mtsprg 1,r23 /* (TEST/DEBUG) */ | |
1303 | lwz r26,SACalloc(r23) /* (TEST/DEBUG) */ | |
1304 | rlwinm r26,r26,0,1,31 /* (TEST/DEBUG) */ | |
1305 | stw r26,SACalloc(r23) /* (TEST/DEBUG) */ | |
1306 | ||
1307 | sync /* (TEST/DEBUG) */ | |
1308 | li r28,0 /* (TEST/DEBUG) */ | |
1309 | stw r28,0x20(br0) /* (TEST/DEBUG) */ | |
1310 | stw r28,0(r8) /* (TEST/DEBUG) */ | |
1311 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1312 | ||
1313 | currnotbad: | |
1314 | #endif | |
1315 | ||
1316 | lwz r28,SVcount(r8) /* (TEST/DEBUG) */ | |
1317 | lwz r21,SVinuse(r8) /* (TEST/DEBUG) */ | |
1318 | lwz r23,SVmin(r8) /* (TEST/DEBUG) */ | |
1319 | sub r22,r28,r21 /* (TEST/DEBUG) */ | |
1320 | cmpw r22,r23 /* (TEST/DEBUG) */ | |
1321 | bge+ cksave0 /* (TEST/DEBUG) */ | |
1322 | ||
1323 | li r4,0 /* (TEST/DEBUG) */ | |
1324 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1325 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1326 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1327 | ||
1328 | cksave0: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ | |
1329 | li r24,0 /* (TEST/DEBUG) */ | |
1330 | li r29,1 /* (TEST/SAVE) */ | |
1331 | ||
1332 | cksave0a: mr. r28,r28 /* (TEST/DEBUG) */ | |
1333 | beq- cksave3 /* (TEST/DEBUG) */ | |
1334 | ||
1335 | rlwinm. r21,r28,0,4,19 /* (TEST/DEBUG) */ | |
1336 | bne+ cksave1 /* (TEST/DEBUG) */ | |
1337 | ||
1338 | li r4,0 /* (TEST/DEBUG) */ | |
1339 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1340 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1341 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1342 | ||
1343 | cksave1: rlwinm. r21,r28,0,21,3 /* (TEST/DEBUG) */ | |
1344 | beq+ cksave2 /* (TEST/DEBUG) */ | |
1345 | ||
1346 | li r4,0 /* (TEST/DEBUG) */ | |
1347 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1348 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1349 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1350 | ||
1351 | cksave2: lwz r25,SACalloc(r28) /* (TEST/DEBUG) */ | |
1352 | lbz r26,SACflags+2(r28) /* (TEST/DEBUG) */ | |
1353 | lbz r21,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1354 | cmplwi r26,0x00EE /* (TEST/DEBUG) */ | |
1355 | stb r29,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1356 | beq+ cksave2z | |
1357 | ||
1358 | li r4,0 /* (TEST/DEBUG) */ | |
1359 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1360 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1361 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1362 | ||
1363 | cksave2z: mr. r21,r21 /* (TEST/DEBUG) */ | |
1364 | beq+ cksave2a /* (TEST/DEBUG) */ | |
1365 | ||
1366 | li r4,0 /* (TEST/DEBUG) */ | |
1367 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1368 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1369 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1370 | ||
1371 | cksave2a: rlwinm r26,r25,1,31,31 /* (TEST/DEBUG) */ | |
1372 | rlwinm r27,r25,2,31,31 /* (TEST/DEBUG) */ | |
1373 | add r24,r24,r26 /* (TEST/DEBUG) */ | |
1374 | add r24,r24,r27 /* (TEST/DEBUG) */ | |
1375 | lwz r28,SACnext(r28) /* (TEST/DEBUG) */ | |
1376 | b cksave0a /* (TEST/DEBUG) */ | |
1377 | ||
1378 | cksave3: cmplw r24,r22 /* (TEST/DEBUG) */ | |
1379 | beq+ cksave4 /* (TEST/DEBUG) */ | |
1380 | ||
1381 | li r4,0 /* (TEST/DEBUG) */ | |
1382 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1383 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1384 | BREAKPOINT_TRAP /* (TEST/DEBUG) */ | |
1385 | ||
1386 | cksave4: lwz r28,SVfree(r8) /* (TEST/DEBUG) */ | |
1387 | li r24,0 /* (TEST/DEBUG) */ | |
1388 | ||
1389 | cksave5: mr. r28,r28 /* (TEST/DEBUG) */ | |
1390 | beq- cksave6 /* (TEST/DEBUG) */ | |
1391 | stb r24,SACflags+3(r28) /* (TEST/DEBUG) */ | |
1392 | lwz r28,SACnext(r28) /* (TEST/DEBUG) */ | |
1393 | b cksave5 /* (TEST/DEBUG) */ | |
1394 | ||
1395 | cksave6: | |
1396 | ||
1397 | li r4,0 /* (TEST/DEBUG) */ | |
1398 | stw r4,0xD80(br0) /* (TEST/DEBUG) */ | |
1399 | stw r4,0(r8) /* (TEST/DEBUG) */ | |
1400 | ||
1401 | doncheksv: | |
1402 | li r4,0 /* (TEST/DEBUG) */ | |
1403 | stw r4,0x20(br0) /* (TEST/DEBUG) */ | |
1404 | mtdec r12 /* (TEST/DEBUG) */ | |
1405 | #endif | |
1406 | ||
1407 | lis r4,HIGH_ADDR(EXT(MPspec)) /* Get the MP control block */ | |
1408 | dcbt 0,r2 /* We'll need the per_proc in a sec */ | |
1409 | cmplwi cr0,r11,T_INTERRUPT /* Do we have an external interrupt? */ | |
1410 | ori r4,r4,LOW_ADDR(EXT(MPspec)) /* Get the bottom half of the MP control block */ | |
1411 | bne+ notracex /* Not an external... */ | |
1412 | ||
1413 | /* | |
1414 | * Here we check to see if there was an interprocessor signal | |
1415 | */ | |
1416 | ||
1417 | lwz r4,MPSSIGPhandler(r4) /* Get the address of the SIGP interrupt filter */ | |
1418 | lhz r3,PP_CPU_FLAGS(r2) /* Get the CPU flags */ | |
1419 | cmplwi cr1,r4,0 /* Check if signal filter is initialized yet */ | |
1420 | andi. r3,r3,LOW_ADDR(SIGPactive) /* See if this processor has started up */ | |
1421 | mtlr r4 /* Load up filter address */ | |
1422 | beq- cr1,notracex /* We don't have a filter yet... */ | |
1423 | beq- notracex /* This processor hasn't started filtering yet... */ | |
1424 | ||
1425 | blrl /* Filter the interrupt */ | |
1426 | ||
1427 | mfsprg r2,0 /* Make sure we have the per processor block */ | |
1428 | cmplwi cr0,r3,kMPIOInterruptPending /* See what the filter says */ | |
1429 | li r11,T_INTERRUPT /* Assume we have a regular external 'rupt */ | |
1430 | beq+ modRupt /* Yeah, we figured it would be... */ | |
1431 | li r11,T_SIGP /* Assume we had a signal processor interrupt */ | |
1432 | bgt+ modRupt /* Yeah, at this point we would assume so... */ | |
1433 | li r11,T_IN_VAIN /* Nothing there actually, so eat it */ | |
1434 | ||
1435 | modRupt: stw r11,PP_SAVE_EXCEPTION_TYPE(r2) /* Set that it was either in vain or a SIGP */ | |
1436 | stw r11,saveexception(r13) /* Save the exception code here also */ | |
1437 | bne- cr5,notracex /* Jump if no tracing... */ | |
1438 | sth r11,LTR_excpt(r20) /* Save the exception type */ | |
1439 | ||
1440 | notracex: | |
1441 | ||
1442 | #if 0 | |
1443 | bf featSMP,nopir6 /* (TEST/DEBUG) */ | |
1444 | mfspr r7,pir /* (TEST/DEBUG) */ | |
1445 | b gotpir6 /* (TEST/DEBUG) */ | |
1446 | nopir6: li r7,0 /* (TEST/DEBUG) */ | |
1447 | gotpir6: /* (TEST/DEBUG) */ | |
1448 | lis r6,HIGH_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ | |
1449 | rlwinm r7,r7,8,23,23 /* (TEST/DEBUG) */ | |
1450 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1451 | rlwimi r7,r7,1,22,22 /* (TEST/DEBUG) */ | |
1452 | ori r6,r6,LOW_ADDR(EXT(RuptCtrs)) /* (TEST/DEBUG) */ | |
1453 | rlwinm r1,r11,2,0,29 /* (TEST/DEBUG) */ | |
1454 | add r6,r6,r7 /* (TEST/DEBUG) */ | |
1455 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1456 | lwz r21,(47*16)+8(r6) /* (TEST/DEBUG) */ | |
1457 | lwz r22,(47*16)+12(r6) /* (TEST/DEBUG) */ | |
1458 | add r1,r1,r6 /* (TEST/DEBUG) */ | |
1459 | mftb r24 /* (TEST/DEBUG) */ | |
1460 | sub r22,r24,r22 /* (TEST/DEBUG) */ | |
1461 | lwz r4,4(r6) /* (TEST/DEBUG) */ | |
1462 | cmplw cr2,r22,r21 /* (TEST/DEBUG) */ | |
1463 | lwz r7,4(r1) /* (TEST/DEBUG) */ | |
1464 | lwz r21,8(r6) /* (TEST/DEBUG) */ | |
1465 | blt+ cr2,nottime /* (TEST/DEBUG) */ | |
1466 | stw r24,(47*16)+12(r6) /* (TEST/DEBUG) */ | |
1467 | ||
1468 | nottime: addi r4,r4,1 /* (TEST/DEBUG) */ | |
1469 | lwz r22,8(r1) /* (TEST/DEBUG) */ | |
1470 | addi r7,r7,1 /* (TEST/DEBUG) */ | |
1471 | stw r4,4(r6) /* (TEST/DEBUG) */ | |
1472 | lwz r3,0(r6) /* (TEST/DEBUG) */ | |
1473 | mr. r21,r21 /* (TEST/DEBUG) */ | |
1474 | stw r7,4(r1) /* (TEST/DEBUG) */ | |
1475 | mtlr r12 /* (TEST/DEBUG) */ | |
1476 | lwz r1,0(r1) /* (TEST/DEBUG) */ | |
1477 | beq- nottimed1 /* (TEST/DEBUG) */ | |
1478 | blt+ cr2,isnttime1 /* (TEST/DEBUG) */ | |
1479 | ||
1480 | nottimed1: mr. r3,r3 /* (TEST/DEBUG) */ | |
1481 | bgelrl+ /* (TEST/DEBUG) */ | |
1482 | ||
1483 | isnttime1: mr. r22,r22 /* (TEST/DEBUG) */ | |
1484 | beq- nottimed2 /* (TEST/DEBUG) */ | |
1485 | blt+ cr2,isnttime2 /* (TEST/DEBUG) */ | |
1486 | ||
1487 | nottimed2: mr. r3,r1 /* (TEST/DEBUG) */ | |
1488 | mtlr r12 /* (TEST/DEBUG) */ | |
1489 | mr r4,r7 /* (TEST/DEBUG) */ | |
1490 | bgelrl+ /* (TEST/DEBUG) */ | |
1491 | mr r3,r11 /* (TEST/DEBUG) */ | |
1492 | ||
1493 | isnttime2: cmplwi r11,T_DATA_ACCESS /* (TEST/DEBUG) */ | |
1494 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1495 | bne+ nodsidisp /* (TEST/DEBUG) */ | |
1496 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1497 | beq- nottimed3 /* (TEST/DEBUG) */ | |
1498 | blt+ cr2,nodsidisp /* (TEST/DEBUG) */ | |
1499 | ||
1500 | nottimed3: li r3,5 /* (TEST/DEBUG) */ | |
1501 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1502 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1503 | mtlr r12 /* (TEST/DEBUG) */ | |
1504 | blrl /* (TEST/DEBUG) */ | |
1505 | ||
1506 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1507 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1508 | lis r3,9 /* (TEST/DEBUG) */ | |
1509 | ori r3,r3,5 /* (TEST/DEBUG) */ | |
1510 | mtlr r12 /* (TEST/DEBUG) */ | |
1511 | lwz r4,savedar(r13) /* (TEST/DEBUG) */ | |
1512 | blrl /* (TEST/DEBUG) */ | |
1513 | ||
1514 | nodsidisp: cmplwi r11,T_INSTRUCTION_ACCESS /* (TEST/DEBUG) */ | |
1515 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1516 | bne+ noisidisp /* (TEST/DEBUG) */ | |
1517 | mr. r22,r22 /* (TEST/DEBUG) */ | |
1518 | beq- nottimed4 /* (TEST/DEBUG) */ | |
1519 | blt+ cr2,noisidisp /* (TEST/DEBUG) */ | |
1520 | ||
1521 | nottimed4: li r3,6 /* (TEST/DEBUG) */ | |
1522 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1523 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1524 | mtlr r12 /* (TEST/DEBUG) */ | |
1525 | blrl /* (TEST/DEBUG) */ | |
1526 | ||
1527 | noisidisp: mr r3,r11 /* (TEST/DEBUG) */ | |
1528 | #endif | |
1529 | ||
1530 | #if 0 | |
1531 | cmplwi r11,T_PROGRAM /* (TEST/DEBUG) */ | |
1532 | lis r12,HIGH_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1533 | bne+ nopgmdisp /* (TEST/DEBUG) */ | |
1534 | li r3,7 /* (TEST/DEBUG) */ | |
1535 | ori r12,r12,LOW_ADDR(EXT(GratefulDeb)) /* (TEST/DEBUG) */ | |
1536 | lwz r4,savesrr0(r13) /* (TEST/DEBUG) */ | |
1537 | mtlr r12 /* (TEST/DEBUG) */ | |
1538 | blrl /* (TEST/DEBUG) */ | |
1539 | ||
1540 | nopgmdisp: mr r3,r11 /* (TEST/DEBUG) */ | |
1541 | #endif | |
1542 | ||
1543 | li r21,0 ; Assume no processor register for now | |
1544 | lis r12,hi16(EXT(hw_counts)) ; Get the high part of the interrupt counters | |
1545 | bf featSMP,nopirhere ; Jump if this processor does not have a PIR... | |
1546 | mfspr r21,pir ; Get the PIR | |
1547 | ||
1548 | nopirhere: ori r12,r12,lo16(EXT(hw_counts)) ; Get the low part of the interrupt counters | |
1549 | lwz r7,savesrr1(r13) ; Get the entering MSR | |
1550 | rlwinm r21,r21,8,20,23 ; Get index to processor counts | |
1551 | mtcrf 0x80,r0 /* Set our CR0 to the high nybble of the request code */ | |
1552 | rlwinm r6,r0,1,0,31 /* Move sign bit to the end */ | |
1553 | cmplwi cr1,r11,T_SYSTEM_CALL /* Did we get a system call? */ | |
1554 | crandc cr0_lt,cr0_lt,cr0_gt /* See if we have R0 equal to 0b10xx...x */ | |
1555 | add r12,r12,r21 ; Point to the processor count area | |
1556 | cmplwi cr3,r11,T_IN_VAIN /* Was this all in vain? All for nothing? */ | |
1557 | lwzx r22,r12,r11 ; Get the old value | |
1558 | cmplwi cr2,r6,1 /* See if original R0 had the CutTrace request code in it */ | |
1559 | addi r22,r22,1 ; Count this one | |
1560 | cmplwi cr4,r11,T_SIGP /* Indicate if we had a SIGP 'rupt */ | |
1561 | stwx r22,r12,r11 ; Store it back | |
1562 | ||
1563 | beq- cr3,EatRupt /* Interrupt was all for nothing... */ | |
1564 | cmplwi cr3,r11,T_MACHINE_CHECK ; Did we get a machine check? | |
1565 | bne+ cr1,noCutT /* Not a system call... */ | |
1566 | bnl+ cr0,noCutT /* R0 not 0b10xxx...x, can't be any kind of magical system call... */ | |
1567 | rlwinm. r7,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Did we come from user state? | |
1568 | lis r1,hi16(EXT(dgWork)) ; Get the diagnostics flags | |
1569 | beq+ FCisok ; From supervisor state... | |
1570 | ||
1571 | ori r1,r1,lo16(EXT(dgWork)) ; Again | |
1572 | lwz r1,dgFlags(r1) ; Get the flags | |
1573 | rlwinm. r1,r1,0,enaUsrFCallb,enaUsrFCallb ; Are they valid? | |
1574 | beq- noCutT ; No... | |
1575 | ||
;
;	Fast-path dispatch, continued.  CR fields were set up by the dispatch
;	code above this chunk: cr2 = CutTrace system call, cr3 = machine check,
;	cr4 = SIGP, cr5 = trace-buffer state -- NOTE(review): set outside this
;	view; confirm against the preceding dispatch sequence.
;
FCisok:		beq-	cr2,isCutTrace			/* This is a CutTrace system call */

/*
 *	Here's where we call the firmware.  If it returns T_IN_VAIN, that means
 *	that it has handled the interruption.  Remember: thou shalt not trash R13
 *	or R20 while you are away.  Anything else is ok.
 */

		lis	r1,hi16(EXT(FirmwareCall))	/* Top half of firmware call handler */
		ori	r1,r1,lo16(EXT(FirmwareCall))	/* Bottom half of it */
		lwz	r3,saver3(r13)			/* Restore the first parameter, the rest are ok already */
		mtlr	r1				/* Get it in the link register */
		blrl					/* Call the handler */

		cmplwi	r3,T_IN_VAIN			/* Was it handled? */
		mfsprg	r2,0				/* Restore the per_processor area */
		beq+	EatRupt				/* Interrupt was handled... */
		mr	r11,r3				/* Put the 'rupt code in the right register */
		b	noSIGP				/* Go to the normal system call handler */

isCutTrace:	li	r7,-32768			/* Get a 0x8000 for the exception code */
		bne-	cr5,EatRupt			/* Tracing is disabled... */
		sth	r7,LTR_excpt(r20)		/* Modify the exception type to a CutTrace (R20 = trace entry) */
		b	EatRupt				/* Time to go home... */

/* We are here 'cause we didn't have a CutTrace system call */

noCutT:		beq-	cr3,MachineCheck		; Whoa... Machine check...
		bne+	cr4,noSIGP			/* Skip away if we didn't get a SIGP... */

		lis	r6,HIGH_ADDR(EXT(MPsignalFW))	/* Top half of SIGP handler */
		ori	r6,r6,LOW_ADDR(EXT(MPsignalFW))	/* Bottom half of it */
		mtlr	r6				/* Get it in the link register */

		blrl					/* Call the handler - we'll only come back if this is an AST, */
							/* 'cause FW can't handle that */
		mfsprg	r2,0				/* Restore the per_processor area */
;
;	The following interrupts are the only ones that can be redriven
;	by the higher level code or emulation routines.
;	On entry here: R3 = interruption code returned by the handler.
;

Redrive:	cmplwi	cr0,r3,T_IN_VAIN		/* Did the signal handler eat the signal? */
		mr	r11,r3				/* Move it to the right place */
		beq+	cr0,EatRupt			/* Bail now if the signal handler processed the signal... */


/*
 *	Here's where we check for the other fast-path exceptions: translation exceptions,
 *	emulated instructions, etc.  R11 holds the interruption code throughout.
 */

noSIGP:		cmplwi	cr3,r11,T_ALTIVEC_ASSIST	; Check for an Altivec denorm assist
		cmplwi	cr1,r11,T_PROGRAM		/* See if we got a program exception */
		cmplwi	cr2,r11,T_INSTRUCTION_ACCESS	/* Check on an ISI */
		bne+	cr3,noAltivecAssist		; It is not an assist...
		b	EXT(AltivecAssist)		; It is an assist...

noAltivecAssist:
		bne+	cr1,noEmulate			; No emulation here...
		b	EXT(Emulate)			; Go try to emulate...

noEmulate:	cmplwi	cr3,r11,T_CSWITCH		/* Are we context switching */
		cmplwi	r11,T_DATA_ACCESS		/* Check on a DSI */
		beq-	cr2,DSIorISI			/* It's a PTE fault... */
		beq-	cr3,conswtch			/* It's a context switch... */
		bne+	PassUp				/* It's not a PTE fault... */
1644 | ||
/*
 *	DSIorISI: fast-path DSI/ISI (PTE fault) handling.
 *
 *	This call will either handle the fault, in which case it will not
 *	return, or return to pass the fault up the line.
 *
 *	On return from handlePF: R3 = T_IN_VAIN if handled, else the
 *	interruption code to pass up.  If the interrupted context had
 *	MSR[RI] set, the fault came from recoverable emulation code: we
 *	skip the failing instruction, clear CR0_EQ in the saved CR so the
 *	emulation code sees the failure, and resume instead of passing up.
 */

DSIorISI:	lis	r7,HIGH_ADDR(EXT(handlePF))	/* Top half of DSI handler */
		ori	r7,r7,LOW_ADDR(EXT(handlePF))	/* Bottom half of it */
		mtlr	r7				/* Get it in the link register */
		mr	r3,r11				/* Move the 'rupt code */

		blrl					/* See if we can handle this fault */

		lwz	r0,savesrr1(r13)		; Get the MSR in use at exception time
		mfsprg	r2,0				/* Get back per_proc */
		cmplwi	cr1,r3,T_IN_VAIN		; Was it handled?
		andi.	r4,r0,lo16(MASK(MSR_RI))	; See if the recover bit is on
		mr	r11,r3				/* Make sure we can find this later */
		beq+	cr1,EatRupt			; Yeah, just blast back to the user...
		andc	r0,r0,r4			; Remove the recover bit
		beq+	PassUp				; Not on, normal case...
		lwz	r4,savesrr0(r13)		; Get the failing instruction address
		lwz	r5,savecr(r13)			; Get the condition register
		stw	r0,savesrr1(r13)		; Save the result MSR
		addi	r4,r4,4				; Skip failing instruction
		rlwinm	r5,r5,0,3,1			; Clear CR0_EQ to let emulation code know we failed
		stw	r4,savesrr0(r13)		; Save instruction address
		stw	r5,savecr(r13)			; And the resume CR (bug fix: was "stw r4", which stored the
							; instruction address over the CR and lost the CR0_EQ signal)
		b	EatRupt				; Resume emulated code
1674 | ||
/*
 *	Here is where we handle the context switch firmware call.  The old
 *	context has been saved, and the new savearea is in saver3.  We'll just
 *	muck around with the savearea pointers, and then join the exit routine.
 */
conswtch:	lwz	r28,SAVflags(r13)		/* Get the flags of the current */
		mr	r29,r13				/* Save the save */
		rlwinm	r30,r13,0,0,19			/* Get the start of the savearea block (4K aligned) */
		lwz	r5,saver3(r13)			/* Switch to the new savearea */
		oris	r28,r28,HIGH_ADDR(SAVattach)	/* Turn on the attached flag */
		lwz	r30,SACvrswap(r30)		/* get real to virtual translation */
		mr	r13,r5				/* Switch saveareas */
		xor	r27,r29,r30			/* Flip to virtual (vrswap is an XOR offset) */
		stw	r28,SAVflags(r29)		/* Stash it back */
		stw	r27,saver3(r5)			/* Push the old savearea (virtual) to the switch-to routine */
		b	EatRupt				/* Start 'er up... */
1691 | ||
;
;	Handle machine check here.
;
;	If SRR1 shows a data-cache parity error (dcmck), perform an L1 data
;	cache hardware flush via MSSCR0 and eat the exception.  MSSCR0/dl1hwf
;	are MPC7450-family facilities -- NOTE(review): confirm this path only
;	runs on processors that have them.
;
MachineCheck:
		lwz	r27,savesrr1(r13)		; Get SRR1 at the time of the machine check
		rlwinm.	r11,r27,0,dcmck,dcmck		; Data cache parity machine check?
		beq+	notDCache			; No, go see if it was a probe fault...

		mfspr	r11,msscr0			; Get the memory subsystem control register
		dssall					; Stop all data streams before the flush
		sync

		lwz	r27,savesrr1(r13)		; Reload SRR1 to create a load to depend on

hiccup:		cmplw	r27,r27				; Always-equal compare: force the load above to complete
		bne-	hiccup				; Never taken; the branch just serializes
		isync					; Discard any prefetched instructions

		oris	r11,r11,hi16(dl1hwfm)		; Set the L1 data cache hardware flush bit
		mtspr	msscr0,r11			; Start the hardware flush

rstbsy:		mfspr	r11,msscr0			; Get the memory subsystem control register
		rlwinm.	r11,r11,0,dl1hwf,dl1hwf		; Is the hardware flush still busy?
		bne	rstbsy				; Yes, keep waiting...

		sync					; Make sure the flush is complete

		li	r11,T_IN_VAIN			; Mark the interruption handled
		b	EatRupt				; Return to the interrupted code...


notDCache:
;
;	Check if the failure was in
;	ml_probe_read.  If so, this is expected, so modify the PC to
;	ml_probe_read_mck and then eat the exception.
;
		lwz	r30,savesrr0(r13)		; Get the failing PC
		lis	r28,hi16(EXT(ml_probe_read_mck)) ; High order part
		lis	r27,hi16(EXT(ml_probe_read))	; High order part
		ori	r28,r28,lo16(EXT(ml_probe_read_mck)) ; Get the low part
		ori	r27,r27,lo16(EXT(ml_probe_read)) ; Get the low part
		cmplw	r30,r28				; Check highest possible
		cmplw	cr1,r30,r27			; Check lowest
		bge-	PassUp				; Outside of range
		blt-	cr1,PassUp			; Outside of range
;
;	We need to fix up the BATs here because the probe
;	routine messed them all up...  As long as we are at it,
;	fix up to return directly to caller of probe.
;	NOTE(review): only DBAT 0 gets its low half restored; DBATs 1-3 get
;	uppers only -- presumably ml_probe_read saved exactly these five
;	values in r5-r9; confirm against ml_probe_read.
;

		lwz	r30,saver5(r13)			; Get proper DBAT values
		lwz	r28,saver6(r13)
		lwz	r27,saver7(r13)
		lwz	r11,saver8(r13)
		lwz	r18,saver9(r13)

		sync
		mtdbatu	0,r30				; Restore DBAT 0 high
		mtdbatl	0,r28				; Restore DBAT 0 low
		mtdbatu	1,r27				; Restore DBAT 1 high
		mtdbatu	2,r11				; Restore DBAT 2 high
		mtdbatu	3,r18				; Restore DBAT 3 high
		sync

		lwz	r28,savelr(r13)			; Get return point
		lwz	r27,saver0(r13)			; Get the saved MSR (probe stashed it in R0)
		li	r30,0				; Get a failure RC
		stw	r28,savesrr0(r13)		; Set the return point
		stw	r27,savesrr1(r13)		; Set the continued MSR
		stw	r30,saver3(r13)			; Set return code
		li	r11,T_IN_VAIN			; Set new interrupt code
		b	EatRupt				; Yum, yum, eat it all up...
1769 | ||
/*
 *	Here's where we come back from some instruction emulator.  If we come back with
 *	T_IN_VAIN, the emulation is done and we should just reload state and directly
 *	go back to the interrupted code.  Otherwise, we'll check to see if
 *	we need to redrive with a different interrupt, i.e., DSI.
 *	In:  R11 = interruption code, R13 = current savearea (real address).
 */

		.align	5
		.globl	EXT(EmulExit)

LEXT(EmulExit)

		cmplwi	r11,T_IN_VAIN			/* Was it emulated? */
		lis	r1,hi16(SAVredrive)		; Get redrive request
		mfsprg	r2,0				; Restore the per_proc area
		beq+	EatRupt				/* Yeah, just blast back to the user... */
		lwz	r4,SAVflags(r13)		; Pick up the flags

		and.	r0,r4,r1			; Check if redrive requested
		andc	r4,r4,r1			; Clear redrive

		beq+	PassUp				; No redrive, just keep on going...

		lwz	r3,saveexception(r13)		; Restore exception code
		stw	r4,SAVflags(r13)		; Set the flags (redrive bit now clear)
		b	Redrive				; Redrive the exception...
1796 | ||
/* Jump into main handler code switching on VM at the same time */

/* We assume kernel data is mapped contiguously in physical
 * memory, otherwise we'd need to switch on (at least) virtual data.
 * SRs are already set up.
 * In:  R11 = interruption code (also the byte offset into
 * exception_handlers), R13 = savearea (real address).
 */
PassUp:		lis	r2,hi16(EXT(exception_handlers))	; Get exception vector address
		ori	r2,r2,lo16(EXT(exception_handlers))	; And low half
		lwzx	r6,r2,r11		/* Get the actual exception handler address */

PassUpDeb:	lwz	r8,SAVflags(r13)	/* Get the flags */
		mtsrr0	r6			/* Set up the handler address */
		oris	r8,r8,HIGH_ADDR(SAVattach)	/* Since we're passing it up, attach it */
		rlwinm	r5,r13,0,0,19		/* Back off to the start of savearea block */

		mfmsr	r3			/* Get our MSR */
		stw	r8,SAVflags(r13)	/* Pass up the flags */
		rlwinm	r3,r3,0,MSR_BE_BIT+1,MSR_SE_BIT-1	/* Clear all but the trace bits */
		li	r2,MSR_SUPERVISOR_INT_OFF	/* Get our normal MSR value */
		lwz	r5,SACvrswap(r5)	/* Get real to virtual conversion */
		or	r2,r2,r3		/* Keep the trace bits if they're on */
		mr	r3,r11			/* Pass the exception code in the parameter reg */
		mtsrr1	r2			/* Set up our normal MSR value */
		xor	r4,r13,r5		/* Pass up the virtual address of context savearea */

		rfi				/* Launch the exception handler */

		.long	0			/* Leave these here gol durn it! (601 post-RFI prefetch workaround) */
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
		.long	0
1832 | ||
/*
 *	This routine is the only place where we return from an interruption.
 *	Anyplace else is wrong.  Even if I write the code, it's still wrong.
 *	Feel free to come by and slap me if I do do it--even though I may
 *	have had a good reason to do it.
 *
 *	All we need to remember here is that R13 must point to the savearea
 *	that has the context we need to load up.  Translation and interruptions
 *	must be disabled.
 *
 *	This code always loads the context in the savearea pointed to
 *	by R13.  In the process, it throws away the savearea.  If there
 *	is any tomfoolery with savearea stacks, it must be taken care of
 *	before we get here.
 *
 *	Speaking of tomfoolery, this is where we synthesize interruptions
 *	if any need to be.
 */

		.align	5

EatRupt:	mr	r31,r13			/* Move the savearea pointer to the far end of the register set */

EatRupt2:	mfsprg	r2,0			/* Get the per_proc block */
		dcbt	0,r31			; Get this because we need it very soon

#if TRCSAVE
		lwz	r30,saver0(r31)		; (TEST/DEBUG) Get users R0
		lwz	r20,saveexception(r31)	; (TEST/DEBUG) Returning from trace?
		xor	r30,r20,r30		; (TEST/DEBUG) Make code
		rlwinm	r30,r30,1,0,31		; (TEST/DEBUG) Make an easy test
		cmplwi	cr5,r30,0x61		; (TEST/DEBUG) See if this is a trace
#endif

/*
 *	First we see if we are able to free the new savearea.
 *	If it is not attached to anything, put it on the free list.
 *	This is real dangerous, we haven't restored context yet...
 *	So, the free savearea chain lock must stay until the bitter end!
 */

/*
 *	It's dangerous here.  We haven't restored anything from the current savearea yet.
 *	And, we mark it the active one.  So, if we get an exception in here, it is
 *	unrecoverable.  Unless we mess up, we can't get any kind of exception.  So,
 *	it is important to assay this code as only the purest of gold.
 *
 *	But first, see if there is a savearea hanging off of quickfret.  If so,
 *	we release that one first and then come back for the other.  We should rarely
 *	see one, they appear when FPU or VMX context is discarded by either returning
 *	to a higher exception level, or explicitly.
 *
 *	A word about QUICKFRET: Multiple saveareas may be queued for release.  It is
 *	the responsibility of the queuer to insure that the savearea is not multiply
 *	queued and that the appropriate inuse bits are reset.
 */



		mfsprg	r27,2			; Get the processor features
		lwz	r1,savesrr1(r31)	; Get destination MSR
		mtcrf	0x60,r27		; Set CRs with thermal facilities
		mr	r18,r31			; Save the savearea pointer
		rlwinm.	r0,r1,0,MSR_EE_BIT,MSR_EE_BIT	; Are interruptions going to be enabled?
		lwz	r19,PP_QUICKFRET(r2)	; Get the quick release savearea
		crandc	31,pfThermalb,pfThermIntb	; See if we have both thermometer and not interrupt facility
		li	r0,0			; Get a zero
		crandc	31,31,cr0_eq		; Factor in enablement
		la	r21,savesr0(r18)	; Point to the first thing we restore
		bf	31,tempisok		; No thermal checking needed...

;
;	We get to here if 1) there is a thermal facility, and 2) the hardware
;	will or cannot interrupt, and 3) the interrupt will be enabled after this point.
;

		mfspr	r16,thrm3		; Get thermal 3
		mfspr	r14,thrm1		; Get thermal 1
		rlwinm.	r16,r16,0,thrme,thrme	; Is the thermometer enabled?
		mfspr	r15,thrm2		; Get thermal 2
		beq-	tempisok		; No thermometer...
		rlwinm	r16,r14,2,28,31		; Cluster THRM1s TIE, V, TIN, and TIV at bottom 4 bits
		srawi	r0,r15,31		; Make a mask of 1s if temperature over
		rlwinm	r30,r15,2,28,31		; Cluster THRM2s TIE, V, TIN, and TIV at bottom 4 bits
;
;	Note that the following compares check that V, TIN, and TIV are set and that TIE is cleared.
;	This insures that we only emulate when the hardware is not set to interrupt.
;
		cmplwi	cr0,r16,7		; Is there a valid pending interruption for THRM1?
		cmplwi	cr1,r30,7		; Is there a valid pending interruption for THRM2?
		and	r15,r15,r0		; Keep high temp if that interrupted, zero if not
		cror	cr0_eq,cr0_eq,cr1_eq	; Merge both
		andc	r14,r14,r0		; Keep low if high did not interrupt, zero if it did
		bne+	tempisok		; Nope, temperature is in range

		li	r3,T_THERMAL		; Time to emulate a thermal interruption
		or	r14,r14,r15		; Get contents of interrupting register
		mr	r13,r31			; Make sure savearea is pointed to correctly
		stw	r3,saveexception(r31)	; Set the synthesized exception code
		stw	r14,savedar(r31)	; Set the contents of the interrupting register into the dar
		b	Redrive			; Go process this new interruption...


tempisok:	lis	r30,HIGH_ADDR(EXT(saveanchor))	/* Get the high part of the anchor */
		stw	r0,PP_QUICKFRET(r2)		/* Clear quickfret pointer */
		ori	r30,r30,LOW_ADDR(EXT(saveanchor)) /* Bottom half of the anchor */
		dcbt	0,r21				/* Touch in the first thing */

#if TRCSAVE
		beq-	cr5,trkill0		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill0			; (TEST/DEBUG) yes...
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) Quickfret savearea
trkill0:
#endif

;	Take the savearea anchor lock (spin with plain loads, retry with lwarx/stwcx.)

rtlck:		lwarx	r22,0,r30		/* Grab the lock value */
		li	r23,1			/* Use part of the delay time */
		mr.	r22,r22			/* Is it locked? */
		bne-	rtlcks			/* Yeah, wait for it to clear... */
		stwcx.	r23,0,r30		/* Try to seize that there durn lock */
		beq+	fretagain		; Got it...
		b	rtlck			/* Collision, try again... */

rtlcks:		lwz	r22,SVlock(r30)		/* Get that lock in here */
		mr.	r22,r22			/* Is it free yet? */
		beq+	rtlck			/* Yeah, try for it again... */
		b	rtlcks			/* Sniff away... */
1964 | ||
;
;	Lock gotten, toss the saveareas.  Loop: first release everything on the
;	quickfret chain (R19), then the normal savearea (R18), then fall into
;	donefret.  The anchor lock (SVlock) is held for the whole loop.
;
fretagain:
#if TRCSAVE
		beq-	cr5,trkill1		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill1			; (TEST/DEBUG) yes...
		li	r0,1			; (TEST/DEBUG) ID number
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) Quickfret savearea
trkill1:
#endif

		mr.	r18,r18			; Are we actually done here?
		beq-	donefret		; Yeah...
		mr.	r31,r19			; Is there a quickfret to do?
		beq+	noqfrt			; Nope...
		lwz	r19,SAVqfret(r19)	; Yes, get the next in line
#if TRCSAVE
		beq-	cr5,trkill2		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill2			; (TEST/DEBUG) yes...
		li	r0,2			; (TEST/DEBUG) ID number
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) next quickfret savearea
		stw	r31,LTR_r3(r20)		; (TEST/DEBUG) Current one to toss
trkill2:
#endif
		b	doqfrt			; Go do it...

noqfrt:		mr	r31,r18			; Set the area to release
		li	r18,0			; Show we have done it
#if TRCSAVE
		beq-	cr5,trkill3		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill3			; (TEST/DEBUG) yes...
		li	r0,3			; (TEST/DEBUG) ID number
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) next quickfret savearea
		stw	r31,LTR_r3(r20)		; (TEST/DEBUG) Current one to toss
trkill3:
#endif

doqfrt:		li	r0,0			; Get a constant 0
		lis	r26,0x8000		/* Build a bit mask and assume first savearea */
		stw	r0,SAVqfret(r31)	; Make sure back chain is unlinked
		lwz	r28,SAVflags(r31)	; Get the flags for the old active one
#if TRCSAVE
		beq-	cr5,trkill4		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill4			; (TEST/DEBUG) yes...
		li	r0,4			; (TEST/DEBUG) ID number
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) next quickfret savearea
		stw	r31,LTR_r3(r20)		; (TEST/DEBUG) Current one to toss
		stw	r28,LTR_r4(r20)		; (TEST/DEBUG) Save current flags
trkill4:
#endif
		rlwinm	r25,r31,21,31,31	/* Get position of savearea in block */
		andis.	r28,r28,HIGH_ADDR(SAVinuse)	/* See if we need to free it */
		srw	r26,r26,r25		/* Get bit position to deallocate */
		rlwinm	r29,r31,0,0,19		/* Round savearea pointer to even page address */

		bne-	fretagain		/* Still in use, we can't free this one... */

		lwz	r23,SACalloc(r29)	/* Get the allocation for this block */
		lwz	r24,SVinuse(r30)	/* Get the in use count */
		mr	r28,r23			; (TEST/DEBUG) save for trace
		or	r23,r23,r26		/* Turn on our bit */
		subi	r24,r24,1		/* Show that this one is free */
		cmplw	r23,r26			/* Is ours the only one free? */
		stw	r23,SACalloc(r29)	/* Save it out */
		bne+	rstrest			/* Nope, then the block is already on the free list */

		lwz	r22,SVfree(r30)		/* Get the old head of the free list */
		stw	r29,SVfree(r30)		/* Point the head at us now */
		stw	r22,SACnext(r29)	; Point us to the old last

rstrest:	stw	r24,SVinuse(r30)	/* Set the in use count */
#if TRCSAVE
		beq-	cr5,trkill5		; (TEST/DEBUG) Do not trace this type
		lwz	r14,LOW_ADDR(traceMask-EXT(ExceptionVectorsStart))(br0)	; (TEST/DEBUG) Get the trace mask
		mr.	r14,r14			; (TEST/DEBUG) Is it stopped?
		beq-	trkill5			; (TEST/DEBUG) yes...
		li	r0,5			; (TEST/DEBUG) ID number
		bl	cte			; (TEST/DEBUG) Trace this
		stw	r18,LTR_r1(r20)		; (TEST/DEBUG) Normal savearea
		stw	r19,LTR_r2(r20)		; (TEST/DEBUG) Next quickfret savearea
		stw	r31,LTR_r3(r20)		; (TEST/DEBUG) Current one to toss
		stw	r28,LTR_srr1(r20)	; (TEST/DEBUG) Save the original allocation
		stw	r23,LTR_dar(r20)	; (TEST/DEBUG) Save the new allocation
		stw	r24,LTR_save(r20)	; (TEST/DEBUG) Save the new in use count
		stw	r22,LTR_lr(r20)		; (TEST/DEBUG) Save the old top of free list
		stw	r29,LTR_ctr(r20)	; (TEST/DEBUG) Save the new top of free list
trkill5:
#endif
		b	fretagain		; Go finish up the rest...
2071 | ||
;
;	Build the SR values depending upon destination.  If we are going to the kernel,
;	the SRs are almost all the way set up.  SR14 (or the currently used copyin/out register)
;	must be set to whatever it was at the last exception because it varies.  All the rest
;	have been set up already.
;
;	If we are going into user space, we need to check a bit more.  SR0, SR1, SR2, and
;	SR14 (current implementation) must be restored always.  The others must be set if
;	they are different than what was loaded last time (i.e., tasks have switched).
;	We check the last loaded address space ID and if the same, we skip the loads.
;	This is a performance gain because SR manipulations are slow.
;
;	CR bits 16-31 (from PMAP_VFLAGS via mtcrf 0x0F) select, per segment,
;	whether an alternate SR value from the pmap is used instead of the
;	computed one.  cr3 tracks whether the pmap changed (stream stop needed).
;

		.align	5

donefret:	lwz	r26,savesrr1(r31)	; Get destination state flags
		lwz	r7,PP_USERPMAP(r2)	; Pick up the user pmap we may launch
		cmplw	cr3,r14,r14		; Set cr3_eq: we do not need to stop streams
		rlwinm.	r17,r26,0,MSR_PR_BIT,MSR_PR_BIT	; See if we are going to user or system
		li	r14,PMAP_SEGS		; Point to segments
		bne+	gotouser		; We are going into user state...

		lwz	r14,savesr14(r31)	; Get the copyin/out register at interrupt time
		mtsr	sr14,r14		; Set SR14
		b	segsdone		; We are all set up now...

		.align	5

gotouser:	dcbt	r14,r7			; Touch the segment register contents
		lwz	r16,PP_LASTPMAP(r7)	; Pick up the last loaded pmap
		addi	r14,r14,32		; Second half of pmap segments
		lwz	r13,PMAP_VFLAGS(r7)	; Get the flags
		lwz	r15,PMAP_SPACE(r7)	; Get the primary space
		dcbt	r14,r7			; Touch second page
		mtcrf	0x0F,r13		; Set CRs to correspond to the subordinate spaces
		oris	r15,r15,hi16(SEG_REG_PROT)	; Set segment 0 SR value
		lhz	r9,PP_CPU_FLAGS(r2)	; Get the processor flags

		addis	r13,r15,0x0000		; Get SR0 value
		bf	16,nlsr0		; No alternate here...
		lwz	r13,PMAP_SEGS+(0*4)(r7)	; Get SR0 value

nlsr0:		mtsr	sr0,r13			; Load up the SR
		rlwinm	r9,r9,(((31-MSR_BE_BIT)+(traceBEb+16+1))&31),MSR_BE_BIT,MSR_BE_BIT	; Set BE bit if special trace is on

		addis	r13,r15,0x0010		; Get SR1 value
		bf	17,nlsr1		; No alternate here...
		lwz	r13,PMAP_SEGS+(1*4)(r7)	; Get SR1 value

nlsr1:		mtsr	sr1,r13			; Load up the SR
		or	r26,r26,r9		; Flip on the BE bit for special trace if needed

		cmplw	cr3,r7,r16		; Are we running the same segs as last time?

		addis	r13,r15,0x0020		; Get SR2 value
		bf	18,nlsr2		; No alternate here...
		lwz	r13,PMAP_SEGS+(2*4)(r7)	; Get SR2 value

nlsr2:		mtsr	sr2,r13			; Load up the SR

		addis	r13,r15,0x0030		; Get SR3 value
		bf	19,nlsr3		; No alternate here...
		lwz	r13,PMAP_SEGS+(3*4)(r7)	; Get SR3 value

nlsr3:		mtsr	sr3,r13			; Load up the SR

		addis	r13,r15,0x00E0		; Get SR14 value
		bf	30,nlsr14		; No alternate here...
		lwz	r13,PMAP_SEGS+(14*4)(r7)	; Get SR14 value

nlsr14:		mtsr	sr14,r13		; Load up the SR

		beq+	segsdone		; All done if same pmap as last time...

		addis	r13,r15,0x0040		; Get SR4 value
		bf	20,nlsr4		; No alternate here...
		lwz	r13,PMAP_SEGS+(4*4)(r7)	; Get SR4 value

nlsr4:		mtsr	sr4,r13			; Load up the SR

		addis	r13,r15,0x0050		; Get SR5 value
		bf	21,nlsr5		; No alternate here...
		lwz	r13,PMAP_SEGS+(5*4)(r7)	; Get SR5 value

nlsr5:		mtsr	sr5,r13			; Load up the SR

		addis	r13,r15,0x0060		; Get SR6 value
		bf	22,nlsr6		; No alternate here...
		lwz	r13,PMAP_SEGS+(6*4)(r7)	; Get SR6 value

nlsr6:		mtsr	sr6,r13			; Load up the SR

		addis	r13,r15,0x0070		; Get SR7 value
		bf	23,nlsr7		; No alternate here...
		lwz	r13,PMAP_SEGS+(7*4)(r7)	; Get SR7 value

nlsr7:		mtsr	sr7,r13			; Load up the SR

		addis	r13,r15,0x0080		; Get SR8 value
		bf	24,nlsr8		; No alternate here...
		lwz	r13,PMAP_SEGS+(8*4)(r7)	; Get SR8 value

nlsr8:		mtsr	sr8,r13			; Load up the SR

		addis	r13,r15,0x0090		; Get SR9 value
		bf	25,nlsr9		; No alternate here...
		lwz	r13,PMAP_SEGS+(9*4)(r7)	; Get SR9 value

nlsr9:		mtsr	sr9,r13			; Load up the SR

		addis	r13,r15,0x00A0		; Get SR10 value
		bf	26,nlsr10		; No alternate here...
		lwz	r13,PMAP_SEGS+(10*4)(r7)	; Get SR10 value

nlsr10:		mtsr	sr10,r13		; Load up the SR

		addis	r13,r15,0x00B0		; Get SR11 value
		bf	27,nlsr11		; No alternate here...
		lwz	r13,PMAP_SEGS+(11*4)(r7)	; Get SR11 value

nlsr11:		mtsr	sr11,r13		; Load up the SR

		addis	r13,r15,0x00C0		; Get SR12 value
		bf	28,nlsr12		; No alternate here...
		lwz	r13,PMAP_SEGS+(12*4)(r7)	; Get SR12 value

nlsr12:		mtsr	sr12,r13		; Load up the SR

		addis	r13,r15,0x00D0		; Get SR13 value
		bf	29,nlsr13		; No alternate here...
		lwz	r13,PMAP_SEGS+(13*4)(r7)	; Get SR13 value

nlsr13:		mtsr	sr13,r13		; Load up the SR

		addis	r13,r15,0x00F0		; Get SR15 value
		bf	31,nlsr15		; No alternate here...
		lwz	r13,PMAP_SEGS+(15*4)(r7)	; Get SR15 value

nlsr15:		mtsr	sr15,r13		; Load up the SR
2211 | ||
2212 | segsdone: li r1,emfp0 ; Point to the fp savearea | |
2213 | lwz r25,savesrr0(r31) ; Get the SRR0 to use | |
2214 | la r28,saver6(r31) /* Point to the next line to use */ | |
2215 | dcbt r1,r2 ; Start moving in a work area | |
2216 | lwz r0,saver0(r31) /* Restore */ | |
2217 | dcbt 0,r28 /* Touch it in */ | |
2218 | mr r29,r2 ; Save the per_proc | |
2219 | lwz r1,saver1(r31) /* Restore */ | |
2220 | lwz r2,saver2(r31) /* Restore */ | |
2221 | la r28,saver14(r31) /* Point to the next line to get */ | |
2222 | lwz r3,saver3(r31) /* Restore */ | |
2223 | mtcrf 0x80,r27 ; Get facility availability flags (do not touch CR1-7) | |
2224 | lwz r4,saver4(r31) /* Restore */ | |
2225 | mtsrr0 r25 /* Restore the SRR0 now */ | |
2226 | lwz r5,saver5(r31) /* Restore */ | |
2227 | mtsrr1 r26 /* Restore the SRR1 now */ | |
2228 | lwz r6,saver6(r31) /* Restore */ | |
2229 | ||
2230 | dcbt 0,r28 /* Touch that next line on in */ | |
2231 | la r28,savexfpscrpad(r31) ; Point to the saved fpscr | |
2232 | ||
2233 | lwz r7,saver7(r31) /* Restore */ | |
2234 | dcbt 0,r28 ; Touch saved fpscr | |
2235 | lwz r8,saver8(r31) /* Restore */ | |
2236 | lwz r9,saver9(r31) /* Restore */ | |
2237 | lwz r10,saver10(r31) /* Restore */ | |
2238 | lwz r11,saver11(r31) /* Restore */ | |
2239 | lwz r12,saver12(r31) /* Restore */ | |
2240 | lwz r13,saver13(r31) /* Restore */ | |
2241 | la r28,saver22(r31) /* Point to the next line to do */ | |
2242 | lwz r14,saver14(r31) /* Restore */ | |
2243 | lwz r15,saver15(r31) /* Restore */ | |
2244 | ||
2245 | ; | |
2246 | ; Note that floating point will be enabled from here on until the RFI | |
2247 | ; | |
2248 | ||
2249 | bf- pfFloatb,nofphere ; Skip if no floating point... | |
2250 | mfmsr r27 ; Save the MSR | |
2251 | ori r27,r27,lo16(MASK(MSR_FP)) ; Enable floating point | |
2252 | mtmsr r27 ; Really enable | |
2253 | isync | |
2254 | stfd f0,emfp0(r29) ; Save FP0 | |
2255 | lfd f0,savexfpscrpad(r31) ; Get the fpscr | |
2256 | mtfsf 0xFF,f0 ; Restore fpscr | |
2257 | lfd f0,emfp0(r29) ; Restore the used register | |
2258 | ||
2259 | nofphere: dcbt 0,r28 /* Touch in another line of context */ | |
2260 | ||
2261 | lwz r16,saver16(r31) /* Restore */ | |
2262 | lwz r17,saver17(r31) /* Restore */ | |
2263 | lwz r18,saver18(r31) /* Restore */ | |
2264 | lwz r19,saver19(r31) /* Restore */ | |
2265 | lwz r20,saver20(r31) /* Restore */ | |
2266 | lwz r21,saver21(r31) /* Restore */ | |
2267 | la r28,saver30(r31) /* Point to the final line */ | |
2268 | lwz r22,saver22(r31) /* Restore */ | |
2269 | ||
2270 | dcbt 0,r28 /* Suck it in */ | |
2271 | ||
2272 | lwz r23,saver23(r31) /* Restore */ | |
2273 | lwz r24,saver24(r31) /* Restore */ | |
2274 | lwz r25,saver25(r31) /* Restore */ | |
2275 | lwz r26,saver26(r31) /* Restore */ | |
2276 | lwz r27,saver27(r31) /* Restore */ | |
2277 | ||
2278 | lwz r28,savecr(r31) /* Get CR to restore */ | |
2279 | bf pfAltivecb,noavec4 ; No vector on this machine | |
2280 | lwz r29,savevrsave(r31) ; Get the vrsave | |
2281 | beq+ cr3,noavec3 ; SRs have not changed, no need to stop the streams... | |
2282 | dssall ; Kill all data streams | |
2283 | ; The streams should be suspended | |
2284 | ; already, and we do a bunch of | |
2285 | ; dependent loads and a sync later | |
2286 | ; so we should be cool. | |
2287 | ||
2288 | noavec3: mtspr vrsave,r29 ; Set the vrsave | |
2289 | ||
2290 | noavec4: lwz r29,savexer(r31) /* Get XER to restore */ | |
2291 | mtcr r28 /* Restore the CR */ | |
2292 | lwz r28,savelr(r31) /* Get LR to restore */ | |
2293 | mtxer r29 /* Restore the XER */ | |
2294 | lwz r29,savectr(r31) /* Get the CTR to restore */ | |
2295 | mtlr r28 /* Restore the LR */ | |
2296 | lwz r28,saver30(r31) /* Restore */ | |
2297 | mtctr r29 /* Restore the CTR */ | |
2298 | lwz r29,saver31(r31) /* Restore */ | |
2299 | mtsprg 2,r28 /* Save R30 */ | |
2300 | lwz r28,saver28(r31) /* Restore */ | |
2301 | mtsprg 3,r29 /* Save R31 */ | |
2302 | lwz r29,saver29(r31) /* Restore */ | |
2303 | ||
2304 | #if PERFTIMES && DEBUG | |
2305 | stmw r1,0x280(br0) ; Save all registers | |
2306 | mfcr r20 ; Save the CR | |
2307 | mflr r21 ; Save the LR | |
2308 | mfsrr0 r9 ; Save SRR0 | |
2309 | mfsrr1 r11 ; Save SRR1 | |
2310 | mr r8,r0 ; Save R0 | |
2311 | li r3,69 ; Indicate interrupt | |
2312 | mr r4,r11 ; Set MSR to log | |
2313 | mr r5,r31 ; Get savearea to log | |
2314 | bl EXT(dbgLog2) ; Cut log entry | |
2315 | mr r0,r8 ; Restore R0 | |
2316 | mtsrr0 r9 ; Restore SRR0 | |
2317 | mtsrr1 r11 ; Restore SRR1 | |
2318 | mtlr r21 ; Restore the LR | |
2319 | mtcr r20 ; Restore the CR | |
2320 | lmw r1,0x280(br0) ; Restore all the rest | |
2321 | #endif | |
2322 | ||
2323 | li r31,0 /* Get set to clear lock */ | |
2324 | sync /* Make sure it's all out there */ | |
2325 | stw r31,SVlock(r30) /* Unlock it */ | |
2326 | mfsprg r30,2 /* Restore R30 */ | |
2327 | mfsprg r31,0 ; Get per_proc | |
2328 | lwz r31,pfAvailable(r31) ; Get the feature flags | |
2329 | mtsprg 2,r31 ; Set the feature flags | |
2330 | mfsprg r31,3 /* Restore R31 */ | |
2331 | ||
2332 | rfi /* Click heels three times and think very hard that there's no place like home */ | |
2333 | ||
/*
 *			Zero-word padding after the preceding rfi.  The original note
 *			says this works around an old 601 bug -- presumably so that
 *			anything prefetched past the rfi is harmless; TODO(review):
 *			confirm the exact MPC601 erratum.
 */
			.long	0						/* For old 601 bug */
			.long	0
			.long	0
			.long	0
			.long	0
			.long	0
			.long	0
			.long	0
2342 | ||
2343 | ||
2344 | ||
2345 | ||
/*
 *			exception_exit(savearea *)
 *
 *			Finish handling an exception: turn translation and external
 *			interrupts off, then either fall into the common interrupt
 *			return path (EatRupt) or, if the savearea has SAVredrive set,
 *			branch to Redrive to re-dispatch the saved exception.
 *
 *			ENTRY :	IR and/or DR and/or interruptions can be on
 *					R3 points to the physical address of a savearea
 */

			.align	5
			.globl	EXT(exception_exit)

LEXT(exception_exit)

			mfsprg	r29,2					; Get feature flags
			mfmsr	r30						/* Get the current MSR */
			mtcrf	0x04,r29				; Set the features
			mr		r31,r3					/* Get the savearea in the right register */
			andi.	r30,r30,0x7FCF			/* Turn off externals, IR, and DR */
			lis		r1,hi16(SAVredrive)		; Get redrive request

			bt		pfNoMSRirb,eeNoMSR		; No MSR...

			mtmsr	r30						; Translation and all off
			isync							; Toss prefetch
			b		eeNoMSRx

;			This processor cannot change IR/DR directly with mtmsr
;			(pfNoMSRirb set); use the loadMSR system call to set it.

eeNoMSR:	li		r0,loadMSR				; Get the MSR setter SC
			mr		r3,r30					; Get new MSR
			sc								; Set it

eeNoMSRx:
			mfsprg	r2,0					; Get the per_proc block
			lwz		r4,SAVflags(r31)		; Pick up the flags
			mr		r13,r31					; Put savearea here also

			and.	r0,r4,r1				; Check if redrive requested
			andc	r4,r4,r1				; Clear redrive

			dcbt	br0,r2					; We will need this in just a sec

			beq+	EatRupt					; No redrive, just exit...

			lwz		r3,saveexception(r13)	; Restore exception code
			stw		r4,SAVflags(r13)		; Set the flags (redrive bit now clear)
			b		Redrive					; Redrive the exception...
2391 | ||
;
;			Make trace entry for lowmem_vectors internal debug
;
;			Appends one LTR_size entry to the circular low-memory trace
;			table: reads traceCurr, advances it (wrapping to traceStart
;			when it reaches traceEnd), zeroes both cache lines of the
;			entry when the L1 cache is enabled, then stamps the entry
;			with the 64-bit timebase, special ID code 0x111, R0, and the
;			caller's return address (LR).
;			Clobbers: R15, R16, R17, R20.  Call with bl; returns via blr.
;
#if TRCSAVE
cte:
			lwz		r20,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0)	; Pick up the current trace entry
			lwz		r16,LOW_ADDR(EXT(traceEnd)-EXT(ExceptionVectorsStart))(br0)		; Grab up the end of it all
			addi	r17,r20,LTR_size		; Point to the next trace entry
			cmplw	r17,r16					; Do we need to wrap the trace table?
			li		r15,32					; Second line of entry
			bne+	ctenwrap				; We got a trace entry...
			lwz		r17,LOW_ADDR(EXT(traceStart)-EXT(ExceptionVectorsStart))(br0)	; Wrap back to the top

ctenwrap:	stw		r17,LOW_ADDR(EXT(traceCurr)-EXT(ExceptionVectorsStart))(br0)	; Set the next entry for the next guy

			bf-		featL1ena,skipz8		; L1 cache is disabled...
			dcbz	0,r20					; Allocate cache for the entry
			dcbz	r15,r20					; Zap the second half
skipz8:

;			Standard PowerPC 64-bit timebase read: retry if the upper
;			half ticked between the two mftbu reads.

ctegetTB:	mftbu	r16						; Get the upper timebase
			mftb	r17						; Get the lower timebase
			mftbu	r15						; Get the upper one again
			cmplw	r16,r15					; Did the top tick?
			bne-	ctegetTB				; Yeah, need to get it again...

			li		r15,0x111				; Get the special trace ID code
			stw		r0,LTR_r0(r20)			; Save R0 (usually used as an ID number)
			stw		r16,LTR_timeHi(r20)		; Set the upper part of TB
			mflr	r16						; Get the return point
			stw		r17,LTR_timeLo(r20)		; Set the lower part of TB
			sth		r15,LTR_excpt(r20)		; Save the exception type
			stw		r16,LTR_srr0(r20)		; Save the return point
			blr								; Leave...
#endif
2427 | ||
/*
 *			Start of the trace table
 *
 *			13760 zero-filled words (55040 bytes) between traceTableBeg
 *			and traceTableEnd; this is the circular buffer that cte
 *			(TRCSAVE builds) writes trace entries into.  Page-aligned.
 */

			.align	12						/* Align to 4k boundary */

			.globl	EXT(traceTableBeg)
EXT(traceTableBeg):							/* Start of trace table */
/*			.fill	2048,4,0				   Make an 8k trace table for now */
			.fill	13760,4,0				/* Make an .trace table for now */
/*			.fill	240000,4,0				   Make an .trace table for now */
			.globl	EXT(traceTableEnd)
EXT(traceTableEnd):							/* End of trace table */
2441 | ||
			.globl	EXT(ExceptionVectorsEnd)
EXT(ExceptionVectorsEnd):					/* Used if relocating the exception vectors */
#ifndef HACKALERTHACKALERT
/*
 *			This .long needs to be here because the linker gets confused and tries to
 *			include the final label in a section in the next section if there is nothing
 *			after it
 */
			.long	0						/* (HACK/HACK/HACK) */
#endif
2452 | ||
			.data
			.align	ALIGN
			.globl	EXT(exception_end)

/*
 *			exception_end: byte length of the low-memory exception
 *			vector region (ExceptionVectorsEnd - ExceptionVectorsStart),
 *			exported for code that sizes or relocates the vectors.
 */
EXT(exception_end):
			.long	EXT(ExceptionVectorsEnd) -EXT(ExceptionVectorsStart)	/* phys fn */
2458 | ||
2459 |