/*
 * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/* Emulate64.s
 *
 * Software emulation of instructions not handled in hw, on 64-bit machines.
 */

#include <sys/appleapiopts.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <mach/machine/vm_param.h>
#include <ppc/cpu_capabilities.h>
#include <assym.s>

// CR bit set if the instruction is an "update" form (LFDU, STWU, etc):
#define kUpdate     25

// CR bit set if interrupt occurred in trace mode (ie, MSR_SE_BIT):
#define kTrace      8

// CR bit set if notification on alignment interrupts is requested (notifyUnalignbit in spcFlags):
#define kNotify     9

// CR bit distinguishes between alignment and program exceptions:
#define kAlignment  10



// *************************************
// * P R O G R A M   I N T E R R U P T *
// *************************************
//
// These are floating pt exceptions, illegal instructions, privileged mode violations,
// and traps.  All we're interested in at this low level is illegal instructions.
// The ones we "emulate" are:
//    DCBA, which is not implemented in the IBM 970.  The emulation is to ignore it,
//          as it is just a hint.
//    MCRXR, which is not implemented on the IBM 970, but is in the PPC ISA.
//
// Additionally, to facilitate debugging the alignment handler, we recognize a special
// diagnostic mode that is used to simulate alignment exceptions.  When in this mode,
// if the instruction has opcode==0 and the extended opcode is one of the X-form
// instructions that can take an alignment interrupt, then we change the opcode to
// 31 and pretend it got an alignment interrupt.  This exercises paths that
// are hard to drive or perhaps never driven on this particular CPU.
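// (Sketch of that rewrite, assuming the standard PowerPC encoding: the primary
// opcode occupies bits 0-5 of the instruction image, so OR-ing 0x7C000000 into
// the image, which is what "oris r20,r20,0x7C00" below does, forces the opcode to 31.)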

        .text
        .globl  EXT(Emulate64)
        .align  5
LEXT(Emulate64)
        crclr   kAlignment                          // not an alignment exception
        b       a64AlignAssistJoin                  // join alignment handler


// Return from alignment handler with all the regs loaded for opcode emulation.

a64HandleProgramInt:
        rlwinm. r0,r29,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT // illegal opcode?
        beq     a64PassAlong                        // No, must have been trap or priv violation etc
        rlwinm  r3,r20,6,26,31                      // right justify opcode field (bits 0-5)
        rlwinm  r4,r20,31,22,31                     // right justify extended opcode field (bits 21-30)
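        // (Equivalently, as a C sketch: r3 = instr >> 26 is the 6-bit primary opcode,
        // and r4 = (instr >> 1) & 0x3FF is the 10-bit extended opcode.)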
        cmpwi   cr0,r3,31                           // X-form?
        cmpwi   cr1,r4,758                          // DCBA?
        cmpwi   cr4,r4,512                          // MCRXR?
        crand   cr1_eq,cr0_eq,cr1_eq                // merge the two tests for DCBA
        crand   cr4_eq,cr0_eq,cr4_eq                // and for MCRXR
        beq++   cr1_eq,a64ExitEm                    // was DCBA, so ignore
        bne--   cr4_eq,a64NotEmulated               // skip if not MCRXR

// Was MCRXR, so emulate.

        ld      r3,savexer(r13)                     // get the XER
        lwz     r4,savecr(r13)                      // and the CR
        rlwinm  r5,r20,11,27,29                     // get (CR# * 4) from instruction
        rlwinm  r6,r3,0,4,31                        // zero XER[32-35] (also XER[0-31])
        sld     r4,r4,r5                            // move target CR field to bits 32-35
        rlwimi  r4,r3,0,0,3                         // move XER[32-35] into CR field
        stw     r6,savexer+4(r13)                   // update XER
        srd     r4,r4,r5                            // re-position CR
        stw     r4,savecr(r13)                      // update CR
        b       a64ExitEm                           // done
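        // (For reference, MCRXR crfD copies XER[0-3] into CR field crfD and then zeroes
        // XER[0-3].  A rough C sketch of the net effect on the saved 32-bit images:
        //      cr  = (cr & ~(0xF << (28 - 4*crfD))) | (((xer >> 28) & 0xF) << (28 - 4*crfD));
        //      xer &= 0x0FFFFFFF;
        // which the shift/insert sequence above carries out against the 64-bit
        // save-area copies.)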

// Not an opcode we normally emulate.  If in special diagnostic mode and opcode=0,
// emulate as an alignment exception.  This special case is for test software.

a64NotEmulated:
        lwz     r30,dgFlags(0)                      // Get the flags
        rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb      // Do we want to try to emulate something?
        beq++   a64PassAlong                        // No emulation allowed
        cmpwi   r3,0                                // opcode==0 ?
        bne     a64PassAlong                        // not the special case
        oris    r20,r20,0x7C00                      // change opcode to 31
        crset   kAlignment                          // say we took alignment exception
        rlwinm  r5,r4,0,26+1,26-1                   // mask Update bit (32) out of extended opcode
        rlwinm  r5,r5,0,0,31                        // Clean out leftover junk from rlwinm
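        // (That is, r5 = xop & ~32 as a sketch, so an update form such as ldux (53)
        // compares equal to its base form ldx (21) in the chain below.)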

        cmpwi   r4,1014                             // dcbz/dcbz128 ?
        crmove  cr1_eq,cr0_eq                       // start accumulating the match in cr1_eq
        cmpwi   r5,21                               // ldx/ldux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,599                              // lfdx/lfdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,535                              // lfsx/lfsux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,343                              // lhax/lhaux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,790                              // lhbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,279                              // lhzx/lhzux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,597                              // lswi ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,533                              // lswx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,341                              // lwax/lwaux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,534                              // lwbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,23                               // lwzx/lwzux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,149                              // stdx/stdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,727                              // stfdx/stfdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,983                              // stfiwx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,663                              // stfsx/stfsux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,918                              // sthbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,407                              // sthx/sthux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,725                              // stswi ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,661                              // stswx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,662                              // stwbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,151                              // stwx/stwux ?
        cror    cr1_eq,cr0_eq,cr1_eq

        beq++   cr1,a64GotInstruction               // it was one of the X-forms we handle
        crclr   kAlignment                          // revert to program interrupt
        b       a64PassAlong                        // not recognized extended opcode


// *****************************************
// * A L I G N M E N T   I N T E R R U P T *
// *****************************************
//
// We get here in exception context, ie with interrupts disabled, translation off, and
// in 64-bit mode, with:
//      r13 = save-area pointer, with general context already saved in it
//      cr6 = feature flags
// We preserve r13 and cr6.  Other GPRs and CRs, the LR and CTR are used.
//
// Current 64-bit processors (GPUL) handle almost all misaligned operations in hardware,
// so this routine usually isn't called very often.  Only floating pt ops that cross a page
// boundary and are not word aligned, and LMW/STMW can take exceptions to cacheable memory.
// However, in contrast to G3 and G4, any misaligned load/store will get an alignment
// interrupt on uncached memory.
//
// We always emulate scalar ops with a series of byte load/stores.  Doing so is no slower
// than LWZ/STW in cases where a scalar op gets an alignment exception.
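// Conceptually, the byte gather for a misaligned word load looks like this C sketch
// (big-endian, with b0 the byte at the effective address):
//      val = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3;
// and a misaligned store is the corresponding byte scatter.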
//
// This routine supports all legal permutations of alignment interrupts occurring in user or
// supervisor mode, 32 or 64-bit addressing, and translation on or off.  We do not emulate
// instructions that go past the end of an address space, such as "LHZ -1(0)"; we just pass
// along the alignment exception rather than wrap around to byte 0.
//
// First, check for a few special cases such as virtual machines, etc.

        .globl  EXT(AlignAssist64)
        .align  5
LEXT(AlignAssist64)
        crset   kAlignment                          // mark as alignment interrupt

a64AlignAssistJoin:                                 // join here from program interrupt handler
        li      r0,0                                // Get a 0
        mfsprg  r31,0                               // get the per_proc data ptr
        mcrf    cr3,cr6                             // save feature flags here...
        lwz     r21,spcFlags(r31)                   // grab the special flags
        ld      r29,savesrr1(r13)                   // get the MSR etc at the fault
        ld      r28,savesrr0(r13)                   // get the EA of faulting instruction
        stw     r0,savemisc3(r13)                   // Assume we will handle this ok
        mfmsr   r26                                 // save MSR at entry
        rlwinm. r0,r21,0,runningVMbit,runningVMbit  // Are we running a VM?
        lwz     r19,dgFlags(0)                      // Get the diagnostics flags
        bne--   a64PassAlong                        // yes, let the virtual machine monitor handle


// Set up the MSR shadow regs.  We turn on FP in this routine, and usually set DR and RI
// when accessing user space (the SLB is still set up with all the user space translations.)
// However, if the interrupt occurred in the kernel with DR off, we keep it off while
// accessing the "target" address space.  If we set DR to access the target space, we also
// set RI.  The RI bit tells the exception handlers to clear cr0 beq and return if we get an
// exception accessing the user address space.  We are careful to test cr0 beq after every such
// access.  We keep the following "shadows" of the MSR in global regs across this code:
//      r25 = MSR at entry, plus FP and probably DR and RI (used to access target space)
//      r26 = MSR at entry
//      r27 = free
//      r29 = SRR1 (ie, MSR at interrupt)
// Note that EE and IR are always off, and SF is always on in this code.

        rlwinm  r3,r29,0,MSR_DR_BIT,MSR_DR_BIT      // was translation on at fault?
        rlwimi  r3,r3,32-MSR_RI_BIT+MSR_DR_BIT,MSR_RI_BIT,MSR_RI_BIT   // if DR was set, set RI too
        or      r25,r26,r3                          // assemble MSR to use accessing target space
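        // (Net effect of the three instructions above, as a sketch:
        //      r25 = r26 | (SRR1 & MSR_DR) | (DR was on at the fault ? MSR_RI : 0) )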


// Because the DSISR and DAR are either not set or are not to be trusted on some 64-bit
// processors on an alignment interrupt, we must fetch the faulting instruction ourselves,
// then decode/hash the opcode and reconstruct the EA manually.

        mtmsr   r25                                 // turn on FP and (if it was on at fault) DR and RI
        isync                                       // wait for it to happen
        cmpw    r0,r0                               // turn on beq so we can check for DSIs
        lwz     r20,0(r28)                          // fetch faulting instruction, probably with DR on
        bne--   a64RedriveAsISI                     // got a DSI trying to fetch it, pretend it was an ISI
        mtmsr   r26                                 // turn DR back off
        isync                                       // wait for it to happen


// Set a few flags while we wait for the faulting instruction to arrive from cache.

        rlwinm. r0,r29,0,MSR_SE_BIT,MSR_SE_BIT      // Were we single stepping?
        stw     r20,savemisc2(r13)                  // Save the instruction image in case we notify
        crnot   kTrace,cr0_eq                       // kTrace = 1 iff we were in trace (single-step) mode
        rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb  // Should we notify?
        crnot   kNotify,cr0_eq                      // kNotify = 1 iff notification was requested


// Hash the instruction into a 5-bit value "AAAAB" used to index the branch table, and a
// 1-bit kUpdate flag, as follows:
261 |