/*
 * Copyright (c) 2002 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

/* Emulate64.s
 *
 * Software emulation of instructions not handled in hw, on 64-bit machines.
 */

#include <sys/appleapiopts.h>
#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <ppc/exception.h>
#include <mach/machine/vm_param.h>
#include <ppc/cpu_capabilities.h>
#include <assym.s>

// CR bit set if the instruction is an "update" form (LFDU, STWU, etc):
#define kUpdate 25

// CR bit set if interrupt occurred in trace mode (ie, MSR_SE_BIT):
#define kTrace 8

// CR bit set if notification on alignment interrupts is requested (notifyUnalignbit in spcFlags):
#define kNotify 9

// CR bit distinguishes between alignment and program exceptions:
#define kAlignment 10



// *************************************
// * P R O G R A M   I N T E R R U P T *
// *************************************
//
// These are floating pt exceptions, illegal instructions, privileged mode violations,
// and traps. All we're interested in at this low level is illegal instructions.
// The ones we "emulate" are:
// DCBA, which is not implemented in the IBM 970. The emulation is to ignore it,
// as it is just a hint.
// MCRXR, which is not implemented on the IBM 970, but is in the PPC ISA.
//
// Additionally, to facilitate debugging the alignment handler, we recognize a special
// diagnostic mode that is used to simulate alignment exceptions. When in this mode,
// if the instruction has opcode==0 and the extended opcode is one of the X-form
// instructions that can take an alignment interrupt, then we change the opcode to
// 31 and pretend it got an alignment interrupt. This exercises paths that
// are hard to drive or perhaps never driven on this particular CPU.

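// For illustration only (not part of the build), the decode performed below is
// equivalent to this C sketch, where "inst" is the 32-bit instruction image:
//
//      uint32_t primary  = inst >> 26;             // primary opcode, bits 0-5
//      uint32_t extended = (inst >> 1) & 0x3FF;    // extended opcode, bits 21-30
//      if (primary == 31 && extended == 758) { /* DCBA: just a hint, ignore */ }
//      if (primary == 31 && extended == 512) { /* MCRXR: emulated below */ }
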
        .text
        .globl  EXT(Emulate64)
        .align  5
LEXT(Emulate64)
        crclr   kAlignment                  // not an alignment exception
        b       a64AlignAssistJoin          // join alignment handler


// Return from alignment handler with all the regs loaded for opcode emulation.

a64HandleProgramInt:
        rlwinm. r0,r29,0,SRR1_PRG_ILL_INS_BIT,SRR1_PRG_ILL_INS_BIT // illegal opcode?
        beq     a64PassAlong                // No, must have been trap or priv violation etc
        rlwinm  r3,r20,6,26,31              // right justify opcode field (bits 0-5)
        rlwinm  r4,r20,31,22,31             // right justify extended opcode field (bits 21-30)
        cmpwi   cr0,r3,31                   // X-form?
        cmpwi   cr1,r4,758                  // DCBA?
        cmpwi   cr4,r4,512                  // MCRXR?
        crand   cr1_eq,cr0_eq,cr1_eq        // merge the two tests for DCBA
        crand   cr4_eq,cr0_eq,cr4_eq        // and for MCRXR
        beq++   cr1_eq,a64ExitEm            // was DCBA, so ignore
        bne--   cr4_eq,a64NotEmulated       // skip if not MCRXR

// Was MCRXR, so emulate.

        ld      r3,savexer(r13)             // get the XER
        lwz     r4,savecr(r13)              // and the CR
        rlwinm  r5,r20,11,27,29             // get (CR# * 4) from instruction
        rlwinm  r6,r3,0,4,31                // zero XER[32-35] (also XER[0-31])
        sld     r4,r4,r5                    // move target CR field to bits 32-35
        rlwimi  r4,r3,0,0,3                 // move XER[32-35] into CR field
        stw     r6,savexer+4(r13)           // update XER
        srd     r4,r4,r5                    // re-position CR
        stw     r4,savecr(r13)              // update CR
        b       a64ExitEm                   // done

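// A hedged C sketch of the sequence above (the architected MCRXR semantics:
// copy XER[32-35] into CR field crfD, then zero XER[32-35]):
//
//      uint32_t xer = XER, cr = CR;
//      uint32_t shift = 28 - 4 * crfD;         // bit offset of target CR field
//      cr  = (cr & ~(0xFu << shift)) | ((xer >> 28) << shift);
//      XER = xer & 0x0FFFFFFF;                 // zero XER[32-35]
//      CR  = cr;
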
// Not an opcode we normally emulate. If in special diagnostic mode and opcode=0,
// emulate as an alignment exception. This special case is for test software.

a64NotEmulated:
        lwz     r30,dgFlags(0)              // Get the flags
        rlwinm. r0,r30,0,enaDiagEMb,enaDiagEMb // Do we want to try to emulate something?
        beq++   a64PassAlong                // No emulation allowed
        cmpwi   r3,0                        // opcode==0 ?
        bne     a64PassAlong                // not the special case
        oris    r20,r20,0x7C00              // change opcode to 31
        crset   kAlignment                  // say we took alignment exception
        rlwinm  r5,r4,0,26+1,26-1           // mask Update bit (32) out of extended opcode
        rlwinm  r5,r5,0,0,31                // Clean out leftover junk from rlwinm

        cmpwi   r4,1014                     // dcbz/dcbz128 ?
        crmove  cr1_eq,cr0_eq
        cmpwi   r5,21                       // ldx/ldux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,599                      // lfdx/lfdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,535                      // lfsx/lfsux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,343                      // lhax/lhaux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,790                      // lhbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,279                      // lhzx/lhzux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,597                      // lswi ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,533                      // lswx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,341                      // lwax/lwaux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,534                      // lwbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,23                       // lwzx/lwzux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,149                      // stdx/stdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,727                      // stfdx/stfdux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,983                      // stfiwx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,663                      // stfsx/stfsux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,918                      // sthbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,407                      // sthx/sthux ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,725                      // stswi ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,661                      // stswx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r4,662                      // stwbrx ?
        cror    cr1_eq,cr0_eq,cr1_eq
        cmpwi   r5,151                      // stwx/stwux ?
        cror    cr1_eq,cr0_eq,cr1_eq

        beq++   cr1,a64GotInstruction       // it was one of the X-forms we handle
        crclr   kAlignment                  // revert to program interrupt
        b       a64PassAlong                // not recognized extended opcode


// *****************************************
// * A L I G N M E N T   I N T E R R U P T *
// *****************************************
//
// We get here in exception context, ie with interrupts disabled, translation off, and
// in 64-bit mode, with:
//      r13 = save-area pointer, with general context already saved in it
//      cr6 = feature flags
// We preserve r13 and cr6. Other GPRs and CRs, the LR and CTR are used.
//
// Current 64-bit processors (GPUL) handle almost all misaligned operations in hardware,
// so this routine usually isn't called very often. Only floating pt ops that cross a page
// boundary and are not word aligned, and LMW/STMW can take exceptions to cacheable memory.
// However, in contrast to G3 and G4, any misaligned load/store will get an alignment
// interrupt on uncached memory.
//
// We always emulate scalar ops with a series of byte load/stores. Doing so is no slower
// than LWZ/STW in cases where a scalar op gets an alignment exception.
//
// This routine supports all legal permutations of alignment interrupts occurring in user or
// supervisor mode, 32 or 64-bit addressing, and translation on or off. We do not emulate
// instructions that go past the end of an address space, such as "LHZ -1(0)"; we just pass
// along the alignment exception rather than wrap around to byte 0.
//
// First, check for a few special cases such as virtual machines, etc.

        .globl  EXT(AlignAssist64)
        .align  5
LEXT(AlignAssist64)
        crset   kAlignment                  // mark as alignment interrupt

a64AlignAssistJoin:                         // join here from program interrupt handler
        li      r0,0                        // Get a 0
        mfsprg  r31,0                       // get the per_proc data ptr
        mcrf    cr3,cr6                     // save feature flags here...
        lwz     r21,spcFlags(r31)           // grab the special flags
        ld      r29,savesrr1(r13)           // get the MSR etc at the fault
        ld      r28,savesrr0(r13)           // get the EA of faulting instruction
        stw     r0,savemisc3(r13)           // Assume we will handle this ok
        mfmsr   r26                         // save MSR at entry
        rlwinm. r0,r21,0,runningVMbit,runningVMbit // Are we running a VM?
        lwz     r19,dgFlags(0)              // Get the diagnostics flags
        bne--   a64PassAlong                // yes, let the virtual machine monitor handle


// Set up the MSR shadow regs. We turn on FP in this routine, and usually set DR and RI
// when accessing user space (the SLB is still set up with all the user space translations.)
// However, if the interrupt occurred in the kernel with DR off, we keep it off while
// accessing the "target" address space. If we set DR to access the target space, we also
// set RI. The RI bit tells the exception handlers to clear cr0 beq and return if we get an
// exception accessing the user address space. We are careful to test cr0 beq after every such
// access. We keep the following "shadows" of the MSR in global regs across this code:
//      r25 = MSR at entry, plus FP and probably DR and RI (used to access target space)
//      r26 = MSR at entry
//      r27 = free
//      r29 = SRR1 (ie, MSR at interrupt)
// Note that EE and IR are always off, and SF is always on in this code.

        rlwinm  r3,r29,0,MSR_DR_BIT,MSR_DR_BIT // was translation on at fault?
        rlwimi  r3,r3,32-MSR_RI_BIT+MSR_DR_BIT,MSR_RI_BIT,MSR_RI_BIT // if DR was set, set RI too
        or      r25,r26,r3                  // assemble MSR to use accessing target space

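// A minimal C sketch of the r25 derivation above (MSR_DR and MSR_RI stand for
// the usual MSR bit masks, assumed here for illustration only):
//
//      uint64_t dr  = srr1 & MSR_DR;           // was translation on at the fault?
//      uint64_t ri  = dr ? MSR_RI : 0;         // if we turn DR on, turn RI on too
//      uint64_t r25 = msr_at_entry | dr | ri;  // MSR used to touch target space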

// Because the DSISR and DAR are either not set or are not to be trusted on some 64-bit
// processors on an alignment interrupt, we must fetch the faulting instruction ourselves,
// then decode/hash the opcode and reconstruct the EA manually.

        mtmsr   r25                         // turn on FP and (if it was on at fault) DR and RI
        isync                               // wait for it to happen
        cmpw    r0,r0                       // turn on beq so we can check for DSIs
        lwz     r20,0(r28)                  // fetch faulting instruction, probably with DR on
        bne--   a64RedriveAsISI             // got a DSI trying to fetch it, pretend it was an ISI
        mtmsr   r26                         // turn DR back off
        isync                               // wait for it to happen

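// The cmpw/bne-- pairing above is the recovery protocol used throughout this
// file, sketched in pseudo-C (helper names hypothetical, for illustration):
//
//      set_cr0_eq();                   // cmpw r0,r0: cr0 "eq" is the success flag
//      x = *ea;                        // if this faults while MSR.RI is set, the
//                                      // low-level handler clears cr0 "eq" and
//                                      // returns here instead of panicking
//      if (!cr0_eq()) goto redrive;    // bne--: the access took a DSI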

// Set a few flags while we wait for the faulting instruction to arrive from cache.

        rlwinm. r0,r29,0,MSR_SE_BIT,MSR_SE_BIT // Were we single stepping?
        stw     r20,savemisc2(r13)          // Save the instruction image in case we notify
        crnot   kTrace,cr0_eq
        rlwinm. r0,r19,0,enaNotifyEMb,enaNotifyEMb // Should we notify?
        crnot   kNotify,cr0_eq


// Hash the instruction into a 5-bit value "AAAAB" used to index the branch table, and a
// 1-bit kUpdate flag, as follows:
//  • for X-form instructions (with primary opcode 31):
//      the "AAAA" bits are bits 21-24 of the instruction
//      the "B" bit is the XOR of bits 29 and 30
//      the update bit is instruction bit 25
//  • for D and DS-form instructions (actually, any primary opcode except 31):
//      the "AAAA" bits are bits 1-4 of the instruction
//      the "B" bit is 0
//      the update bit is instruction bit 5
//
// Just for fun (and perhaps a little speed on deep-pipe machines), we compute the hash,
// update flag, and EA without branches and with ipc >= 2.
//
// When we "bctr" to the opcode-specific routine, the following are all set up:
//      MSR = EE and IR off, SF and FP on
//      r12 = full 64-bit EA (r17 is clamped EA)
//      r13 = save-area pointer (physical)
//      r14 = ptr to saver0 in save-area (ie, to base of GPRs)
//      r15 = 0x00000000FFFFFFFF if 32-bit mode fault, 0xFFFFFFFFFFFFFFFF if 64
//      r16 = RA * 8 (ie, reg# not reg value)
//      r17 = EA, clamped to 32 bits if 32-bit mode fault (see also r12)
//      r18 = (RA|0) (reg value)
//      r19 = -1 if X-form, 0 if D-form
//      r20 = faulting instruction
//      r21 = RT * 8 (ie, reg# not reg value)
//      r22 = addr(aaFPopTable)+(RT*32), ie ptr to floating pt table for target register
//      r25 = MSR at entrance, probably with DR and RI set (for access to target space)
//      r26 = MSR at entrance
//      r27 = free
//      r28 = SRR0 (ie, EA of faulting instruction)
//      r29 = SRR1 (ie, MSR at fault)
//      r30 = scratch, usually user data
//      r31 = per-proc pointer
//      cr2 = kTrace, kNotify, and kAlignment flags
//      cr3 = saved copy of feature flags used in lowmem vector code
//      cr6 = bits 24-27 of CR are bits 24-27 of opcode if X-form, or bits 4-5 and 00 if D-form
//            bit 25 is the kUpdate flag, set for update form instructions
//      cr7 = bits 28-31 of CR are bits 28-31 of opcode if X-form, or 0 if D-form

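// A hedged C sketch of the hash described above (function and variable names
// are hypothetical, for illustration only; bit numbers are IBM big-endian):
//
//      uint32_t aaaab_hash(uint32_t inst) {
//          uint32_t primary = inst >> 26;              // bits 0-5
//          uint32_t aaaa, b;
//          if (primary == 31) {                        // X-form
//              aaaa = (inst >> 7) & 0xF;               // bits 21-24
//              b    = ((inst >> 2) ^ (inst >> 1)) & 1; // xor of bits 29 and 30
//          } else {                                    // D- and DS-forms
//              aaaa = (inst >> 27) & 0xF;              // bits 1-4
//              b    = 0;
//          }
//          return (aaaa << 1) | b;                     // 5-bit "AAAAB" index
//      }
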
a64GotInstruction:                          // here from program interrupt with instruction in r20
        rlwinm  r21,r20,6+6,20,25           // move the primary opcode (bits 0-5) to bits 20-25
        la      r14,saver0(r13)             // r14 <- base address of GPR registers
        xori    r19,r21,0x07C0              // iff primary opcode is 31, set r19 to 0
        rlwinm  r16,r20,16+3,24,28          // r16 <- RA*8
        subi    r19,r19,1                   // set bit 0 iff X-form (ie, if primary opcode is 31)
        rlwinm  r17,r20,21+3,24,28          // r17 <- RB*8 (if X-form)
        sradi   r19,r19,63                  // r19 <- -1 if X-form, 0 if D-form
        extsh   r22,r20                     // r22 <- displacement (if D-form)

        ldx     r23,r14,r17                 // get (RB), if any
        and     r15,r20,r19                 // instruction if X, 0 if D
        andc    r17,r21,r19                 // primary opcode in bits 20-25 if D, 0 if X
        ldx     r18,r14,r16                 // get (RA)
        subi    r24,r16,1                   // set bit 0 iff RA==0
        or      r21,r15,r17                 // r21 <- instruction if X, or bits 0-5 in bits 20-25 if D
        sradi   r24,r24,63                  // r24 <- -1 if RA==0, 0 otherwise
        rlwinm  r17,r21,32-4,25,28          // shift opcode bits 21-24 to 25-28 (hash "AAAA" bits)
        lis     r10,ha16(a64BranchTable)    // start to build up branch table address
        rlwimi  r17,r21,0,29,29             // move opcode bit 29 into hash as start of "B" bit
        rlwinm  r30,r21,1,29,29             // position opcode bit 30 in position 29
        and     r12,r23,r19                 // RB if X-form, 0 if D-form
        andc    r11,r22,r19                 // 0 if X-form, sign extended displacement if D-form
        xor     r17,r17,r30                 // bit 29 ("B") of hash is xor(bit29,bit30)
        addi    r10,r10,lo16(a64BranchTable)
        or      r12,r12,r11                 // r12 <- (RB) or displacement, as appropriate
        lwzx    r30,r10,r17                 // get address from branch table
        mtcrf   0x01,r21                    // move opcode bits 28-31 to CR7
        sradi   r15,r29,32                  // propagate SF bit from SRR1 (MSR_SF, which is bit 0)
        andc    r18,r18,r24                 // r18 <- (RA|0)
        mtcrf   0x02,r21                    // move opcode bits 24-27 to CR6 (kUpdate is bit 25)
        add     r12,r18,r12                 // r12 <- 64-bit EA
        mtctr   r30                         // set up branch address

        oris    r15,r15,0xFFFF              // start to fill low word of r15 with 1s
        rlwinm  r21,r20,11+3,24,28          // r21 <- RT * 8
        lis     r22,ha16(EXT(aaFPopTable))  // start to compute address of floating pt table
        ori     r15,r15,0xFFFF              // now bits 32-63 of r15 are 1s
        addi    r22,r22,lo16(EXT(aaFPopTable))
        and     r17,r12,r15                 // clamp EA to 32 bits if fault occurred in 32-bit mode
        rlwimi  r22,r21,2,22,26             // move RT into aaFPopTable address (which is 1KB aligned)

        bf--    kAlignment,a64HandleProgramInt // return to Program Interrupt handler
        bctr                                // if alignment interrupt, jump to opcode-specific routine


// Floating-pt load single (lfs[u], lfsx[u])

a64LfsLfsx:
        bl      a64Load4Bytes               // get data in r30
        mtctr   r22                         // set up address of "lfs fRT,emfp0(r31)"
        stw     r30,emfp0(r31)              // put word here for aaFPopTable routine
        bctrl                               // do the lfs
        b       a64UpdateCheck              // update RA if necessary and exit


// Floating-pt store single (stfs[u], stfsx[u])

a64StfsStfsx:
        ori     r22,r22,8                   // set dir==1 (ie, single store) in aaFPopTable
        mtctr   r22                         // set up address of "stfs fRT,emfp0(r31)"
        bctrl                               // execute the store into emfp0
        lwz     r30,emfp0(r31)              // get the word
        bl      a64Store4Bytes              // store r30 into user space
        b       a64UpdateCheck              // update RA if necessary and exit


// Floating-pt store as integer word (stfiwx)

a64Stfiwx:
        ori     r22,r22,16+8                // set size=1, dir==1 (ie, double store) in aaFPopTable
        mtctr   r22                         // set up FP register table address
        bctrl                               // double precision store into emfp0
        lwz     r30,emfp0+4(r31)            // get the low-order word
        bl      a64Store4Bytes              // store r30 into user space
        b       a64Exit                     // successfully emulated


// Floating-pt load double (lfd[u], lfdx[u])

a64LfdLfdx:
        ori     r22,r22,16                  // set Double bit in aaFPopTable address
        bl      a64Load8Bytes               // get data in r30
        mtctr   r22                         // set up address of "lfd fRT,emfp0(r31)"
        std     r30,emfp0(r31)              // put doubleword here for aaFPopTable routine
        bctrl                               // execute the load
        b       a64UpdateCheck              // update RA if necessary and exit


// Floating-pt store double (stfd[u], stfdx[u])

a64StfdStfdx:
        ori     r22,r22,16+8                // set size=1, dir==1 (ie, double store) in aaFPopTable address
        mtctr   r22                         // address of routine to stfd RT
        bctrl                               // store into emfp0
        ld      r30,emfp0(r31)              // get the doubleword
        bl      a64Store8Bytes              // store r30 into user space
        b       a64UpdateCheck              // update RA if necessary and exit


// Load halfword w 0-fill (lhz[u], lhzx[u])

a64LhzLhzx:
        bl      a64Load2Bytes               // load into r30 from user space (w 0-fill)
        stdx    r30,r14,r21                 // store into RT slot in register file
        b       a64UpdateCheck              // update RA if necessary and exit


// Load halfword w sign fill (lha[u], lhax[u])

a64LhaLhax:
        bl      a64Load2Bytes               // load into r30 from user space (w 0-fill)
        extsh   r30,r30                     // sign-extend
        stdx    r30,r14,r21                 // store into RT slot in register file
        b       a64UpdateCheck              // update RA if necessary and exit


// Load halfword byte reversed (lhbrx)

a64Lhbrx:
        bl      a64Load2Bytes               // load into r30 from user space (w 0-fill)
        rlwinm  r3,r30,8,16,23              // reverse bytes into r3
        rlwimi  r3,r30,24,24,31
        stdx    r3,r14,r21                  // store into RT slot in register file
        b       a64Exit                     // successfully emulated


// Store halfword (sth[u], sthx[u])

a64SthSthx:
        ldx     r30,r14,r21                 // get RT
        bl      a64Store2Bytes              // store r30 into user space
        b       a64UpdateCheck              // update RA if necessary and exit


// Store halfword byte reversed (sthbrx)

a64Sthbrx:
        addi    r21,r21,6                   // point to low two bytes of RT
        lhbrx   r30,r14,r21                 // load and reverse
        bl      a64Store2Bytes              // store r30 into user space
        b       a64Exit                     // successfully emulated


// Load word w 0-fill (lwz[u], lwzx[u]), also lwarx.

a64LwzLwzxLwarx:
        andc    r3,r19,r20                  // light bit 30 of r3 iff lwarx
        andi.   r0,r3,2                     // is it lwarx?
        bne--   a64PassAlong                // yes, never try to emulate a lwarx
        bl      a64Load4Bytes               // load 4 bytes from user space into r30 (0-filled)
        stdx    r30,r14,r21                 // update register file
        b       a64UpdateCheck              // update RA if necessary and exit


// Load word w sign fill (lwa, lwax[u])

a64Lwa:
        crclr   kUpdate                     // no update form of lwa (it's a reserved encoding)
a64Lwax:
        bl      a64Load4Bytes               // load 4 bytes from user space into r30 (0-filled)
        extsw   r30,r30                     // sign extend
        stdx    r30,r14,r21                 // update register file
        b       a64UpdateCheck              // update RA if necessary and exit


// Load word byte reversed (lwbrx)

a64Lwbrx:
        bl      a64Load4Bytes               // load 4 bytes from user space into r30 (0-filled)
        rlwinm  r3,r30,24,0,31              // flip bytes 1234 to 4123
        rlwimi  r3,r30,8,8,15               // r3 is now 4323
        rlwimi  r3,r30,8,24,31              // r3 is now 4321
        stdx    r3,r14,r21                  // update register file
        b       a64Exit                     // successfully emulated

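// The three rotate-and-insert ops above implement a plain 32-bit byte swap.
// A C sketch of the same computation (illustrative only):
//
//      uint32_t w = r30;                   // bytes 1234, as loaded
//      uint32_t r = ((w & 0x000000FF) << 24) | ((w & 0x0000FF00) << 8)
//                 | ((w & 0x00FF0000) >> 8) | ((w & 0xFF000000) >> 24); // 4321
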

// Store word (stw[u], stwx[u])

a64StwStwx:
        ldx     r30,r14,r21                 // get RT
        bl      a64Store4Bytes              // store r30 into user space
        b       a64UpdateCheck              // update RA if necessary and exit


// Store word byte reversed (stwbrx)

a64Stwbrx:
        addi    r21,r21,4                   // point to low word of RT
        lwbrx   r30,r14,r21                 // load and reverse
        bl      a64Store4Bytes              // store r30 into user space
        b       a64Exit                     // successfully emulated


// Load doubleword (ld[u], ldx[u]), also lwa.

a64LdLwa:                                   // these are DS form: ld=0, ldu=1, and lwa=2
        mtcrf   0x01,r20                    // move DS field to cr7
        rlwinm  r3,r20,0,30,31              // must adjust EA by subtracting DS field
        sub     r12,r12,r3                  // subtract from full 64-bit EA
        and     r17,r12,r15                 // then re-clamp to 32 bits if necessary
        bt      30,a64Lwa                   // handle lwa
        crmove  kUpdate,31                  // if opcode bit 31 is set, it is ldu so set update flag
a64Ldx:
        bl      a64Load8Bytes               // load 8 bytes from user space into r30
        stdx    r30,r14,r21                 // update register file
        b       a64UpdateCheck              // update RA if necessary and exit


// Store doubleword (stdx[u], std[u], stwcx)

a64StdxStwcx:
        bf--    30,a64PassAlong             // stwcx, so pass along alignment exception
        b       a64Stdx                     // was stdx
a64StdStfiwx:                               // if DS form: 0=std, 1=stdu, 2-3=undefined
        bt      30,a64Stfiwx                // handle stfiwx
        rlwinm  r3,r20,0,30,31              // must adjust EA by subtracting DS field
        mtcrf   0x01,r20                    // move DS field to cr7
        sub     r12,r12,r3                  // subtract from full 64-bit EA
        and     r17,r12,r15                 // then re-clamp to 32 bits if necessary
        crmove  kUpdate,31                  // if DS==1, then it is update form
a64Stdx:
        ldx     r30,r14,r21                 // get RT
        bl      a64Store8Bytes              // store RT into user space
        b       a64UpdateCheck              // update RA if necessary and exit

// Dcbz and Dcbz128 (bit 10 distinguishes the two forms)

a64DcbzDcbz128:
        andis.  r0,r20,0x0020               // bit 10 set?
        li      r3,0                        // get a 0 to store
        li      r0,4                        // assume 32-byte version, store 8 bytes 4x
        rldicr  r17,r17,0,63-5              // 32-byte align EA
        li      r4,_COMM_PAGE_BASE_ADDRESS
        beq     a64DcbzSetup                // it was the 32-byte version
        rldicr  r17,r17,0,63-7              // zero low 7 bits of EA
        li      r0,16                       // store 8 bytes 16x
a64DcbzSetup:
        sub     r4,r28,r4                   // get instruction offset from start of commpage
        and     r4,r4,r15                   // mask off high-order bits if 32-bit mode
        cmpldi  r4,_COMM_PAGE_AREA_USED     // did fault occur in commpage area?
        bge     a64NotCommpage              // not in commpage
        rlwinm. r4,r29,0,MSR_PR_BIT,MSR_PR_BIT // did fault occur in user mode?
        beq--   a64NotCommpage              // do not zero cr7 if kernel got alignment exception
        lwz     r4,savecr(r13)              // if we take a dcbz{128} in the commpage...
        rlwinm  r4,r4,0,0,27                // ...clear user's cr7...
        stw     r4,savecr(r13)              // ...as a flag for commpage code
a64NotCommpage:
        mtctr   r0
        cmpw    r0,r0                       // turn cr0 beq on so we can check for DSIs
        mtmsr   r25                         // turn on DR and RI so we can address user space
        isync                               // wait for it to happen
a64DcbzLoop:
        std     r3,0(r17)                   // store into user space
        bne--   a64RedriveAsDSI
        addi    r17,r17,8
        bdnz    a64DcbzLoop

        mtmsr   r26                         // restore MSR
        isync                               // wait for it to happen
        b       a64Exit


// Load and store multiple (lmw, stmw), distinguished by bit 25

a64LmwStmw:
        subfic  r22,r21,32*8                // how many regs to load or store?
        srwi    r22,r22,1                   // get bytes to load/store
        bf      25,a64LoadMultiple          // handle lmw
        b       a64StoreMultiple            // it was stmw


// Load string word immediate (lswi)

a64Lswi:
        rlwinm  r22,r20,21,27,31            // get #bytes in r22
        and     r17,r18,r15                 // recompute EA as (RA|0), and clamp
        subi    r3,r22,1                    // r22==0?
        rlwimi  r22,r3,6,26,26              // map count of 0 to 32
        b       a64LoadMultiple

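// Per the PPC ISA, an NB field of 0 means 32 bytes; the subi/rlwimi pair above
// encodes that branch-free. A C sketch of the same decode (illustrative only):
//
//      uint32_t nb    = (inst >> 11) & 0x1F;   // NB field, instruction bits 16-20
//      uint32_t bytes = (nb == 0) ? 32 : nb;   // lswi/stswi: 0 encodes 32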

// Store string word immediate (stswi)

a64Stswi:
        rlwinm  r22,r20,21,27,31            // get #bytes in r22
        and     r17,r18,r15                 // recompute EA as (RA|0), and clamp
        subi    r3,r22,1                    // r22==0?
        rlwimi  r22,r3,6,26,26              // map count of 0 to 32
        b       a64StoreMultiple


// Load string word indexed (lswx), also lwbrx

a64LswxLwbrx:
        bf      30,a64Lwbrx                 // was lwbrx
        ld      r22,savexer(r13)            // get the xer
        rlwinm  r22,r22,0,25,31             // isolate the byte count
        b       a64LoadMultiple             // join common code


// Store string word indexed (stswx), also stwbrx

a64StswxStwbrx:
        bf      30,a64Stwbrx                // was stwbrx
        ld      r22,savexer(r13)            // get the xer
        rlwinm  r22,r22,0,25,31             // isolate the byte count
        b       a64StoreMultiple            // join common code


// Load multiple words. This handles lmw, lswi, and lswx.

a64LoadMultiple:                            // r22 = byte count, may be 0
        subic.  r3,r22,1                    // get (#bytes-1)
        blt     a64Exit                     // done if 0
        add     r4,r17,r3                   // get EA of last operand byte
        and     r4,r4,r15                   // clamp
        cmpld   r4,r17                      // address space wrap?
        blt--   a64PassAlong                // pass along exception if so
        srwi.   r4,r22,2                    // get # full words to load
        rlwinm  r22,r22,0,30,31             // r22 <- leftover byte count
        cmpwi   cr1,r22,0                   // leftover bytes?
        beq     a64Lm3                      // no words
        mtctr   r4                          // set up word count
        cmpw    r0,r0                       // set beq for DSI test
a64Lm2:
        mtmsr   r25                         // turn on DR and RI
        isync                               // wait for it to happen
        lbz     r3,0(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r4,1(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r5,2(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r6,3(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        rlwinm  r30,r3,24,0,7               // pack bytes into r30
        rldimi  r30,r4,16,40
        rldimi  r30,r5,8,48
        rldimi  r30,r6,0,56
        mtmsr   r26                         // turn DR back off so we can store into register file
        isync
        addi    r17,r17,4                   // bump EA
        stdx    r30,r14,r21                 // pack into register file
        addi    r21,r21,8                   // bump register file offset
        rlwinm  r21,r21,0,24,28             // wrap around to 0
        bdnz    a64Lm2
a64Lm3:                                     // cr1/r22 = leftover bytes (0-3), cr0 beq set
        beq     cr1,a64Exit                 // no leftover bytes
        mtctr   r22
        mtmsr   r25                         // turn on DR so we can access user space
        isync
        lbz     r3,0(r17)                   // get 1st leftover byte
        bne--   a64RedriveAsDSI             // got a DSI
        rlwinm  r30,r3,24,0,7               // position in byte 4 of r30 (and clear rest of r30)
        bdz     a64Lm4                      // only 1 byte leftover
        lbz     r3,1(r17)                   // get 2nd byte
        bne--   a64RedriveAsDSI             // got a DSI
        rldimi  r30,r3,16,40                // insert into byte 5 of r30
        bdz     a64Lm4                      // only 2 bytes leftover
        lbz     r3,2(r17)                   // get 3rd byte
        bne--   a64RedriveAsDSI             // got a DSI
        rldimi  r30,r3,8,48                 // insert into byte 6
a64Lm4:
        mtmsr   r26                         // turn DR back off so we can store into register file
        isync
        stdx    r30,r14,r21                 // pack partially-filled word into register file
        b       a64Exit

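// Note the register-file index wrap above: per the ISA, string ops may run
// past r31 and continue at r0. A C sketch of the stepping (illustrative only):
//
//      reg_offset = (reg_offset + 8) & (31 * 8);   // step RT*8, wrapping r31 -> r0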

// Store multiple words. This handles stmw, stswi, and stswx.

a64StoreMultiple:                           // r22 = byte count, may be 0
        subic.  r3,r22,1                    // get (#bytes-1)
        blt     a64Exit                     // done if 0
        add     r4,r17,r3                   // get EA of last operand byte
        and     r4,r4,r15                   // clamp
        cmpld   r4,r17                      // address space wrap?
        blt--   a64PassAlong                // pass along exception if so
        srwi.   r4,r22,2                    // get # full words to store
        rlwinm  r22,r22,0,30,31             // r22 <- leftover byte count
        cmpwi   cr1,r22,0                   // leftover bytes?
        beq     a64Sm3                      // no words
        mtctr   r4                          // set up word count
        cmpw    r0,r0                       // turn on beq so we can check for DSIs
a64Sm2:
        ldx     r30,r14,r21                 // get next register
        addi    r21,r21,8                   // bump register file offset
        rlwinm  r21,r21,0,24,28             // wrap around to 0
        srwi    r3,r30,24                   // shift the four bytes into position
        srwi    r4,r30,16
        srwi    r5,r30,8
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        stb     r3,0(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r4,1(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r5,2(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r30,3(r17)
        bne--   a64RedriveAsDSI             // got a DSI
        mtmsr   r26                         // turn DR back off
        isync
        addi    r17,r17,4                   // bump EA
        bdnz    a64Sm2
a64Sm3:                                     // r22 = 0-3, cr1 set on r22, cr0 beq set
        beq     cr1,a64Exit                 // no leftover bytes
        ldx     r30,r14,r21                 // get last register
        mtctr   r22
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
a64Sm4:
        rlwinm  r30,r30,8,0,31              // position next byte
        stb     r30,0(r17)                  // pack into user space
        addi    r17,r17,1                   // bump user space ptr
        bne--   a64RedriveAsDSI             // got a DSI
        bdnz    a64Sm4
        mtmsr   r26                         // turn DR back off
        isync
        b       a64Exit


// Subroutines to load bytes from user space.

a64Load2Bytes:                              // load 2 bytes right-justified into r30
        addi    r7,r17,1                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        sub.    r30,r30,r30                 // 0-fill dest and set beq
        b       a64Load2                    // jump into routine
a64Load4Bytes:                              // load 4 bytes right-justified into r30 (ie, low order word)
        addi    r7,r17,3                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        sub.    r30,r30,r30                 // 0-fill dest and set beq
        b       a64Load4                    // jump into routine
a64Load8Bytes:                              // load 8 bytes into r30
        addi    r7,r17,7                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        sub.    r30,r30,r30                 // 0-fill dest and set beq
        lbz     r3,-7(r7)                   // get byte 0
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r4,-6(r7)                   // and byte 1, etc
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r5,-5(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r6,-4(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        rldimi  r30,r3,56,0                 // position bytes in upper word
        rldimi  r30,r4,48,8
        rldimi  r30,r5,40,16
        rldimi  r30,r6,32,24
a64Load4:
        lbz     r3,-3(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r4,-2(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        rldimi  r30,r3,24,32                // insert bytes 4 and 5 into r30
        rldimi  r30,r4,16,40
a64Load2:
        lbz     r3,-1(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        lbz     r4,0(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        mtmsr   r26                         // turn DR back off
        isync
        rldimi  r30,r3,8,48                 // insert bytes 6 and 7 into r30
        rldimi  r30,r4,0,56
        blr


// Subroutines to store bytes into user space.

a64Store2Bytes:                             // store bytes 6 and 7 of r30
        addi    r7,r17,1                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        cmpw    r0,r0                       // set beq so we can check for DSI
        b       a64Store2                   // jump into routine
a64Store4Bytes:                             // store bytes 4-7 of r30 (ie, low order word)
        addi    r7,r17,3                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        cmpw    r0,r0                       // set beq so we can check for DSI
        b       a64Store4                   // jump into routine
a64Store8Bytes:                             // r30 = bytes
        addi    r7,r17,7                    // get EA of last byte
        and     r7,r7,r15                   // clamp
        cmpld   r7,r17                      // address wrap?
        blt--   a64PassAlong                // yes
        mtmsr   r25                         // turn on DR so we can access user space
        isync                               // wait for it to happen
        cmpw    r0,r0                       // set beq so we can check for DSI
        rotldi  r3,r30,8                    // shift byte 0 into position
        rotldi  r4,r30,16                   // and byte 1
        rotldi  r5,r30,24                   // and byte 2
        rotldi  r6,r30,32                   // and byte 3
        stb     r3,-7(r7)                   // store byte 0
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r4,-6(r7)                   // and byte 1 etc...
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r5,-5(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r6,-4(r7)
        bne--   a64RedriveAsDSI             // got a DSI
a64Store4:
        rotldi  r3,r30,40                   // shift byte 4 into position
        rotldi  r4,r30,48                   // and byte 5
        stb     r3,-3(r7)
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r4,-2(r7)
        bne--   a64RedriveAsDSI             // got a DSI
a64Store2:
        rotldi  r3,r30,56                   // shift byte 6 into position
        stb     r3,-1(r7)                   // store byte 6
        bne--   a64RedriveAsDSI             // got a DSI
        stb     r30,0(r7)                   // store byte 7, which is already positioned
        bne--   a64RedriveAsDSI             // got a DSI
        mtmsr   r26                         // turn off DR
        isync
        blr


// Exit routines.

a64ExitEm:
        li      r30,T_EMULATE               // Change exception code to emulate
        stw     r30,saveexception(r13)      // Save it
        b       a64Exit                     // Join standard exit routine...

a64PassAlong:                               // unhandled exception, just pass it along
        li      r0,1                        // Set that the alignment/program exception was not emulated
        crset   kNotify                     // return T_ALIGNMENT or T_PROGRAM
        stw     r0,savemisc3(r13)           // Set that emulation was not done
        crclr   kTrace                      // not a trace interrupt
        b       a64Exit1
a64UpdateCheck:                             // successfully emulated, may be update form
        bf      kUpdate,a64Exit             // update?
        stdx    r12,r14,r16                 // yes, store 64-bit EA into RA
a64Exit:                                    // instruction successfully emulated
        addi    r28,r28,4                   // bump SRR0 past the emulated instruction
        li      r30,T_IN_VAIN               // eat the interrupt since we emulated it
        and     r28,r28,r15                 // clamp to address space size (32 vs 64)
        std     r28,savesrr0(r13)           // save, so we return to next instruction
a64Exit1:
        bt--    kTrace,a64Trace             // were we in single-step at fault?
        bt--    kNotify,a64Notify           // should we say T_ALIGNMENT anyway?
a64Exit2:
        mcrf    cr6,cr3                     // restore feature flags
        mr      r11,r30                     // pass back exception code (T_IN_VAIN etc) in r11
        b       EXT(EmulExit)               // return to exception processing


// Notification requested: pass exception upstairs even though it might have been emulated.

a64Notify:
        li      r30,T_ALIGNMENT             // somebody wants to know about it (but don't redrive)
        bt      kAlignment,a64Exit2         // was an alignment exception
        li      r30,T_PROGRAM               // was an emulated instruction
        b       a64Exit2


// Emulate a trace interrupt after handling alignment interrupt.

a64Trace:
        lwz     r9,SAVflags(r13)            // get the save-area flags
        li      r30,T_TRACE
        oris    r9,r9,hi16(SAVredrive)      // Set the redrive bit
        stw     r30,saveexception(r13)      // Set the exception code
        stw     r9,SAVflags(r13)            // Set the flags
        b       a64Exit2                    // Exit and do trace interrupt...


// Got a DSI accessing user space. Redrive. One way this can happen is if another
// processor removes a mapping while we are emulating.

a64RedriveAsISI:                            // this DSI happened fetching the opcode (r1==DSISR r4==DAR)
        mtmsr   r26                         // turn DR back off
        isync                               // wait for it to happen
        li      r30,T_INSTRUCTION_ACCESS
        rlwimi  r29,r1,0,0,4                // insert the fault type from DSI's DSISR
        std     r29,savesrr1(r13)           // update SRR1 to look like an ISI
        b       a64Redrive

a64RedriveAsDSI:                            // r0==DAR r1==DSISR
        mtmsr   r26                         // turn DR back off
        isync                               // wait for it to happen
        stw     r1,savedsisr(r13)           // Set the DSISR of failed access
        std     r0,savedar(r13)             // Set the address of the failed access
        li      r30,T_DATA_ACCESS           // Set failing data access code
a64Redrive:
        lwz     r9,SAVflags(r13)            // Pick up the flags
        stw     r30,saveexception(r13)      // Set the replacement code
        oris    r9,r9,hi16(SAVredrive)      // Set the redrive bit
        stw     r9,SAVflags(r13)            // Set redrive request
        crclr   kTrace                      // don't take a trace interrupt
        crclr   kNotify                     // don't pass alignment exception
        b       a64Exit2                    // done


// This is the branch table, indexed by the "AAAAB" opcode hash.
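// A worked example of the indexing (illustrative): lwz has primary opcode 32
// (0b100000), so it is D-form; "AAAA" = instruction bits 1-4 = 0b0000 and
// "B" = 0, giving index 00000, the a64LwzLwzxLwarx entry below.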

a64BranchTable:
        .long   a64LwzLwzxLwarx             // 00000  lwz[u], lwzx[u], lwarx
        .long   a64Ldx                      // 00001  ldx[u]
        .long   a64PassAlong                // 00010  ldarx (never emulate these)
        .long   a64PassAlong                // 00011
        .long   a64StwStwx                  // 00100  stw[u], stwx[u]
        .long   a64StdxStwcx                // 00101  stdx[u], stwcx
        .long   a64PassAlong                // 00110
        .long   a64PassAlong                // 00111  stdcx (never emulate these)
        .long   a64LhzLhzx                  // 01000  lhz[u], lhzx[u]
        .long   a64PassAlong                // 01001
        .long   a64LhaLhax                  // 01010  lha[u], lhax[u]
        .long   a64Lwax                     // 01011  lwax[u]
        .long   a64SthSthx                  // 01100  sth[u], sthx[u]
        .long   a64PassAlong                // 01101
        .long   a64LmwStmw                  // 01110  lmw, stmw
        .long   a64PassAlong                // 01111
        .long   a64LfsLfsx                  // 10000  lfs[u], lfsx[u]
        .long   a64LswxLwbrx                // 10001  lswx, lwbrx
        .long   a64LfdLfdx                  // 10010  lfd[u], lfdx[u]
        .long   a64Lswi                     // 10011  lswi
        .long   a64StfsStfsx                // 10100  stfs[u], stfsx[u]
        .long   a64StswxStwbrx              // 10101  stswx, stwbrx
        .long   a64StfdStfdx                // 10110  stfd[u], stfdx[u]
        .long   a64Stswi                    // 10111  stswi
        .long   a64PassAlong                // 11000
        .long   a64Lhbrx                    // 11001  lhbrx
        .long   a64LdLwa                    // 11010  ld[u], lwa
        .long   a64PassAlong                // 11011
        .long   a64PassAlong                // 11100
        .long   a64Sthbrx                   // 11101  sthbrx
        .long   a64StdStfiwx                // 11110  std[u], stfiwx
        .long   a64DcbzDcbz128              // 11111  dcbz, dcbz128