/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */

#include <ppc/asm.h>
#include <ppc/proc_reg.h>
#include <cpus.h>
#include <assym.s>
#include <debug.h>
#include <mach/ppc/vm_param.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>

#define FPVECDBG 0
#define GDDBG 0

            .text

/*
 * void machine_load_context(thread_t thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */

            .align  5
            .globl  EXT(machine_load_context)

LEXT(machine_load_context)

            mfsprg  r6,0
            lwz     r0,PP_INTSTACK_TOP_SS(r6)
            stw     r0,PP_ISTACKPTR(r6)
            lwz     r9,THREAD_TOP_ACT(r3)           /* Set up the current thread */
            mtsprg  1,r9
            li      r0,0                            /* Clear a register */
            lwz     r3,ACT_MACT_PCB(r9)             /* Get the savearea used */
            mfmsr   r5                              /* Since we are passing control, get our MSR values */
            lwz     r11,SAVprev+4(r3)               /* Get the previous savearea */
            lwz     r1,saver1+4(r3)                 /* Load new stack pointer */
            lwz     r10,ACT_MACT_SPF(r9)            /* Get the special flags */
            stw     r0,saver3+4(r3)                 /* Make sure we pass in a 0 for the continuation */
            stw     r0,FM_BACKPTR(r1)               /* zero backptr */
            stw     r5,savesrr1+4(r3)               /* Pass our MSR to the new guy */
            stw     r11,ACT_MACT_PCB(r9)            /* Unstack our savearea */
            oris    r10,r10,hi16(OnProc)            /* Set OnProc bit */
            stw     r0,ACT_PREEMPT_CNT(r9)          /* Enable preemption */
            stw     r10,ACT_MACT_SPF(r9)            /* Update the special flags */
            stw     r10,spcFlags(r6)                /* Set per_proc copy of the special flags */
            b       EXT(exception_exit)             /* Go for it */
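
;
;           Note: machine_load_context does not return; exception_exit restores the new
;           thread's state from the savearea popped above and resumes it with the MSR
;           value stored into savesrr1.
;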

/*  thread_t Switch_context(thread_t    old,
 *                          void        (*cont)(void),
 *                          thread_t    new)
 *
 *  Switch from one thread to another. If a continuation is supplied, then
 *  we do not need to save callee save registers.
 *
 */

/*  void Call_continuation( void (*continuation)(void), vm_offset_t stack_ptr)
 */

            .align  5
            .globl  EXT(Call_continuation)

LEXT(Call_continuation)

            mtlr    r3
            mr      r1,r4                           /* Load new stack pointer */
            blr                                     /* Jump to the continuation */

/*
 * Get the old kernel stack, and store into the thread structure.
 * See if a continuation is supplied, and skip state save if so.
 *
 * Note that interrupts must be disabled before we get here (i.e., splsched)
 */

/*  Context switches are double jumps. We pass the following to the
 *  context switch firmware call:
 *
 *  R3  = switchee's savearea, virtual if continuation, low order physical for full switch
 *  R4  = old thread
 *  R5  = new SRR0
 *  R6  = new SRR1
 *  R7  = high order physical address of savearea for full switch
 *
 *  savesrr0 is set to go to switch_in
 *  savesrr1 is set to uninterruptible with translation on
 */

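;
;           Illustrative sketch only (the real transfer is the SwitchContextCall firmware
;           trap plus switch_in below); for a full switch the code roughly does:
;
;               switchee_savearea->savesrr0 = switch_in;               // where the switchee resumes
;               switchee_savearea->savesrr1 = MSR_SUPERVISOR_INT_OFF;  // translation on, uninterruptible
;               sc;                                                    // SwitchContextCall firmware call
;
;           If a continuation was supplied, the firmware call is skipped and we go
;           straight to exception_exit via swtchtocont instead.
;
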
            .align  5
            .globl  EXT(Switch_context)

LEXT(Switch_context)

            mfsprg  r12,0                           ; Get the per_proc block
#if DEBUG
            lwz     r0,PP_ISTACKPTR(r12)            ; (DEBUG/TRACE) make sure we are not
            mr.     r0,r0                           ; (DEBUG/TRACE) on the interrupt
            bne++   notonintstack                   ; (DEBUG/TRACE) stack
            BREAKPOINT_TRAP
notonintstack:
#endif
            lwz     r5,THREAD_TOP_ACT(r5)           ; Get the new activation
            lwz     r8,ACT_MACT_PCB(r5)             ; Get the PCB for the new guy
            lwz     r9,cioSpace(r5)                 ; Get copyin/out address space
            cmpwi   cr1,r4,0                        ; Remember if there is a continuation - used waaaay down below
            lwz     r7,CTHREAD_SELF(r5)             ; Pick up the user assist word
            lwz     r11,ACT_MACT_BTE(r5)            ; Get BlueBox Task Environment
            lwz     r6,cioRelo(r5)                  ; Get copyin/out relocation top
            mtsprg  1,r5
            lwz     r2,cioRelo+4(r5)                ; Get copyin/out relocation bottom

            stw     r7,UAW(r12)                     ; Save the assist word for the "ultra fast path"

            lwz     r7,ACT_MACT_SPF(r5)             ; Get the special flags

            sth     r9,ppCIOmp+mpSpace(r12)         ; Save the space
            stw     r6,ppCIOmp+mpNestReloc(r12)     ; Save top part of physical address
            stw     r2,ppCIOmp+mpNestReloc+4(r12)   ; Save bottom part of physical address
            stw     r11,ppbbTaskEnv(r12)            ; Save the bb task env
            lwz     r2,traceMask(0)                 ; Get the enabled traces
            stw     r7,spcFlags(r12)                ; Set per_proc copy of the special flags
            lis     r0,hi16(CutTrace)               ; Trace FW call
            mr.     r2,r2                           ; Any tracing going on?
            lwz     r11,SAVprev+4(r8)               ; Get the previous of the switchee savearea
            ori     r0,r0,lo16(CutTrace)            ; Trace FW call
            beq++   cswNoTrc                        ; No trace today, dude...

            mr      r10,r3                          ; Save across trace
            lwz     r2,THREAD_TOP_ACT(r3)           ; Trace old activation
            mr      r3,r11                          ; Trace prev savearea
            sc                                      ; Cut trace entry of context switch
            mr      r3,r10                          ; Restore

cswNoTrc:   lwz     r2,curctx(r5)                   ; Grab our current context pointer
            lwz     r10,FPUowner(r12)               ; Grab the owner of the FPU
            lwz     r9,VMXowner(r12)                ; Grab the owner of the vector
            lhz     r0,PP_CPU_NUMBER(r12)           ; Get our CPU number
            mfmsr   r6                              ; Get the MSR because the switched to thread should inherit it
            stw     r11,ACT_MACT_PCB(r5)            ; Dequeue the savearea we are switching to
            li      r0,1                            ; Get set to hold off quickfret

            rlwinm  r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1       ; Turn off the FP
            cmplw   r10,r2                          ; Do we have the live float context?
            lwz     r10,FPUlevel(r2)                ; Get the live level
            mr      r4,r3                           ; Save our old thread to pass back
            cmplw   cr5,r9,r2                       ; Do we have the live vector context?
            rlwinm  r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1     ; Turn off the vector
            stw     r0,holdQFret(r12)               ; Make sure we hold off releasing quickfret
            bne++   cswnofloat                      ; Float is not ours...

            cmplw   r10,r11                         ; Is the level the same?
            lwz     r5,FPUcpu(r2)                   ; Get the owning cpu
            bne++   cswnofloat                      ; Level not the same, this is not live...

            cmplw   r5,r0                           ; Still owned by this cpu?
            lwz     r10,FPUsave(r2)                 ; Get the top savearea
            bne++   cswnofloat                      ; CPU claimed by someone else...

            mr.     r10,r10                         ; Is there a savearea here?
            ori     r6,r6,lo16(MASK(MSR_FP))        ; Enable floating point

            beq--   cswnofloat                      ; No savearea to check...

            lwz     r3,SAVlevel(r10)                ; Get the level
            lwz     r5,SAVprev+4(r10)               ; Get the previous of this savearea

            cmplw   r3,r11                          ; Is it for the current level?

            bne++   cswnofloat                      ; Nope...

            stw     r5,FPUsave(r2)                  ; Pop off this savearea

            rlwinm  r3,r10,0,0,19                   ; Move back to start of page

            lwz     r5,quickfret(r12)               ; Get the first in quickfret list (top)
            lwz     r9,quickfret+4(r12)             ; Get the first in quickfret list (bottom)
            lwz     r7,SACvrswap(r3)                ; Get the virtual to real conversion (top)
            lwz     r3,SACvrswap+4(r3)              ; Get the virtual to real conversion (bottom)
            stw     r5,SAVprev(r10)                 ; Link the old in (top)
            stw     r9,SAVprev+4(r10)               ; Link the old in (bottom)
            xor     r3,r10,r3                       ; Convert to physical
            stw     r7,quickfret(r12)               ; Set the first in quickfret list (top)
            stw     r3,quickfret+4(r12)             ; Set the first in quickfret list (bottom)
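
;
;           Note: the savearea page address is recovered by clearing the low-order 12 bits
;           (rlwinm ...,0,0,19), and SACvrswap stored at the start of that page is the XOR
;           mask that converts the savearea's virtual address to its physical address.
;           The popped float savearea is chained onto the per_proc quickfret list (as a
;           physical address) so it can be freed later, once holdQFret is released.
;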

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            mr      r7,r2                           ; (TEST/DEBUG)
            li      r2,0x4401                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
            lhz     r0,PP_CPU_NUMBER(r12)           ; (TEST/DEBUG)
            mr      r2,r7                           ; (TEST/DEBUG)
#endif

cswnofloat: bne++   cr5,cswnovect                   ; Vector is not ours...

            lwz     r10,VMXlevel(r2)                ; Get the live level

            cmplw   r10,r11                         ; Is the level the same?
            lwz     r5,VMXcpu(r2)                   ; Get the owning cpu
            bne++   cswnovect                       ; Level not the same, this is not live...

            cmplw   r5,r0                           ; Still owned by this cpu?
            lwz     r10,VMXsave(r2)                 ; Get the top savearea
            bne++   cswnovect                       ; CPU claimed by someone else...

            mr.     r10,r10                         ; Is there a savearea here?
            oris    r6,r6,hi16(MASK(MSR_VEC))       ; Enable vector

            beq--   cswnovect                       ; No savearea to check...

            lwz     r3,SAVlevel(r10)                ; Get the level
            lwz     r5,SAVprev+4(r10)               ; Get the previous of this savearea

            cmplw   r3,r11                          ; Is it for the current level?

            bne++   cswnovect                       ; Nope...

            stw     r5,VMXsave(r2)                  ; Pop off this savearea
            rlwinm  r3,r10,0,0,19                   ; Move back to start of page

            lwz     r5,quickfret(r12)               ; Get the first in quickfret list (top)
            lwz     r9,quickfret+4(r12)             ; Get the first in quickfret list (bottom)
            lwz     r2,SACvrswap(r3)                ; Get the virtual to real conversion (top)
            lwz     r3,SACvrswap+4(r3)              ; Get the virtual to real conversion (bottom)
            stw     r5,SAVprev(r10)                 ; Link the old in (top)
            stw     r9,SAVprev+4(r10)               ; Link the old in (bottom)
            xor     r3,r10,r3                       ; Convert to physical
            stw     r2,quickfret(r12)               ; Set the first in quickfret list (top)
            stw     r3,quickfret+4(r12)             ; Set the first in quickfret list (bottom)

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x4501                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

cswnovect:  li      r0,0                            ; Get set to release quickfret holdoff
            rlwinm  r11,r8,0,0,19                   ; Switch to savearea base
            lis     r9,hi16(EXT(switch_in))         ; Get top of switch in routine
            lwz     r5,savesrr0+4(r8)               ; Set up the new SRR0
            lwz     r7,SACvrswap(r11)               ; Get the high order V to R translation
            lwz     r11,SACvrswap+4(r11)            ; Get the low order V to R translation
            ori     r9,r9,lo16(EXT(switch_in))      ; Bottom half of switch in
            stw     r0,holdQFret(r12)               ; Make sure we release quickfret holdoff
            stw     r9,savesrr0+4(r8)               ; Make us jump to the switch in routine

            lwz     r9,SAVflags(r8)                 /* Get the flags */
            lis     r0,hi16(SwitchContextCall)      /* Top part of switch context */
            li      r10,MSR_SUPERVISOR_INT_OFF      /* Get the switcher's MSR */
            ori     r0,r0,lo16(SwitchContextCall)   /* Bottom part of switch context */
            stw     r10,savesrr1+4(r8)              /* Set up for switch in */
            rlwinm  r9,r9,0,15,13                   /* Reset the syscall flag */
            xor     r3,r11,r8                       /* Get the physical address of the new context save area */
            stw     r9,SAVflags(r8)                 /* Set the flags */

            bne     cr1,swtchtocont                 ; Switch to the continuation
            sc                                      /* Switch to the new context */

/* We come back here in the new thread context
 * R4 was set to hold the old thread pointer, but switch_in will put it into
 * R3 where it belongs.
 */
            blr                                     /* Jump into the new thread */

;
;           This is where we go when a continuation is set. We are actually
;           killing off the old context of the new guy so we need to pop off
;           any float or vector states for the ditched level.
;
;           Note that we do the same kind of thing as chkfac in hw_exceptions.s
;

swtchtocont:

            stw     r5,savesrr0+4(r8)               ; Set the pc
            stw     r6,savesrr1+4(r8)               ; Set the next MSR to use
            stw     r4,saver3+4(r8)                 ; Make sure we pass back the old thread
            mr      r3,r8                           ; Pass in the virtual address of savearea

            b       EXT(exception_exit)             ; Blocking on continuation, toss old context...



/*
 * All switched to threads come here first to clean up the old thread.
 * We need to do the following contortions because we need to keep
 * the LR clean. And because we need to manipulate the savearea chain
 * with translation on. If we could, this should be done in lowmem_vectors
 * before translation is turned on. But we can't, dang it!
 *
 * R3     = switcher's savearea (32-bit virtual)
 * saver4 = old thread in switcher's save
 * saver5 = new SRR0 in switcher's save
 * saver6 = new SRR1 in switcher's save
 *
 *
 */

            .align  5
            .globl  EXT(switch_in)

LEXT(switch_in)

            lwz     r4,saver4+4(r3)                 ; Get the old thread
            lwz     r5,saver5+4(r3)                 ; Get the srr0 value

            mfsprg  r0,2                            ; Get feature flags
            lwz     r9,THREAD_TOP_ACT(r4)           ; Get the switched from ACT
            lwz     r6,saver6+4(r3)                 ; Get the srr1 value
            rlwinm. r0,r0,0,pf64Bitb,pf64Bitb       ; Check for 64-bit
            lwz     r10,ACT_MACT_PCB(r9)            ; Get the top PCB on the old thread

            stw     r3,ACT_MACT_PCB(r9)             ; Put the new one on top
            stw     r10,SAVprev+4(r3)               ; Chain on the old one

            mr      r3,r4                           ; Pass back the old thread

            mtsrr0  r5                              ; Set return point
            mtsrr1  r6                              ; Set return MSR

            bne++   siSixtyFour                     ; Go do 64-bit...

            rfi                                     ; Jam...

siSixtyFour:
            rfid                                    ; Jam...

/*
 * void fpu_save(facility_context ctx)
 *
 * Note that there are some oddities here when we save a context we are using.
 * It is really not too cool to do this, but what the hey... Anyway, we turn
 * fpus and vecs off before we leave. The oddity is that if you use fpus after
 * this, the savearea containing the context just saved will go away. So,
 * bottom line: don't use fpus until after you are done with the saved context.
 */
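;
;           A quick map of the per-facility fields used below (as implied by this code):
;           FPUowner in the per_proc is the facility context that currently owns the live
;           FPU state on this processor, FPUcpu is the processor that context last ran on,
;           FPUlevel is the activation level the live state belongs to, and FPUsave is the
;           head of the chain of saveareas holding previously pushed levels.
;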
            .align  5
            .globl  EXT(fpu_save)

LEXT(fpu_save)

            lis     r2,hi16(MASK(MSR_VEC))          ; Get the vector enable
            li      r12,lo16(MASK(MSR_EE))          ; Get the EE bit
            ori     r2,r2,lo16(MASK(MSR_FP))        ; Get FP

            mfmsr   r0                              ; Get the MSR
            andc    r0,r0,r2                        ; Clear FP, VEC
            andc    r2,r0,r12                       ; Clear EE
            ori     r2,r2,MASK(MSR_FP)              ; Enable the floating point feature for now also
            mtmsr   r2                              ; Set the MSR
            isync

            mfsprg  r6,0                            ; Get the per_processor block
            lwz     r12,FPUowner(r6)                ; Get the context ID for owner

#if FPVECDBG
            mr      r7,r0                           ; (TEST/DEBUG)
            li      r4,0                            ; (TEST/DEBUG)
            mr      r10,r3                          ; (TEST/DEBUG)
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            mr.     r3,r12                          ; (TEST/DEBUG)
            li      r2,0x6F00                       ; (TEST/DEBUG)
            li      r5,0                            ; (TEST/DEBUG)
            beq--   noowneryet                      ; (TEST/DEBUG)
            lwz     r4,FPUlevel(r12)                ; (TEST/DEBUG)
            lwz     r5,FPUsave(r12)                 ; (TEST/DEBUG)

noowneryet: oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
            mr      r0,r7                           ; (TEST/DEBUG)
            mr      r3,r10                          ; (TEST/DEBUG)
#endif
            mflr    r2                              ; Save the return address

fsretry:    mr.     r12,r12                         ; Anyone own the FPU?
            lhz     r11,PP_CPU_NUMBER(r6)           ; Get our CPU number
            beq--   fsret                           ; Nobody owns the FPU, no save required...

            cmplw   cr1,r3,r12                      ; Is the specified context live?

            isync                                   ; Force owner check first

            lwz     r9,FPUcpu(r12)                  ; Get the cpu that context was last on
            bne--   cr1,fsret                       ; No, it is not...

            cmplw   cr1,r9,r11                      ; Was the context for this processor?
            beq--   cr1,fsgoodcpu                   ; Facility last used on this processor...

            b       fsret                           ; Someone else claimed it...

            .align  5

fsgoodcpu:  lwz     r3,FPUsave(r12)                 ; Get the current FPU savearea for the thread
            lwz     r9,FPUlevel(r12)                ; Get our current level indicator

            cmplwi  cr1,r3,0                        ; Have we ever saved this facility context?
            beq-    cr1,fsneedone                   ; Never saved it, so go do it...

            lwz     r8,SAVlevel(r3)                 ; Get the level this savearea is for
            cmplw   cr1,r9,r8                       ; Correct level?
            beq--   cr1,fsret                       ; The current level is already saved, bail out...

fsneedone:  bl      EXT(save_get)                   ; Get a savearea for the context

            mfsprg  r6,0                            ; Get back per_processor block
            li      r4,SAVfloat                     ; Get floating point tag
            lwz     r12,FPUowner(r6)                ; Get back our thread
            stb     r4,SAVflags+2(r3)               ; Mark this savearea as a float
            mr.     r12,r12                         ; See if we were disowned while away. Very, very small chance of it...
            beq--   fsbackout                       ; If disowned, just toss savearea...
            lwz     r4,facAct(r12)                  ; Get the activation associated with live context
            lwz     r8,FPUsave(r12)                 ; Get the current top floating point savearea
            stw     r4,SAVact(r3)                   ; Indicate the right activation for this context
            lwz     r9,FPUlevel(r12)                ; Get our current level indicator again
            stw     r3,FPUsave(r12)                 ; Set this as the most current floating point context
            stw     r8,SAVprev+4(r3)                ; And then chain this in front

            stw     r9,SAVlevel(r3)                 ; Show level in savearea

            bl      fp_store                        ; save all 32 FPRs in the save area at r3
            mtlr    r2                              ; Restore return

fsret:      mtmsr   r0                              ; Put interrupts on if they were and floating point off
            isync

            blr

fsbackout:  mr      r4,r0                           ; restore the original MSR
            b       EXT(save_ret_wMSR)              ; Toss savearea and return from there...

/*
 * fpu_switch()
 *
 * Entered to handle the floating-point unavailable exception and
 * switch fpu context
 *
 * This code is run with virtual address mode on and interrupts off.
 *
 * Upon exit, the code returns to the users context with the floating
 * point facility turned on.
 *
 * ENTRY:   VM switched ON
 *          Interrupts OFF
 *          State is saved in savearea pointed to by R4.
 *          All other registers are free.
 *
 */

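;
;           Overall flow of fpu_switch: if some other context owns the live FP state on
;           this processor and its current level has not been saved yet, push it into a
;           new savearea; then claim the FPU for the switching-in thread, invalidate any
;           stale FPUowner pointer in the per_proc it last ran on, and either reload the
;           top savearea of the new context (if it matches the current level) or fill
;           the FPRs from FloatInit so no stale values leak between threads.
;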

            .align  5
            .globl  EXT(fpu_switch)

LEXT(fpu_switch)

#if DEBUG
            lis     r3,hi16(EXT(fpu_trap_count))    ; Get address of FP trap counter
            ori     r3,r3,lo16(EXT(fpu_trap_count)) ; Get address of FP trap counter
            lwz     r1,0(r3)
            addi    r1,r1,1
            stw     r1,0(r3)
#endif /* DEBUG */

            mfsprg  r26,0                           ; Get the per_processor block
            mfmsr   r19                             ; Get the current MSR
            mfsprg  r17,1                           ; Get the current thread

            mr      r25,r4                          ; Save the entry savearea
            lwz     r22,FPUowner(r26)               ; Get the thread that owns the FPU
            ori     r19,r19,lo16(MASK(MSR_FP))      ; Enable the floating point feature

            mtmsr   r19                             ; Enable floating point instructions
            isync

            lwz     r27,ACT_MACT_PCB(r17)           ; Get the current level
            lwz     r29,curctx(r17)                 ; Grab the current context anchor of the current thread

;           R22 has the "old" context anchor
;           R29 has the "new" context anchor

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F01                       ; (TEST/DEBUG)
            mr      r3,r22                          ; (TEST/DEBUG)
            mr      r5,r29                          ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lhz     r16,PP_CPU_NUMBER(r26)          ; Get the current CPU number

fswretry:   mr.     r22,r22                         ; See if there is any live FP status

            beq-    fsnosave                        ; No live context, so nothing to save...

            isync                                   ; Make sure we see this in the right order

            lwz     r30,FPUsave(r22)                ; Get the top savearea
            cmplw   cr2,r22,r29                     ; Are both old and new the same context?
            lwz     r18,FPUcpu(r22)                 ; Get the last CPU we ran on
            cmplwi  cr1,r30,0                       ; Anything saved yet?
            cmplw   r18,r16                         ; Make sure we are on the right processor
            lwz     r31,FPUlevel(r22)               ; Get the context level

            bne-    fsnosave                        ; No, not on the same processor...

;
;           Check to see if the live context has already been saved.
;           Also check to see if all we are here for is to re-enable the MSR
;           and handle specially if so.
;

            cmplw   r31,r27                         ; See if the current and active levels are the same
            crand   cr0_eq,cr2_eq,cr0_eq            ; Remember if both the levels and contexts are the same
            li      r3,0                            ; Clear this

            beq-    fsthesame                       ; New and old are the same, just go enable...

            beq-    cr1,fsmstsave                   ; Not saved yet, go do it...

            lwz     r11,SAVlevel(r30)               ; Get the level of top saved context

            cmplw   r31,r11                         ; Are live and saved the same?

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F02                       ; (TEST/DEBUG)
            mr      r3,r30                          ; (TEST/DEBUG)
            mr      r5,r31                          ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
            li      r3,0                            ; (TEST/DEBUG)
#endif

            beq+    fsnosave                        ; Same level, so already saved...


fsmstsave:  stw     r3,FPUowner(r26)                ; Kill the context now
            eieio                                   ; Make sure everyone sees it
            bl      EXT(save_get)                   ; Go get a savearea

            mr.     r31,r31                         ; Are we saving the user state?
            la      r15,FPUsync(r22)                ; Point to the sync word
            beq++   fswusave                        ; Yeah, no need for lock...
;
;           Here we make sure that the live context is not tossed while we are
;           trying to push it. This can happen only for kernel context and
;           then only by a race with act_machine_sv_free.
;
;           We only need to hold this for a very short time, so no sniffing needed.
;           If we find any change to the level, we just abandon.
;
fswsync:    lwarx   r19,0,r15                       ; Get the sync word
            li      r0,1                            ; Get the lock
            cmplwi  cr1,r19,0                       ; Is it unlocked?
            stwcx.  r0,0,r15                        ; Store lock and test reservation
            cror    cr0_eq,cr1_eq,cr0_eq            ; Combine lost reservation and previously locked
            bne--   fswsync                         ; Try again if lost reservation or locked...

            isync                                   ; Toss speculation

            lwz     r0,FPUlevel(r22)                ; Pick up the level again
            li      r7,0                            ; Get unlock value
            cmplw   r0,r31                          ; Same level?
            beq++   fswusave                        ; Yeah, we expect it to be...

            stw     r7,FPUsync(r22)                 ; Unlock lock. No need to sync here

            bl      EXT(save_ret)                   ; Toss save area because we are abandoning save
            b       fsnosave                        ; Skip the save...

            .align  5

fswusave:   lwz     r12,facAct(r22)                 ; Get the activation associated with the context
            stw     r3,FPUsave(r22)                 ; Set this as the latest context savearea for the thread
            mr.     r31,r31                         ; Check again if we were user level
            stw     r30,SAVprev+4(r3)               ; Point us to the old context
            stw     r31,SAVlevel(r3)                ; Tag our level
            li      r7,SAVfloat                     ; Get the floating point ID
            stw     r12,SAVact(r3)                  ; Make sure we point to the right guy
            stb     r7,SAVflags+2(r3)               ; Set that we have a floating point save area

            li      r7,0                            ; Get the unlock value

            beq--   fswnulock                       ; Skip unlock if user (we did not lock it)...
            eieio                                   ; Make sure that these updates make it out
            stw     r7,FPUsync(r22)                 ; Unlock it.

fswnulock:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F03                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            bl      fp_store                        ; store all 32 FPRs

;
;           The context is all saved now and the facility is free.
;
;           If we do not have a context to load, we need to fill the registers with junk,
;           because this level has never used them before and some thieving bastard could
;           hack the old values of some thread! Just imagine what would happen if they
;           could! Why, nothing would be safe! My God! It is terrifying!
;


fsnosave:   lwz     r15,ACT_MACT_PCB(r17)           ; Get the current level of the "new" one
            lwz     r19,FPUcpu(r29)                 ; Get the last CPU we ran on
            lwz     r14,FPUsave(r29)                ; Point to the top of the "new" context stack

            stw     r16,FPUcpu(r29)                 ; Claim context for us
            eieio

#if FPVECDBG
            lwz     r13,FPUlevel(r29)               ; (TEST/DEBUG)
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F04                       ; (TEST/DEBUG)
            mr      r1,r15                          ; (TEST/DEBUG)
            mr      r3,r14                          ; (TEST/DEBUG)
            mr      r5,r13                          ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lis     r18,hi16(EXT(per_proc_info))    ; Set base per_proc
            mulli   r19,r19,ppSize                  ; Find offset to the owner per_proc
            ori     r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
            li      r16,FPUowner                    ; Displacement to float owner
            add     r19,r18,r19                     ; Point to the owner per_proc

fsinvothr:  lwarx   r18,r16,r19                     ; Get the owner
            sub     r0,r18,r29                      ; Subtract one from the other
            sub     r11,r29,r18                     ; Subtract the other from the one
            or      r11,r11,r0                      ; Combine them
            srawi   r11,r11,31                      ; Get a 0 if equal or -1 if not
            and     r18,r18,r11                     ; Make 0 if same, unchanged if not
            stwcx.  r18,r16,r19                     ; Try to invalidate it
            bne--   fsinvothr                       ; Try again if there was a collision...
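
;
;           The four ALU ops above are a branch-free compare: if the owner (r18) equals
;           our context (r29) both differences are zero, the OR is zero, and the
;           arithmetic shift produces an all-zero mask, so the store writes 0 and the
;           stale owner is invalidated; if they differ, one difference is negative, the
;           mask is all ones, and the owner word is stored back unchanged.
;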

            cmplwi  cr1,r14,0                       ; Do we possibly have some context to load?
            la      r11,savefp0(r14)                ; Point to first line to bring in
            stw     r15,FPUlevel(r29)               ; Set the "new" active level
            eieio
            stw     r29,FPUowner(r26)               ; Mark us as having the live context

            beq++   cr1,MakeSureThatNoTerroristsCanHurtUsByGod      ; No "new" context to load...

            dcbt    0,r11                           ; Touch line in

            lwz     r3,SAVprev+4(r14)               ; Get the previous context
            lwz     r0,SAVlevel(r14)                ; Get the level of first facility savearea
            cmplw   r0,r15                          ; Top level correct to load?
            bne--   MakeSureThatNoTerroristsCanHurtUsByGod          ; No, go initialize...

            stw     r3,FPUsave(r29)                 ; Pop the context (we will toss the savearea later)

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F05                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

// Note this code is used both by 32- and 128-byte processors. This means six extra DCBTs
// are executed on a 128-byte machine, but that is better than a mispredicted branch.

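// Four FPRs are 32 bytes, so each savefpN group below spans exactly one 32-byte cache
// line; the la/dcbt pair issued every four loads touches the next line ahead of the
// lfd instructions that will read it.
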
            la      r11,savefp4(r14)                ; Point to next line
            dcbt    0,r11                           ; Touch line in
            lfd     f0,savefp0(r14)
            lfd     f1,savefp1(r14)
            lfd     f2,savefp2(r14)
            la      r11,savefp8(r14)                ; Point to next line
            lfd     f3,savefp3(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f4,savefp4(r14)
            lfd     f5,savefp5(r14)
            lfd     f6,savefp6(r14)
            la      r11,savefp12(r14)               ; Point to next line
            lfd     f7,savefp7(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f8,savefp8(r14)
            lfd     f9,savefp9(r14)
            lfd     f10,savefp10(r14)
            la      r11,savefp16(r14)               ; Point to next line
            lfd     f11,savefp11(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f12,savefp12(r14)
            lfd     f13,savefp13(r14)
            lfd     f14,savefp14(r14)
            la      r11,savefp20(r14)               ; Point to next line
            lfd     f15,savefp15(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f16,savefp16(r14)
            lfd     f17,savefp17(r14)
            lfd     f18,savefp18(r14)
            la      r11,savefp24(r14)               ; Point to next line
            lfd     f19,savefp19(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f20,savefp20(r14)
            lfd     f21,savefp21(r14)
            la      r11,savefp28(r14)               ; Point to next line
            lfd     f22,savefp22(r14)
            lfd     f23,savefp23(r14)
            dcbt    0,r11                           ; Touch line in
            lfd     f24,savefp24(r14)
            lfd     f25,savefp25(r14)
            lfd     f26,savefp26(r14)
            lfd     f27,savefp27(r14)
            lfd     f28,savefp28(r14)
            lfd     f29,savefp29(r14)
            lfd     f30,savefp30(r14)
            lfd     f31,savefp31(r14)

            mr      r3,r14                          ; Get the old savearea (we popped it before)
            bl      EXT(save_ret)                   ; Toss it

fsenable:   lwz     r8,savesrr1+4(r25)              ; Get the msr of the interrupted guy
            ori     r8,r8,MASK(MSR_FP)              ; Enable the floating point feature
            lwz     r10,ACT_MACT_SPF(r17)           ; Get the act special flags
            lwz     r11,spcFlags(r26)               ; Get per_proc spec flags cause not in sync with act
            oris    r10,r10,hi16(floatUsed|floatCng)        ; Set that we used floating point
            oris    r11,r11,hi16(floatUsed|floatCng)        ; Set that we used floating point
            rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT   ; See if we are doing this for user state
            stw     r8,savesrr1+4(r25)              ; Set the msr of the interrupted guy
            mr      r3,r25                          ; Pass the virtual address of savearea
            beq-    fsnuser                         ; We are not user state...
            stw     r10,ACT_MACT_SPF(r17)           ; Set the activation copy
            stw     r11,spcFlags(r26)               ; Set per_proc copy

fsnuser:
#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F07                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            b       EXT(exception_exit)             ; Exit to the fray...

/*
 * Initialize the registers to some bogus value
 */

MakeSureThatNoTerroristsCanHurtUsByGod:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F06                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif
            lis     r5,hi16(EXT(FloatInit))         ; Get top secret floating point init value address
            ori     r5,r5,lo16(EXT(FloatInit))      ; Slam bottom
            lfd     f0,0(r5)                        ; Initialize FP0
            fmr     f1,f0                           ; Do them all
            fmr     f2,f0
            fmr     f3,f0
            fmr     f4,f0
            fmr     f5,f0
            fmr     f6,f0
            fmr     f7,f0
            fmr     f8,f0
            fmr     f9,f0
            fmr     f10,f0
            fmr     f11,f0
            fmr     f12,f0
            fmr     f13,f0
            fmr     f14,f0
            fmr     f15,f0
            fmr     f16,f0
            fmr     f17,f0
            fmr     f18,f0
            fmr     f19,f0
            fmr     f20,f0
            fmr     f21,f0
            fmr     f22,f0
            fmr     f23,f0
            fmr     f24,f0
            fmr     f25,f0
            fmr     f26,f0
            fmr     f27,f0
            fmr     f28,f0
            fmr     f29,f0
            fmr     f30,f0
            fmr     f31,f0
            b       fsenable                        ; Finish setting it all up...


;
;           We get here when we are switching to the same context at the same level and the
;           context is still live. Essentially, all we are doing is turning on the facility.
;           It may have gotten turned off due to doing a context save for the current level
;           or a context switch back to the live guy.
;

            .align  5

fsthesame:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x7F0A                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif
            beq-    cr1,fsenable                    ; Not saved yet, nothing to pop, go enable and exit...

            lwz     r11,SAVlevel(r30)               ; Get the level of top saved context
            lwz     r14,SAVprev+4(r30)              ; Get the previous savearea

            cmplw   r11,r31                         ; Are live and saved the same?

            bne+    fsenable                        ; Level not the same, nothing to pop, go enable and exit...

            mr      r3,r30                          ; Get the old savearea (we popped it before)
            stw     r14,FPUsave(r22)                ; Pop the savearea from the stack
            bl      EXT(save_ret)                   ; Toss it
            b       fsenable                        ; Go enable and exit...


;
;           This function invalidates any live floating point context for the passed in
;           facility_context. This is intended to be called just before act_machine_sv_free
;           tosses saveareas.
;

            .align  5
            .globl  EXT(toss_live_fpu)

LEXT(toss_live_fpu)

            lis     r0,hi16(MASK(MSR_VEC))          ; Get VEC
            mfmsr   r9                              ; Get the MSR
            ori     r0,r0,lo16(MASK(MSR_FP))        ; Add in FP
            rlwinm. r8,r9,0,MSR_FP_BIT,MSR_FP_BIT   ; Are floats on right now?
            andc    r9,r9,r0                        ; Force off VEC and FP
            ori     r0,r0,lo16(MASK(MSR_EE))        ; Turn off EE
            andc    r0,r9,r0                        ; Turn off EE now
            mtmsr   r0                              ; No interruptions
            isync
            beq+    tlfnotours                      ; Floats off, can not be live here...

            mfsprg  r8,0                            ; Get the per proc

;
;           Note that at this point, since floats are on, we are the owner
;           of live state on this processor
;

            lwz     r6,FPUowner(r8)                 ; Get the thread that owns the floats
            li      r0,0                            ; Clear this just in case we need it
            cmplw   r6,r3                           ; Are we tossing our own context?
            bne--   tlfnotours                      ; Nope...

            lfd     f1,Zero(0)                      ; Make a 0
            mtfsf   0xFF,f1                         ; Clear it

tlfnotours: lwz     r11,FPUcpu(r3)                  ; Get the cpu on which we last loaded context
            lis     r12,hi16(EXT(per_proc_info))    ; Set base per_proc
            mulli   r11,r11,ppSize                  ; Find offset to the owner per_proc
            ori     r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
            li      r10,FPUowner                    ; Displacement to float owner
            add     r11,r12,r11                     ; Point to the owner per_proc

tlfinvothr: lwarx   r12,r10,r11                     ; Get the owner

            sub     r0,r12,r3                       ; Subtract one from the other
            sub     r8,r3,r12                       ; Subtract the other from the one
            or      r8,r8,r0                        ; Combine them
            srawi   r8,r8,31                        ; Get a 0 if equal or -1 if not
            and     r12,r12,r8                      ; Make 0 if same, unchanged if not
            stwcx.  r12,r10,r11                     ; Try to invalidate it
            bne--   tlfinvothr                      ; Try again if there was a collision...

            mtmsr   r9                              ; Restore interruptions
            isync                                   ; Could be turning off floats here
            blr                                     ; Leave...


/*
 * Altivec stuff is here. The techniques used are pretty identical to
 * the floating point. Except that we will honor the VRSAVE register
 * settings when loading and restoring registers.
 *
 * There are two indications of saved VRs: the VRSAVE register and the vrvalid
 * mask. VRSAVE is set by the vector user and represents the VRs that they
 * say that they are using. The vrvalid mask indicates which vector registers
 * are saved in the savearea. Whenever context is saved, it is saved according
 * to the VRSAVE register. It is loaded based on VRSAVE anded with
 * vrvalid (all other registers are splatted with 0s). This is done because we
 * don't want to load any registers we don't have a copy of, we want to set them
 * to zero instead.
 *
 * Note that there are some oddities here when we save a context we are using.
 * It is really not too cool to do this, but what the hey... Anyway,
 * we turn vectors and fpu off before we leave.
 * The oddity is that if you use vectors after this, the
 * savearea containing the context just saved will go away. So, bottom line:
 * don't use vectors until after you are done with the saved context.
 */
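;
;           In other words (illustrative only; the actual work is done by vr_store/vr_load):
;
;               save:  for each VR whose bit is set in VRSAVE -> store that VR, record its bit in vrvalid
;               load:  mask = VRSAVE & vrvalid; for each VR    -> load it if its mask bit is set,
;                                                                 otherwise splat a fill value into it
;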

            .align  5
            .globl  EXT(vec_save)

LEXT(vec_save)

            lis     r2,hi16(MASK(MSR_VEC))          ; Get VEC
            mfmsr   r0                              ; Get the MSR
            ori     r2,r2,lo16(MASK(MSR_FP))        ; Add in FP
            andc    r0,r0,r2                        ; Force off VEC and FP
            ori     r2,r2,lo16(MASK(MSR_EE))        ; Clear EE
            andc    r2,r0,r2                        ; Clear EE for now
            oris    r2,r2,hi16(MASK(MSR_VEC))       ; Enable the vector facility for now also
            mtmsr   r2                              ; Set the MSR
            isync

            mfsprg  r6,0                            ; Get the per_processor block
            lwz     r12,VMXowner(r6)                ; Get the context ID for owner

#if FPVECDBG
            mr      r7,r0                           ; (TEST/DEBUG)
            li      r4,0                            ; (TEST/DEBUG)
            mr      r10,r3                          ; (TEST/DEBUG)
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            mr.     r3,r12                          ; (TEST/DEBUG)
            li      r2,0x5F00                       ; (TEST/DEBUG)
            li      r5,0                            ; (TEST/DEBUG)
            beq-    noowneryeu                      ; (TEST/DEBUG)
            lwz     r4,VMXlevel(r12)                ; (TEST/DEBUG)
            lwz     r5,VMXsave(r12)                 ; (TEST/DEBUG)

noowneryeu: oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
            mr      r0,r7                           ; (TEST/DEBUG)
            mr      r3,r10                          ; (TEST/DEBUG)
#endif
            mflr    r2                              ; Save the return address

vsretry:    mr.     r12,r12                         ; Anyone own the vector?
            lhz     r11,PP_CPU_NUMBER(r6)           ; Get our CPU number
            beq-    vsret                           ; Nobody owns the vector, no save required...

            cmplw   cr1,r3,r12                      ; Is the specified context live?

            isync                                   ; Force owner check first

            lwz     r9,VMXcpu(r12)                  ; Get the cpu that context was last on
            bne-    cr1,vsret                       ; Specified context is not live

            cmplw   cr1,r9,r11                      ; Was the context for this processor?
            beq+    cr1,vsgoodcpu                   ; Facility last used on this processor...

            b       vsret                           ; Someone else claimed this...

            .align  5

vsgoodcpu:  lwz     r3,VMXsave(r12)                 ; Get the current vector savearea for the thread
            lwz     r10,liveVRS(r6)                 ; Get the right VRSave register
            lwz     r9,VMXlevel(r12)                ; Get our current level indicator

            cmplwi  cr1,r3,0                        ; Have we ever saved this facility context?
            beq-    cr1,vsneedone                   ; Never saved it, so we need an area...

            lwz     r8,SAVlevel(r3)                 ; Get the level this savearea is for
            mr.     r10,r10                         ; Is VRsave set to 0?
            cmplw   cr1,r9,r8                       ; Correct level?
            bne-    cr1,vsneedone                   ; Different level, so we need to save...

            bne+    vsret                           ; VRsave is non-zero so we need to keep what is saved...

            lwz     r4,SAVprev+4(r3)                ; Pick up the previous area
            lwz     r5,SAVlevel(r4)                 ; Get the level associated with save
            stw     r4,VMXsave(r12)                 ; Dequeue this savearea
            li      r4,0                            ; Clear
            stw     r5,VMXlevel(r12)                ; Save the level

            stw     r4,VMXowner(r12)                ; Show no live context here
            eieio

vsbackout:  mr      r4,r0                           ; restore the saved MSR
            b       EXT(save_ret_wMSR)              ; Toss the savearea and return from there...

            .align  5

vsneedone:  mr.     r10,r10                         ; Is VRsave set to 0?
            beq-    vsret                           ; Yeah, they do not care about any of them...

            bl      EXT(save_get)                   ; Get a savearea for the context

            mfsprg  r6,0                            ; Get back per_processor block
            li      r4,SAVvector                    ; Get vector tag
            lwz     r12,VMXowner(r6)                ; Get back our context ID
            stb     r4,SAVflags+2(r3)               ; Mark this savearea as a vector
            mr.     r12,r12                         ; See if we were disowned while away. Very, very small chance of it...
            beq-    vsbackout                       ; If disowned, just toss savearea...
            lwz     r4,facAct(r12)                  ; Get the activation associated with live context
            lwz     r8,VMXsave(r12)                 ; Get the current top vector savearea
            stw     r4,SAVact(r3)                   ; Indicate the right activation for this context
            lwz     r9,VMXlevel(r12)                ; Get our current level indicator again
            stw     r3,VMXsave(r12)                 ; Set this as the most current vector context
            stw     r8,SAVprev+4(r3)                ; And then chain this in front

            stw     r9,SAVlevel(r3)                 ; Set level in savearea
            mfcr    r12                             ; save CRs across call to vr_store
            lwz     r10,liveVRS(r6)                 ; Get the right VRSave register

            bl      vr_store                        ; store live VRs into savearea as required (uses r4-r11)

            mtcrf   255,r12                         ; Restore the non-volatile CRs
            mtlr    r2                              ; restore return address

vsret:      mtmsr   r0                              ; Put interrupts on if they were and vector off
            isync

            blr

/*
 * vec_switch()
 *
 * Entered to handle the vector unavailable exception and
 * switch vector context
 *
 * This code is run with virtual address mode on and interrupts off.
 *
 * Upon exit, the code returns to the users context with the vector
 * facility turned on.
 *
 * ENTRY:   VM switched ON
 *          Interrupts OFF
 *          State is saved in savearea pointed to by R4.
 *          All other registers are free.
 *
 */

            .align  5
            .globl  EXT(vec_switch)

LEXT(vec_switch)

#if DEBUG
            lis     r3,hi16(EXT(vec_trap_count))    ; Get address of vector trap counter
            ori     r3,r3,lo16(EXT(vec_trap_count)) ; Get address of vector trap counter
            lwz     r1,0(r3)
            addi    r1,r1,1
            stw     r1,0(r3)
#endif /* DEBUG */

            mfsprg  r26,0                           ; Get the per_processor block
            mfmsr   r19                             ; Get the current MSR
            mfsprg  r17,1                           ; Get the current thread

            mr      r25,r4                          ; Save the entry savearea
            oris    r19,r19,hi16(MASK(MSR_VEC))     ; Enable the vector feature
            lwz     r22,VMXowner(r26)               ; Get the thread that owns the vector

            mtmsr   r19                             ; Enable vector instructions
            isync

            lwz     r27,ACT_MACT_PCB(r17)           ; Get the current level
            lwz     r29,curctx(r17)                 ; Grab the current context anchor of the current thread

;           R22 has the "old" context anchor
;           R29 has the "new" context anchor

#if FPVECDBG
            lis     r0,HIGH_ADDR(CutTrace)          ; (TEST/DEBUG)
            li      r2,0x5F01                       ; (TEST/DEBUG)
            mr      r3,r22                          ; (TEST/DEBUG)
            mr      r5,r29                          ; (TEST/DEBUG)
            oris    r0,r0,LOW_ADDR(CutTrace)        ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lhz     r16,PP_CPU_NUMBER(r26)          ; Get the current CPU number

vsvretry:   mr.     r22,r22                         ; See if there is any live vector status

            beq-    vsnosave                        ; No live context, so nothing to save...

            isync                                   ; Make sure we see this in the right order

            lwz     r30,VMXsave(r22)                ; Get the top savearea
            cmplw   cr2,r22,r29                     ; Are both old and new the same context?
            lwz     r18,VMXcpu(r22)                 ; Get the last CPU we ran on
            cmplwi  cr1,r30,0                       ; Anything saved yet?
            cmplw   r18,r16                         ; Make sure we are on the right processor
            lwz     r31,VMXlevel(r22)               ; Get the context level

            lwz     r10,liveVRS(r26)                ; Get the right VRSave register

            bne-    vsnosave                        ; No, not on the same processor...

;
;           Check to see if the live context has already been saved.
;           Also check to see if all we are here for is to re-enable the MSR
;           and handle specially if so.
;

            cmplw   r31,r27                         ; See if the current and active levels are the same
            crand   cr0_eq,cr2_eq,cr0_eq            ; Remember if both the levels and contexts are the same
            li      r8,0                            ; Clear this

            beq-    vsthesame                       ; New and old are the same, just go enable...

            cmplwi  cr2,r10,0                       ; Check VRSave to see if we really need to save anything...
            beq-    cr1,vsmstsave                   ; Not saved yet, go do it...

            lwz     r11,SAVlevel(r30)               ; Get the level of top saved context

            cmplw   r31,r11                         ; Are live and saved the same?

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F02                       ; (TEST/DEBUG)
            mr      r3,r30                          ; (TEST/DEBUG)
            mr      r5,r31                          ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            bne-    vsmstsave                       ; Live context has not been saved yet...

            bne-    cr2,vsnosave                    ; Live context saved and VRSave not 0, no save and keep context...

            lwz     r4,SAVprev+4(r30)               ; Pick up the previous area
            li      r5,0                            ; Assume this is the only one (which should be the usual case)
            mr.     r4,r4                           ; Was this the only one?
            stw     r4,VMXsave(r22)                 ; Dequeue this savearea
            beq+    vsonlyone                       ; This was the only one...
            lwz     r5,SAVlevel(r4)                 ; Get the level associated with previous save

vsonlyone:  stw     r5,VMXlevel(r22)                ; Save the level
            stw     r8,VMXowner(r26)                ; Clear owner
            eieio
            mr      r3,r30                          ; Copy the savearea we are tossing
            bl      EXT(save_ret)                   ; Toss the savearea
            b       vsnosave                        ; Go load up the context...

            .align  5


vsmstsave:  stw     r8,VMXowner(r26)                ; Clear owner
            eieio
            beq-    cr2,vsnosave                    ; The VRSave was 0, so there is nothing to save...

            bl      EXT(save_get)                   ; Go get a savearea

            mr.     r31,r31                         ; Are we saving the user state?
            la      r15,VMXsync(r22)                ; Point to the sync word
            beq++   vswusave                        ; Yeah, no need for lock...
;
;           Here we make sure that the live context is not tossed while we are
;           trying to push it. This can happen only for kernel context and
;           then only by a race with act_machine_sv_free.
;
;           We only need to hold this for a very short time, so no sniffing needed.
;           If we find any change to the level, we just abandon.
;
vswsync:    lwarx   r19,0,r15                       ; Get the sync word
            li      r0,1                            ; Get the lock
            cmplwi  cr1,r19,0                       ; Is it unlocked?
            stwcx.  r0,0,r15                        ; Store lock and test reservation
            cror    cr0_eq,cr1_eq,cr0_eq            ; Combine lost reservation and previously locked
            bne--   vswsync                         ; Try again if lost reservation or locked...

            isync                                   ; Toss speculation

            lwz     r0,VMXlevel(r22)                ; Pick up the level again
            li      r7,0                            ; Get unlock value
            cmplw   r0,r31                          ; Same level?
            beq++   vswusave                        ; Yeah, we expect it to be...

            stw     r7,VMXsync(r22)                 ; Unlock lock. No need to sync here

            bl      EXT(save_ret)                   ; Toss save area because we are abandoning save
            b       vsnosave                        ; Skip the save...

            .align  5

vswusave:   lwz     r12,facAct(r22)                 ; Get the activation associated with the context
            stw     r3,VMXsave(r22)                 ; Set this as the latest context savearea for the thread
            mr.     r31,r31                         ; Check again if we were user level
            stw     r30,SAVprev+4(r3)               ; Point us to the old context
            stw     r31,SAVlevel(r3)                ; Tag our level
            li      r7,SAVvector                    ; Get the vector ID
            stw     r12,SAVact(r3)                  ; Make sure we point to the right guy
            stb     r7,SAVflags+2(r3)               ; Set that we have a vector save area

            li      r7,0                            ; Get the unlock value

            beq--   vswnulock                       ; Skip unlock if user (we did not lock it)...
            eieio                                   ; Make sure that these updates make it out
            stw     r7,VMXsync(r22)                 ; Unlock it.

vswnulock:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F03                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lwz     r10,liveVRS(r26)                ; Get the right VRSave register
            bl      vr_store                        ; store VRs into savearea according to vrsave (uses r4-r11)


;
;           The context is all saved now and the facility is free.
;
;           If we do not have a context to load, we need to fill the registers with junk,
;           because this level has never used them before and some thieving bastard could
;           hack the old values of some thread! Just imagine what would happen if they
;           could! Why, nothing would be safe! My God! It is terrifying!
;
;           Also, along the way, thanks to Ian Ollmann, we generate the 0x7FFFDEAD (QNaNbarbarian)
;           constant that we may need to fill unused vector registers.
;



vsnosave:   vspltisb v31,-10                        ; Get 0xF6F6F6F6
            lwz     r15,ACT_MACT_PCB(r17)           ; Get the current level of the "new" one
            vspltisb v30,5                          ; Get 0x05050505
            lwz     r19,VMXcpu(r29)                 ; Get the last CPU we ran on
            vspltish v29,4                          ; Get 0x00040004
            lwz     r14,VMXsave(r29)                ; Point to the top of the "new" context stack
            vrlb    v31,v31,v30                     ; Get 0xDEDEDEDE

            stw     r16,VMXcpu(r29)                 ; Claim context for us
            eieio

#if FPVECDBG
            lwz     r13,VMXlevel(r29)               ; (TEST/DEBUG)
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F04                       ; (TEST/DEBUG)
            mr      r1,r15                          ; (TEST/DEBUG)
            mr      r3,r14                          ; (TEST/DEBUG)
            mr      r5,r13                          ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lis     r18,hi16(EXT(per_proc_info))    ; Set base per_proc
            vspltisb v28,-2                         ; Get 0xFEFEFEFE
            mulli   r19,r19,ppSize                  ; Find offset to the owner per_proc
            vsubuhm v31,v31,v29                     ; Get 0xDEDADEDA
            ori     r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
            vpkpx   v30,v28,v3                      ; Get 0x7FFF7FFF
            li      r16,VMXowner                    ; Displacement to vector owner
            add     r19,r18,r19                     ; Point to the owner per_proc
            vrlb    v31,v31,v29                     ; Get 0xDEADDEAD

vsinvothr:  lwarx   r18,r16,r19                     ; Get the owner

            sub     r0,r18,r29                      ; Subtract one from the other
            sub     r11,r29,r18                     ; Subtract the other from the one
            or      r11,r11,r0                      ; Combine them
            srawi   r11,r11,31                      ; Get a 0 if equal or -1 if not
            and     r18,r18,r11                     ; Make 0 if same, unchanged if not
            stwcx.  r18,r16,r19                     ; Try to invalidate it
            bne--   vsinvothr                       ; Try again if there was a collision...

            cmplwi  cr1,r14,0                       ; Do we possibly have some context to load?
            vmrghh  v31,v30,v31                     ; Get 0x7FFFDEAD. V31 keeps this value until the bitter end
            stw     r15,VMXlevel(r29)               ; Set the "new" active level
            eieio
            stw     r29,VMXowner(r26)               ; Mark us as having the live context

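;
;           (The vector constant work interleaved above builds v31 = 0x7FFFDEAD from splat
;           immediates: 0xF6F6F6F6 rotated by 5 gives 0xDEDEDEDE, subtracting 0x00040004
;           gives 0xDEDADEDA, rotating by 4 gives 0xDEADDEAD, and merging with the
;           0x7FFF7FFF halfwords from vpkpx yields 0x7FFFDEAD in every word.)
;
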
            beq--   cr1,ProtectTheAmericanWay       ; Nothing to restore, first time use...

            lwz     r3,SAVprev+4(r14)               ; Get the previous context
            lwz     r0,SAVlevel(r14)                ; Get the level of first facility savearea
            cmplw   r0,r15                          ; Top level correct to load?
            bne--   ProtectTheAmericanWay           ; No, go initialize...

            stw     r3,VMXsave(r29)                 ; Pop the context (we will toss the savearea later)

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F05                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            lwz     r10,savevrvalid(r14)            ; Get the valid VRs in the savearea
            lwz     r22,savevrsave(r25)             ; Get the most current VRSAVE
            and     r10,r10,r22                     ; Figure out just what registers need to be loaded
            mr      r3,r14                          ; r3 <- ptr to savearea with VRs
            bl      vr_load                         ; load VRs from save area based on vrsave in r10

            bl      EXT(save_ret)                   ; Toss the save area after loading VRs

vrenable:   lwz     r8,savesrr1+4(r25)              ; Get the msr of the interrupted guy
            oris    r8,r8,hi16(MASK(MSR_VEC))       ; Enable the vector facility
            lwz     r10,ACT_MACT_SPF(r17)           ; Get the act special flags
            lwz     r11,spcFlags(r26)               ; Get per_proc spec flags cause not in sync with act
            oris    r10,r10,hi16(vectorUsed|vectorCng)      ; Set that we used vectors
            oris    r11,r11,hi16(vectorUsed|vectorCng)      ; Set that we used vectors
            rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT   ; See if we are doing this for user state
            stw     r8,savesrr1+4(r25)              ; Set the msr of the interrupted guy
            mr      r3,r25                          ; Pass virtual address of the savearea
            beq-    vrnuser                         ; We are not user state...
            stw     r10,ACT_MACT_SPF(r17)           ; Set the activation copy
            stw     r11,spcFlags(r26)               ; Set per_proc copy

vrnuser:
#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F07                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif
            b       EXT(exception_exit)             ; Exit to the fray...

/*
 * Initialize the registers to some bogus value
 */

ProtectTheAmericanWay:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F06                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif

            vor     v0,v31,v31                      ; Copy into the next register
            vor     v1,v31,v31                      ; Copy into the next register
            vor     v2,v31,v31                      ; Copy into the next register
            vor     v3,v31,v31                      ; Copy into the next register
            vor     v4,v31,v31                      ; Copy into the next register
            vor     v5,v31,v31                      ; Copy into the next register
            vor     v6,v31,v31                      ; Copy into the next register
            vor     v7,v31,v31                      ; Copy into the next register
            vor     v8,v31,v31                      ; Copy into the next register
            vor     v9,v31,v31                      ; Copy into the next register
            vor     v10,v31,v31                     ; Copy into the next register
            vor     v11,v31,v31                     ; Copy into the next register
            vor     v12,v31,v31                     ; Copy into the next register
            vor     v13,v31,v31                     ; Copy into the next register
            vor     v14,v31,v31                     ; Copy into the next register
            vor     v15,v31,v31                     ; Copy into the next register
            vor     v16,v31,v31                     ; Copy into the next register
            vor     v17,v31,v31                     ; Copy into the next register
            vor     v18,v31,v31                     ; Copy into the next register
            vor     v19,v31,v31                     ; Copy into the next register
            vor     v20,v31,v31                     ; Copy into the next register
            vor     v21,v31,v31                     ; Copy into the next register
            vor     v22,v31,v31                     ; Copy into the next register
            vor     v23,v31,v31                     ; Copy into the next register
            vor     v24,v31,v31                     ; Copy into the next register
            vor     v25,v31,v31                     ; Copy into the next register
            vor     v26,v31,v31                     ; Copy into the next register
            vor     v27,v31,v31                     ; Copy into the next register
            vor     v28,v31,v31                     ; Copy into the next register
            vor     v29,v31,v31                     ; Copy into the next register
            vor     v30,v31,v31                     ; Copy into the next register
            b       vrenable                        ; Finish setting it all up...



;
;           We get here when we are switching to the same context at the same level and the
;           context is still live. Essentially, all we are doing is turning on the facility.
;           It may have gotten turned off due to doing a context save for the current level
;           or a context switch back to the live guy.
;

            .align  5

vsthesame:

#if FPVECDBG
            lis     r0,hi16(CutTrace)               ; (TEST/DEBUG)
            li      r2,0x5F0A                       ; (TEST/DEBUG)
            oris    r0,r0,lo16(CutTrace)            ; (TEST/DEBUG)
            sc                                      ; (TEST/DEBUG)
#endif
            beq-    cr1,vrenable                    ; Not saved yet, nothing to pop, go enable and exit...

            lwz     r11,SAVlevel(r30)               ; Get the level of top saved context
            lwz     r14,SAVprev+4(r30)              ; Get the previous savearea

            cmplw   r11,r31                         ; Are live and saved the same?

            bne+    vrenable                        ; Level not the same, nothing to pop, go enable and exit...

            mr      r3,r30                          ; Get the old savearea (we popped it before)
            stw     r11,VMXsave(r22)                ; Pop the vector stack
            bl      EXT(save_ret)                   ; Toss it
            b       vrenable                        ; Go enable and exit...


;
;           This function invalidates any live vector context for the passed in facility_context.
;           This is intended to be called just before act_machine_sv_free tosses saveareas.
;

            .align  5
            .globl  EXT(toss_live_vec)

LEXT(toss_live_vec)

            lis     r0,hi16(MASK(MSR_VEC))          ; Get VEC
            mfmsr   r9                              ; Get the MSR
            ori     r0,r0,lo16(MASK(MSR_FP))        ; Add in FP
            rlwinm. r8,r9,0,MSR_VEC_BIT,MSR_VEC_BIT ; Are vectors on right now?
            andc    r9,r9,r0                        ; Force off VEC and FP
            ori     r0,r0,lo16(MASK(MSR_EE))        ; Turn off EE
            andc    r0,r9,r0                        ; Turn off EE now
            mtmsr   r0                              ; No interruptions
            isync
            beq+    tlvnotours                      ; Vector off, can not be live here...

            mfsprg  r8,0                            ; Get the per proc

;
;           Note that at this point, since vecs are on, we are the owner
;           of live state on this processor
;

            lwz     r6,VMXowner(r8)                 ; Get the thread that owns the vector
            li      r0,0                            ; Clear this just in case we need it
            cmplw   r6,r3                           ; Are we tossing our own context?
            bne-    tlvnotours                      ; Nope...

            vspltish v1,1                           ; Turn on the non-Java bit and saturate
            vspltisw v0,1                           ; Turn on the saturate bit
            vxor    v1,v1,v0                        ; Turn off saturate
            mtspr   vrsave,r0                       ; Clear VRSAVE
            mtvscr  v1                              ; Set the non-java, no saturate status
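
;
;           (The three vector ops above form VSCR = 0x00010000 without a load: splatting
;           halfword 1 gives 0x00010001 per word (NJ | SAT), splatting word 1 gives the
;           SAT bit alone, and the xor leaves just the non-Java bit set.)
;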
1464
1465tlvnotours: lwz r11,VMXcpu(r3) ; Get the cpu on which we last loaded context
1466 lis r12,hi16(EXT(per_proc_info)) ; Set base per_proc
1467 mulli r11,r11,ppSize ; Find offset to the owner per_proc
1468 ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
1469 li r10,VMXowner ; Displacement to vector owner
1470 add r11,r12,r11 ; Point to the owner per_proc
1471 li r0,0 ; Set a 0 to invalidate context
1472
1473tlvinvothr: lwarx r12,r10,r11 ; Get the owner
9bccf70c 1474
55e303ae
A
1475 sub r0,r12,r3 ; Subtract one from the other
1476 sub r8,r3,r12 ; Subtract the other from the one
1477 or r8,r8,r0 ; Combine them
 1478 srawi r8,r8,31 ; Get a 0 if equal or -1 if not
1479 and r12,r12,r8 ; Make 0 if same, unchanged if not
1480 stwcx. r12,r10,r11 ; Try to invalidate it
1481 bne-- tlvinvothr ; Try again if there was a collision...
1482
1483 mtmsr r9 ; Restore interruptions
9bccf70c
A
1484 isync ; Could be turning off vectors here
1485 blr ; Leave....
1486
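;
; Illustrative C sketch (not kernel source) of the branch-free compare used in the
; tlvinvothr loop above. In the kernel it sits between lwarx and stwcx. so the cleared
; value is stored back atomically; only the arithmetic is shown here. The arithmetic
; right shift of a negative value is assumed to sign-extend, as srawi always does.
;
;	#include <stdint.h>
;	#include <stdio.h>
;
;	/* Return 0 if owner == key, else return owner unchanged. */
;	static uint32_t clear_if_equal(uint32_t owner, uint32_t key)
;	{
;		uint32_t d0 = owner - key;              /* subtract one from the other  */
;		uint32_t d1 = key - owner;              /* subtract the other from the one */
;		/* (d0 | d1) has its sign bit set exactly when owner != key, so the
;		   arithmetic shift yields 0 (equal) or 0xFFFFFFFF (not equal). */
;		uint32_t mask = (uint32_t)((int32_t)(d0 | d1) >> 31);
;		return owner & mask;                    /* 0 if same, unchanged if not  */
;	}
;
;	int main(void)
;	{
;		printf("%x\n", clear_if_equal(0x1234, 0x1234));   /* prints 0    */
;		printf("%x\n", clear_if_equal(0x1234, 0x5678));   /* prints 1234 */
;		return 0;
;	}
;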
1487#if 0
1488;
1489; This function invalidates any live vector context for the passed in facility_context
1490; if the level is current. It also tosses the corresponding savearea if there is one.
1491; This function is primarily used whenever we detect a VRSave that is all zeros.
1492;
1493
1494 .align 5
1495 .globl EXT(vec_trash)
1496
1497LEXT(vec_trash)
1498
1499 lwz r12,facAct(r3) ; Get the activation
1500 lwz r11,VMXlevel(r3) ; Get the context level
1501 lwz r10,ACT_MACT_PCB(r12) ; Grab the current level for the thread
1502 lwz r9,VMXsave(r3) ; Get the savearea, if any
1503 cmplw r10,r11 ; Are we at the right level?
1504 cmplwi cr1,r9,0 ; Remember if there is a savearea
1505 bnelr+ ; No, we do nothing...
1506
1507 lwz r11,VMXcpu(r3) ; Get the cpu on which we last loaded context
1508 lis r12,hi16(EXT(per_proc_info)) ; Set base per_proc
1509 mulli r11,r11,ppSize ; Find offset to the owner per_proc
1510 ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
1511 li r10,VMXowner ; Displacement to vector owner
1512 add r11,r12,r11 ; Point to the owner per_proc
9bccf70c
A
1513
1514vtinvothr: lwarx r12,r10,r11 ; Get the owner
9bccf70c 1515
55e303ae
A
1516 sub r0,r12,r3 ; Subtract one from the other
1517 sub r8,r3,r12 ; Subtract the other from the one
1518 or r8,r8,r0 ; Combine them
 1519 srawi r8,r8,31 ; Get a 0 if equal or -1 if not
1520 and r12,r12,r8 ; Make 0 if same, unchanged if not
1521 stwcx. r12,r10,r11 ; Try to invalidate it
1522 bne-- vtinvothr ; Try again if there was a collision...
1523
1524
1525 beqlr++ cr1 ; Leave if there is no savearea
9bccf70c
A
1526 lwz r8,SAVlevel(r9) ; Get the level of the savearea
1527 cmplw r8,r11 ; Savearea for the current level?
55e303ae 1528 bnelr++ ; No, nothing to release...
9bccf70c 1529
55e303ae 1530 lwz r8,SAVprev+4(r9) ; Pick up the previous area
9bccf70c 1531 mr. r8,r8 ; Is there a previous?
55e303ae 1532 beq-- vtnoprev ; Nope...
9bccf70c
A
1533 lwz r7,SAVlevel(r8) ; Get the level associated with save
1534
1535vtnoprev: stw r8,VMXsave(r3) ; Dequeue this savearea
1536 stw r7,VMXlevel(r3) ; Pop the level
1537
1538 mr r3,r9 ; Get the savearea to release
1539 b EXT(save_ret) ; Go and toss the save area (note, we will return from there)...
1540#endif
1541
1542;
1543; Just some test code to force vector and/or floating point in the kernel
1544;
1545
1546 .align 5
1547 .globl EXT(fctx_test)
1c79356b 1548
9bccf70c
A
1549LEXT(fctx_test)
1550
55e303ae 1551 mfsprg r3,1 ; Get the current thread
9bccf70c
A
1552 mr. r3,r3 ; Are we actually up and running?
1553 beqlr- ; No...
1554
1555 fmr f0,f0 ; Use floating point
1556 mftb r4 ; Get time base for a random number
1557 li r5,1 ; Get a potential vrsave to use
1558 andi. r4,r4,0x3F ; Get a number from 0 - 63
1559 slw r5,r5,r4 ; Choose a register to save (should be 0 half the time)
1560 mtspr vrsave,r5 ; Set VRSave
1561 vor v0,v0,v0 ; Use vectors
1562 blr
55e303ae
A
1563
1564
1565// *******************
1566// * f p _ s t o r e *
1567// *******************
1568//
1569// Store FPRs into a save area. Called by fpu_save and fpu_switch.
1570//
1571// When called:
1572// floating pt is enabled
1573// r3 = ptr to save area
1574//
1575// We destroy:
1576// r11.
1577
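// Illustrative C sketch (not kernel source): where fp_store establishes destination
// cache lines with dcbz before filling them. It assumes the savefp0..savefp31 slots
// are contiguous 8-byte doublewords starting on a cache-line boundary, which is what
// the dcbz points in the code below imply. Zeroing a line that is about to be fully
// overwritten avoids reading its old contents in from memory.
//
//	#include <stdio.h>
//
//	int main(void)
//	{
//		const int slot = 8;                       /* bytes per stfd            */
//		const int linesizes[2] = { 128, 32 };
//		for (int i = 0; i < 2; i++) {
//			printf("%3d-byte lines: dcbz at", linesizes[i]);
//			for (int fpr = 0; fpr < 32; fpr++)
//				if ((fpr * slot) % linesizes[i] == 0)
//					printf(" savefp%d", fpr);     /* a new line starts here    */
//			printf("\n");
//		}
//		return 0;
//	}
//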
1578fp_store:
1579 mfsprg r11,2 ; get feature flags
1580 mtcrf 0x02,r11 ; put cache line size bits in cr6
1581 la r11,savefp0(r3) ; point to 1st line
1582 dcbz128 0,r11 ; establish 1st line no matter what linesize is
1583 bt-- pf32Byteb,fp_st32 ; skip if a 32-byte machine
1584
1585// Store the FPRs on a 128-byte machine.
1586
1587 stfd f0,savefp0(r3)
1588 stfd f1,savefp1(r3)
1589 la r11,savefp16(r3) ; Point to the 2nd cache line
1590 stfd f2,savefp2(r3)
1591 stfd f3,savefp3(r3)
1592 dcbz128 0,r11 ; establish 2nd line
1593 stfd f4,savefp4(r3)
1594 stfd f5,savefp5(r3)
1595 stfd f6,savefp6(r3)
1596 stfd f7,savefp7(r3)
1597 stfd f8,savefp8(r3)
1598 stfd f9,savefp9(r3)
1599 stfd f10,savefp10(r3)
1600 stfd f11,savefp11(r3)
1601 stfd f12,savefp12(r3)
1602 stfd f13,savefp13(r3)
1603 stfd f14,savefp14(r3)
1604 stfd f15,savefp15(r3)
1605 stfd f16,savefp16(r3)
1606 stfd f17,savefp17(r3)
1607 stfd f18,savefp18(r3)
1608 stfd f19,savefp19(r3)
1609 stfd f20,savefp20(r3)
1610 stfd f21,savefp21(r3)
1611 stfd f22,savefp22(r3)
1612 stfd f23,savefp23(r3)
1613 stfd f24,savefp24(r3)
1614 stfd f25,savefp25(r3)
1615 stfd f26,savefp26(r3)
1616 stfd f27,savefp27(r3)
1617 stfd f28,savefp28(r3)
1618 stfd f29,savefp29(r3)
1619 stfd f30,savefp30(r3)
1620 stfd f31,savefp31(r3)
1621 blr
1622
1623// Store FPRs on a 32-byte machine.
1624
1625fp_st32:
1626 la r11,savefp4(r3) ; Point to the 2nd line
1627 stfd f0,savefp0(r3)
1628 dcbz 0,r11 ; Allocate cache
1629 stfd f1,savefp1(r3)
1630 stfd f2,savefp2(r3)
1631 la r11,savefp8(r3) ; Point to the 3rd line
1632 stfd f3,savefp3(r3)
1633 dcbz 0,r11 ; Allocate cache
1634 stfd f4,savefp4(r3)
1635 stfd f5,savefp5(r3)
1636 stfd f6,savefp6(r3)
1637 la r11,savefp12(r3) ; Point to the 4th line
1638 stfd f7,savefp7(r3)
1639 dcbz 0,r11 ; Allocate cache
1640 stfd f8,savefp8(r3)
1641 stfd f9,savefp9(r3)
1642 stfd f10,savefp10(r3)
1643 la r11,savefp16(r3) ; Point to the 5th line
1644 stfd f11,savefp11(r3)
1645 dcbz 0,r11 ; Allocate cache
1646 stfd f12,savefp12(r3)
1647 stfd f13,savefp13(r3)
1648 stfd f14,savefp14(r3)
1649 la r11,savefp20(r3) ; Point to the 6th line
1650 stfd f15,savefp15(r3)
1651 dcbz 0,r11 ; Allocate cache
1652 stfd f16,savefp16(r3)
1653 stfd f17,savefp17(r3)
1654 stfd f18,savefp18(r3)
1655 la r11,savefp24(r3) ; Point to the 7th line
1656 stfd f19,savefp19(r3)
1657 dcbz 0,r11 ; Allocate cache
1658 stfd f20,savefp20(r3)
1659
1660 stfd f21,savefp21(r3)
1661 stfd f22,savefp22(r3)
1662 la r11,savefp28(r3) ; Point to the 8th line
1663 stfd f23,savefp23(r3)
1664 dcbz 0,r11 ; allocate it
1665 stfd f24,savefp24(r3)
1666 stfd f25,savefp25(r3)
1667 stfd f26,savefp26(r3)
1668 stfd f27,savefp27(r3)
1669
1670 stfd f28,savefp28(r3)
1671 stfd f29,savefp29(r3)
1672 stfd f30,savefp30(r3)
1673 stfd f31,savefp31(r3)
1674 blr
1675
1676
1677// *******************
1678// * v r _ s t o r e *
1679// *******************
1680//
1681// Store VRs into savearea, according to bits set in passed vrsave bitfield. This routine is used
1682// both by vec_save and vec_switch. In order to minimize conditional branches and touching in
1683// unnecessary cache blocks, we either save all or none of the VRs in a block. We have separate paths
1684// for each cache block size.
1685//
1686// When called:
1687// interrupts are off, vectors are enabled
1688// r3 = ptr to save area
1689// r10 = vrsave (not 0)
1690//
1691// We destroy:
1692// r4 - r11, all CRs.
1693
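// Illustrative C sketch (not kernel source) of the vrsave "smearing" done at vr_store
// entry and in the 128-byte path below: OR-ing shifted copies of the mask so that the
// first bit of each pair, quad, or octet says whether any VR in that group is live.
// vr_store then stores either all or none of a cache line's registers.
//
//	#include <stdint.h>
//	#include <stdio.h>
//
//	int main(void)
//	{
//		int live[] = { 9, 15, 30 };               /* pretend these VRs are live */
//		uint32_t vrsave = 0;
//		for (int i = 0; i < 3; i++)
//			vrsave |= 1u << (31 - live[i]);       /* PPC bit 0 = v0 = C bit 31  */
//
//		uint32_t pairs  = vrsave | (vrsave << 1); /* slwi 1 + or                */
//		uint32_t quads  = pairs  | (pairs  << 2); /* slwi 2 + or                */
//		uint32_t octets = quads  | (quads  << 4); /* slwi 4 + or                */
//
//		printf("vrsave %08x pairs %08x quads %08x octets %08x\n",
//		       vrsave, pairs, quads, octets);
//		printf("line v16-v23 needs saving: %u\n", (octets >> (31 - 16)) & 1);
//		return 0;
//	}
//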
1694vr_store:
1695 mfsprg r9,2 ; get feature flags
1696 stw r10,savevrvalid(r3) ; Save the validity information in savearea
1697 slwi r8,r10,1 ; Shift over 1
1698 mtcrf 0x02,r9 ; put cache line size bits in cr6 where we can test
1699 or r8,r10,r8 ; r8 <- even bits show which pairs are in use
1700 bt-- pf32Byteb,vr_st32 ; skip if 32-byte cacheline processor
1701
1702
1703; Save vectors on a 128-byte linesize processor. We save all or none of the 8 registers in each of
1704; the four cache lines. This minimizes mispredicted branches yet handles cache lines optimally.
1705
1706 slwi r7,r8,2 ; shift groups-of-2 over by 2
1707 li r4,16 ; load offsets for X-form stores
1708 or r8,r7,r8 ; show if any in group of 4 are in use
1709 li r5,32
1710 slwi r7,r8,4 ; shift groups-of-4 over by 4
1711 li r6,48
1712 or r11,r7,r8 ; show if any in group of 8 are in use
1713 li r7,64
1714 mtcrf 0x80,r11 ; set CRs one at a time (faster)
1715 li r8,80
1716 mtcrf 0x20,r11
1717 li r9,96
1718 mtcrf 0x08,r11
1719 li r10,112
1720 mtcrf 0x02,r11
1721
1722 bf 0,vr_st64b ; skip if none of vr0-vr7 are in use
1723 la r11,savevr0(r3) ; get address of this group of registers in save area
1724 dcbz128 0,r11 ; zero the line
1725 stvxl v0,0,r11 ; save 8 VRs in the line
1726 stvxl v1,r4,r11
1727 stvxl v2,r5,r11
1728 stvxl v3,r6,r11
1729 stvxl v4,r7,r11
1730 stvxl v5,r8,r11
1731 stvxl v6,r9,r11
1732 stvxl v7,r10,r11
1733
1734vr_st64b:
1735 bf 8,vr_st64c ; skip if none of vr8-vr15 are in use
1736 la r11,savevr8(r3) ; get address of this group of registers in save area
1737 dcbz128 0,r11 ; zero the line
1738 stvxl v8,0,r11 ; save 8 VRs in the line
1739 stvxl v9,r4,r11
1740 stvxl v10,r5,r11
1741 stvxl v11,r6,r11
1742 stvxl v12,r7,r11
1743 stvxl v13,r8,r11
1744 stvxl v14,r9,r11
1745 stvxl v15,r10,r11
1746
1747vr_st64c:
1748 bf 16,vr_st64d ; skip if none of vr16-vr23 are in use
1749 la r11,savevr16(r3) ; get address of this group of registers in save area
1750 dcbz128 0,r11 ; zero the line
1751 stvxl v16,0,r11 ; save 8 VRs in the line
1752 stvxl v17,r4,r11
1753 stvxl v18,r5,r11
1754 stvxl v19,r6,r11
1755 stvxl v20,r7,r11
1756 stvxl v21,r8,r11
1757 stvxl v22,r9,r11
1758 stvxl v23,r10,r11
1759
1760vr_st64d:
1761 bflr 24 ; done if none of vr24-vr31 are in use
1762 la r11,savevr24(r3) ; get address of this group of registers in save area
1763 dcbz128 0,r11 ; zero the line
1764 stvxl v24,0,r11 ; save 8 VRs in the line
1765 stvxl v25,r4,r11
1766 stvxl v26,r5,r11
1767 stvxl v27,r6,r11
1768 stvxl v28,r7,r11
1769 stvxl v29,r8,r11
1770 stvxl v30,r9,r11
1771 stvxl v31,r10,r11
1772 blr
1773
1774; Save vectors on a 32-byte linesize processor. We save in 16 groups of 2: we either save both
1775; or neither in each group. This cuts down on conditional branches.
1776; r8 = bitmask with bit n set (for even n) if either of that pair of VRs is in use
1777; r3 = savearea
1778
1779vr_st32:
1780 mtcrf 0xFF,r8 ; set CR bits so we can branch on them
1781 li r4,16 ; load offset for X-form stores
1782
1783 bf 0,vr_st32b ; skip if neither VR in this pair is in use
1784 la r11,savevr0(r3) ; get address of this group of registers in save area
1785 dcba 0,r11 ; establish the line wo reading it
1786 stvxl v0,0,r11 ; save the two VRs in the line
1787 stvxl v1,r4,r11
1788
1789vr_st32b:
1790 bf 2,vr_st32c ; skip if neither VR in this pair is in use
1791 la r11,savevr2(r3) ; get address of this group of registers in save area
1792 dcba 0,r11 ; establish the line wo reading it
1793 stvxl v2,0,r11 ; save the two VRs in the line
1794 stvxl v3,r4,r11
1795
1796vr_st32c:
1797 bf 4,vr_st32d ; skip if neither VR in this pair is in use
1798 la r11,savevr4(r3) ; get address of this group of registers in save area
1799 dcba 0,r11 ; establish the line wo reading it
1800 stvxl v4,0,r11 ; save the two VRs in the line
1801 stvxl v5,r4,r11
1802
1803vr_st32d:
1804 bf 6,vr_st32e ; skip if neither VR in this pair is in use
1805 la r11,savevr6(r3) ; get address of this group of registers in save area
1806 dcba 0,r11 ; establish the line wo reading it
1807 stvxl v6,0,r11 ; save the two VRs in the line
1808 stvxl v7,r4,r11
1809
1810vr_st32e:
1811 bf 8,vr_st32f ; skip if neither VR in this pair is in use
1812 la r11,savevr8(r3) ; get address of this group of registers in save area
1813 dcba 0,r11 ; establish the line wo reading it
1814 stvxl v8,0,r11 ; save the two VRs in the line
1815 stvxl v9,r4,r11
1816
1817vr_st32f:
1818 bf 10,vr_st32g ; skip if neither VR in this pair is in use
1819 la r11,savevr10(r3) ; get address of this group of registers in save area
1820 dcba 0,r11 ; establish the line wo reading it
1821 stvxl v10,0,r11 ; save the two VRs in the line
1822 stvxl v11,r4,r11
1823
1824vr_st32g:
1825 bf 12,vr_st32h ; skip if neither VR in this pair is in use
1826 la r11,savevr12(r3) ; get address of this group of registers in save area
1827 dcba 0,r11 ; establish the line wo reading it
1828 stvxl v12,0,r11 ; save the two VRs in the line
1829 stvxl v13,r4,r11
1830
1831vr_st32h:
1832 bf 14,vr_st32i ; skip if neither VR in this pair is in use
1833 la r11,savevr14(r3) ; get address of this group of registers in save area
1834 dcba 0,r11 ; establish the line wo reading it
1835 stvxl v14,0,r11 ; save the two VRs in the line
1836 stvxl v15,r4,r11
1837
1838vr_st32i:
1839 bf 16,vr_st32j ; skip if neither VR in this pair is in use
1840 la r11,savevr16(r3) ; get address of this group of registers in save area
1841 dcba 0,r11 ; establish the line wo reading it
1842 stvxl v16,0,r11 ; save the two VRs in the line
1843 stvxl v17,r4,r11
1844
1845vr_st32j:
1846 bf 18,vr_st32k ; skip if neither VR in this pair is in use
1847 la r11,savevr18(r3) ; get address of this group of registers in save area
1848 dcba 0,r11 ; establish the line wo reading it
1849 stvxl v18,0,r11 ; save the two VRs in the line
1850 stvxl v19,r4,r11
1851
1852vr_st32k:
1853 bf 20,vr_st32l ; skip if neither VR in this pair is in use
1854 la r11,savevr20(r3) ; get address of this group of registers in save area
1855 dcba 0,r11 ; establish the line wo reading it
1856 stvxl v20,0,r11 ; save the two VRs in the line
1857 stvxl v21,r4,r11
1858
1859vr_st32l:
1860 bf 22,vr_st32m ; skip if neither VR in this pair is in use
1861 la r11,savevr22(r3) ; get address of this group of registers in save area
1862 dcba 0,r11 ; establish the line wo reading it
1863 stvxl v22,0,r11 ; save the two VRs in the line
1864 stvxl v23,r4,r11
1865
1866vr_st32m:
1867 bf 24,vr_st32n ; skip if neither VR in this pair is in use
1868 la r11,savevr24(r3) ; get address of this group of registers in save area
1869 dcba 0,r11 ; establish the line wo reading it
1870 stvxl v24,0,r11 ; save the two VRs in the line
1871 stvxl v25,r4,r11
1872
1873vr_st32n:
1874 bf 26,vr_st32o ; skip if neither VR in this pair is in use
1875 la r11,savevr26(r3) ; get address of this group of registers in save area
1876 dcba 0,r11 ; establish the line wo reading it
1877 stvxl v26,0,r11 ; save the two VRs in the line
1878 stvxl v27,r4,r11
1879
1880vr_st32o:
1881 bf 28,vr_st32p ; skip if neither VR in this pair is in use
1882 la r11,savevr28(r3) ; get address of this group of registers in save area
1883 dcba 0,r11 ; establish the line wo reading it
1884 stvxl v28,0,r11 ; save the two VRs in the line
1885 stvxl v29,r4,r11
1886
1887vr_st32p:
1888 bflr 30 ; done if neither VR in this pair is in use
1889 la r11,savevr30(r3) ; get address of this group of registers in save area
1890 dcba 0,r11 ; establish the line wo reading it
1891 stvxl v30,0,r11 ; save the two VRs in the line
1892 stvxl v31,r4,r11
1893 blr
1894
1895
1896// *****************
1897// * v r _ l o a d *
1898// *****************
1899//
1900// Load live VRs from a savearea, according to bits set in a passed vector. This is the reverse
1901// of "vr_store". Like it, we avoid touching unnecessary cache blocks and minimize conditional
1902// branches by loading all VRs from a cache line, if we have to load any. If we don't load the VRs
1903// in a cache line, we bug them. Note that this behavior is slightly different from earlier kernels,
1904// which would bug all VRs that aren't live.
1905//
1906// When called:
1907// interrupts are off, vectors are enabled
1908// r3 = ptr to save area
1909// r10 = vector of live regs to load (ie, savevrsave & savevrvalid, may be 0)
1910// v31 = bugbug constant (0x7FFFDEAD7FFFDEAD7FFFDEAD7FFFDEAD)
1911//
1912// We destroy:
1913// r4 - r11, all CRs.
1914
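// Illustrative C sketch (not kernel source) of the per-line policy described above:
// if any VR in a cache-line group is live, load the whole group from the savearea;
// otherwise fill the group with the 0x7FFFDEAD bugbug pattern so stale values are
// easy to spot. The 16-byte VRs are modelled as arrays of 4 words; regs_per_line is
// 2 on 32-byte lines and 8 on 128-byte lines. line_mask plays the role of r5, the
// per-line mask built by the touch-in loop below. Names here are illustrative only.
//
//	#include <stdint.h>
//	#include <string.h>
//
//	static void load_or_bug(uint32_t vr[32][4], const uint32_t save[32][4],
//	                        uint32_t line_mask, int regs_per_line)
//	{
//		for (int n = 0; n < 32; n += regs_per_line) {
//			/* only the bit of the first VR in the line is tested, because the
//			   mask was already smeared across every register in the line */
//			int line_live = (line_mask >> (31 - n)) & 1;
//			for (int i = 0; i < regs_per_line; i++) {
//				if (line_live)
//					memcpy(vr[n + i], save[n + i], 16);
//				else
//					for (int w = 0; w < 4; w++) vr[n + i][w] = 0x7FFFDEAD;
//			}
//		}
//	}
//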
1915vr_load:
1916 mfsprg r9,2 ; get feature flags
1917 li r6,1 ; assuming 32-byte, get (#VRs)-1 in a cacheline
1918 mtcrf 0x02,r9 ; set cache line size bits in cr6
1919 lis r7,0xC000 ; assuming 32-byte, set bits 0-1
 1920 bt-- pf32Byteb,vr_ld0 ; skip if 32-byte cacheline processor
1921 li r6,7 ; 128-byte machines have 8 VRs in a cacheline
1922 lis r7,0xFF00 ; so set bits 0-7
1923
1924// Loop touching in cache blocks we will load from.
1925// r3 = savearea ptr
 1926 // r5 = bits are set for the VRs we will be loading
1927// r6 = 1 if 32-byte, 7 if 128-byte
1928// r7 = 0xC0000000 if 32-byte, 0xFF000000 if 128-byte
1929// r10 = live VR bits
1930// v31 = bugbug constant
1931
1932vr_ld0:
1933 li r5,0 ; initialize set of VRs to load
1934 la r11,savevr0(r3) ; get address of register file
1935 b vr_ld2 ; enter loop in middle
1936
1937 .align 5
1938vr_ld1: ; loop over each cache line we will load
1939 dcbt r4,r11 ; start prefetch of the line
1940 andc r10,r10,r9 ; turn off the bits in this line
1941 or r5,r5,r9 ; we will load all these
1942vr_ld2: ; initial entry pt
1943 cntlzw r4,r10 ; get offset to next live VR
1944 andc r4,r4,r6 ; cacheline align it
1945 srw. r9,r7,r4 ; position bits for VRs in that cache line
1946 slwi r4,r4,4 ; get byte offset within register file to that line
1947 bne vr_ld1 ; loop if more bits in r10
1948
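// Illustrative C sketch (not kernel source) of the vr_ld1/vr_ld2 loop above, using the
// GCC/Clang __builtin_clz in place of cntlzw: walk the live-VR mask, round each hit
// down to a cache-line boundary, and accumulate the mask of registers (r5) in lines
// that must be loaded. The dcbt prefetch of each discovered line has no portable C
// equivalent and is omitted.
//
//	#include <stdint.h>
//
//	static uint32_t lines_to_load(uint32_t live, int vrs_per_line)
//	{
//		uint32_t line_bits = 0;                   /* r5                          */
//		while (live != 0) {
//			int vr = __builtin_clz(live);         /* cntlzw: next live VR        */
//			vr &= ~(vrs_per_line - 1);            /* andc: align to line start   */
//			uint32_t group =                      /* srw of 0xC0000000/0xFF000000 */
//			    (~0u << (32 - vrs_per_line)) >> vr;
//			line_bits |= group;                   /* or: mark the whole line     */
//			live &= ~group;                       /* andc: strip bits just done  */
//		}
//		return line_bits;
//	}
//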
1949 bf-- pf128Byteb,vr_ld32 ; skip if not 128-byte lines
1950
1951// Handle a processor with 128-byte cache lines. Four groups of 8 VRs.
1952// r3 = savearea ptr
1953// r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded
1954// r11 = addr(savevr0)
1955// v31 = bugbug constant
1956
1957 mtcrf 0x80,r5 ; set up bits for conditional branches
 1958 li r4,16 ; load offsets for X-form loads
1959 li r6,48
 1960 mtcrf 0x20,r5 ; load CRs one at a time, which is faster
1961 li r7,64
1962 li r8,80
1963 mtcrf 0x08,r5
1964 li r9,96
1965 li r10,112
1966 mtcrf 0x02,r5
1967 li r5,32
1968
1969 bt 0,vr_ld128a ; skip if this line must be loaded
1970 vor v0,v31,v31 ; no VR must be loaded, so bug them all
1971 vor v1,v31,v31
1972 vor v2,v31,v31
1973 vor v3,v31,v31
1974 vor v4,v31,v31
1975 vor v5,v31,v31
1976 vor v6,v31,v31
1977 vor v7,v31,v31
1978 b vr_ld128b
1979vr_ld128a: ; must load from this line
1980 lvxl v0,0,r11
1981 lvxl v1,r4,r11
1982 lvxl v2,r5,r11
1983 lvxl v3,r6,r11
1984 lvxl v4,r7,r11
1985 lvxl v5,r8,r11
1986 lvxl v6,r9,r11
1987 lvxl v7,r10,r11
1988
1989vr_ld128b: ; here to handle next cache line
1990 la r11,savevr8(r3) ; load offset to it
1991 bt 8,vr_ld128c ; skip if this line must be loaded
1992 vor v8,v31,v31 ; no VR must be loaded, so bug them all
1993 vor v9,v31,v31
1994 vor v10,v31,v31
1995 vor v11,v31,v31
1996 vor v12,v31,v31
1997 vor v13,v31,v31
1998 vor v14,v31,v31
1999 vor v15,v31,v31
2000 b vr_ld128d
2001vr_ld128c: ; must load from this line
2002 lvxl v8,0,r11
2003 lvxl v9,r4,r11
2004 lvxl v10,r5,r11
2005 lvxl v11,r6,r11
2006 lvxl v12,r7,r11
2007 lvxl v13,r8,r11
2008 lvxl v14,r9,r11
2009 lvxl v15,r10,r11
2010
2011vr_ld128d: ; here to handle next cache line
2012 la r11,savevr16(r3) ; load offset to it
2013 bt 16,vr_ld128e ; skip if this line must be loaded
2014 vor v16,v31,v31 ; no VR must be loaded, so bug them all
2015 vor v17,v31,v31
2016 vor v18,v31,v31
2017 vor v19,v31,v31
2018 vor v20,v31,v31
2019 vor v21,v31,v31
2020 vor v22,v31,v31
2021 vor v23,v31,v31
2022 b vr_ld128f
2023vr_ld128e: ; must load from this line
2024 lvxl v16,0,r11
2025 lvxl v17,r4,r11
2026 lvxl v18,r5,r11
2027 lvxl v19,r6,r11
2028 lvxl v20,r7,r11
2029 lvxl v21,r8,r11
2030 lvxl v22,r9,r11
2031 lvxl v23,r10,r11
2032
2033vr_ld128f: ; here to handle next cache line
2034 la r11,savevr24(r3) ; load offset to it
2035 bt 24,vr_ld128g ; skip if this line must be loaded
2036 vor v24,v31,v31 ; no VR must be loaded, so bug them all
2037 vor v25,v31,v31
2038 vor v26,v31,v31
2039 vor v27,v31,v31
2040 vor v28,v31,v31
2041 vor v29,v31,v31
2042 vor v30,v31,v31
2043 blr
2044vr_ld128g: ; must load from this line
2045 lvxl v24,0,r11
2046 lvxl v25,r4,r11
2047 lvxl v26,r5,r11
2048 lvxl v27,r6,r11
2049 lvxl v28,r7,r11
2050 lvxl v29,r8,r11
2051 lvxl v30,r9,r11
2052 lvxl v31,r10,r11
2053 blr
2054
2055// Handle a processor with 32-byte cache lines. Sixteen groups of two VRs.
2056// r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded
2057// r11 = addr(savevr0)
2058
2059vr_ld32:
2060 mtcrf 0xFF,r5 ; set up bits for conditional branches
 2061 li r4,16 ; load offset for X-form loads
2062
2063 bt 0,vr_ld32load0 ; skip if we must load this line
2064 vor v0,v31,v31 ; neither VR is live, so bug them both
2065 vor v1,v31,v31
2066 b vr_ld32test2
2067vr_ld32load0: ; must load VRs in this line
2068 lvxl v0,0,r11
2069 lvxl v1,r4,r11
2070
2071vr_ld32test2: ; here to handle next cache line
2072 la r11,savevr2(r3) ; get offset to next cache line
2073 bt 2,vr_ld32load2 ; skip if we must load this line
2074 vor v2,v31,v31 ; neither VR is live, so bug them both
2075 vor v3,v31,v31
2076 b vr_ld32test4
2077vr_ld32load2: ; must load VRs in this line
2078 lvxl v2,0,r11
2079 lvxl v3,r4,r11
2080
2081vr_ld32test4: ; here to handle next cache line
2082 la r11,savevr4(r3) ; get offset to next cache line
2083 bt 4,vr_ld32load4 ; skip if we must load this line
2084 vor v4,v31,v31 ; neither VR is live, so bug them both
2085 vor v5,v31,v31
2086 b vr_ld32test6
2087vr_ld32load4: ; must load VRs in this line
2088 lvxl v4,0,r11
2089 lvxl v5,r4,r11
2090
2091vr_ld32test6: ; here to handle next cache line
2092 la r11,savevr6(r3) ; get offset to next cache line
2093 bt 6,vr_ld32load6 ; skip if we must load this line
2094 vor v6,v31,v31 ; neither VR is live, so bug them both
2095 vor v7,v31,v31
2096 b vr_ld32test8
2097vr_ld32load6: ; must load VRs in this line
2098 lvxl v6,0,r11
2099 lvxl v7,r4,r11
2100
2101vr_ld32test8: ; here to handle next cache line
2102 la r11,savevr8(r3) ; get offset to next cache line
2103 bt 8,vr_ld32load8 ; skip if we must load this line
2104 vor v8,v31,v31 ; neither VR is live, so bug them both
2105 vor v9,v31,v31
2106 b vr_ld32test10
2107vr_ld32load8: ; must load VRs in this line
2108 lvxl v8,0,r11
2109 lvxl v9,r4,r11
2110
2111vr_ld32test10: ; here to handle next cache line
2112 la r11,savevr10(r3) ; get offset to next cache line
2113 bt 10,vr_ld32load10 ; skip if we must load this line
2114 vor v10,v31,v31 ; neither VR is live, so bug them both
2115 vor v11,v31,v31
2116 b vr_ld32test12
2117vr_ld32load10: ; must load VRs in this line
2118 lvxl v10,0,r11
2119 lvxl v11,r4,r11
2120
2121vr_ld32test12: ; here to handle next cache line
2122 la r11,savevr12(r3) ; get offset to next cache line
2123 bt 12,vr_ld32load12 ; skip if we must load this line
2124 vor v12,v31,v31 ; neither VR is live, so bug them both
2125 vor v13,v31,v31
2126 b vr_ld32test14
2127vr_ld32load12: ; must load VRs in this line
2128 lvxl v12,0,r11
2129 lvxl v13,r4,r11
2130
2131vr_ld32test14: ; here to handle next cache line
2132 la r11,savevr14(r3) ; get offset to next cache line
2133 bt 14,vr_ld32load14 ; skip if we must load this line
2134 vor v14,v31,v31 ; neither VR is live, so bug them both
2135 vor v15,v31,v31
2136 b vr_ld32test16
2137vr_ld32load14: ; must load VRs in this line
2138 lvxl v14,0,r11
2139 lvxl v15,r4,r11
2140
2141vr_ld32test16: ; here to handle next cache line
2142 la r11,savevr16(r3) ; get offset to next cache line
2143 bt 16,vr_ld32load16 ; skip if we must load this line
2144 vor v16,v31,v31 ; neither VR is live, so bug them both
2145 vor v17,v31,v31
2146 b vr_ld32test18
2147vr_ld32load16: ; must load VRs in this line
2148 lvxl v16,0,r11
2149 lvxl v17,r4,r11
2150
2151vr_ld32test18: ; here to handle next cache line
2152 la r11,savevr18(r3) ; get offset to next cache line
2153 bt 18,vr_ld32load18 ; skip if we must load this line
2154 vor v18,v31,v31 ; neither VR is live, so bug them both
2155 vor v19,v31,v31
2156 b vr_ld32test20
2157vr_ld32load18: ; must load VRs in this line
2158 lvxl v18,0,r11
2159 lvxl v19,r4,r11
2160
2161vr_ld32test20: ; here to handle next cache line
2162 la r11,savevr20(r3) ; get offset to next cache line
2163 bt 20,vr_ld32load20 ; skip if we must load this line
2164 vor v20,v31,v31 ; neither VR is live, so bug them both
2165 vor v21,v31,v31
2166 b vr_ld32test22
2167vr_ld32load20: ; must load VRs in this line
2168 lvxl v20,0,r11
2169 lvxl v21,r4,r11
2170
2171vr_ld32test22: ; here to handle next cache line
2172 la r11,savevr22(r3) ; get offset to next cache line
2173 bt 22,vr_ld32load22 ; skip if we must load this line
2174 vor v22,v31,v31 ; neither VR is live, so bug them both
2175 vor v23,v31,v31
2176 b vr_ld32test24
2177vr_ld32load22: ; must load VRs in this line
2178 lvxl v22,0,r11
2179 lvxl v23,r4,r11
2180
2181vr_ld32test24: ; here to handle next cache line
2182 la r11,savevr24(r3) ; get offset to next cache line
2183 bt 24,vr_ld32load24 ; skip if we must load this line
2184 vor v24,v31,v31 ; neither VR is live, so bug them both
2185 vor v25,v31,v31
2186 b vr_ld32test26
2187vr_ld32load24: ; must load VRs in this line
2188 lvxl v24,0,r11
2189 lvxl v25,r4,r11
2190
2191vr_ld32test26: ; here to handle next cache line
2192 la r11,savevr26(r3) ; get offset to next cache line
2193 bt 26,vr_ld32load26 ; skip if we must load this line
2194 vor v26,v31,v31 ; neither VR is live, so bug them both
2195 vor v27,v31,v31
2196 b vr_ld32test28
2197vr_ld32load26: ; must load VRs in this line
2198 lvxl v26,0,r11
2199 lvxl v27,r4,r11
2200
2201vr_ld32test28: ; here to handle next cache line
2202 la r11,savevr28(r3) ; get offset to next cache line
2203 bt 28,vr_ld32load28 ; skip if we must load this line
2204 vor v28,v31,v31 ; neither VR is live, so bug them both
2205 vor v29,v31,v31
2206 b vr_ld32test30
2207vr_ld32load28: ; must load VRs in this line
2208 lvxl v28,0,r11
2209 lvxl v29,r4,r11
2210
2211vr_ld32test30: ; here to handle next cache line
2212 la r11,savevr30(r3) ; get offset to next cache line
2213 bt 30,vr_ld32load30 ; skip if we must load this line
2214 vor v30,v31,v31 ; neither VR is live, so bug them both
2215 blr
2216vr_ld32load30: ; must load VRs in this line
2217 lvxl v30,0,r11
2218 lvxl v31,r4,r11
2219 blr