apple/xnu (xnu-517): osfmk/ppc/cswtch.s
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
43866e37 6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
1c79356b 7 *
43866e37
A
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
1c79356b
A
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
43866e37
A
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
1c79356b
A
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25/*
26 * @OSF_COPYRIGHT@
27 */
28
29#include <ppc/asm.h>
30#include <ppc/proc_reg.h>
31#include <cpus.h>
32#include <assym.s>
33#include <debug.h>
34#include <mach/ppc/vm_param.h>
35#include <ppc/exception.h>
9bccf70c 36#include <ppc/savearea.h>
1c79356b
A
37
38#define FPVECDBG 0
39#define GDDBG 0
40
41 .text
42
43/*
55e303ae 44 * void machine_load_context(thread_t thread)
1c79356b 45 *
55e303ae
A
46 * Load the context for the first thread to run on a
47 * cpu, and go.
1c79356b
A
48 */
49
9bccf70c 50 .align 5
55e303ae 51 .globl EXT(machine_load_context)
1c79356b 52
55e303ae 53LEXT(machine_load_context)
1c79356b
A
54 mfsprg r6,0
55 lwz r0,PP_INTSTACK_TOP_SS(r6)
1c79356b 56 stw r0,PP_ISTACKPTR(r6)
55e303ae
A
57 lwz r9,THREAD_TOP_ACT(r3) /* Set up the current thread */
58 mtsprg 1,r9
1c79356b 59 li r0,0 /* Clear a register */
55e303ae 60 lwz r3,ACT_MACT_PCB(r9) /* Get the savearea used */
1c79356b 61 mfmsr r5 /* Since we are passing control, get our MSR values */
55e303ae
A
62 lwz r11,SAVprev+4(r3) /* Get the previous savearea */
63 lwz r1,saver1+4(r3) /* Load new stack pointer */
64 stw r0,saver3+4(r3) /* Make sure we pass in a 0 for the continuation */
1c79356b 65 stw r0,FM_BACKPTR(r1) /* zero backptr */
55e303ae 66 stw r5,savesrr1+4(r3) /* Pass our MSR to the new guy */
1c79356b 67 stw r11,ACT_MACT_PCB(r9) /* Unstack our savearea */
55e303ae
A
68 stw r0,ACT_PREEMPT_CNT(r9) /* Enable preemption */
69 b EXT(exception_exit) /* Go for it */
1c79356b 70
55e303ae
A
71/* thread_t Switch_context(thread_t old,
72 * void (*cont)(void),
73 * thread_t new)
1c79356b
A
74 *
75 * Switch from one thread to another. If a continuation is supplied, then
76 * we do not need to save callee save registers.
77 *
78 */
79
80/* void Call_continuation( void (*continuation)(void), vm_offset_t stack_ptr)
81 */
82
9bccf70c
A
83 .align 5
84 .globl EXT(Call_continuation)
85
86LEXT(Call_continuation)
87
88 mtlr r3
89 mr r1, r4 /* Load new stack pointer */
90 blr /* Jump to the continuation */
1c79356b
A
91
92/*
93 * Get the old kernel stack, and store into the thread structure.
94 * See if a continuation is supplied, and skip state save if so.
55e303ae
A
95 *
96 * Note that interrupts must be disabled before we get here (i.e., splsched)
1c79356b
A
97 */
98
99/* Context switches are double jumps. We pass the following to the
100 * context switch firmware call:
101 *
55e303ae 102 * R3 = switchee's savearea, virtual if continuation, low order physical for full switch
1c79356b
A
103 * R4 = old thread
104 * R5 = new SRR0
105 * R6 = new SRR1
55e303ae 106 * R7 = high order physical address of savearea for full switch
1c79356b
A
107 *
108 * savesrr0 is set to go to switch_in
109 * savesrr1 is set to uninterruptible with translation on
110 */
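;
; For orientation, the same interface rendered as a C-style sketch (added here;
; the struct and field names are illustrative, not a real kernel type):
;
;   #include <stdint.h>
;
;   struct switch_fw_args {            /* what the context switch firmware call sees */
;       uint32_t savearea;             /* R3: switchee's savearea - virtual if a
;                                         continuation, low-order physical otherwise */
;       uint32_t old_thread;           /* R4: old thread, handed back to the switchee */
;       uint32_t new_srr0;             /* R5: resume PC (savesrr0 points at switch_in) */
;       uint32_t new_srr1;             /* R6: resume MSR (uninterruptible, translation on) */
;       uint32_t savearea_phys_hi;     /* R7: high-order physical address, full switch only */
;   };
;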
111
112
9bccf70c
A
113 .align 5
114 .globl EXT(Switch_context)
1c79356b 115
9bccf70c
A
116LEXT(Switch_context)
117
118 mfsprg r12,0 ; Get the per_proc block
1c79356b 119#if DEBUG
55e303ae
A
120 lwz r0,PP_ISTACKPTR(r12) ; (DEBUG/TRACE) make sure we are not
121 mr. r0,r0 ; (DEBUG/TRACE) on the interrupt
122 bne++ notonintstack ; (DEBUG/TRACE) stack
1c79356b
A
123 BREAKPOINT_TRAP
124notonintstack:
125#endif
de355530 126 lwz r5,THREAD_TOP_ACT(r5) ; Get the new activation
de355530 127 lwz r8,ACT_MACT_PCB(r5) ; Get the PCB for the new guy
55e303ae
A
128 lwz r9,cioSpace(r5) ; Get copyin/out address space
 129 cmpwi cr1,r4,0 ; Remember if there is a continuation - used waaaay down below
130 lwz r7,CTHREAD_SELF(r5) ; Pick up the user assist word
131 lwz r11,ACT_MACT_BTE(r5) ; Get BlueBox Task Environment
132 lwz r6,cioRelo(r5) ; Get copyin/out relocation top
133 mtsprg 1,r5
134 lwz r2,cioRelo+4(r5) ; Get copyin/out relocation bottom
de355530 135
9bccf70c 136 stw r7,UAW(r12) ; Save the assist word for the "ultra fast path"
55e303ae 137
1c79356b
A
138 lwz r7,ACT_MACT_SPF(r5) ; Get the special flags
139
55e303ae
A
140 sth r9,ppCIOmp+mpSpace(r12) ; Save the space
141 stw r6,ppCIOmp+mpNestReloc(r12) ; Save top part of physical address
142 stw r2,ppCIOmp+mpNestReloc+4(r12) ; Save bottom part of physical address
9bccf70c 143 stw r11,ppbbTaskEnv(r12) ; Save the bb task env
55e303ae 144 lwz r2,traceMask(0) ; Get the enabled traces
9bccf70c 145 stw r7,spcFlags(r12) ; Set per_proc copy of the special flags
1c79356b 146 lis r0,hi16(CutTrace) ; Trace FW call
55e303ae
A
147 mr. r2,r2 ; Any tracing going on?
148 lwz r11,SAVprev+4(r8) ; Get the previous of the switchee savearea
1c79356b 149 ori r0,r0,lo16(CutTrace) ; Trace FW call
55e303ae 150 beq++ cswNoTrc ; No trace today, dude...
1c79356b
A
151 mr r10,r3 ; Save across trace
152 lwz r2,THREAD_TOP_ACT(r3) ; Trace old activation
153 mr r3,r11 ; Trace prev savearea
154 sc ; Cut trace entry of context switch
155 mr r3,r10 ; Restore
156
55e303ae 157cswNoTrc: lwz r2,curctx(r5) ; Grab our current context pointer
9bccf70c
A
158 lwz r10,FPUowner(r12) ; Grab the owner of the FPU
159 lwz r9,VMXowner(r12) ; Grab the owner of the vector
55e303ae
A
160 lhz r0,PP_CPU_NUMBER(r12) ; Get our CPU number
161 mfmsr r6 ; Get the MSR because the switched to thread should inherit it
162 stw r11,ACT_MACT_PCB(r5) ; Dequeue the savearea we are switching to
163 li r0,1 ; Get set to hold off quickfret
164
165 rlwinm r6,r6,0,MSR_FP_BIT+1,MSR_FP_BIT-1 ; Turn off the FP
9bccf70c
A
166 cmplw r10,r2 ; Do we have the live float context?
167 lwz r10,FPUlevel(r2) ; Get the live level
55e303ae 168 mr r4,r3 ; Save our old thread to pass back
9bccf70c 169 cmplw cr5,r9,r2 ; Do we have the live vector context?
55e303ae
A
170 rlwinm r6,r6,0,MSR_VEC_BIT+1,MSR_VEC_BIT-1 ; Turn off the vector
171 stw r0,holdQFret(r12) ; Make sure we hold off releasing quickfret
172 bne++ cswnofloat ; Float is not ours...
9bccf70c
A
173
174 cmplw r10,r11 ; Is the level the same?
175 lwz r5,FPUcpu(r2) ; Get the owning cpu
55e303ae 176 bne++ cswnofloat ; Level not the same, this is not live...
9bccf70c
A
177
178 cmplw r5,r0 ; Still owned by this cpu?
179 lwz r10,FPUsave(r2) ; Get the level
55e303ae 180 bne++ cswnofloat ; CPU claimed by someone else...
9bccf70c
A
181
182 mr. r10,r10 ; Is there a savearea here?
183 ori r6,r6,lo16(MASK(MSR_FP)) ; Enable floating point
184
55e303ae 185 beq-- cswnofloat ; No savearea to check...
9bccf70c
A
186
187 lwz r3,SAVlevel(r10) ; Get the level
55e303ae 188 lwz r5,SAVprev+4(r10) ; Get the previous of this savearea
9bccf70c
A
189 cmplw r3,r11 ; Is it for the current level?
190
55e303ae 191 bne++ cswnofloat ; Nope...
9bccf70c
A
192
193 stw r5,FPUsave(r2) ; Pop off this savearea
55e303ae
A
194
195 rlwinm r3,r10,0,0,19 ; Move back to start of page
196
197 lwz r5,quickfret(r12) ; Get the first in quickfret list (top)
198 lwz r9,quickfret+4(r12) ; Get the first in quickfret list (bottom)
199 lwz r7,SACvrswap(r3) ; Get the virtual to real conversion (top)
200 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
201 stw r5,SAVprev(r10) ; Link the old in (top)
202 stw r9,SAVprev+4(r10) ; Link the old in (bottom)
203 xor r3,r10,r3 ; Convert to physical
204 stw r7,quickfret(r12) ; Set the first in quickfret list (top)
205 stw r3,quickfret+4(r12) ; Set the first in quickfret list (bottom)
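;
; Added note (an interpretation of the conversion instructions above, not original
; text): a savearea's physical address is formed by masking its virtual address
; back to the 4KB page base, reading the SACvrswap value kept there, and XORing it
; with the virtual address. A minimal C sketch under that assumption, with a
; hypothetical parameter standing in for the low word of SACvrswap:
;
;   #include <stdint.h>
;
;   static uint32_t savearea_virt_to_phys_lo(uint32_t va, uint32_t sac_vrswap_lo) {
;       return va ^ sac_vrswap_lo;     /* the "xor r3,r10,r3 ; Convert to physical" step */
;   }
;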
9bccf70c
A
206
207#if FPVECDBG
208 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
55e303ae 209 mr r7,r2 ; (TEST/DEBUG)
9bccf70c
A
210 li r2,0x4401 ; (TEST/DEBUG)
211 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
212 sc ; (TEST/DEBUG)
213 lhz r0,PP_CPU_NUMBER(r12) ; (TEST/DEBUG)
55e303ae 214 mr r2,r7 ; (TEST/DEBUG)
9bccf70c
A
215#endif
216
55e303ae 217cswnofloat: bne++ cr5,cswnovect ; Vector is not ours...
9bccf70c
A
218
219 lwz r10,VMXlevel(r2) ; Get the live level
220
221 cmplw r10,r11 ; Is the level the same?
222 lwz r5,VMXcpu(r2) ; Get the owning cpu
55e303ae 223 bne++ cswnovect ; Level not the same, this is not live...
9bccf70c
A
224
225 cmplw r5,r0 ; Still owned by this cpu?
226 lwz r10,VMXsave(r2) ; Get the level
55e303ae 227 bne++ cswnovect ; CPU claimed by someone else...
9bccf70c
A
228
229 mr. r10,r10 ; Is there a savearea here?
230 oris r6,r6,hi16(MASK(MSR_VEC)) ; Enable vector
231
55e303ae 232 beq-- cswnovect ; No savearea to check...
9bccf70c
A
233
234 lwz r3,SAVlevel(r10) ; Get the level
55e303ae 235 lwz r5,SAVprev+4(r10) ; Get the previous of this savearea
9bccf70c
A
236 cmplw r3,r11 ; Is it for the current level?
237
55e303ae 238 bne++ cswnovect ; Nope...
9bccf70c
A
239
240 stw r5,VMXsave(r2) ; Pop off this savearea
55e303ae
A
241 rlwinm r3,r10,0,0,19 ; Move back to start of page
242
243 lwz r5,quickfret(r12) ; Get the first in quickfret list (top)
244 lwz r9,quickfret+4(r12) ; Get the first in quickfret list (bottom)
245 lwz r2,SACvrswap(r3) ; Get the virtual to real conversion (top)
246 lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
247 stw r5,SAVprev(r10) ; Link the old in (top)
248 stw r9,SAVprev+4(r10) ; Link the old in (bottom)
249 xor r3,r10,r3 ; Convert to physical
250 stw r2,quickfret(r12) ; Set the first in quickfret list (top)
251 stw r3,quickfret+4(r12) ; Set the first in quickfret list (bottom)
9bccf70c
A
252
253#if FPVECDBG
254 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
255 li r2,0x4501 ; (TEST/DEBUG)
256 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
257 sc ; (TEST/DEBUG)
258#endif
259
55e303ae
A
260cswnovect: li r0,0 ; Get set to release quickfret holdoff
261 rlwinm r11,r8,0,0,19 ; Switch to savearea base
262 lis r9,hi16(EXT(switch_in)) ; Get top of switch in routine
263 lwz r5,savesrr0+4(r8) ; Set up the new SRR0
264 lwz r7,SACvrswap(r11) ; Get the high order V to R translation
265 lwz r11,SACvrswap+4(r11) ; Get the low order V to R translation
266 ori r9,r9,lo16(EXT(switch_in)) ; Bottom half of switch in
267 stw r0,holdQFret(r12) ; Make sure we release quickfret holdoff
268 stw r9,savesrr0+4(r8) ; Make us jump to the switch in routine
9bccf70c 269
55e303ae 270 lwz r9,SAVflags(r8) /* Get the flags */
9bccf70c 271 lis r0,hi16(SwitchContextCall) /* Top part of switch context */
1c79356b 272 li r10,MSR_SUPERVISOR_INT_OFF /* Get the switcher's MSR */
de355530 273 ori r0,r0,lo16(SwitchContextCall) /* Bottom part of switch context */
55e303ae
A
274 stw r10,savesrr1+4(r8) /* Set up for switch in */
275 rlwinm r9,r9,0,15,13 /* Reset the syscall flag */
276 xor r3,r11,r8 /* Get the physical address of the new context save area */
1c79356b 277 stw r9,SAVflags(r8) /* Set the flags */
9bccf70c
A
278
279 bne cr1,swtchtocont ; Switch to the continuation
1c79356b
A
280 sc /* Switch to the new context */
281
282/* We come back here in the new thread context
283 * R4 was set to hold the old thread pointer, but switch_in will put it into
284 * R3 where it belongs.
285 */
286 blr /* Jump into the new thread */
9bccf70c
A
287
288;
289; This is where we go when a continuation is set. We are actually
290; killing off the old context of the new guy so we need to pop off
291; any float or vector states for the ditched level.
292;
 293; Note that we do the same kind of thing as chkfac in hw_exceptions.s
294;
295
1c79356b 296
9bccf70c 297swtchtocont:
55e303ae
A
298
299 stw r5,savesrr0+4(r8) ; Set the pc
300 stw r6,savesrr1+4(r8) ; Set the next MSR to use
301 stw r4,saver3+4(r8) ; Make sure we pass back the old thread
302 mr r3,r8 ; Pass in the virtual address of savearea
9bccf70c
A
303
304 b EXT(exception_exit) ; Blocking on continuation, toss old context...
1c79356b
A
305
306
307
308/*
309 * All switched to threads come here first to clean up the old thread.
310 * We need to do the following contortions because we need to keep
311 * the LR clean. And because we need to manipulate the savearea chain
312 * with translation on. If we could, this should be done in lowmem_vectors
313 * before translation is turned on. But we can't, dang it!
314 *
55e303ae 315 * R3 = switcher's savearea (32-bit virtual)
1c79356b
A
316 * saver4 = old thread in switcher's save
317 * saver5 = new SRR0 in switcher's save
318 * saver6 = new SRR1 in switcher's save
319
320
321 */
322
9bccf70c
A
323
324 .align 5
325 .globl EXT(switch_in)
326
327LEXT(switch_in)
1c79356b 328
55e303ae
A
329 lwz r4,saver4+4(r3) ; Get the old thread
330 lwz r5,saver5+4(r3) ; Get the srr0 value
331
332 mfsprg r0,2 ; Get feature flags
333 lwz r9,THREAD_TOP_ACT(r4) ; Get the switched from ACT
334 lwz r6,saver6+4(r3) ; Get the srr1 value
335 rlwinm. r0,r0,0,pf64Bitb,pf64Bitb ; Check for 64-bit
336 lwz r10,ACT_MACT_PCB(r9) ; Get the top PCB on the old thread
1c79356b 337
55e303ae
A
338 stw r3,ACT_MACT_PCB(r9) ; Put the new one on top
339 stw r10,SAVprev+4(r3) ; Chain on the old one
de355530 340
55e303ae 341 mr r3,r4 ; Pass back the old thread
1c79356b 342
55e303ae
A
343 mtsrr0 r5 ; Set return point
344 mtsrr1 r6 ; Set return MSR
345
346 bne++ siSixtyFour ; Go do 64-bit...
1c79356b 347
55e303ae
A
348 rfi ; Jam...
349
350siSixtyFour:
351 rfid ; Jam...
1c79356b
A
352
353/*
9bccf70c 354 * void fpu_save(facility_context ctx)
1c79356b 355 *
9bccf70c
A
356 * Note that there are some oddities here when we save a context we are using.
357 * It is really not too cool to do this, but what the hey... Anyway,
 358 * we turn fpus and vecs off before we leave. The oddity is that if you use fpus after this, the
 359 * savearea containing the context just saved will go away. So, the bottom line is:
 360 * do not use fpus until after you are done with the saved context.
1c79356b 361 */
9bccf70c
A
362 .align 5
363 .globl EXT(fpu_save)
1c79356b 364
9bccf70c
A
365LEXT(fpu_save)
366
55e303ae
A
367 lis r2,hi16(MASK(MSR_VEC)) ; Get the vector enable
368 li r12,lo16(MASK(MSR_EE)) ; Get the EE bit
369 ori r2,r2,lo16(MASK(MSR_FP)) ; Get FP
9bccf70c
A
370
371 mfmsr r0 ; Get the MSR
55e303ae
A
372 andc r0,r0,r2 ; Clear FP, VEC
373 andc r2,r0,r12 ; Clear EE
9bccf70c 374 ori r2,r2,MASK(MSR_FP) ; Enable the floating point feature for now also
9bccf70c 375 mtmsr r2 ; Set the MSR
1c79356b 376 isync
9bccf70c
A
377
378 mfsprg r6,0 ; Get the per_processor block
379 lwz r12,FPUowner(r6) ; Get the context ID for owner
380
1c79356b 381#if FPVECDBG
9bccf70c
A
382 mr r7,r0 ; (TEST/DEBUG)
383 li r4,0 ; (TEST/DEBUG)
384 mr r10,r3 ; (TEST/DEBUG)
385 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
386 mr. r3,r12 ; (TEST/DEBUG)
387 li r2,0x6F00 ; (TEST/DEBUG)
388 li r5,0 ; (TEST/DEBUG)
55e303ae 389 beq-- noowneryet ; (TEST/DEBUG)
9bccf70c
A
390 lwz r4,FPUlevel(r12) ; (TEST/DEBUG)
391 lwz r5,FPUsave(r12) ; (TEST/DEBUG)
392
393noowneryet: oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
394 sc ; (TEST/DEBUG)
395 mr r0,r7 ; (TEST/DEBUG)
396 mr r3,r10 ; (TEST/DEBUG)
1c79356b 397#endif
9bccf70c
A
398 mflr r2 ; Save the return address
399
400fsretry: mr. r12,r12 ; Anyone own the FPU?
401 lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number
55e303ae 402 beq-- fsret ; Nobody owns the FPU, no save required...
1c79356b 403
9bccf70c 404 cmplw cr1,r3,r12 ; Is the specified context live?
1c79356b 405
9bccf70c 406 isync ; Force owner check first
1c79356b 407
9bccf70c 408 lwz r9,FPUcpu(r12) ; Get the cpu that context was last on
55e303ae 409 bne-- cr1,fsret ; No, it is not...
1c79356b 410
9bccf70c 411 cmplw cr1,r9,r11 ; Was the context for this processor?
55e303ae 412 beq-- cr1,fsgoodcpu ; Facility last used on this processor...
0b4e3aa0 413
9bccf70c 414 b fsret ; Someone else claimed it...
1c79356b 415
9bccf70c 416 .align 5
1c79356b 417
9bccf70c
A
418fsgoodcpu: lwz r3,FPUsave(r12) ; Get the current FPU savearea for the thread
419 lwz r9,FPUlevel(r12) ; Get our current level indicator
1c79356b 420
9bccf70c
A
421 cmplwi cr1,r3,0 ; Have we ever saved this facility context?
422 beq- cr1,fsneedone ; Never saved it, so go do it...
1c79356b 423
9bccf70c
A
424 lwz r8,SAVlevel(r3) ; Get the level this savearea is for
425 cmplw cr1,r9,r8 ; Correct level?
55e303ae 426 beq-- cr1,fsret ; The current level is already saved, bail out...
1c79356b 427
9bccf70c
A
428fsneedone: bl EXT(save_get) ; Get a savearea for the context
429
430 mfsprg r6,0 ; Get back per_processor block
431 li r4,SAVfloat ; Get floating point tag
432 lwz r12,FPUowner(r6) ; Get back our thread
433 stb r4,SAVflags+2(r3) ; Mark this savearea as a float
434 mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it...
55e303ae 435 beq-- fsbackout ; If disowned, just toss savearea...
9bccf70c 436 lwz r4,facAct(r12) ; Get the activation associated with live context
9bccf70c
A
437 lwz r8,FPUsave(r12) ; Get the current top floating point savearea
438 stw r4,SAVact(r3) ; Indicate the right activation for this context
439 lwz r9,FPUlevel(r12) ; Get our current level indicator again
440 stw r3,FPUsave(r12) ; Set this as the most current floating point context
55e303ae 441 stw r8,SAVprev+4(r3) ; And then chain this in front
1c79356b 442
9bccf70c 443 stw r9,SAVlevel(r3) ; Show level in savearea
1c79356b 444
55e303ae
A
445 bl fp_store ; save all 32 FPRs in the save area at r3
446 mtlr r2 ; Restore return
447
9bccf70c 448fsret: mtmsr r0 ; Put interrupts on if they were and floating point off
1c79356b
A
449 isync
450
451 blr
452
55e303ae
A
453fsbackout: mr r4,r0 ; restore the original MSR
454 b EXT(save_ret_wMSR) ; Toss savearea and return from there...
9bccf70c 455
1c79356b
A
456/*
457 * fpu_switch()
458 *
459 * Entered to handle the floating-point unavailable exception and
460 * switch fpu context
461 *
 462 * This code is run with virtual address mode on and interrupts off.
 463 *
 464 * Upon exit, the code returns to the user's context with the floating
465 * point facility turned on.
466 *
467 * ENTRY: VM switched ON
468 * Interrupts OFF
469 * State is saved in savearea pointed to by R4.
470 * All other registers are free.
471 *
472 */
473
9bccf70c
A
474 .align 5
475 .globl EXT(fpu_switch)
476
477LEXT(fpu_switch)
478
1c79356b 479#if DEBUG
1c79356b
A
480 lis r3,hi16(EXT(fpu_trap_count)) ; Get address of FP trap counter
481 ori r3,r3,lo16(EXT(fpu_trap_count)) ; Get address of FP trap counter
482 lwz r1,0(r3)
483 addi r1,r1,1
484 stw r1,0(r3)
1c79356b
A
485#endif /* DEBUG */
486
9bccf70c 487 mfsprg r26,0 ; Get the per_processor block
55e303ae
A
488 mfmsr r19 ; Get the current MSR
489 mfsprg r17,1 ; Get the current thread
1c79356b 490
9bccf70c
A
491 mr r25,r4 ; Save the entry savearea
492 lwz r22,FPUowner(r26) ; Get the thread that owns the FPU
9bccf70c 493 ori r19,r19,lo16(MASK(MSR_FP)) ; Enable the floating point feature
1c79356b 494
9bccf70c 495 mtmsr r19 ; Enable floating point instructions
1c79356b 496 isync
1c79356b 497
9bccf70c
A
498 lwz r27,ACT_MACT_PCB(r17) ; Get the current level
499 lwz r29,curctx(r17) ; Grab the current context anchor of the current thread
0b4e3aa0 500
9bccf70c
A
501; R22 has the "old" context anchor
502; R29 has the "new" context anchor
0b4e3aa0 503
1c79356b 504#if FPVECDBG
9bccf70c
A
505 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
506 li r2,0x7F01 ; (TEST/DEBUG)
507 mr r3,r22 ; (TEST/DEBUG)
508 mr r5,r29 ; (TEST/DEBUG)
509 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
510 sc ; (TEST/DEBUG)
1c79356b 511#endif
9bccf70c
A
512
513 lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number
1c79356b 514
9bccf70c 515fswretry: mr. r22,r22 ; See if there is any live FP status
1c79356b 516
9bccf70c 517 beq- fsnosave ; No live context, so nothing to save...
1c79356b 518
9bccf70c 519 isync ; Make sure we see this in the right order
1c79356b 520
9bccf70c
A
521 lwz r30,FPUsave(r22) ; Get the top savearea
522 cmplw cr2,r22,r29 ; Are both old and new the same context?
523 lwz r18,FPUcpu(r22) ; Get the last CPU we ran on
524 cmplwi cr1,r30,0 ; Anything saved yet?
525 cmplw r18,r16 ; Make sure we are on the right processor
526 lwz r31,FPUlevel(r22) ; Get the context level
1c79356b 527
9bccf70c
A
528 bne- fsnosave ; No, not on the same processor...
529
1c79356b 530;
9bccf70c
A
531; Check to see if the live context has already been saved.
532; Also check to see if all we are here just to re-enable the MSR
533; and handle specially if so.
1c79356b 534;
9bccf70c
A
535
536 cmplw r31,r27 ; See if the current and active levels are the same
537 crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same
538 li r3,0 ; Clear this
1c79356b 539
9bccf70c
A
540 beq- fsthesame ; New and old are the same, just go enable...
541
542 beq- cr1,fsmstsave ; Not saved yet, go do it...
1c79356b 543
9bccf70c 544 lwz r11,SAVlevel(r30) ; Get the level of top saved context
1c79356b 545
9bccf70c
A
546 cmplw r31,r11 ; Are live and saved the same?
547
1c79356b 548#if FPVECDBG
9bccf70c
A
549 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
550 li r2,0x7F02 ; (TEST/DEBUG)
551 mr r3,r30 ; (TEST/DEBUG)
552 mr r5,r31 ; (TEST/DEBUG)
553 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
554 sc ; (TEST/DEBUG)
55e303ae 555 li r3,0 ; (TEST/DEBUG)
1c79356b 556#endif
9bccf70c
A
557
558 beq+ fsnosave ; Same level, so already saved...
559
1c79356b 560
9bccf70c
A
561fsmstsave: stw r3,FPUowner(r26) ; Kill the context now
562 eieio ; Make sure everyone sees it
563 bl EXT(save_get) ; Go get a savearea
564
55e303ae
A
565 mr. r31,r31 ; Are we saving the user state?
566 la r15,FPUsync(r22) ; Point to the sync word
567 beq++ fswusave ; Yeah, no need for lock...
568;
569; Here we make sure that the live context is not tossed while we are
570; trying to push it. This can happen only for kernel context and
571; then only by a race with act_machine_sv_free.
572;
573; We only need to hold this for a very short time, so no sniffing needed.
574; If we find any change to the level, we just abandon.
575;
576fswsync: lwarx r19,0,r15 ; Get the sync word
577 li r0,1 ; Get the lock
578 cmplwi cr1,r19,0 ; Is it unlocked?
579 stwcx. r0,0,r15 ; Store lock and test reservation
580 cror cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked
581 bne-- fswsync ; Try again if lost reservation or locked...
582
583 isync ; Toss speculation
584
585 lwz r0,FPUlevel(r22) ; Pick up the level again
586 li r7,0 ; Get unlock value
587 cmplw r0,r31 ; Same level?
588 beq++ fswusave ; Yeah, we expect it to be...
589
590 stw r7,FPUsync(r22) ; Unlock lock. No need to sync here
591
592 bl EXT(save_ret) ; Toss save area because we are abandoning save
593 b fsnosave ; Skip the save...
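;
; Added sketch (not part of the original source): the FPUsync lock/recheck/abandon
; sequence above, taken only when pushing kernel-level state, rendered in C with
; GCC/Clang atomic builtins. The type and function names are illustrative
; assumptions, not real kernel interfaces.
;
;   #include <stdbool.h>
;   #include <stdint.h>
;
;   typedef struct {
;       volatile uint32_t sync;        /* FPUsync: 0 = unlocked, 1 = locked */
;       volatile uint32_t level;       /* FPUlevel */
;   } fpu_ctx_sketch_t;
;
;   static bool try_begin_kernel_fp_save(fpu_ctx_sketch_t *ctx, uint32_t expected_level) {
;       while (__atomic_exchange_n(&ctx->sync, 1, __ATOMIC_ACQUIRE) != 0)
;           ;                          /* spin, like the lwarx/stwcx. loop at fswsync */
;       if (ctx->level != expected_level) {
;           __atomic_store_n(&ctx->sync, 0, __ATOMIC_RELEASE);
;           return false;              /* level changed: caller tosses the savearea */
;       }
;       return true;                   /* caller stores the FPRs, then drops the lock */
;   }
;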
de355530 594
55e303ae
A
595 .align 5
596
597fswusave: lwz r12,facAct(r22) ; Get the activation associated with the context
598 stw r3,FPUsave(r22) ; Set this as the latest context savearea for the thread
599 mr. r31,r31 ; Check again if we were user level
600 stw r30,SAVprev+4(r3) ; Point us to the old context
9bccf70c
A
601 stw r31,SAVlevel(r3) ; Tag our level
602 li r7,SAVfloat ; Get the floating point ID
603 stw r12,SAVact(r3) ; Make sure we point to the right guy
604 stb r7,SAVflags+2(r3) ; Set that we have a floating point save area
1c79356b 605
55e303ae
A
606 li r7,0 ; Get the unlock value
607
608 beq-- fswnulock ; Skip unlock if user (we did not lock it)...
609 eieio ; Make sure that these updates make it out
610 stw r7,FPUsync(r22) ; Unlock it.
611
612fswnulock:
613
1c79356b 614#if FPVECDBG
9bccf70c
A
615 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
616 li r2,0x7F03 ; (TEST/DEBUG)
617 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
618 sc ; (TEST/DEBUG)
1c79356b
A
619#endif
620
55e303ae 621 bl fp_store ; store all 32 FPRs
1c79356b 622
1c79356b
A
623;
624; The context is all saved now and the facility is free.
625;
9bccf70c 626; If we do not have a saved context to load, we need to fill the registers with junk, because this level has
1c79356b
A
627; never used them before and some thieving bastard could hack the old values
628; of some thread! Just imagine what would happen if they could! Why, nothing
629; would be safe! My God! It is terrifying!
630;
631
0b4e3aa0 632
9bccf70c
A
633fsnosave: lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one
634 lwz r19,FPUcpu(r29) ; Get the last CPU we ran on
635 lwz r14,FPUsave(r29) ; Point to the top of the "new" context stack
0b4e3aa0 636
9bccf70c
A
637 stw r16,FPUcpu(r29) ; Claim context for us
638 eieio
0b4e3aa0 639
1c79356b 640#if FPVECDBG
9bccf70c
A
641 lwz r13,FPUlevel(r29) ; (TEST/DEBUG)
642 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
643 li r2,0x7F04 ; (TEST/DEBUG)
644 mr r1,r15 ; (TEST/DEBUG)
645 mr r3,r14 ; (TEST/DEBUG)
646 mr r5,r13 ; (TEST/DEBUG)
647 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
648 sc ; (TEST/DEBUG)
1c79356b 649#endif
9bccf70c
A
650
651 lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
652 mulli r19,r19,ppSize ; Find offset to the owner per_proc
653 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
654 li r16,FPUowner ; Displacement to float owner
655 add r19,r18,r19 ; Point to the owner per_proc
9bccf70c
A
656
657fsinvothr: lwarx r18,r16,r19 ; Get the owner
55e303ae
A
658 sub r0,r18,r29 ; Subtract one from the other
659 sub r11,r29,r18 ; Subtract the other from the one
660 or r11,r11,r0 ; Combine them
 661 srawi r11,r11,31 ; Get a 0 if equal or -1 if not
662 and r18,r18,r11 ; Make 0 if same, unchanged if not
663 stwcx. r18,r16,r19 ; Try to invalidate it
664 bne-- fsinvothr ; Try again if there was a collision...
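;
; Added note: the sub/sub/or/srawi/and sequence above is a branch-free "clear the
; owner if it is us" inside the lwarx/stwcx. reservation loop. Equivalent C,
; assuming the usual arithmetic right shift of negative values (as on PowerPC):
;
;   #include <stdint.h>
;
;   static uint32_t clear_if_equal(uint32_t owner, uint32_t me) {
;       uint32_t d = (owner - me) | (me - owner);   /* zero only when owner == me */
;       int32_t mask = (int32_t)d >> 31;            /* 0 if equal, -1 if not */
;       return owner & (uint32_t)mask;              /* 0 if same, unchanged if not */
;   }
;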
9bccf70c 665
55e303ae 666 cmplwi cr1,r14,0 ; Do we possibly have some context to load?
9bccf70c
A
667 la r11,savefp0(r14) ; Point to first line to bring in
668 stw r15,FPUlevel(r29) ; Set the "new" active level
669 eieio
670 stw r29,FPUowner(r26) ; Mark us as having the live context
1c79356b 671
55e303ae 672 beq++ cr1,MakeSureThatNoTerroristsCanHurtUsByGod ; No "new" context to load...
9bccf70c
A
673
674 dcbt 0,r11 ; Touch line in
675
55e303ae 676 lwz r3,SAVprev+4(r14) ; Get the previous context
9bccf70c
A
677 lwz r0,SAVlevel(r14) ; Get the level of first facility savearea
678 cmplw r0,r15 ; Top level correct to load?
55e303ae 679 bne-- MakeSureThatNoTerroristsCanHurtUsByGod ; No, go initialize...
9bccf70c
A
680
681 stw r3,FPUsave(r29) ; Pop the context (we will toss the savearea later)
1c79356b
A
682
683#if FPVECDBG
9bccf70c
A
684 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
685 li r2,0x7F05 ; (TEST/DEBUG)
686 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
687 sc ; (TEST/DEBUG)
1c79356b
A
688#endif
689
55e303ae
A
 690// Note this code is used by both 32-byte and 128-byte cache line processors. This means six extra DCBTs
 691// are executed on a 128-byte-line machine, but that is better than a mispredicted branch.
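//
// Added arithmetic behind the note above: the 32 double-precision FPRs occupy
// 32 * 8 = 256 bytes, and a dcbt is issued every 32 bytes (savefp0, savefp4,
// ..., savefp28), i.e. 8 touches in all. A machine with 128-byte lines only
// needs 256 / 128 = 2 of them, hence the six redundant DCBTs mentioned.
//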
692
9bccf70c
A
693 la r11,savefp4(r14) ; Point to next line
694 dcbt 0,r11 ; Touch line in
1c79356b
A
695 lfd f0, savefp0(r14)
696 lfd f1,savefp1(r14)
1c79356b 697 lfd f2,savefp2(r14)
9bccf70c 698 la r11,savefp8(r14) ; Point to next line
1c79356b 699 lfd f3,savefp3(r14)
9bccf70c 700 dcbt 0,r11 ; Touch line in
1c79356b
A
701 lfd f4,savefp4(r14)
702 lfd f5,savefp5(r14)
703 lfd f6,savefp6(r14)
9bccf70c 704 la r11,savefp12(r14) ; Point to next line
1c79356b 705 lfd f7,savefp7(r14)
9bccf70c 706 dcbt 0,r11 ; Touch line in
1c79356b
A
707 lfd f8,savefp8(r14)
708 lfd f9,savefp9(r14)
709 lfd f10,savefp10(r14)
9bccf70c 710 la r11,savefp16(r14) ; Point to next line
1c79356b 711 lfd f11,savefp11(r14)
9bccf70c 712 dcbt 0,r11 ; Touch line in
1c79356b
A
713 lfd f12,savefp12(r14)
714 lfd f13,savefp13(r14)
715 lfd f14,savefp14(r14)
9bccf70c 716 la r11,savefp20(r14) ; Point to next line
1c79356b 717 lfd f15,savefp15(r14)
9bccf70c 718 dcbt 0,r11 ; Touch line in
1c79356b
A
719 lfd f16,savefp16(r14)
720 lfd f17,savefp17(r14)
721 lfd f18,savefp18(r14)
9bccf70c 722 la r11,savefp24(r14) ; Point to next line
1c79356b 723 lfd f19,savefp19(r14)
9bccf70c 724 dcbt 0,r11 ; Touch line in
1c79356b
A
725 lfd f20,savefp20(r14)
726 lfd f21,savefp21(r14)
9bccf70c 727 la r11,savefp28(r14) ; Point to next line
1c79356b
A
728 lfd f22,savefp22(r14)
729 lfd f23,savefp23(r14)
9bccf70c 730 dcbt 0,r11 ; Touch line in
1c79356b
A
731 lfd f24,savefp24(r14)
732 lfd f25,savefp25(r14)
733 lfd f26,savefp26(r14)
734 lfd f27,savefp27(r14)
735 lfd f28,savefp28(r14)
736 lfd f29,savefp29(r14)
737 lfd f30,savefp30(r14)
738 lfd f31,savefp31(r14)
739
9bccf70c
A
740 mr r3,r14 ; Get the old savearea (we popped it before)
741 bl EXT(save_ret) ; Toss it
742
55e303ae 743fsenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy
9bccf70c 744 ori r8,r8,MASK(MSR_FP) ; Enable the floating point feature
d7e50217
A
745 lwz r10,ACT_MACT_SPF(r17) ; Get the act special flags
746 lwz r11,spcFlags(r26) ; Get per_proc spec flags cause not in sync with act
1c79356b 747 oris r10,r10,hi16(floatUsed|floatCng) ; Set that we used floating point
d7e50217 748 oris r11,r11,hi16(floatUsed|floatCng) ; Set that we used floating point
9bccf70c 749 rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are doing this for user state
55e303ae
A
750 stw r8,savesrr1+4(r25) ; Set the msr of the interrupted guy
 751 mr r3,r25 ; Pass the virtual address of the savearea
b4c24cb9 752 beq- fsnuser ; We are not user state...
9bccf70c 753 stw r10,ACT_MACT_SPF(r17) ; Set the activation copy
d7e50217 754 stw r11,spcFlags(r26) ; Set per_proc copy
1c79356b
A
755
756fsnuser:
757#if FPVECDBG
9bccf70c
A
758 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
759 li r2,0x7F07 ; (TEST/DEBUG)
760 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
761 sc ; (TEST/DEBUG)
1c79356b 762#endif
1c79356b 763
9bccf70c 764 b EXT(exception_exit) ; Exit to the fray...
1c79356b
A
765
766/*
767 * Initialize the registers to some bogus value
768 */
769
770MakeSureThatNoTerroristsCanHurtUsByGod:
0b4e3aa0 771
1c79356b 772#if FPVECDBG
9bccf70c
A
773 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
774 li r2,0x7F06 ; (TEST/DEBUG)
775 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
776 sc ; (TEST/DEBUG)
1c79356b 777#endif
9bccf70c
A
778 lis r5,hi16(EXT(FloatInit)) ; Get top secret floating point init value address
779 ori r5,r5,lo16(EXT(FloatInit)) ; Slam bottom
780 lfd f0,0(r5) ; Initialize FP0
781 fmr f1,f0 ; Do them all
1c79356b
A
782 fmr f2,f0
783 fmr f3,f0
784 fmr f4,f0
785 fmr f5,f0
786 fmr f6,f0
787 fmr f7,f0
788 fmr f8,f0
789 fmr f9,f0
790 fmr f10,f0
791 fmr f11,f0
792 fmr f12,f0
793 fmr f13,f0
794 fmr f14,f0
795 fmr f15,f0
796 fmr f16,f0
797 fmr f17,f0
1c79356b
A
798 fmr f18,f0
799 fmr f19,f0
800 fmr f20,f0
1c79356b
A
801 fmr f21,f0
802 fmr f22,f0
803 fmr f23,f0
804 fmr f24,f0
805 fmr f25,f0
806 fmr f26,f0
807 fmr f27,f0
808 fmr f28,f0
809 fmr f29,f0
810 fmr f30,f0
811 fmr f31,f0
9bccf70c
A
812 b fsenable ; Finish setting it all up...
813
1c79356b
A
814
815;
9bccf70c
A
816; We get here when we are switching to the same context at the same level and the context
 817; is still live. Essentially, all we are doing is turning on the facility. It may have
818; gotten turned off due to doing a context save for the current level or a context switch
819; back to the live guy.
1c79356b 820;
9bccf70c
A
821
822 .align 5
1c79356b 823
9bccf70c 824fsthesame:
1c79356b 825
9bccf70c
A
826#if FPVECDBG
827 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
828 li r2,0x7F0A ; (TEST/DEBUG)
829 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
830 sc ; (TEST/DEBUG)
831#endif
832 beq- cr1,fsenable ; Not saved yet, nothing to pop, go enable and exit...
1c79356b 833
9bccf70c 834 lwz r11,SAVlevel(r30) ; Get the level of top saved context
55e303ae 835 lwz r14,SAVprev+4(r30) ; Get the previous savearea
1c79356b 836
9bccf70c 837 cmplw r11,r31 ; Are live and saved the same?
1c79356b 838
9bccf70c 839 bne+ fsenable ; Level not the same, nothing to pop, go enable and exit...
1c79356b 840
9bccf70c 841 mr r3,r30 ; Get the old savearea (we popped it before)
55e303ae 842 stw r14,FPUsave(r22) ; Pop the savearea from the stack
9bccf70c
A
843 bl EXT(save_ret) ; Toss it
844 b fsenable ; Go enable and exit...
845
846
847;
848; This function invalidates any live floating point context for the passed in facility_context.
849; This is intended to be called just before act_machine_sv_free tosses saveareas.
850;
851
852 .align 5
853 .globl EXT(toss_live_fpu)
854
855LEXT(toss_live_fpu)
856
55e303ae 857 lis r0,hi16(MASK(MSR_VEC)) ; Get VEC
9bccf70c 858 mfmsr r9 ; Get the MSR
55e303ae 859 ori r0,r0,lo16(MASK(MSR_FP)) ; Add in FP
9bccf70c 860 rlwinm. r8,r9,0,MSR_FP_BIT,MSR_FP_BIT ; Are floats on right now?
55e303ae
A
861 andc r9,r9,r0 ; Force off VEC and FP
862 ori r0,r0,lo16(MASK(MSR_EE)) ; Turn off EE
863 andc r0,r9,r0 ; Turn off EE now
9bccf70c
A
864 mtmsr r0 ; No interruptions
865 isync
866 beq+ tlfnotours ; Floats off, can not be live here...
867
868 mfsprg r8,0 ; Get the per proc
869
870;
871; Note that at this point, since floats are on, we are the owner
872; of live state on this processor
1c79356b 873;
9bccf70c
A
874
875 lwz r6,FPUowner(r8) ; Get the thread that owns the floats
876 li r0,0 ; Clear this just in case we need it
877 cmplw r6,r3 ; Are we tossing our own context?
55e303ae 878 bne-- tlfnotours ; Nope...
9bccf70c 879
55e303ae 880 lfd f1,Zero(0) ; Make a 0
9bccf70c
A
881 mtfsf 0xFF,f1 ; Clear it
882
883tlfnotours: lwz r11,FPUcpu(r3) ; Get the cpu on which we last loaded context
884 lis r12,hi16(EXT(per_proc_info)) ; Set base per_proc
885 mulli r11,r11,ppSize ; Find offset to the owner per_proc
886 ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
887 li r10,FPUowner ; Displacement to float owner
888 add r11,r12,r11 ; Point to the owner per_proc
9bccf70c
A
889
890tlfinvothr: lwarx r12,r10,r11 ; Get the owner
9bccf70c 891
55e303ae
A
892 sub r0,r12,r3 ; Subtract one from the other
893 sub r8,r3,r12 ; Subtract the other from the one
894 or r8,r8,r0 ; Combine them
 895 srawi r8,r8,31 ; Get a 0 if equal or -1 if not
896 and r12,r12,r8 ; Make 0 if same, unchanged if not
897 stwcx. r12,r10,r11 ; Try to invalidate it
898 bne-- tlfinvothr ; Try again if there was a collision...
899
900 mtmsr r9 ; Restore interruptions
9bccf70c
A
901 isync ; Could be turning off floats here
902 blr ; Leave...
903
1c79356b
A
904
905/*
906 * Altivec stuff is here. The techniques used are pretty identical to
907 * the floating point. Except that we will honor the VRSAVE register
908 * settings when loading and restoring registers.
909 *
910 * There are two indications of saved VRs: the VRSAVE register and the vrvalid
911 * mask. VRSAVE is set by the vector user and represents the VRs that they
912 * say that they are using. The vrvalid mask indicates which vector registers
913 * are saved in the savearea. Whenever context is saved, it is saved according
914 * to the VRSAVE register. It is loaded based on VRSAVE anded with
915 * vrvalid (all other registers are splatted with 0s). This is done because we
916 * don't want to load any registers we don't have a copy of, we want to set them
917 * to zero instead.
918 *
9bccf70c
A
919 * Note that there are some oddities here when we save a context we are using.
920 * It is really not too cool to do this, but what the hey... Anyway,
921 * we turn vectors and fpu off before we leave.
922 * The oddity is that if you use vectors after this, the
 923 * savearea containing the context just saved will go away. So, the bottom line is:
 924 * do not use vectors until after you are done with the saved context.
925 *
1c79356b
A
926 */
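;
; Added sketch (an assumption based on the description above, not the actual
; vr_load/vr_store implementation): the restore policy in C form, with VRSAVE
; bit 0 (the PowerPC MSB) corresponding to v0. The vr_t type is a stand-in for
; a 128-bit register image.
;
;   #include <stdint.h>
;
;   typedef struct { uint32_t w[4]; } vr_t;
;
;   static void vec_restore_sketch(vr_t dst[32], const vr_t saved[32],
;                                  uint32_t vrsave, uint32_t vrvalid) {
;       uint32_t load = vrsave & vrvalid;          /* only VRs both wanted and saved */
;       for (int vr = 0; vr < 32; vr++) {
;           uint32_t bit = 0x80000000u >> vr;      /* VRSAVE bit 0 is the MSB (v0) */
;           if (load & bit)
;               dst[vr] = saved[vr];               /* restore from the savearea */
;           else
;               dst[vr] = (vr_t){{0, 0, 0, 0}};    /* everything else is splatted with 0s */
;       }
;   }
;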
927
9bccf70c
A
928 .align 5
929 .globl EXT(vec_save)
930
931LEXT(vec_save)
1c79356b 932
55e303ae
A
933
934 lis r2,hi16(MASK(MSR_VEC)) ; Get VEC
9bccf70c 935 mfmsr r0 ; Get the MSR
55e303ae
A
936 ori r2,r2,lo16(MASK(MSR_FP)) ; Add in FP
937 andc r0,r0,r2 ; Force off VEC and FP
938 ori r2,r2,lo16(MASK(MSR_EE)) ; Clear EE
939 andc r2,r0,r2 ; Clear EE for now
9bccf70c 940 oris r2,r2,hi16(MASK(MSR_VEC)) ; Enable the vector facility for now also
9bccf70c 941 mtmsr r2 ; Set the MSR
1c79356b
A
942 isync
943
9bccf70c
A
944 mfsprg r6,0 ; Get the per_processor block
945 lwz r12,VMXowner(r6) ; Get the context ID for owner
946
1c79356b 947#if FPVECDBG
9bccf70c
A
948 mr r7,r0 ; (TEST/DEBUG)
949 li r4,0 ; (TEST/DEBUG)
950 mr r10,r3 ; (TEST/DEBUG)
951 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
952 mr. r3,r12 ; (TEST/DEBUG)
953 li r2,0x5F00 ; (TEST/DEBUG)
954 li r5,0 ; (TEST/DEBUG)
955 beq- noowneryeu ; (TEST/DEBUG)
956 lwz r4,VMXlevel(r12) ; (TEST/DEBUG)
957 lwz r5,VMXsave(r12) ; (TEST/DEBUG)
958
959noowneryeu: oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
960 sc ; (TEST/DEBUG)
961 mr r0,r7 ; (TEST/DEBUG)
962 mr r3,r10 ; (TEST/DEBUG)
1c79356b 963#endif
9bccf70c
A
964 mflr r2 ; Save the return address
965
966vsretry: mr. r12,r12 ; Anyone own the vector?
967 lhz r11,PP_CPU_NUMBER(r6) ; Get our CPU number
968 beq- vsret ; Nobody owns the vector, no save required...
969
970 cmplw cr1,r3,r12 ; Is the specified context live?
971
972 isync ; Force owner check first
973
974 lwz r9,VMXcpu(r12) ; Get the cpu that context was last on
975 bne- cr1,vsret ; Specified context is not live
976
977 cmplw cr1,r9,r11 ; Was the context for this processor?
978 beq+ cr1,vsgoodcpu ; Facility last used on this processor...
979
980 b vsret ; Someone else claimed this...
981
982 .align 5
983
984vsgoodcpu: lwz r3,VMXsave(r12) ; Get the current vector savearea for the thread
985 lwz r10,liveVRS(r6) ; Get the right VRSave register
986 lwz r9,VMXlevel(r12) ; Get our current level indicator
987
988
989 cmplwi cr1,r3,0 ; Have we ever saved this facility context?
990 beq- cr1,vsneedone ; Never saved it, so we need an area...
991
992 lwz r8,SAVlevel(r3) ; Get the level this savearea is for
993 mr. r10,r10 ; Is VRsave set to 0?
994 cmplw cr1,r9,r8 ; Correct level?
995 bne- cr1,vsneedone ; Different level, so we need to save...
1c79356b 996
9bccf70c 997 bne+ vsret ; VRsave is non-zero so we need to keep what is saved...
0b4e3aa0 998
55e303ae 999 lwz r4,SAVprev+4(r3) ; Pick up the previous area
9bccf70c
A
1000 lwz r5,SAVlevel(r4) ; Get the level associated with save
1001 stw r4,VMXsave(r12) ; Dequeue this savearea
55e303ae 1002 li r4,0 ; Clear
9bccf70c
A
1003 stw r5,VMXlevel(r12) ; Save the level
1004
55e303ae 1005 stw r4,VMXowner(r12) ; Show no live context here
9bccf70c
A
1006 eieio
1007
55e303ae
A
1008vsbackout: mr r4,r0 ; restore the saved MSR
1009 b EXT(save_ret_wMSR) ; Toss the savearea and return from there...
9bccf70c
A
1010
1011 .align 5
1012
1013vsneedone: mr. r10,r10 ; Is VRsave set to 0?
1014 beq- vsret ; Yeah, they do not care about any of them...
1015
1016 bl EXT(save_get) ; Get a savearea for the context
1017
1018 mfsprg r6,0 ; Get back per_processor block
1019 li r4,SAVvector ; Get vector tag
1020 lwz r12,VMXowner(r6) ; Get back our context ID
1021 stb r4,SAVflags+2(r3) ; Mark this savearea as a vector
1022 mr. r12,r12 ; See if we were disowned while away. Very, very small chance of it...
1023 beq- vsbackout ; If disowned, just toss savearea...
1024 lwz r4,facAct(r12) ; Get the activation associated with live context
9bccf70c
A
1025 lwz r8,VMXsave(r12) ; Get the current top vector savearea
1026 stw r4,SAVact(r3) ; Indicate the right activation for this context
1027 lwz r9,VMXlevel(r12) ; Get our current level indicator again
1028 stw r3,VMXsave(r12) ; Set this as the most current floating point context
55e303ae 1029 stw r8,SAVprev+4(r3) ; And then chain this in front
9bccf70c
A
1030
1031 stw r9,SAVlevel(r3) ; Set level in savearea
55e303ae
A
1032 mfcr r12 ; save CRs across call to vr_store
1033 lwz r10,liveVRS(r6) ; Get the right VRSave register
1034
1035 bl vr_store ; store live VRs into savearea as required (uses r4-r11)
9bccf70c 1036
55e303ae
A
1037 mtcrf 255,r12 ; Restore the non-volatile CRs
1038 mtlr r2 ; restore return address
1039
1040vsret: mtmsr r0 ; Put interrupts on if they were and vector off
1c79356b
A
1041 isync
1042
1043 blr
1044
1045/*
1046 * vec_switch()
1047 *
1048 * Entered to handle the vector unavailable exception and
1049 * switch vector context
1050 *
1051 * This code is run with virtual address mode on and interrupts off.
1052 *
 1053 * Upon exit, the code returns to the user's context with the vector
1054 * facility turned on.
1055 *
1056 * ENTRY: VM switched ON
1057 * Interrupts OFF
1058 * State is saved in savearea pointed to by R4.
1059 * All other registers are free.
1060 *
1061 */
1062
9bccf70c
A
1063 .align 5
1064 .globl EXT(vec_switch)
1065
1066LEXT(vec_switch)
1c79356b
A
1067
1068#if DEBUG
1c79356b
A
1069 lis r3,hi16(EXT(vec_trap_count)) ; Get address of vector trap counter
1070 ori r3,r3,lo16(EXT(vec_trap_count)) ; Get address of vector trap counter
1071 lwz r1,0(r3)
1072 addi r1,r1,1
1073 stw r1,0(r3)
1c79356b
A
1074#endif /* DEBUG */
1075
9bccf70c 1076 mfsprg r26,0 ; Get the per_processor block
55e303ae
A
1077 mfmsr r19 ; Get the current MSR
1078 mfsprg r17,1 ; Get the current thread
1c79356b 1079
9bccf70c 1080 mr r25,r4 ; Save the entry savearea
9bccf70c 1081 oris r19,r19,hi16(MASK(MSR_VEC)) ; Enable the vector feature
55e303ae 1082 lwz r22,VMXowner(r26) ; Get the thread that owns the vector
9bccf70c
A
1083
1084 mtmsr r19 ; Enable vector instructions
1c79356b
A
1085 isync
1086
9bccf70c
A
1087 lwz r27,ACT_MACT_PCB(r17) ; Get the current level
1088 lwz r29,curctx(r17) ; Grab the current context anchor of the current thread
0b4e3aa0 1089
9bccf70c
A
1090; R22 has the "old" context anchor
1091; R29 has the "new" context anchor
0b4e3aa0 1092
1c79356b 1093#if FPVECDBG
9bccf70c
A
1094 lis r0,HIGH_ADDR(CutTrace) ; (TEST/DEBUG)
1095 li r2,0x5F01 ; (TEST/DEBUG)
1096 mr r3,r22 ; (TEST/DEBUG)
1097 mr r5,r29 ; (TEST/DEBUG)
1098 oris r0,r0,LOW_ADDR(CutTrace) ; (TEST/DEBUG)
1099 sc ; (TEST/DEBUG)
1c79356b
A
1100#endif
1101
9bccf70c 1102 lhz r16,PP_CPU_NUMBER(r26) ; Get the current CPU number
1c79356b 1103
9bccf70c 1104vsvretry: mr. r22,r22 ; See if there is any live vector status
1c79356b 1105
9bccf70c 1106 beq- vsnosave ; No live context, so nothing to save...
1c79356b 1107
9bccf70c 1108 isync ; Make sure we see this in the right order
1c79356b 1109
9bccf70c
A
1110 lwz r30,VMXsave(r22) ; Get the top savearea
1111 cmplw cr2,r22,r29 ; Are both old and new the same context?
1112 lwz r18,VMXcpu(r22) ; Get the last CPU we ran on
1113 cmplwi cr1,r30,0 ; Anything saved yet?
1114 cmplw r18,r16 ; Make sure we are on the right processor
1115 lwz r31,VMXlevel(r22) ; Get the context level
1c79356b 1116
9bccf70c
A
1117 lwz r10,liveVRS(r26) ; Get the right VRSave register
1118
1119 bne- vsnosave ; No, not on the same processor...
1120
1c79356b 1121;
9bccf70c
A
1122; Check to see if the live context has already been saved.
1123; Also check to see if all we are here just to re-enable the MSR
1124; and handle specially if so.
1c79356b 1125;
9bccf70c
A
1126
1127 cmplw r31,r27 ; See if the current and active levels are the same
1128 crand cr0_eq,cr2_eq,cr0_eq ; Remember if both the levels and contexts are the same
1129 li r8,0 ; Clear this
1c79356b 1130
9bccf70c
A
1131 beq- vsthesame ; New and old are the same, just go enable...
1132
1133 cmplwi cr2,r10,0 ; Check VRSave to see if we really need to save anything...
1134 beq- cr1,vsmstsave ; Not saved yet, go do it...
1c79356b 1135
9bccf70c 1136 lwz r11,SAVlevel(r30) ; Get the level of top saved context
1c79356b 1137
9bccf70c
A
1138 cmplw r31,r11 ; Are live and saved the same?
1139
1c79356b 1140#if FPVECDBG
9bccf70c
A
1141 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1142 li r2,0x5F02 ; (TEST/DEBUG)
1143 mr r3,r30 ; (TEST/DEBUG)
1144 mr r5,r31 ; (TEST/DEBUG)
1145 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1146 sc ; (TEST/DEBUG)
1c79356b 1147#endif
9bccf70c
A
1148
1149 bne- vsmstsave ; Live context has not been saved yet...
1150
1151 bne- cr2,vsnosave ; Live context saved and VRSave not 0, no save and keep context...
1c79356b 1152
55e303ae 1153 lwz r4,SAVprev+4(r30) ; Pick up the previous area
9bccf70c
A
 1154 li r5,0 ; Assume this is the only one (which should be the usual case)
1155 mr. r4,r4 ; Was this the only one?
1156 stw r4,VMXsave(r22) ; Dequeue this savearea
1157 beq+ vsonlyone ; This was the only one...
1158 lwz r5,SAVlevel(r4) ; Get the level associated with previous save
1159
1160vsonlyone: stw r5,VMXlevel(r22) ; Save the level
1161 stw r8,VMXowner(r26) ; Clear owner
1162 eieio
1163 mr r3,r30 ; Copy the savearea we are tossing
1164 bl EXT(save_ret) ; Toss the savearea
1165 b vsnosave ; Go load up the context...
1166
1167 .align 5
1c79356b 1168
9bccf70c
A
1169
1170vsmstsave: stw r8,VMXowner(r26) ; Clear owner
1171 eieio
1172 beq- cr2,vsnosave ; The VRSave was 0, so there is nothing to save...
1173
1174 bl EXT(save_get) ; Go get a savearea
1175
55e303ae
A
1176 mr. r31,r31 ; Are we saving the user state?
1177 la r15,VMXsync(r22) ; Point to the sync word
1178 beq++ vswusave ; Yeah, no need for lock...
1179;
1180; Here we make sure that the live context is not tossed while we are
1181; trying to push it. This can happen only for kernel context and
1182; then only by a race with act_machine_sv_free.
1183;
1184; We only need to hold this for a very short time, so no sniffing needed.
1185; If we find any change to the level, we just abandon.
1186;
1187vswsync: lwarx r19,0,r15 ; Get the sync word
1188 li r0,1 ; Get the lock
1189 cmplwi cr1,r19,0 ; Is it unlocked?
1190 stwcx. r0,0,r15 ; Store lock and test reservation
1191 cror cr0_eq,cr1_eq,cr0_eq ; Combine lost reservation and previously locked
1192 bne-- vswsync ; Try again if lost reservation or locked...
de355530 1193
55e303ae
A
1194 isync ; Toss speculation
1195
1196 lwz r0,VMXlevel(r22) ; Pick up the level again
1197 li r7,0 ; Get unlock value
1198 cmplw r0,r31 ; Same level?
1199 beq++ vswusave ; Yeah, we expect it to be...
1200
1201 stw r7,VMXsync(r22) ; Unlock lock. No need to sync here
1202
1203 bl EXT(save_ret) ; Toss save area because we are abandoning save
1204 b vsnosave ; Skip the save...
1205
1206 .align 5
1207
1208vswusave: lwz r12,facAct(r22) ; Get the activation associated with the context
1209 stw r3,VMXsave(r22) ; Set this as the latest context savearea for the thread
1210 mr. r31,r31 ; Check again if we were user level
1211 stw r30,SAVprev+4(r3) ; Point us to the old context
9bccf70c
A
1212 stw r31,SAVlevel(r3) ; Tag our level
1213 li r7,SAVvector ; Get the vector ID
1214 stw r12,SAVact(r3) ; Make sure we point to the right guy
1215 stb r7,SAVflags+2(r3) ; Set that we have a vector save area
1c79356b 1216
55e303ae
A
1217 li r7,0 ; Get the unlock value
1218
1219 beq-- vswnulock ; Skip unlock if user (we did not lock it)...
1220 eieio ; Make sure that these updates make it out
1221 stw r7,VMXsync(r22) ; Unlock it.
1222
1223vswnulock:
1224
1c79356b 1225#if FPVECDBG
9bccf70c
A
1226 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1227 li r2,0x5F03 ; (TEST/DEBUG)
1228 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1229 sc ; (TEST/DEBUG)
1c79356b
A
1230#endif
1231
9bccf70c 1232 lwz r10,liveVRS(r26) ; Get the right VRSave register
55e303ae 1233 bl vr_store ; store VRs into savearea according to vrsave (uses r4-r11)
1c79356b
A
1234
1235
9bccf70c
A
1236;
1237; The context is all saved now and the facility is free.
1238;
 1239; If we do not have a saved context to load, we need to fill the registers with junk, because this level has
1240; never used them before and some thieving bastard could hack the old values
1241; of some thread! Just imagine what would happen if they could! Why, nothing
1242; would be safe! My God! It is terrifying!
1243;
1244; Also, along the way, thanks to Ian Ollmann, we generate the 0x7FFFDEAD (QNaNbarbarian)
1245; constant that we may need to fill unused vector registers.
1246;
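;
; Added worked check of the constant generation interleaved below: vspltisb
; v31,-10 splats 0xF6 (1111 0110); rotating each byte left 5 (vrlb by 0x05050505)
; gives 0xDE, so v31 = 0xDEDEDEDE. vsubuhm subtracts 0x0004 from each halfword
; (0xDEDE - 0x0004 = 0xDEDA), and vrlb by v29 (0x00040004) rotates only the odd
; bytes left 4, turning 0xDA into 0xAD, i.e. 0xDEADDEAD. vpkpx on 0xFEFEFEFE
; produces 0x7FFF7FFF, and vmrghh then interleaves the high halfwords of the two
; to yield the final 0x7FFFDEAD pattern.
;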
1c79356b 1247
0b4e3aa0
A
1248
1249
0b4e3aa0 1250
9bccf70c
A
1251vsnosave: vspltisb v31,-10 ; Get 0xF6F6F6F6
1252 lwz r15,ACT_MACT_PCB(r17) ; Get the current level of the "new" one
1253 vspltisb v30,5 ; Get 0x05050505
1254 lwz r19,VMXcpu(r29) ; Get the last CPU we ran on
1255 vspltish v29,4 ; Get 0x00040004
1256 lwz r14,VMXsave(r29) ; Point to the top of the "new" context stack
1257 vrlb v31,v31,v30 ; Get 0xDEDEDEDE
1258
1259 stw r16,VMXcpu(r29) ; Claim context for us
1260 eieio
1c79356b
A
1261
1262#if FPVECDBG
9bccf70c
A
1263 lwz r13,VMXlevel(r29) ; (TEST/DEBUG)
1264 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1265 li r2,0x5F04 ; (TEST/DEBUG)
1266 mr r1,r15 ; (TEST/DEBUG)
1267 mr r3,r14 ; (TEST/DEBUG)
1268 mr r5,r13 ; (TEST/DEBUG)
1269 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1270 sc ; (TEST/DEBUG)
1c79356b 1271#endif
9bccf70c
A
1272
1273 lis r18,hi16(EXT(per_proc_info)) ; Set base per_proc
1274 vspltisb v28,-2 ; Get 0xFEFEFEFE
1275 mulli r19,r19,ppSize ; Find offset to the owner per_proc
1276 vsubuhm v31,v31,v29 ; Get 0xDEDADEDA
1277 ori r18,r18,lo16(EXT(per_proc_info)) ; Set base per_proc
1278 vpkpx v30,v28,v3 ; Get 0x7FFF7FFF
1279 li r16,VMXowner ; Displacement to vector owner
1280 add r19,r18,r19 ; Point to the owner per_proc
1281 vrlb v31,v31,v29 ; Get 0xDEADDEAD
9bccf70c
A
1282
1283vsinvothr: lwarx r18,r16,r19 ; Get the owner
55e303ae
A
1284
1285 sub r0,r18,r29 ; Subtract one from the other
1286 sub r11,r29,r18 ; Subtract the other from the one
1287 or r11,r11,r0 ; Combine them
 1288 srawi r11,r11,31 ; Get a 0 if equal or -1 if not
1289 and r18,r18,r11 ; Make 0 if same, unchanged if not
1290 stwcx. r18,r16,r19 ; Try to invalidate it
1291 bne-- vsinvothr ; Try again if there was a collision...
9bccf70c 1292
55e303ae 1293 cmplwi cr1,r14,0 ; Do we possibly have some context to load?
9bccf70c
A
1294 vmrghh v31,v30,v31 ; Get 0x7FFFDEAD. V31 keeps this value until the bitter end
1295 stw r15,VMXlevel(r29) ; Set the "new" active level
1296 eieio
1297 stw r29,VMXowner(r26) ; Mark us as having the live context
1c79356b 1298
55e303ae 1299 beq-- cr1,ProtectTheAmericanWay ; Nothing to restore, first time use...
9bccf70c 1300
55e303ae 1301 lwz r3,SAVprev+4(r14) ; Get the previous context
9bccf70c
A
1302 lwz r0,SAVlevel(r14) ; Get the level of first facility savearea
1303 cmplw r0,r15 ; Top level correct to load?
55e303ae 1304 bne-- ProtectTheAmericanWay ; No, go initialize...
1c79356b 1305
9bccf70c
A
1306 stw r3,VMXsave(r29) ; Pop the context (we will toss the savearea later)
1307
1c79356b 1308#if FPVECDBG
9bccf70c
A
1309 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1310 li r2,0x5F05 ; (TEST/DEBUG)
1311 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1312 sc ; (TEST/DEBUG)
1c79356b
A
1313#endif
1314
de355530 1315 lwz r10,savevrvalid(r14) ; Get the valid VRs in the savearea
55e303ae 1316 lwz r22,savevrsave(r25) ; Get the most current VRSAVE
9bccf70c 1317 and r10,r10,r22 ; Figure out just what registers need to be loaded
55e303ae
A
1318 mr r3,r14 ; r3 <- ptr to savearea with VRs
1319 bl vr_load ; load VRs from save area based on vrsave in r10
1320
1321 bl EXT(save_ret) ; Toss the save area after loading VRs
de355530 1322
55e303ae 1323vrenable: lwz r8,savesrr1+4(r25) ; Get the msr of the interrupted guy
9bccf70c 1324 oris r8,r8,hi16(MASK(MSR_VEC)) ; Enable the vector facility
d7e50217
A
1325 lwz r10,ACT_MACT_SPF(r17) ; Get the act special flags
1326 lwz r11,spcFlags(r26) ; Get per_proc spec flags cause not in sync with act
1c79356b 1327 oris r10,r10,hi16(vectorUsed|vectorCng) ; Set that we used vectors
d7e50217 1328 oris r11,r11,hi16(vectorUsed|vectorCng) ; Set that we used vectors
9bccf70c 1329 rlwinm. r0,r8,0,MSR_PR_BIT,MSR_PR_BIT ; See if we are doing this for user state
55e303ae
A
1330 stw r8,savesrr1+4(r25) ; Set the msr of the interrupted guy
1331 mr r3,r25 ; Pass virtual address of the savearea
b4c24cb9 1332 beq- vrnuser ; We are not user state...
9bccf70c 1333 stw r10,ACT_MACT_SPF(r17) ; Set the activation copy
d7e50217 1334 stw r11,spcFlags(r26) ; Set per_proc copy
1c79356b
A
1335
1336vrnuser:
1337#if FPVECDBG
9bccf70c
A
1338 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1339 li r2,0x5F07 ; (TEST/DEBUG)
1340 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1341 sc ; (TEST/DEBUG)
1c79356b 1342#endif
9bccf70c 1343 b EXT(exception_exit) ; Exit to the fray...
1c79356b
A
1344
1345/*
1346 * Initialize the registers to some bogus value
1c79356b
A
1347 */
1348
1349ProtectTheAmericanWay:
1350
1351#if FPVECDBG
9bccf70c
A
1352 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1353 li r2,0x5F06 ; (TEST/DEBUG)
1354 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1355 sc ; (TEST/DEBUG)
1c79356b 1356#endif
9bccf70c
A
1357
1358 vor v0,v31,v31 ; Copy into the next register
1359 vor v1,v31,v31 ; Copy into the next register
1360 vor v2,v31,v31 ; Copy into the next register
1361 vor v3,v31,v31 ; Copy into the next register
1362 vor v4,v31,v31 ; Copy into the next register
1363 vor v5,v31,v31 ; Copy into the next register
1364 vor v6,v31,v31 ; Copy into the next register
1365 vor v7,v31,v31 ; Copy into the next register
1366 vor v8,v31,v31 ; Copy into the next register
1367 vor v9,v31,v31 ; Copy into the next register
1368 vor v10,v31,v31 ; Copy into the next register
1369 vor v11,v31,v31 ; Copy into the next register
1370 vor v12,v31,v31 ; Copy into the next register
1371 vor v13,v31,v31 ; Copy into the next register
1372 vor v14,v31,v31 ; Copy into the next register
1373 vor v15,v31,v31 ; Copy into the next register
1374 vor v16,v31,v31 ; Copy into the next register
1375 vor v17,v31,v31 ; Copy into the next register
1376 vor v18,v31,v31 ; Copy into the next register
1377 vor v19,v31,v31 ; Copy into the next register
1378 vor v20,v31,v31 ; Copy into the next register
1379 vor v21,v31,v31 ; Copy into the next register
1380 vor v22,v31,v31 ; Copy into the next register
1381 vor v23,v31,v31 ; Copy into the next register
1382 vor v24,v31,v31 ; Copy into the next register
1383 vor v25,v31,v31 ; Copy into the next register
1384 vor v26,v31,v31 ; Copy into the next register
1385 vor v27,v31,v31 ; Copy into the next register
1386 vor v28,v31,v31 ; Copy into the next register
1387 vor v29,v31,v31 ; Copy into the next register
1388 vor v30,v31,v31 ; Copy into the next register
1389 b vrenable ; Finish setting it all up...
1390
1391
1392
1393;
1394; We get here when we are switching to the same context at the same level and the context
 1395; is still live. Essentially, all we are doing is turning on the facility. It may have
1396; gotten turned off due to doing a context save for the current level or a context switch
1397; back to the live guy.
1398;
1399
1400 .align 5
1401
1402vsthesame:
1403
1404#if FPVECDBG
1405 lis r0,hi16(CutTrace) ; (TEST/DEBUG)
1406 li r2,0x5F0A ; (TEST/DEBUG)
1407 oris r0,r0,lo16(CutTrace) ; (TEST/DEBUG)
1408 sc ; (TEST/DEBUG)
1409#endif
1410 beq- cr1,vrenable ; Not saved yet, nothing to pop, go enable and exit...
1411
1412 lwz r11,SAVlevel(r30) ; Get the level of top saved context
55e303ae 1413 lwz r14,SAVprev+4(r30) ; Get the previous savearea
9bccf70c
A
1414
1415 cmplw r11,r31 ; Are live and saved the same?
1416
1417 bne+ vrenable ; Level not the same, nothing to pop, go enable and exit...
1418
1419 mr r3,r30 ; Get the old savearea (we popped it before)
55e303ae 1420 stw r11,VMXsave(r22) ; Pop the vector stack
9bccf70c
A
1421 bl EXT(save_ret) ; Toss it
1422 b vrenable ; Go enable and exit...
1423
1424
1425;
1426; This function invalidates any live vector context for the passed in facility_context.
1427; This is intended to be called just before act_machine_sv_free tosses saveareas.
1c79356b 1428;
1c79356b 1429
9bccf70c
A
1430 .align 5
1431 .globl EXT(toss_live_vec)
1c79356b 1432
9bccf70c
A
1433LEXT(toss_live_vec)
1434
55e303ae 1435 lis r0,hi16(MASK(MSR_VEC)) ; Get VEC
9bccf70c 1436 mfmsr r9 ; Get the MSR
55e303ae
A
1437 ori r0,r0,lo16(MASK(MSR_FP)) ; Add in FP
1438 rlwinm. r8,r9,0,MSR_VEC_BIT,MSR_VEC_BIT ; Are vectors on right now?
1439 andc r9,r9,r0 ; Force off VEC and FP
1440 ori r0,r0,lo16(MASK(MSR_EE)) ; Turn off EE
1441 andc r0,r9,r0 ; Turn off EE now
9bccf70c
A
1442 mtmsr r0 ; No interruptions
1443 isync
1444 beq+ tlvnotours ; Vector off, can not be live here...
1c79356b 1445
9bccf70c
A
1446 mfsprg r8,0 ; Get the per proc
1447
1448;
1449; Note that at this point, since vecs are on, we are the owner
1450; of live state on this processor
1451;
1452
1453 lwz r6,VMXowner(r8) ; Get the thread that owns the vector
1454 li r0,0 ; Clear this just in case we need it
1455 cmplw r6,r3 ; Are we tossing our own context?
1456 bne- tlvnotours ; Nope...
1457
1458 vspltish v1,1 ; Turn on the non-Java bit and saturate
1459 vspltisw v0,1 ; Turn on the saturate bit
1460 vxor v1,v1,v0 ; Turn off saturate
1461 mtspr vrsave,r0 ; Clear VRSAVE
1462 mtvscr v1 ; Set the non-java, no saturate status
1463
1464tlvnotours: lwz r11,VMXcpu(r3) ; Get the cpu on which we last loaded context
1465 lis r12,hi16(EXT(per_proc_info)) ; Set base per_proc
1466 mulli r11,r11,ppSize ; Find offset to the owner per_proc
1467 ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
1468 li r10,VMXowner ; Displacement to vector owner
1469 add r11,r12,r11 ; Point to the owner per_proc
1470 li r0,0 ; Set a 0 to invalidate context
1471
1472tlvinvothr: lwarx r12,r10,r11 ; Get the owner
1473
1474 sub r0,r12,r3 ; Subtract one from the other
1475 sub r8,r3,r12 ; Subtract the other from the one
1476 or r8,r8,r0 ; Combine them
1477 srawi r8,r8,31 ; Get a 0 if equal or -1 if not
1478 and r12,r12,r8 ; Make 0 if same, unchanged if not
1479 stwcx. r12,r10,r11 ; Try to invalidate it
1480 bne-- tlvinvothr ; Try again if there was a collision...
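;
; The sub/sub/or/srawi/and sequence above is a branch-free compare-and-clear,
; which keeps a conditional branch out of the lwarx/stwcx. window. In C terms
; (a sketch only; 32-bit values, arithmetic right shift assumed):
;
;     mask  = (int32_t)((owner - ctx) | (ctx - owner)) >> 31;   /* 0 if equal, -1 if not */
;     owner = owner & mask;                                     /* 0 if same, else unchanged */
;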
1481
1482 mtmsr r9 ; Restore interruptions
1483 isync ; Could be turning off vectors here
1484 blr ; Leave....
1485
1486#if 0
1487;
1488; This function invalidates any live vector context for the passed in facility_context
1489; if the level is current. It also tosses the corresponding savearea if there is one.
1490; This function is primarily used whenever we detect a VRSave that is all zeros.
1491;
1492
1493 .align 5
1494 .globl EXT(vec_trash)
1495
1496LEXT(vec_trash)
1497
1498 lwz r12,facAct(r3) ; Get the activation
1499 lwz r11,VMXlevel(r3) ; Get the context level
1500 lwz r10,ACT_MACT_PCB(r12) ; Grab the current level for the thread
1501 lwz r9,VMXsave(r3) ; Get the savearea, if any
1502 cmplw r10,r11 ; Are we at the right level?
1503 cmplwi cr1,r9,0 ; Remember if there is a savearea
1504 bnelr+ ; No, we do nothing...
1505
1506 lwz r11,VMXcpu(r3) ; Get the cpu on which we last loaded context
1507 lis r12,hi16(EXT(per_proc_info)) ; Set base per_proc
1508 mulli r11,r11,ppSize ; Find offset to the owner per_proc
1509 ori r12,r12,lo16(EXT(per_proc_info)) ; Set base per_proc
1510 li r10,VMXowner ; Displacement to vector owner
1511 add r11,r12,r11 ; Point to the owner per_proc
1512
1513vtinvothr: lwarx r12,r10,r11 ; Get the owner
1514
1515 sub r0,r12,r3 ; Subtract one from the other
1516 sub r8,r3,r12 ; Subtract the other from the one
1517 or r8,r8,r0 ; Combine them
1518 srawi r8,r8,31 ; Get a 0 if equal or -1 if not
1519 and r12,r12,r8 ; Make 0 if same, unchanged if not
1520 stwcx. r12,r10,r11 ; Try to invalidate it
1521 bne-- vtinvothr ; Try again if there was a collision...
1522
1523
1524 beqlr++ cr1 ; Leave if there is no savearea
1525 lwz r8,SAVlevel(r9) ; Get the level of the savearea
1526 cmplw r8,r11 ; Savearea for the current level?
1527 bnelr++ ; No, nothing to release...
1528
1529 lwz r8,SAVprev+4(r9) ; Pick up the previous area
1530 mr. r8,r8 ; Is there a previous?
1531 beq-- vtnoprev ; Nope...
1532 lwz r7,SAVlevel(r8) ; Get the level associated with save
1533
1534vtnoprev: stw r8,VMXsave(r3) ; Dequeue this savearea
1535 stw r7,VMXlevel(r3) ; Pop the level
1536
1537 mr r3,r9 ; Get the savearea to release
1538 b EXT(save_ret) ; Go and toss the save area (note, we will return from there)...
1539#endif
1540
1541;
1542; Just some test code to force vector and/or floating point in the kernel
1543;
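;
; Roughly (a sketch): fmr f0,f0 marks floating point live, and the low six bits
; of the timebase pick a pseudo-random vrsave before vor v0,v0,v0 marks the
; vector unit live:
;
;     n = tb & 0x3F;
;     vrsave = (n < 32) ? (1u << n) : 0;      /* zero about half the time */
;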
1544
1545 .align 5
1546 .globl EXT(fctx_test)
1547
1548LEXT(fctx_test)
1549
1550 mfsprg r3,1 ; Get the current thread
1551 mr. r3,r3 ; Are we actually up and running?
1552 beqlr- ; No...
1553
1554 fmr f0,f0 ; Use floating point
1555 mftb r4 ; Get time base for a random number
1556 li r5,1 ; Get a potential vrsave to use
1557 andi. r4,r4,0x3F ; Get a number from 0 - 63
1558 slw r5,r5,r4 ; Choose a register to save (should be 0 half the time)
1559 mtspr vrsave,r5 ; Set VRSave
1560 vor v0,v0,v0 ; Use vectors
1561 blr
1562
1563
1564// *******************
1565// * f p _ s t o r e *
1566// *******************
1567//
1568// Store FPRs into a save area. Called by fpu_save and fpu_switch.
1569//
1570// When called:
1571// floating pt is enabled
1572// r3 = ptr to save area
1573//
1574// We destroy:
1575// r11.
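//
// The stores below are grouped by cache line: each line of the FPR save area
// is established with dcbz/dcbz128 just before (or shortly before) the stfd's
// that fill it, so the line is allocated in the cache without first being read
// from memory. Roughly, as a C-style sketch (hypothetical helpers, not the
// kernel's actual interface):
//
//     for (off = 0; off < 32 * sizeof(double); off += linesize) {
//         dcbz(fpr_area + off);                   // allocate the line, no read
//         /* stfd the FPRs that fall within this line */
//     }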
1576
1577fp_store:
1578 mfsprg r11,2 ; get feature flags
1579 mtcrf 0x02,r11 ; put cache line size bits in cr6
1580 la r11,savefp0(r3) ; point to 1st line
1581 dcbz128 0,r11 ; establish 1st line no matter what linesize is
1582 bt-- pf32Byteb,fp_st32 ; skip if a 32-byte machine
1583
1584// Store the FPRs on a 128-byte machine.
1585
1586 stfd f0,savefp0(r3)
1587 stfd f1,savefp1(r3)
1588 la r11,savefp16(r3) ; Point to the 2nd cache line
1589 stfd f2,savefp2(r3)
1590 stfd f3,savefp3(r3)
1591 dcbz128 0,r11 ; establish 2nd line
1592 stfd f4,savefp4(r3)
1593 stfd f5,savefp5(r3)
1594 stfd f6,savefp6(r3)
1595 stfd f7,savefp7(r3)
1596 stfd f8,savefp8(r3)
1597 stfd f9,savefp9(r3)
1598 stfd f10,savefp10(r3)
1599 stfd f11,savefp11(r3)
1600 stfd f12,savefp12(r3)
1601 stfd f13,savefp13(r3)
1602 stfd f14,savefp14(r3)
1603 stfd f15,savefp15(r3)
1604 stfd f16,savefp16(r3)
1605 stfd f17,savefp17(r3)
1606 stfd f18,savefp18(r3)
1607 stfd f19,savefp19(r3)
1608 stfd f20,savefp20(r3)
1609 stfd f21,savefp21(r3)
1610 stfd f22,savefp22(r3)
1611 stfd f23,savefp23(r3)
1612 stfd f24,savefp24(r3)
1613 stfd f25,savefp25(r3)
1614 stfd f26,savefp26(r3)
1615 stfd f27,savefp27(r3)
1616 stfd f28,savefp28(r3)
1617 stfd f29,savefp29(r3)
1618 stfd f30,savefp30(r3)
1619 stfd f31,savefp31(r3)
1620 blr
1621
1622// Store FPRs on a 32-byte machine.
1623
1624fp_st32:
1625 la r11,savefp4(r3) ; Point to the 2nd line
1626 stfd f0,savefp0(r3)
1627 dcbz 0,r11 ; Allocate cache
1628 stfd f1,savefp1(r3)
1629 stfd f2,savefp2(r3)
1630 la r11,savefp8(r3) ; Point to the 3rd line
1631 stfd f3,savefp3(r3)
1632 dcbz 0,r11 ; Allocate cache
1633 stfd f4,savefp4(r3)
1634 stfd f5,savefp5(r3)
1635 stfd f6,savefp6(r3)
1636 la r11,savefp12(r3) ; Point to the 4th line
1637 stfd f7,savefp7(r3)
1638 dcbz 0,r11 ; Allocate cache
1639 stfd f8,savefp8(r3)
1640 stfd f9,savefp9(r3)
1641 stfd f10,savefp10(r3)
1642 la r11,savefp16(r3) ; Point to the 5th line
1643 stfd f11,savefp11(r3)
1644 dcbz 0,r11 ; Allocate cache
1645 stfd f12,savefp12(r3)
1646 stfd f13,savefp13(r3)
1647 stfd f14,savefp14(r3)
1648 la r11,savefp20(r3) ; Point to the 6th line
1649 stfd f15,savefp15(r3)
1650 dcbz 0,r11 ; Allocate cache
1651 stfd f16,savefp16(r3)
1652 stfd f17,savefp17(r3)
1653 stfd f18,savefp18(r3)
1654 la r11,savefp24(r3) ; Point to the 7th line
1655 stfd f19,savefp19(r3)
1656 dcbz 0,r11 ; Allocate cache
1657 stfd f20,savefp20(r3)
1658
1659 stfd f21,savefp21(r3)
1660 stfd f22,savefp22(r3)
1661 la r11,savefp28(r3) ; Point to the 8th line
1662 stfd f23,savefp23(r3)
1663 dcbz 0,r11 ; allocate it
1664 stfd f24,savefp24(r3)
1665 stfd f25,savefp25(r3)
1666 stfd f26,savefp26(r3)
1667 stfd f27,savefp27(r3)
1668
1669 stfd f28,savefp28(r3)
1670 stfd f29,savefp29(r3)
1671 stfd f30,savefp30(r3)
1672 stfd f31,savefp31(r3)
1673 blr
1674
1675
1676// *******************
1677// * v r _ s t o r e *
1678// *******************
1679//
1680// Store VRs into savearea, according to bits set in passed vrsave bitfield. This routine is used
1681// both by vec_save and vec_switch. In order to minimize conditional branches and touching in
1682// unnecessary cache blocks, we either save all or none of the VRs in a block. We have separate paths
1683// for each cache block size.
1684//
1685// When called:
1686// interrupts are off, vectors are enabled
1687// r3 = ptr to save area
1688// r10 = vrsave (not 0)
1689//
1690// We destroy:
1691// r4 - r11, all CRs.
1692
1693vr_store:
1694 mfsprg r9,2 ; get feature flags
1695 stw r10,savevrvalid(r3) ; Save the validity information in savearea
1696 slwi r8,r10,1 ; Shift over 1
1697 mtcrf 0x02,r9 ; put cache line size bits in cr6 where we can test
1698 or r8,r10,r8 ; r8 <- even bits show which pairs are in use
1699 bt-- pf32Byteb,vr_st32 ; skip if 32-byte cacheline processor
1700
1701
1702; Save vectors on a 128-byte linesize processor. We save all or none of the 8 registers in each of
1703; the four cache lines. This minimizes mispredicted branches yet handles cache lines optimally.
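;
; The CR bits tested below come from folding vrsave so that the first bit of each
; group of eight is set if any VR in that group is live. As a C-style sketch
; (vrsave bit 0 is the most significant bit and names v0):
;
;     g2 = vrsave | (vrsave << 1);    /* even bits: either VR of the pair live */
;     g4 = g2     | (g2 << 2);        /* bits 0,4,8,...: any of the four live  */
;     g8 = g4     | (g4 << 4);        /* bits 0,8,16,24: any of the eight live */
;
; Bits 0, 8, 16 and 24 of g8 then drive the four all-or-nothing cache line branches.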
1704
1705 slwi r7,r8,2 ; shift groups-of-2 over by 2
1706 li r4,16 ; load offsets for X-form stores
1707 or r8,r7,r8 ; show if any in group of 4 are in use
1708 li r5,32
1709 slwi r7,r8,4 ; shift groups-of-4 over by 4
1710 li r6,48
1711 or r11,r7,r8 ; show if any in group of 8 are in use
1712 li r7,64
1713 mtcrf 0x80,r11 ; set CRs one at a time (faster)
1714 li r8,80
1715 mtcrf 0x20,r11
1716 li r9,96
1717 mtcrf 0x08,r11
1718 li r10,112
1719 mtcrf 0x02,r11
1720
1721 bf 0,vr_st64b ; skip if none of vr0-vr7 are in use
1722 la r11,savevr0(r3) ; get address of this group of registers in save area
1723 dcbz128 0,r11 ; zero the line
1724 stvxl v0,0,r11 ; save 8 VRs in the line
1725 stvxl v1,r4,r11
1726 stvxl v2,r5,r11
1727 stvxl v3,r6,r11
1728 stvxl v4,r7,r11
1729 stvxl v5,r8,r11
1730 stvxl v6,r9,r11
1731 stvxl v7,r10,r11
1732
1733vr_st64b:
1734 bf 8,vr_st64c ; skip if none of vr8-vr15 are in use
1735 la r11,savevr8(r3) ; get address of this group of registers in save area
1736 dcbz128 0,r11 ; zero the line
1737 stvxl v8,0,r11 ; save 8 VRs in the line
1738 stvxl v9,r4,r11
1739 stvxl v10,r5,r11
1740 stvxl v11,r6,r11
1741 stvxl v12,r7,r11
1742 stvxl v13,r8,r11
1743 stvxl v14,r9,r11
1744 stvxl v15,r10,r11
1745
1746vr_st64c:
1747 bf 16,vr_st64d ; skip if none of vr16-vr23 are in use
1748 la r11,savevr16(r3) ; get address of this group of registers in save area
1749 dcbz128 0,r11 ; zero the line
1750 stvxl v16,0,r11 ; save 8 VRs in the line
1751 stvxl v17,r4,r11
1752 stvxl v18,r5,r11
1753 stvxl v19,r6,r11
1754 stvxl v20,r7,r11
1755 stvxl v21,r8,r11
1756 stvxl v22,r9,r11
1757 stvxl v23,r10,r11
1758
1759vr_st64d:
1760 bflr 24 ; done if none of vr24-vr31 are in use
1761 la r11,savevr24(r3) ; get address of this group of registers in save area
1762 dcbz128 0,r11 ; zero the line
1763 stvxl v24,0,r11 ; save 8 VRs in the line
1764 stvxl v25,r4,r11
1765 stvxl v26,r5,r11
1766 stvxl v27,r6,r11
1767 stvxl v28,r7,r11
1768 stvxl v29,r8,r11
1769 stvxl v30,r9,r11
1770 stvxl v31,r10,r11
1771 blr
1772
1773; Save vectors on a 32-byte linesize processor. We save in 16 groups of 2: we either save both
1774; or neither in each group. This cuts down on conditional branches.
1775; r8 = bitmask with bit n set (for even n) if either of that pair of VRs is in use
1776; r3 = savearea
1777
1778vr_st32:
1779 mtcrf 0xFF,r8 ; set CR bits so we can branch on them
1780 li r4,16 ; load offset for X-form stores
1781
1782 bf 0,vr_st32b ; skip if neither VR in this pair is in use
1783 la r11,savevr0(r3) ; get address of this group of registers in save area
1784 dcba 0,r11 ; establish the line wo reading it
1785 stvxl v0,0,r11 ; save the two VRs in the line
1786 stvxl v1,r4,r11
1787
1788vr_st32b:
1789 bf 2,vr_st32c ; skip if neither VR in this pair is in use
1790 la r11,savevr2(r3) ; get address of this group of registers in save area
1791 dcba 0,r11 ; establish the line wo reading it
1792 stvxl v2,0,r11 ; save the two VRs in the line
1793 stvxl v3,r4,r11
1794
1795vr_st32c:
1796 bf 4,vr_st32d ; skip if neither VR in this pair is in use
1797 la r11,savevr4(r3) ; get address of this group of registers in save area
1798 dcba 0,r11 ; establish the line wo reading it
1799 stvxl v4,0,r11 ; save the two VRs in the line
1800 stvxl v5,r4,r11
1801
1802vr_st32d:
1803 bf 6,vr_st32e ; skip if neither VR in this pair is in use
1804 la r11,savevr6(r3) ; get address of this group of registers in save area
1805 dcba 0,r11 ; establish the line wo reading it
1806 stvxl v6,0,r11 ; save the two VRs in the line
1807 stvxl v7,r4,r11
1808
1809vr_st32e:
1810 bf 8,vr_st32f ; skip if neither VR in this pair is in use
1811 la r11,savevr8(r3) ; get address of this group of registers in save area
1812 dcba 0,r11 ; establish the line wo reading it
1813 stvxl v8,0,r11 ; save the two VRs in the line
1814 stvxl v9,r4,r11
1815
1816vr_st32f:
1817 bf 10,vr_st32g ; skip if neither VR in this pair is in use
1818 la r11,savevr10(r3) ; get address of this group of registers in save area
1819 dcba 0,r11 ; establish the line wo reading it
1820 stvxl v10,0,r11 ; save the two VRs in the line
1821 stvxl v11,r4,r11
1822
1823vr_st32g:
1824 bf 12,vr_st32h ; skip if neither VR in this pair is in use
1825 la r11,savevr12(r3) ; get address of this group of registers in save area
1826 dcba 0,r11 ; establish the line wo reading it
1827 stvxl v12,0,r11 ; save the two VRs in the line
1828 stvxl v13,r4,r11
1829
1830vr_st32h:
1831 bf 14,vr_st32i ; skip if neither VR in this pair is in use
1832 la r11,savevr14(r3) ; get address of this group of registers in save area
1833 dcba 0,r11 ; establish the line wo reading it
1834 stvxl v14,0,r11 ; save the two VRs in the line
1835 stvxl v15,r4,r11
1836
1837vr_st32i:
1838 bf 16,vr_st32j ; skip if neither VR in this pair is in use
1839 la r11,savevr16(r3) ; get address of this group of registers in save area
1840 dcba 0,r11 ; establish the line wo reading it
1841 stvxl v16,0,r11 ; save the two VRs in the line
1842 stvxl v17,r4,r11
1843
1844vr_st32j:
1845 bf 18,vr_st32k ; skip if neither VR in this pair is in use
1846 la r11,savevr18(r3) ; get address of this group of registers in save area
1847 dcba 0,r11 ; establish the line wo reading it
1848 stvxl v18,0,r11 ; save the two VRs in the line
1849 stvxl v19,r4,r11
1850
1851vr_st32k:
1852 bf 20,vr_st32l ; skip if neither VR in this pair is in use
1853 la r11,savevr20(r3) ; get address of this group of registers in save area
1854 dcba 0,r11 ; establish the line wo reading it
1855 stvxl v20,0,r11 ; save the two VRs in the line
1856 stvxl v21,r4,r11
1857
1858vr_st32l:
1859 bf 22,vr_st32m ; skip if neither VR in this pair is in use
1860 la r11,savevr22(r3) ; get address of this group of registers in save area
1861 dcba 0,r11 ; establish the line wo reading it
1862 stvxl v22,0,r11 ; save the two VRs in the line
1863 stvxl v23,r4,r11
1864
1865vr_st32m:
1866 bf 24,vr_st32n ; skip if neither VR in this pair is in use
1867 la r11,savevr24(r3) ; get address of this group of registers in save area
1868 dcba 0,r11 ; establish the line wo reading it
1869 stvxl v24,0,r11 ; save the two VRs in the line
1870 stvxl v25,r4,r11
1871
1872vr_st32n:
1873 bf 26,vr_st32o ; skip if neither VR in this pair is in use
1874 la r11,savevr26(r3) ; get address of this group of registers in save area
1875 dcba 0,r11 ; establish the line wo reading it
1876 stvxl v26,0,r11 ; save the two VRs in the line
1877 stvxl v27,r4,r11
1878
1879vr_st32o:
1880 bf 28,vr_st32p ; skip if neither VR in this pair is in use
1881 la r11,savevr28(r3) ; get address of this group of registers in save area
1882 dcba 0,r11 ; establish the line wo reading it
1883 stvxl v28,0,r11 ; save the two VRs in the line
1884 stvxl v29,r4,r11
1885
1886vr_st32p:
1887 bflr 30 ; done if neither VR in this pair is in use
1888 la r11,savevr30(r3) ; get address of this group of registers in save area
1889 dcba 0,r11 ; establish the line wo reading it
1890 stvxl v30,0,r11 ; save the two VRs in the line
1891 stvxl v31,r4,r11
1892 blr
1893
1894
1895// *****************
1896// * v r _ l o a d *
1897// *****************
1898//
1899// Load live VRs from a savearea, according to bits set in a passed vector. This is the reverse
1900// of "vr_store". Like it, we avoid touching unnecessary cache blocks and minimize conditional
1901// branches by loading all VRs from a cache line, if we have to load any. If we don't load the VRs
1902// in a cache line, we bug them. Note that this behavior is slightly different from earlier kernels,
1903// which would bug all VRs that aren't live.
1904//
1905// When called:
1906// interrupts are off, vectors are enabled
1907// r3 = ptr to save area
1908// r10 = vector of live regs to load (ie, savevrsave & savevrvalid, may be 0)
1909// v31 = bugbug constant (0x7FFFDEAD7FFFDEAD7FFFDEAD7FFFDEAD)
1910//
1911// We destroy:
1912// r4 - r11, all CRs.
1913
1914vr_load:
1915 mfsprg r9,2 ; get feature flags
1916 li r6,1 ; assuming 32-byte, get (#VRs)-1 in a cacheline
1917 mtcrf 0x02,r9 ; set cache line size bits in cr6
1918 lis r7,0xC000 ; assuming 32-byte, set bits 0-1
1919 bt-- pf32Byteb,vr_ld0 ; skip if 32-byte cacheline processor
1920 li r6,7 ; 128-byte machines have 8 VRs in a cacheline
1921 lis r7,0xFF00 ; so set bits 0-7
1922
1923// Loop touching in cache blocks we will load from.
1924// r3 = savearea ptr
1925// r5 = we set a bit for each VR we will be loading
1926// r6 = 1 if 32-byte, 7 if 128-byte
1927// r7 = 0xC0000000 if 32-byte, 0xFF000000 if 128-byte
1928// r10 = live VR bits
1929// v31 = bugbug constant
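//
// A C-style sketch of the loop below (hypothetical helpers; PPC bit 0 is the most
// significant bit, so counting leading zeros gives the lowest-numbered live VR):
//
//     while (live) {                                   // live = r10
//         first    = clz32(live) & ~vrs_per_line_m1;   // align down to a cache line (r6)
//         linebits = line_mask >> first;               // line_mask = r7
//         dcbt(savearea + 16 * first);                 // touch in the line we will load
//         willload |= linebits;                        // willload = r5
//         live     &= ~linebits;
//     }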
1930
1931vr_ld0:
1932 li r5,0 ; initialize set of VRs to load
1933 la r11,savevr0(r3) ; get address of register file
1934 b vr_ld2 ; enter loop in middle
1935
1936 .align 5
1937vr_ld1: ; loop over each cache line we will load
1938 dcbt r4,r11 ; start prefetch of the line
1939 andc r10,r10,r9 ; turn off the bits in this line
1940 or r5,r5,r9 ; we will load all these
1941vr_ld2: ; initial entry pt
1942 cntlzw r4,r10 ; get offset to next live VR
1943 andc r4,r4,r6 ; cacheline align it
1944 srw. r9,r7,r4 ; position bits for VRs in that cache line
1945 slwi r4,r4,4 ; get byte offset within register file to that line
1946 bne vr_ld1 ; loop if more bits in r10
1947
1948 bf-- pf128Byteb,vr_ld32 ; skip if not 128-byte lines
1949
1950// Handle a processor with 128-byte cache lines. Four groups of 8 VRs.
1951// r3 = savearea ptr
1952// r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded
1953// r11 = addr(savevr0)
1954// v31 = bugbug constant
1955
1956 mtcrf 0x80,r5 ; set up bits for conditional branches
1957 li r4,16 ; load offsets for X-form loads
1958 li r6,48
1959 mtcrf 0x20,r5 ; load CRs one at a time, which is faster
1960 li r7,64
1961 li r8,80
1962 mtcrf 0x08,r5
1963 li r9,96
1964 li r10,112
1965 mtcrf 0x02,r5
1966 li r5,32
1967
1968 bt 0,vr_ld128a ; skip if this line must be loaded
1969 vor v0,v31,v31 ; no VR must be loaded, so bug them all
1970 vor v1,v31,v31
1971 vor v2,v31,v31
1972 vor v3,v31,v31
1973 vor v4,v31,v31
1974 vor v5,v31,v31
1975 vor v6,v31,v31
1976 vor v7,v31,v31
1977 b vr_ld128b
1978vr_ld128a: ; must load from this line
1979 lvxl v0,0,r11
1980 lvxl v1,r4,r11
1981 lvxl v2,r5,r11
1982 lvxl v3,r6,r11
1983 lvxl v4,r7,r11
1984 lvxl v5,r8,r11
1985 lvxl v6,r9,r11
1986 lvxl v7,r10,r11
1987
1988vr_ld128b: ; here to handle next cache line
1989 la r11,savevr8(r3) ; load offset to it
1990 bt 8,vr_ld128c ; skip if this line must be loaded
1991 vor v8,v31,v31 ; no VR must be loaded, so bug them all
1992 vor v9,v31,v31
1993 vor v10,v31,v31
1994 vor v11,v31,v31
1995 vor v12,v31,v31
1996 vor v13,v31,v31
1997 vor v14,v31,v31
1998 vor v15,v31,v31
1999 b vr_ld128d
2000vr_ld128c: ; must load from this line
2001 lvxl v8,0,r11
2002 lvxl v9,r4,r11
2003 lvxl v10,r5,r11
2004 lvxl v11,r6,r11
2005 lvxl v12,r7,r11
2006 lvxl v13,r8,r11
2007 lvxl v14,r9,r11
2008 lvxl v15,r10,r11
2009
2010vr_ld128d: ; here to handle next cache line
2011 la r11,savevr16(r3) ; load offset to it
2012 bt 16,vr_ld128e ; skip if this line must be loaded
2013 vor v16,v31,v31 ; no VR must be loaded, so bug them all
2014 vor v17,v31,v31
2015 vor v18,v31,v31
2016 vor v19,v31,v31
2017 vor v20,v31,v31
2018 vor v21,v31,v31
2019 vor v22,v31,v31
2020 vor v23,v31,v31
2021 b vr_ld128f
2022vr_ld128e: ; must load from this line
2023 lvxl v16,0,r11
2024 lvxl v17,r4,r11
2025 lvxl v18,r5,r11
2026 lvxl v19,r6,r11
2027 lvxl v20,r7,r11
2028 lvxl v21,r8,r11
2029 lvxl v22,r9,r11
2030 lvxl v23,r10,r11
2031
2032vr_ld128f: ; here to handle next cache line
2033 la r11,savevr24(r3) ; load offset to it
2034 bt 24,vr_ld128g ; skip if this line must be loaded
2035 vor v24,v31,v31 ; no VR must be loaded, so bug them all
2036 vor v25,v31,v31
2037 vor v26,v31,v31
2038 vor v27,v31,v31
2039 vor v28,v31,v31
2040 vor v29,v31,v31
2041 vor v30,v31,v31
2042 blr
2043vr_ld128g: ; must load from this line
2044 lvxl v24,0,r11
2045 lvxl v25,r4,r11
2046 lvxl v26,r5,r11
2047 lvxl v27,r6,r11
2048 lvxl v28,r7,r11
2049 lvxl v29,r8,r11
2050 lvxl v30,r9,r11
2051 lvxl v31,r10,r11
2052 blr
2053
2054// Handle a processor with 32-byte cache lines. Sixteen groups of two VRs.
2055// r5 = 1st bit in each cacheline is 1 iff any reg in that line must be loaded
2056// r11 = addr(savevr0)
2057
2058vr_ld32:
2059 mtcrf 0xFF,r5 ; set up bits for conditional branches
2060 li r4,16 ; load offset for X-form loads
2061
2062 bt 0,vr_ld32load0 ; skip if we must load this line
2063 vor v0,v31,v31 ; neither VR is live, so bug them both
2064 vor v1,v31,v31
2065 b vr_ld32test2
2066vr_ld32load0: ; must load VRs in this line
2067 lvxl v0,0,r11
2068 lvxl v1,r4,r11
2069
2070vr_ld32test2: ; here to handle next cache line
2071 la r11,savevr2(r3) ; get offset to next cache line
2072 bt 2,vr_ld32load2 ; skip if we must load this line
2073 vor v2,v31,v31 ; neither VR is live, so bug them both
2074 vor v3,v31,v31
2075 b vr_ld32test4
2076vr_ld32load2: ; must load VRs in this line
2077 lvxl v2,0,r11
2078 lvxl v3,r4,r11
2079
2080vr_ld32test4: ; here to handle next cache line
2081 la r11,savevr4(r3) ; get offset to next cache line
2082 bt 4,vr_ld32load4 ; skip if we must load this line
2083 vor v4,v31,v31 ; neither VR is live, so bug them both
2084 vor v5,v31,v31
2085 b vr_ld32test6
2086vr_ld32load4: ; must load VRs in this line
2087 lvxl v4,0,r11
2088 lvxl v5,r4,r11
2089
2090vr_ld32test6: ; here to handle next cache line
2091 la r11,savevr6(r3) ; get offset to next cache line
2092 bt 6,vr_ld32load6 ; skip if we must load this line
2093 vor v6,v31,v31 ; neither VR is live, so bug them both
2094 vor v7,v31,v31
2095 b vr_ld32test8
2096vr_ld32load6: ; must load VRs in this line
2097 lvxl v6,0,r11
2098 lvxl v7,r4,r11
2099
2100vr_ld32test8: ; here to handle next cache line
2101 la r11,savevr8(r3) ; get offset to next cache line
2102 bt 8,vr_ld32load8 ; skip if we must load this line
2103 vor v8,v31,v31 ; neither VR is live, so bug them both
2104 vor v9,v31,v31
2105 b vr_ld32test10
2106vr_ld32load8: ; must load VRs in this line
2107 lvxl v8,0,r11
2108 lvxl v9,r4,r11
2109
2110vr_ld32test10: ; here to handle next cache line
2111 la r11,savevr10(r3) ; get offset to next cache line
2112 bt 10,vr_ld32load10 ; skip if we must load this line
2113 vor v10,v31,v31 ; neither VR is live, so bug them both
2114 vor v11,v31,v31
2115 b vr_ld32test12
2116vr_ld32load10: ; must load VRs in this line
2117 lvxl v10,0,r11
2118 lvxl v11,r4,r11
2119
2120vr_ld32test12: ; here to handle next cache line
2121 la r11,savevr12(r3) ; get offset to next cache line
2122 bt 12,vr_ld32load12 ; skip if we must load this line
2123 vor v12,v31,v31 ; neither VR is live, so bug them both
2124 vor v13,v31,v31
2125 b vr_ld32test14
2126vr_ld32load12: ; must load VRs in this line
2127 lvxl v12,0,r11
2128 lvxl v13,r4,r11
2129
2130vr_ld32test14: ; here to handle next cache line
2131 la r11,savevr14(r3) ; get offset to next cache line
2132 bt 14,vr_ld32load14 ; skip if we must load this line
2133 vor v14,v31,v31 ; neither VR is live, so bug them both
2134 vor v15,v31,v31
2135 b vr_ld32test16
2136vr_ld32load14: ; must load VRs in this line
2137 lvxl v14,0,r11
2138 lvxl v15,r4,r11
2139
2140vr_ld32test16: ; here to handle next cache line
2141 la r11,savevr16(r3) ; get offset to next cache line
2142 bt 16,vr_ld32load16 ; skip if we must load this line
2143 vor v16,v31,v31 ; neither VR is live, so bug them both
2144 vor v17,v31,v31
2145 b vr_ld32test18
2146vr_ld32load16: ; must load VRs in this line
2147 lvxl v16,0,r11
2148 lvxl v17,r4,r11
2149
2150vr_ld32test18: ; here to handle next cache line
2151 la r11,savevr18(r3) ; get offset to next cache line
2152 bt 18,vr_ld32load18 ; skip if we must load this line
2153 vor v18,v31,v31 ; neither VR is live, so bug them both
2154 vor v19,v31,v31
2155 b vr_ld32test20
2156vr_ld32load18: ; must load VRs in this line
2157 lvxl v18,0,r11
2158 lvxl v19,r4,r11
2159
2160vr_ld32test20: ; here to handle next cache line
2161 la r11,savevr20(r3) ; get offset to next cache line
2162 bt 20,vr_ld32load20 ; skip if we must load this line
2163 vor v20,v31,v31 ; neither VR is live, so bug them both
2164 vor v21,v31,v31
2165 b vr_ld32test22
2166vr_ld32load20: ; must load VRs in this line
2167 lvxl v20,0,r11
2168 lvxl v21,r4,r11
2169
2170vr_ld32test22: ; here to handle next cache line
2171 la r11,savevr22(r3) ; get offset to next cache line
2172 bt 22,vr_ld32load22 ; skip if we must load this line
2173 vor v22,v31,v31 ; neither VR is live, so bug them both
2174 vor v23,v31,v31
2175 b vr_ld32test24
2176vr_ld32load22: ; must load VRs in this line
2177 lvxl v22,0,r11
2178 lvxl v23,r4,r11
2179
2180vr_ld32test24: ; here to handle next cache line
2181 la r11,savevr24(r3) ; get offset to next cache line
2182 bt 24,vr_ld32load24 ; skip if we must load this line
2183 vor v24,v31,v31 ; neither VR is live, so bug them both
2184 vor v25,v31,v31
2185 b vr_ld32test26
2186vr_ld32load24: ; must load VRs in this line
2187 lvxl v24,0,r11
2188 lvxl v25,r4,r11
2189
2190vr_ld32test26: ; here to handle next cache line
2191 la r11,savevr26(r3) ; get offset to next cache line
2192 bt 26,vr_ld32load26 ; skip if we must load this line
2193 vor v26,v31,v31 ; neither VR is live, so bug them both
2194 vor v27,v31,v31
2195 b vr_ld32test28
2196vr_ld32load26: ; must load VRs in this line
2197 lvxl v26,0,r11
2198 lvxl v27,r4,r11
2199
2200vr_ld32test28: ; here to handle next cache line
2201 la r11,savevr28(r3) ; get offset to next cache line
2202 bt 28,vr_ld32load28 ; skip if we must load this line
2203 vor v28,v31,v31 ; neither VR is live, so bug them both
2204 vor v29,v31,v31
2205 b vr_ld32test30
2206vr_ld32load28: ; must load VRs in this line
2207 lvxl v28,0,r11
2208 lvxl v29,r4,r11
2209
2210vr_ld32test30: ; here to handle next cache line
2211 la r11,savevr30(r3) ; get offset to next cache line
2212 bt 30,vr_ld32load30 ; skip if we must load this line
2213 vor v30,v31,v31 ; neither VR is live, so bug them both
2214 blr
2215vr_ld32load30: ; must load VRs in this line
2216 lvxl v30,0,r11
2217 lvxl v31,r4,r11
2218 blr