/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm/proc_reg.h>
#include "assym.s"

/*
 * save_vfp_registers
 *
 * Expects a pointer to the VFP save area in r3; saves the callee-saved registers to that save area.
 * Clobbers r2 and r3.
 */
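/*
 * Layout note (inferred from the offsets used here): the save area is assumed
 * to hold s0-s31 starting at its base, with the FPSCR at offset VSS_FPSCR.
 * The "add r3, r3, #64" below skips the 16 caller-saved single-precision
 * registers (16 x 4 bytes), so the stores begin at s16; on VFPv3 the same
 * registers are written as d8-d15, which alias s16-s31.  load_vfp_registers
 * below assumes the same layout.
 */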
.macro save_vfp_registers
#if __ARM_VFP__
	fmrx	r2, fpscr				// Get the current FPSCR...
	str		r2, [r3, VSS_FPSCR]		// ...and save it to the save area
	add		r3, r3, #64				// Only s16-s31 are callee-saved
#if (__ARM_VFP__ >= 3)
	vstmia.64	r3!, {d8-d11}
	vstmia.64	r3!, {d12-d15}
#else
	fstmias	r3!, {s16-s31}
#endif /* __ARM_VFP__ >= 3 */
#endif /* __ARM_VFP__ */
.endmacro

/*
 * load_vfp_registers
 *
 * Expects a pointer to the VFP save area in r3; loads the callee-saved registers from that save area.
 * Clobbers r2 and r3.
 */
.macro load_vfp_registers
#if __ARM_VFP__
	add		r2, r3, #64				// Only s16-s31 are callee-saved
#if (__ARM_VFP__ >= 3)
	vldmia.64	r2!, {d8-d11}
	vldmia.64	r2!, {d12-d15}
#else
	fldmias	r2!, {s16-s31}
#endif /* __ARM_VFP__ >= 3 */
	ldr		r3, [r3, VSS_FPSCR]		// Get our saved FPSCR value...
	fmxr	fpscr, r3				// ...and restore it
#endif /* __ARM_VFP__ */
.endmacro

/*
 * void machine_load_context(thread_t thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */
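/*
 * The thread pointer passed in r0 is published to the kernel in TPIDRPRW,
 * and TPIDRURO is rebuilt from the thread's cthread self value combined with
 * the cpu number taken from the low two bits of the previous TPIDRURO.  The
 * ldmia below restores r4-r14 (including sp and lr) from the thread's saved
 * state at SS_R4, so the final "bx lr" resumes the thread at its saved
 * return address rather than returning to the caller.
 */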
	.syntax unified
	.text
	.align 2
	.globl EXT(machine_load_context)

LEXT(machine_load_context)
	mcr		p15, 0, r0, c13, c0, 4		// Write TPIDRPRW
	ldr		r1, [r0, TH_CTH_SELF]
	mrc		p15, 0, r2, c13, c0, 3		// Read TPIDRURO
	and		r2, r2, #3					// Extract cpu number
	orr		r1, r1, r2					// Merge cpu number into cthread self value
	mcr		p15, 0, r1, c13, c0, 3		// Write TPIDRURO
	mov		r1, #0
	mcr		p15, 0, r1, c13, c0, 2		// Write TPIDRURW
	mov		r7, #0						// Clear frame pointer
	ldr		r3, [r0, TH_KSTACKPTR]		// Get kernel stack top
	mov		r0, #0						// no param
	add		r3, r3, SS_R4
	ldmia	r3!, {r4-r14}				// Load thread status
	bx		lr							// Return

/*
 * typedef void (*thread_continue_t)(void *param, wait_result_t)
 *
 * void Call_continuation( thread_continue_t continuation,
 *                         void *param,
 *                         wait_result_t wresult,
 *                         bool enable_interrupts)
 */
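/*
 * The code below switches sp to the top of the current thread's kernel
 * stack, re-enables interrupts when the fourth argument is non-zero, and
 * calls continuation(param, wresult).  Continuations are not expected to
 * return; if one does, the thread is terminated by tail-calling
 * thread_terminate() with the current thread as its argument.
 */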
	.text
	.align 5
	.globl EXT(Call_continuation)

LEXT(Call_continuation)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]		// Set stack pointer
	mov		r7, #0						// Clear frame pointer

	mov		r4, r0						// Load continuation
	mov		r5, r1						// continuation parameter
	mov		r6, r2						// Set wait result arg

	teq		r3, #0						// Re-enable interrupts?
	beq		1f
	mov		r0, #1
	bl		_ml_set_interrupts_enabled
1:

	mov		r0, r5						// Set first parameter
	mov		r1, r6						// Set wait result arg
	blx		r4							// Branch to continuation

	mrc		p15, 0, r0, c13, c0, 4		// Read TPIDRPRW
	LOAD_ADDR_PC(thread_terminate)
	b		.							// Not reached


/*
 * thread_t Switch_context(thread_t old,
 *                         void (*cont)(void),
 *                         thread_t new)
 */
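/*
 * If "cont" is non-NULL the old thread is blocking with a continuation, so
 * its callee-saved GPR and VFP state does not need to be preserved and the
 * code skips straight to switch_threads.  Otherwise r4-r14 and the
 * callee-saved VFP registers are saved into the old thread's PCB at SS_R4
 * and SS_KVFP.  r0 (the old thread) is left untouched, so it is what the
 * resumed thread observes as the return value.
 */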
	.text
	.align 5
	.globl EXT(Switch_context)

LEXT(Switch_context)
	teq		r1, #0						// Test if blocking on continuation
	bne		switch_threads				// No need to save GPR/NEON state if we are
#if __ARM_VFP__
	mov		r1, r2						// r2 will be clobbered by the save, so preserve it
	ldr		r3, [r0, TH_KSTACKPTR]		// Get old kernel stack top
	add		r3, r3, SS_KVFP				// Get the kernel VFP save area for the old thread...
	save_vfp_registers					// ...and save our VFP state to it
	mov		r2, r1						// Restore r2 (the new thread pointer)
#endif /* __ARM_VFP__ */
	ldr		r3, [r0, TH_KSTACKPTR]		// Get old kernel stack top
	add		r3, r3, SS_R4
	stmia	r3!, {r4-r14}				// Save general registers to pcb
switch_threads:
	ldr		r3, [r2, ACT_CPUDATAP]		// Get the new thread's cpu data
	str		r2, [r3, CPU_ACTIVE_THREAD]	// ...and mark it as the active thread
	ldr		r3, [r2, TH_KSTACKPTR]		// Get new kernel stack top
	mcr		p15, 0, r2, c13, c0, 4		// Write TPIDRPRW
	ldr		r6, [r2, TH_CTH_SELF]
	mrc		p15, 0, r5, c13, c0, 3		// Read TPIDRURO
	and		r5, r5, #3					// Extract cpu number
	orr		r6, r6, r5					// Merge cpu number into cthread self value
	mcr		p15, 0, r6, c13, c0, 3		// Write TPIDRURO
	mov		r6, #0
	mcr		p15, 0, r6, c13, c0, 2		// Write TPIDRURW
load_reg:
	add		r3, r3, SS_R4
	ldmia	r3!, {r4-r14}				// Restore new thread status
#if __ARM_VFP__
	ldr		r3, [r2, TH_KSTACKPTR]		// Get new kernel stack top
	add		r3, r3, SS_KVFP				// Get the kernel VFP save area for the new thread...
	load_vfp_registers					// ...and load the saved state
#endif /* __ARM_VFP__ */
	bx		lr							// Return

/*
 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
 */
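/*
 * Saves the current thread's callee-saved GPR and VFP state into its PCB,
 * masks IRQ and FIQ, switches onto this cpu's interrupt stack, and
 * tail-calls cpu_doshutdown(); control does not return to this routine.
 */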
	.text
	.align 5
	.globl EXT(Shutdown_context)

LEXT(Shutdown_context)
	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_VFP__
	ldr		r3, [r9, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_KVFP				// Get the kernel VFP save area for the current thread...
	save_vfp_registers					// ...and save our VFP state to it
#endif
	ldr		r3, [r9, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_R4
	stmia	r3!, {r4-r14}				// Save general registers to pcb
	cpsid	if							// Disable FIQ and IRQ

	ldr		r12, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [r12, CPU_ISTACKPTR]	// Switch to interrupt stack
	LOAD_ADDR_PC(cpu_doshutdown)

/*
 * thread_t Idle_context(void)
 */
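/*
 * Saves the current thread's callee-saved GPR and VFP state into the same
 * PCB locations that Idle_load_context restores from, then switches onto
 * this cpu's interrupt stack and tail-calls cpu_idle().
 */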
	.text
	.align 5
	.globl EXT(Idle_context)

LEXT(Idle_context)

	mrc		p15, 0, r9, c13, c0, 4		// Read TPIDRPRW
#if __ARM_VFP__
	ldr		r3, [r9, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_KVFP				// Get the kernel VFP save area for the current thread...
	save_vfp_registers					// ...and save our VFP state to it
#endif
	ldr		r3, [r9, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_R4
	stmia	r3!, {r4-r14}				// Save general registers to pcb

	ldr		r12, [r9, ACT_CPUDATAP]		// Get current cpu
	ldr		sp, [r12, CPU_ISTACKPTR]	// Switch to interrupt stack
	LOAD_ADDR_PC(cpu_idle)

/*
 * thread_t Idle_load_context(void)
 */
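/*
 * Restores the state saved by Idle_context above.  Note that the VFP reload
 * below uses r9 as the thread pointer: r9 is not loaded explicitly here, but
 * the ldmia restores it to the value saved when the idle state was stored,
 * where r9 held the TPIDRPRW thread pointer.
 */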
	.text
	.align 5
	.globl EXT(Idle_load_context)

LEXT(Idle_load_context)

	mrc		p15, 0, r12, c13, c0, 4		// Read TPIDRPRW
	ldr		r3, [r12, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_R4
	ldmia	r3!, {r4-r14}				// Restore new thread status
#if __ARM_VFP__
	ldr		r3, [r9, TH_KSTACKPTR]		// Get kernel stack top
	add		r3, r3, SS_KVFP				// Get the kernel VFP save area for the current thread...
	load_vfp_registers					// ...and load the saved state
#endif
	bx		lr							// Return

/*
 * void vfp_save(struct arm_vfpsaved_state *vfp_ss)
 */
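/*
 * Saves the FPSCR and the full VFP register file into *vfp_ss.  When
 * __ARM_VFP__ >= 3 the state is written as d0-d31 (assuming the
 * 32-double-register file); the fallback path saves s0-s31, which alias
 * d0-d15.
 */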
	.text
	.align 2
	.globl EXT(vfp_save)

LEXT(vfp_save)
#if __ARM_VFP__
	fmrx	r1, fpscr					// Get the current FPSCR...
	str		r1, [r0, VSS_FPSCR]			// ...and save it to the save area
#if (__ARM_VFP__ >= 3)
	vstmia.64	r0!, {d0-d3}			// Save vfp registers
	vstmia.64	r0!, {d4-d7}
	vstmia.64	r0!, {d8-d11}
	vstmia.64	r0!, {d12-d15}
	vstmia.64	r0!, {d16-d19}
	vstmia.64	r0!, {d20-d23}
	vstmia.64	r0!, {d24-d27}
	vstmia.64	r0!, {d28-d31}
#else
	fstmias	r0!, {s0-s31}				// Save vfp registers
#endif
#endif /* __ARM_VFP__ */
	bx		lr							// Return

/*
 * void vfp_load(struct arm_vfpsaved_state *vfp_ss)
 *
 * Loads the state in vfp_ss into the VFP registers.
 */
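/*
 * r0 is advanced by the load-multiple writeback, so the incoming pointer is
 * copied to r1 first and used at the end to reload the FPSCR from VSS_FPSCR.
 */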
	.text
	.align 2
	.globl EXT(vfp_load)
LEXT(vfp_load)
#if __ARM_VFP__
	/* r0: vfp_ss, r1: unused, r2: unused, r3: unused */
	mov		r1, r0
#if (__ARM_VFP__ >= 3)
	vldmia.64	r0!, {d0-d3}			// Restore vfp registers
	vldmia.64	r0!, {d4-d7}
	vldmia.64	r0!, {d8-d11}
	vldmia.64	r0!, {d12-d15}
	vldmia.64	r0!, {d16-d19}
	vldmia.64	r0!, {d20-d23}
	vldmia.64	r0!, {d24-d27}
	vldmia.64	r0!, {d28-d31}
#else
	fldmias	r0!, {s0-s31}				// Restore vfp registers
#endif /* __ARM_VFP__ >= 3 */
	ldr		r1, [r1, VSS_FPSCR]			// Get fpscr from the save state...
	fmxr	fpscr, r1					// ...and load it into the register
#endif /* __ARM_VFP__ */
	bx		lr							// Return

#include "globals_asm.h"

LOAD_ADDR_GEN_DEF(thread_terminate)
LOAD_ADDR_GEN_DEF(cpu_doshutdown)
LOAD_ADDR_GEN_DEF(cpu_idle)

/* vim: set ts=4: */