/* git.saurik.com Git mirror — apple/xnu.git : osfmk/arm64/cswitch.s
 * blob 05c38a36d70db523cb80f75508d57f1976dcae1f
 */
1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <machine/asm.h>
29 #include <arm64/machine_machdep.h>
30 #include <arm64/machine_routines_asm.h>
31 #include <arm64/pac_asm.h>
32 #include <arm64/proc_reg.h>
33 #include "assym.s"
34
35 /*
36 * save_general_registers
37 *
38 * Saves variable registers to kernel PCB.
39 * arg0 - thread_kernel_state pointer
40 * arg1 - Scratch register
41 */
42
43 .macro save_general_registers
// $0: thread_kernel_state pointer; $1: scratch register NUMBER (expanded as x$1 / w$1)
44 /* AAPCS-64 Page 14
45 *
46 * A subroutine invocation must preserve the contents of the registers r19-r29
47 * and SP. We also save IP0 and IP1, as machine_idle uses IP0 for saving the LR.
48 */
49 stp x16, x17, [$0, SS64_KERNEL_X16]
50 stp x19, x20, [$0, SS64_KERNEL_X19]
51 stp x21, x22, [$0, SS64_KERNEL_X21]
52 stp x23, x24, [$0, SS64_KERNEL_X23]
53 stp x25, x26, [$0, SS64_KERNEL_X25]
54 stp x27, x28, [$0, SS64_KERNEL_X27]
55 stp fp, lr, [$0, SS64_KERNEL_FP]
56 str xzr, [$0, SS64_KERNEL_PC] // kernel PC slot is stored as zero; resumption goes through the saved lr
57 MOV32 w$1, PSR64_KERNEL_POISON // store a poison CPSR value rather than the live one
58 str w$1, [$0, SS64_KERNEL_CPSR]
59 #ifdef HAS_APPLE_PAC
// Spill the caller-saved argument registers (x0-x5) we are about to clobber
// for the signing call; restored in reverse order below.
60 stp x0, x1, [sp, #-16]!
61 stp x2, x3, [sp, #-16]!
62 stp x4, x5, [sp, #-16]!
63
64 /*
65 * Arg0: The ARM context pointer
66 * Arg1: PC value to sign
67 * Arg2: CPSR value to sign
68 * Arg3: LR to sign
69 */
70 mov x0, $0
71 mov x1, #0
72 mov w2, w$1
73 mov x3, lr
74 mov x4, x16
75 mov x5, x17
76 bl EXT(ml_sign_kernel_thread_state)
77
78 ldp x4, x5, [sp], #16
79 ldp x2, x3, [sp], #16
80 ldp x0, x1, [sp], #16
81 ldp fp, lr, [$0, SS64_KERNEL_FP] // the bl above clobbered lr; reload fp/lr from the PCB
82 #endif /* defined(HAS_APPLE_PAC) */
83 mov x$1, sp // sp cannot be stored directly; stage it through the scratch reg
84 str x$1, [$0, SS64_KERNEL_SP]
85
86 /* AAPCS-64 Page 14
87 *
88 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
89 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
90 * (or should be preserved by the caller).
91 */
92 str d8, [$0, NS64_KERNEL_D8]
93 str d9, [$0, NS64_KERNEL_D9]
94 str d10,[$0, NS64_KERNEL_D10]
95 str d11,[$0, NS64_KERNEL_D11]
96 str d12,[$0, NS64_KERNEL_D12]
97 str d13,[$0, NS64_KERNEL_D13]
98 str d14,[$0, NS64_KERNEL_D14]
99 str d15,[$0, NS64_KERNEL_D15]
100
101 mrs x$1, FPCR
102 str w$1, [$0, NS64_KERNEL_FPCR] // only the low 32 bits of FPCR are kept
103 .endmacro
104
105 /*
106 * load_general_registers
107 *
108 * Loads variable registers from kernel PCB.
109 * arg0 - thread_kernel_state pointer
110 * arg1 - Scratch register
111 */
112 .macro load_general_registers
// $0: thread_kernel_state pointer; $1: scratch register NUMBER (expanded as x$1 / w$1)
// Preserve x0-x2 (caller's live values) in callee-saved x20-x22 across the
// auth sequence; they are reloaded from the PCB anyway further down.
113 mov x20, x0
114 mov x21, x1
115 mov x22, x2
116
117 mov x0, $0
// Authenticates the signed kernel thread state; per the comments below it
// also loads x16/x17 and lr. x23-x27 are handed over as scratch registers.
118 AUTH_KERNEL_THREAD_STATE_IN_X0 x23, x24, x25, x26, x27
119
120 mov x0, x20
121 mov x1, x21
122 mov x2, x22
123
124 ldr w$1, [$0, NS64_KERNEL_FPCR]
125 mrs x19, FPCR
// NOTE(review): CMSR appears to be a conditional msr — writing FPCR only when
// the saved value (x$1) differs from the live one (x19), using local label 1
// as the skip target; confirm against its definition in machine_routines_asm.h.
126 CMSR FPCR, x19, x$1, 1
127 1:
128
129 // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
130 ldp x19, x20, [$0, SS64_KERNEL_X19]
131 ldp x21, x22, [$0, SS64_KERNEL_X21]
132 ldp x23, x24, [$0, SS64_KERNEL_X23]
133 ldp x25, x26, [$0, SS64_KERNEL_X25]
134 ldp x27, x28, [$0, SS64_KERNEL_X27]
135 ldr fp, [$0, SS64_KERNEL_FP]
136 // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0
137 ldr x$1, [$0, SS64_KERNEL_SP] // sp cannot be loaded directly; stage through scratch
138 mov sp, x$1
139
140 ldr d8, [$0, NS64_KERNEL_D8]
141 ldr d9, [$0, NS64_KERNEL_D9]
142 ldr d10,[$0, NS64_KERNEL_D10]
143 ldr d11,[$0, NS64_KERNEL_D11]
144 ldr d12,[$0, NS64_KERNEL_D12]
145 ldr d13,[$0, NS64_KERNEL_D13]
146 ldr d14,[$0, NS64_KERNEL_D14]
147 ldr d15,[$0, NS64_KERNEL_D15]
148 .endmacro
149
150
151 /*
152 * set_thread_registers
153 *
154 * Updates thread registers during context switch
155 * arg0 - New thread pointer
156 * arg1 - Scratch register
157 * arg2 - Scratch register
158 */
159 .macro set_thread_registers
// $0: new thread pointer; $1/$2: full-width scratch registers
160 msr TPIDR_EL1, $0 // Write new thread pointer to TPIDR_EL1
161 ldr $1, [$0, ACT_CPUDATAP]
162 str $0, [$1, CPU_ACTIVE_THREAD] // cpu_data->cpu_active_thread = new thread
163 ldr $1, [$0, TH_CTH_SELF] // Get cthread pointer
164 mrs $2, TPIDRRO_EL0 // Extract cpu number from TPIDRRO_EL0
165 and $2, $2, #(MACHDEP_CPUNUM_MASK)
166 orr $2, $1, $2 // Save new cthread/cpu to TPIDRRO_EL0
167 msr TPIDRRO_EL0, $2
168 msr TPIDR_EL0, xzr // zero the EL0 read/write thread register for the new thread
169 #if DEBUG || DEVELOPMENT
170 ldr $1, [$0, TH_THREAD_ID] // Save the bottom 32-bits of the thread ID into
171 msr CONTEXTIDR_EL1, $1 // CONTEXTIDR_EL1 (top 32-bits are RES0).
172 #endif /* DEBUG || DEVELOPMENT */
173 .endmacro
174
175 /*
176 * set_process_dependent_keys_and_sync_context
177 *
178 * Updates process dependent keys and issues explicit context sync during context switch if necessary
179 * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
180 * and in cpu_data_init for slave processors
181 *
182 * thread - New thread pointer
183 * new_key - Scratch register: New Thread Key
184 * tmp_key - Scratch register: Current CPU Key
185 * cpudatap - Scratch register: Current CPU Data pointer
186 * wsync - Half-width scratch register: CPU sync required flag
187 *
188 * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
189 * we just use wsync to keep track of needing an ISB
190 */
191 .macro set_process_dependent_keys_and_sync_context thread, new_key, tmp_key, cpudatap, wsync
192
193
194 #if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
195 ldr \cpudatap, [\thread, ACT_CPUDATAP] // current CPU's data, holding its cached key state
196 #endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */
197
198 mov \wsync, #0 // assume no context synchronization needed
199
200
201 #if defined(HAS_APPLE_PAC)
// Reprogram the B-family and data PAC keys only when the incoming thread's
// ROP PID differs from what this CPU last loaded (cached in CPU_ROP_KEY).
202 ldr \new_key, [\thread, TH_ROP_PID]
203 ldr \tmp_key, [\cpudatap, CPU_ROP_KEY]
204 cmp \new_key, \tmp_key
205 b.eq 1f
206 str \new_key, [\cpudatap, CPU_ROP_KEY] // cache the new key for future comparisons
// The four 64-bit key halves are derived as pid, pid+1, pid+2, pid+3.
207 msr APIBKeyLo_EL1, \new_key
208 add \new_key, \new_key, #1
209 msr APIBKeyHi_EL1, \new_key
210 add \new_key, \new_key, #1
211 msr APDBKeyLo_EL1, \new_key
212 add \new_key, \new_key, #1
213 msr APDBKeyHi_EL1, \new_key
214 mov \wsync, #1 // key registers changed; a sync is required before use
215 1:
216
217 #if HAS_PAC_FAST_A_KEY_SWITCHING
218 IF_PAC_SLOW_A_KEY_SWITCHING Lskip_jop_keys_\@, \new_key
219 ldr \new_key, [\thread, TH_JOP_PID]
220 REPROGRAM_JOP_KEYS Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
221 mov \wsync, #1
222 Lskip_jop_keys_\@:
223 #endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
224
225 #endif /* defined(HAS_APPLE_PAC) */
226
227 cbz \wsync, 1f // skip the (expensive) barrier when no key was touched
228 isb sy // context synchronization: make new keys visible to subsequent instructions
229
230 1:
231 .endmacro
232
233 /*
234 * void machine_load_context(thread_t thread)
235 *
236 * Load the context for the first thread to run on a
237 * cpu, and go.
238 */
239 .text
240 .align 2
241 .globl EXT(machine_load_context)
242
243 LEXT(machine_load_context)
// x0 = thread to run. Installs the thread, restores its saved kernel register
// state, and "returns" into it via the lr restored by load_general_registers.
244 set_thread_registers x0, x1, x2
245 ldr x1, [x0, TH_KSTACKPTR] // Get top of kernel stack
246 load_general_registers x1, 2
247 set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4
248 mov x0, #0 // Clear argument to thread_continue
249 ret
250
251 /*
252 * typedef void (*thread_continue_t)(void *param, wait_result_t)
253 *
254 * void Call_continuation( thread_continue_t continuation,
255 * void *param,
256 * wait_result_t wresult,
257 * bool enable_interrupts)
258 */
259 .text
260 .align 5
261 .globl EXT(Call_continuation)
262
263 LEXT(Call_continuation)
// x0 = continuation, x1 = its parameter, x2 = wait result,
// x3 = nonzero to enable interrupts before calling the continuation.
// Resets sp/fp to a fresh kernel stack, then invokes the continuation;
// a continuation never returns, so falling out of it terminates the thread.
264 mrs x4, TPIDR_EL1 // Get the current thread pointer
265
266 /* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
267 ldr x5, [x4, TH_KSTACKPTR] // Get the top of the kernel stack
268 mov sp, x5 // Set stack pointer
269 mov fp, #0 // Clear the frame pointer
270
271 set_process_dependent_keys_and_sync_context x4, x5, x6, x7, w20
272
// Stash the arguments in callee-saved registers across the call below.
273 mov x20, x0 //continuation
274 mov x21, x1 //continuation parameter
275 mov x22, x2 //wait result
276
277 cbz x3, 1f // x3 == 0: leave interrupt state alone
278 mov x0, #1
279 bl EXT(ml_set_interrupts_enabled)
280 1:
281
282 mov x0, x21 // Set the first parameter
283 mov x1, x22 // Set the wait result arg
284 #ifdef HAS_APPLE_PAC
285 mov x21, THREAD_CONTINUE_T_DISC
286 blraa x20, x21 // Branch to the continuation (A-key authenticated, with discriminator)
287 #else
288 blr x20 // Branch to the continuation
289 #endif
290 mrs x0, TPIDR_EL1 // Get the current thread pointer
291 b EXT(thread_terminate) // Kill the thread
292
293
294 /*
295 * thread_t Switch_context(thread_t old,
296 * void (*cont)(void),
297 * thread_t new)
298 */
299 .text
300 .align 5
301 .globl EXT(Switch_context)
302
303 LEXT(Switch_context)
// x0 = old thread, x1 = continuation (nonzero means the old thread will
// resume via the continuation, so its registers need not be saved),
// x2 = new thread. Returns (in x0, per load_general_registers) into the
// new thread's saved context.
304 cbnz x1, Lswitch_threads // Skip saving old state if blocking on continuation
305 ldr x3, [x0, TH_KSTACKPTR] // Get the old kernel stack top
306 save_general_registers x3, 4
307 Lswitch_threads:
308 set_thread_registers x2, x3, x4
309 ldr x3, [x2, TH_KSTACKPTR]
310 load_general_registers x3, 4
311 set_process_dependent_keys_and_sync_context x2, x3, x4, x5, w6
312 ret
313
314 /*
315 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
316 *
317 */
318 .text
319 .align 5
320 .globl EXT(Shutdown_context)
321
322 LEXT(Shutdown_context)
// Saves the current thread's kernel state, then runs the shutdown path on
// this CPU's interrupt stack with interrupts masked. Does not return here.
323 mrs x10, TPIDR_EL1 // Get thread pointer
324 ldr x11, [x10, TH_KSTACKPTR] // Get the top of the kernel stack
325 save_general_registers x11, 12
326 msr DAIFSet, #(DAIFSC_FIQF | DAIFSC_IRQF) // Disable interrupts
327 ldr x11, [x10, ACT_CPUDATAP] // Get current cpu
328 ldr x12, [x11, CPU_ISTACKPTR] // Switch to interrupt stack
329 mov sp, x12
330 b EXT(cpu_doshutdown)
331
332 /*
333 * thread_t Idle_context(void)
334 *
335 */
336 .text
337 .align 5
338 .globl EXT(Idle_context)
339
340 LEXT(Idle_context)
// Saves the current thread's kernel state and enters the idle loop on this
// CPU's interrupt stack; resumption happens via Idle_load_context below.
341 mrs x0, TPIDR_EL1 // Get thread pointer
342 ldr x1, [x0, TH_KSTACKPTR] // Get the top of the kernel stack
343 save_general_registers x1, 2
344 ldr x1, [x0, ACT_CPUDATAP] // Get current cpu
345 ldr x2, [x1, CPU_ISTACKPTR] // Switch to interrupt stack
346 mov sp, x2
347 b EXT(cpu_idle)
348
349 /*
350 * thread_t Idle_load_context(void)
351 *
352 */
353 .text
354 .align 5
355 .globl EXT(Idle_load_context)
356
357 LEXT(Idle_load_context)
// Counterpart to Idle_context: reloads the current thread's saved kernel
// state (including sp/lr) and returns into it.
358 mrs x0, TPIDR_EL1 // Get thread pointer
359 ldr x1, [x0, TH_KSTACKPTR] // Get the top of the kernel stack
360 load_general_registers x1, 2
361 set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4
362 ret
363
364 .align 2
365 .globl EXT(machine_set_current_thread)
366 LEXT(machine_set_current_thread)
// x0 = thread. Thin wrapper: installs x0 as the current thread (TPIDR_EL1,
// cpu_active_thread, TPIDRRO_EL0) without touching any saved register state.
367 set_thread_registers x0, x1, x2
368 ret
369
370
371 /* vim: set ts=4: */