]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm64/cswitch.s
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm64 / cswitch.s
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <machine/asm.h>
29#include <arm64/machine_machdep.h>
cb323159 30#include <arm64/machine_routines_asm.h>
5ba3f43e
A
31#include <arm64/proc_reg.h>
32#include "assym.s"
33
34/*
35 * save_general_registers
36 *
37 * Saves variable registers to kernel PCB.
38 * arg0 - thread_kernel_state pointer
39 * arg1 - Scratch register
40 */
41
42.macro save_general_registers
43/* AAPCS-64 Page 14
44 *
45 * A subroutine invocation must preserve the contents of the registers r19-r29
46 * and SP. We also save IP0 and IP1, as machine_idle uses IP0 for saving the LR.
47 */
/*
 * $0 - thread_kernel_state pointer
 * $1 - scratch register NUMBER (used as both w$1 and x$1)
 *
 * The saved PC slot is written as zero and the CPSR slot as the
 * PSR64_KERNEL_POISON constant; NOTE(review): presumably so this saved
 * kernel context can never be mistaken for / replayed as a valid
 * exception-return frame -- confirm against ml_sign_kernel_thread_state.
 */
f427ee49
A
48 stp x16, x17, [$0, SS64_KERNEL_X16]
49 stp x19, x20, [$0, SS64_KERNEL_X19]
50 stp x21, x22, [$0, SS64_KERNEL_X21]
51 stp x23, x24, [$0, SS64_KERNEL_X23]
52 stp x25, x26, [$0, SS64_KERNEL_X25]
53 stp x27, x28, [$0, SS64_KERNEL_X27]
54 stp fp, lr, [$0, SS64_KERNEL_FP]
55 str xzr, [$0, SS64_KERNEL_PC]
eb6b6ca3 56 MOV32 w$1, PSR64_KERNEL_POISON
f427ee49 57 str w$1, [$0, SS64_KERNEL_CPSR]
cb323159
A
58#ifdef HAS_APPLE_PAC
// Spill the caller-visible argument registers; the signing call below
// is an ordinary subroutine call and may clobber all volatile registers.
59 stp x0, x1, [sp, #-16]!
60 stp x2, x3, [sp, #-16]!
61 stp x4, x5, [sp, #-16]!
62
63 /*
64 * Arg0: The ARM context pointer
65 * Arg1: PC value to sign
66 * Arg2: CPSR value to sign
67 * Arg3: LR to sign
68 */
69 mov x0, $0
eb6b6ca3
A
70 mov x1, #0
71 mov w2, w$1
cb323159
A
72 mov x3, lr
73 mov x4, x16
74 mov x5, x17
f427ee49 75 bl EXT(ml_sign_kernel_thread_state)
cb323159
A
76
77 ldp x4, x5, [sp], #16
78 ldp x2, x3, [sp], #16
79 ldp x0, x1, [sp], #16
// The bl above clobbered lr (and potentially fp); reload both from the
// slots saved at the top of this macro so the caller's frame is intact.
f427ee49 80 ldp fp, lr, [$0, SS64_KERNEL_FP]
cb323159 81#endif /* defined(HAS_APPLE_PAC) */
// Capture sp only now, after the temporary PAC spill area has been
// popped, so the saved SP matches the caller's stack pointer.
eb6b6ca3 82 mov x$1, sp
f427ee49 83 str x$1, [$0, SS64_KERNEL_SP]
5ba3f43e
A
84
85/* AAPCS-64 Page 14
86 *
87 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
88 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
89 * (or should be preserved by the caller).
90 */
f427ee49
A
91 str d8, [$0, NS64_KERNEL_D8]
92 str d9, [$0, NS64_KERNEL_D9]
93 str d10,[$0, NS64_KERNEL_D10]
94 str d11,[$0, NS64_KERNEL_D11]
95 str d12,[$0, NS64_KERNEL_D12]
96 str d13,[$0, NS64_KERNEL_D13]
97 str d14,[$0, NS64_KERNEL_D14]
98 str d15,[$0, NS64_KERNEL_D15]
99
// FPCR is per-thread state: save the current control bits so
// load_general_registers can restore them on switch-in.
100 mrs x$1, FPCR
101 str w$1, [$0, NS64_KERNEL_FPCR]
5ba3f43e
A
102.endmacro
103
104/*
105 * load_general_registers
106 *
107 * Loads variable registers from kernel PCB.
108 * arg0 - thread_kernel_state pointer
109 * arg1 - Scratch register
110 */
111.macro load_general_registers
/*
 * $0 - thread_kernel_state pointer
 * $1 - scratch register NUMBER (used as both w$1 and x$1)
 *
 * x16/x17 and lr are restored (and, where PAC is enabled, authenticated)
 * by AUTH_KERNEL_THREAD_STATE_IN_X0, so they are deliberately not
 * reloaded from the PCB below.
 */
cb323159
A
// Preserve x0-x2 in callee-saved registers: they may hold live caller
// arguments (e.g. the thread pointer in Switch_context), and the auth
// macro below performs calls that can clobber the volatile registers.
112 mov x20, x0
113 mov x21, x1
114 mov x22, x2
115
116 mov x0, $0
f427ee49 117 AUTH_KERNEL_THREAD_STATE_IN_X0 x23, x24, x25, x26, x27
d9a64523 118
cb323159
A
119 mov x0, x20
120 mov x1, x21
121 mov x2, x22
122
f427ee49
A
// Restore the thread's FPCR. NOTE(review): CMSR appears to write the
// system register only when the saved value differs from the live one,
// branching to local label "1:" below when no write is needed -- confirm
// against the CMSR definition in machine_routines_asm.h.
123 ldr w$1, [$0, NS64_KERNEL_FPCR]
124 mrs x19, FPCR
125 CMSR FPCR, x19, x$1, 1
1261:
127
cb323159 128 // Skip x16, x17 - already loaded + authed by AUTH_KERNEL_THREAD_STATE_IN_X0
f427ee49
A
129 ldp x19, x20, [$0, SS64_KERNEL_X19]
130 ldp x21, x22, [$0, SS64_KERNEL_X21]
131 ldp x23, x24, [$0, SS64_KERNEL_X23]
132 ldp x25, x26, [$0, SS64_KERNEL_X25]
133 ldp x27, x28, [$0, SS64_KERNEL_X27]
134 ldr fp, [$0, SS64_KERNEL_FP]
cb323159 135 // Skip lr - already loaded + authed by AUTH_KERNEL_THREAD_STATE_IN_X0
f427ee49
A
// sp cannot be loaded directly from memory; stage it through x$1.
136 ldr x$1, [$0, SS64_KERNEL_SP]
137 mov sp, x$1
138
// Restore the AAPCS-64 callee-saved low halves of v8-v15.
139 ldr d8, [$0, NS64_KERNEL_D8]
140 ldr d9, [$0, NS64_KERNEL_D9]
141 ldr d10,[$0, NS64_KERNEL_D10]
142 ldr d11,[$0, NS64_KERNEL_D11]
143 ldr d12,[$0, NS64_KERNEL_D12]
144 ldr d13,[$0, NS64_KERNEL_D13]
145 ldr d14,[$0, NS64_KERNEL_D14]
146 ldr d15,[$0, NS64_KERNEL_D15]
5ba3f43e
A
147.endmacro
148
cb323159 149
5ba3f43e
A
150/*
151 * set_thread_registers
152 *
153 * Updates thread registers during context switch
154 * arg0 - New thread pointer
155 * arg1 - Scratch register
156 * arg2 - Scratch register
157 */
158.macro set_thread_registers
/*
 * $0 - new thread pointer
 * $1, $2 - scratch registers
 *
 * Makes the new thread "current": TPIDR_EL1 (kernel thread pointer),
 * the per-cpu active_thread field, and the EL0-visible TPIDRRO_EL0
 * (cthread pointer combined with the cpu number in the low bits).
 */
159 msr TPIDR_EL1, $0 // Write new thread pointer to TPIDR_EL1
ea3f0419
A
// Point this CPU's cpu_data active_thread at the incoming thread.
160 ldr $1, [$0, ACT_CPUDATAP]
161 str $0, [$1, CPU_ACTIVE_THREAD]
5ba3f43e
A
162 ldr $1, [$0, TH_CTH_SELF] // Get cthread pointer
163 mrs $2, TPIDRRO_EL0 // Extract cpu number from TPIDRRO_EL0
164 and $2, $2, #(MACHDEP_CPUNUM_MASK)
165 orr $2, $1, $2 // Save new cthread/cpu to TPIDRRO_EL0
166 msr TPIDRRO_EL0, $2
// Zero the EL0 read/write thread register for the incoming thread.
94ff46dc 167 msr TPIDR_EL0, xzr
f427ee49
A
168#if DEBUG || DEVELOPMENT
169 ldr $1, [$0, TH_THREAD_ID] // Save the bottom 32-bits of the thread ID into
170 msr CONTEXTIDR_EL1, $1 // CONTEXTIDR_EL1 (top 32-bits are RES0).
171#endif /* DEBUG || DEVELOPMENT */
5ba3f43e
A
172.endmacro
173
cb323159 174/*
f427ee49 175 * set_process_dependent_keys_and_sync_context
cb323159 176 *
f427ee49 177 * Updates process dependent keys and issues explicit context sync during context switch if necessary
cb323159
A
178 * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
179 * and in cpu_data_init for slave processors
180 *
f427ee49
A
181 * thread - New thread pointer
182 * new_key - Scratch register: New Thread Key
183 * tmp_key - Scratch register: Current CPU Key
184 * cpudatap - Scratch register: Current CPU Data pointer
185 * wsync - Half-width scratch register: CPU sync required flag
186 *
187 * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
188 * we just use wsync to keep track of needing an ISB
cb323159 189 */
f427ee49
A
190.macro set_process_dependent_keys_and_sync_context thread, new_key, tmp_key, cpudatap, wsync
191
192
193#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
194 ldr \cpudatap, [\thread, ACT_CPUDATAP]
195#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */
196
2a1bd2d3
A
// Seed wsync: on ARMv8.5 start from the per-cpu "sync pending" flag so a
// previously requested context sync is honored even if no key changes;
// otherwise start at 0 and only sync if a key is actually reprogrammed.
197#if defined(__ARM_ARCH_8_5__)
198 ldrb \wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]
199#else /* defined(__ARM_ARCH_8_5__) */
f427ee49 200 mov \wsync, #0
2a1bd2d3 201#endif
f427ee49
A
202
203
c3c9b80d
A
204#if CSWITCH_ROP_KEYS
205 ldr \new_key, [\thread, TH_ROP_PID]
// NOTE(review): REPROGRAM_ROP_KEYS presumably branches to the skip label
// when the key already matches; falling through means the key was
// rewritten, so a context sync becomes mandatory -- confirm macro semantics.
206 REPROGRAM_ROP_KEYS Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
207 mov \wsync, #1
208Lskip_rop_keys_\@:
209#endif /* CSWITCH_ROP_KEYS */
210
211#if CSWITCH_JOP_KEYS
212 ldr \new_key, [\thread, TH_JOP_PID]
213 REPROGRAM_JOP_KEYS Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
214 mov \wsync, #1
215Lskip_jop_keys_\@:
216#endif /* CSWITCH_JOP_KEYS */
f427ee49
A
217
// Fast path: no key change and no pending sync request -> skip the isb.
218 cbz \wsync, 1f
cb323159 219 isb sy
f427ee49 220
2a1bd2d3
A
// The sync has been performed; clear the per-cpu request flag.
221#if defined(__ARM_ARCH_8_5__)
222 strb wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
223#endif
cb323159
A
2241:
225.endmacro
5ba3f43e
A
226
227/*
228 * void machine_load_context(thread_t thread)
229 *
230 * Load the context for the first thread to run on a
231 * cpu, and go.
232 */
233 .text
234 .align 2
235 .globl EXT(machine_load_context)
236
237LEXT(machine_load_context)
// x0 = thread to run. Installs it as the current thread, restores its
// saved kernel register state, then "returns" into the thread via the
// lr reloaded from its PCB -- this never returns to our caller.
238 set_thread_registers x0, x1, x2
239 ldr x1, [x0, TH_KSTACKPTR] // Get top of kernel stack
f427ee49
A
240 load_general_registers x1, 2
241 set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4
d9a64523 242 mov x0, #0 // Clear argument to thread_continue
5ba3f43e
A
243 ret
244
245/*
d9a64523
A
246 * typedef void (*thread_continue_t)(void *param, wait_result_t)
247 *
248 * void Call_continuation( thread_continue_t continuation,
249 * void *param,
250 * wait_result_t wresult,
251 * bool enable interrupts)
5ba3f43e
A
252 */
253 .text
254 .align 5
255 .globl EXT(Call_continuation)
256
257LEXT(Call_continuation)
// x0 = continuation, x1 = its parameter, x2 = wait result,
// x3 = nonzero to re-enable interrupts before invoking the continuation.
// Runs on a fresh kernel stack (fp cleared); if the continuation ever
// returns, the thread is terminated.
258 mrs x4, TPIDR_EL1 // Get the current thread pointer
259
260 /* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
261 ldr x5, [x4, TH_KSTACKPTR] // Get the top of the kernel stack
262 mov sp, x5 // Set stack pointer
d9a64523
A
263 mov fp, #0 // Clear the frame pointer
264
f427ee49 265 set_process_dependent_keys_and_sync_context x4, x5, x6, x7, w20
d9a64523 266
f427ee49
A
// Stash args in callee-saved registers across the call below.
267 mov x20, x0 //continuation
268 mov x21, x1 //continuation parameter
269 mov x22, x2 //wait result
d9a64523 270
f427ee49
A
// x3 != 0: caller asked for interrupts to be enabled first.
271 cbz x3, 1f
272 mov x0, #1
273 bl EXT(ml_set_interrupts_enabled)
d9a64523 2741:
5ba3f43e 275
d9a64523
A
276 mov x0, x21 // Set the first parameter
277 mov x1, x22 // Set the wait result arg
cb323159 278#ifdef HAS_APPLE_PAC
f427ee49
A
// Authenticated indirect call: the continuation pointer is signed with
// the thread_continue_t discriminator.
279 mov x21, THREAD_CONTINUE_T_DISC
280 blraa x20, x21 // Branch to the continuation
cb323159 281#else
d9a64523 282 blr x20 // Branch to the continuation
cb323159 283#endif
5ba3f43e
A
284 mrs x0, TPIDR_EL1 // Get the current thread pointer
285 b EXT(thread_terminate) // Kill the thread
286
287
288/*
289 * thread_t Switch_context(thread_t old,
290 * void (*cont)(void),
291 * thread_t new)
292 */
293 .text
294 .align 5
295 .globl EXT(Switch_context)
296
297LEXT(Switch_context)
// x0 = old thread, x1 = continuation, x2 = new thread.
// If a continuation is set (x1 != 0) the old thread will resume via
// Call_continuation, so its register state need not be saved here.
298 cbnz x1, Lswitch_threads // Skip saving old state if blocking on continuation
299 ldr x3, [x0, TH_KSTACKPTR] // Get the old kernel stack top
eb6b6ca3 300 save_general_registers x3, 4
5ba3f43e
A
301Lswitch_threads:
302 set_thread_registers x2, x3, x4
303 ldr x3, [x2, TH_KSTACKPTR]
f427ee49
A
304 load_general_registers x3, 4
305 set_process_dependent_keys_and_sync_context x2, x3, x4, x5, w6
5ba3f43e
A
// x0 still holds the old thread (load_general_registers preserves
// x0-x2); this ret runs on the NEW thread's stack/lr and returns the
// old thread_t to the new thread's saved call site.
306 ret
307
308/*
309 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
310 *
311 */
312 .text
313 .align 5
314 .globl EXT(Shutdown_context)
315
316LEXT(Shutdown_context)
// Save the current thread's state (so it can be resumed later), then
// run cpu_doshutdown on this CPU's interrupt stack with IRQ/FIQ masked.
317 mrs x10, TPIDR_EL1 // Get thread pointer
318 ldr x11, [x10, TH_KSTACKPTR] // Get the top of the kernel stack
eb6b6ca3 319 save_general_registers x11, 12
5ba3f43e
A
// Interrupts must be masked before leaving the thread's kernel stack.
320 msr DAIFSet, #(DAIFSC_FIQF | DAIFSC_IRQF) // Disable interrupts
321 ldr x11, [x10, ACT_CPUDATAP] // Get current cpu
322 ldr x12, [x11, CPU_ISTACKPTR] // Switch to interrupt stack
323 mov sp, x12
324 b EXT(cpu_doshutdown)
325
5ba3f43e
A
326/*
327 * thread_t Idle_context(void)
328 *
329 */
330 .text
331 .align 5
332 .globl EXT(Idle_context)
333
334LEXT(Idle_context)
// Save the current thread's state, then enter the idle loop on this
// CPU's interrupt stack. Idle_load_context is the matching resume path.
335 mrs x0, TPIDR_EL1 // Get thread pointer
336 ldr x1, [x0, TH_KSTACKPTR] // Get the top of the kernel stack
eb6b6ca3 337 save_general_registers x1, 2
5ba3f43e
A
338 ldr x1, [x0, ACT_CPUDATAP] // Get current cpu
339 ldr x2, [x1, CPU_ISTACKPTR] // Switch to interrupt stack
340 mov sp, x2
341 b EXT(cpu_idle)
342
343/*
344 * thread_t Idle_load_context(void)
345 *
346 */
347 .text
348 .align 5
349 .globl EXT(Idle_load_context)
350
351LEXT(Idle_load_context)
// Resume the current thread after idle: reload the state saved by
// Idle_context from the thread's PCB and return into it via the
// restored lr.
352 mrs x0, TPIDR_EL1 // Get thread pointer
353 ldr x1, [x0, TH_KSTACKPTR] // Get the top of the kernel stack
f427ee49
A
354 load_general_registers x1, 2
355 set_process_dependent_keys_and_sync_context x0, x1, x2, x3, w4
5ba3f43e
A
356 ret
357
358 .align 2
359 .globl EXT(machine_set_current_thread)
360LEXT(machine_set_current_thread)
// x0 = thread. Thin wrapper that installs x0 as the current thread
// (TPIDR_EL1, per-cpu active_thread, TPIDRRO_EL0) without touching any
// saved register state.
361 set_thread_registers x0, x1, x2
362 ret
cb323159
A
363
364
f427ee49 365/* vim: set ts=4: */