]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2007-2020 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | #include <debug.h> | |
29 | #include <mach/mach_types.h> | |
30 | #include <mach/kern_return.h> | |
31 | #include <mach/thread_status.h> | |
32 | #include <kern/thread.h> | |
33 | #include <kern/kalloc.h> | |
34 | #include <arm/vmparam.h> | |
35 | #include <arm/cpu_data_internal.h> | |
36 | #include <arm/misc_protos.h> | |
37 | #include <arm64/proc_reg.h> | |
38 | #if __has_feature(ptrauth_calls) | |
39 | #include <ptrauth.h> | |
40 | #endif | |
41 | ||
42 | ||
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers
 * followed by the FPSCR status/control word.  Kept only so that
 * ARM_VFP_STATE callers passing the smaller VFPv2 count still work.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t expressed in 32-bit words (Mach state count units). */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
52 | ||
53 | /* | |
54 | * Forward definitions | |
55 | */ | |
56 | void thread_set_child(thread_t child, int pid); | |
57 | void thread_set_parent(thread_t parent, int pid); | |
58 | static void free_debug_state(thread_t thread); | |
59 | ||
60 | /* | |
61 | * Maps state flavor to number of words in the state: | |
62 | */ | |
63 | /* __private_extern__ */ | |
/* Indexed by thread-state flavor; value is the state size in 32-bit words. */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
78 | ||
79 | extern zone_t ads_zone; | |
80 | ||
81 | #if __arm64__ | |
82 | /* | |
83 | * Copy values from saved_state to ts64. | |
84 | */ | |
85 | void | |
86 | saved_state_to_thread_state64(const arm_saved_state_t * saved_state, | |
87 | arm_thread_state64_t * ts64) | |
88 | { | |
89 | uint32_t i; | |
90 | ||
91 | assert(is_saved_state64(saved_state)); | |
92 | ||
93 | ts64->fp = get_saved_state_fp(saved_state); | |
94 | ts64->lr = get_saved_state_lr(saved_state); | |
95 | ts64->sp = get_saved_state_sp(saved_state); | |
96 | ts64->pc = get_saved_state_pc(saved_state); | |
97 | ts64->cpsr = get_saved_state_cpsr(saved_state); | |
98 | for (i = 0; i < 29; i++) { | |
99 | ts64->x[i] = get_saved_state_reg(saved_state, i); | |
100 | } | |
101 | } | |
102 | ||
103 | /* | |
104 | * Copy values from ts64 to saved_state | |
105 | */ | |
/*
 * Copy values from ts64 to saved_state.
 *
 * Only the user-modifiable CPSR bits (PSR64_USER_MASK) are taken from
 * ts64; all other CPSR bits already in saved_state are preserved.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	/* NOTE(review): interrupts are masked for the whole update, presumably so
	 * the signed thread state cannot be observed mid-manipulation — confirm
	 * against MANIPULATE_SIGNED_THREAD_STATE's documented requirements. */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

#if __has_feature(ptrauth_calls)
	/* Merge only the user-writable CPSR bits into the signed saved state. */
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
	    "and w2, w2, %w[not_psr64_user_mask] \n"
	    "mov w6, %w[cpsr] \n"
	    "and w6, w6, %w[psr64_user_mask] \n"
	    "orr w2, w2, w6 \n"
	    "str w2, [x0, %[SS64_CPSR]] \n",
	    [cpsr] "r"(ts64->cpsr),
	    [psr64_user_mask] "i"(PSR64_USER_MASK),
	    [not_psr64_user_mask] "i"(~PSR64_USER_MASK)
	    );
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	set_saved_state_cpsr(saved_state,
	    (get_saved_state_cpsr(saved_state) & ~PSR64_USER_MASK) | (ts64->cpsr & PSR64_USER_MASK));
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	/* General registers x0..x28 (fp/lr were written above). */
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_set_interrupts_enabled(intr);
#endif /* __has_feature(ptrauth_calls) */
}
149 | ||
150 | #endif /* __arm64__ */ | |
151 | ||
152 | static kern_return_t | |
153 | handle_get_arm32_thread_state(thread_state_t tstate, | |
154 | mach_msg_type_number_t * count, | |
155 | const arm_saved_state_t * saved_state) | |
156 | { | |
157 | if (*count < ARM_THREAD_STATE32_COUNT) { | |
158 | return KERN_INVALID_ARGUMENT; | |
159 | } | |
160 | if (!is_saved_state32(saved_state)) { | |
161 | return KERN_INVALID_ARGUMENT; | |
162 | } | |
163 | ||
164 | (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate); | |
165 | *count = ARM_THREAD_STATE32_COUNT; | |
166 | return KERN_SUCCESS; | |
167 | } | |
168 | ||
169 | static kern_return_t | |
170 | handle_get_arm64_thread_state(thread_state_t tstate, | |
171 | mach_msg_type_number_t * count, | |
172 | const arm_saved_state_t * saved_state) | |
173 | { | |
174 | if (*count < ARM_THREAD_STATE64_COUNT) { | |
175 | return KERN_INVALID_ARGUMENT; | |
176 | } | |
177 | if (!is_saved_state64(saved_state)) { | |
178 | return KERN_INVALID_ARGUMENT; | |
179 | } | |
180 | ||
181 | (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate); | |
182 | *count = ARM_THREAD_STATE64_COUNT; | |
183 | return KERN_SUCCESS; | |
184 | } | |
185 | ||
186 | ||
187 | static kern_return_t | |
188 | handle_get_arm_thread_state(thread_state_t tstate, | |
189 | mach_msg_type_number_t * count, | |
190 | const arm_saved_state_t * saved_state) | |
191 | { | |
192 | /* In an arm64 world, this flavor can be used to retrieve the thread | |
193 | * state of a 32-bit or 64-bit thread into a unified structure, but we | |
194 | * need to support legacy clients who are only aware of 32-bit, so | |
195 | * check the count to see what the client is expecting. | |
196 | */ | |
197 | if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) { | |
198 | return handle_get_arm32_thread_state(tstate, count, saved_state); | |
199 | } | |
200 | ||
201 | arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate; | |
202 | bzero(unified_state, sizeof(*unified_state)); | |
203 | #if __arm64__ | |
204 | if (is_saved_state64(saved_state)) { | |
205 | unified_state->ash.flavor = ARM_THREAD_STATE64; | |
206 | unified_state->ash.count = ARM_THREAD_STATE64_COUNT; | |
207 | (void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state)); | |
208 | } else | |
209 | #endif | |
210 | { | |
211 | unified_state->ash.flavor = ARM_THREAD_STATE32; | |
212 | unified_state->ash.count = ARM_THREAD_STATE32_COUNT; | |
213 | (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state)); | |
214 | } | |
215 | *count = ARM_UNIFIED_THREAD_STATE_COUNT; | |
216 | return KERN_SUCCESS; | |
217 | } | |
218 | ||
219 | ||
220 | static kern_return_t | |
221 | handle_set_arm32_thread_state(const thread_state_t tstate, | |
222 | mach_msg_type_number_t count, | |
223 | arm_saved_state_t * saved_state) | |
224 | { | |
225 | if (count != ARM_THREAD_STATE32_COUNT) { | |
226 | return KERN_INVALID_ARGUMENT; | |
227 | } | |
228 | ||
229 | (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state); | |
230 | return KERN_SUCCESS; | |
231 | } | |
232 | ||
233 | static kern_return_t | |
234 | handle_set_arm64_thread_state(const thread_state_t tstate, | |
235 | mach_msg_type_number_t count, | |
236 | arm_saved_state_t * saved_state) | |
237 | { | |
238 | if (count != ARM_THREAD_STATE64_COUNT) { | |
239 | return KERN_INVALID_ARGUMENT; | |
240 | } | |
241 | ||
242 | (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state); | |
243 | return KERN_SUCCESS; | |
244 | } | |
245 | ||
246 | ||
/*
 * Service the unified ARM_THREAD_STATE set-state flavor: apply either
 * 32- or 64-bit state from the unified structure, or fall back to the
 * legacy 32-bit layout when the caller passed the smaller legacy count.
 * The supplied state's bitness must match the thread's saved state.
 */
static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		/* Legacy layout is only valid against a 32-bit saved state. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		/* Incoming 64-bit state must target a 64-bit thread. */
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		/* Incoming 32-bit state must target a 32-bit thread. */
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
282 | ||
283 | ||
284 | /* | |
285 | * Translate thread state arguments to userspace representation | |
286 | */ | |
287 | ||
/*
 * machine_thread_state_convert_to_user:
 *
 * Convert kernel-held thread state into its userspace representation.
 * With ptrauth_calls, pc/lr are signed with the process-independent
 * code key and sp/fp with the process-independent data key, each
 * diversified by the target thread's jop_pid, so userspace can only
 * round-trip values it could legitimately produce.  Without ptrauth
 * this function is a no-op.  Non-64-bit flavors and short counts are
 * passed through untouched (KERN_SUCCESS).
 */
kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the 64-bit state within the supplied flavor, if any. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
	    ) {
		/* Either side is JOP-disabled/32-bit: hand out raw pointers. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	/* Sign each non-NULL register with its role-specific discriminator. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
374 | ||
375 | /* | |
376 | * Translate thread state arguments from userspace representation | |
377 | */ | |
378 | ||
/*
 * machine_thread_state_convert_from_user:
 *
 * Convert userspace-supplied thread state into the kernel
 * representation by authenticating the signed pc/lr/sp/fp values
 * (the inverse of machine_thread_state_convert_to_user).  An auth
 * failure is not reported here; ptrauth poisoning makes the resulting
 * state invalid when it is later consumed.  A JOP-disabled caller is
 * refused (KERN_PROTECTION_FAILURE) when targeting a JOP-enabled
 * thread.  Without ptrauth this function is a no-op.
 */
kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the 64-bit state within the supplied flavor, if any. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
		    ) {
			/* Target accepts raw pointers; nothing to authenticate. */
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	/* Authenticate each non-NULL register with its role-specific discriminator. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
479 | ||
480 | /* | |
481 | * Translate signal context data pointer to userspace representation | |
482 | */ | |
483 | ||
/*
 * Sign a signal-context pointer for userspace consumption.  No-op for
 * JOP-disabled or 32-bit threads, and on platforms without ptrauth.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	/* JOP-disabled or 32-bit callers get the raw pointer back. */
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	/* Sign the (non-NULL) context pointer with the target thread's user
	 * data key, diversified by the "uctx" discriminator. */
	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
512 | ||
513 | /* | |
514 | * Translate array of function pointer syscall arguments from userspace representation | |
515 | */ | |
516 | ||
517 | kern_return_t | |
518 | machine_thread_function_pointers_convert_from_user( | |
519 | thread_t thread, | |
520 | user_addr_t *fptrs, | |
521 | uint32_t count) | |
522 | { | |
523 | #if __has_feature(ptrauth_calls) | |
524 | if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) { | |
525 | assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)); | |
526 | return KERN_SUCCESS; | |
527 | } | |
528 | ||
529 | if (arm_user_jop_disabled()) { | |
530 | return KERN_SUCCESS; | |
531 | } | |
532 | ||
533 | while (count--) { | |
534 | if (*fptrs) { | |
535 | *fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs, | |
536 | ptrauth_key_function_pointer, 0, thread->machine.jop_pid); | |
537 | } | |
538 | fptrs++; | |
539 | } | |
540 | ||
541 | return KERN_SUCCESS; | |
542 | #else | |
543 | // No conversion from userspace representation on this platform | |
544 | (void)thread; (void)fptrs; (void)count; | |
545 | return KERN_SUCCESS; | |
546 | #endif /* __has_feature(ptrauth_calls) */ | |
547 | } | |
548 | ||
549 | /* | |
550 | * Routine: machine_thread_get_state | |
551 | * | |
552 | */ | |
/*
 * Dispatch on flavor and copy the requested register/debug/FP state of
 * `thread` into `tstate`, updating *count to the words written.
 * Returns KERN_INVALID_ARGUMENT when the buffer is too small, the
 * flavor is unknown, or the flavor's bitness does not match the thread.
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Legacy flavor enumeration (32-bit exception/debug flavors). */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		/* Flavor enumeration keyed to the thread's data bitness. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* As FLAVOR_LIST_NEW, with ARM_PAGEIN_STATE appended. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		const arm_saved_state_t *current_state = thread->machine.upcb;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
		    current_state);
		if (rn) {
			return rn;
		}

		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		/* The 32-bit flavor reports the saved ESR as the legacy "fsr". */
		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* A thread with no debug state configured reports all zeroes. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* A thread with no debug state configured reports all zeroes. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		/* A thread with no debug state configured reports all zeroes. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/* Accept either the full VFP count or the smaller VFPv2 count. */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		/* VFPv2 clients get 32 register words, others 64. */
		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		/* Copy `max` register words plus the trailing fpscr word. */
		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));


		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		/* Report the most recent page-in error recorded on the thread. */
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
845 | ||
846 | ||
847 | /* | |
848 | * Routine: machine_thread_get_kern_state | |
849 | * | |
850 | */ | |
851 | kern_return_t | |
852 | machine_thread_get_kern_state(thread_t thread, | |
853 | thread_flavor_t flavor, | |
854 | thread_state_t tstate, | |
855 | mach_msg_type_number_t * count) | |
856 | { | |
857 | /* | |
858 | * This works only for an interrupted kernel thread | |
859 | */ | |
860 | if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) { | |
861 | return KERN_FAILURE; | |
862 | } | |
863 | ||
864 | switch (flavor) { | |
865 | case ARM_THREAD_STATE: | |
866 | { | |
867 | kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state); | |
868 | if (rn) { | |
869 | return rn; | |
870 | } | |
871 | break; | |
872 | } | |
873 | case ARM_THREAD_STATE32: | |
874 | { | |
875 | kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state); | |
876 | if (rn) { | |
877 | return rn; | |
878 | } | |
879 | break; | |
880 | } | |
881 | #if __arm64__ | |
882 | case ARM_THREAD_STATE64: | |
883 | { | |
884 | kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state); | |
885 | if (rn) { | |
886 | return rn; | |
887 | } | |
888 | break; | |
889 | } | |
890 | #endif | |
891 | default: | |
892 | return KERN_INVALID_ARGUMENT; | |
893 | } | |
894 | return KERN_SUCCESS; | |
895 | } | |
896 | ||
/*
 * Re-tag the thread's saved integer and NEON state headers to match the
 * task's current address-size personality, and reinitialize the NEON
 * state (zeroed registers, default FPCR for the new mode).
 */
void
machine_thread_switch_addrmode(thread_t thread)
{
	if (task_has_64Bit_data(thread->task)) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}
924 | ||
925 | extern long long arm_debug_get(void); | |
926 | ||
/*
 * Routine: machine_thread_set_state
 *
 * Install caller-supplied state of the given flavor into `thread`'s
 * saved user context.
 *
 * Returns KERN_INVALID_ARGUMENT when the flavor, count, or the
 * thread's 32/64-bit data mode does not match the request,
 * KERN_PROTECTION_FAILURE when debug-state input requests settings
 * user space may not control (context-ID matching, linked break/
 * watchpoints), and KERN_FAILURE when a debug-state allocation fails.
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		/* Unified state: the handler validates count and bitness itself. */
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}


		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		/*
		 * Exception state is informational; validate the request but
		 * deliberately ignore the payload.
		 */
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		/* Same as above for the 64-bit exception-state layout. */
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		/* Legacy 32-bit debug state (no mdscr_el1 single-step control). */
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		/*
		 * Reject any breakpoint/watchpoint configuration user space may
		 * not control, and note whether anything is actually enabled.
		 */
		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			/* Nothing enabled: drop any previously allocated debug state. */
			free_debug_state(thread);
		} else {
			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
		}

		/* Apply immediately when modifying our own thread. */
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		/* Single-step request alone also counts as "debug enabled". */
		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			/* Only the single-step bit of mdscr_el1 is user-controllable. */
			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/*
			 * do not allow context IDs to be set
			 * (no wcr TYPE check here: AArch64 DBGWCR has no type field)
			 */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		/*
		 * 32-bit VFP state; ARM_VFPV2_STATE_COUNT covers 32 registers,
		 * ARM_VFP_STATE_COUNT covers 64 — plus one word for fpscr.
		 */
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		/* max registers plus the trailing fpscr word. */
		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));


		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1292 | ||
1293 | mach_vm_address_t | |
1294 | machine_thread_pc(thread_t thread) | |
1295 | { | |
1296 | struct arm_saved_state *ss = get_user_regs(thread); | |
1297 | return (mach_vm_address_t)get_saved_state_pc(ss); | |
1298 | } | |
1299 | ||
1300 | void | |
1301 | machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc) | |
1302 | { | |
1303 | set_saved_state_pc(get_user_regs(thread), (register_t)pc); | |
1304 | } | |
1305 | ||
1306 | /* | |
1307 | * Routine: machine_thread_state_initialize | |
1308 | * | |
1309 | */ | |
1310 | kern_return_t | |
1311 | machine_thread_state_initialize(thread_t thread) | |
1312 | { | |
1313 | arm_context_t *context = thread->machine.contextData; | |
1314 | ||
1315 | /* | |
1316 | * Should always be set up later. For a kernel thread, we don't care | |
1317 | * about this state. For a user thread, we'll set the state up in | |
1318 | * setup_wqthread, bsdthread_create, load_main(), or load_unixthread(). | |
1319 | */ | |
1320 | ||
1321 | if (context != NULL) { | |
1322 | bzero(&context->ss.uss, sizeof(context->ss.uss)); | |
1323 | bzero(&context->ns.uns, sizeof(context->ns.uns)); | |
1324 | ||
1325 | if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) { | |
1326 | context->ns.ns_64.fpcr = FPCR_DEFAULT; | |
1327 | } else { | |
1328 | context->ns.ns_32.fpcr = FPCR_DEFAULT_32; | |
1329 | } | |
1330 | } | |
1331 | ||
1332 | thread->machine.DebugData = NULL; | |
1333 | ||
1334 | #if defined(HAS_APPLE_PAC) | |
1335 | /* Sign the initial user-space thread state */ | |
1336 | if (thread->machine.upcb != NULL) { | |
1337 | boolean_t intr = ml_set_interrupts_enabled(FALSE); | |
1338 | ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0); | |
1339 | ml_set_interrupts_enabled(intr); | |
1340 | } | |
1341 | #endif /* defined(HAS_APPLE_PAC) */ | |
1342 | ||
1343 | return KERN_SUCCESS; | |
1344 | } | |
1345 | ||
1346 | /* | |
1347 | * Routine: machine_thread_dup | |
1348 | * | |
1349 | */ | |
1350 | kern_return_t | |
1351 | machine_thread_dup(thread_t self, | |
1352 | thread_t target, | |
1353 | __unused boolean_t is_corpse) | |
1354 | { | |
1355 | struct arm_saved_state *self_saved_state; | |
1356 | struct arm_saved_state *target_saved_state; | |
1357 | ||
1358 | target->machine.cthread_self = self->machine.cthread_self; | |
1359 | ||
1360 | self_saved_state = self->machine.upcb; | |
1361 | target_saved_state = target->machine.upcb; | |
1362 | bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state)); | |
1363 | #if defined(HAS_APPLE_PAC) | |
1364 | if (!is_corpse && is_saved_state64(self_saved_state)) { | |
1365 | check_and_sign_copied_thread_state(target_saved_state, self_saved_state); | |
1366 | } | |
1367 | #endif /* defined(HAS_APPLE_PAC) */ | |
1368 | ||
1369 | return KERN_SUCCESS; | |
1370 | } | |
1371 | ||
/*
 * Routine: get_user_regs
 *
 * Return the thread's saved user-mode register state (upcb).
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1381 | ||
/* Return the thread's saved user-mode NEON/FP register state. */
arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}
1387 | ||
/*
 * Routine: find_user_regs
 *
 * Return the thread's saved user-mode register state (upcb).
 * Same as get_user_regs(); kept as a separate entry point.
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1397 | ||
1398 | /* | |
1399 | * Routine: find_kern_regs | |
1400 | * | |
1401 | */ | |
1402 | struct arm_saved_state * | |
1403 | find_kern_regs(thread_t thread) | |
1404 | { | |
1405 | /* | |
1406 | * This works only for an interrupted kernel thread | |
1407 | */ | |
1408 | if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) { | |
1409 | return (struct arm_saved_state *) NULL; | |
1410 | } else { | |
1411 | return getCpuDatap()->cpu_int_state; | |
1412 | } | |
1413 | } | |
1414 | ||
1415 | arm_debug_state32_t * | |
1416 | find_debug_state32(thread_t thread) | |
1417 | { | |
1418 | if (thread && thread->machine.DebugData) { | |
1419 | return &(thread->machine.DebugData->uds.ds32); | |
1420 | } else { | |
1421 | return NULL; | |
1422 | } | |
1423 | } | |
1424 | ||
1425 | arm_debug_state64_t * | |
1426 | find_debug_state64(thread_t thread) | |
1427 | { | |
1428 | if (thread && thread->machine.DebugData) { | |
1429 | return &(thread->machine.DebugData->uds.ds64); | |
1430 | } else { | |
1431 | return NULL; | |
1432 | } | |
1433 | } | |
1434 | ||
1435 | /** | |
1436 | * Finds the debug state for the given 64 bit thread, allocating one if it | |
1437 | * does not exist. | |
1438 | * | |
1439 | * @param thread 64 bit thread to find or allocate debug state for | |
1440 | * | |
1441 | * @returns A pointer to the given thread's 64 bit debug state or a null | |
1442 | * pointer if the given thread is null or the allocation of a new | |
1443 | * debug state fails. | |
1444 | */ | |
1445 | arm_debug_state64_t * | |
1446 | find_or_allocate_debug_state64(thread_t thread) | |
1447 | { | |
1448 | arm_debug_state64_t *thread_state = find_debug_state64(thread); | |
1449 | if (thread != NULL && thread_state == NULL) { | |
1450 | thread->machine.DebugData = zalloc(ads_zone); | |
1451 | if (thread->machine.DebugData != NULL) { | |
1452 | bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); | |
1453 | thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64; | |
1454 | thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT; | |
1455 | thread_state = find_debug_state64(thread); | |
1456 | } | |
1457 | } | |
1458 | return thread_state; | |
1459 | } | |
1460 | ||
1461 | /** | |
1462 | * Finds the debug state for the given 32 bit thread, allocating one if it | |
1463 | * does not exist. | |
1464 | * | |
1465 | * @param thread 32 bit thread to find or allocate debug state for | |
1466 | * | |
1467 | * @returns A pointer to the given thread's 32 bit debug state or a null | |
1468 | * pointer if the given thread is null or the allocation of a new | |
1469 | * debug state fails. | |
1470 | */ | |
1471 | arm_debug_state32_t * | |
1472 | find_or_allocate_debug_state32(thread_t thread) | |
1473 | { | |
1474 | arm_debug_state32_t *thread_state = find_debug_state32(thread); | |
1475 | if (thread != NULL && thread_state == NULL) { | |
1476 | thread->machine.DebugData = zalloc(ads_zone); | |
1477 | if (thread->machine.DebugData != NULL) { | |
1478 | bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData)); | |
1479 | thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32; | |
1480 | thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT; | |
1481 | thread_state = find_debug_state32(thread); | |
1482 | } | |
1483 | } | |
1484 | return thread_state; | |
1485 | } | |
1486 | ||
1487 | /** | |
1488 | * Frees a thread's debug state if allocated. Otherwise does nothing. | |
1489 | * | |
1490 | * @param thread thread to free the debug state of | |
1491 | */ | |
1492 | static inline void | |
1493 | free_debug_state(thread_t thread) | |
1494 | { | |
1495 | if (thread != NULL && thread->machine.DebugData != NULL) { | |
1496 | void *pTmp = thread->machine.DebugData; | |
1497 | thread->machine.DebugData = NULL; | |
1498 | zfree(ads_zone, pTmp); | |
1499 | } | |
1500 | } | |
1501 | ||
1502 | /* | |
1503 | * Routine: thread_userstack | |
1504 | * | |
1505 | */ | |
1506 | kern_return_t | |
1507 | thread_userstack(__unused thread_t thread, | |
1508 | int flavor, | |
1509 | thread_state_t tstate, | |
1510 | unsigned int count, | |
1511 | mach_vm_offset_t * user_stack, | |
1512 | int * customstack, | |
1513 | boolean_t is_64bit_data | |
1514 | ) | |
1515 | { | |
1516 | register_t sp; | |
1517 | ||
1518 | switch (flavor) { | |
1519 | case ARM_THREAD_STATE: | |
1520 | if (count == ARM_UNIFIED_THREAD_STATE_COUNT) { | |
1521 | #if __arm64__ | |
1522 | if (is_64bit_data) { | |
1523 | sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp; | |
1524 | } else | |
1525 | #endif | |
1526 | { | |
1527 | sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp; | |
1528 | } | |
1529 | ||
1530 | break; | |
1531 | } | |
1532 | ||
1533 | /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */ | |
1534 | OS_FALLTHROUGH; | |
1535 | case ARM_THREAD_STATE32: | |
1536 | if (count != ARM_THREAD_STATE32_COUNT) { | |
1537 | return KERN_INVALID_ARGUMENT; | |
1538 | } | |
1539 | if (is_64bit_data) { | |
1540 | return KERN_INVALID_ARGUMENT; | |
1541 | } | |
1542 | ||
1543 | sp = ((arm_thread_state32_t *)tstate)->sp; | |
1544 | break; | |
1545 | #if __arm64__ | |
1546 | case ARM_THREAD_STATE64: | |
1547 | if (count != ARM_THREAD_STATE64_COUNT) { | |
1548 | return KERN_INVALID_ARGUMENT; | |
1549 | } | |
1550 | if (!is_64bit_data) { | |
1551 | return KERN_INVALID_ARGUMENT; | |
1552 | } | |
1553 | ||
1554 | sp = ((arm_thread_state32_t *)tstate)->sp; | |
1555 | break; | |
1556 | #endif | |
1557 | default: | |
1558 | return KERN_INVALID_ARGUMENT; | |
1559 | } | |
1560 | ||
1561 | if (sp) { | |
1562 | *user_stack = CAST_USER_ADDR_T(sp); | |
1563 | if (customstack) { | |
1564 | *customstack = 1; | |
1565 | } | |
1566 | } else { | |
1567 | *user_stack = CAST_USER_ADDR_T(USRSTACK64); | |
1568 | if (customstack) { | |
1569 | *customstack = 0; | |
1570 | } | |
1571 | } | |
1572 | ||
1573 | return KERN_SUCCESS; | |
1574 | } | |
1575 | ||
1576 | /* | |
1577 | * thread_userstackdefault: | |
1578 | * | |
1579 | * Return the default stack location for the | |
1580 | * thread, if otherwise unknown. | |
1581 | */ | |
1582 | kern_return_t | |
1583 | thread_userstackdefault(mach_vm_offset_t * default_user_stack, | |
1584 | boolean_t is64bit) | |
1585 | { | |
1586 | if (is64bit) { | |
1587 | *default_user_stack = USRSTACK64; | |
1588 | } else { | |
1589 | *default_user_stack = USRSTACK; | |
1590 | } | |
1591 | ||
1592 | return KERN_SUCCESS; | |
1593 | } | |
1594 | ||
1595 | /* | |
1596 | * Routine: thread_setuserstack | |
1597 | * | |
1598 | */ | |
1599 | void | |
1600 | thread_setuserstack(thread_t thread, | |
1601 | mach_vm_address_t user_stack) | |
1602 | { | |
1603 | struct arm_saved_state *sv; | |
1604 | ||
1605 | sv = get_user_regs(thread); | |
1606 | ||
1607 | set_saved_state_sp(sv, user_stack); | |
1608 | ||
1609 | return; | |
1610 | } | |
1611 | ||
1612 | /* | |
1613 | * Routine: thread_adjuserstack | |
1614 | * | |
1615 | */ | |
1616 | user_addr_t | |
1617 | thread_adjuserstack(thread_t thread, | |
1618 | int adjust) | |
1619 | { | |
1620 | struct arm_saved_state *sv; | |
1621 | uint64_t sp; | |
1622 | ||
1623 | sv = get_user_regs(thread); | |
1624 | ||
1625 | sp = get_saved_state_sp(sv); | |
1626 | sp += adjust; | |
1627 | set_saved_state_sp(sv, sp);; | |
1628 | ||
1629 | return sp; | |
1630 | } | |
1631 | ||
1632 | ||
1633 | /* | |
1634 | * Routine: thread_setentrypoint | |
1635 | * | |
1636 | */ | |
1637 | void | |
1638 | thread_setentrypoint(thread_t thread, | |
1639 | mach_vm_offset_t entry) | |
1640 | { | |
1641 | struct arm_saved_state *sv; | |
1642 | ||
1643 | sv = get_user_regs(thread); | |
1644 | ||
1645 | set_saved_state_pc(sv, entry); | |
1646 | ||
1647 | return; | |
1648 | } | |
1649 | ||
1650 | /* | |
1651 | * Routine: thread_entrypoint | |
1652 | * | |
1653 | */ | |
1654 | kern_return_t | |
1655 | thread_entrypoint(__unused thread_t thread, | |
1656 | int flavor, | |
1657 | thread_state_t tstate, | |
1658 | unsigned int count, | |
1659 | mach_vm_offset_t * entry_point | |
1660 | ) | |
1661 | { | |
1662 | switch (flavor) { | |
1663 | case ARM_THREAD_STATE: | |
1664 | { | |
1665 | struct arm_thread_state *state; | |
1666 | ||
1667 | if (count != ARM_THREAD_STATE_COUNT) { | |
1668 | return KERN_INVALID_ARGUMENT; | |
1669 | } | |
1670 | ||
1671 | state = (struct arm_thread_state *) tstate; | |
1672 | ||
1673 | /* | |
1674 | * If a valid entry point is specified, use it. | |
1675 | */ | |
1676 | if (state->pc) { | |
1677 | *entry_point = CAST_USER_ADDR_T(state->pc); | |
1678 | } else { | |
1679 | *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); | |
1680 | } | |
1681 | } | |
1682 | break; | |
1683 | ||
1684 | case ARM_THREAD_STATE64: | |
1685 | { | |
1686 | struct arm_thread_state64 *state; | |
1687 | ||
1688 | if (count != ARM_THREAD_STATE64_COUNT) { | |
1689 | return KERN_INVALID_ARGUMENT; | |
1690 | } | |
1691 | ||
1692 | state = (struct arm_thread_state64*) tstate; | |
1693 | ||
1694 | /* | |
1695 | * If a valid entry point is specified, use it. | |
1696 | */ | |
1697 | if (state->pc) { | |
1698 | *entry_point = CAST_USER_ADDR_T(state->pc); | |
1699 | } else { | |
1700 | *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS); | |
1701 | } | |
1702 | ||
1703 | break; | |
1704 | } | |
1705 | default: | |
1706 | return KERN_INVALID_ARGUMENT; | |
1707 | } | |
1708 | ||
1709 | return KERN_SUCCESS; | |
1710 | } | |
1711 | ||
1712 | ||
1713 | /* | |
1714 | * Routine: thread_set_child | |
1715 | * | |
1716 | */ | |
1717 | void | |
1718 | thread_set_child(thread_t child, | |
1719 | int pid) | |
1720 | { | |
1721 | struct arm_saved_state *child_state; | |
1722 | ||
1723 | child_state = get_user_regs(child); | |
1724 | ||
1725 | set_saved_state_reg(child_state, 0, pid); | |
1726 | set_saved_state_reg(child_state, 1, 1ULL); | |
1727 | } | |
1728 | ||
1729 | ||
1730 | /* | |
1731 | * Routine: thread_set_parent | |
1732 | * | |
1733 | */ | |
1734 | void | |
1735 | thread_set_parent(thread_t parent, | |
1736 | int pid) | |
1737 | { | |
1738 | struct arm_saved_state *parent_state; | |
1739 | ||
1740 | parent_state = get_user_regs(parent); | |
1741 | ||
1742 | set_saved_state_reg(parent_state, 0, pid); | |
1743 | set_saved_state_reg(parent_state, 1, 0); | |
1744 | } | |
1745 | ||
1746 | ||
/*
 * Machine context captured by act_thread_csave() and restored by
 * act_thread_catt(): the unified integer state plus, when VFP is
 * built in, the NEON/FP state.
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};
1753 | ||
1754 | /* | |
1755 | * Routine: act_thread_csave | |
1756 | * | |
1757 | */ | |
1758 | void * | |
1759 | act_thread_csave(void) | |
1760 | { | |
1761 | struct arm_act_context *ic; | |
1762 | kern_return_t kret; | |
1763 | unsigned int val; | |
1764 | thread_t thread = current_thread(); | |
1765 | ||
1766 | ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context)); | |
1767 | if (ic == (struct arm_act_context *) NULL) { | |
1768 | return (void *) 0; | |
1769 | } | |
1770 | ||
1771 | val = ARM_UNIFIED_THREAD_STATE_COUNT; | |
1772 | kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val); | |
1773 | if (kret != KERN_SUCCESS) { | |
1774 | kfree(ic, sizeof(struct arm_act_context)); | |
1775 | return (void *) 0; | |
1776 | } | |
1777 | ||
1778 | #if __ARM_VFP__ | |
1779 | if (thread_is_64bit_data(thread)) { | |
1780 | val = ARM_NEON_STATE64_COUNT; | |
1781 | kret = machine_thread_get_state(thread, | |
1782 | ARM_NEON_STATE64, | |
1783 | (thread_state_t)&ic->ns, | |
1784 | &val); | |
1785 | } else { | |
1786 | val = ARM_NEON_STATE_COUNT; | |
1787 | kret = machine_thread_get_state(thread, | |
1788 | ARM_NEON_STATE, | |
1789 | (thread_state_t)&ic->ns, | |
1790 | &val); | |
1791 | } | |
1792 | if (kret != KERN_SUCCESS) { | |
1793 | kfree(ic, sizeof(struct arm_act_context)); | |
1794 | return (void *) 0; | |
1795 | } | |
1796 | #endif | |
1797 | return ic; | |
1798 | } | |
1799 | ||
1800 | /* | |
1801 | * Routine: act_thread_catt | |
1802 | * | |
1803 | */ | |
1804 | void | |
1805 | act_thread_catt(void * ctx) | |
1806 | { | |
1807 | struct arm_act_context *ic; | |
1808 | kern_return_t kret; | |
1809 | thread_t thread = current_thread(); | |
1810 | ||
1811 | ic = (struct arm_act_context *) ctx; | |
1812 | if (ic == (struct arm_act_context *) NULL) { | |
1813 | return; | |
1814 | } | |
1815 | ||
1816 | kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT); | |
1817 | if (kret != KERN_SUCCESS) { | |
1818 | goto out; | |
1819 | } | |
1820 | ||
1821 | #if __ARM_VFP__ | |
1822 | if (thread_is_64bit_data(thread)) { | |
1823 | kret = machine_thread_set_state(thread, | |
1824 | ARM_NEON_STATE64, | |
1825 | (thread_state_t)&ic->ns, | |
1826 | ARM_NEON_STATE64_COUNT); | |
1827 | } else { | |
1828 | kret = machine_thread_set_state(thread, | |
1829 | ARM_NEON_STATE, | |
1830 | (thread_state_t)&ic->ns, | |
1831 | ARM_NEON_STATE_COUNT); | |
1832 | } | |
1833 | if (kret != KERN_SUCCESS) { | |
1834 | goto out; | |
1835 | } | |
1836 | #endif | |
1837 | out: | |
1838 | kfree(ic, sizeof(struct arm_act_context)); | |
1839 | } | |
1840 | ||
1841 | /* | |
1842 | * Routine: act_thread_catt | |
1843 | * | |
1844 | */ | |
1845 | void | |
1846 | act_thread_cfree(void *ctx) | |
1847 | { | |
1848 | kfree(ctx, sizeof(struct arm_act_context)); | |
1849 | } | |
1850 | ||
1851 | kern_return_t | |
1852 | thread_set_wq_state32(thread_t thread, | |
1853 | thread_state_t tstate) | |
1854 | { | |
1855 | arm_thread_state_t *state; | |
1856 | struct arm_saved_state *saved_state; | |
1857 | struct arm_saved_state32 *saved_state_32; | |
1858 | thread_t curth = current_thread(); | |
1859 | spl_t s = 0; | |
1860 | ||
1861 | assert(!thread_is_64bit_data(thread)); | |
1862 | ||
1863 | saved_state = thread->machine.upcb; | |
1864 | saved_state_32 = saved_state32(saved_state); | |
1865 | ||
1866 | state = (arm_thread_state_t *)tstate; | |
1867 | ||
1868 | if (curth != thread) { | |
1869 | s = splsched(); | |
1870 | thread_lock(thread); | |
1871 | } | |
1872 | ||
1873 | /* | |
1874 | * do not zero saved_state, it can be concurrently accessed | |
1875 | * and zero is not a valid state for some of the registers, | |
1876 | * like sp. | |
1877 | */ | |
1878 | thread_state32_to_saved_state(state, saved_state); | |
1879 | saved_state_32->cpsr = PSR64_USER32_DEFAULT; | |
1880 | ||
1881 | if (curth != thread) { | |
1882 | thread_unlock(thread); | |
1883 | splx(s); | |
1884 | } | |
1885 | ||
1886 | return KERN_SUCCESS; | |
1887 | } | |
1888 | ||
1889 | kern_return_t | |
1890 | thread_set_wq_state64(thread_t thread, | |
1891 | thread_state_t tstate) | |
1892 | { | |
1893 | arm_thread_state64_t *state; | |
1894 | struct arm_saved_state *saved_state; | |
1895 | struct arm_saved_state64 *saved_state_64; | |
1896 | thread_t curth = current_thread(); | |
1897 | spl_t s = 0; | |
1898 | ||
1899 | assert(thread_is_64bit_data(thread)); | |
1900 | ||
1901 | saved_state = thread->machine.upcb; | |
1902 | saved_state_64 = saved_state64(saved_state); | |
1903 | state = (arm_thread_state64_t *)tstate; | |
1904 | ||
1905 | if (curth != thread) { | |
1906 | s = splsched(); | |
1907 | thread_lock(thread); | |
1908 | } | |
1909 | ||
1910 | /* | |
1911 | * do not zero saved_state, it can be concurrently accessed | |
1912 | * and zero is not a valid state for some of the registers, | |
1913 | * like sp. | |
1914 | */ | |
1915 | thread_state64_to_saved_state(state, saved_state); | |
1916 | set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT); | |
1917 | ||
1918 | if (curth != thread) { | |
1919 | thread_unlock(thread); | |
1920 | splx(s); | |
1921 | } | |
1922 | ||
1923 | return KERN_SUCCESS; | |
1924 | } |