]> git.saurik.com Git - apple/xnu.git/blame - osfmk/arm64/status.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / arm64 / status.c
CommitLineData
5ba3f43e 1/*
f427ee49 2 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
5ba3f43e
A
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <debug.h>
29#include <mach/mach_types.h>
30#include <mach/kern_return.h>
31#include <mach/thread_status.h>
32#include <kern/thread.h>
33#include <kern/kalloc.h>
34#include <arm/vmparam.h>
35#include <arm/cpu_data_internal.h>
f427ee49 36#include <arm/misc_protos.h>
5ba3f43e 37#include <arm64/proc_reg.h>
cb323159
A
38#if __has_feature(ptrauth_calls)
39#include <ptrauth.h>
40#endif
5ba3f43e 41
f427ee49 42
/*
 * Legacy VFPv2 floating-point state layout: 32 32-bit registers followed
 * by the FPSCR word.  Only its size matters here: ARM_VFPV2_STATE_COUNT
 * is used by the ARM_VFP_STATE handler in machine_thread_get_state() to
 * accept clients that supply this smaller, older buffer size.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words (Mach state-count convention). */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
5ba3f43e
A
52
/*
 * Forward definitions
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
/* NOTE(review): defined later in this file; called by the ARM_DEBUG_STATE
 * set-state path when no breakpoint/watchpoint remains enabled —
 * presumably releases the thread's allocated debug state. */
static void free_debug_state(thread_t thread);
5ba3f43e
A
59
/*
 * Maps state flavor to number of words in the state:
 * indexed by the ARM_*_STATE flavor constants; each entry is the
 * mach_msg_type_number_t count callers must supply for that flavor.
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};

/* NOTE(review): defined elsewhere — presumably the zone backing
 * per-thread debug-state allocations; verify against kern/zalloc users. */
extern zone_t ads_zone;
80
81#if __arm64__
82/*
83 * Copy values from saved_state to ts64.
84 */
85void
cb323159
A
86saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
87 arm_thread_state64_t * ts64)
5ba3f43e
A
88{
89 uint32_t i;
90
91 assert(is_saved_state64(saved_state));
92
93 ts64->fp = get_saved_state_fp(saved_state);
94 ts64->lr = get_saved_state_lr(saved_state);
95 ts64->sp = get_saved_state_sp(saved_state);
96 ts64->pc = get_saved_state_pc(saved_state);
97 ts64->cpsr = get_saved_state_cpsr(saved_state);
0a7de745 98 for (i = 0; i < 29; i++) {
5ba3f43e 99 ts64->x[i] = get_saved_state_reg(saved_state, i);
0a7de745 100 }
5ba3f43e
A
101}
102
/*
 * Copy values from ts64 to saved_state
 *
 * With pointer authentication enabled the CPSR update must go through
 * MANIPULATE_SIGNED_THREAD_STATE (inline asm) so that only the user-mode
 * bits of CPSR can be modified and the signed thread state stays valid;
 * interrupts are disabled around the whole update.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	/* Block interrupts while the signed state is being rewritten. */
	boolean_t intr = ml_set_interrupts_enabled(FALSE);
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

#if __has_feature(ptrauth_calls)
	/*
	 * Merge the caller-supplied CPSR into the saved CPSR, keeping only
	 * the PSR64_USER_MASK bits from ts64->cpsr and preserving the rest.
	 */
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
	    "and w2, w2, %w[not_psr64_user_mask] \n"
	    "mov w6, %w[cpsr] \n"
	    "and w6, w6, %w[psr64_user_mask] \n"
	    "orr w2, w2, w6 \n"
	    "str w2, [x0, %[SS64_CPSR]] \n",
	    [cpsr] "r"(ts64->cpsr),
	    [psr64_user_mask] "i"(PSR64_USER_MASK),
	    [not_psr64_user_mask] "i"(~PSR64_USER_MASK)
	    );
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	/* No ptrauth: plain read-modify-write of the user-visible CPSR bits. */
	set_saved_state_cpsr(saved_state,
	    (get_saved_state_cpsr(saved_state) & ~PSR64_USER_MASK) | (ts64->cpsr & PSR64_USER_MASK));
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_set_interrupts_enabled(intr);
#endif /* __has_feature(ptrauth_calls) */
}
5ba3f43e 149
cb323159
A
150#endif /* __arm64__ */
151
152static kern_return_t
153handle_get_arm32_thread_state(thread_state_t tstate,
154 mach_msg_type_number_t * count,
155 const arm_saved_state_t * saved_state)
5ba3f43e 156{
0a7de745
A
157 if (*count < ARM_THREAD_STATE32_COUNT) {
158 return KERN_INVALID_ARGUMENT;
159 }
160 if (!is_saved_state32(saved_state)) {
161 return KERN_INVALID_ARGUMENT;
162 }
5ba3f43e
A
163
164 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
165 *count = ARM_THREAD_STATE32_COUNT;
166 return KERN_SUCCESS;
167}
168
cb323159
A
169static kern_return_t
170handle_get_arm64_thread_state(thread_state_t tstate,
171 mach_msg_type_number_t * count,
172 const arm_saved_state_t * saved_state)
5ba3f43e 173{
0a7de745
A
174 if (*count < ARM_THREAD_STATE64_COUNT) {
175 return KERN_INVALID_ARGUMENT;
176 }
177 if (!is_saved_state64(saved_state)) {
178 return KERN_INVALID_ARGUMENT;
179 }
5ba3f43e
A
180
181 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
182 *count = ARM_THREAD_STATE64_COUNT;
183 return KERN_SUCCESS;
184}
185
186
cb323159
A
187static kern_return_t
188handle_get_arm_thread_state(thread_state_t tstate,
189 mach_msg_type_number_t * count,
190 const arm_saved_state_t * saved_state)
5ba3f43e
A
191{
192 /* In an arm64 world, this flavor can be used to retrieve the thread
193 * state of a 32-bit or 64-bit thread into a unified structure, but we
194 * need to support legacy clients who are only aware of 32-bit, so
195 * check the count to see what the client is expecting.
196 */
197 if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
198 return handle_get_arm32_thread_state(tstate, count, saved_state);
199 }
200
201 arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
202 bzero(unified_state, sizeof(*unified_state));
203#if __arm64__
204 if (is_saved_state64(saved_state)) {
205 unified_state->ash.flavor = ARM_THREAD_STATE64;
206 unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
207 (void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
208 } else
209#endif
210 {
211 unified_state->ash.flavor = ARM_THREAD_STATE32;
212 unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
213 (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
214 }
215 *count = ARM_UNIFIED_THREAD_STATE_COUNT;
0a7de745 216 return KERN_SUCCESS;
5ba3f43e
A
217}
218
cb323159
A
219
220static kern_return_t
221handle_set_arm32_thread_state(const thread_state_t tstate,
222 mach_msg_type_number_t count,
223 arm_saved_state_t * saved_state)
5ba3f43e 224{
0a7de745
A
225 if (count != ARM_THREAD_STATE32_COUNT) {
226 return KERN_INVALID_ARGUMENT;
227 }
5ba3f43e
A
228
229 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
230 return KERN_SUCCESS;
231}
232
cb323159
A
233static kern_return_t
234handle_set_arm64_thread_state(const thread_state_t tstate,
235 mach_msg_type_number_t count,
236 arm_saved_state_t * saved_state)
5ba3f43e 237{
0a7de745
A
238 if (count != ARM_THREAD_STATE64_COUNT) {
239 return KERN_INVALID_ARGUMENT;
240 }
5ba3f43e
A
241
242 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
243 return KERN_SUCCESS;
244}
245
246
cb323159
A
247static kern_return_t
248handle_set_arm_thread_state(const thread_state_t tstate,
249 mach_msg_type_number_t count,
250 arm_saved_state_t * saved_state)
5ba3f43e
A
251{
252 /* In an arm64 world, this flavor can be used to set the thread state of a
253 * 32-bit or 64-bit thread from a unified structure, but we need to support
254 * legacy clients who are only aware of 32-bit, so check the count to see
255 * what the client is expecting.
256 */
257 if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
d9a64523 258 if (!is_saved_state32(saved_state)) {
0a7de745 259 return KERN_INVALID_ARGUMENT;
d9a64523 260 }
5ba3f43e
A
261 return handle_set_arm32_thread_state(tstate, count, saved_state);
262 }
263
264 const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
265#if __arm64__
266 if (is_thread_state64(unified_state)) {
d9a64523 267 if (!is_saved_state64(saved_state)) {
0a7de745 268 return KERN_INVALID_ARGUMENT;
d9a64523 269 }
5ba3f43e
A
270 (void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
271 } else
272#endif
273 {
d9a64523 274 if (!is_saved_state32(saved_state)) {
0a7de745 275 return KERN_INVALID_ARGUMENT;
d9a64523 276 }
5ba3f43e
A
277 (void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
278 }
279
0a7de745 280 return KERN_SUCCESS;
5ba3f43e
A
281}
282
cb323159 283
d9a64523
A
/*
 * Translate thread state arguments to userspace representation
 *
 * With ptrauth: sign pc/lr (IA) and sp/fp (DA) in the outgoing
 * arm_thread_state64_t with the target thread's jop_pid, and record in
 * ts64->flags whether ptrauth applies at all and whether lr was already
 * IB-signed.  Flavors that carry no 64-bit state are passed through
 * untouched.  Without ptrauth this is a no-op.
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the arm_thread_state64_t inside the supplied buffer; any
	 * other flavor (or an undersized buffer) needs no conversion. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
	    ) {
		/* Either side has JOP disabled: hand out unsigned state. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		/* Global user-JOP kill switch: leave pointers unsigned. */
		return KERN_SUCCESS;
	}

	/* Sign each non-zero pointer with a per-field discriminator and the
	 * target thread's jop_pid. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
374
/*
 * Translate thread state arguments from userspace representation
 *
 * Inverse of machine_thread_state_convert_to_user(): authenticate the
 * signed pc/lr/sp/fp pointers in an incoming arm_thread_state64_t using
 * the target thread's jop_pid.  A JOP-disabled caller may not set state
 * on a JOP-enabled target (KERN_PROTECTION_FAILURE), and attempts to
 * smuggle unsigned pointers past ptrauth are deliberately left to be
 * poisoned by the failing auth below.  Without ptrauth this is a no-op.
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	/* Locate the arm_thread_state64_t inside the supplied buffer; any
	 * other flavor (or wrong count) needs no conversion. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
		    ) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		/* Global user-JOP kill switch: accept pointers as-is. */
		return KERN_SUCCESS;
	}

	/* Authenticate each non-zero pointer with the matching per-field
	 * discriminator used by the to-user conversion. */
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
479
/*
 * Translate signal context data pointer to userspace representation
 *
 * With ptrauth: DA-sign the non-zero signal-context pointer with the
 * "uctx" discriminator and the target thread's jop_pid before handing
 * it to userspace.  Without ptrauth this is a no-op.
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller has JOP disabled; the target must match. */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		/* Global user-JOP kill switch: leave the pointer unsigned. */
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
512
/*
 * Translate array of function pointer syscall arguments from userspace representation
 *
 * With ptrauth: authenticate each non-zero entry of the fptrs array
 * (signed with ptrauth_key_function_pointer, zero discriminator) against
 * the target thread's jop_pid, in place.  Without ptrauth this is a
 * no-op.
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		/* Caller has JOP disabled; the target must match. */
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		/* Global user-JOP kill switch: accept pointers as-is. */
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
548
/*
 * Routine: machine_thread_get_state
 *
 * Return the requested state flavor for `thread` into the caller's
 * buffer.  *count is the buffer capacity in 32-bit words on entry and
 * is set to the number of words written on success.  Flavors whose
 * width does not match the thread (32- vs 64-bit data) are rejected
 * with KERN_INVALID_ARGUMENT, as are undersized buffers.
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	/* Meta-flavor: list of flavors supported for legacy clients. */
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	/* Meta-flavor: width-aware flavor list. */
	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	/* Meta-flavor: as above plus ARM_PAGEIN_STATE. */
	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	/* Unified flavor: dispatches on the thread's saved-state width. */
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		const arm_saved_state_t *current_state = thread->machine.upcb;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
		    current_state);
		if (rn) {
			return rn;
		}

		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		/* 32-bit flavor reports the ESR under the legacy "fsr" name. */
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	/* Legacy 32-bit debug flavor; returns zeros if the thread has no
	 * debug state allocated. */
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	/* VFP flavor: also accepts the smaller legacy VFPv2 buffer size and
	 * copies max+1 words (registers plus fpscr) accordingly. */
	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));


		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	/* Reports the thread's last pagein error code. */
	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
845
846
/*
 * Routine: machine_thread_get_kern_state
 *
 * Return the interrupted kernel register state for the current thread,
 * taken from this CPU's cpu_int_state.  Only thread-state flavors are
 * supported, and only for the current thread while an interrupt saved
 * state is present.
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
896
897void
898machine_thread_switch_addrmode(thread_t thread)
899{
d9a64523 900 if (task_has_64Bit_data(thread->task)) {
5ba3f43e
A
901 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
902 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
903 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
904 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
905
906 /*
907 * Reinitialize the NEON state.
908 */
909 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
910 thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
911 } else {
912 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
913 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
914 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
915 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
916
917 /*
918 * Reinitialize the NEON state.
919 */
920 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
921 thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
922 }
923}
924
925extern long long arm_debug_get(void);
926
/*
 * Routine: machine_thread_set_state
 *
 * Install user-visible machine state on a thread: general-purpose,
 * exception, debug, or VFP/NEON register state, selected by `flavor'.
 * Each flavor validates `count' and the thread's 32/64-bit data mode
 * before committing anything, so a failed call leaves the thread's
 * state unchanged.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT on a flavor/count/mode
 * mismatch, KERN_PROTECTION_FAILURE if debug state attempts to program
 * privileged breakpoint/watchpoint modes, or KERN_FAILURE if a debug
 * state area cannot be allocated.
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
    kern_return_t rn;

    switch (flavor) {
    case ARM_THREAD_STATE:
        /* Unified flavor: the handler decodes 32- vs 64-bit itself. */
        rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;

    case ARM_THREAD_STATE32:
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;

#if __arm64__
    case ARM_THREAD_STATE64:
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }


        rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
        if (rn) {
            return rn;
        }
        break;
#endif
    case ARM_EXCEPTION_STATE:{
        /*
         * Exception state is read-only from user space: only the
         * arguments are validated, nothing is written back.
         */
        if (count != ARM_EXCEPTION_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        break;
    }
    case ARM_EXCEPTION_STATE64:{
        /* Same as ARM_EXCEPTION_STATE: validate only, no state change. */
        if (count != ARM_EXCEPTION_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        break;
    }
    case ARM_DEBUG_STATE:
    {
        arm_legacy_debug_state_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_legacy_debug_state_t *) tstate;

        /*
         * First pass: reject any breakpoint/watchpoint control value
         * user space must not program, and note whether any entry is
         * enabled at all.
         */
        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            /* Nothing enabled: release any per-thread debug area. */
            free_debug_state(thread);
        } else {
            arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

            if (thread_state == NULL) {
                return KERN_FAILURE;
            }

            /*
             * Second pass: commit the sanitized values, forcing
             * user-mode-only, unlinked, IVA-match semantics.
             */
            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGBCR_MATCH_MASK
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
            }

            thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
        }

        /* Load the new values into the hardware if this is the running thread. */
        if (thread == current_thread()) {
            arm_debug_set32(thread->machine.DebugData);
        }

        break;
    }

    case ARM_DEBUG_STATE32:
        /* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
    {
        arm_debug_state32_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_DEBUG_STATE32_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state32_t *) tstate;

        /* Single-step requested counts as "debug state enabled". */
        if (state->mdscr_el1 & MDSCR_SS) {
            enabled = TRUE;
        }

        /* Validate all entries before committing any of them. */
        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            free_debug_state(thread);
        } else {
            arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

            if (thread_state == NULL) {
                return KERN_FAILURE;
            }

            /* Only the single-step bit of MDSCR_EL1 is user-controllable. */
            if (state->mdscr_el1 & MDSCR_SS) {
                thread_state->mdscr_el1 |= MDSCR_SS;
            } else {
                thread_state->mdscr_el1 &= ~MDSCR_SS;
            }

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGBCR_MATCH_MASK
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
            }
        }

        if (thread == current_thread()) {
            arm_debug_set32(thread->machine.DebugData);
        }

        break;
    }

    case ARM_DEBUG_STATE64:
    {
        arm_debug_state64_t *state;
        boolean_t enabled = FALSE;
        unsigned int i;

        if (count != ARM_DEBUG_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }
        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_debug_state64_t *) tstate;

        if (state->mdscr_el1 & MDSCR_SS) {
            enabled = TRUE;
        }

        /*
         * Validate before committing.  Note: unlike the 32-bit
         * flavors, wcr is not checked against ARM_DBGBCR_TYPE_MASK
         * (AArch64 watchpoint control registers have no type field).
         */
        for (i = 0; i < 16; i++) {
            /* do not allow context IDs to be set */
            if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
                || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
                || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
                return KERN_PROTECTION_FAILURE;
            }
            if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
                || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
                enabled = TRUE;
            }
        }

        if (!enabled) {
            free_debug_state(thread);
        } else {
            arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

            if (thread_state == NULL) {
                return KERN_FAILURE;
            }

            /* Only the single-step bit of MDSCR_EL1 is user-controllable. */
            if (state->mdscr_el1 & MDSCR_SS) {
                thread_state->mdscr_el1 |= MDSCR_SS;
            } else {
                thread_state->mdscr_el1 &= ~MDSCR_SS;
            }

            for (i = 0; i < 16; i++) {
                /* set appropriate privilege; mask out unknown bits */
                thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
                    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
                    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBGBCR_TYPE_IVA
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
                thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
                    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
                    | ARM_DBGWCR_ACCESS_CONTROL_MASK
                    | ARM_DBG_CR_ENABLE_MASK))
                    | ARM_DBG_CR_LINKED_UNLINKED
                    | ARM_DBG_CR_SECURITY_STATE_BOTH
                    | ARM_DBG_CR_MODE_CONTROL_USER;
                thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
            }
        }

        if (thread == current_thread()) {
            arm_debug_set64(thread->machine.DebugData);
        }

        break;
    }

    case ARM_VFP_STATE:{
        struct arm_vfp_state *state;
        arm_neon_saved_state32_t *thread_state;
        unsigned int max;

        if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        /* VFPv2 carries 32 single-precision registers, VFPv3 carries 64. */
        if (count == ARM_VFPV2_STATE_COUNT) {
            max = 32;
        } else {
            max = 64;
        }

        state = (struct arm_vfp_state *) tstate;
        thread_state = neon_state32(thread->machine.uNeon);
        /* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

        /* max registers plus the trailing fpscr word. */
        bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        break;
    }

    case ARM_NEON_STATE:{
        arm_neon_state_t *state;
        arm_neon_saved_state32_t *thread_state;

        if (count != ARM_NEON_STATE_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state_t *)tstate;
        thread_state = neon_state32(thread->machine.uNeon);

        assert(sizeof(*state) == sizeof(*thread_state));
        bcopy(state, thread_state, sizeof(arm_neon_state_t));

        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
        break;
    }

    case ARM_NEON_STATE64:{
        arm_neon_state64_t *state;
        arm_neon_saved_state64_t *thread_state;

        if (count != ARM_NEON_STATE64_COUNT) {
            return KERN_INVALID_ARGUMENT;
        }

        if (!thread_is_64bit_data(thread)) {
            return KERN_INVALID_ARGUMENT;
        }

        state = (arm_neon_state64_t *)tstate;
        thread_state = neon_state64(thread->machine.uNeon);

        assert(sizeof(*state) == sizeof(*thread_state));
        bcopy(state, thread_state, sizeof(arm_neon_state64_t));


        thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
        thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
        break;
    }


    default:
        return KERN_INVALID_ARGUMENT;
    }
    return KERN_SUCCESS;
}
1292
cb323159
A
1293mach_vm_address_t
1294machine_thread_pc(thread_t thread)
1295{
1296 struct arm_saved_state *ss = get_user_regs(thread);
1297 return (mach_vm_address_t)get_saved_state_pc(ss);
1298}
1299
1300void
1301machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1302{
1303 set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1304}
1305
/*
 * Routine: machine_thread_state_initialize
 *
 * Reset a thread's saved integer and NEON state to a clean baseline:
 * zero both save areas, install the default FPCR for the thread's
 * NEON flavor, and drop any stale debug-state pointer.  With pointer
 * authentication enabled, the zeroed user state is re-signed so later
 * integrity checks pass.
 *
 * Always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_state_initialize(thread_t thread)
{
    arm_context_t *context = thread->machine.contextData;

    /*
     * Should always be set up later. For a kernel thread, we don't care
     * about this state. For a user thread, we'll set the state up in
     * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
     */

    if (context != NULL) {
        bzero(&context->ss.uss, sizeof(context->ss.uss));
        bzero(&context->ns.uns, sizeof(context->ns.uns));

        /* fpcr must not stay zero: install the per-flavor default. */
        if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
            context->ns.ns_64.fpcr = FPCR_DEFAULT;
        } else {
            context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
        }
    }

    thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
    /* Sign the initial user-space thread state */
    if (thread->machine.upcb != NULL) {
        /* Signing must not be interrupted mid-update. */
        boolean_t intr = ml_set_interrupts_enabled(FALSE);
        ml_sign_thread_state(thread->machine.upcb, 0, 0, 0, 0, 0);
        ml_set_interrupts_enabled(intr);
    }
#endif /* defined(HAS_APPLE_PAC) */

    return KERN_SUCCESS;
}
1345
/*
 * Routine: machine_thread_dup
 *
 * Copy `self's user-mode machine state onto `target' (fork/corpse
 * creation path): the TLS base and the full saved register state.
 * With pointer authentication enabled, the copied 64-bit state is
 * verified and re-signed for the target — skipped for corpses, which
 * are never resumed.
 *
 * Always returns KERN_SUCCESS.
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
    struct arm_saved_state *self_saved_state;
    struct arm_saved_state *target_saved_state;

    target->machine.cthread_self = self->machine.cthread_self;

    self_saved_state = self->machine.upcb;
    target_saved_state = target->machine.upcb;
    bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
    if (!is_corpse && is_saved_state64(self_saved_state)) {
        check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
    }
#endif /* defined(HAS_APPLE_PAC) */

    return KERN_SUCCESS;
}
1371
1372/*
cb323159 1373 * Routine: get_user_regs
5ba3f43e
A
1374 *
1375 */
1376struct arm_saved_state *
cb323159 1377get_user_regs(thread_t thread)
5ba3f43e 1378{
0a7de745 1379 return thread->machine.upcb;
5ba3f43e
A
1380}
1381
1382arm_neon_saved_state_t *
cb323159 1383get_user_neon_regs(thread_t thread)
5ba3f43e 1384{
0a7de745 1385 return thread->machine.uNeon;
5ba3f43e
A
1386}
1387
1388/*
cb323159 1389 * Routine: find_user_regs
5ba3f43e
A
1390 *
1391 */
1392struct arm_saved_state *
cb323159 1393find_user_regs(thread_t thread)
5ba3f43e 1394{
0a7de745 1395 return thread->machine.upcb;
5ba3f43e
A
1396}
1397
1398/*
cb323159 1399 * Routine: find_kern_regs
5ba3f43e
A
1400 *
1401 */
1402struct arm_saved_state *
cb323159 1403find_kern_regs(thread_t thread)
5ba3f43e
A
1404{
1405 /*
0a7de745
A
1406 * This works only for an interrupted kernel thread
1407 */
1408 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1409 return (struct arm_saved_state *) NULL;
1410 } else {
1411 return getCpuDatap()->cpu_int_state;
1412 }
5ba3f43e
A
1413}
1414
1415arm_debug_state32_t *
cb323159 1416find_debug_state32(thread_t thread)
5ba3f43e 1417{
0a7de745 1418 if (thread && thread->machine.DebugData) {
5ba3f43e 1419 return &(thread->machine.DebugData->uds.ds32);
0a7de745 1420 } else {
5ba3f43e 1421 return NULL;
0a7de745 1422 }
5ba3f43e
A
1423}
1424
1425arm_debug_state64_t *
cb323159 1426find_debug_state64(thread_t thread)
5ba3f43e 1427{
0a7de745 1428 if (thread && thread->machine.DebugData) {
5ba3f43e 1429 return &(thread->machine.DebugData->uds.ds64);
0a7de745 1430 } else {
5ba3f43e 1431 return NULL;
0a7de745 1432 }
5ba3f43e
A
1433}
1434
f427ee49
A
1435/**
1436 * Finds the debug state for the given 64 bit thread, allocating one if it
1437 * does not exist.
1438 *
1439 * @param thread 64 bit thread to find or allocate debug state for
1440 *
1441 * @returns A pointer to the given thread's 64 bit debug state or a null
1442 * pointer if the given thread is null or the allocation of a new
1443 * debug state fails.
1444 */
1445arm_debug_state64_t *
1446find_or_allocate_debug_state64(thread_t thread)
1447{
1448 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1449 if (thread != NULL && thread_state == NULL) {
1450 thread->machine.DebugData = zalloc(ads_zone);
1451 if (thread->machine.DebugData != NULL) {
1452 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1453 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1454 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1455 thread_state = find_debug_state64(thread);
1456 }
1457 }
1458 return thread_state;
1459}
1460
1461/**
1462 * Finds the debug state for the given 32 bit thread, allocating one if it
1463 * does not exist.
1464 *
1465 * @param thread 32 bit thread to find or allocate debug state for
1466 *
1467 * @returns A pointer to the given thread's 32 bit debug state or a null
1468 * pointer if the given thread is null or the allocation of a new
1469 * debug state fails.
1470 */
1471arm_debug_state32_t *
1472find_or_allocate_debug_state32(thread_t thread)
1473{
1474 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1475 if (thread != NULL && thread_state == NULL) {
1476 thread->machine.DebugData = zalloc(ads_zone);
1477 if (thread->machine.DebugData != NULL) {
1478 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1479 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1480 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1481 thread_state = find_debug_state32(thread);
1482 }
1483 }
1484 return thread_state;
1485}
1486
1487/**
1488 * Frees a thread's debug state if allocated. Otherwise does nothing.
1489 *
1490 * @param thread thread to free the debug state of
1491 */
1492static inline void
1493free_debug_state(thread_t thread)
1494{
1495 if (thread != NULL && thread->machine.DebugData != NULL) {
1496 void *pTmp = thread->machine.DebugData;
1497 thread->machine.DebugData = NULL;
1498 zfree(ads_zone, pTmp);
1499 }
1500}
1501
5ba3f43e 1502/*
cb323159 1503 * Routine: thread_userstack
5ba3f43e
A
1504 *
1505 */
1506kern_return_t
cb323159
A
1507thread_userstack(__unused thread_t thread,
1508 int flavor,
1509 thread_state_t tstate,
1510 unsigned int count,
1511 mach_vm_offset_t * user_stack,
1512 int * customstack,
1513 boolean_t is_64bit_data
1514 )
5ba3f43e
A
1515{
1516 register_t sp;
1517
1518 switch (flavor) {
1519 case ARM_THREAD_STATE:
1520 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1521#if __arm64__
d9a64523 1522 if (is_64bit_data) {
5ba3f43e
A
1523 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1524 } else
1525#endif
1526 {
1527 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1528 }
1529
1530 break;
1531 }
1532
f427ee49
A
1533 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1534 OS_FALLTHROUGH;
5ba3f43e 1535 case ARM_THREAD_STATE32:
0a7de745
A
1536 if (count != ARM_THREAD_STATE32_COUNT) {
1537 return KERN_INVALID_ARGUMENT;
1538 }
1539 if (is_64bit_data) {
1540 return KERN_INVALID_ARGUMENT;
1541 }
5ba3f43e
A
1542
1543 sp = ((arm_thread_state32_t *)tstate)->sp;
1544 break;
1545#if __arm64__
1546 case ARM_THREAD_STATE64:
0a7de745
A
1547 if (count != ARM_THREAD_STATE64_COUNT) {
1548 return KERN_INVALID_ARGUMENT;
1549 }
1550 if (!is_64bit_data) {
1551 return KERN_INVALID_ARGUMENT;
1552 }
5ba3f43e
A
1553
1554 sp = ((arm_thread_state32_t *)tstate)->sp;
1555 break;
1556#endif
1557 default:
0a7de745 1558 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1559 }
1560
1561 if (sp) {
1562 *user_stack = CAST_USER_ADDR_T(sp);
0a7de745 1563 if (customstack) {
5ba3f43e 1564 *customstack = 1;
0a7de745 1565 }
5ba3f43e
A
1566 } else {
1567 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
0a7de745 1568 if (customstack) {
5ba3f43e 1569 *customstack = 0;
0a7de745 1570 }
5ba3f43e
A
1571 }
1572
0a7de745 1573 return KERN_SUCCESS;
5ba3f43e
A
1574}
1575
1576/*
1577 * thread_userstackdefault:
1578 *
1579 * Return the default stack location for the
1580 * thread, if otherwise unknown.
1581 */
1582kern_return_t
cb323159
A
1583thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1584 boolean_t is64bit)
5ba3f43e
A
1585{
1586 if (is64bit) {
1587 *default_user_stack = USRSTACK64;
1588 } else {
1589 *default_user_stack = USRSTACK;
1590 }
1591
0a7de745 1592 return KERN_SUCCESS;
5ba3f43e
A
1593}
1594
1595/*
cb323159 1596 * Routine: thread_setuserstack
5ba3f43e
A
1597 *
1598 */
1599void
cb323159
A
1600thread_setuserstack(thread_t thread,
1601 mach_vm_address_t user_stack)
5ba3f43e
A
1602{
1603 struct arm_saved_state *sv;
1604
1605 sv = get_user_regs(thread);
1606
1607 set_saved_state_sp(sv, user_stack);
1608
1609 return;
1610}
1611
1612/*
cb323159 1613 * Routine: thread_adjuserstack
5ba3f43e
A
1614 *
1615 */
f427ee49 1616user_addr_t
cb323159
A
1617thread_adjuserstack(thread_t thread,
1618 int adjust)
5ba3f43e
A
1619{
1620 struct arm_saved_state *sv;
1621 uint64_t sp;
1622
1623 sv = get_user_regs(thread);
1624
1625 sp = get_saved_state_sp(sv);
1626 sp += adjust;
1627 set_saved_state_sp(sv, sp);;
1628
1629 return sp;
1630}
1631
f427ee49 1632
5ba3f43e 1633/*
cb323159 1634 * Routine: thread_setentrypoint
5ba3f43e
A
1635 *
1636 */
1637void
cb323159
A
1638thread_setentrypoint(thread_t thread,
1639 mach_vm_offset_t entry)
5ba3f43e
A
1640{
1641 struct arm_saved_state *sv;
1642
1643 sv = get_user_regs(thread);
1644
1645 set_saved_state_pc(sv, entry);
1646
1647 return;
1648}
1649
1650/*
cb323159 1651 * Routine: thread_entrypoint
5ba3f43e
A
1652 *
1653 */
1654kern_return_t
cb323159
A
1655thread_entrypoint(__unused thread_t thread,
1656 int flavor,
1657 thread_state_t tstate,
eb6b6ca3 1658 unsigned int count,
cb323159
A
1659 mach_vm_offset_t * entry_point
1660 )
5ba3f43e
A
1661{
1662 switch (flavor) {
1663 case ARM_THREAD_STATE:
0a7de745
A
1664 {
1665 struct arm_thread_state *state;
5ba3f43e 1666
eb6b6ca3
A
1667 if (count != ARM_THREAD_STATE_COUNT) {
1668 return KERN_INVALID_ARGUMENT;
1669 }
1670
0a7de745 1671 state = (struct arm_thread_state *) tstate;
5ba3f43e 1672
0a7de745
A
1673 /*
1674 * If a valid entry point is specified, use it.
1675 */
1676 if (state->pc) {
1677 *entry_point = CAST_USER_ADDR_T(state->pc);
1678 } else {
1679 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1680 }
0a7de745
A
1681 }
1682 break;
5ba3f43e
A
1683
1684 case ARM_THREAD_STATE64:
0a7de745
A
1685 {
1686 struct arm_thread_state64 *state;
5ba3f43e 1687
eb6b6ca3
A
1688 if (count != ARM_THREAD_STATE64_COUNT) {
1689 return KERN_INVALID_ARGUMENT;
1690 }
1691
0a7de745 1692 state = (struct arm_thread_state64*) tstate;
5ba3f43e 1693
0a7de745
A
1694 /*
1695 * If a valid entry point is specified, use it.
1696 */
1697 if (state->pc) {
1698 *entry_point = CAST_USER_ADDR_T(state->pc);
1699 } else {
1700 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
5ba3f43e 1701 }
0a7de745
A
1702
1703 break;
1704 }
5ba3f43e 1705 default:
0a7de745 1706 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
1707 }
1708
0a7de745 1709 return KERN_SUCCESS;
5ba3f43e
A
1710}
1711
1712
1713/*
cb323159 1714 * Routine: thread_set_child
5ba3f43e
A
1715 *
1716 */
1717void
cb323159
A
1718thread_set_child(thread_t child,
1719 int pid)
5ba3f43e
A
1720{
1721 struct arm_saved_state *child_state;
1722
1723 child_state = get_user_regs(child);
1724
1725 set_saved_state_reg(child_state, 0, pid);
1726 set_saved_state_reg(child_state, 1, 1ULL);
1727}
1728
1729
1730/*
cb323159 1731 * Routine: thread_set_parent
5ba3f43e
A
1732 *
1733 */
1734void
cb323159
A
1735thread_set_parent(thread_t parent,
1736 int pid)
5ba3f43e
A
1737{
1738 struct arm_saved_state *parent_state;
1739
1740 parent_state = get_user_regs(parent);
1741
1742 set_saved_state_reg(parent_state, 0, pid);
1743 set_saved_state_reg(parent_state, 1, 0);
1744}
1745
1746
/*
 * Snapshot of a thread's machine context, captured by act_thread_csave()
 * and restored by act_thread_catt(): unified integer state plus (when
 * VFP is configured) NEON state.
 */
struct arm_act_context {
    struct arm_unified_thread_state ss;
#if __ARM_VFP__
    struct arm_neon_saved_state ns;
#endif
};
1753
1754/*
cb323159 1755 * Routine: act_thread_csave
5ba3f43e
A
1756 *
1757 */
cb323159 1758void *
5ba3f43e
A
1759act_thread_csave(void)
1760{
1761 struct arm_act_context *ic;
1762 kern_return_t kret;
1763 unsigned int val;
1764 thread_t thread = current_thread();
1765
1766 ic = (struct arm_act_context *) kalloc(sizeof(struct arm_act_context));
0a7de745
A
1767 if (ic == (struct arm_act_context *) NULL) {
1768 return (void *) 0;
1769 }
5ba3f43e
A
1770
1771 val = ARM_UNIFIED_THREAD_STATE_COUNT;
1772 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
1773 if (kret != KERN_SUCCESS) {
1774 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1775 return (void *) 0;
5ba3f43e
A
1776 }
1777
1778#if __ARM_VFP__
d9a64523 1779 if (thread_is_64bit_data(thread)) {
5ba3f43e
A
1780 val = ARM_NEON_STATE64_COUNT;
1781 kret = machine_thread_get_state(thread,
0a7de745 1782 ARM_NEON_STATE64,
cb323159 1783 (thread_state_t)&ic->ns,
0a7de745 1784 &val);
5ba3f43e
A
1785 } else {
1786 val = ARM_NEON_STATE_COUNT;
1787 kret = machine_thread_get_state(thread,
0a7de745 1788 ARM_NEON_STATE,
cb323159 1789 (thread_state_t)&ic->ns,
0a7de745 1790 &val);
5ba3f43e
A
1791 }
1792 if (kret != KERN_SUCCESS) {
1793 kfree(ic, sizeof(struct arm_act_context));
0a7de745 1794 return (void *) 0;
5ba3f43e
A
1795 }
1796#endif
0a7de745 1797 return ic;
5ba3f43e
A
1798}
1799
1800/*
cb323159 1801 * Routine: act_thread_catt
5ba3f43e
A
1802 *
1803 */
1804void
cb323159 1805act_thread_catt(void * ctx)
5ba3f43e
A
1806{
1807 struct arm_act_context *ic;
1808 kern_return_t kret;
1809 thread_t thread = current_thread();
1810
1811 ic = (struct arm_act_context *) ctx;
0a7de745 1812 if (ic == (struct arm_act_context *) NULL) {
5ba3f43e 1813 return;
0a7de745 1814 }
5ba3f43e
A
1815
1816 kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
0a7de745 1817 if (kret != KERN_SUCCESS) {
5ba3f43e 1818 goto out;
0a7de745 1819 }
5ba3f43e
A
1820
1821#if __ARM_VFP__
d9a64523 1822 if (thread_is_64bit_data(thread)) {
5ba3f43e 1823 kret = machine_thread_set_state(thread,
0a7de745 1824 ARM_NEON_STATE64,
cb323159 1825 (thread_state_t)&ic->ns,
0a7de745 1826 ARM_NEON_STATE64_COUNT);
5ba3f43e
A
1827 } else {
1828 kret = machine_thread_set_state(thread,
0a7de745 1829 ARM_NEON_STATE,
cb323159 1830 (thread_state_t)&ic->ns,
0a7de745 1831 ARM_NEON_STATE_COUNT);
5ba3f43e 1832 }
0a7de745 1833 if (kret != KERN_SUCCESS) {
5ba3f43e 1834 goto out;
0a7de745 1835 }
5ba3f43e
A
1836#endif
1837out:
1838 kfree(ic, sizeof(struct arm_act_context));
1839}
1840
/*
 * Routine: act_thread_cfree
 *
 * Release a context captured by act_thread_csave() without
 * restoring it.  (Header previously mislabeled this routine as
 * act_thread_catt.)
 */
void
act_thread_cfree(void *ctx)
{
    kfree(ctx, sizeof(struct arm_act_context));
}
1850
/*
 * Install a 32-bit workqueue thread state on `thread', forcing the
 * default user-mode CPSR.  If the target is not the calling thread,
 * it is locked at splsched() for the update.
 *
 * Always returns KERN_SUCCESS; the caller must pass a 32-bit-data
 * thread (asserted).
 */
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
    arm_thread_state_t *state;
    struct arm_saved_state *saved_state;
    struct arm_saved_state32 *saved_state_32;
    thread_t curth = current_thread();
    spl_t s = 0;

    assert(!thread_is_64bit_data(thread));

    saved_state = thread->machine.upcb;
    saved_state_32 = saved_state32(saved_state);

    state = (arm_thread_state_t *)tstate;

    /* Only lock when mutating another thread's state. */
    if (curth != thread) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * do not zero saved_state, it can be concurrently accessed
     * and zero is not a valid state for some of the registers,
     * like sp.
     */
    thread_state32_to_saved_state(state, saved_state);
    saved_state_32->cpsr = PSR64_USER32_DEFAULT;

    if (curth != thread) {
        thread_unlock(thread);
        splx(s);
    }

    return KERN_SUCCESS;
}
1888
/*
 * Install a 64-bit workqueue thread state on `thread', forcing the
 * default user-mode CPSR via set_saved_state_cpsr() (which also
 * maintains any state integrity protection).  If the target is not
 * the calling thread, it is locked at splsched() for the update.
 *
 * Always returns KERN_SUCCESS; the caller must pass a 64-bit-data
 * thread (asserted).
 */
kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
    arm_thread_state64_t *state;
    struct arm_saved_state *saved_state;
    struct arm_saved_state64 *saved_state_64;
    thread_t curth = current_thread();
    spl_t s = 0;

    assert(thread_is_64bit_data(thread));

    saved_state = thread->machine.upcb;
    saved_state_64 = saved_state64(saved_state);
    state = (arm_thread_state64_t *)tstate;

    /* Only lock when mutating another thread's state. */
    if (curth != thread) {
        s = splsched();
        thread_lock(thread);
    }

    /*
     * do not zero saved_state, it can be concurrently accessed
     * and zero is not a valid state for some of the registers,
     * like sp.
     */
    thread_state64_to_saved_state(state, saved_state);
    set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

    if (curth != thread) {
        thread_unlock(thread);
        splx(s);
    }

    return KERN_SUCCESS;
}